metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "Aesthetics.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/ares/util/Aesthetics.py",
"type": "Python"
}
|
"""
Aesthetics.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Wed Sep 24 16:15:52 MDT 2014
Description:
"""
import os, imp, re
import numpy as np
from matplotlib import cm
from .ParameterFile import par_info
from matplotlib.colors import ListedColormap
# Charlotte's color-maps: two 7-color discrete palettes, exposed both as
# ListedColormap objects and as z -> RGBA convenience callables below.
_charlotte1 = ['#301317','#3F2A3D','#2D4A60','#036B66','#48854D','#9D9436','#F69456']
_charlotte2 = ['#001316', '#2d2779', '#9c207e', '#c5492a', '#819c0c', '#3dd470', '#64cdf6']
cmap_charlotte1 = ListedColormap(_charlotte1, name='charlotte1')
cmap_charlotte2 = ListedColormap(_charlotte2, name='charlotte2')
# Redshift grid the palettes are meant to span (z = 4..10 inclusive).
_zall = np.arange(4, 11, 1)
# NOTE(review): _znormed is computed but never used below; _normz re-derives
# the same normalization lazily.
_znormed = (_zall - _zall[0]) / float(_zall[-1] - _zall[0])
# NOTE(review): passing a Colormap instance (not a name) to cm.get_cmap to
# resample it to _zall.size colors; this get_cmap API is deprecated in
# modern matplotlib — confirm the pinned matplotlib version supports it.
_ch_c1 = cm.get_cmap(cmap_charlotte1, _zall.size)
_ch_c2 = cm.get_cmap(cmap_charlotte2, _zall.size)
# Map a redshift in [4, 10] onto [0, 1] for colormap lookup.
_normz = lambda zz: (zz - _zall[0]) / float(_zall[-1] - _zall[0])
colors_charlotte1 = lambda z: _ch_c1(_normz(z))
colors_charlotte2 = lambda z: _ch_c2(_normz(z))
# Load custom defaults: if the user has a ~/.ares/labels.py exposing a `pf`
# dict, its entries override the built-in label tables (merged last, below).
# NOTE(review): the `imp` module is deprecated since Python 3.4 (importlib
# is the replacement) — works here but emits a DeprecationWarning.
HOME = os.environ.get('HOME')
if os.path.exists('{!s}/.ares/labels.py'.format(HOME)):
    f, filename, data = imp.find_module('labels', ['{!s}/.ares/'.format(HOME)])
    custom_labels = imp.load_module('labels.py', f, filename, data).pf
else:
    custom_labels = {}
# Phase prefixes that may decorate state/rate names: bulk IGM vs. ionized
# bubbles ("cgm" in ares parlance).
prefixes = ['igm_', 'cgm_']
#
## Common axis labels
label_flux_nrg = r'$J_{\nu} \ (\mathrm{erg} \ \mathrm{s}^{-1} \ \mathrm{cm}^{-2} \ \mathrm{Hz}^{-1} \ \mathrm{sr}^{-1})$'
label_flux_phot = r'$J_{\nu} \ (\mathrm{s}^{-1} \ \mathrm{cm}^{-2} \ \mathrm{Hz}^{-1} \ \mathrm{sr}^{-1})$'
label_flux_nw = r'$J_{\nu} \ [\mathrm{nW} \ \mathrm{m}^{-2} \ \mathrm{sr}^{-1}]$'
label_logflux_nw = r'$\log_{10} (J_{\nu} / [\mathrm{nW} \ \mathrm{m}^{-2} \ \mathrm{sr}^{-1}])$'
label_power_nw = r'$q^2 P(q)/(2\pi) \ (\mathrm{nW}^2 \ \mathrm{m}^{-4} \ \mathrm{sr}^{-2})$'
label_power_nw_sqrt = r'$\sqrt{q^2 P(q)/(2\pi)} \ (\mathrm{nW} \ \mathrm{m}^{-2} \ \mathrm{sr}^{-1})$'
label_power_Cl_sqrt = r'$\left[ l(l+1) C_l^{\nu \nu^{\prime}} / (2\pi) \right]^{1/2} \ (\mathrm{nW} \ \mathrm{m}^{-2} \ \mathrm{sr}^{-1})$'
label_power_Cl = r'$l(l+1) C_l^{\nu \nu^{\prime}} / (2\pi) \ (\mathrm{nW}^{2} \ \mathrm{m}^{-4} \ \mathrm{sr}^{-2})$'
label_flux_nuInu = r'$\nu I_{\nu} \ (\mathrm{nW} \ \mathrm{m}^{-2} \ \mathrm{sr}^{-1})$'
label_nrg = r'$h\nu \ (\mathrm{eV})$'
label_heat_mpc = r'$\epsilon_{\mathrm{heat}} \ (\mathrm{erg} \ \mathrm{s}^{-1} \ \mathrm{cMpc}^{-3})$'
label_dTbdnu = r'$d (\delta T_{\mathrm{b}}) / d\nu \ (\mathrm{mK/MHz})$'
label_MAR = r'$\dot{M}_h \ [M_{\odot} \ \mathrm{yr}^{-1}]$'
label_logMAR = r'$\log_{10} \left(\dot{M}_h / [M_{\odot} \ \mathrm{yr}^{-1}]\right)$'
label_L_nu = r'$L_{\nu} \ [\mathrm{erg} \ \mathrm{s}^{-1} \ \mathrm{Hz}^{-1}]$'
label_L_lam = r'$L_{\lambda} \ [\mathrm{erg} \ \mathrm{s}^{-1} \ \AA^{-1}]$'
# Ionization-state fractions and gas temperature.
states = \
{
    'h_1': r'$x_{\mathrm{HI}}$',
    'h_2': r'$x_{\mathrm{HII}}$',
    'he_1': r'$x_{\mathrm{HeI}}$',
    'he_2': r'$x_{\mathrm{HeII}}$',
    'he_3': r'$x_{\mathrm{HeIII}}$',
    'Tk': r'$T_K$',
}
# Ionization / heating / dissociation rate coefficients.
rates = \
{
    'k_ion': r'$\kappa_{\mathrm{ion}}$',
    'k_ion2': r'$\kappa_{\mathrm{ion, sec}}$',
    'k_heat': r'$\kappa_{\mathrm{heat}}$',
    'k_diss': r'$\kappa_{\mathrm{diss}}$',
}
# Quantities derived from the 21-cm global signal.
derived = \
{
    'Ts': r'$T_S$',
    'dTb': r'$\delta T_b \ (\mathrm{mK})$',
    #'hwhm_diff': r'$\Delta \nu_{\min}$',
    #'squash': r'$\delta T_b(\nu_{\min}) / \mathrm{FWHM}$',
    'hwhm_diff': r'$\mathcal{A} \ (\mathrm{MHz})$',
    'squash': r'$\mathcal{W} \ (\mathrm{mK} \ \mathrm{MHz}^{-1})$',
    'fwhm': r'$\mathrm{FWHM}$',
    'fwqm': r'$\mathrm{FWQM}$',
    'mean_slope': r'$\langle \delta T_b^{\prime} \rangle$',
    'mean_slope_hi': r'$\langle \delta T_b^{\prime} \rangle_{\mathrm{hi}}$',
    'mean_slope_lo': r'$\langle \delta T_b^{\prime} \rangle_{\mathrm{lo}}$',
}
# Master lookup table: start from the state/rate/derived tables, then add
# 'igm_'/'cgm_'-prefixed aliases mapping to the same label strings.
labels = {}
for _table in (states, rates, derived):
    labels.update(_table)
# Also account for prefixes
labels_w_prefix = {
    '{0!s}{1!s}'.format(pre, name): lab
    for pre in prefixes
    for name, lab in labels.items()
}
labels.update(labels_w_prefix)
# Axis labels for commonly-plotted quantities; merged into `labels` below.
# Fixes vs. original: igm_k_heat_he_1 / igm_k_heat_he_2 were copy-pasted with
# the HI subscript, and cgm_k_ion_h_1 / igm_k_ion_h_1 carried an extra '}'
# (unbalanced braces, which breaks TeX/mathtext rendering).
common = \
{
    'nu': r'$\nu \ (\mathrm{MHz})$',
    't_myr': r'$t \ (\mathrm{Myr})$',
    'flux': label_flux_phot,
    'flux_E': label_flux_nrg,
    'flux_nW': label_flux_nw,
    'logflux_nW': label_logflux_nw,
    'power_nirb': label_power_nw,
    'power_nirb_sqrt': label_power_nw_sqrt,
    'power_nirb_Cl_sqrt': label_power_Cl_sqrt,
    'power_nirb_Cl': label_power_Cl,
    'angular_scale_q_min': r'$2 \pi / q \ [\mathrm{arcmin}]$',
    'angular_scale_q_sec': r'$2 \pi / q \ [\mathrm{arcsec}]$',
    'angular_scale_l': r'Multipole moment, $l$',
    'flux_nuInu': label_flux_nuInu,
    'intensity_AA': r'$\mathrm{erg} \ \mathrm{s}^{-1} \ \mathrm{\AA}^{-1}$',
    'lambda_AA': r'$\lambda \ (\AA)$',
    'L_nu': label_L_nu,
    'L_lam': label_L_lam,
    'E': label_nrg,
    'heat_mpc': label_heat_mpc,
    'dTbdnu': label_dTbdnu,
    'fX': r'$f_X$',
    'fstar': r'$f_{\ast}$',
    'fesc': r'$f_{\mathrm{esc}}$',
    'Nion': r'$N_{\mathrm{ion}}$',
    'Tmin': r'$T_{\mathrm{min}}$',
    'MAR': label_MAR,
    'logMAR': label_logMAR,
    'Nlw': r'$N_{\alpha}$',
    'fbh': r'$f_{\bullet}$',
    'xi_XR': r'$\xi_{X}$',
    'xi_LW': r'$\xi_{\mathrm{LW}}$',
    'xi_UV': r'$\xi_{\mathrm{ion}}$',
    'sfrd': r'$\dot{\rho}_{\ast} \ [M_{\odot} \ \mathrm{yr}^{-1} \ \mathrm{cMpc}^{-3}]$',
    'sfr': r'$\dot{M}_{\ast} \ [M_{\odot} \ \mathrm{yr}^{-1}]$',
    'logsfr': r'$\log_{10} \dot{M}_{\ast} \ [M_{\odot} \ \mathrm{yr}^{-1}]$',
    'emissivity': r'$\epsilon \ [\mathrm{erg} \ \mathrm{s}^{-1} \ \mathrm{cMpc}^{-3}]$',
    'nh': r'$n_h \ [\mathrm{cMpc}^{-3}]$',
    'extinction_redshift': r'$z_{\mathrm{ext}}$',
    'source_logN': r'$\log_{10} N_{\mathrm{H}}$',
    'source_alpha': r'$\alpha$',
    'source_temperature': r'$T_{\ast}$',
    'z': r'$z$',
    # X-ray heating rates, per absorbing species.
    'igm_k_heat_h_1': r'$\epsilon_{X, \mathrm{HI}}$',
    'igm_k_heat_he_1': r'$\epsilon_{X, \mathrm{HeI}}$',
    'igm_k_heat_he_2': r'$\epsilon_{X, \mathrm{HeII}}$',
    'igm_k_heat': r'$\epsilon_X$',
    'cgm_k_ion_h_1': r'$\Gamma_{\mathrm{HI},\mathrm{cgm}}$',
    'igm_k_ion_h_1': r'$\Gamma_{\mathrm{HI},\mathrm{igm}}$',
    'igm_k_ion_he_1': r'$\Gamma_{\mathrm{HeI}}$',
    'igm_k_ion_he_2': r'$\Gamma_{\mathrm{HeII}}$',
    'igm_k_ion2_h_1': r'$\gamma_{\mathrm{HI}}$',
    'igm_k_ion2_he_1': r'$\gamma_{\mathrm{HeI}}$',
    'igm_k_ion2_he_2': r'$\gamma_{\mathrm{HeII}}$',
    # Partial secondary ionizations
    'igm_k_ion2_h_1_h_1': r'$\gamma_{\mathrm{HI},\mathrm{HI}}$',
    'igm_k_ion2_h_1_he_1': r'$\gamma_{\mathrm{HI}, \mathrm{HeI}}$',
    'igm_k_ion2_h_1_he_2': r'$\gamma_{\mathrm{HI}, \mathrm{HeII}}$',
    'Tk': r'$T_K \ (\mathrm{K})$',
    'tau_e': r'$\tau_e$',
    'tau_tot': r'$\tau_e$',
    'z_dec': r'$z_{\mathrm{dec}}$',
    'skewness_absorption': r'$\mu_{3, \mathrm{abs}}$',
    'kurtosis_absorption': r'$\mu_{4, \mathrm{abs}}$',
    'skewness_emission': r'$\mu_{3, \mathrm{em}}$',
    'kurtosis_emission': r'$\mu_{4, \mathrm{em}}$',
    'igm_initial_temperature': r'$T_0$',
}
##
#
# Labels for quantities stored in a simulation's `history` dict.
history_elements = \
{
    'igm_h_1': r'$x_{\mathrm{HI}}$',
    'igm_h_2': r'$x_{\mathrm{HII}}$',
    'igm_he_1': r'$x_{\mathrm{HeI}}$',
    'igm_he_2': r'$x_{\mathrm{HeII}}$',
    'igm_he_3': r'$x_{\mathrm{HeIII}}$',
    'igm_Tk': r'$T_K$',
    # Volume filling fraction of ionized bubbles.
    'cgm_h_2': r'$Q_{\mathrm{HII}}$',
    'xavg': r'$\overline{x}_i$',
    'Ts': r'$T_S$',
    'z': r'$z$',
    'nu': r'$\nu$',
    # Lyman-alpha and Lyman-Werner backgrounds.
    'Ja': r'$J_{\alpha}$',
    'Jlw': r'$J_{\mathrm{LW}}$',
    'dTb': r'$\delta T_b \ (\mathrm{mK})$',
    'dlogTk_dlogt': r'$d\log T_K / d\log t$',
    # First and second frequency derivatives of the global signal.
    'slope': r'$\delta^{\prime} T_b \ [\mathrm{mK} \ \mathrm{MHz}^{-1}]$',
    'curvature': r'$\delta^{\prime \prime} T_b \ [\mathrm{mK}^2 \ \mathrm{MHz}^{-2}]$',
}
# Labels for history quantities evaluated at the 21-cm turning points
# (A-D, plus 'ZC') and at their primed (derivative-based) counterparts.
tp_parameters = {}
# BUGFIX: the original did `hist_plus_derived = history_elements`, which
# aliases the dict, so the subsequent update() silently injected all of the
# `derived` labels into `history_elements` itself. Build a copy instead.
hist_plus_derived = dict(history_elements)
hist_plus_derived.update(derived)
for key in hist_plus_derived:
    for tp in ['A', 'B', 'C', 'D', 'ZC']:
        # For 'z'/'nu' the turning point goes in a subscript (e.g. $z_B$);
        # everything else is written as a function of nu_tp.
        # [0:-1] drops the label's trailing '$' so we can extend it.
        if key in ['z', 'nu']:
            tp_parameters['{0!s}_{1!s}'.format(key, tp)] = \
                r'{0!s}_{{\mathrm{{{1!s}}}}}$'.format(hist_plus_derived[key][0:-1], tp)
        else:
            tp_parameters['{0!s}_{1!s}'.format(key, tp)] = \
                r'{0!s}(\nu_{{\mathrm{{{1!s}}}}})$'.format(hist_plus_derived[key][0:-1], tp)
# Primed versions, e.g. 'z_Bp' -> $z_{B}^{\prime}$.
for key in hist_plus_derived:
    for tp in ['A', 'B', 'C', 'D']:
        if key in ['z', 'nu']:
            tp_parameters['{0!s}_{1!s}p'.format(key, tp)] = \
                r'{0!s}_{{\mathrm{{{1!s}}}}}^{{\prime}}$'.format(hist_plus_derived[key][0:-1], tp)
        else:
            tp_parameters['{0!s}_{1!s}p'.format(key, tp)] = \
                r'{0!s}(\nu_{{\mathrm{{{1!s}}}}}^{{\prime}})$'.format(hist_plus_derived[key][0:-1], tp)
# Parameters of the tanh phenomenological model for the global signal
# (amplitude / midpoint redshift / redshift width for J, T, and x).
tanh_parameters = \
{
    'tanh_J0': r'$\left(J_0 / J_{21}\right)$',
    'tanh_Jz0': r'$z_J$',
    'tanh_Jdz': r'$\Delta z_J$',
    'tanh_T0': r'$T_0$',
    'tanh_Tz0': r'$z_T$',
    'tanh_Tdz': r'$\Delta z_T$',
    'tanh_x0': r'$\overline{x}_{i,0}$',
    'tanh_xz0': r'$z_x$',
    'tanh_xdz': r'$\Delta z_x$',
    'tanh_bias_freq': r'$b_{\nu}$',
    'tanh_bias_temp': r'$b_{\mathrm{T}}$',
}
# Parameters of the Gaussian absorption-trough model.
gauss_parameters = \
{
    'gaussian_A': r'$A_0 \ (\mathrm{mK})$',
    'gaussian_nu': r'$\nu_0 \ (\mathrm{MHz})$',
    'gaussian_sigma': r'$\sigma_0 \ (\mathrm{MHz})$',
}
# Schechter-function (luminosity function) parameters and related quantities.
lf_parameters = \
{
    'MUV': r'$M_{\mathrm{UV}}$',
    'pop_lf_Mstar': r'$M_{\ast}$',
    'pop_lf_pstar': r'$\phi_{\ast}$',
    'pop_lf_alpha': r'$\alpha$',
    'Mpeak': r'$M_{\mathrm{peak}}$',
    'fpeak': r'$f_{\ast} (M_{\mathrm{peak}})$',
    'gamma': r'$\gamma$',
    'Mh': r'$M_h / M_{\odot}$',
    'Lh': r'$L_h / (\mathrm{erg} \ \mathrm{s}^{-1} \ \mathrm{Hz}^{-1})$',
}
# Population ('pop_*') parameter labels.
# BUGFIX: 'pop_lf_beta' used r'$\Beta_{...}$' — '\Beta' is not a valid
# LaTeX/mathtext command (uppercase beta is just 'B'), so rendering the label
# raised an error. The UV continuum slope is conventionally lowercase beta.
pop_parameters = \
{
    'pop_Z': r'$Z/Z_{\odot}$',
    'pop_sfr': r'$\dot{M}_{\ast}$',
    'pop_lf_beta': r'$\beta_{\mathrm{UV}}$',
    'pop_fstar': r'$f_{\ast}$',
    'pop_fobsc': r'$f_{\mathrm{obsc}}$',
    'fobsc': r'$f_{\mathrm{obsc}}$',
    'pop_acc_frac_stellar': r'$f_{\ast}^{\mathrm{acc}}$',
    'pop_acc_frac_metals': r'$f_Z^{\mathrm{acc}}$',
    'pop_acc_frac_gas': r'$f_g^{\mathrm{acc}}$',
    'pop_metal_retention': r'$f_{\mathrm{ret,Z}}$',
    'pop_abun_limit': r'$\mathcal{Z}_c$',
    'pop_bind_limit': r'$\mathcal{E}_c$',
    'pop_time_limit': r'$\mathcal{T}_c$',
}
# Luminosity / stellar-mass function labels, keyed by the name of the
# quantity being plotted.
sfe_parameters = \
{
    "lf": r'$\phi(M_{1600}) \ [\mathrm{mag}^{-1} \ \mathrm{cMpc}^{-3}]$',
    "galaxy_lf": r'$\phi(M_{1600}) \ [\mathrm{mag}^{-1} \ \mathrm{cMpc}^{-3}]$',
    "galaxy_lf_muv": r'$\phi(M_{\mathrm{UV}}) \ [\mathrm{mag}^{-1} \ \mathrm{cMpc}^{-3}]$',
    "galaxy_lf_mag": r'$\phi(M) \ [\mathrm{mag}^{-1} \ \mathrm{cMpc}^{-3}]$',
    "galaxy_lf_1500": r'$\phi(M_{1500}) \ [\mathrm{mag}^{-1} \ \mathrm{cMpc}^{-3}]$',
    "galaxy_lf_1600": r'$\phi(M_{1600}) \ [\mathrm{mag}^{-1} \ \mathrm{cMpc}^{-3}]$',
    "galaxy_smf": r'$\phi(M_{\ast}) \ [\mathrm{dex}^{-1} \ \mathrm{cMpc}^{-3}]$',
}
# Generic labels for the first six parameterized-quantity function parameters.
sfe_parameters.update(
    {'pq_func_par{}'.format(j): r'$p_{%i}$' % j for j in range(6)}
)
# 21-cm power spectrum axis labels.
powspec = \
{
    'k': r'$k \ [\mathrm{cMpc}^{-1}]$',
    'dpow': r'$\overline{\delta T_b}^2 \Delta_{21}^2 \ \left[\mathrm{mK}^2 \right]$',
    'pow': r'$P(k)$',
}
# Miscellaneous labels that don't fit the other tables.
other = \
{
    'load': 'processor #',
    'contrast': r'$1 - T_{\gamma} / T_S$',
}
# Merge every table into the master `labels` dict. Later updates win, so
# entries in `common` override same-named keys from `history_elements`, and
# the user's `custom_labels` (from ~/.ares/labels.py) override everything.
labels.update(history_elements)
labels.update(tanh_parameters)
labels.update(gauss_parameters)
labels.update(other)
labels.update(common)
labels.update(lf_parameters)
labels.update(pop_parameters)
labels.update(tp_parameters)
labels.update(sfe_parameters)
labels.update(powspec)
# Add custom labels
labels.update(custom_labels)
def logify_str(s, sup=None):
    """
    Wrap a (possibly math-mode) label in log10(...).

    Parameters
    ----------
    s : str
        Label to wrap; any '$' delimiters are stripped first.
    sup : str, optional
        Extra text appended in square brackets (e.g. units), also stripped
        of '$'.

    Returns
    -------
    str
        Math-mode string of the form ``$\\mathrm{log}_{10}<s>[<sup>]$``.
    """
    s_no_dollar = str(s).replace('$', '')
    new_s = s_no_dollar
    if sup is not None:
        # BUGFIX: the original called an undefined `sup_scriptify_str(s)`,
        # which raised NameError whenever `sup` was supplied (and ignored
        # `sup` itself). Strip the '$'s from `sup` directly instead.
        new_s += '[{!s}]'.format(str(sup).replace('$', ''))
    return r'$\mathrm{log}_{10}' + new_s + '$'
def undo_mathify(s):
    """Strip TeX math delimiters ('$') from *s* and return a plain str."""
    return str(s).replace('$', '')
def mathify_str(s):
    """Wrap *s* in '$' so matplotlib renders it in math mode."""
    return '${!s}$'.format(s)
class Labeler(object): # pragma: no cover
    """
    Map ares parameter names to pretty TeX axis labels.

    Parameters
    ----------
    pars : list of str
        Parameter names being labeled.
    is_log : bool or list
        If bool, applies to all parameters; if a list, matched element-wise
        against `pars`.
    extra_labels : dict, optional
        Extra name -> label entries that override the module-level `labels`.
    kwargs : dict
        Base parameter-file kwargs; searched for '*units*' entries and for
        parameterized-quantity ('pq[...]') definitions.
    """
    def __init__(self, pars, is_log=False, extra_labels=None, **kwargs):
        self.pars = self.parameters = pars
        self.base_kwargs = kwargs
        # Idiom fix: avoid a mutable default argument; behavior unchanged.
        self.extras = {} if extra_labels is None else extra_labels
        # Copy so per-instance overrides never mutate the module-level table.
        self.labels = labels.copy()
        self.labels.update(self.extras)

        if type(is_log) == bool:
            self.is_log = {par: is_log for par in pars}
        else:
            self.is_log = {}
            for par in pars:
                if par in self.parameters:
                    k = self.parameters.index(par)
                    self.is_log[par] = is_log[k]
                else:
                    # Blobs are never log10-ified before storing to disk
                    self.is_log[par] = False

    def units(self, prefix):
        """
        Return the value of the last base kwarg whose name matches both
        `prefix` and 'units', or None if there is no such kwarg.
        """
        units = None
        for kwarg in self.base_kwargs:
            if not re.search(prefix, kwarg):
                continue
            if re.search('units', kwarg):
                units = self.base_kwargs[kwarg]
        return units

    def _find_par(self, popid, phpid):
        """
        Find the base kwarg whose value is 'pq[<phpid>]' and return its name
        with the '{<popid>}' population tag stripped.

        NOTE: if nothing matches, `kwarg` is the last key iterated (or None
        for empty base_kwargs, in which case .replace raises); callers wrap
        this in try/except for that reason.
        """
        kwarg = None
        for kwarg in self.base_kwargs:
            if phpid is not None:
                if self.base_kwargs[kwarg] == 'pq[{}]'.format(phpid):
                    break
        return kwarg.replace('{{{}}}'.format(popid), '')

    def label(self, par, take_log=False, un_log=False):
        """
        Create a pretty label for this parameter (if possible).
        """
        # Direct hit in the label table: done (modulo log10 wrapping).
        if par in self.labels:
            label = self.labels[par]
            if par in self.parameters:
                if take_log:
                    return mathify_str(r'\mathrm{log}_{10}' + undo_mathify(label))
                elif self.is_log[par] and (not un_log):
                    return mathify_str(r'\mathrm{log}_{10}' + undo_mathify(label))
                else:
                    return label
            else:
                return label

        prefix, popid, phpid = par_info(par)
        _par = par
        # Correct prefix if phpid is not None
        if phpid is not None:
            s = 'pq[{}]'.format(phpid)
            for _par in self.base_kwargs:
                if self.base_kwargs[_par] != s:
                    continue
                break
            prefix = _par

        label = None
        # Simplest case. No popid, not a PQ, label found.
        if popid == phpid == None and (prefix in self.labels):
            label = self.labels[prefix]
        # Has pop ID number but is not a PQ, label found.
        elif (popid is not None) and (phpid is None) and (prefix in self.labels):
            label = self.labels[prefix]
        elif (popid is not None) and (phpid is None) and \
            prefix.startswith('pop_') and (prefix[4:] in self.labels):
            # BUGFIX: was prefix.strip('pop_'), which strips the *characters*
            # 'p', 'o', '_' from both ends and mangles names like 'pop_pstar'
            # -> 'star'. Remove the literal 'pop_' prefix instead (consistent
            # with the prefix[4:] fallback below).
            label = self.labels[prefix[4:]]
        # Has Pop ID, not a PQ, no label found.
        elif (popid is not None) and (phpid is None) and (prefix not in self.labels):
            try:
                hard = self._find_par(popid, phpid)
            except Exception:
                hard = None
            if hard is not None:
                # If all else fails, just typeset the parameter decently
                label = prefix
        # Is PQ, label found. Just need to parse []s.
        elif phpid is not None and (prefix in self.labels):
            parnum = list(map(int, re.findall(r'\d+', par.replace('[{}]'.format(phpid), ''))))
            if len(parnum) == 1:
                label = r'${0!s}^{{\mathrm{{par}}\ {1}}}$'.format(
                    undo_mathify(self.labels[prefix]), parnum[0])
            else:
                label = r'${0!s}^{{\mathrm{{par}}\ {1},{2}}}$'.format(
                    undo_mathify(self.labels[prefix]), parnum[0], parnum[1])
        # Otherwise, just use number. Not worth the trouble right now.
        elif (popid is None) and (phpid is not None) and par.startswith('pq_'):
            label = 'par {}'.format(self.parameters.index(par))

        # Troubleshoot if label not found
        if label is None:
            label = prefix
            if re.search('pop_', prefix):
                if prefix[4:] in self.labels:
                    label = self.labels[prefix[4:]]
            else:
                label = r'${!s}$'.format(par.replace('_', r'\_'))

        if par in self.parameters:
            if take_log:
                return mathify_str(r'\mathrm{log}_{10}' + undo_mathify(label))
            elif self.is_log[par] and (not un_log):
                return mathify_str(r'\mathrm{log}_{10}' + undo_mathify(label))
            else:
                return label

        return label
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@ares@util@Aesthetics.py@.PATH_END.py
|
{
"filename": "testdpg.py",
"repo_name": "mef51/frbgui",
"repo_path": "frbgui_extracted/frbgui-main/testdpg.py",
"type": "Python"
}
|
# Smoke test of the (old, pre-0.7) DearPyGui "simple" API: build a window
# with nested groups/buttons, then delete the children again.
# NOTE(review): the imported name is `dpg`, not `dearpygui` — presumably a
# local shim/alias module; confirm against the repo.
# NOTE(review): original indentation was lost in extraction; the with-block
# nesting below is a reconstruction — confirm against the repo.
import dpg
dpg.set_main_window_size(500, 500)
dpg.set_main_window_title("Group Test")
with dpg.window('FRB Analysis', width=200, height=200, x_pos=10, y_pos=30):
    with dpg.group("Hello"):
        pass
    with dpg.group("Bye", parent="Hello"):
        dpg.add_button("A button", parent="Hello")
        dpg.add_button("B button", parent="Hello")
        dpg.add_button("C button", parent="Hello")
    # Remove everything that was just parented under "Hello".
    dpg.delete_item("Hello", children_only=True)
dpg.start_dearpygui()
|
mef51REPO_NAMEfrbguiPATH_START.@frbgui_extracted@frbgui-main@testdpg.py@.PATH_END.py
|
{
"filename": "qt.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/qt.py",
"type": "Python"
}
|
import sys
import os
from IPython.external.qt_for_kernel import QtCore, QtGui, enum_helper
from IPython import get_ipython
# If we create a QApplication, keep a reference to it so that it doesn't get
# garbage collected.
_appref = None
_already_warned = False
def _exec(obj):
# exec on PyQt6, exec_ elsewhere.
obj.exec() if hasattr(obj, "exec") else obj.exec_()
def _reclaim_excepthook():
    """Restore IPython's excepthook (a no-op outside an IPython shell)."""
    shell = get_ipython()
    if shell is None:
        return
    sys.excepthook = shell.excepthook
def inputhook(context):
    """
    prompt_toolkit input hook: pump the Qt event loop until terminal input
    is ready (``context`` provides ``input_is_ready()`` and ``fileno()``),
    creating a QApplication on first use if none exists.
    """
    global _appref
    app = QtCore.QCoreApplication.instance()
    if not app:
        # No Qt app yet: refuse on headless Linux, otherwise create one.
        if sys.platform == 'linux':
            if not os.environ.get('DISPLAY') \
                    and not os.environ.get('WAYLAND_DISPLAY'):
                import warnings
                global _already_warned
                if not _already_warned:
                    _already_warned = True
                    warnings.warn(
                        'The DISPLAY or WAYLAND_DISPLAY environment variable is '
                        'not set or empty and Qt5 requires this environment '
                        'variable. Deactivate Qt5 code.'
                    )
                return
        # NOTE(review): QApplication is accessed via the QtCore shim from
        # qt_for_kernel here; on bindings where the attribute is missing the
        # AttributeError is deliberately swallowed below.
        try:
            QtCore.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
        except AttributeError:  # Only for Qt>=5.6, <6.
            pass
        try:
            QtCore.QApplication.setHighDpiScaleFactorRoundingPolicy(
                QtCore.Qt.HighDpiScaleFactorRoundingPolicy.PassThrough
            )
        except AttributeError:  # Only for Qt>=5.14.
            pass
        # Keep a module-level reference so the app isn't garbage collected.
        _appref = app = QtGui.QApplication([" "])
        # "reclaim" IPython sys.excepthook after event loop starts
        # without this, it defaults back to BaseIPythonApplication.excepthook
        # and exceptions in the Qt event loop are rendered without traceback
        # formatting and look like "bug in IPython".
        QtCore.QTimer.singleShot(0, _reclaim_excepthook)
    event_loop = QtCore.QEventLoop(app)
    if sys.platform == 'win32':
        # The QSocketNotifier method doesn't appear to work on Windows.
        # Use polling instead.
        timer = QtCore.QTimer()
        timer.timeout.connect(event_loop.quit)
        while not context.input_is_ready():
            # NOTE: run the event loop, and after 50 ms, call `quit` to exit it.
            timer.start(50)  # 50 ms
            _exec(event_loop)
        timer.stop()
    else:
        # On POSIX platforms, we can use a file descriptor to quit the event
        # loop when there is input ready to read.
        notifier = QtCore.QSocketNotifier(
            context.fileno(), enum_helper("QtCore.QSocketNotifier.Type").Read
        )
        try:
            # connect the callback we care about before we turn it on
            # lambda is necessary as PyQT inspect the function signature to know
            # what arguments to pass to. See https://github.com/ipython/ipython/pull/12355
            notifier.activated.connect(lambda: event_loop.exit())
            notifier.setEnabled(True)
            # only start the event loop we are not already flipped
            if not context.input_is_ready():
                _exec(event_loop)
        finally:
            notifier.setEnabled(False)
    # This makes sure that the event loop is garbage collected.
    # See issue 14240.
    event_loop.setParent(None)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py3@IPython@terminal@pt_inputhooks@qt.py@.PATH_END.py
|
{
"filename": "cluster_model.py",
"repo_name": "Moyoxkit/cluster-counts",
"repo_path": "cluster-counts_extracted/cluster-counts-main/cluster_model/cluster_model.py",
"type": "Python"
}
|
import numpy as np
from astropy.cosmology import FlatLambdaCDM, z_at_value
import astropy.units as u
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.integrate import dblquad, quad
from scipy.interpolate import (
interp1d,
LinearNDInterpolator,
NearestNDInterpolator,
griddata,
)
from scipy.special import erf
from hmf import MassFunction
import pandas as pd
import unyt
import pickle
from hmf import MassFunction
import MiraTitanHMFemulator
import commah
from colossus.cosmology import cosmology as colo_cosmo
from colossus.halo import concentration as colo_conc
class cluster_cosmology_model:
    """
    Class that has all the tools for creating simple cluster count models.
    """

    def __init__(
        self,
        FLAMINGO_info,
        y_cut,
        cosmo_info=None,
        fit_model=None,
        power_law_meds=False,
        log_normal_scatter=False,
        true_halo_mass_function=False,
        mira_titan=False,
        log_normal_lognsigy=0.075,
        power_law_args=(0.79169079, 1.67645383, 0.66),
        use_hydro_hmf_ratio=False,
    ):
        """
        Initialises the mass definition used for the halo mass function,
        initializes the default FLAMINGO cosmology and sets up the interpolators
        for the FLAMINGO medians and scatter
        """
        self.FLAMINGO_functions = FLAMINGO_info
        # Idiom fix: compare to None with `is`, not `==`.
        if cosmo_info is None:
            self.set_flamingo_cosmology()
        else:
            self.set_cosmology(cosmo_info)
        # Bundle the log-normal scatter with the power-law parameters.
        power_law_args = [
            power_law_args[0],
            power_law_args[1],
            power_law_args[2],
            log_normal_lognsigy,
        ]
        if power_law_meds or log_normal_scatter:
            self.FLAMINGO_functions.init_other_interpolators(
                self.astropy_cosmology, power_law_args
            )
        self.mira_titan_hmf = MiraTitanHMFemulator.Emulator()
        # hmf-based mass function only needed when neither the emulator nor
        # the simulation HMF is requested. (Idiom fix: `not x`, not `x == False`.)
        if not mira_titan and not true_halo_mass_function:
            self.hmf = MassFunction(
                cosmo_model=self.astropy_cosmology,
                mdef_model="SOCritical",
                mdef_params={"overdensity": 500},
                dlog10m=0.001,
                hmf_model=fit_model,
                transfer_model="EH",
                Mmin=10.5,
                Mmax=17,
                sigma_8=self.cosmological_parameters["sigma_8"],
                n=self.cosmological_parameters["n_s"],
            )
        self.init_number_counts_sz(
            y_cut,
            power_law_meds=power_law_meds,
            log_normal_scatter=log_normal_scatter,
            true_halo_mass_function=true_halo_mass_function,
            mira_titan=mira_titan,
            log_normal_lognsigy=log_normal_lognsigy,
            power_law_args=power_law_args,
            use_hydro_hmf_ratio=use_hydro_hmf_ratio,
        )

    def mass_translator(self, M500s, z, cosmology):
        """
        Function that converts and array of M500s to an array of M200s at
        a given cosmology and z.
        """

        def find_R500(R_200, c):
            # Invert the NFW enclosed-density profile to find the radius
            # where the mean enclosed density is 500x critical.
            conc_Y_c = np.log(1 + c) - c / (1 + c)
            delta_c = 200 * c**3 / (3 * conc_Y_c)
            R_s = R_200 / c
            rmax = np.logspace(-6, 1, 100)
            den_enclosed_in_crits_d = (3 * delta_c * (R_s / rmax) ** 3) * (
                np.log(((R_s + rmax) / R_s)) - (rmax / (R_s + rmax))
            )
            den_to_r = interp1d(den_enclosed_in_crits_d, rmax)
            return den_to_r(500)

        # Tabulate M200 -> M500 on a grid, then invert by interpolation.
        M200s = np.logspace(12, 16.5, 45)
        concs = colo_conc.modelDiemer19(
            M200s / (self.cosmological_parameters["H_0"] / 100), z, statistic="mean"
        )
        concs = concs[0]
        crit_den = (
            self.astropy_cosmology.critical_density(z).to(u.Msun / (u.Mpc**3)).value
        )
        R_200s = (M200s / (4 * np.pi * 200 * crit_den / 3)) ** (1 / 3)
        int_M500s = np.zeros(len(R_200s))
        for i in range(len(M200s)):
            R_500 = find_R500(R_200s[i], concs[i])
            int_M500s[i] = 4 * np.pi * 500 * crit_den * R_500**3 / 3
        M500_to_M200 = interp1d(int_M500s, M200s)
        return M500_to_M200(M500s)

    def set_cosmology(self, cosmological_parameters, mira_titan=False):
        """
        Sets the cosmology used for the halo mass function and the
        differential volume element
        """
        # One massive neutrino species; the other two are massless.
        m_nu = [cosmological_parameters["m_nu"], 0.00, 0.00] * u.eV
        self.astropy_cosmology = FlatLambdaCDM(
            H0=cosmological_parameters["H_0"],
            Om0=cosmological_parameters["Omega_m"],
            m_nu=m_nu,
            Ob0=cosmological_parameters["Omega_b"],
            Tcmb0=2.725,
        )
        self.cosmological_parameters = cosmological_parameters
        self.mira_titan_cosmology = {
            "Ommh2": cosmological_parameters["Omega_m"]
            * (cosmological_parameters["H_0"] / 100) ** 2,
            "Ombh2": cosmological_parameters["Omega_b"]
            * (cosmological_parameters["H_0"] / 100) ** 2,
            "Omnuh2": cosmological_parameters["m_nu"] * (0.01 / 0.94),
            "n_s": cosmological_parameters["n_s"],
            "h": cosmological_parameters["H_0"] / 100,
            "w_0": cosmological_parameters["w_0"],
            "w_a": cosmological_parameters["w_a"],
            "sigma_8": cosmological_parameters["sigma_8"],
        }
        self.commah_cosmology = {
            "omega_M_0": cosmological_parameters["Omega_m"],
            "omega_b_0": cosmological_parameters["Omega_b"],
            "omega_lambda_0": 1 - cosmological_parameters["Omega_m"],
            "omega_n_0": cosmological_parameters["m_nu"] * (0.01 / 0.94),
            "n": cosmological_parameters["n_s"],
            "h": cosmological_parameters["H_0"] / 100,
            "w_0": cosmological_parameters["w_0"],
            "w_a": cosmological_parameters["w_a"],
            "sigma_8": cosmological_parameters["sigma_8"],
        }
        bla = colo_cosmo.fromAstropy(
            self.astropy_cosmology,
            cosmological_parameters["sigma_8"],
            cosmological_parameters["n_s"],
            cosmo_name="colo_cosmo",
        )
        colo_cosmo.setCurrent(bla)
        # Update the hmf cosmology if it has been already initialized
        if hasattr(self, "hmf") and not mira_titan:
            self.hmf.update(
                cosmo_model=self.astropy_cosmology,
                sigma_8=self.cosmological_parameters["sigma_8"],
                n=self.cosmological_parameters["n_s"],
            )

    def get_cosmology(self):
        """Return the current cosmological-parameter dict."""
        return self.cosmological_parameters

    def set_flamingo_cosmology(self):
        """
        Sets all the cosmology objects to the fiducial FLAMINGO cosmology
        and initialises the halo mass function at this cosmology
        """
        self.flamingo_cosmology = {
            "Omega_m": 0.306,
            "Omega_b": 0.0486,
            "m_nu": 0.06,
            "H_0": 68.1,
            "sigma_8": 0.807,
            "n_s": 0.967,
            "w_0": -1,
            "w_a": 0,
        }
        self.set_cosmology(self.flamingo_cosmology)

    def differential_volume(self, z, solid_angle=4 * np.pi):
        """
        Returns the dV element at a input redshift for the reference cosmology in Mpc^3

        Parameters:
        z : redshift
        solid_angle : Angle subtended on the sky, default is 4pi, the full sky in sr
        """
        return (
            (
                self.astropy_cosmology.differential_comoving_volume(z)
                * solid_angle
                * u.sr
            )
            .to(u.Mpc**3)
            .value
        )

    def halo_mass_function(self, M500s, z, mira_titan=False):
        """
        Returns the value of the halo mass function for a given mass and redshift

        Parameters:
        M500s : Mass in terms of M500crit
        z : Redshift
        """
        little_h = self.astropy_cosmology.H(0).value / 100
        if mira_titan:
            # The Mira-Titan emulator is only calibrated out to z ~ 2.
            if z > 2.02:
                return np.zeros(len(M500s))
            masses_to_interpolate = np.logspace(13, 16, 1000) / little_h
            numden_to_interpolate = (
                self.mira_titan_hmf.predict(
                    self.mira_titan_cosmology,
                    z,
                    masses_to_interpolate * little_h,
                    get_errors=False,
                )[0][0]
                * little_h**3
                * np.log(10)
            )
            dn_dlog10m_interpolator = interp1d(
                masses_to_interpolate,
                numden_to_interpolate,
                fill_value=0,
                bounds_error=False,
            )
            # Emulator works in M200crit; translate the M500 grid first.
            mass_limit_200 = self.mass_translator(M500s, z, self.commah_cosmology)
            # for index, mass_limit_200_i in enumerate(mass_limit_200):
            #     mass_limit_200[index] = np.mean(
            #         10 ** np.random.normal(np.log10(mass_limit_200_i), 0.16, 1000)
            #     )
            return dn_dlog10m_interpolator(mass_limit_200)
        else:
            self.hmf.update(z=z)
            dn_dlog10m_interpolator = interp1d(
                self.hmf.m / little_h, self.hmf.dndlog10m * little_h**3
            )
            return dn_dlog10m_interpolator(M500s)

    def init_number_counts_sz(
        self,
        y_cut,
        power_law_meds=False,
        log_normal_scatter=False,
        true_halo_mass_function=False,
        mira_titan=False,
        log_normal_lognsigy=0.075,
        power_law_args=(0.79169079, 1.67645383, 0.66),
        use_hydro_hmf_ratio=False,
    ):
        """
        Calculate the number counts using simple integration. Integrates
        the expected number counts between a lower redshift z_low and a higher
        redshift z_high for a survey with an SZ cut at y_cut. By default it
        uses the medians and scatter from the input FLAMINGO simulation but has
        options to use a power-law scaling relation instead.

        Parameters:
        z_low : redshift at which to start integration
        z_high : redshift to integrate to
        y_cut : Compton Y cut that defines the selection
        power_law_meds : Boolean, use a power law for medians?
        log_normal_scatter: Boolean, use log normal scatter?
        true_halo_mass_function: Boolean, use the HMF from FLAMINGO?
        log_normal_lognsigy: log normal scatter in dex. Only used when log_normal_scatter=True
        use_hydro_hmf_ratio: Apply the ratio of DMO to hydro to alter the HMF
        """
        # Check if we want both log normal scatter and a power law
        power_law_and_log_normal = False
        if log_normal_scatter and power_law_meds:
            power_law_and_log_normal = True
        to_integrate = np.zeros(
            (
                len(self.FLAMINGO_functions.all_redshifts),
                len(self.FLAMINGO_functions.all_masses),
            )
        )
        for red_ind in range(len(self.FLAMINGO_functions.all_redshifts)):
            if true_halo_mass_function:
                number_densities = self.FLAMINGO_functions.flamingo_number_densities[
                    red_ind, :
                ]
            else:
                number_densities = self.halo_mass_function(
                    self.FLAMINGO_functions.all_masses,
                    self.FLAMINGO_functions.all_redshifts[red_ind],
                    mira_titan=mira_titan,
                )
            if use_hydro_hmf_ratio:
                number_densities = (
                    self.FLAMINGO_functions.hmf_ratios[red_ind, :] * number_densities
                )
            volume = self.differential_volume(
                self.FLAMINGO_functions.all_redshifts[red_ind]
            )
            for mass_ind in range(len(self.FLAMINGO_functions.all_masses)):
                # Start by checking if we want power law + log normal as this overides all other options
                if power_law_and_log_normal:
                    halo_fraction = self.FLAMINGO_functions.scatter_interpolators_LN_PL[
                        red_ind
                    ][mass_ind](np.log10(y_cut))
                elif power_law_meds:
                    halo_fraction = self.FLAMINGO_functions.scatter_interpolators_PL[
                        red_ind
                    ][mass_ind](np.log10(y_cut))
                elif log_normal_scatter:
                    halo_fraction = self.FLAMINGO_functions.scatter_interpolators_LN[
                        red_ind
                    ][mass_ind](np.log10(y_cut))
                else:
                    halo_fraction = self.FLAMINGO_functions.scatter_interpolators[
                        red_ind
                    ][mass_ind](np.log10(y_cut))
                to_integrate[red_ind, mass_ind] = (
                    halo_fraction * number_densities[mass_ind] * volume
                )
        # first integral over mass is performed by summing out one dimension of the array
        # No need for interpolation as we always want to include all halo masses
        integrated_with_m500 = np.sum(to_integrate, axis=1) * np.mean(
            np.diff(np.log10(self.FLAMINGO_functions.all_masses))
        )
        # We need to have a continuos answer in z so we interpolate the resulting array
        self.func_for_int = interp1d(
            self.FLAMINGO_functions.all_redshifts, integrated_with_m500
        )

    def number_counts_sz(self, z_low, z_high):
        """
        Use the previously initialised z_integral to calculate the actual number counts

        Returns the (value, abserr) tuple from scipy.integrate.quad.
        """
        # We can then do the integral over a continuous integral
        return quad(self.func_for_int, z_low, z_high)
|
MoyoxkitREPO_NAMEcluster-countsPATH_START.@cluster-counts_extracted@cluster-counts-main@cluster_model@cluster_model.py@.PATH_END.py
|
{
"filename": "_idssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattersmith/_idssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``idssrc`` attribute of scattersmith traces."""

    def __init__(self, plotly_name="idssrc", parent_name="scattersmith", **kwargs):
        # Pop edit_type so an explicit caller value wins over the default.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattersmith@_idssrc.py@.PATH_END.py
|
{
"filename": "reducepoldata.py",
"repo_name": "saltastro/polsalt",
"repo_path": "polsalt_extracted/polsalt-master/scripts/reducepoldata.py",
"type": "Python"
}
|
# Driver script for the polsalt spectropolarimetry pipeline: runs basic image
# reduction and wavelength calibration for one observation date; the later
# pipeline stages are present but commented out.
import os, sys, glob
import argparse
import numpy as np
import pyfits
# np.seterr(invalid='raise')
import polsalt
datadir = os.path.dirname(polsalt.__file__)+'/data/'
from polsalt.imred import imred
from polsalt.specpolwavmap import specpolwavmap
from polsalt.specpolextract import specpolextract
from polsalt.specpolrawstokes import specpolrawstokes
from polsalt.specpolfinalstokes import specpolfinalstokes

parser = argparse.ArgumentParser(description='Reduce SALT Lens Data')
parser.add_argument('ddir', help='Top level directory with SALT data')
# Flags *disable* stages: store_false with default=True.
parser.add_argument('-s', dest='basic_red', default=True, action='store_false',
    help='Skip basic reduction')
parser.add_argument('-w', dest='basic_wave', default=True, action='store_false',
    help='Skip wavelength calibration')
args = parser.parse_args()

# Work inside <ddir>/sci; raw frames are expected in <ddir>/raw.
obsdate = args.ddir
os.chdir(obsdate)
if not os.path.isdir('sci'): os.mkdir('sci')
os.chdir('sci')

#basic image reductions
infile_list = glob.glob('../raw/P*fits')
if args.basic_red:
    imred(infile_list, './', datadir+'bpm_rss_11.fits', cleanup=True)

#basic polarimetric reductions
logfile='specpol'+obsdate+'.log'

#target and wavelength map
infile_list = sorted(glob.glob('m*fits'))
linelistlib=""
if args.basic_wave:
    specpolwavmap(infile_list, linelistlib=linelistlib, logfile=logfile)

# Remaining stages, kept for reference (enable as needed):
#background subtraction and extraction
#infile_list = sorted(glob.glob('wm*fits'))
#specpolextract(infile_list, logfile=logfile, debug=True)

#raw stokes
#infile_list = sorted(glob.glob('e*0[6-9].fits')) # subselection
#infile_list = sorted(glob.glob('e*fits'))
#specpolrawstokes(infile_list, logfile=logfile)

#final stokes
#polcal = 'polcal0.txt' # null calibration
#infile_list = sorted(glob.glob('*_h[0,2]*.fits')) # subselection
#polcal = 'polcal.txt'
#infile_list = sorted(glob.glob('*_h*.fits'))
#specpolfinalstokes(infile_list, polcal=polcal, logfile=logfile)
|
saltastroREPO_NAMEpolsaltPATH_START.@polsalt_extracted@polsalt-master@scripts@reducepoldata.py@.PATH_END.py
|
{
"filename": "bticino.py",
"repo_name": "jabesq-org/pyatmo",
"repo_path": "pyatmo_extracted/pyatmo-master/src/pyatmo/modules/bticino.py",
"type": "Python"
}
|
"""Module to represent BTicino modules."""
from __future__ import annotations
import logging
from pyatmo.modules.module import (
DimmableMixin,
Module,
Shutter,
ShutterMixin,
Switch,
SwitchMixin,
)
# Module-level logger shared by the BTicino device classes below.
LOG = logging.getLogger(__name__)
class BNDL(Module):
    """BTicino door lock; exposes only the base ``Module`` interface."""
class BNSL(Switch):  # pylint: disable=too-many-ancestors
    """BTicino staircase light, modeled as a ``Switch``."""
class BNCX(Module):
    """BTicino internal panel (acts as the gateway device)."""
class BNEU(Module):
    """BTicino external unit; exposes only the base ``Module`` interface."""
class BNCS(Switch):
    """BTicino controlled socket, modeled as a ``Switch``."""
class BNXM(Module):
    """BTicino X meter; exposes only the base ``Module`` interface."""
class BNMS(Shutter):
    """BTicino motorized shade, modeled as a ``Shutter``."""
class BNAS(ShutterMixin, Module):
    """BTicino automatic shutter (``Module`` with shutter capabilities)."""
class BNAB(Shutter):
    """BTicino automatic blind, modeled as a ``Shutter``."""
class BNMH(Module):
    """BTicino MyHome server; exposes only the base ``Module`` interface."""
class BNTH(Module):
    """BTicino thermostat; exposes only the base ``Module`` interface."""
class BNFC(Module):
    """BTicino fan coil; exposes only the base ``Module`` interface."""
class BNTR(Module):
    """BTicino radiator thermostat; exposes only the base ``Module`` interface."""
class BNIL(SwitchMixin, Module):
    """BTicino intelligent light (``Module`` with switch capabilities)."""
class BNLD(DimmableMixin, SwitchMixin, Module):
    """BTicino dimmer light (``Module`` with switch and dimming capabilities)."""
|
jabesq-orgREPO_NAMEpyatmoPATH_START.@pyatmo_extracted@pyatmo-master@src@pyatmo@modules@bticino.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "gbrammer/eazy-py",
"repo_path": "eazy-py_extracted/eazy-py-master/eazy/utils.py",
"type": "Python"
}
|
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
import astropy.stats
import astropy.units as u
CLIGHT = 299792458.0 # m/s (exact SI speed of light)
# Accepted spellings for boolean-like parameter values; used by `bool_param`.
TRUE_VALUES = [True, 1, '1', 'True', 'TRUE', 'true', 'y', 'yes', 'Y', 'Yes']
FALSE_VALUES = [False, 0, '0', 'False', 'FALSE', 'false', 'n', 'no', 'N', 'No']
# Common cgs flux-density units (f_nu and f_lambda)
FNU_CGS = u.erg/u.second/u.cm**2/u.Hz
FLAM_CGS = u.erg/u.second/u.cm**2/u.Angstrom
# Path to the template/filter data directory; set by `set_data_path()` below.
DATA_PATH = None
def set_data_path(path='$EAZYCODE'):
    """
    Set the global ``DATA_PATH`` pointing to EAZY input files.

    Parameters
    ----------
    path : str or None
        Full directory path or environment variable pointing to the old eazy
        C-code repository that provides the template and filter files.

        If `path.startswith('$')` then treat path as an environment variable.

        If you install from the repository that provides the eazy-photoz
        code as a submodule, then you should be able to run with `path=None`
        and retrieve the files directly from the repository.  This should
        also work with the `pip` installation.

        Another safe way to ensure that the necessary files are available is
        to clone the `eazy-photoz` repository and set an environment variable
        to point to it (e.g, 'EAZYCODE'), which you then pass as the `path`
        argument.

    Returns
    -------
    path : str
        The resolved data path (also stored in the module-level ``DATA_PATH``).
    """
    global DATA_PATH

    # Guard against path=None before calling startswith; the original
    # raised AttributeError for the documented ``path=None`` usage.
    if path is not None and path.startswith('$'):
        path = os.getenv(path)

    if path is None:
        # Use the data files attached to the repository / pip install
        path = os.path.join(os.path.dirname(__file__), 'data/')

    DATA_PATH = path
    return path

set_data_path()
def bool_param(value, false_values=FALSE_VALUES, true_values=TRUE_VALUES, which='false', check_both=True):
    """
    Flexible boolean interpretation of a parameter value.

    With ``which == 'false'`` the result is ``value not in false_values``;
    with ``which == 'true'`` it is ``value in true_values``.  If
    ``check_both`` is set and ``value`` appears in neither list, the value
    itself is returned unchanged.
    """
    if which == 'false':
        result = value not in false_values
    elif which == 'true':
        result = value in true_values
    else:
        raise ValueError("Option ``which`` must be 'true' or 'false'")

    # Unknown value: pass it through untouched
    if check_both and (value not in true_values + false_values):
        result = value

    return result
def path_to_eazy_data():
    """
    Return internal path to ``eazy/data``.
    """
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, 'data')
def set_warnings(numpy_level='ignore', astropy_level='ignore'):
    """
    Set global numpy and astropy warnings

    Parameters
    ----------
    numpy_level : 'ignore', 'warn', 'raise', 'call', 'print', 'log'
        Numpy error level (see `numpy.seterr`).

    astropy_level : 'error', 'ignore', 'always', 'default', 'module', 'once'
        Astropy error level (see `warnings.simplefilter`).
    """
    from astropy.utils.exceptions import AstropyWarning

    # Both settings are process-wide side effects.
    np.seterr(all=numpy_level)
    warnings.simplefilter(astropy_level, category=AstropyWarning)
def running_median(xi, yi, NBIN=10, reverse=False, bins=None, x_func=np.median, y_func=np.median, std_func=astropy.stats.mad_std, x_kwargs={}, y_kwargs={}, std_kwargs={}, use_biweight=False, integrate=False, **kwargs):
    """
    Binned median/biweight/nmad statistics

    Parameters
    ----------
    xi : array-like
        Data of independent variable

    yi : array-like
        Data of dependent variable

    NBIN : int
        Number of bins along `xi`

    reverse : bool
        Calculate bins starting at largest values of `xi`

    bins : array-like
        Fixed bins, rather than calculating with `NBIN`

    x_func : function
        Function to compute moments of `xi`

    y_func, std_func : function
        Functions to compute moments of `yi`.  Assumed to be the central
        value and dispersion, but don't have to be

    x_kwargs, y_kwargs, std_kwargs : dict
        Keyword arguments to pass to moment functions

    use_biweight : bool
        Use robust biweight estimators:

        - `x_func` : `astropy.stats.biweight_location`
        - `y_func` : `astropy.stats.biweight_location`
        - `std_func` : `astropy.stats.biweight_midvariance`

    integrate : bool
        Numerically integrate `yi` with the trapezoidal rule within the bins.
        NOTE(review): in this mode the dispersion array `ys` is left at zero
        for the integrated bins — confirm that this is intended.

    Returns
    -------
    xm, ym, ys : array-like
        Binned moments of `xi` and `yi`

    yn : array-like
        Number of entries per bin
    """
    NPER = xi.size // NBIN
    if bins is None:
        # Equal-occupancy bins: interpolate the sorted xi at NBIN+1
        # evenly spaced ranks.
        so = np.argsort(xi)
        if reverse:
            so = so[::-1]
        bx = np.linspace(0,len(xi),NBIN+1)
        bins = np.interp(bx, np.arange(len(xi)), xi[so])
        if reverse:
            bins = bins[::-1]

    NBIN = len(bins)-1

    xm = np.ones(NBIN)
    xs = np.zeros_like(xm)
    ym = np.zeros_like(xm)
    ys = np.zeros_like(xm)
    N = np.zeros(NBIN, dtype=int)

    if use_biweight:
        # Swap in the robust astropy estimators.
        x_func = astropy.stats.biweight_location
        y_func = astropy.stats.biweight_location
        std_func = astropy.stats.biweight_midvariance

    #idx = np.arange(NPER, dtype=int)
    for i in range(NBIN):
        # Half-open bin membership: bins[i] < xi <= bins[i+1]
        in_bin = (xi > bins[i]) & (xi <= bins[i+1])
        N[i] = in_bin.sum() #N[i] = xi[so][idx+NPER*i].size
        if integrate:
            # Trapezoidal bin average of yi over the span of xi in the bin
            xso = np.argsort(xi[in_bin])
            ma = xi[in_bin].max()
            mi = xi[in_bin].min()
            xm[i] = (ma+mi)/2.
            dx = (ma-mi)
            ym[i] = np.trapz(yi[in_bin][xso], xi[in_bin][xso])/dx
        else:
            xm[i] = x_func(xi[in_bin], **x_kwargs)
            ym[i] = y_func(yi[in_bin], **y_kwargs)
            ys[i] = std_func(yi[in_bin], **std_kwargs)

    return xm, ym, ys, N
def nmad(data):
    """
    Normalized median absolute deviation statistic.

    Thin wrapper around `astropy.stats.mad_std`.
    """
    from astropy.stats import mad_std
    return mad_std(data)
def log_zgrid(zr=[0.7,3.4], dz=0.01):
    """Make a logarithmically spaced redshift grid

    Parameters
    ----------
    zr : [float, float]
        Minimum and maximum of the desired grid

    dz : float
        Step size, dz/(1+z)

    Returns
    -------
    zgrid : array-like
        Redshift grid
    """
    # Uniform steps in log(1+z) map to constant dz/(1+z)
    log_lo = np.log(1 + zr[0])
    log_hi = np.log(1 + zr[1])
    return np.exp(np.arange(log_lo, log_hi, dz)) - 1
def trapz_dx(x):
    """
    Return trapezoid rule coefficients, useful for numerical integration
    using a dot product

    Parameters
    ----------
    x : array-like
        Independent variable

    Returns
    -------
    dx : array_like
        Coefficients for trapezoidal rule integration.
    """
    # Each interior point gets half of the interval on each side;
    # the endpoints only get their single adjacent half-interval.
    half_steps = np.diff(x) / 2.
    weights = np.zeros_like(x)
    weights[:-1] += half_steps
    weights[1:] += half_steps
    return weights
def clipLog(im, lexp=1000, cmap=[-1.4914, 0.6273], scale=[-0.1,10]):
    """
    Return array normalized to [0, 1] like the DS9 "log" stretch.

    Parameters
    ----------
    im : array-like
        Input image/array values.

    lexp : float
        Log-stretch exponent (DS9 ``log`` parameter).

    cmap : [contrast, bias]
        Contrast/bias pair applied after the log stretch.

    scale : [vmin, vmax]
        Input values are clipped to this range before normalization.

    Returns
    -------
    clip_log : array-like
        Stretched values clipped to [0, 1].
    """
    # The redundant local ``import numpy as np`` (which shadowed the
    # module-level import) was removed; behavior is unchanged.
    contrast, bias = cmap
    # Normalize the clipped input to [0, 1]
    clip = (np.clip(im, scale[0], scale[1])-scale[0])/(scale[1]-scale[0])
    # DS9-style log stretch followed by contrast/bias, clipped to [0, 1]
    clip_log = np.clip((np.log10(lexp*clip+1)/np.log10(lexp)-bias)*contrast+0.5, 0, 1)

    return clip_log
def get_mw_dust(ra, dec, **kwargs):
    """
    Wrapper around functions to try to query for the MW E(B-V).

    Tries, in order: `dustmaps` web query, `dustmaps` local query, and the
    NED/IRSA service via `get_irsa_dust`.  Extra keywords are passed to
    `get_irsa_dust`.  Returns 0.0 if every backend fails (missing optional
    dependency, no network, etc.).
    """
    # Bare ``except:`` replaced by ``except Exception`` so that
    # KeyboardInterrupt/SystemExit are not swallowed; each backend is
    # deliberately best-effort.
    try:
        return get_dustmaps_dust(ra, dec, web=True)
    except Exception:
        pass

    try:
        return get_dustmaps_dust(ra, dec, web=False)
    except Exception:
        pass

    try:
        return get_irsa_dust(ra, dec, **kwargs)
    except Exception:
        pass

    return 0.00
def get_dustmaps_dust(ra, dec, web=True, **kwargs):
    """Query SFD E(B-V) with https://github.com/gregreen/dustmaps"""
    from dustmaps.sfd import SFDQuery, SFDWebQuery
    from astropy.coordinates import SkyCoord

    target = SkyCoord(ra, dec, unit='deg', frame='icrs')

    # Remote service or local map files, depending on ``web``
    query_cls = SFDWebQuery if web else SFDQuery
    return query_cls()(target)
def get_irsa_dust(ra=53.1227, dec=-27.805089, type='SandF'):
    """
    Get Galactic dust reddening from NED/IRSA at a given position
    http://irsa.ipac.caltech.edu/applications/DUST/docs/dustProgramInterface.html

    Parameters
    ----------
    ra, dec : float
        RA/Dec in decimal degrees.

    type : 'SFD', 'SandF'
        Dust model, with

            SandF = Schlafly & Finkbeiner 2011 (ApJ 737, 103)
            SFD = Schlegel et al. 1998 (ApJ 500, 525)

    Returns
    -------
    ebv : float
        Color excess E(B-V), in magnitudes
    """
    import os
    import tempfile   # NOTE(review): os/tempfile/Table appear unused here
    import urllib.request
    from astropy.table import Table
    from lxml import objectify

    query = 'http://irsa.ipac.caltech.edu/cgi-bin/DUST/nph-dust?locstr={0:.4f}+{1:.4f}+equ+j2000'.format(ra, dec)

    # Blocking HTTP GET; raises if the service is unreachable
    req = urllib.request.Request(query)
    response = urllib.request.urlopen(req)
    resp_text = response.read().decode('utf-8')

    # The service returns XML; extract the reference-pixel statistic
    root = objectify.fromstring(resp_text)
    stats = root.result.statistics

    if type == 'SFD':
        return float(str(stats.refPixelValueSFD).split()[0])
    else:
        return float(str(stats.refPixelValueSandF).split()[0])
def fill_between_steps(x, y, z, ax=None, *args, **kwargs):
    """
    Fill between two curves as if they were drawn with
    ``linestyle='steps-mid'``.

    Duplicates each bin midpoint (with a tiny positive offset) so the
    filled region has vertical edges at the midpoints.
    """
    order = np.argsort(x)
    steps = np.diff(x[order])
    mid = x[order][:-1] + steps / 2.
    nudged = mid + steps / 1.e6

    xfull = np.append(np.append(x, mid), nudged)
    yfull = np.append(np.append(y, y[:-1]), y[1:])
    zfull = np.append(np.append(z, z[:-1]), z[1:])

    order = np.argsort(xfull)

    if ax is None:
        ax = plt.gca()

    ax.fill_between(xfull[order], yfull[order], zfull[order], *args, **kwargs)
def safe_invert(arr):
    """
    Version-safe matrix inversion using `numpy.linalg.inv` or the legacy
    `numpy.matrix.I` attribute as a fallback.
    """
    # Bare ``except:`` narrowed to ``except Exception`` so interrupts
    # aren't swallowed; the fallback preserves the original behavior on
    # old numpy versions.
    try:
        from numpy.linalg import inv
        _inv = inv(arr)
    except Exception:
        _inv = np.matrix(arr).I.A

    return _inv
class GalacticExtinction(object):
    """
    Wrapper around the available Milky Way F99 extinction implementations
    (`dust_extinction`, `extinction`, or `specutils.extinction`), which have
    different calling conventions but equivalent results.
    """
    def __init__(self, EBV=0, Rv=3.1, force=None, radec=None, ebv_type='SandF'):
        """
        Parameters
        ----------
        EBV : float
            Galactic reddening, e.g., from `https://irsa.ipac.caltech.edu/applications/DUST/`.

        Rv : float
            Selective extinction ratio, `Rv=Av/(E(B-V))`.

        radec : None or (float, float)
            If provided, try to determine EBV based on these coordinates
            with `get_irsa_dust(type=[ebv_type])` or `dustmaps`.

        force : None, 'dust_extinction', 'extinction', 'specutils.extinction'
            Force use of one particular module.  If `None`, the first
            importable of `dust_extinction`, `extinction`,
            `specutils.extinction` (in that order) is used.
        """
        import importlib

        # Import handler.  Each branch must bind the F99 class to ``_F99``;
        # the original ``force`` branches only imported the module and then
        # raised NameError when ``_F99`` was used below.
        if force == 'specutils.extinction':
            import specutils.extinction
            _F99 = specutils.extinction.ExtinctionF99
            self.module = 'specutils.extinction'
        elif force == 'extinction':
            from extinction import Fitzpatrick99 as _F99
            self.module = 'extinction'
        elif force == 'dust_extinction':
            from dust_extinction.parameter_averages import F99 as _F99
            # Use the full submodule path so the dispatch below and in
            # `__call__` recognizes this implementation.
            self.module = 'dust_extinction.parameter_averages'
        else:
            modules = [['dust_extinction.parameter_averages', 'F99'],
                       ['extinction','Fitzpatrick99'],
                       ['specutils.extinction','ExtinctionF99']]

            self.module = None
            for (mod, cla) in modules:
                try:
                    _F99 = getattr(importlib.import_module(mod), cla)
                    self.module = mod
                    break
                except Exception:
                    continue

            if self.module is None:
                raise ImportError("Couldn't import extinction module from "
                                  "dust_extinction, extinction or specutils")

        if radec is not None:
            # Query E(B-V) at the given position (best-effort; see get_mw_dust)
            self.EBV = get_mw_dust(ra=radec[0], dec=radec[1], type=ebv_type)
        else:
            self.EBV = EBV

        self.Rv = Rv

        # The three implementations are constructed differently.
        if self.module == 'dust_extinction.parameter_averages':
            self.f99 = _F99(Rv=self.Rv)
        elif self.module == 'specutils.extinction':
            self.f99 = _F99(self.Av)
        else:
            self.f99 = _F99(self.Rv)

    @property
    def Av(self):
        """Total extinction ``A_V = E(B-V) * Rv``."""
        return self.EBV*self.Rv

    @property
    def info(self):
        """One-line summary of the module and parameters in use."""
        msg = ('F99 extinction with `{0}`: Rv={1:.1f}, '
               'E(B-V)={2:.3f} (Av={3:.2f})')
        return msg.format(self.module, self.Rv, self.EBV, self.Av)

    def __call__(self, wave):
        """
        Compute Fitzpatrick99 extinction.

        Parameters
        ----------
        wave : float or `numpy.ndarray`
            Observed-frame wavelengths.  If no `unit` attribute available,
            assume units are `astropy.units.Angstrom`.

        Returns
        -------
        Alambda : like ``wave``
            F99 extinction (mags) as a function of wavelength.  Output will
            be set to zero below 909 Angstroms and above 6 microns as the
            extinction modules themselves don't compute outside that range.
        """
        import astropy.units as u
        if not hasattr(wave, 'unit'):
            unit = u.Angstrom
        else:
            unit = 1

        inwave = np.squeeze(wave).flatten()

        # Valid wavelength range differs slightly between implementations
        if self.module == 'dust_extinction.parameter_averages':
            clip = (inwave*unit > 1/10.*u.micron)
            clip &= (inwave*unit < 1/0.3*u.micron)
        else:
            clip = (inwave*unit > 909*u.angstrom) & (inwave*unit < 6*u.micron)

        Alambda = np.zeros(inwave.shape)

        if clip.sum() == 0:
            return Alambda
        else:
            if self.module == 'dust_extinction.parameter_averages':
                flam = self.f99.extinguish(inwave[clip]*unit, Av=self.Av)
                Alambda[clip] = -2.5*np.log10(flam)
            elif self.module == 'specutils.extinction':
                Alambda[clip] = self.f99(inwave[clip]*unit)
            else:
                Alambda[clip] = self.f99(inwave[clip]*unit, self.Av)

        return Alambda
def abs_mag_to_luminosity(absmag, pivot=None, output_unit=u.L_sun):
    """
    Convert absolute AB mag to luminosity units

    Parameters
    ----------
    absmag : array-like
        Absolute AB magnitude.

    pivot : float
        Filter pivot wavelength associated with the magnitude.  If no units,
        then assume `astropy.units.Angstrom`.

    output_unit : `astropy.units.core.Unit`
        Desired output unit.  Must specify a ``pivot`` wavelength for output
        power units, e.g., `astropy.unit.L_sun`.
    """
    if pivot is None:
        # Without a pivot wavelength, skip the nu factor entirely
        nu = 1.
    else:
        if hasattr(pivot, 'unit'):
            # pivot already carries astropy units; multiply by unity
            wunit = 1
        else:
            wunit = u.Angstrom

        # Convert pivot wavelength to frequency
        nu = ((CLIGHT*u.m/u.second)/(pivot*wunit)).to(u.Hz)

    # AB zeropoint (3631 Jy), scaled to the 10-pc absolute-magnitude sphere
    fjy = 3631*u.jansky * 10**(-0.4*absmag)

    d10 = (10*u.pc).to(u.cm)

    f10 = fjy * 4 * np.pi * d10**2 * nu

    return f10.to(output_unit)
def zphot_zspec(zphot, zspec, zlimits=None, zmin=0, zmax=4, axes=None, figsize=[6,7], minor=0.5, skip=2, selection=None, catastrophic_limit=0.15, title=None, min_zphot=0.02, alpha=0.2, extra_xlabel='', extra_ylabel='', xlabel=r'$z_\mathrm{spec}$', ylabel=r'$z_\mathrm{phot}$', label_pos=(0.05, 0.95), label_kwargs=dict(ha='left', va='top', fontsize=10), label_prefix='', format_axes=True, color='k', point_label=None, **kwargs):
    """
    Make zphot_zspec plot scaled by log(1+z) and show uncertainties

    Parameters
    ----------
    zphot : array-like
        Redshift on dependent axis

    zspec : array-like
        Redshift on independent axis

    zlimits : (N, 2) array
        Redshifts to use for photo-z errorbars, e.g. from
        `~eazy.photoz.PhotoZ.pz_percentiles`, where `N` is the number of
        objects as in `zphot` and `zspec`

    zmin, zmax : float
        Plot limits

    axes : `matplotlib` axes, None
        If specified, overplot in existing axes rather than generating a new
        plot.  For example, run the function once to generate the figure and
        then plot different points onto the existing axes:

        >>> fig = eazy.utils.zphot_spec(zphot, zspec, selection=sample1)
        >>> _ = eazy.utils.zphot_spec(zphot, zspec, selection=sample2,
        >>>                           axes=fig.axes, color='b')

    figsize : list
        Figure canvas dimensions

    minor : float
        Axis tick interval

    skip : int
        Put axis labels every `skip` ticks

    selection : array-like
        Subsample selection (boolean or indices) applied as `zphot[selection]`

    catastrophic_limit : float
        Limit to define "catastrophic" failures, which is used for computing
        precision / outlier statistics printed on the plot

    title : str
        Title to add to the plot axes

    Returns
    -------
    fig : `matplotlib.figure.Figure`
        Figure object (or ``True`` when plotting into existing ``axes``)
    """
    import matplotlib.pyplot as plt
    from matplotlib.gridspec import GridSpec

    # Valid comparison sample: above the photo-z floor, inside (zmin, zmax]
    clip = (zphot > min_zphot) & (zspec > zmin) & (zspec <= zmax)
    if selection is not None:
        clip &= selection

    # Normalized redshift residual
    dz = (zphot-zspec)/(1+zspec)

    #izbest = np.argmin(self.fit_chi2, axis=1)

    # Split into "good" points and catastrophic outliers
    clip_cat = (np.abs(dz) < catastrophic_limit)
    frac_cat = 1-(clip & clip_cat).sum() / clip.sum()
    NOUT = (clip & ~clip_cat).sum()

    gs = GridSpec(2,1, height_ratios=[6,1])
    NEW_AXES = axes is None
    if NEW_AXES:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(gs[0,0])
    else:
        ax = axes[0]
        fig = None

    if title is not None:
        ax.set_title(title)

    if zlimits is not None:
        # Asymmetric photo-z errorbars drawn in log(1+z) space; outliers
        # in red, the rest in `color`
        yerr = np.log10(1+np.abs(zlimits.T - zphot))
        ax.errorbar(np.log10(1+zspec[clip & ~clip_cat]),
                    np.log10(1+zphot[clip & ~clip_cat]),
                    yerr=yerr[:,clip & ~clip_cat], marker='.', alpha=alpha,
                    color='r', linestyle='None')

        ax.errorbar(np.log10(1+zspec[clip & clip_cat]),
                    np.log10(1+zphot[clip & clip_cat]),
                    yerr=yerr[:,clip & clip_cat], marker='.', alpha=alpha,
                    color=color, linestyle='None', label=point_label)
    else:
        ax.scatter(np.log10(1+zspec[clip & ~clip_cat]),
                   np.log10(1+zphot[clip & ~clip_cat]),
                   marker='.', alpha=alpha, color='r')

        ax.scatter(np.log10(1+zspec[clip & clip_cat]),
                   np.log10(1+zphot[clip & clip_cat]),
                   marker='.', alpha=alpha, color=color, label=point_label)

    if NEW_AXES | format_axes:
        # One-to-one line and tick labels in log(1+z) units
        xt = np.arange(zmin, zmax+0.1, minor)
        xl = np.log10(1+xt)
        ax.plot(xl, xl, color='r', alpha=0.5)
        ax.set_xlim(xl[0], xl[-1])
        ax.set_ylim(xl[0],xl[-1])

        xtl = list(xt)
        if skip > 0:
            # Blank out all but every `skip`-th tick label
            for i in range(1, len(xt), skip):
                xtl[i] = ''

        ax.set_xticks(xl)
        if axes is None:
            # x labels go on the residual panel added below
            ax.set_xticklabels([])
        else:
            if len(axes) == 1:
                ax.set_xticks(xl)
                ax.set_xticklabels(xtl);
                ax.set_xlabel(xlabel + extra_xlabel)

        ax.set_yticks(xl); ax.set_yticklabels(xtl);
        ax.set_ylabel(ylabel + extra_ylabel)

    # Scatter statistics: NMAD of all and of the non-outlier sample
    sample_nmad = nmad(dz[clip])
    sample_cat_nmad = nmad(dz[clip & clip_cat])

    if label_pos is not None:
        msg = r'{label_prefix} N={N} ({NOUT}, {err_frac:4.1f}%), $\sigma$={sample_nmad:.4f} ({sample_cat_nmad:.4f})'
        msg = msg.format(label_prefix=label_prefix,
                         N=clip.sum(), err_frac=frac_cat*100,
                         sample_nmad=sample_nmad,
                         sample_cat_nmad=sample_cat_nmad, NOUT=NOUT)

        ax.text(label_pos[0], label_pos[1], msg, transform=ax.transAxes)

    # Lower panel: residuals dz/(1+z)
    if axes is None:
        ax = fig.add_subplot(gs[1,0])
    else:
        if len(axes) == 2:
            ax = axes[1]
        else:
            return True

    if zlimits is not None:
        yerr = np.abs(zlimits.T-zphot)

        ax.errorbar(np.log10(1+zspec[clip & ~clip_cat]), dz[clip & ~clip_cat],
                    yerr=yerr[:,clip & ~clip_cat],
                    marker='.', alpha=alpha, color='r', linestyle='None')

        ax.errorbar(np.log10(1+zspec[clip & clip_cat]), dz[clip & clip_cat],
                    yerr=yerr[:,clip & clip_cat],
                    marker='.', alpha=alpha, color='k', linestyle='None')
    else:
        ax.scatter(np.log10(1+zspec[clip & ~clip_cat]), dz[clip & ~clip_cat],
                   marker='.', alpha=alpha, color='r')

        ax.scatter(np.log10(1+zspec[clip & clip_cat]), dz[clip & clip_cat],
                   marker='.', alpha=alpha, color='k')

    if fig is not None:
        ax.set_xticks(xl); ax.set_xticklabels(xtl);
        ax.set_xlim(xl[0], xl[-1])
        # Symmetric residual limits scaled to the sample NMAD
        ax.set_ylim(-6*sample_nmad, 6*sample_nmad)
        ax.set_yticks([-3*sample_nmad, 0, 3*sample_nmad])
        ax.set_yticklabels([r'$-3\sigma$',r'$0$',r'$+3\sigma$'])
        ax.set_xlabel(xlabel + extra_xlabel)
        ax.set_ylabel(r'$\Delta z / 1+z$')

        for a in fig.axes:
            a.grid()

        fig.tight_layout(pad=0.1)
        return fig
    else:
        return True
def query_html(ra, dec, with_coords=True, replace_comma=True, queries=['CDS','ESO','MAST','ALMA', 'LEG','HSC'], **kwargs):
    """
    Return HTML string of archive-query links around a position

    Parameters
    ----------
    ra, dec : float
        Coordinates in decimal degrees

    with_coords : bool
        Include '(ra, dec)' in output string

    replace_comma : bool
        Replace ',' with URL-safe '%2C'

    queries : list
        - CDS: Vizier/CDS catalogs
        - ESO: ESO archive
        - MAST: STScI/MAST HST archive
        - ALMA: ALMA archive
        - LEG/LEGACY: LegacySurvey map interface
        - HSC: HSC map interface

    Returns
    -------
    html : str
        HTML-formatted string with query links
    """
    pieces = [f"({ra:.6f}, {dec:.6f})"] if with_coords else []

    # Name -> query-builder dispatch table
    lookup = dict(zip(['CDS', 'ESO', 'MAST', 'ALMA', 'LEG', 'HSC'],
                      [cds_query, eso_query, mast_query, alma_query,
                       show_legacysurvey, hscmap_query]))
    lookup['LEGACY'] = lookup['LEG']

    for key in queries:
        if key not in lookup:
            continue
        url = lookup[key](ra, dec, **kwargs)
        pieces.append(f'<a href="{url}">{key}</a>')

    out = ' '.join(pieces)
    if replace_comma:
        out = out.replace(',', '%2C')

    return out
def cds_query(ra, dec, radius=1., unit='s', **kwargs):
    """
    Build a Vizier/CDS catalog query URL around the central position.
    ``radius`` is interpreted in the units given by ``unit``
    ('s' arcsec, 'm' arcmin, 'd' degrees).
    """
    coord_str = f'{ra} {dec}'
    # URL-escape the sign characters, then encode the separator space
    for raw, escaped in (('+', '%2B'), ('-', '%2D'), (' ', '+')):
        coord_str = coord_str.replace(raw, escaped)

    return ('http://vizier.u-strasbg.fr/viz-bin/VizieR?'
            f'-c={coord_str}&-c.r{unit}={radius}')
def eso_query(ra, dec, radius=1., unit='m', dp_types=['CUBE','IMAGE'], extra='', **kwargs):
    """
    Build an ESO science-portal archive query URL around the position.

    Note: the ESO query matches data whose footprint **contains** the point.
    The portal radius is in degrees; ``radius`` is converted from ``unit``
    ('d' degrees, 'm' arcmin, 's' arcsec).
    """
    if unit == 'd':
        r = format(radius, '.2f')
    elif unit == 'm':
        r = format(radius / 60, '.3f')
    elif unit == 's':
        r = format(radius / 3600, '.5f')

    dp_type = ','.join(dp_types)

    return ('https://archive.eso.org/scienceportal/home?'
            f'pos={ra},{dec}&r={r}&dp_type={dp_type}{extra}')
def mast_query(ra, dec, instruments=['WFC3','ACS','WFPC2'], mast_radius=1., mast_unit='m', max=1000, **kwargs):
    """
    Build a MAST HST archive query URL around the position.

    Note: the MAST query matches by **distance to** the point.  The native
    search radius is arcmin; ``mast_radius`` is converted from ``mast_unit``
    ('d' degrees, 'm' arcmin, 's' arcsec).
    """
    instr = '&sci_instrume=' + ','.join(instruments) if len(instruments) > 0 else ''

    if mast_unit == 'd':
        r = format(mast_radius * 60, '.2f')
    elif mast_unit == 'm':
        r = format(mast_radius, '.3f')
    elif mast_unit == 's':
        r = format(mast_radius / 60, '.5f')

    return (f'https://archive.stsci.edu/hst/search.php?RA={ra}&DEC={dec}'
            f'&radius={r}'
            f'&sci_aec=S{instr}&max_records={max}&outputformat=HTML_Table'
            '&action=Search')
def alma_query(ra, dec, mirror="almascience.eso.org", radius=1, unit='m', extra='', **kwargs):
    """
    Build an ALMA archive query URL around the position.

    The archive radius is arcmin; ``radius`` is converted from ``unit``
    ('d' degrees, 'm' arcmin, 's' arcsec).
    """
    if unit == 'd':
        r = format(radius * 60, '.2f')
    elif unit == 'm':
        r = format(radius, '.3f')
    elif unit == 's':
        r = format(radius / 60, '.5f')

    return (f"https://{mirror}/aq/?result_view=observation"
            f"&raDec={ra}%20{dec},{r}{extra}")
def hscmap_query(ra, dec, open=True, **kwargs):
    """
    Build an HSC map-explorer URL centered on the target coordinates.

    Parameters
    ----------
    ra, dec : float
        Coordinates in decimal degrees (converted to radians for the URL).

    open : bool
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    url : str
        URL for the hscMap PDR2 viewer.
    """
    # Removed the unused local ``import os``; behavior is unchanged.
    rrad = ra/180*np.pi
    drad = dec/180*np.pi
    url = (f"https://hsc-release.mtk.nao.ac.jp/hscMap-pdr2/app/#/?_=%7B%22view%22%3A%7B%22a%22%3A{rrad},%22d%22%3A{drad}"
           ",%22fovy%22%3A0.00009647627785850188,%22roll%22%3A0%7D,%22sspParams%22%3A%7B%22type%22%3A%22"
           "SDSS_TRUE_COLOR%22,%22filter%22%3A%5B%22HSC-Y%22,%22HSC-Z%22,%22HSC-I%22%5D,%22simpleRgb"
           "%22%3A%7B%22beta%22%3A22026.465794806718,%22a%22%3A1,%22bias%22%3A0.05,%22b0%22%3A0%7D,%22"
           "sdssTrueColor%22%3A%7B%22beta%22%3A40106.59228119989,%22a%22%3A2.594451857120983,%22bias%22%3A0.05,"
           "%22b0%22%3A0%7D%7D,%22externalTiles%22%3A%5B%5D,%22activeReruns%22%3A%5B%22pdr2_wide%22,%22pdr2_dud"
           "%22%5D%7D")

    return url
def show_legacysurvey(ra, dec, layer='dr8', zoom=17, **kwargs):
    """
    Build a legacysurvey.org sky-viewer URL centered on the position.
    """
    base = 'http://legacysurvey.org/viewer'
    return f'{base}?ra={ra}&dec={dec}&layer={layer}&zoom={zoom}'
def interp_conserve(x, xp, fp, left=0., right=0.):
    """
    Interpolation analogous to `numpy.interp` but conserving "flux".

    The value at each output point is the trapezoidal average of the
    tabulated function between the midpoints of the output grid.

    Parameters
    ----------
    x : `numpy.ndarray`
        Desired interpolation locations

    xp, fp : `numpy.ndarray`
        The `x` and `y` coordinates of the function to be interpolated.
        The `x` array can be irregularly spaced but should increase
        monotonically.

    left, right : float
        Values to use for extrapolation below/above the limits of `xp`.

    Returns
    -------
    y : like `x`
        Interpolated values.

    .. note:: For a faster `cython` implementation of this function, see
              `grizli.utils_c.interp_conserve_c`.
    """
    # Bin edges: midpoints of the output grid plus its two endpoints
    edges = (x[1:] - x[:-1]) / 2. + x[:-1]
    edges = np.append(edges, np.array([x[0], x[-1]]))
    edges = edges[np.argsort(edges)]

    f_edges = np.interp(edges, xp, fp, left=left, right=right)
    f_edges[edges > xp.max()] = right
    f_edges[edges < xp.min()] = left

    # Merge the tabulated samples with the edge samples, sorted in x
    allx = np.append(xp, edges)
    ally = np.append(fp, f_edges)
    order = np.argsort(allx)
    allx, ally = allx[order], ally[order]

    out = x * 0.
    widths = edges[1:] - edges[:-1]
    for i in range(len(x)):
        sel = (allx >= edges[i]) & (allx <= edges[i+1])
        out[i] = np.trapz(ally[sel], allx[sel]) / widths[i]

    return out
class emceeChain():
    """
    Container for an emcee MCMC chain with shape
    ``(nwalkers, nstep, nparam)``: percentile statistics, plotting helpers,
    random draws, and pickle/FITS (de)serialization.
    """
    def __init__(self, chain=None, param_names=None, file=None,
                       burn_fraction=0.5, sampler=None):
        """
        Parameters
        ----------
        chain : array-like, optional
            Chain array with shape ``(nwalkers, nstep, nparam)``.

        file : str, optional
            Read the chain from a pickle (default) or FITS file.

        param_names : list of str, optional
            Parameter names; auto-generated as ``a1, a2, ...`` if omitted.

        burn_fraction : float
            Fraction of steps discarded as burn-in for the statistics.

        sampler : emcee sampler, optional
            If given, store the maximum-probability parameter vector as
            `map`; otherwise `map` falls back to the medians.
        """
        self.param_names = []

        if chain is not None:
            self.chain = chain

        if file is not None:
            if 'fits' in file.lower():
                self.load_fits(file=file)
            else:
                self.load_chain(file=file)

        # NOTE: default changed from a shared mutable list ([]) to None so
        # auto-generated names can't leak between instances.
        if param_names is None:
            param_names = []

        self.process_chain(param_names = param_names,
                           burn_fraction=burn_fraction)

        if sampler is not None:
            from numpy import unravel_index
            max_ix = unravel_index(sampler.lnprobability.argmax(), sampler.lnprobability.shape)
            self.map = self.chain[max_ix[0], max_ix[1],:]
            self.is_map = True
        else:
            # No sampler: use the median parameter vector as "map"
            self.map = self.median
            self.is_map = False

    def process_chain(self, param_names=None, burn_fraction=0.5):
        """
        Define parameter names and get parameter statistics.
        """
        self.nwalkers, self.nstep, self.nparam = self.chain.shape

        # Build a fresh list instead of appending to a shared default
        # argument (mutable-default bug in the original implementation).
        if param_names is None:
            param_names = []

        if param_names == []:
            if self.param_names == []:
                param_names = ['a%d' %(i+1) for i in range(self.nparam)]
                self.param_names = param_names
        else:
            if len(param_names) != self.nparam:
                print('param_names must have N=%d (or zero) entries' %(self.nparam))
                return False

            self.param_names = param_names

        self.param_dict = {}
        for i in range(self.nparam):
            self.param_dict[self.param_names[i]] = i

        self.nburn = int(np.round(burn_fraction*self.nstep))
        self.stats = {}
        self.median = np.zeros(self.nparam)

        for param in self.param_names:
            pid = self.param_dict[param]
            self.stats[param] = self.get_stats(pid, burn=self.nburn)
            self.median[pid] = self.stats[param]['q50']

    def get_stats(self, pid, burn=0, raw=False):
        """
        Get percentile statistics for a parameter in the chain.
        """
        if raw:
            # ``pid`` is already a sample array in this mode
            pchain = pid*1.
        else:
            pchain = self.chain[:,burn:,pid].flatten()

        stats = {}
        stats['q05'] = np.percentile(pchain, 5)
        stats['q16'] = np.percentile(pchain, 16)
        stats['q50'] = np.percentile(pchain, 50)
        stats['q84'] = np.percentile(pchain, 84)
        stats['q95'] = np.percentile(pchain, 95)
        stats['mean'] = np.mean(pchain)
        stats['std'] = np.std(pchain)
        stats['width'] = (stats['q84']-stats['q16'])/2.
        return stats

    def show_chain(self, param='a1', chain=None, alpha=0.15, color='blue', scale=1, diff=0, ax = None, add_labels=True, hist=False, autoscale=True, *args, **kwargs):
        """
        Make a plot of the chain for a given parameter.

        For plotting, multiply the parameter by `scale` and subtract `diff`.
        """
        if chain is None:
            pid = self.param_dict[param]
            chain = self.chain[:,:,pid]

        # Plot either into the supplied axes or with the pyplot state API
        if ax is not None:
            plotter = ax
            xlabel = ax.set_xlabel
            ylabel = ax.set_ylabel
            ylim = ax.set_ylim
        else:
            plotter = plt
            xlabel = plt.xlabel
            ylabel = plt.ylabel
            ylim = plt.ylim

        if hist:
            h = plotter.hist(chain[:,self.nburn:].flatten(), alpha=alpha, color=color, *args, **kwargs)
            if add_labels:
                ylabel('N')
                xlabel(param)
        else:
            for i in range(self.nwalkers):
                p = plotter.plot(chain[i,:]*scale-diff, alpha=alpha, color=color, *args, **kwargs)

            if add_labels:
                xlabel('Step')
                ylabel(param)

            if autoscale:
                # +/- 8x the 16-84 percentile half-width around the median
                ylim(self.stats[param]['q50']*scale + np.array([-8,8])*self.stats[param]['width']*scale)

    def save_chain(self, file='emcee_chain.pkl', verbose=True):
        """
        Save the chain to a pickle file.
        """
        import pickle  # Python 3 (cPickle was Python-2 only)

        # Context manager guarantees the file is closed
        with open(file, 'wb') as fp:
            pickle.dump(self.nwalkers, fp)
            pickle.dump(self.nstep, fp)
            pickle.dump(self.nparam, fp)
            pickle.dump(self.param_names, fp)
            pickle.dump(self.chain, fp)

        if verbose:
            print('Wrote %s.' %(file))

    def load_chain(self, file='emcee_chain.pkl'):
        """
        Read the chain from a pickle file written by `save_chain`.
        """
        import pickle  # Python 3 (cPickle was Python-2 only)

        with open(file, 'rb') as fp:
            self.nwalkers = pickle.load(fp)
            self.nstep = pickle.load(fp)
            self.nparam = pickle.load(fp)
            self.param_names = pickle.load(fp)
            self.chain = pickle.load(fp)

    def save_fits(self, file='emcee_chain.fits', verbose=True):
        """
        Make a FITS file of an EMCEE chain.
        """
        import astropy.io.fits as pyfits

        header = pyfits.Header()
        # Modern astropy Header API: item assignment replaces the removed
        # pyfits ``header.update(key, value)`` signature.
        header['NWALKERS'] = self.nwalkers
        header['NSTEP'] = self.nstep
        header['NPARAM'] = self.nparam

        hdu = [pyfits.PrimaryHDU(header=header)]

        for param in self.param_names:
            header['PARAM'] = param
            hdu.append(pyfits.ImageHDU(data=self.__getitem__(param), header=header, name=param))

        hduList = pyfits.HDUList(hdu)
        # ``clobber`` was removed from astropy; ``overwrite`` is equivalent
        hduList.writeto(file, overwrite=True, output_verify='silentfix')

        if verbose:
            print('Wrote %s.' %(file))

    def load_fits(self, file='emcee_chain.fits'):
        """
        Load emcee chain fits file created by ``save_fits``.
        """
        import astropy.io.fits as pyfits

        with pyfits.open(file) as im:
            self.nwalkers = im[0].header['NWALKERS']
            self.nstep = im[0].header['NSTEP']
            self.nparam = im[0].header['NPARAM']
            self.param_names = []
            self.chain = np.ones((self.nwalkers, self.nstep, self.nparam))
            for i in range(self.nparam):
                self.param_names.append(im[i+1].header['PARAM'])
                self.chain[:,:,i] = im[i+1].data

    def parameter_correlations(self, size=8, shrink=5, show=None, file=None):
        """
        Grid of pairwise parameter scatter plots (post burn-in samples,
        thinned by ``shrink``).
        """
        if show is None:
            show = self.param_names

        NP = len(show)
        fig = plt.figure(figsize=(7,7))
        fig.subplots_adjust(wspace=0.0,hspace=0.0)

        counter = 0
        for i in range(NP):
            for j in range(NP):
                counter = counter + 1
                ax = fig.add_subplot(NP, NP, counter)
                a = ax.plot(self[show[i]][:,self.nburn::shrink].flatten(), self[show[j]][:,self.nburn::shrink].flatten(), alpha=0.03, color='black', linestyle='None', marker=',')
                a = ax.set_xlim(self.stats[show[i]]['q50']-3*self.stats[show[i]]['std'], self.stats[show[i]]['q50']+3*self.stats[show[i]]['std'])
                a = ax.set_ylim(self.stats[show[j]]['q50']-3*self.stats[show[j]]['std'], self.stats[show[j]]['q50']+3*self.stats[show[j]]['std'])
                if i == j:
                    # Diagonal panels: label with the parameter name
                    a = ax.text(0.5, 0.92, show[i], fontsize=8, color='red', horizontalalignment='center', verticalalignment='top', transform=ax.transAxes)

        if file is not None:
            fig.savefig(file)

    def draw_random(self, N=10):
        """
        Draw `N` random post-burn-in parameter vectors from the chain.
        """
        iwalk = np.asarray(np.random.rand(N)*self.nwalkers,dtype=int)
        istep = self.nburn + np.asarray(np.random.rand(N)*(self.nstep-self.nburn),dtype=int)
        draw = self.chain[iwalk, istep, :]
        return draw

    def __getitem__(self, param):
        # chain[param] -> (nwalkers, nstep) samples for that parameter
        pid = self.param_dict[param]
        return self.chain[:,:,pid]

    def contour(self, p1, p2, labels=None, levels=[0.683, 0.955], colors=None, limits=None, bins=20, ax=None, fill=False, **kwargs):
        """
        Plot sigma contours of two parameters (requires `astroML`).
        """
        import astroML.plotting

        # Arguments may be parameter names or raw sample arrays
        if isinstance(p1, str):
            trace1 = self.__getitem__(p1)[:,self.nburn:].flatten()
            pname1 = p1
        else:
            trace1 = p1.flatten()
            pname1 = ''

        if isinstance(p2, str):
            trace2 = self.__getitem__(p2)[:,self.nburn:].flatten()
            pname2 = p2
        else:
            trace2 = p2.flatten()
            pname2 = ''

        if labels is None:
            labels = [pname1, pname2]

        if limits is None:
            limits = [(t.min(), t.max()) for t in [trace1, trace2]]

        bins = [np.linspace(limits[i][0], limits[i][1], bins + 1)
                for i in range(2)]

        H, xbins, ybins = np.histogram2d(trace1, trace2, bins=(bins[0], bins[1]))
        H[H == 0] = 1E-16
        Nsigma = astroML.plotting.mcmc.convert_to_stdev(np.log(H))

        if ax is None:
            ax = plt

        ax.contour(0.5 * (xbins[1:] + xbins[:-1]),
                   0.5 * (ybins[1:] + ybins[:-1]),
                   Nsigma.T, levels=levels, **kwargs)

        if fill:
            if ax is plt:
                col = plt.gca().collections
            else:
                col = ax.collections

            n_levels = len(levels)
            if colors is None:
                # Grayscale fills, one shade per level
                dc = 1./(n_levels+1)
                colors = ['%.2f' %((i+1)*dc) for i in np.arange(n_levels)]
                print(colors)

            for i in range(n_levels):
                print(colors[i])
                col[i].set_facecolor(colors[i])
                col[i].set_edgecolor(colors[i])
                col[i].set_zorder(-10-i)
                col[i].set_alpha(0.8)

        if ax is plt:
            ax.xlabel(labels[0])
            ax.ylabel(labels[1])
            ax.xlim(limits[0])
            ax.ylim(limits[1])
        else:
            ax.set_xlabel(labels[0])
            ax.set_ylabel(labels[1])
            ax.set_xlim(limits[0])
            ax.set_ylim(limits[1])
|
gbrammerREPO_NAMEeazy-pyPATH_START.@eazy-py_extracted@eazy-py-master@eazy@utils.py@.PATH_END.py
|
{
"filename": "EccAndIncDamping.ipynb",
"repo_name": "dtamayo/reboundx",
"repo_path": "reboundx_extracted/reboundx-main/ipython_examples/EccAndIncDamping.ipynb",
"type": "Jupyter Notebook"
}
|
# Eccentricity & Inclination Damping
For modifying orbital elements, REBOUNDx offers two implementations. `modify_orbits_direct` directly calculates orbital elements and modifies those, while `modify_orbits_forces` applies forces that when orbit-averaged yield the desired behavior. Let's set up a simple simulation of two planets on initially eccentric and inclined orbits:
```python
import rebound
import reboundx
import numpy as np
sim = rebound.Simulation()
ainner = 1.
aouter = 10.
e0 = 0.1
inc0 = 0.1
sim.add(m=1.)
sim.add(m=1e-6,a=ainner,e=e0, inc=inc0)
sim.add(m=1e-6,a=aouter,e=e0, inc=inc0)
sim.move_to_com() # Moves to the center of momentum frame
ps = sim.particles
```
As opposed to most of the other effects, `modify_orbits_direct` is an operator rather than a force, so we have to add it as such:
```python
rebx = reboundx.Extras(sim)
mod = rebx.load_operator("modify_orbits_direct")
rebx.add_operator(mod)
```
Both `modify_orbits_forces` and `modify_orbits_direct` exponentially alter the eccentricities and inclinations, on e-folding timescales `tau_e` and `tau_inc`, respectively. **Negative timescales yield exponential decay, while positive timescales give exponential growth:**
\begin{equation}
e = e_0e^{t/\tau_e},\:\:i = i_0e^{t/\tau_i}
\end{equation}
In general, each body will have different damping timescales. By default, all particles have timescales of infinity, i.e., no effect. The units of time are set by the units of time in your simulation.
Let's set a maximum time for our simulation, and give our two planets different (damping) timescales. This can simply be done through:
```python
tmax = 1.e3
ps[1].params["tau_e"] = -tmax/10.
ps[1].params["tau_inc"] = -tmax/10.
ps[2].params["tau_e"] = -tmax
ps[2].params["tau_inc"] = -tmax
```
Now we simply run the simulation like we would normally with REBOUND. Here we store the semimajor axes at 1000 equally spaced intervals:
```python
Nout = 1000
e1,e2,inc1,inc2 = np.zeros(Nout), np.zeros(Nout), np.zeros(Nout), np.zeros(Nout)
times = np.linspace(0.,tmax,Nout)
for i,time in enumerate(times):
sim.integrate(time)
e1[i] = ps[1].e
e2[i] = ps[2].e
inc1[i] = ps[1].inc
inc2[i] = ps[2].inc
```
Now let's plot it on a linear-log scale to check whether we get the expected exponential behavior. We'll also overplot the expected exponential decays for comparison.
```python
e1pred = [e0*np.e**(t/ps[1].params["tau_e"]) for t in times]
e2pred = [e0*np.e**(t/ps[2].params["tau_e"]) for t in times]
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15,5))
ax = plt.subplot(111)
ax.set_yscale('log')
plt.plot(times,e1)
plt.plot(times,e1pred, 'r--')
plt.plot(times,e2)
plt.plot(times,e2pred, 'r--')
plt.axes().set_xlabel("Time", fontsize=24)
plt.axes().set_ylabel("Eccentricity", fontsize=24)
```
<matplotlib.text.Text at 0x10d8d1860>

```python
inc1pred = [inc0*np.e**(t/ps[1].params["tau_inc"]) for t in times]
inc2pred = [inc0*np.e**(t/ps[2].params["tau_inc"]) for t in times]
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15,5))
ax = plt.subplot(111)
ax.set_yscale('log')
plt.plot(times,inc1)
plt.plot(times,inc1pred, 'r--')
plt.plot(times,inc2)
plt.plot(times,inc2pred, 'r--')
plt.axes().set_xlabel("Time", fontsize=24)
plt.axes().set_ylabel("Inclination (rad)", fontsize=24)
```
<matplotlib.text.Text at 0x110b75908>

**Eccentricity-semimajor axis coupling**
Goldreich & Schlichting (2014) argue that a physical process that induces eccentricity damping should induce semimajor axis damping at order $e^2$, e.g., tides. We follow the approach of Deck & Batygin (2015) of parametrizing this through a coefficient $p$ that varies between 0 and 1. p=0 corresponds to no coupling, while p=1 represents the limit of eccentricity damping at constant angular momentum, which to a good approximation is the case with tides (our p=1 therefore corresponds to Goldreich and Schlichting's p=3). We set effect parameters through the operator object returned when we loaded the effect, which we called `mod` above. To set p:
```python
mod.params["p"] = 0.7
```
The default is `p = 0`, i.e., no coupling, so for a single planet, if you don't set `tau_a`, the planet will not migrate. The current `modify_orbits_forces` implementation always damps eccentricity at constant angular momentum, i.e., p=1 (so you can't set it to an arbitrary value).
**Coordinate Systems**
Everything in REBOUND by default uses Jacobi coordinates. If you would like to change the reference relative to which the particles are damped:
```python
mod.params["coordinates"] = reboundx.coordinates["BARYCENTRIC"]
```
to reference orbits to the system's barycenter, or
```python
mod.params["coordinates"] = reboundx.coordinates["PARTICLE"]
ps[0].params["primary"] = 1
```
to reference orbits to a particular particle (here `sim.particles[0]`)
```python
```
|
dtamayoREPO_NAMEreboundxPATH_START.@reboundx_extracted@reboundx-main@ipython_examples@EccAndIncDamping.ipynb@.PATH_END.py
|
{
"filename": "_variantsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/table/cells/font/_variantsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``variantsrc`` property of ``table.cells.font``.

    All validation logic lives in the ``SrcValidator`` base class; this
    subclass only fixes the property and parent names and the default
    ``edit_type``.
    """

    def __init__(
        self, plotly_name="variantsrc", parent_name="table.cells.font", **kwargs
    ):
        # Zero-argument super() is the idiomatic Python 3 form; the trailing
        # comma after **kwargs below already requires Python 3, so the
        # explicit two-argument super(...) was redundant.
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@table@cells@font@_variantsrc.py@.PATH_END.py
|
{
"filename": "_bordercolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/table/hoverlabel/_bordercolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``bordercolorsrc`` property of ``table.hoverlabel``.

    Delegates all validation to ``SrcValidator``; only the names and the
    default ``edit_type`` are fixed here.
    """

    def __init__(
        self, plotly_name="bordercolorsrc", parent_name="table.hoverlabel", **kwargs
    ):
        # Zero-argument super() is the idiomatic Python 3 form; the trailing
        # comma after **kwargs below already requires Python 3.
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@table@hoverlabel@_bordercolorsrc.py@.PATH_END.py
|
{
"filename": "_show.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/isosurface/slices/y/_show.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the ``show`` property of ``isosurface.slices.y``.

    Delegates all validation to ``BooleanValidator``; only the names and the
    default ``edit_type`` are fixed here.
    """

    def __init__(self, plotly_name="show", parent_name="isosurface.slices.y", **kwargs):
        # Zero-argument super() is the idiomatic Python 3 form; the trailing
        # comma after **kwargs below already requires Python 3.
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@isosurface@slices@y@_show.py@.PATH_END.py
|
{
"filename": "runCosmoHammerPseudoCmb.py",
"repo_name": "cosmo-ethz/CosmoHammer",
"repo_path": "CosmoHammer_extracted/CosmoHammer-master/examples/runCosmoHammerPseudoCmb.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
Runs CosmoHammer with a likelihood module simulating the WMAP likelihood by assuming the parameter distributions to be gaussian.
Yields results very similar to ones gathered using CAMB and WMAP in default config, but only needs a fraction of the time.
"""
from __future__ import print_function, division, absolute_import, unicode_literals

import numpy as np

from cosmoHammer import LikelihoodComputationChain
from cosmoHammer import CosmoHammerSampler
from cosmoHammer.util import InMemoryStorageUtil
from cosmoHammer.util import Params
from cosmoHammer.modules import PseudoCmbModule
from cosmoHammer.pso.ParticleSwarmOptimizer import ParticleSwarmOptimizer

# Each entry is (name, [start center, min, max, start width]).
# parameter start center, min, max, start width
params = Params(("hubble", [70, 65, 80, 3]),
                ("ombh2", [0.0226, 0.01, 0.03, 0.001]),
                ("omch2", [0.122, 0.09, 0.2, 0.01]),
                ("scalar_amp", [2.1e-9, 1.8e-9, 2.35e-9, 1e-10]),
                ("scalar_spectral_index", [0.96, 0.8, 1.2, 0.02]),
                ("re_optical_depth", [0.09, 0.01, 0.1, 0.03]),
                ("sz_amp", [1,0,2,0.4]))

# Build the likelihood chain; columns 1 and 2 of `params` are the lower and
# upper parameter bounds.
chain = LikelihoodComputationChain(params[:,1], params[:,2])
chain.params = params
chain.addLikelihoodModule(PseudoCmbModule())
chain.setup()

# find the best fit value and update our params knowledge
print("find best fit point")
pso = ParticleSwarmOptimizer(chain, params[:,1], params[:,2])
# Record the global-best position after every PSO iteration so the
# optimizer's path can be overplotted on the corner plot later.
psoTrace = np.array([pso.gbest.position.copy() for _ in pso.sample()])
# Re-center the sampler's start positions (column 0) on the PSO optimum.
params[:, 0] = pso.gbest.position

storageUtil = InMemoryStorageUtil()
sampler = CosmoHammerSampler(
    params= params,
    likelihoodComputationChain=chain,
    filePrefix="pseudoCmb_pso",
    walkersRatio=50,
    burninIterations=0,
    sampleIterations=100,
    storageUtil=storageUtil,
    threadCount=4
)

print("start sampling")
sampler.startSampling()
print("done!")

print("plotting")
import matplotlib.pyplot as plt
# NOTE(review): the 'triangle' package was later renamed to 'corner'
# upstream -- verify which one is installed.
import triangle

data = storageUtil.samples
K = data.shape[1]

# Figure-geometry bookkeeping for a K x K corner plot.
factor = 2.0  # size of one side of one panel
lbdim = 0.5 * factor  # size of left/bottom margin
trdim = 0.2 * factor  # size of top/right margin
whspace = 0.05  # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim

fig, ax = plt.subplots(K, K, figsize=(dim, dim), tight_layout=False)
triangle.corner(data, fig=fig, labels=params.keys)

# Overlay the PSO trajectory on the lower-triangle panels.
for i in range(K):
    for j in range(i):
        ax[i, j].plot(psoTrace[:, j], psoTrace[:, i], "1-")
plt.show()
|
cosmo-ethzREPO_NAMECosmoHammerPATH_START.@CosmoHammer_extracted@CosmoHammer-master@examples@runCosmoHammerPseudoCmb.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/contour/contours/__init__.py",
"type": "Python"
}
|
import sys

# Lazy-import shim: Python < 3.7 lacks module-level __getattr__ (PEP 562),
# so the submodule class is imported eagerly there. On 3.7+ the class is
# loaded lazily on first attribute access to keep import time down.
if sys.version_info < (3, 7):
    from ._labelfont import Labelfont
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._labelfont.Labelfont"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@contour@contours@__init__.py@.PATH_END.py
|
{
"filename": "Armentrout_2015.py",
"repo_name": "geodynamics/burnman",
"repo_path": "burnman_extracted/burnman-main/burnman/calibrants/Armentrout_2015.py",
"type": "Python"
}
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2024 by the BurnMan team, released under the GNU
# GPL v2 or later.
from burnman.eos.birch_murnaghan import BirchMurnaghanBase as BM3
from burnman.eos.mie_grueneisen_debye import MGDBase
from burnman.classes.calibrant import Calibrant
"""
Armentrout_2015
^^^^^^^^^^^^^^^
"""
class Co_fcc(Calibrant):
    """
    The FCC Cobalt pressure standard reported by Armentrout
    (2015; https://doi.org/10.1063/1.4935087).
    """

    def __init__(self):
        def _fcc_co_pressure(volume, temperature, params):
            """P(V, T): cold BM3 isotherm plus an MGD thermal-pressure term."""
            # Isothermal pressure evaluated at the reference temperature (GPa)
            cold_pressure = BM3().pressure(params["T_0"], volume, params)

            # Thermal-pressure difference between T and the reference T_0
            thermal_model = MGDBase()
            p_th_ref = thermal_model._thermal_pressure(params["T_0"], volume, params)
            p_th = thermal_model._thermal_pressure(temperature, volume, params)

            # Total pressure
            return cold_pressure + (p_th - p_th_ref)

        fcc_co_params = {
            "V_0": 6.7529e-06,
            "K_0": 196.0e9,
            "Kprime_0": 4.7,
            "Debye_0": 385.0,
            "grueneisen_0": 2.0,
            "q_0": 1.3,
            "n": 1.0,
            "T_0": 300.0,
            "P_0": 0.0,
            "Z": 4.0,
        }

        Calibrant.__init__(
            self, _fcc_co_pressure, "pressure", fcc_co_params
        )
|
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@burnman@calibrants@Armentrout_2015.py@.PATH_END.py
|
{
"filename": "AMR_comparison_analysis.py",
"repo_name": "Astroua/AstroStat_Results",
"repo_path": "AstroStat_Results_extracted/AstroStat_Results-master/AMR_comparison_analysis.py",
"type": "Python"
}
|
'''
Compare the timestep 30 fiducials with and without AMR.
'''

import numpy as np
import astropy.units as u
import os
import sys
from astropy.utils.console import ProgressBar
import pandas as pd
from copy import copy
import statsmodels.api as sm
import statsmodels.formula.api as smf

from turbustat.data_reduction import Mask_and_Moments
from turbustat.statistics import statistics_list

from jasper.analysis_funcs import (files_sorter, sort_distances,
                                   timestep_wrapper)

# sys.argv[1] - path to the full (non-AMR) dataset
# sys.argv[2] - path to the AMR fiducials
# sys.argv[3] - output path to save results in

path_to_data = sys.argv[1]
moments_path = os.path.join(path_to_data, "moments/")
path_to_amrdata = sys.argv[2]
amrmoments_path = os.path.join(path_to_amrdata, "moments/")

results_path = sys.argv[3]
save_path = os.path.join(results_path, "amr_comparison")
if not os.path.exists(save_path):
    os.mkdir(save_path)

faces = [0]  # [0, 2]

fiducials, _, _ = \
    files_sorter(path_to_data, timesteps='last', faces=faces,
                 append_prefix=True, design_labels=[], verbose=False)

# Now the AMR cubes
fiducials_amr, _, _ = \
    files_sorter(path_to_amrdata, append_prefix=True, design_labels=[],
                 faces=faces, timesteps='last', verbose=False)

# If the AMR moments path doesn't exist, make the moment arrays and save.
if not os.path.exists(amrmoments_path):
    os.mkdir(amrmoments_path)
    for face in faces:
        for fid in fiducials_amr[face]:
            fid_name = fiducials_amr[face][fid]
            mask_mom = Mask_and_Moments(fid_name,
                                        scale=0.001 * u.K)
            mask_mom.make_moments()
            mask_mom.make_moment_errors()
            save_name = os.path.splitext(os.path.basename(fid_name))[0]
            mask_mom.to_fits(os.path.join(amrmoments_path, save_name))

# Now run the distances AMR vs. none.
statistics = copy(statistics_list)
statistics.append("DeltaVariance_Centroid_Curve")
statistics.append("DeltaVariance_Centroid_Slope")
# BUG FIX: this was a Python 2 print *statement* (a SyntaxError under
# Python 3), while the rest of the script already uses print() calls.
print("Statistics to run: %s" % (statistics))

num_statistics = len(statistics)

for face in faces:
    for fid in ProgressBar([3, 4]):
        distances = np.zeros((len(statistics),
                              len(fiducials_amr[face].keys())))
        fid_name = fiducials[face][fid]
        for i, amr_fid in enumerate(fiducials_amr[face].keys()):
            fid_amr_name = fiducials_amr[face][amr_fid]
            out = timestep_wrapper(fid_name, fid_amr_name, statistics, False)
            out = [out]
            distances[:, i] = sort_distances(statistics, out).T.squeeze()
        df = pd.DataFrame(distances, index=statistics).T
        # Save each fiducial as a csv file
        save_name = "SimSuite8_fiducial{0}_amr_comparison_" \
            "face_{1}.csv".format(fid, face)
        df.to_csv(os.path.join(save_path, save_name))

    # And the distances between the AMR fiducials.
    dists = {}
    for fid in ProgressBar(fiducials_amr[face].keys()):
        fid_name = fiducials_amr[face][fid]
        for i, amr_fid in enumerate(fiducials_amr[face].keys()):
            if amr_fid == fid:
                continue
            fid_amr_name = fiducials_amr[face][amr_fid]
            out = timestep_wrapper(fid_name, fid_amr_name, statistics, False)
            out = [out]
            dists["{0}_{1}".format(fid, amr_fid)] = \
                sort_distances(statistics, out).T.squeeze()

    df_fids = pd.DataFrame(dists, index=statistics).T
    # Save each fiducial as a csv file
    save_name = "SimSuite8_fiducial_to_fiducial_amr_comparison_" \
        "face_{0}.csv".format(face)
    df_fids.to_csv(os.path.join(save_path, save_name))

# Now run the analysis.
# Need the path to the most recent clean run
clean_path = os.path.join(results_path, "clean")
folds = [os.path.join(clean_path, fold) for fold in
         os.listdir(clean_path) if "clean" in fold]
folds.sort()
print("Using the clean results from: {}".format(folds[-1]))
path_to_128dists = folds[-1]

niters = 10000

for face in faces:
    print("Running on face {}.".format(face))
    # non-AMR to non-AMR
    fid_fid = pd.read_csv(os.path.join(folds[-1],
                                       "fiducials_{0}_{0}.csv".format(face)))
    # AMR to AMR
    filename = "SimSuite8_fiducial_to_fiducial_amr_comparison_" \
        "face_{}.csv".format(face)
    amr_amr = pd.read_csv(os.path.join(save_path, filename))
    # Load in each fiducial comparison and stack
    for fid in np.arange(0, 5):
        name = "SimSuite8_fiducial{0}_amr_comparison_face_" \
            "{1}.csv".format(fid, face)
        df = pd.read_csv(os.path.join(save_path, name))
        # Add in row to identify which fiducial this is
        df["Fiducial"] = pd.Series(np.array([fid] * 5))
        if fid == 0:
            amr_fid = df
        else:
            amr_fid = pd.concat([amr_fid, df])

    # Significance of difference between AMR to Fid and Fid to Fid
    pvals_amr_to_fid = dict.fromkeys(amr_amr.columns)
    # Significance of difference between AMR to AMR and Fid to Fid
    # So does the AMR just plain have more scatter associated?
    pvals_fid_to_fid = dict.fromkeys(amr_amr.columns)

    for stat in ProgressBar(pvals_amr_to_fid.keys()):
        # Stack all distances together w/ a dummy variable
        # QUESTION: Treat the testing against the same as one group?
        dists = np.hstack([amr_fid[stat], amr_amr[stat],
                           fid_fid[stat]]).astype(float)
        # BUG FIX: np.float was removed in NumPy 1.20; the builtin float is
        # the documented replacement (np.float was only an alias for it).
        x = np.hstack([2 * np.ones_like(amr_fid[stat], dtype=float),
                       np.ones_like(amr_amr[stat], dtype=float),
                       np.zeros_like(fid_fid[stat], dtype=float)])
        stacked = np.hstack([dists[:, np.newaxis], x[:, np.newaxis]])
        df_dists = pd.DataFrame(stacked, columns=['Dists', 'Categ'])
        model = smf.ols("Dists ~ C(Categ)", data=df_dists)
        results = model.fit()
        mean_diff_amrfid = results.params[1]
        mean_diff_fidfid = results.params[2]
        # Now permute and re-test
        counter_amrfid = 0
        counter_fidfid = 0
        # BUG FIX: xrange is Python 2 only; range is the Python 3 equivalent.
        for _ in range(niters):
            perm_dists = np.random.permutation(dists)
            stacked_perm = np.hstack([perm_dists[:, np.newaxis],
                                      x[:, np.newaxis]])
            df_perm_dists = \
                pd.DataFrame(stacked_perm, columns=['Dists', 'Categ'])
            perm_model = smf.ols("Dists ~ C(Categ)", data=df_perm_dists)
            perm_results = perm_model.fit()
            if perm_results.params[1] > mean_diff_amrfid:
                counter_amrfid += 1
            if perm_results.params[2] > mean_diff_fidfid:
                counter_fidfid += 1
        pvals_amr_to_fid[stat] = float(counter_amrfid) / float(niters)
        pvals_fid_to_fid[stat] = float(counter_fidfid) / float(niters)

    pval_df = pd.DataFrame({"pvals AMR to Fid": pvals_amr_to_fid,
                            "pvals Fid to Fid": pvals_fid_to_fid})
    output_name = os.path.join(save_path,
                               "pvals_face_{}.csv".format(face))
    pval_df.to_csv(output_name)
|
AstrouaREPO_NAMEAstroStat_ResultsPATH_START.@AstroStat_Results_extracted@AstroStat_Results-master@AMR_comparison_analysis.py@.PATH_END.py
|
{
"filename": "test_imports.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/test/test_imports.py",
"type": "Python"
}
|
import time
import sys
import os
def test_import_1():
    """Return the wall-clock time in nanoseconds to import ``autobahn``."""
    t0 = time.monotonic_ns()
    import autobahn  # noqa: F401 -- imported purely to measure its cost
    return time.monotonic_ns() - t0
def test_import_2():
    """Return the wall-clock time in nanoseconds to import ``autobahn.xbr``."""
    t0 = time.monotonic_ns()
    from autobahn import xbr  # noqa: F401 -- imported purely to measure its cost
    return time.monotonic_ns() - t0
def test_import_3():
    """Return the wall-clock time in nanoseconds to import the OSS crossbar personality."""
    t0 = time.monotonic_ns()
    from crossbar import personality as standalone  # noqa: F401
    return time.monotonic_ns() - t0
def test_import_4():
    """Return the wall-clock time in nanoseconds for the full crossbar import surface."""
    t0 = time.monotonic_ns()
    from crossbar import personality as standalone  # noqa: F401
    from crossbar import edge  # noqa: F401
    from crossbar import network  # noqa: F401
    from crossbar import master  # noqa: F401
    from crossbar.node.main import main  # noqa: F401
    return time.monotonic_ns() - t0
def test_import_5():
    """Return the wall-clock time in nanoseconds to import ``zlmdb``."""
    t0 = time.monotonic_ns()
    import zlmdb  # noqa: F401 -- imported purely to measure its cost
    return time.monotonic_ns() - t0
def test_import_6():
    """Return the wall-clock time in nanoseconds to import ``cfxdb``."""
    t0 = time.monotonic_ns()
    import cfxdb  # noqa: F401 -- imported purely to measure its cost
    return time.monotonic_ns() - t0
def test_import_7():
    """Return the wall-clock time in nanoseconds to import all cfxdb dependencies."""
    t0 = time.monotonic_ns()
    import autobahn  # noqa: F401
    import cbor2  # noqa: F401
    import flatbuffers  # noqa: F401
    import numpy  # noqa: F401
    import multihash  # noqa: F401
    import txaio  # noqa: F401
    import click  # noqa: F401
    import web3  # noqa: F401
    import zlmdb  # noqa: F401
    return time.monotonic_ns() - t0
# Map CLI selector -> (timing function, human-readable description).
tests = {
    '1': (test_import_1, 'only autobahn'),
    '2': (test_import_2, 'xbr from autobahn'),
    '3': (test_import_3, 'crossbar oss'),
    '4': (test_import_4, 'crossbar full'),
    '5': (test_import_5, 'only zlmdb'),
    '6': (test_import_6, 'cfxdb'),
    '7': (test_import_7, 'all cfxdb deps'),
}

test = None
if len(sys.argv) > 1:
    test, test_desc = tests.get(sys.argv[1], (None, None))

if test:
    # Run the one selected timing test in this process.
    dur = test()
    print('test {} ("{}") ran in {} seconds'.format(sys.argv[1], test_desc, dur / 10**9))
    print()
else:
    # No (valid) selector: re-invoke this script once per test in a fresh
    # interpreter so each import is timed cold. The dict keys are '1'..'7',
    # so iterate them directly instead of rebuilding them from range().
    for key in tests:
        os.system(' '.join([sys.executable, __file__, key]))
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@test@test_imports.py@.PATH_END.py
|
{
"filename": "calc_avg_pres.py",
"repo_name": "cshsgy/ExoCubed",
"repo_path": "ExoCubed_extracted/ExoCubed-main/examples/2023-Chen-exo3/calc_avg_pres.py",
"type": "Python"
}
|
import numpy as np
from netCDF4 import Dataset
from scipy.interpolate import interp1d  # NOTE(review): imported but unused here
import os  # NOTE(review): imported but unused here
from tqdm import tqdm

# Set the filepath to your single combined .nc file
filepath = 'pres_hotjupiter.nc' # Change this to your file path
# Set the name of the output file where the results will be saved
output_file = 'averages_and_products.nc'

# Variables to process
variables_to_process = ["temp", "vlat", "vlon", "vel1"]

# Initialize accumulators for summing data across all time steps
data_sums = {}
upvp_sum = None  # running sum of zonal-mean u'v'
upwp_sum = None  # running sum of zonal-mean u'w'
uv_variance_sum = None  # running sum of zonal-mean eddy kinetic energy
timestep_count = 0

# Planet/model constants -- units presumably SI (TODO confirm against model config)
P0 = 1E5
R = 3779
g = 8.0
pressure_levels = np.linspace(1E5, 0.01E5, 100) # Replace with actual pressure levels

# Extract number of time steps in the file
with Dataset(filepath, 'r') as nc:
    num_time_steps = len(nc.variables['time'][:])

# Process each time step, skipping the first `start_t` steps (spin-up).
start_t = 100
Rp = 1E8  # NOTE(review): defined but never used below
for t in tqdm(range(start_t,num_time_steps), desc="Processing time steps"):
    with Dataset(filepath, mode='r') as nc:
        if t==start_t:
            x1 = nc.variables['press'][:]  # NOTE(review): read but never used afterwards
            latitudes = nc.variables['lat'][:]  # NOTE(review): read, but output lat uses a linspace instead
        # If this is the first time step, initialize data_sums and other accumulators
        if timestep_count == 0:
            for var in variables_to_process:
                # Initialize the sum arrays
                # NOTE(review): shapes [1] and [2] are assumed to be
                # (level, lat) with longitude on axis 2 -- TODO confirm.
                data_sums[var] = np.zeros((nc.variables[var].shape[1], nc.variables[var].shape[2]))
            upvp_sum = np.zeros((nc.variables['vlat'].shape[1], nc.variables['vlat'].shape[2]))
            upwp_sum = np.zeros((nc.variables['vlat'].shape[1], nc.variables['vlat'].shape[2]))
            uv_variance_sum = np.zeros((nc.variables['vlat'].shape[1], nc.variables['vlat'].shape[2]))
        # Update sums
        mean_rho = np.mean(nc.variables['rho'][t], axis=2)  # NOTE(review): computed but not used
        for var in variables_to_process:
            data = nc.variables[var][t] # Take this time step's 3D field
            zonal_mean = np.mean(data, axis=2) # Zonal mean over longitude, reducing dimension
            data_sums[var] += zonal_mean # Summing up the zonal mean data
            if var in ['vlat', 'vlon', 'vel1']:
                prime = data - np.expand_dims(zonal_mean, axis=2) # Subtracting zonal mean, keeping dimensions consistent
                # NOTE(review): 'vlat' is treated as u and 'vlon' as v in the
                # eddy products below -- confirm against the model's naming.
                if var == 'vlat':
                    u_prime = prime
                else:
                    if var == 'vlon':
                        v_prime = prime
                    else:
                        w_prime = prime
        # Calculate and sum u'v', u'w' and eddy kinetic energy for this time step
        upvp = np.mean(u_prime * v_prime, axis=2) # Zonal mean of the product
        upwp = np.mean(u_prime * w_prime, axis=2) # Zonal mean of the product
        uv_variance = np.mean((u_prime**2 + v_prime**2 + w_prime**2) / 2, axis=2) # Zonal mean of the variance
        upvp_sum += upvp
        upwp_sum += upwp
        uv_variance_sum += uv_variance
    timestep_count += 1 # Update the timestep count

# After processing all time steps, calculate the averages
averages = {var: data_sum / timestep_count for var, data_sum in data_sums.items()} # Time average
upvp_avg = upvp_sum / timestep_count # Time average
upwp_avg = upwp_sum / timestep_count # Time average
uv_variance_avg = uv_variance_sum / timestep_count # Time average

# Save the results to a new .nc file
with Dataset(output_file, mode='w') as new_nc:
    # Create dimensions
    # NOTE(review): the 'pressure' dimension uses len(pressure_levels)=100;
    # confirm this matches the vertical size of the averaged arrays.
    new_nc.createDimension('pressure', len(pressure_levels))
    new_nc.createDimension('lat', averages['temp'].shape[1])
    # Create pressure variable
    pressure_var = new_nc.createVariable('pressure', np.float32, ('pressure',))
    pressure_var[:] = pressure_levels
    pressure_var.units = 'Pa'
    # Create latitude variable
    lat_var = new_nc.createVariable('lat', np.float32, ('lat',))
    lat_var[:] = np.linspace(-90, 90, averages['temp'].shape[1])
    lat_var.units = 'degrees_north'
    # Create variables for the zonal means and other variables
    for var, data in averages.items():
        new_var = new_nc.createVariable(var + '_avg', np.float32, ('pressure', 'lat'))
        new_var[:] = data
    # Create variables for the eddy products
    upvp_var = new_nc.createVariable('upvp', np.float32, ('pressure', 'lat'))
    upvp_var[:] = upvp_avg
    upwp_var = new_nc.createVariable('upwp', np.float32, ('pressure', 'lat'))
    upwp_var[:] = upwp_avg
    uv_variance_var = new_nc.createVariable('uv_variance', np.float32, ('pressure', 'lat'))
    uv_variance_var[:] = uv_variance_avg
    # Optionally, add descriptions, units, or other metadata as attributes to the variables

# Print out a message indicating the script has finished
print(f"Data processing completed. Results saved to {output_file}.")
|
cshsgyREPO_NAMEExoCubedPATH_START.@ExoCubed_extracted@ExoCubed-main@examples@2023-Chen-exo3@calc_avg_pres.py@.PATH_END.py
|
{
"filename": "fftarma.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/sandbox/tsa/fftarma.py",
"type": "Python"
}
|
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft with all the lfilter it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately, use ratio check theory is correct and example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.interpolate) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade does not do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also does not have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
'''fft tools for arma processes
This class contains several methods that are providing the same or similar
returns to try out and test different implementations.
Notes
-----
TODO:
check whether we do not want to fix maxlags, and create new instance if
maxlag changes. usage for different lengths of timeseries ?
or fix frequency and length for fft
check default frequencies w, terminology norw n_or_w
some ffts are currently done without padding with zeros
returns for spectral density methods needs checking, is it always the power
spectrum hw*hw.conj()
normalization of the power spectrum, spectral density: not checked yet, for
example no variance of underlying process is used
'''
def __init__(self, ar, ma, n):
    '''Store the ARMA lag-polynomials and a default sample size.

    Parameters
    ----------
    ar : array_like
        AR lag-polynomial coefficients (1d, including the leading term).
    ma : array_like
        MA lag-polynomial coefficients (1d, including the leading term).
    n : int
        default number of observations; stored as ``self.nobs`` and used
        as the default fft length in ``fftarma``.
    '''
    # duplicates now that we are subclassing ArmaProcess
    super().__init__(ar, ma)

    self.ar = np.asarray(ar)
    self.ma = np.asarray(ma)
    self.nobs = n
    # could make the polynomials into cached attributes
    self.arpoly = np.polynomial.Polynomial(ar)
    self.mapoly = np.polynomial.Polynomial(ma)
    self.nar = len(ar)  # 1d only currently
    self.nma = len(ma)
def padarr(self, arr, maxlag, atend=True):
    '''Return a copy of a 1d array zero-padded to length ``maxlag``.

    Helper written as a method although it does not use ``self``.

    Parameters
    ----------
    arr : array_like, 1d
        array that will be padded with zeros
    maxlag : int
        length of array after padding
    atend : bool
        If True (default), zeros are appended after the data, otherwise
        they are prepended.

    Returns
    -------
    arrp : ndarray
        zero-padded copy of ``arr``

    Notes
    -----
    Mainly used to extend coefficient arrays of the lag-polynomials.
    '''
    fill = np.zeros(maxlag - len(arr))
    return np.r_[arr, fill] if atend else np.r_[fill, arr]
def pad(self, maxlag):
    '''Zero-pad both lag-polynomials to a common length ``maxlag``.

    Parameters
    ----------
    maxlag : int
        new length of both coefficient arrays

    Returns
    -------
    ar : ndarray
        AR polynomial coefficients extended with trailing zeros
    ma : ndarray
        MA polynomial coefficients extended with trailing zeros
    '''
    ar_ext = self.padarr(self.ar, maxlag)
    ma_ext = self.padarr(self.ma, maxlag)
    return ar_ext, ma_ext
def fftar(self, n=None):
    '''Fourier transform of the AR polynomial, zero-padded at the end to n.

    Parameters
    ----------
    n : int, optional
        length of array after zero-padding; defaults to the AR
        polynomial's own length (no padding).

    Returns
    -------
    fftar : ndarray
        fft of the zero-padded ar polynomial
    '''
    length = len(self.ar) if n is None else n
    return fft.fft(self.padarr(self.ar, length))
def fftma(self, n=None):
    '''Fourier transform of MA polynomial, zero-padded at end to n

    Parameters
    ----------
    n : int, optional
        length of array after zero-padding. If None, defaults to the
        length of the MA polynomial (no padding).

    Returns
    -------
    fftma : ndarray
        fft of zero-padded ma polynomial
    '''
    if n is None:
        # BUG FIX: this branch previously used len(self.ar) -- a copy-paste
        # from fftar -- which padded/truncated the MA polynomial to the AR
        # length. Mirroring fftar, the natural default is the MA length.
        # (n also had no default value, so the None branch was unreachable
        # via fftma() without an argument; n=None now makes it callable.)
        n = len(self.ma)
    return fft.fft(self.padarr(self.ma, n))
def fftarma(self, n=None):
    '''Fourier transform of the ARMA transfer function, zero-padded to n.

    Computed as the ratio fft(ma polynomial) / fft(ar polynomial), both
    zero-padded to the same length.

    Parameters
    ----------
    n : int, optional
        length of array after zero-padding; defaults to ``self.nobs``.

    Returns
    -------
    fftarma : ndarray
        fft of the zero-padded arma polynomial
    '''
    size = self.nobs if n is None else n
    return self.fftma(size) / self.fftar(size)
def spd(self, npos):
    '''raw spectral density evaluated on 2*npos fft frequencies

    ``npos`` is the number of points intended for the positive half of the
    spectrum; the returned arrays contain twice that many points
    (two-sided). Differs from the other spd methods in how the grid is
    built. Normalization by 0.5/pi not fully verified (original note).

    Returns
    -------
    sd : ndarray
        two-sided spectral density values
    w : ndarray
        fft frequencies in radians
    '''
    w = fft.fftfreq(2 * npos) * 2 * np.pi
    hw = self.fftarma(2 * npos)
    sd = (hw * hw.conj()).real * 0.5 / np.pi
    return sd, w
def spdshift(self, n):
    '''power spectral density using fftshift

    currently returns two-sided according to fft frequencies, use first half

    Parameters
    ----------
    n : int
        length to which both lag-polynomials are zero-padded before the fft

    Returns
    -------
    sd : ndarray
        two-sided power spectral density, ``(hw * conj(hw)).real``
    w : ndarray
        fft frequencies in radians
    '''
    mapadded = self.padarr(self.ma, n)
    arpadded = self.padarr(self.ar, n)
    # ratio of the shifted transforms gives the ARMA transfer function
    hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
    w = fft.fftfreq(n) * 2 * np.pi
    # Cleanup: an unused one-sided slice variable (`wslice`) and the stale
    # commented-out one-sided returns were removed; behavior is unchanged.
    return (hw * hw.conj()).real, w
def spddirect(self, n):
    '''power spectral density using padding to length n done by fft

    currently returns two-sided according to fft frequencies, use first half

    Parameters
    ----------
    n : int
        fft length; both polynomials are zero-padded to this length by
        ``fft.fft`` itself

    Returns
    -------
    sd : ndarray
        two-sided power spectral density, ``abs(hw)**2 * 0.5/pi``
    w : ndarray
        fft frequencies in radians
    '''
    hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
    w = fft.fftfreq(n) * 2 * np.pi
    # Cleanup: an unused one-sided slice variable (`wslice`) and the stale
    # commented-out one-sided return were removed; behavior is unchanged.
    return (np.abs(hw) ** 2) * 0.5 / np.pi, w
def _spddirect2(self, n):
    '''experimental two-sided spectral density from symmetrized polynomials

    Original author note: "this looks bad, maybe with an fftshift".
    Kept private for comparison with the other spd methods. Note that,
    unlike spddirect, the result is returned as a complex array
    (``hw * conj(hw)``) without taking ``.real`` and without the 0.5/pi
    normalization.
    '''
    hw = (fft.fft(np.r_[self.ma[::-1], self.ma], n)
          / fft.fft(np.r_[self.ar[::-1], self.ar], n))
    return (hw * hw.conj())  # .real[n//2-1:]
def spdroots(self, w):
    '''spectral density for frequency using polynomial roots

    Thin wrapper around ``_spdroots`` that supplies this process's own AR
    and MA roots (``arroots``/``maroots`` presumably provided by the
    ArmaProcess base class -- TODO confirm).

    Parameters
    ----------
    w : array_like
        frequencies for which the spectral density is calculated
    '''
    return self._spdroots(self.arroots, self.maroots, w)
def _spdroots(self, arroots, maroots, w):
    '''spectral density for frequency using polynomial roots

    builds two arrays (number of roots, number of frequencies)

    Parameters
    ----------
    arroots : ndarray
        roots of ar (denominator) lag-polynomial
    maroots : ndarray
        roots of ma (numerator) lag-polynomial
    w : array_like
        frequencies for which spd is calculated

    Returns
    -------
    hw : ndarray
        spectral density at the given frequencies
    w : ndarray
        the (squeezed) frequency array

    Notes
    -----
    this should go into a function
    '''
    # column vector of frequencies so root/frequency arrays broadcast
    w = np.atleast_2d(w).T
    cosw = np.cos(w)
    # Greene 5th edt. p626, section 20.2.7.a.
    # formula uses the reciprocal roots
    maroots = 1./maroots
    arroots = 1./arroots
    num = 1 + maroots**2 - 2* maroots * cosw
    den = 1 + arroots**2 - 2* arroots * cosw
    # product over roots (last axis); could use expsumlog for stability
    hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1)
    return np.squeeze(hw), w.squeeze()
def spdpoly(self, w, nma=50):
    '''spectral density from the MA-polynomial representation of the process

    Parameters
    ----------
    w : array_like
        frequencies at which the density is evaluated
    nma : int
        number of MA coefficients used in the truncated MA representation

    References
    ----------
    Cochrane, section 8.3.3
    '''
    ma_repr = np.polynomial.Polynomial(self.arma2ma(nma))
    hval = ma_repr(np.exp(1j * w))
    density = np.real_if_close(hval * hval.conj() * 0.5/np.pi)
    return density, w
def filter(self, x):
    '''
    Filter a timeseries with the ARMA filter.

    Padding with zero is missing; in the example, padding was needed to get
    initial conditions identical to the direct filter.  Initial filtered
    observations differ from `filter2` and `signal.lfilter`, but at the
    end they are the same.

    Parameters
    ----------
    x : ndarray
        time series to filter (1d; length determines the FFT size)

    Returns
    -------
    ndarray
        filtered series (complex, from ifft; imaginary part should be
        negligible for real input)

    See Also
    --------
    tsa.filters.fftconvolve
    '''
    n = x.shape[0]
    # BUG FIX: the original tested ``n == self.fftarma`` where
    # ``self.fftarma`` is a bound method, so the comparison was always
    # False and the "reuse cached response" branch was unreachable.
    # Compute the frequency response for this length directly.
    fftarma = self.fftma(n) / self.fftar(n)
    return fft.ifft(fftarma * fft.fft(x))
def filter2(self, x, pad=0):
    '''Filter a time series using fftconvolve3 with the ARMA filter.

    Padding of ``x`` currently works only if ``x`` is 1d.  In the example
    this produces the same observations at the beginning as lfilter even
    without padding.

    TODO: this returns 1 additional observation at the end
    '''
    from statsmodels.tsa.filters import fftconvolve3
    if pad:
        if pad == 'auto':
            # just guessing how much padding
            extra = 2 * (self.nma + self.nar)
        else:
            extra = int(pad)
        x = self.padarr(x, x.shape[0] + extra, atend=False)
    return fftconvolve3(x, self.ma, self.ar)
def acf2spdfreq(self, acovf, nfreq=100, w=None):
    '''
    Spectral density computed directly from an autocovariance function.

    Not really a method (does not use ``self``); kept for comparison.
    Not efficient for large n or a long acf.  This is also similarly
    used in tsa.stattools.periodogram with a window.
    '''
    if w is None:
        # column vector of frequencies so the cosine table broadcasts
        w = np.linspace(0, np.pi, nfreq)[:, None]
    nac = len(acovf)
    lags = np.arange(1, nac)
    cos_table = np.cos(w * lags)
    return 0.5 / np.pi * (acovf[0] + 2 * (acovf[1:] * cos_table).sum(1))
def invpowerspd(self, n):
    '''Autocovariance via inverse FFT of the power spectral density.

    Scaling is correct, but ``n`` needs to be large for numerical accuracy.
    Maybe padding with zero in fft would be faster.
    Without the final slice this returns the 2-sided autocovariance with
    fftshift.

    >>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
    array([ 2.08      ,  1.44      ,  0.72      ,  0.36      ,  0.18      ,
            0.09      ,  0.045     ,  0.0225    ,  0.01125   ,  0.005625  ])
    >>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
    array([ 2.08      ,  1.44      ,  0.72      ,  0.36      ,  0.18      ,
            0.09      ,  0.045     ,  0.0225    ,  0.01125   ,  0.005625  ])
    '''
    response = self.fftarma(n)
    power = response * response.conj()
    # keep only the first n lags of the (near-real) inverse transform
    return np.real_if_close(fft.ifft(power), tol=200)[:n]
def spdmapoly(self, w, twosided=False, nfreq=100):
    '''Spectral density of the MA polynomial only.

    Needs division by the AR part for a full ARMA density; use
    LagPolynomial for that.

    Parameters
    ----------
    w : array_like or None
        frequencies; if None, ``nfreq`` points on [0, pi] are used
    twosided : bool
        currently unused (kept for interface compatibility)
    nfreq : int
        number of frequency points when ``w`` is None.
        BUG FIX: the original referenced an undefined name ``nfreq`` in
        this branch, raising NameError; it is now a proper keyword
        argument with a default.
    '''
    if w is None:
        w = np.linspace(0, np.pi, nfreq)
    return 0.5 / np.pi * self.mapoly(np.exp(w * 1j))
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
    """Plot sample path, acf, power spectrum and pacf in a 2x2 grid.

    Parameters
    ----------
    fig : matplotlib Figure or None
        figure to draw into; a new one is created if None
    nobs : int
        number of observations for the random sample.
        BUG FIX: the original hard-coded ``nsample=100`` and silently
        ignored this argument; it is now honored (default unchanged).
    nacf : int
        number of autocorrelation lags to plot
    nfreq : int
        number of frequency points for the spectral density
    """
    rvs = self.generate_sample(nsample=nobs, burnin=500)
    acf = self.acf(nacf)[:nacf]  # TODO: check return length
    pacf = self.pacf(nacf)
    w = np.linspace(0, np.pi, nfreq)
    spdr, wr = self.spdroots(w)

    if fig is None:
        import matplotlib.pyplot as plt
        fig = plt.figure()

    ax = fig.add_subplot(2, 2, 1)
    ax.plot(rvs)
    ax.set_title(f'Random Sample \nar={self.ar}, ma={self.ma}')

    ax = fig.add_subplot(2, 2, 2)
    ax.plot(acf)
    # BUG FIX: the original title used "ma={self.ma!r}s", leaving a
    # stray trailing "s" in the rendered text.
    ax.set_title(f'Autocorrelation \nar={self.ar}, ma={self.ma}')

    ax = fig.add_subplot(2, 2, 3)
    ax.plot(wr, spdr)
    ax.set_title(f'Power Spectrum \nar={self.ar}, ma={self.ma}')

    ax = fig.add_subplot(2, 2, 4)
    ax.plot(pacf)
    ax.set_title(f'Partial Autocorrelation \nar={self.ar}, ma={self.ma}')

    return fig
def spdar1(ar, w):
    """Spectral density of an AR(1) process at frequencies ``w``.

    ``ar`` may be the scalar coefficient rho itself, or a lag-polynomial
    ``[1, -rho]`` from which rho is extracted.
    """
    rho = ar if np.ndim(ar) == 0 else -ar[1]
    return 0.5 / np.pi / (1 + rho * rho - 2 * rho * np.cos(w))
if __name__ == '__main__':
    # Ad-hoc demonstration / comparison script for the fft-based ARMA tools.

    def maxabs(x, y):
        # maximum absolute elementwise difference, used as an error measure
        return np.max(np.abs(x - y))

    nobs = 200  # 10000
    ar = [1, 0.0]
    ma = [1, 0.0]
    ar2 = np.zeros(nobs)
    ar2[:2] = [1, -0.9]
    uni = np.zeros(nobs)
    uni[0] = 1.
    # arrep = signal.lfilter(ma, ar, ar2)
    # marep = signal.lfilter([1],arrep, uni)
    # same faster:
    arcomb = np.convolve(ar, ar2, mode='same')
    marep = signal.lfilter(ma, arcomb, uni)  # [len(ma):]
    print(marep[:10])
    mafr = fft.fft(marep)
    rvs = np.random.normal(size=nobs)
    datafr = fft.fft(rvs)
    y = fft.ifft(mafr * datafr)
    print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]], rowvar=0))
    arrep = signal.lfilter([1], marep, uni)
    print(arrep[:20])  # roundtrip to ar
    arfr = fft.fft(arrep)
    yfr = fft.fft(y)
    x = fft.ifft(arfr * yfr).real  # imag part is e-15
    # the next two are equal, roundtrip works
    print(x[:5])
    print(rvs[:5])
    print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]], rowvar=0))

    # ARMA filter using fft with ratio of fft of ma/ar lag polynomial
    # seems much faster than using lfilter
    # padding, note arcomb is already full length
    arcombp = np.zeros(nobs)
    arcombp[:len(arcomb)] = arcomb
    map_ = np.zeros(nobs)  # rename: map was shadowing builtin
    map_[:len(ma)] = ma
    ar0fr = fft.fft(arcombp)
    ma0fr = fft.fft(map_)
    y2 = fft.ifft(ma0fr / ar0fr * datafr)
    # the next two are (almost) equal in real part, almost zero but different in imag
    print(y2[:10])
    print(y[:10])
    print(maxabs(y, y2))  # from chfdiscrete
    # 1.1282071239631782e-014

    ar = [1, -0.4]
    ma = [1, 0.2]
    arma1 = ArmaFft([1, -0.5, 0, 0, 0, 00, -0.7, 0.3], [1, 0.8], nobs)

    nfreq = nobs
    w = np.linspace(0, np.pi, nfreq)
    w2 = np.linspace(0, 2 * np.pi, nfreq)

    import matplotlib.pyplot as plt
    plt.close('all')

    # Visually compare the different spectral-density implementations.
    plt.figure()
    spd1, w1 = arma1.spd(2**10)
    print(spd1.shape)
    _ = plt.plot(spd1)
    plt.title('spd fft complex')

    plt.figure()
    spd2, w2 = arma1.spdshift(2**10)
    print(spd2.shape)
    _ = plt.plot(w2, spd2)
    plt.title('spd fft shift')

    plt.figure()
    spd3, w3 = arma1.spddirect(2**10)
    print(spd3.shape)
    _ = plt.plot(w3, spd3)
    plt.title('spd fft direct')

    plt.figure()
    spd3b = arma1._spddirect2(2**10)
    print(spd3b.shape)
    _ = plt.plot(spd3b)
    plt.title('spd fft direct mirrored')

    plt.figure()
    spdr, wr = arma1.spdroots(w)
    print(spdr.shape)
    plt.plot(w, spdr)
    plt.title('spd from roots')

    plt.figure()
    spdar1_ = spdar1(arma1.ar, w)
    print(spdar1_.shape)
    _ = plt.plot(w, spdar1_)
    plt.title('spd ar1')

    plt.figure()
    wper, spdper = arma1.periodogram(nfreq)
    print(spdper.shape)
    _ = plt.plot(w, spdper)
    plt.title('periodogram')

    startup = 1000
    rvs = arma1.generate_sample(startup + 10000)[startup:]
    import matplotlib.mlab as mlb

    plt.figure()
    # NOTE(review): `x` here is the roundtrip series from above, not `rvs`;
    # presumably `rvs` was intended — confirm before relying on this plot.
    sdm, wm = mlb.psd(x)
    print('sdm.shape', sdm.shape)
    sdm = sdm.ravel()
    plt.plot(wm, sdm)
    plt.title('matplotlib')

    from nitime.algorithms import LD_AR_est
    # yule_AR_est(s, order, Nfreqs)
    wnt, spdnt = LD_AR_est(rvs, 10, 512)
    plt.figure()
    print('spdnt.shape', spdnt.shape)
    _ = plt.plot(spdnt.ravel())
    print(spdnt[:10])
    plt.title('nitime')

    fig = plt.figure()
    arma1.plot4(fig)

    # plt.show()
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@sandbox@tsa@fftarma.py@.PATH_END.py
|
{
"filename": "point_source.py",
"repo_name": "Herculens/herculens",
"repo_path": "herculens_extracted/herculens-main/herculens/PointSourceModel/point_source.py",
"type": "Python"
}
|
# Copyright (c) 2023, herculens developers and contributors
__author__ = 'austinpeel'
import functools
import numpy as np
import jax.numpy as jnp
try:
from helens import LensEquationSolver
except ImportError:
_solver_installed = False
else:
_solver_installed = True
__all__ = ['PointSource']
class PointSource(object):
    """A point source defined in the image or source plane.

    A point source is considered to be either
    (1) a single position and amplitude defined in the source plane, or else
    (2) multiple positions and amplitudes defined in the image plane which
        correspond to a single point in the source plane.
    """

    def __init__(self, point_source_type, mass_model, image_plane):
        """Instantiate a point source.

        Parameters
        ----------
        point_source_type : str
            Either 'LENSED_POSITIONS' or 'SOURCE_POSITION'.
        mass_model : instance of `herculens.MassModel.mass_model.MassModel`
            Model of the lensing mass used to map positions between the source
            and image planes. Default is None.
        image_plane : instance of `herculens.Coordinates.pixel_grid.PixelGrid`
            Pixel grid used for triangulation in solving the lens equation.
        """
        self.type = point_source_type
        self.mass_model = mass_model
        self.image_plane = image_plane
        if self.type == 'SOURCE_POSITION':
            # Solving the lens equation requires the optional `helens` package.
            self._check_solver_install(f"type = '{self.type}'")

    @property
    def solver(self):
        # Lazily instantiated lens equation solver, built from the image
        # plane pixel grid and the mass model's ray shooting function.
        if not hasattr(self, '_solver'):
            # TODO: support the argument k != None
            ray_shooting_func = functools.partial(self.mass_model.ray_shooting, k=None)
            x_grid, y_grid = self.image_plane.pixel_coordinates
            self._solver = LensEquationSolver(x_grid, y_grid, ray_shooting_func)
        return self._solver

    def image_positions_and_amplitudes(self, kwargs_point_source,
                                       kwargs_lens=None, kwargs_solver=None,
                                       zero_amp_duplicates=True, re_compute=False):
        """Compute image plane positions and corresponding amplitudes
        of the point source, optionally "turning-off" (zeroing their amplitude)
        potentially duplicated images predicted by the lens equation solver.

        Parameters
        ----------
        kwargs_point_source : list of dict
            Keyword arguments corresponding to the point source instances.
        kwargs_lens : list of dict, optional
            Keyword arguments for the lensing mass model. Default is None.
        kwargs_solver : dict, optional
            Keyword arguments for the lens equation solver. Default is None.
        zero_amp_duplicates : bool, optional
            If True, amplitude of duplicated images are forced to be zero.
            Note that it may affect point source ordering!.
            Default is True.
        re_compute : bool, optional
            If True, re-compute (solving the lens equation) image positions,
            even for point source models of type 'IMAGE_POSITIONS'.
            Default is False.

        Return
        ------
        theta_x, theta_y, amp : tuple of 1D arrays
            Positions (x, y) in image plane and amplitude of the lensed images.
        """
        theta_x, theta_y = self.image_positions(
            kwargs_point_source, kwargs_lens=kwargs_lens,
            kwargs_solver=kwargs_solver, re_compute=re_compute,
        )
        amp = self.image_amplitudes(
            theta_x, theta_y, kwargs_point_source, kwargs_lens=kwargs_lens,
        )
        # Duplicates only arise when the solver produced the positions.
        if zero_amp_duplicates and self.type == 'SOURCE_POSITION':
            amp, theta_x, theta_y = self._zero_amp_duplicated_images(
                amp, theta_x, theta_y, kwargs_solver,
            )
        return theta_x, theta_y, amp

    def image_positions(self, kwargs_point_source, kwargs_lens=None, kwargs_solver=None, re_compute=False):
        """Compute image plane positions of the point source.

        Parameters
        ----------
        kwargs_point_source : list of dict
            Keyword arguments corresponding to the point source instances.
        kwargs_lens : list of dict, optional
            Keyword arguments for the lensing mass model. Default is None.
        kwargs_solver : dict, optional
            Keyword arguments for the lens equation solver. Default is None.
        re_compute : bool, optional
            If True, solve the lens equation even for 'IMAGE_POSITIONS' models.
        """
        if self.type == 'IMAGE_POSITIONS' and not re_compute:
            theta_x = jnp.atleast_1d(kwargs_point_source['ra'])
            theta_y = jnp.atleast_1d(kwargs_point_source['dec'])
            return theta_x, theta_y
        elif self.type == 'SOURCE_POSITION' or re_compute:
            if self.type == 'IMAGE_POSITIONS':  # i.e. re_compute = True
                beta_x, beta_y = self.source_position(kwargs_point_source, kwargs_lens=kwargs_lens)
            else:
                beta_x, beta_y = kwargs_point_source['ra'], kwargs_point_source['dec']
            # Solve the lens equation
            beta = jnp.array([beta_x, beta_y])
            if kwargs_solver is None:
                kwargs_solver = {}  # fall back to default lens equation solver settings
            theta, beta = self.solver.solve(
                beta, kwargs_lens, **kwargs_solver,
            )
            return theta.T

    def image_amplitudes(self, theta_x, theta_y, kwargs_point_source, kwargs_lens=None):
        """Determine the amplitudes of the multiple images of the point source.

        Parameters
        ----------
        theta_x : array_like
            X position of points in the image plane [arcsec].
        theta_y : array_like
            Y position of points in the image plane [arcsec].
        kwargs_point_source : list of dict
            Keyword arguments corresponding to the point source instances.
        kwargs_lens : list of dict, optional
            Keyword arguments for the lensing mass model. Default is None.
        """
        amp = kwargs_point_source['amp']
        if self.type == 'IMAGE_POSITIONS':
            return jnp.atleast_1d(amp)
        elif self.type == 'SOURCE_POSITION':
            # magnify the intrinsic (source-plane) amplitude at each image
            mag = self.mass_model.magnification(theta_x, theta_y, kwargs_lens)
            return amp * jnp.abs(mag)

    def source_position(self, kwargs_point_source, kwargs_lens=None):
        """Compute the source plane position of the point source.

        Parameters
        ----------
        kwargs_point_source : list of dict
            Keyword arguments corresponding to the point source instances.
        kwargs_lens : list of dict, optional
            Keyword arguments for the lensing mass model. Default is None.
        """
        if self.type == 'IMAGE_POSITIONS':
            theta_x = jnp.array(kwargs_point_source['ra'])
            theta_y = jnp.array(kwargs_point_source['dec'])
            # ray-shoot every image back and average in the source plane
            beta = self.mass_model.ray_shooting(theta_x, theta_y, kwargs_lens)
            return jnp.mean(beta[0]), jnp.mean(beta[1])
        elif self.type == 'SOURCE_POSITION':
            beta_x = jnp.atleast_1d(kwargs_point_source['ra'])
            beta_y = jnp.atleast_1d(kwargs_point_source['dec'])
            return beta_x, beta_y

    def source_amplitude(self, kwargs_point_source, kwargs_lens=None):
        """Determine the amplitude of the point source in the source plane.

        Parameters
        ----------
        kwargs_point_source : list of dict
            Keyword arguments corresponding to the point source instances.
        kwargs_lens : list of dict, optional
            Keyword arguments for the lensing mass model. Default is None.
        """
        if self.type == 'IMAGE_POSITIONS':
            theta_x = jnp.atleast_1d(kwargs_point_source['ra'])
            theta_y = jnp.atleast_1d(kwargs_point_source['dec'])
            # de-magnify each image amplitude, then average
            mag = self.mass_model.magnification(theta_x, theta_y, kwargs_lens)
            amps = jnp.atleast_1d(kwargs_point_source['amp']) / abs(mag)
            return jnp.mean(amps)
        elif self.type == 'SOURCE_POSITION':
            return jnp.array(kwargs_point_source['amp'])

    def error_image_plane(self, kwargs_point_source, kwargs_lens, kwargs_solver):
        """Distance between given image positions and those re-predicted by
        solving the lens equation from the ray-traced source position."""
        self._check_solver_install("log_prob_image_plane")
        # get the optimized image positions
        theta_x_opti = jnp.array(kwargs_point_source['ra'])
        theta_y_opti = jnp.array(kwargs_point_source['dec'])
        # find source position via ray-tracing
        beta = self.mass_model.ray_shooting(theta_x_opti, theta_y_opti, kwargs_lens)
        beta_x, beta_y = beta[0].mean(), beta[1].mean()
        beta = jnp.array([beta_x, beta_y])
        # solve lens equation to find the predicted image positions
        theta, beta = self.solver.solve(
            beta, kwargs_lens, **kwargs_solver,
        )
        theta_x_pred, theta_y_pred = theta.T
        # return departures between original and new positions
        return jnp.sqrt((theta_x_opti - theta_x_pred)**2 + (theta_y_opti - theta_y_pred)**2)

    def log_prob_image_plane(self, kwargs_point_source, kwargs_lens,
                             kwargs_solver, sigma_image=1e-3):
        """Gaussian log-probability penalizing image-plane departures."""
        error_image = self.error_image_plane(kwargs_point_source, kwargs_lens, kwargs_solver)
        # penalize departures between original and new positions
        return - jnp.sum((error_image / sigma_image)**2)

    def error_source_plane(self, kwargs_point_source, kwargs_lens):
        """Scatter of the ray-traced source positions around their mean."""
        # find source position via ray-tracing
        theta_x_in = jnp.array(kwargs_point_source['ra'])
        theta_y_in = jnp.array(kwargs_point_source['dec'])
        beta_x, beta_y = self.mass_model.ray_shooting(theta_x_in, theta_y_in, kwargs_lens)
        # compute distance between mean position and ray-traced positions
        return jnp.sqrt((beta_x - beta_x.mean())**2 + (beta_y - beta_y.mean())**2)

    def log_prob_source_plane(self, kwargs_point_source, kwargs_lens, sigma_source=1e-3):
        """Gaussian log-probability penalizing source-plane scatter."""
        error_source = self.error_source_plane(kwargs_point_source, kwargs_lens)
        return - jnp.sum((error_source / sigma_source)**2)

    def _zero_amp_duplicated_images(self, amp_in, theta_x_in, theta_y_in, kwargs_solver):
        """This function takes as input the list of multiply lensed images
        (amplitudes and positions) and assign zero amplitude to any image
        that have a x coordinate equal to up to `decimals` decimals.

        WARNING: this function may change the original ordering of images!

        Parameters
        ----------
        amp_in : array_like
            Amplitude of point sources
        theta_x_in : array_like
            X position of point sources in the image plane.
        theta_y_in : array_like
            Y position of point sources in the image plane.
        kwargs_solver : dict
            Keyword arguments for the LensEquation solver, used to estimate the
            accuracy of point source positions and use it to find duplicated images.

        Returns
        -------
        amp_out, theta_x_out, theta_y_out : tuple of 3 1D arrays
            Amplitudes (potentially some being zero-ed) and positions in image plane.
        """
        # TODO: find a way not to change the image ordering (might be slower though).
        num_images = kwargs_solver['nsolutions']
        position_accuracy = self.solver.estimate_accuracy(
            kwargs_solver['niter'],
            kwargs_solver['scale_factor'],
            kwargs_solver['nsubdivisions'],
        )
        # TODO: the following choice for truncation the digits may not be general enough!
        position_decimals = np.floor(- np.log10(position_accuracy)).astype(int) - 1
        unique_theta_x, unique_indices = jnp.unique(
            jnp.round(theta_x_in, decimals=position_decimals),  # TODO: issue when original value close to zero -> rounded to exactly zero!
            return_index=True,
            fill_value=False,  # effectively zero
            size=num_images,
        )
        condition = jnp.where(unique_theta_x, True, False)
        unique_amp = amp_in[unique_indices]  # order amplitudes as the positions
        zero_amp = 1e-20  # not exactly 0 to avoid problems with autodiff gradients
        amp_out = jnp.where(condition, unique_amp, jnp.full(num_images, zero_amp))
        theta_x_out = theta_x_in[unique_indices]
        theta_y_out = theta_y_in[unique_indices]
        return amp_out, theta_x_out, theta_y_out

    def _check_solver_install(self, feature):
        """Raise a helpful error when the optional solver is unavailable."""
        if not _solver_installed:
            # BUG FIX: error message previously read "for the require point
            # source modeling feature" (ungrammatical / wrong word).
            raise RuntimeError(f"A lens equation solver is required for the "
                               f"requested point source modeling feature ('{feature}'). "
                               f"Please install `helens` from https://github.com/Herculens/helens.")
|
HerculensREPO_NAMEherculensPATH_START.@herculens_extracted@herculens-main@herculens@PointSourceModel@point_source.py@.PATH_END.py
|
{
"filename": "test_healpix.py",
"repo_name": "astropy/reproject",
"repo_path": "reproject_extracted/reproject-main/reproject/healpix/tests/test_healpix.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import os
import numpy as np
import pytest
from astropy.io import fits
from astropy.wcs import WCS
from astropy_healpix import nside_to_npix
from ...interpolation.tests.test_core import as_high_level_wcs
from ...tests.test_high_level import ALL_DTYPES
from ..high_level import reproject_from_healpix, reproject_to_healpix
from ..utils import parse_coord_system
DATA = os.path.join(os.path.dirname(__file__), "data")
def get_reference_header(overscan=1, oversample=2, nside=1):
    """Build a reference plate-carree (CAR) FITS header.

    The pixel scale matches a HEALPix map of the given ``nside`` oversampled
    by ``oversample``; ``overscan`` > 1 extends the image beyond the full sky.
    """
    cdelt = 180.0 / (oversample * 4 * nside)
    base = overscan * oversample * nside
    header = fits.Header()
    header.update(
        {
            "CDELT1": -cdelt,
            "CDELT2": cdelt,
            "CRPIX1": base * 4,
            "CRPIX2": base * 2,
            "CRVAL1": 180.0,
            "CRVAL2": 0.0,
            "CTYPE1": "RA---CAR",
            "CTYPE2": "DEC--CAR",
            "CUNIT1": "deg",
            "CUNIT2": "deg",
            "NAXIS": 2,
            "NAXIS1": base * 8,
            "NAXIS2": base * 4,
        }
    )
    return header
@pytest.mark.parametrize(
    "nside,nested,healpix_system,image_system,dtype,order",
    itertools.product(
        [1, 2, 4, 8, 16, 32, 64],
        [True, False],
        "C",
        "C",
        ALL_DTYPES,
        ["bilinear", "nearest-neighbor"],
    ),
)
def test_reproject_healpix_to_image_footprint(
    nside, nested, healpix_system, image_system, dtype, order
):
    """Test that HEALPix->WCS conversion correctly flags pixels that do not
    have valid WCS coordinates."""
    npix = nside_to_npix(nside)
    healpix_data = np.random.uniform(size=npix).astype(dtype)
    # overscan=2 makes the output image extend beyond the valid sky area
    reference_header = get_reference_header(overscan=2, oversample=2, nside=nside)
    wcs_out = WCS(reference_header)
    shape_out = reference_header["NAXIS2"], reference_header["NAXIS1"]
    image_data, footprint = reproject_from_healpix(
        (healpix_data, healpix_system),
        wcs_out,
        shape_out=shape_out,
        order=order,
        nested=nested,
    )
    if order == "bilinear":
        # bilinear interpolation marks out-of-sky pixels as NaN in the output
        expected_footprint = ~np.isnan(image_data)
    else:
        # nearest-neighbor: derive the valid region from the WCS coordinates
        coord_system_in = parse_coord_system(healpix_system)
        yinds, xinds = np.indices(shape_out)
        world_in = wcs_out.pixel_to_world(xinds, yinds).transform_to(coord_system_in)
        world_in_unitsph = world_in.represent_as("unitspherical")
        lon_in, lat_in = world_in_unitsph.lon, world_in_unitsph.lat
        expected_footprint = ~(np.isnan(lon_in) | np.isnan(lat_in))
    np.testing.assert_array_equal(footprint, expected_footprint)
@pytest.mark.parametrize(
    "wcsapi,nside,nested,healpix_system,image_system,dtype",
    itertools.product([True, False], [1, 2, 4, 8, 16, 32, 64], [True, False], "C", "C", ALL_DTYPES),
)
def test_reproject_healpix_to_image_round_trip(
    wcsapi, nside, nested, healpix_system, image_system, dtype
):
    """Test round-trip HEALPix->WCS->HEALPix conversion for a random map,
    with a WCS projection large enough to store each HEALPix pixel"""
    npix = nside_to_npix(nside)
    healpix_data = np.random.uniform(size=npix).astype(dtype)
    reference_header = get_reference_header(oversample=2, nside=nside)
    wcs_out = WCS(reference_header)
    shape_out = reference_header["NAXIS2"], reference_header["NAXIS1"]
    if wcsapi:
        # also exercise the high-level (APE 14) WCS interface
        wcs_out = as_high_level_wcs(wcs_out)
    image_data, footprint = reproject_from_healpix(
        (healpix_data, healpix_system),
        wcs_out,
        shape_out=shape_out,
        order="nearest-neighbor",
        nested=nested,
    )
    # project back to HEALPix; nearest-neighbor makes the round trip exact
    healpix_data_2, footprint = reproject_to_healpix(
        (image_data, wcs_out), healpix_system, nside=nside, order="nearest-neighbor", nested=nested
    )
    np.testing.assert_array_equal(healpix_data, healpix_data_2)
def test_reproject_file():
    """Reproject the bundled bayestar HEALPix file and compare to reference."""
    header = get_reference_header(oversample=2, nside=8)
    result, _ = reproject_from_healpix(
        os.path.join(DATA, "bayestar.fits.gz"), header
    )
    expected = fits.getdata(os.path.join(DATA, "reference_result.fits"))
    np.testing.assert_allclose(result, expected, rtol=1.0e-5)
def test_reproject_invalid_order():
    """An unsupported interpolation order must raise a ValueError."""
    header = get_reference_header(oversample=2, nside=8)
    with pytest.raises(ValueError) as exc:
        reproject_from_healpix(
            os.path.join(DATA, "bayestar.fits.gz"), header, order="bicubic"
        )
    expected_msg = "Only nearest-neighbor and bilinear interpolation are supported"
    assert exc.value.args[0] == expected_msg
def test_reproject_to_healpix_input_types(valid_celestial_input_data):
    """All supported input representations must reproject identically."""
    # fixture: same celestial data as a reference (array, wcs) pair plus an
    # alternative input representation and its extra keyword arguments
    array_ref, wcs_in_ref, input_value, kwargs_in = valid_celestial_input_data

    # Compute reference
    healpix_data_ref, footprint_ref = reproject_to_healpix((array_ref, wcs_in_ref), "C", nside=64)

    # Compute test
    healpix_data_test, footprint_test = reproject_to_healpix(
        input_value, "C", nside=64, **kwargs_in
    )

    # Make sure there are some valid values
    assert np.sum(~np.isnan(healpix_data_ref)) == 4

    np.testing.assert_allclose(healpix_data_ref, healpix_data_test)
    np.testing.assert_allclose(footprint_ref, footprint_test)
def test_reproject_from_healpix_output_types(valid_celestial_output_projections):
    """All supported output-projection representations must agree."""
    # fixture: reference WCS/shape plus an alternative representation of the
    # same output projection and its extra keyword arguments
    wcs_out_ref, shape_ref, output_value, kwargs_out = valid_celestial_output_projections

    array_input = np.random.random(12 * 64**2)

    # Compute reference
    output_ref, footprint_ref = reproject_from_healpix(
        (array_input, "C"), wcs_out_ref, nested=True, shape_out=shape_ref
    )

    # Compute test
    output_test, footprint_test = reproject_from_healpix(
        (array_input, "C"), output_value, nested=True, **kwargs_out
    )

    np.testing.assert_allclose(output_ref, output_test)
    np.testing.assert_allclose(footprint_ref, footprint_test)
def test_reproject_to_healpix_exact_allsky():
    # Regression test for a bug that caused artifacts in the final image if the
    # WCS covered the whole sky - this was due to using scipy's map_coordinates
    # one instead of our built-in one which deals properly with the pixels
    # around the rim.

    shape_out = (160, 320)
    wcs = WCS(naxis=2)
    wcs.wcs.crpix = [(shape_out[1] + 1) / 2, (shape_out[0] + 1) / 2]
    wcs.wcs.cdelt = np.array([-360.0 / shape_out[1], 180.0 / shape_out[0]])
    wcs.wcs.crval = [0, 0]
    wcs.wcs.ctype = ["RA---CAR", "DEC--CAR"]

    # a constant all-sky map should reproject with a full footprint and no NaNs
    array = np.ones(shape_out)

    healpix_array, footprint = reproject_to_healpix(
        (array, wcs),
        coord_system_out="galactic",
        nside=64,
        nested=False,
        order="bilinear",
    )

    assert np.all(footprint > 0)
    assert not np.any(np.isnan(healpix_array))
|
astropyREPO_NAMEreprojectPATH_START.@reproject_extracted@reproject-main@reproject@healpix@tests@test_healpix.py@.PATH_END.py
|
{
"filename": "cycoverage.py",
"repo_name": "mpi4py/mpi4py",
"repo_path": "mpi4py_extracted/mpi4py-master/conf/cycoverage.py",
"type": "Python"
}
|
import os
from coverage.plugin import (
CoveragePlugin,
FileTracer,
FileReporter
)
from coverage.files import (
canonical_filename,
)
CYTHON_EXTENSIONS = {".pxd", ".pyx", ".pxi"}
class CythonCoveragePlugin(CoveragePlugin):
    """Coverage.py plugin entry points for Cython source files."""

    def configure(self, config):
        # keep the configured exclusion patterns for the file reporter
        self.exclude = config.get_option("report:exclude_lines")

    def file_tracer(self, filename):
        path = canonical_filename(os.path.abspath(filename))
        if os.path.splitext(path)[1] in CYTHON_EXTENSIONS:
            return CythonFileTracer(path)
        return None

    def file_reporter(self, filename):
        path = canonical_filename(os.path.abspath(filename))
        if os.path.splitext(path)[1] in CYTHON_EXTENSIONS:
            return CythonFileReporter(path, self.exclude)
        return None
class CythonFileTracer(FileTracer):
    """File tracer that reports the Cython source path as its own source."""

    def __init__(self, filename):
        super().__init__()
        # canonical absolute path of the Cython source file
        self.filename = filename

    def source_filename(self):
        return self.filename
class CythonFileReporter(FileReporter):
    """Line-coverage reporter for a single Cython source file."""

    def __init__(self, filename, exclude=None):
        super().__init__(filename)
        # exclusion regexes from the coverage configuration (may be None)
        self.exclude = exclude

    def lines(self):
        _setup_lines(self.exclude)
        return self._get_lines(CODE_LINES)

    def excluded_lines(self):
        _setup_lines(self.exclude)
        return self._get_lines(EXCL_LINES)

    def translate_lines(self, lines):
        _setup_lines(self.exclude)
        # module-init lines count as executed even if not traced directly
        return set(lines) | self._get_lines(EXEC_LINES)

    def _get_lines(self, lines_map):
        # the parsed tables are keyed by path relative to the repo root
        rel = os.path.relpath(self.filename, TOPDIR)
        return set(lines_map.get(rel, {}))
# Repository layout: this file lives in conf/, one level below the repo root.
TOPDIR = os.path.dirname(os.path.dirname(__file__))
SRCDIR = os.path.join(TOPDIR, 'src')
# Lazily populated caches of per-file line tables (see _setup_lines).
CODE_LINES = None
EXEC_LINES = None
EXCL_LINES = None
def _setup_lines(exclude):
    """Parse the generated C source once and cache the line tables."""
    global CODE_LINES, EXEC_LINES, EXCL_LINES
    # already parsed: nothing to do
    if not (CODE_LINES is None or EXEC_LINES is None or EXCL_LINES is None):
        return
    source = os.path.join(SRCDIR, 'mpi4py', 'MPI.c')
    CODE_LINES, EXEC_LINES, EXCL_LINES = _parse_c_file(source, exclude)
def _parse_c_file(c_file, exclude_list):
    """Scan a Cython-generated C file and build per-source line tables.

    Returns three mappings keyed by the original Cython source path:
    code lines (traceable), module-init-only lines, and excluded lines.
    """
    from collections import defaultdict
    import re

    # Start of Cython's static table of original source file names.
    match_filetab_begin = 'static const char *__pyx_f[] = {'
    match_filetab_begin = re.compile(re.escape(match_filetab_begin)).match
    match_filetab_entry = re.compile(r' *"(.*)",').match
    # '/* "path":lineno' comment that precedes each generated code chunk.
    match_source_path_line = re.compile(r' */[*] +"(.*)":([0-9]+)$').match
    # The current source line is marked with a long '<<<<<<' arrow.
    match_current_code_line = re.compile(r' *[*] (.*) # <<<<<<+$').match
    match_comment_end = re.compile(r' *[*]/$').match
    # __Pyx_TraceLine(lineno, ..., __PYX_ERR(file_id, ... -> executable line
    match_trace_line = re.compile(
        r' *__Pyx_TraceLine\((\d+),\d+,__PYX_ERR\((\d+),'
    ).match
    # Declaration forms that generate code but are never "executed".
    not_executable = re.compile(
        '|'.join([
            r'\s*c(?:type)?def\s+'
            r'(?:(?:public|external)\s+)?'
            r'(?:struct|union|enum|class)'
            r'(\s+[^:]+|)\s*:',
        ])
    ).match
    if exclude_list:
        line_is_excluded = re.compile("|".join([
            rf'(?:{regex})' for regex in exclude_list
        ])).search
    else:
        def line_is_excluded(_):
            # no exclusion patterns configured
            return False

    filetab = []     # file-id -> original Cython source filename
    modinit = False  # True once inside the generated module-init function
    code_lines = defaultdict(dict)
    exec_lines = defaultdict(dict)
    executable_lines = defaultdict(set)
    excluded_lines = defaultdict(set)

    with open(c_file) as lines:
        lines = iter(lines)
        for line in lines:
            if match_filetab_begin(line):
                # collect file-table entries until the table ends
                for line in lines:
                    match = match_filetab_entry(line)
                    if not match:
                        break
                    filename = match.group(1)
                    filetab.append(filename)
            match = match_source_path_line(line)
            if not match:
                if '__Pyx_TraceCall("__Pyx_PyMODINIT_FUNC ' in line:
                    modinit = True
                if '__Pyx_TraceLine(' in line:
                    trace_line = match_trace_line(line)
                    if trace_line:
                        lineno, fid = map(int, trace_line.groups())
                        executable_lines[filetab[fid]].add(lineno)
                continue
            filename, lineno = match.groups()
            lineno = int(lineno)
            # walk the comment block to find the '<<<<<<'-marked code line
            for comment_line in lines:
                match = match_current_code_line(comment_line)
                if match:
                    code_line = match.group(1).rstrip()
                    if not_executable(code_line):
                        break
                    if line_is_excluded(code_line):
                        excluded_lines[filename].add(lineno)
                        break
                    code_lines[filename][lineno] = code_line
                    if modinit:
                        exec_lines[filename][lineno] = code_line
                    break
                if match_comment_end(comment_line):
                    # unexpected comment format - false positive?
                    break
    # Remove lines that generated code but are not traceable.
    for filename, lines in code_lines.items():
        dead_lines = set(lines).difference(executable_lines.get(filename, ()))
        for lineno in dead_lines:
            del lines[lineno]
    for filename, lines in exec_lines.items():
        dead_lines = set(lines).difference(executable_lines.get(filename, ()))
        for lineno in dead_lines:
            del lines[lineno]
    return code_lines, exec_lines, excluded_lines
def coverage_init(reg, options):  # noqa: ARG001
    """Coverage.py plugin entry point: register the Cython plugin."""
    plugin = CythonCoveragePlugin()
    # the same instance serves both as configurer and file tracer
    for register in (reg.add_configurer, reg.add_file_tracer):
        register(plugin)
|
mpi4pyREPO_NAMEmpi4pyPATH_START.@mpi4py_extracted@mpi4py-master@conf@cycoverage.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "radis/radis",
"repo_path": "radis_extracted/radis-master/radis/test/utils.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Tools to test RADIS library.
Summary
-------
Tools to test RADIS library
Examples
--------
Run all tests::
cd radis/test
pytest
Run only "fast" tests (tests that have a "fast" label, and should be
a few seconds only)::
cd radis/test
pytest -m fast
-------------------------------------------------------------------------------
"""
import os
from os.path import dirname, exists, join
from radis.db.utils import getFile
from radis.misc.config import (
addDatabankEntries,
diffDatabankEntries,
getDatabankEntries,
getDatabankList,
)
TEST_FOLDER_PATH = join(dirname(dirname(__file__)), "test")
def getTestFile(file, force=False):
    """Return the absolute path of test file ``file``, if it exists.

    Used by test functions so they need not worry about the project
    architecture.  Using :ref:`test files <label_dev_test_files>` is
    recommended when writing tests.

    Parameters
    ----------
    file: str
        filename. See :ref:`the list of available test files <label_dev_test_files>`

    Returns
    -------
    path: str
        absolute path of ``file`` on the local machine. Raises an error
        if the test file is not present, unless ``force=True``.

    Examples
    --------
    ::

        from radis.test.utils import getTestFile
        from radis import load_spec
        load_spec(getTestFile('CO_Tgas1500K_mole_fraction0.01.spec'))

    See Also
    --------
    :py:func:`~radis.test.utils.getValidationCase`
    """
    folder = join(TEST_FOLDER_PATH, "files")
    path = join(folder, file)
    if not exists(path) and not force:
        available = "\n- ".join(os.listdir(folder))
        raise FileNotFoundError(
            "Test file `{0}` does not exist. Choose one of: \n- {1} or use force=True".format(
                file, available
            )
        )
    return path
def getValidationCase(file, force=False):
    """Return the absolute path of validation case ``file``, if it exists.

    Used by test functions so they need not worry about the project
    architecture.  Using :ref:`validation test files <label_dev_test_files>`
    is recommended when writing validation cases.

    Parameters
    ----------
    file: str
        filename. See :ref:`the list of available validation files <label_dev_test_files>`

    Returns
    -------
    path: str
        absolute path of ``file`` on the local machine. Raises an error
        if the validation file is not present, unless ``force=True``.

    Examples
    --------
    Load the reference case from the [Klarenaar2017]_ paper ::

        from radis.test.utils import getValidationCase
        from radis import Spectrum

        s_exp = Spectrum.from_txt(
            getValidationCase(
                join(
                    "test_CO2_3Tvib_vs_klarenaar_data", "klarenaar_2017_digitized_data.csv",
                )
            ),
            "transmittance_noslit",
            wunit="cm-1",
            unit="",
            delimiter=",",
            name="Klarenaar 2017",
        )

    See Also
    --------
    :py:func:`~radis.test.utils.getTestFile`
    """
    folder = join(TEST_FOLDER_PATH, "validation")
    path = join(folder, file)
    if not exists(path) and not force:
        available = "\n- ".join(os.listdir(folder))
        raise FileNotFoundError(
            "Validation case `{0}` does not exist. Choose one of: \n- {1} or use force=True".format(
                file, available
            )
        )
    return path
# Python 3.6+ only
# Expose the available file names through the functions' annotations so that
# tools can list valid choices; the directories are read at import time.
getTestFile.__annotations__["file"] = os.listdir(join(TEST_FOLDER_PATH, "files"))
getValidationCase.__annotations__["file"] = os.listdir(
    join(TEST_FOLDER_PATH, "validation")
)
# %% Convenience function
def test_spectrum(**kwargs):
    """Generate the :ref:`first example spectrum <label_first_example>` with ::

        import radis
        s = radis.test_spectrum()
        s.plot()

    Other Parameters
    ----------------
    kwargs: sent to :py:func:`~radis.lbl.calc.calc_spectrum`
    """
    from radis import calc_spectrum

    conditions = dict(
        wavenum_min=1900,
        wavenum_max=2300,
        molecule="CO",
        isotope="1,2,3",
        pressure=1.01325,  # bar
        Tgas=700,  # K
        mole_fraction=0.1,
        path_length=1,  # cm
        databank="hitran",
    )
    conditions.update(kwargs)
    # When the caller specifies both wmin and wmax, drop the default
    # wavenumber bounds so calc_spectrum gets a single range specification.
    if "wmin" in kwargs and "wmax" in kwargs:
        del conditions["wavenum_min"], conditions["wavenum_max"]
    return calc_spectrum(**conditions)
# %% Comparison functions
def testEqual(a, b, info=""):
if a != b:
print("Mismatch", info, ":", a, "!=", b)
return a == b
# %% Test Databases
# Minimal line-database fragments shipped with RADIS. They are registered in
# ~/radis.json by setup_test_line_databases() and used by the test suite.
TEST_DATABASES = {
    # CO2 bandhead fragment, single isotope
    "HITRAN-CO2-TEST": {
        "info": "HITRAN 2016 database, CO2, 1 main isotope (CO2-626), bandhead: "
        + "2380-2398 cm-1 (4165-4200 nm)",
        "path": [getTestFile(r"hitran_co2_626_bandhead_4165_4200nm.par")],
        "format": "hitran",
        "parfuncfmt": "hapi",
        "levelsfmt": "radis",
    },
    # CO fundamental band fragment, 3 isotopes
    "HITRAN-CO-TEST": {
        "info": "HITRAN 2016 database, CO, 3 main isotopes (CO-26, 36, 28), "
        + "2000-2300 cm-1",
        "path": [getTestFile(r"hitran_co_3iso_2000_2300cm.par")],
        "format": "hitran",
        "parfuncfmt": "hapi",
        "levelsfmt": "radis",
    },
    # Narrow CDSD-HITEMP fragment; partition functions from HAPI
    "HITEMP-CO2-TEST": {
        "info": "HITEMP-2010, CO2, 3 main isotope (CO2-626, 636, 628), "
        + "2283.7-2285.1 cm-1",
        "path": [getTestFile(r"cdsd_hitemp_09_fragment.txt")],
        "format": "cdsd-hitemp",  # CDSD-HITEMP version (same lines as HITEMP-2010).
        "parfuncfmt": "hapi",
        "levelsfmt": "radis",
    },
    # Same lines as above, but with energy levels from the Tashkun
    # effective-Hamiltonian levels file (nonequilibrium calculations)
    "HITEMP-CO2-HAMIL-TEST": {
        "info": "HITEMP-2010, CO2, 3 main isotope (CO2-626, 636, 628), "
        + "2283.7-2285.1 cm-1, energies calculated from Tashkun effective hamiltonian",
        "path": [getTestFile(r"cdsd_hitemp_09_fragment.txt")],
        "format": "cdsd-hitemp",  # CDSD-HITEMP version (same lines as HITEMP-2010).
        "parfunc": getFile("CO2", "partition_functions.txt"),
        "parfuncfmt": "cdsd",
        "levels": {1: getTestFile(r"co2_cdsd_hamiltonian_fragment.levels")},
        "levelsfmt": "cdsd-hamil",
    },
}
"""dict: test databases added in the :ref:`Configuration file <label_lbl_config_file>`
by :py:func:`~radis.test.utils.setup_test_line_databases`
"""
# %% Utils to test spec module
def setup_test_line_databases(verbose=True):
    """Build :py:data:`~radis.test.utils.TEST_DATABASES` and add them in
    ~/radis.json. Generate the file if it doesnt exist.

    In particular:

    - HITRAN-CO2-TEST: CO2, HITRAN 2016, 4165-4200 nm
    - HITRAN-CO-TEST: CO, HITRAN 2016, 2000-2300 cm-1
    - HITEMP-CO2-TEST: CO2, HITEMP-2010, 2283.7-2285.1 cm-1, 3 isotopes
    - HITEMP-CO2-HAMIL-TEST: same as previous, with (some) energy levels computed
      from Tashkun effective Hamiltonian.

    These test databases are used to run the different test routines. They can
    obviously be used by Users to run simulations, but we suggest Users to download
    their own line databases files and add them to ~/radis.json so they have more control
    on it

    Examples
    --------
    Initialize the Line databases::

        from radis import setup_test_line_databases
        setup_test_line_databases()

    Plot a CO2 spectrum at high temperature::

        from radis import calc_spectrum
        calc_spectrum(2284,
                      2285,
                      Tgas=2000,
                      pressure=1,
                      molecule='CO2',
                      isotope=1,
                      databank='HITEMP-CO2-TEST').plot()

    Note that 'HITEMP-CO2-TEST' is defined on 2283.7-2285.1 cm-1 only, as
    can be shown by reading the Database information:

        from radis.misc.config import printDatabankEntries
        printDatabankEntries('HITEMP-CO2-TEST')

        >>> 'HITEMP-CO2-TEST':
        >>> {'info': 'HITEMP-2010, CO2, 3 main isotope (CO2-626, 636, 628), 2283.7-2285.1 cm-1',
        >>> 'path': ['/USER/PATH/TO\\radis\\radis\\test\\files\\cdsd_hitemp_09_fragment.txt'],
        >>> 'format': 'cdsd-hitemp'
        >>> 'parfuncfmt': 'hapi'
        >>> 'levelsfmt': 'radis'

    See Also
    --------
    :ref:`Configuration file <label_lbl_config_file>`,
    :py:func:`~radis.misc.config.getDatabankList`,
    :py:func:`~radis.misc.config.printDatabankEntries`
    """
    # NOTE(review): the `verbose` parameter is currently unused in this body —
    # confirm whether it should be forwarded to the config helpers.

    # TODO: generate large band databases for the main species (let's say CO2,
    # H2O and CH4) and main isotopes by fetching the HITRAN 2016 database.

    # Get list of databases already registered in ~/radis.json
    try:
        dbnames = getDatabankList()
    except FileNotFoundError:
        # No config file yet: it will be created by addDatabankEntries below
        dbnames = []

    # %% Add test databases
    for dbname, dbentries in TEST_DATABASES.items():
        if dbname in dbnames:  # Check entries are correct
            diff = diffDatabankEntries(
                getDatabankEntries(dbname), dbentries, verbose=False
            )
            if diff is not None:
                # Registered entry diverged from the expected test definition:
                # refuse to run with a stale/edited test database.
                raise ValueError(
                    "{0}".format(diff)
                    + "\nIn ~/radis.json\n----------\n{0}".format(
                        getDatabankEntries(dbname)
                    )
                    + "\n\nExpected\n---------\n{0}\n\n".format(dbentries)
                    + "Test Database {0} doesnt match expected ".format(dbname)
                    + "entries for key `{0}`. See comparison above. ".format(diff)
                    + "To regenerate test databases just delete the {0} ".format(dbname)
                    + "entry in your ~/radis.json"
                )
        else:  # add them (create ~/radis.json file if doesnt exist yet)
            addDatabankEntries(dbname, dbentries)

    return
# %% Edit existing Line databases
def define_Evib_as_sum_of_Evibi(levels):
    """Define the vibrational energy as the sum of the mode energies.

    Note that this is arbitrary for a polyatomic molecule. Lookup Pannier,
    Dubuet and Laux 2020 for more.

    ``Erot`` is also updated so that the sum ``Evib + Erot = E`` is
    maintained (consistency with equilibrium)::

        Evib = Evib1 + Evib2 + Evib3
        Erot = E - Evib
    """
    total_vib = levels["Evib1"] + levels["Evib2"] + levels["Evib3"]
    levels["Evib"] = total_vib
    # Rotational energy is whatever remains of the total energy
    levels["Erot"] = levels["E"] - total_vib
    return levels
def define_Evib_as_min_of_polyad(levels, keys):
    """Define the vibrational energy of each level as the minimum energy of
    its polyad.

    Here, the polyad is defined for each combination of ``keys``. Typically,
    ``keys=['p', 'c', 'N']`` or ``keys=['p', 'c']``. Rotational energy is the
    rest::

        Evib = min(E(p,c,j,n) for a given set of (p,c))
        Erot = E - Evib

    .. warning::

        See Pannier, Dubuet & Laux 2020 for a quantitative comparison
        of the different possible methods to define vibrational energy.

    Parameters
    ----------
    levels: pandas DataFrame
        energy-level table; must contain an ``E`` column and the ``keys``
        columns.
    keys: list of str
        column names whose combinations define a polyad.

    Returns
    -------
    pandas DataFrame
        ``levels`` with ``Evib`` and ``Erot`` columns added and a flat index.
    """

    def fill_EvibErot(grp):
        # Vibrational energy of the polyad = lowest level energy in the group
        Evib0 = grp.E.min()
        grp["Evib"] = Evib0
        grp["Erot"] = grp.E - Evib0
        return grp

    levels = levels.groupby(keys).apply(fill_EvibErot)
    # `groupby(...).apply` may move the grouping keys into the index; restore
    # a flat index. `allow_duplicates=True` keeps the call working when the
    # key columns are still present in the frame (the `True` behaviour used
    # to be the default in Pandas <2.0).
    # Bug fix: `reset_index` is not in-place — its return value must be
    # assigned, otherwise the call is a no-op.
    levels = levels.reset_index(allow_duplicates=True)
    return levels
def discard_lines_with_na_levels(sf):
    """Remove lines whose energy levels are missing from the test databases.

    In the test Levels databases, not all levels are given (to save space).
    Consequently, in the Line databases, some lines have N/A levels and cannot
    be calculated at nonequilibrium. This function cleans the line databases
    from such lines by first running a dummy calculation and removing the lines
    where levels were N/A.

    .. warning::
        results from such a calculation are physically wrong. Only use
        to test the functions!

    Parameters
    ----------
    sf: SpectrumFactory
    """
    try:
        # Dummy nonequilibrium calculation: expected to fail the first time
        # because the Levels database is only a fragment (some levels are N/A)
        sf.non_eq_spectrum(300, 300)
    except AssertionError:
        # Drop the lines whose level data could not be resolved
        sf.df0.dropna(inplace=True)
# %% Deal with missing databases
def _failsafe_if_no_db(testcase, *args, **kwargs):
    """Run ``testcase`` but tolerate a missing line database.

    If the test raises :py:class:`~radis.misc.utils.DatabankNotFound`, print a
    notice and return ``True`` instead of failing. (finally not implemented?)
    """
    from radis.misc.utils import DatabankNotFound

    try:
        testcase(*args, **kwargs)
    except DatabankNotFound:
        import sys

        # Show the original exception info, then skip gracefully
        print(sys.exc_info())
        print(
            "Testing {0}: Database not defined. \n".format(testcase.__name__)
            + "Ignoring the test"
        )
        return True
#%%
class EmulateMatlab:
    r"""Proxy object that lets Matlab-style syntax drive RADIS.

    Creates a class that allows to use Matlab syntax in Radis, in order to
    show & run valid Matlab syntax code directly from the Python
    documentation and tests. ::

        py = EmulateMatlab()
        py.radis.calc_spectrum()

    It is used in the :ref:`Run from Matlab example <example_run_from_matlab>`

    .. minigallery:: radis.test.utils.EmulateMatlab

    Successful Radis in Matlab screenshots can be found in
    https://github.com/radis/radis/pull/547
    """

    def __init__(self):
        import radis as py_radis

        # Keep a handle on the package; all attribute lookups forward to it
        self.radis = py_radis

    def __getattr__(self, name):
        r"""Forward any attribute not found on the proxy to radis."""
        return getattr(self.radis, name)

    def __call__(self, *args, **kwargs):
        r"""Forward direct calls to radis."""
        return self.radis(*args, **kwargs)

    def __repr__(self):
        return self.radis.__repr__()

    def __str__(self):
        return self.radis.__str__()

    def __dir__(self):
        return self.radis.__dir__()
#%%
if __name__ == "__main__":
    # Running this module as a script registers the test line databases
    # in ~/radis.json (see setup_test_line_databases docstring).
    # run_tests()
    setup_test_line_databases()
|
radisREPO_NAMEradisPATH_START.@radis_extracted@radis-master@radis@test@utils.py@.PATH_END.py
|
{
"filename": "configure.py",
"repo_name": "astro-friedel/CADRE",
"repo_path": "CADRE_extracted/CADRE-master/configure.py",
"type": "Python"
}
|
import os
# One-time pipeline configuration: detect whether the local MIRIAD build uses
# 32- or 64-bit file offsets and install the matching `flagging` module.
ok = False
try:
    import flagging  # noqa: F401 -- presence check only: already configured?
    ok = True
except Exception:
    pass

if not ok:
    print("Configuring the pipeline for your system (this should only need to be run once)")
    try:
        from pipeline_miriadwrap import *
    except Exception:
        print("Could not locate the python MIRIAD wrappers")
        print("Please install them, the install script is located in $MIR/src/scripts/python/subwrap")
        print(" and be sure the libraries (usually in $MIRLIB/python) are in your python path")
        os._exit(0)
    try:
        # Probe a known MIRIAD dataset and sanity-check its gain/bandpass
        # tables before deciding which flagging implementation to install.
        # Bug fix: the original error messages formatted the (Python 2)
        # builtin `file` instead of the dataset name.
        fname = "testMiriadFile"
        handle, iostat = hopen(fname, "old")
        if iostat != 0:
            raise Exception("File %s not found" % fname)
        if not hdprsnt(handle, "bandpass"):
            raise Exception("No bandpass present in %s" % fname)
        nfeeds = rdhdi(handle, "nfeeds", 1)
        ngains = rdhdi(handle, "ngains", 1)
        ntau = rdhdi(handle, "ntau")
        nchan = rdhdi(handle, "nchan0")
        nspect = rdhdi(handle, "nspect0")
        if nfeeds <= 0 or ngains <= 0:
            raise Exception("Bad gain table size information")
        # Integer division (py2 `/` on ints): nants must divide exactly
        nants = ngains // (nfeeds + ntau)
        if nants * (nfeeds + ntau) != ngains:
            raise Exception("Number of gains does not equal nants*nfeeds")
        if nchan <= 0:
            raise Exception("Bad number of frequencies")
        if nspect <= 0 or nspect > nchan:
            raise Exception("Bad number of frequency spectral windows")
        item, iostat = haccess(handle, "freqs", "read")
        if iostat != 0:
            raise Exception("Error accessing the bandpass frequency table")
        off = 8
        nschan, iostat = hreadi(item, off, 4)
        print(nschan, iostat)
        # A 64-bit MIRIAD build reads the expected value (15) at this offset;
        # otherwise fall back to the 32-bit flagging implementation.
        if iostat != 0 or nschan[0] != 15:
            os.system("mv flagging32.py flagging.py")
        else:
            os.system("mv flagging64.py flagging.py")
    except Exception:
        print("Could not configure for your system")
        os._exit(0)
|
astro-friedelREPO_NAMECADREPATH_START.@CADRE_extracted@CADRE-master@configure.py@.PATH_END.py
|
{
"filename": "hubconf.py",
"repo_name": "pmelchior/spender",
"repo_path": "spender_extracted/spender-main/spender/hubconf.py",
"type": "Python"
}
|
../hubconf.py
|
pmelchiorREPO_NAMEspenderPATH_START.@spender_extracted@spender-main@spender@hubconf.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/marker/colorbar/tickfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``color`` property of
    ``scattergeo.marker.colorbar.tickfont``."""

    def __init__(
        self,
        plotly_name="color",
        parent_name="scattergeo.marker.colorbar.tickfont",
        **kwargs,
    ):
        # Default the edit type to "calc" unless the caller overrides it
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@marker@colorbar@tickfont@_color.py@.PATH_END.py
|
{
"filename": "helio_jd.py",
"repo_name": "segasai/astrolibpy",
"repo_path": "astrolibpy_extracted/astrolibpy-master/astrolib/helio_jd.py",
"type": "Python"
}
|
from numpy import array, cos, sin, tan, pi, poly1d, deg2rad
from xyz import xyz
from bprecess import bprecess
def helio_jd(date, ra, dec, b1950=False, time_diff=False):
    """
    NAME:
         HELIO_JD
    PURPOSE:
         Convert geocentric (reduced) Julian date to heliocentric Julian date
    EXPLANATION:
         This procedure correct for the extra light travel time between the Earth
         and the Sun.

         An online calculator for this quantity is available at
         http://www.physics.sfasu.edu/astro/javascript/hjd.html
    CALLING SEQUENCE:
          jdhelio = HELIO_JD( date, ra, dec, /B1950, /TIME_DIFF)
    INPUTS
          date - reduced Julian date (= JD - 2400000), scalar or vector, MUST
                  be double precision
          ra,dec - scalars giving right ascension and declination in DEGREES
                  Equinox is J2000 unless the /B1950 keyword is set
    OUTPUTS:
          jdhelio - heliocentric reduced Julian date.  If /TIME_DIFF is set, then
                     HELIO_JD() instead returns the time difference in seconds
                     between the geocentric and heliocentric Julian date.
    OPTIONAL INPUT KEYWORDS
          /B1950 - if set, then input coordinates are assumed to be in equinox
                    B1950 coordinates.
          /TIME_DIFF - if set, then HELIO_JD() returns the time difference
                    (heliocentric JD - geocentric JD ) in seconds
    EXAMPLE:
          What is the heliocentric Julian date of an observation of V402 Cygni
          (J2000: RA = 20 9 7.8, Dec = 37 09 07) taken June 15, 1973 at 11:40 UT?

          IDL> juldate, [1973,6,15,11,40], jd      ;Get geocentric Julian date
          IDL> hjd = helio_jd( jd, ten(20,9,7.8)*15., ten(37,9,7) )

          ==> hjd = 41848.9881

    Wayne Warren (Raytheon ITSS) has compared the results of HELIO_JD with the
    FORTRAN subroutines in the STARLINK SLALIB library (see
    http://star-www.rl.ac.uk/).

                                                     Time Diff (sec)
         Date               RA(2000)   Dec(2000)  STARLINK      IDL

    1999-10-29T00:00:00.0  21 08 25.  -67 22 00.  -59.0        -59.0
    1999-10-29T00:00:00.0  02 56 33.4 +00 26 55.  474.1        474.1
    1940-12-11T06:55:00.0  07 34 41.9 -00 30 42.  366.3        370.2
    1992-02-29T03:15:56.2  12 56 27.4 +42 10 17.  350.8        350.9
    2000-03-01T10:26:31.8  14 28 36.7 -20 42 11.  243.7        243.7
    2100-02-26T09:18:24.2  08 26 51.7 +85 47 28.  104.0        108.8
    PROCEDURES CALLED:
          bprecess, xyz
    REVISION HISTORY:
          Algorithm from the book Astronomical Photometry by Henden, p. 114
          Written,   W. Landsman       STX     June, 1989
          Make J2000 default equinox, add B1950, /TIME_DIFF keywords, compute
          variation of the obliquity      W. Landsman   November 1999
          Converted to python Sergey Koposov July 2010
    """
    # Because XYZ uses default B1950 coordinates, we'll convert everything to B1950
    if not b1950:
        ra1, dec1 = bprecess(ra, dec)
    else:
        ra1 = ra
        dec1 = dec

    # Julian centuries since the reference epoch used by the obliquity
    # polynomial below
    delta_t = (array(date).astype(float) - 33282.42345905e0) / 36525.0e0
    # Secular variation of the obliquity in arcsec; coefficients are listed
    # low-order first, hence the [::-1] reversal expected by poly1d
    epsilon_sec = poly1d([44.836e0, -46.8495, -0.00429, 0.00181][::-1])(delta_t)
    epsilon = deg2rad(23.433333e0 + epsilon_sec / 3600.0e0)

    # Work in radians from here on
    ra1 = deg2rad(ra1)
    dec1 = deg2rad(dec1)

    # Heliocentric position of the Earth; only x and y are needed below
    x, y, z, tmp, tmp, tmp = xyz(date)

    # Find extra distance light must travel in AU, multiply by 1.49598e13 cm/AU,
    # and divide by the speed of light, and multiply by 86400 second/year
    time = -499.00522e0 * (cos(dec1) * cos(ra1) * x + (tan(epsilon) * sin(dec1) + cos(dec1) * sin(ra1)) * y)

    if time_diff:
        return time
    else:
        return array(date).astype(float) + time / 86400.0e0
|
segasaiREPO_NAMEastrolibpyPATH_START.@astrolibpy_extracted@astrolibpy-master@astrolib@helio_jd.py@.PATH_END.py
|
{
"filename": "test_overrides.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py3/numpy/core/tests/test_overrides.py",
"type": "Python"
}
|
import inspect
import sys
import os
import tempfile
from io import StringIO
from unittest import mock
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex)
from numpy.core.overrides import (
_get_implementing_args, array_function_dispatch,
verify_matching_signatures)
from numpy.compat import pickle
import pytest
def _return_not_implemented(self, *args, **kwargs):
return NotImplemented
# need to define this at the top level to test pickling
@array_function_dispatch(lambda array: (array,))
def dispatched_one_arg(array):
    """Docstring."""
    # NOTE: test_name_and_docstring asserts __doc__ == 'Docstring.' verbatim;
    # do not edit the docstring above.
    return 'original'
@array_function_dispatch(lambda array1, array2: (array1, array2))
def dispatched_two_arg(array1, array2):
    """Docstring."""
    # Two-argument dispatched function; test_properties compares this
    # wrapper's str/repr with its `_implementation`, so keep the body trivial.
    return 'original'
class TestGetImplementingArgs:
    """Tests for ``_get_implementing_args``: which arguments provide an
    ``__array_function__`` override, and in what priority order."""

    def test_ndarray(self):
        array = np.array(1)

        # A plain ndarray implements the protocol itself; duplicates and
        # non-array arguments are filtered out
        args = _get_implementing_args([array])
        assert_equal(list(args), [array])

        args = _get_implementing_args([array, array])
        assert_equal(list(args), [array])

        args = _get_implementing_args([array, 1])
        assert_equal(list(args), [array])

        args = _get_implementing_args([1, array])
        assert_equal(list(args), [array])

    def test_ndarray_subclasses(self):

        class OverrideSub(np.ndarray):
            __array_function__ = _return_not_implemented

        class NoOverrideSub(np.ndarray):
            pass

        array = np.array(1).view(np.ndarray)
        override_sub = np.array(1).view(OverrideSub)
        no_override_sub = np.array(1).view(NoOverrideSub)

        # Subclasses take priority over the base ndarray
        args = _get_implementing_args([array, override_sub])
        assert_equal(list(args), [override_sub, array])

        args = _get_implementing_args([array, no_override_sub])
        assert_equal(list(args), [no_override_sub, array])

        args = _get_implementing_args(
            [override_sub, no_override_sub])
        assert_equal(list(args), [override_sub, no_override_sub])

    def test_ndarray_and_duck_array(self):

        class Other:
            __array_function__ = _return_not_implemented

        array = np.array(1)
        other = Other()

        # Unrelated duck arrays keep their relative argument order
        args = _get_implementing_args([other, array])
        assert_equal(list(args), [other, array])

        args = _get_implementing_args([array, other])
        assert_equal(list(args), [array, other])

    def test_ndarray_subclass_and_duck_array(self):

        class OverrideSub(np.ndarray):
            __array_function__ = _return_not_implemented

        class Other:
            __array_function__ = _return_not_implemented

        array = np.array(1)
        subarray = np.array(1).view(OverrideSub)
        other = Other()

        # The ndarray subclass is promoted ahead of its parent in both orders
        assert_equal(_get_implementing_args([array, subarray, other]),
                     [subarray, array, other])
        assert_equal(_get_implementing_args([array, other, subarray]),
                     [subarray, array, other])

    def test_many_duck_arrays(self):

        class A:
            __array_function__ = _return_not_implemented

        class B(A):
            __array_function__ = _return_not_implemented

        class C(A):
            __array_function__ = _return_not_implemented

        class D:
            __array_function__ = _return_not_implemented

        a = A()
        b = B()
        c = C()
        d = D()

        assert_equal(_get_implementing_args([1]), [])
        assert_equal(_get_implementing_args([a]), [a])
        assert_equal(_get_implementing_args([a, 1]), [a])
        assert_equal(_get_implementing_args([a, a, a]), [a])
        assert_equal(_get_implementing_args([a, d, a]), [a, d])
        # Subclasses come before their parents
        assert_equal(_get_implementing_args([a, b]), [b, a])
        assert_equal(_get_implementing_args([b, a]), [b, a])
        assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
        assert_equal(_get_implementing_args([a, c, b]), [c, b, a])

    def test_too_many_duck_arrays(self):
        namespace = dict(__array_function__=_return_not_implemented)
        # NumPy caps dispatch at 32 distinct argument types
        types = [type('A' + str(i), (object,), namespace) for i in range(33)]
        relevant_args = [t() for t in types]

        actual = _get_implementing_args(relevant_args[:32])
        assert_equal(actual, relevant_args[:32])

        with assert_raises_regex(TypeError, 'distinct argument types'):
            _get_implementing_args(relevant_args)
class TestNDArrayArrayFunction:
    """Tests for ``ndarray.__array_function__`` itself."""

    def test_method(self):

        class Other:
            __array_function__ = _return_not_implemented

        class NoOverrideSub(np.ndarray):
            pass

        class OverrideSub(np.ndarray):
            __array_function__ = _return_not_implemented

        array = np.array([1])
        other = Other()
        no_override_sub = array.view(NoOverrideSub)
        override_sub = array.view(OverrideSub)

        result = array.__array_function__(func=dispatched_two_arg,
                                          types=(np.ndarray,),
                                          args=(array, 1.), kwargs={})
        assert_equal(result, 'original')

        # ndarray defers when another (non-subclass) type provides an override
        result = array.__array_function__(func=dispatched_two_arg,
                                          types=(np.ndarray, Other),
                                          args=(array, other), kwargs={})
        assert_(result is NotImplemented)

        result = array.__array_function__(func=dispatched_two_arg,
                                          types=(np.ndarray, NoOverrideSub),
                                          args=(array, no_override_sub),
                                          kwargs={})
        assert_equal(result, 'original')

        result = array.__array_function__(func=dispatched_two_arg,
                                          types=(np.ndarray, OverrideSub),
                                          args=(array, override_sub),
                                          kwargs={})
        assert_equal(result, 'original')

        with assert_raises_regex(TypeError, 'no implementation found'):
            np.concatenate((array, other))

        # Subclasses are preserved by the default implementation
        expected = np.concatenate((array, array))
        result = np.concatenate((array, no_override_sub))
        assert_equal(result, expected.view(NoOverrideSub))
        result = np.concatenate((array, override_sub))
        assert_equal(result, expected.view(OverrideSub))

    def test_no_wrapper(self):
        # This shouldn't happen unless a user intentionally calls
        # __array_function__ with invalid arguments, but check that we raise
        # an appropriate error all the same.
        array = np.array(1)
        func = lambda x: x
        with assert_raises_regex(AttributeError, '_implementation'):
            array.__array_function__(func=func, types=(np.ndarray,),
                                     args=(array,), kwargs={})
class TestArrayFunctionDispatch:
    """Tests for functions wrapped with ``array_function_dispatch``."""

    def test_pickle(self):
        # The wrapper must round-trip through pickle as the same object
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            roundtripped = pickle.loads(
                pickle.dumps(dispatched_one_arg, protocol=proto))
            assert_(roundtripped is dispatched_one_arg)

    def test_name_and_docstring(self):
        assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
        if sys.flags.optimize < 2:
            # docstrings are stripped under python -OO
            assert_equal(dispatched_one_arg.__doc__, 'Docstring.')

    def test_interface(self):

        class MyArray:
            def __array_function__(self, func, types, args, kwargs):
                return (self, func, types, args, kwargs)

        original = MyArray()
        (obj, func, types, args, kwargs) = dispatched_one_arg(original)
        assert_(obj is original)
        assert_(func is dispatched_one_arg)
        assert_equal(set(types), {MyArray})
        # assert_equal uses the overloaded np.iscomplexobj() internally
        assert_(args == (original,))
        assert_equal(kwargs, {})

    def test_not_implemented(self):

        class MyArray:
            def __array_function__(self, func, types, args, kwargs):
                return NotImplemented

        array = MyArray()
        with assert_raises_regex(TypeError, 'no implementation found'):
            dispatched_one_arg(array)

    def test_where_dispatch(self):

        class DuckArray:
            def __array_function__(self, ufunc, method, *inputs, **kwargs):
                return "overridden"

        array = np.array(1)
        duck_array = DuckArray()

        # the `where=` argument participates in dispatch too
        result = np.std(array, where=duck_array)

        assert_equal(result, "overridden")
class TestVerifyMatchingSignatures:
    """Dispatcher and implementation signatures must agree (same parameter
    names; dispatcher defaults must be None)."""

    def test_verify_matching_signatures(self):

        verify_matching_signatures(lambda x: 0, lambda x: 0)
        verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
        verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)

        with assert_raises(RuntimeError):
            verify_matching_signatures(lambda a: 0, lambda b: 0)
        with assert_raises(RuntimeError):
            verify_matching_signatures(lambda x: 0, lambda x=None: 0)
        with assert_raises(RuntimeError):
            verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
        with assert_raises(RuntimeError):
            verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)

    def test_array_function_dispatch(self):

        # mismatched parameter names are rejected at decoration time
        with assert_raises(RuntimeError):
            @array_function_dispatch(lambda x: (x,))
            def f(y):
                pass

        # should not raise
        @array_function_dispatch(lambda x: (x,), verify=False)
        def f(y):
            pass
def _new_duck_type_and_implements():
"""Create a duck array type and implements functions."""
HANDLED_FUNCTIONS = {}
class MyArray:
def __array_function__(self, func, types, args, kwargs):
if func not in HANDLED_FUNCTIONS:
return NotImplemented
if not all(issubclass(t, MyArray) for t in types):
return NotImplemented
return HANDLED_FUNCTIONS[func](*args, **kwargs)
def implements(numpy_function):
"""Register an __array_function__ implementations."""
def decorator(func):
HANDLED_FUNCTIONS[numpy_function] = func
return func
return decorator
return (MyArray, implements)
class TestArrayFunctionImplementation:
    """End-to-end tests of dispatch to registered implementations."""

    def test_one_arg(self):
        MyArray, implements = _new_duck_type_and_implements()

        @implements(dispatched_one_arg)
        def _(array):
            return 'myarray'

        assert_equal(dispatched_one_arg(1), 'original')
        assert_equal(dispatched_one_arg(MyArray()), 'myarray')

    def test_optional_args(self):
        MyArray, implements = _new_duck_type_and_implements()

        @array_function_dispatch(lambda array, option=None: (array,))
        def func_with_option(array, option='default'):
            return option

        @implements(func_with_option)
        def my_array_func_with_option(array, new_option='myarray'):
            return new_option

        # we don't need to implement every option on __array_function__
        # implementations
        assert_equal(func_with_option(1), 'default')
        assert_equal(func_with_option(1, option='extra'), 'extra')
        assert_equal(func_with_option(MyArray()), 'myarray')
        with assert_raises(TypeError):
            func_with_option(MyArray(), option='extra')

        # but new options on implementations can't be used
        result = my_array_func_with_option(MyArray(), new_option='yes')
        assert_equal(result, 'yes')
        with assert_raises(TypeError):
            func_with_option(MyArray(), new_option='no')

    def test_not_implemented(self):
        MyArray, implements = _new_duck_type_and_implements()

        @array_function_dispatch(lambda array: (array,), module='my')
        def func(array):
            return array

        array = np.array(1)
        assert_(func(array) is array)
        assert_equal(func.__module__, 'my')

        # the error message includes the module-qualified function name
        with assert_raises_regex(
                TypeError, "no implementation found for 'my.func'"):
            func(MyArray())

    @pytest.mark.parametrize("name", ["concatenate", "mean", "asarray"])
    def test_signature_error_message_simple(self, name):
        func = getattr(np, name)
        try:
            # all of these functions need an argument:
            func()
        except TypeError as e:
            exc = e

        assert exc.args[0].startswith(f"{name}()")

    def test_signature_error_message(self):
        # The lambda function will be named "<lambda>", but the TypeError
        # should show the name as "func"
        def _dispatcher():
            return ()

        @array_function_dispatch(_dispatcher)
        def func():
            pass

        try:
            func._implementation(bad_arg=3)
        except TypeError as e:
            expected_exception = e

        try:
            func(bad_arg=3)
            raise AssertionError("must fail")
        except TypeError as exc:
            if exc.args[0].startswith("_dispatcher"):
                # We replace the qualname currently, but it used `__name__`
                # (relevant functions have the same name and qualname anyway)
                pytest.skip("Python version is not using __qualname__ for "
                            "TypeError formatting.")

            assert exc.args == expected_exception.args

    @pytest.mark.parametrize("value", [234, "this func is not replaced"])
    def test_dispatcher_error(self, value):
        # If the dispatcher raises an error, we must not attempt to mutate it
        error = TypeError(value)

        def dispatcher():
            raise error

        @array_function_dispatch(dispatcher)
        def func():
            return 3

        try:
            func()
            raise AssertionError("must fail")
        except TypeError as exc:
            assert exc is error  # unmodified exception

    def test_properties(self):
        # Check that str and repr are sensible
        func = dispatched_two_arg
        assert str(func) == str(func._implementation)
        repr_no_id = repr(func).split("at ")[0]
        repr_no_id_impl = repr(func._implementation).split("at ")[0]
        assert repr_no_id == repr_no_id_impl

    @pytest.mark.parametrize("func", [
        lambda x, y: 0,  # no like argument
        lambda like=None: 0,  # not keyword only
        lambda *, like=None, a=3: 0,  # not last (not that it matters)
    ])
    def test_bad_like_sig(self, func):
        # We sanity check the signature, and these should fail.
        with pytest.raises(RuntimeError):
            array_function_dispatch()(func)

    def test_bad_like_passing(self):
        # Cover internal sanity check for passing like as first positional arg
        def func(*, like=None):
            pass

        func_with_like = array_function_dispatch()(func)
        with pytest.raises(TypeError):
            func_with_like()
        with pytest.raises(TypeError):
            func_with_like(like=234)

    def test_too_many_args(self):
        # Mainly a unit-test to increase coverage
        objs = []
        for i in range(40):
            class MyArr:
                def __array_function__(self, *args, **kwargs):
                    return NotImplemented

            objs.append(MyArr())

        def _dispatch(*args):
            return args

        @array_function_dispatch(_dispatch)
        def func(*args):
            pass

        with pytest.raises(TypeError, match="maximum number"):
            func(*objs)
class TestNDArrayMethods:
    """ndarray methods must still work on subclasses whose
    ``__array_function__`` declines to handle calls."""

    def test_repr(self):
        # gh-12162: should still be defined even if __array_function__ doesn't
        # implement np.array_repr()

        class MyArray(np.ndarray):
            def __array_function__(*args, **kwargs):
                return NotImplemented

        array = np.array(1).view(MyArray)
        assert_equal(repr(array), 'MyArray(1)')
        assert_equal(str(array), '1')
class TestNumPyFunctions:
    """Dispatch behaviour of the public NumPy functions themselves."""

    def test_set_module(self):
        # dispatched functions report their public module, not the internals
        assert_equal(np.sum.__module__, 'numpy')
        assert_equal(np.char.equal.__module__, 'numpy.char')
        assert_equal(np.fft.fft.__module__, 'numpy.fft')
        assert_equal(np.linalg.solve.__module__, 'numpy.linalg')

    def test_inspect_sum(self):
        signature = inspect.signature(np.sum)
        assert_('axis' in signature.parameters)

    def test_override_sum(self):
        MyArray, implements = _new_duck_type_and_implements()

        @implements(np.sum)
        def _(array):
            return 'yes'

        assert_equal(np.sum(MyArray()), 'yes')

    def test_sum_on_mock_array(self):
        # We need a proxy for mocks because __array_function__ is only looked
        # up in the class dict

        class ArrayProxy:
            def __init__(self, value):
                self.value = value

            def __array_function__(self, *args, **kwargs):
                return self.value.__array_function__(*args, **kwargs)

            def __array__(self, *args, **kwargs):
                return self.value.__array__(*args, **kwargs)

        proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
        proxy.value.__array_function__.return_value = 1
        result = np.sum(proxy)
        assert_equal(result, 1)
        proxy.value.__array_function__.assert_called_once_with(
            np.sum, (ArrayProxy,), (proxy,), {})
        # dispatch must short-circuit before coercing via __array__
        proxy.value.__array__.assert_not_called()

    def test_sum_forwarding_implementation(self):

        class MyArray(np.ndarray):

            def sum(self, axis, out):
                return 'summed'

            def __array_function__(self, func, types, args, kwargs):
                return super().__array_function__(func, types, args, kwargs)

        # note: the internal implementation of np.sum() calls the .sum() method
        array = np.array(1).view(MyArray)
        assert_equal(np.sum(array), 'summed')
class TestArrayLike:
def setup_method(self):
class MyArray():
def __init__(self, function=None):
self.function = function
def __array_function__(self, func, types, args, kwargs):
assert func is getattr(np, func.__name__)
try:
my_func = getattr(self, func.__name__)
except AttributeError:
return NotImplemented
return my_func(*args, **kwargs)
self.MyArray = MyArray
class MyNoArrayFunctionArray():
def __init__(self, function=None):
self.function = function
self.MyNoArrayFunctionArray = MyNoArrayFunctionArray
def add_method(self, name, arr_class, enable_value_error=False):
def _definition(*args, **kwargs):
# Check that `like=` isn't propagated downstream
assert 'like' not in kwargs
if enable_value_error and 'value_error' in kwargs:
raise ValueError
return arr_class(getattr(arr_class, name))
setattr(arr_class, name, _definition)
def func_args(*args, **kwargs):
return args, kwargs
def test_array_like_not_implemented(self):
self.add_method('array', self.MyArray)
ref = self.MyArray.array()
with assert_raises_regex(TypeError, 'no implementation found'):
array_like = np.asarray(1, like=ref)
_array_tests = [
('array', *func_args((1,))),
('asarray', *func_args((1,))),
('asanyarray', *func_args((1,))),
('ascontiguousarray', *func_args((2, 3))),
('asfortranarray', *func_args((2, 3))),
('require', *func_args((np.arange(6).reshape(2, 3),),
requirements=['A', 'F'])),
('empty', *func_args((1,))),
('full', *func_args((1,), 2)),
('ones', *func_args((1,))),
('zeros', *func_args((1,))),
('arange', *func_args(3)),
('frombuffer', *func_args(b'\x00' * 8, dtype=int)),
('fromiter', *func_args(range(3), dtype=int)),
('fromstring', *func_args('1,2', dtype=int, sep=',')),
('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))),
('genfromtxt', *func_args(lambda: StringIO('1,2.1'),
dtype=[('int', 'i8'), ('float', 'f8')],
delimiter=',')),
]
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('numpy_ref', [True, False])
def test_array_like(self, function, args, kwargs, numpy_ref):
self.add_method('array', self.MyArray)
self.add_method(function, self.MyArray)
np_func = getattr(np, function)
my_func = getattr(self.MyArray, function)
if numpy_ref is True:
ref = np.array(1)
else:
ref = self.MyArray.array()
like_args = tuple(a() if callable(a) else a for a in args)
array_like = np_func(*like_args, **kwargs, like=ref)
if numpy_ref is True:
assert type(array_like) is np.ndarray
np_args = tuple(a() if callable(a) else a for a in args)
np_arr = np_func(*np_args, **kwargs)
# Special-case np.empty to ensure values match
if function == "empty":
np_arr.fill(1)
array_like.fill(1)
assert_equal(array_like, np_arr)
else:
assert type(array_like) is self.MyArray
assert array_like.function is my_func
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"])
def test_no_array_function_like(self, function, args, kwargs, ref):
self.add_method('array', self.MyNoArrayFunctionArray)
self.add_method(function, self.MyNoArrayFunctionArray)
np_func = getattr(np, function)
# Instantiate ref if it's the MyNoArrayFunctionArray class
if ref == "MyNoArrayFunctionArray":
ref = self.MyNoArrayFunctionArray.array()
like_args = tuple(a() if callable(a) else a for a in args)
with assert_raises_regex(TypeError,
'The `like` argument must be an array-like that implements'):
np_func(*like_args, **kwargs, like=ref)
@pytest.mark.parametrize('numpy_ref', [True, False])
def test_array_like_fromfile(self, numpy_ref):
    """np.fromfile honours `like=` just like other creation functions."""
    self.add_method('array', self.MyArray)
    self.add_method("fromfile", self.MyArray)
    ref = np.array(1) if numpy_ref is True else self.MyArray.array()
    data = np.random.random(5)
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, "testfile")
        data.tofile(fname)
        array_like = np.fromfile(fname, like=ref)
        # All file reads must happen before the tempdir is removed.
        if numpy_ref is True:
            assert type(array_like) is np.ndarray
            # A second dispatched read must round-trip the data and
            # agree with the first result.
            np_res = np.fromfile(fname, like=ref)
            assert_equal(np_res, data)
            assert_equal(array_like, np_res)
        else:
            assert type(array_like) is self.MyArray
            assert array_like.function is self.MyArray.fromfile
def test_exception_handling(self):
    """A bad keyword argument wins over `like=` dispatch."""
    self.add_method('array', self.MyArray, enable_value_error=True)
    reference = self.MyArray.array()
    # `value_error` is not a valid np.array keyword, so the invalid-kwarg
    # TypeError must be raised before dispatching on `like`.
    with assert_raises(TypeError):
        np.array(1, value_error=True, like=reference)
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
def test_like_as_none(self, function, args, kwargs):
    """like=None must behave exactly as if `like` were omitted."""
    self.add_method('array', self.MyArray)
    self.add_method(function, self.MyArray)
    np_func = getattr(np, function)
    # Materialize the args twice: loadtxt/genfromtxt take one-shot
    # StringIO factories, so each call needs its own fresh copy.
    args_for_like = tuple(a() if callable(a) else a for a in args)
    args_for_plain = tuple(a() if callable(a) else a for a in args)
    result = np_func(*args_for_like, **kwargs, like=None)
    expected = np_func(*args_for_plain, **kwargs)
    if function == "empty":
        # np.empty contents are arbitrary; overwrite before comparing.
        result.fill(1)
        expected.fill(1)
    assert_equal(result, expected)
def test_function_like():
    """Dispatched functions define __get__, so they bind like methods."""
    assert type(np.mean) is np.core._multiarray_umath._ArrayFunctionDispatcher

    class Holder:
        def __array__(self):
            # A valid operand for np.mean:
            return np.arange(3)

        func1 = staticmethod(np.mean)
        func2 = np.mean
        func3 = classmethod(np.mean)

    inst = Holder()
    assert inst.func1([10]) == 10
    assert inst.func2() == 1  # mean of arange(3)
    with pytest.raises(TypeError, match="unsupported operand type"):
        # classmethod binding passes the class itself as the operand.
        inst.func3()
    # Explicit descriptor binding must agree with attribute access
    # (the attribute lookups above may take a shortcut):
    bound = np.mean.__get__(inst, Holder)
    assert bound() == 1
    unbound = np.mean.__get__(None, Holder)  # unbound actually
    assert unbound([10]) == 10
    cls_bound = np.mean.__get__(Holder)  # classmethod-style binding
    with pytest.raises(TypeError, match="unsupported operand type"):
        cls_bound()
def test_scipy_trapz_support_shim():
    # SciPy <= 1.10 clones np.trapz via FunctionType to rename it
    # (https://github.com/scipy/scipy/issues/17811), so NumPy 1.25 added
    # a support shim letting such clones still dispatch. This mirrors
    # SciPy's copy. Hopefully removable 1 year after SciPy 1.11.
    import functools
    import types

    def clone_function(f):
        # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)
        g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
                               argdefs=f.__defaults__, closure=f.__closure__)
        g = functools.update_wrapper(g, f)
        g.__kwdefaults__ = f.__kwdefaults__
        return g

    trapezoid = clone_function(np.trapz)
    assert np.trapz([1, 2]) == trapezoid([1, 2])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py3@numpy@core@tests@test_overrides.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/scene/xaxis/title/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
    """
    Font styling for the title of the scene x-axis
    (``layout.scene.xaxis.title.font``).

    NOTE(review): this module follows plotly's generated graph-object
    pattern; property access delegates to the BaseLayoutHierarchyType
    mapping via ``self[...]``.
    """

    # class properties
    # --------------------
    _parent_path_str = "layout.scene.xaxis.title"
    _path_str = "layout.scene.xaxis.title.font"
    # Complete set of property names accepted by this graph object.
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
        "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
        "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # lineposition
    # ------------
    @property
    def lineposition(self):
        """
        Sets the kind of decoration line(s) with text, such as an
        "under", "over" or "through" as well as combinations e.g.
        "under+over", etc.

        The 'lineposition' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['under', 'over', 'through'] joined with '+' characters
            (e.g. 'under+over')
            OR exactly one of ['none'] (e.g. 'none')

        Returns
        -------
        Any
        """
        return self["lineposition"]

    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val

    # shadow
    # ------
    @property
    def shadow(self):
        """
        Sets the shape and color of the shadow behind text. "auto"
        places minimal shadow and applies contrast text font color. See
        https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
        for additional options.

        The 'shadow' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["shadow"]

    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # style
    # -----
    @property
    def style(self):
        """
        Sets whether a font should be styled with a normal or italic
        face from its family.

        The 'style' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'italic']

        Returns
        -------
        Any
        """
        return self["style"]

    @style.setter
    def style(self, val):
        self["style"] = val

    # textcase
    # --------
    @property
    def textcase(self):
        """
        Sets capitalization of text. It can be used to make text appear
        in all-uppercase or all-lowercase, or with each word
        capitalized.

        The 'textcase' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'word caps', 'upper', 'lower']

        Returns
        -------
        Any
        """
        return self["textcase"]

    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val

    # variant
    # -------
    @property
    def variant(self):
        """
        Sets the variant of the font.

        The 'variant' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'small-caps', 'all-small-caps',
                'all-petite-caps', 'petite-caps', 'unicase']

        Returns
        -------
        Any
        """
        return self["variant"]

    @variant.setter
    def variant(self, val):
        self["variant"] = val

    # weight
    # ------
    @property
    def weight(self):
        """
        Sets the weight (or boldness) of the font.

        The 'weight' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [1, 1000]
            OR exactly one of ['normal', 'bold'] (e.g. 'bold')

        Returns
        -------
        int
        """
        return self["weight"]

    @weight.setter
    def weight(self, val):
        self["weight"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Font object

        Sets this axis' title font. Note that the title's font used to
        be customized by the now deprecated `titlefont` attribute.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.layout.scene.x
            axis.title.Font`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Internal construction path: parent supplies state directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping keys below doesn't mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.scene.xaxis.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.xaxis.title.Font`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # For each property: explicit keyword argument wins over the
        # value carried in `arg`; unset properties are left out entirely.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("lineposition", None)
        _v = lineposition if lineposition is not None else _v
        if _v is not None:
            self["lineposition"] = _v
        _v = arg.pop("shadow", None)
        _v = shadow if shadow is not None else _v
        if _v is not None:
            self["shadow"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("style", None)
        _v = style if style is not None else _v
        if _v is not None:
            self["style"] = _v
        _v = arg.pop("textcase", None)
        _v = textcase if textcase is not None else _v
        if _v is not None:
            self["textcase"] = _v
        _v = arg.pop("variant", None)
        _v = variant if variant is not None else _v
        if _v is not None:
            self["variant"] = _v
        _v = arg.pop("weight", None)
        _v = weight if weight is not None else _v
        if _v is not None:
            self["weight"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@scene@xaxis@title@_font.py@.PATH_END.py
|
{
"filename": "pixelsplines.py",
"repo_name": "desihub/desisim",
"repo_path": "desisim_extracted/desisim-main/py/desisim/pixelsplines.py",
"type": "Python"
}
|
"""
desisim.pixelsplines
====================
Pixel-integrated spline utilities.
Written by A. Bolton, U. of Utah, 2010-2013.
"""
from __future__ import absolute_import, division, print_function
import numpy as n
from scipy import linalg as la
from scipy import sparse as sp
from scipy import special as sf
def compute_duck_slopes(pixbound, flux):
    """
    Compute the slope of the illuminating quadratic spline at
    the locations of the 'ducks', i.e., the pixel boundaries,
    given the integrated flux per unit baseline within the pixels.

    ARGUMENTS:
      pixbound: (npix + 1) ndarray of pixel boundaries, in units of
        wavelength or log-wavelength or frequency or whatever you like.
      flux: (npix) ndarray of spectral flux (energy or counts) per
        abscissa unit, averaged over the extent of the pixel

    RETURNS:
      an (npix+1) ndarray of the slope of the underlying/illuminating
      flux per unit abscissa spectrum at the position of the pixel
      boundaries, a.k.a. 'ducks'. The end conditions are taken to
      be zero slope, so the exterior points of the output are zeros.
      On malformed input, prints a message and returns 0.
    """
    npix = len(flux)
    # pixbound must carry exactly one more element than flux:
    if (len(pixbound) - npix) != 1:
        print('Need one more element in pixbound than in flux!')
        return 0
    # Pixel widths:
    dx = pixbound[1:] - pixbound[:-1]
    # Boundaries must be monotonically increasing:
    if dx.min() <= 0.:
        print('Pixel boundaries not monotonically increasing!')
        return 0
    # Tridiagonal system for the interior duck slopes, encoded in the
    # banded form expected by scipy.linalg.solve_banded:
    diag_main = (dx[:-1] + dx[1:]) / 3.
    diag_off = dx[1:-1] / 6.
    bands = n.vstack((n.append(0., diag_off),
                      diag_main,
                      n.append(diag_off, 0.)))
    # Right-hand side is the first difference of the pixel fluxes:
    slopes = la.solve_banded((1, 1), bands, flux[1:] - flux[:-1])
    # Zero-slope end conditions: pad both ends with zeros.
    return n.append(n.append(0., slopes), 0.)
def cen2bound(pixelcen):
    """
    Convenience function to do the obvious thing to transform
    pixel centers to pixel boundaries.
    """
    # Interior boundaries are midpoints between adjacent centers.
    inner = 0.5 * (pixelcen[1:] + pixelcen[:-1])
    # End boundaries extrapolate linearly past the outermost centers.
    first = 2. * inner[0] - inner[1]
    last = 2. * inner[-1] - inner[-2]
    return n.append(n.append(first, inner), last)
def gauss_blur_matrix(pixbound, sig_conv):
    """
    Function to generate a Gaussian blurring matrix for a pixelized
    spectrum, from specified pixel boundaries and 'sigma' vector.
    The matrix will be flux-conserving if the spectrum to which it is
    applied has units of 'counts per unit x', and pixbound and sig_conv
    both have units of x.

    pixbound should have one more element than sig_conv.

    Output is a scipy sparse (CSR) matrix that can implement the
    blurring as:
        blurflux = gauss_blur_matrix * flux
    where 'flux' has the same dimensions as 'sig_conv'.

    Raises PixSplineError on malformed input.
    """
    # Derived values and error checks:
    npix = len(pixbound) - 1
    if (len(sig_conv) != npix):
        raise PixSplineError('Need one more element in pixbound than in \
sig_conv!')
    if (sig_conv.min() <= 0.):
        raise PixSplineError('sig_conv must be > 0 everywhere!')
    # Pixel centers and widths:
    xcen = 0.5 * (pixbound[1:] + pixbound[:-1])
    dxpix = pixbound[1:] - pixbound[:-1]
    if (dxpix.min() <= 0.):
        raise PixSplineError('Pixel boundaries not monotonically increasing!')
    # Which "new" pixels does each "old" pixel touch?
    # Let's go +/- 6 sigma for all:
    sig_width = 6.0
    # A minor correction factor to preserve flux conservation:
    # renormalizes for the Gaussian mass truncated beyond +/- 6 sigma.
    cfact = 1./sf.erf(sig_width / n.sqrt(2.))
    xblur_lo = xcen - sig_width * sig_conv
    xblur_hi = xcen + sig_width * sig_conv
    bin_lo = n.digitize(xblur_lo, pixbound) - 1
    bin_hi = n.digitize(xblur_hi, pixbound) - 1
    # Restrict the ranges:
    # NOTE(review): the position clipping below was deliberately left
    # disabled; only the bin-index clipping at the relevant end of each
    # range is active.
    #xblur_lo = n.where((xblur_lo > pixbound[0]), xblur_lo, pixbound[0])
    #xblur_lo = n.where((xblur_lo < pixbound[-1]), xblur_lo, pixbound[-1])
    #xblur_hi = n.where((xblur_hi > pixbound[0]), xblur_hi, pixbound[0])
    #xblur_hi = n.where((xblur_hi < pixbound[-1]), xblur_hi, pixbound[-1])
    bin_lo = n.where((bin_lo >= 0), bin_lo, 0)
    #bin_lo = n.where((bin_lo < npix), bin_lo, npix-1)
    #bin_hi = n.where((bin_hi >= 0), bin_hi, 0)
    bin_hi = n.where((bin_hi < npix), bin_hi, npix-1)
    # Compute total number of non-zero elements in the broadening matrix:
    n_each = bin_hi - bin_lo + 1
    n_entries = n_each.sum()
    # Sparse coordinate (row, col) indices and values:
    ij = n.zeros((2, n_entries), dtype=int)
    v_vec = n.zeros(n_entries, dtype=float)
    # Loop over pixels in the "old" spectrum:
    pcount = 0
    roottwo = n.sqrt(2.)
    bin_vec = n.arange(npix, dtype=int)
    for k in range(npix):
        # Boundaries of all "new" pixels touched by old pixel k:
        xbound = pixbound[bin_lo[k]:bin_hi[k]+2]
        # Gaussian integral in terms of error function:
        erf_terms = cfact * 0.5 * sf.erf((xbound - xcen[k]) / (roottwo *
                                         sig_conv[k]))
        # Fractional redistribution, rescaled by the pixel-width ratio
        # so that counts (flux * width) are conserved:
        erf_int = (erf_terms[1:] - erf_terms[:-1]) * \
                  dxpix[k] / dxpix[bin_lo[k]:bin_hi[k]+1]
        ij[0,pcount:pcount+n_each[k]] = bin_vec[bin_lo[k]:bin_hi[k]+1]
        ij[1,pcount:pcount+n_each[k]] = k
        v_vec[pcount:pcount+n_each[k]] = erf_int
        pcount += n_each[k]
    # Assemble in COO form, return in CSR form for fast products:
    conv_matrix = sp.coo_matrix((v_vec, ij), shape=(npix,npix))
    return conv_matrix.tocsr()
class PixSplineError(Exception):
    """Raised for invalid pixel-spline inputs (bad shapes or ordering)."""

    def __init__(self, value):
        # Keep the offending description for later display.
        self.value = value

    def __str__(self):
        return repr(self.value)
class PixelSpline:
    """
    Pixel Spline object class.

    Initialize as follows:
        PS = PixelSpline(pixbound, flux)
    where
        pixbound = array of pixel boundaries in baseline units
    and
        flux = array of specific flux values in baseline units.

    Assumptions:
        'pixbound' should have one more element than 'flux', and
        units of 'flux' are -per-unit-baseline, for the baseline
        units in which pixbound is expressed, averaged over the
        extent of each pixel.
    """
    def __init__(self, pixbound, flux):
        npix = len(flux)
        # Test for correct argument dimensions:
        if (len(pixbound) - npix) != 1:
            raise PixSplineError('Need one more element in pixbound \
than in flux!')
        # The array of "delta-x" values:
        dxpix = pixbound[1:] - pixbound[:-1]
        # Test for monotonic increase:
        if dxpix.min() <= 0.:
            raise PixSplineError('Pixel boundaries not monotonically \
increasing!')
        self.npix = npix
        self.pixbound = pixbound.copy()
        self.dxpix = dxpix.copy()
        self.xcen = 0.5 * (pixbound[1:] + pixbound[:-1]).copy()
        self.flux = flux.copy()
        # Tridiagonal system for the spline slopes at the pixel
        # boundaries ('ducks'); same algebra as compute_duck_slopes().
        maindiag = (dxpix[:-1] + dxpix[1:]) / 3.
        offdiag = dxpix[1:-1] / 6.
        upperdiag = n.append(0., offdiag)
        lowerdiag = n.append(offdiag, 0.)
        band_matrix = n.vstack((upperdiag, maindiag, lowerdiag))
        # The right-hand side:
        rhs = flux[1:] - flux[:-1]
        # Solve the banded matrix for the slopes at the ducks:
        acoeff = la.solve_banded((1,1), band_matrix, rhs)
        # Zero-slope end conditions at the outermost boundaries:
        self.duckslopes = n.append(n.append(0., acoeff), 0.)
    def point_evaluate(self, xnew, missing=0.):
        """
        Evaluate underlying pixel spline at array of points.

        Points outside the spline's baseline range are assigned the
        `missing` value.

        BUG: input currently needs to be at least 1D array.
        """
        # Initialize output array (filled with the `missing` value):
        outflux = 0. * self.flux[0] * xnew + missing
        # Digitize into bins:
        bin_idx = n.digitize(xnew, self.pixbound)
        # Find the indices of those that are actually in-bounds:
        wh_in = n.where((bin_idx > 0) * (bin_idx < len(self.pixbound)))
        if len(wh_in[0]) == 0:
            return outflux
        xnew_in = xnew[wh_in]
        idx_in = bin_idx[wh_in] - 1
        # The pixel centers as per the algorithm in use:
        adiff = self.duckslopes[idx_in+1] - self.duckslopes[idx_in]
        asum = self.duckslopes[idx_in+1] + self.duckslopes[idx_in]
        xdiff = xnew_in - self.xcen[idx_in]
        # Quadratic spline expanded about each pixel center:
        fluxvals = adiff * xdiff**2 / (2. * self.dxpix[idx_in]) + asum * xdiff \
            / 2. + self.flux[idx_in] - adiff * self.dxpix[idx_in] / 24.
        outflux[wh_in] = fluxvals
        return outflux
    def find_extrema(self, minima=False):
        """
        Return the positions of the spline's local maxima (or minima,
        if `minima` is set) that fall within their own pixel; returns
        an empty array when there are none.
        """
        # Find the formal extrema positions:
        x_ext = self.xcen - 0.5 * self.dxpix * \
            (self.duckslopes[1:] + self.duckslopes[:-1]) / \
            (self.duckslopes[1:] - self.duckslopes[:-1])
        # Digitize these into bins:
        bin_ext = n.digitize(x_ext, self.pixbound) - 1
        # The second derivatives, flipped in sign if minima is set:
        curvat = (-1)**(minima == True) * (self.duckslopes[1:] -
            self.duckslopes[:-1]) / self.dxpix
        # Find in-bin maxima:
        wh_ext = n.where((bin_ext == n.arange(self.npix)) * (curvat < 0))
        if len(wh_ext[0]) < 1:
            return n.array([])
        x_ext = x_ext[wh_ext]
        return x_ext
    def subpixel_average(self, ipix, xlo, xhi):
        """
        Analytic average of the spline over the sub-interval [xlo, xhi]
        of pixel(s) `ipix` (accepts arrays of pixel indices/limits).
        """
        adiff = self.duckslopes[ipix+1] - self.duckslopes[ipix]
        asum = self.duckslopes[ipix+1] + self.duckslopes[ipix]
        xlo_c = xlo - self.xcen[ipix]
        xhi_c = xhi - self.xcen[ipix]
        outval = adiff * ((xhi-xlo)**2 / 6. + xhi_c * xlo_c / 2.) / \
            self.dxpix[ipix] + asum * (xhi_c + xlo_c) / 4. - adiff * \
            self.dxpix[ipix] / 24. + self.flux[ipix]
        return outval
    def resample(self, pb_new):
        """
        Method to resample a pixelspline analytically onto a new
        set of pixel boundaries.
        """
        npix_new = len(pb_new) - 1
        xnew_lo = pb_new[:-1].copy()
        xnew_hi = pb_new[1:].copy()
        # Test for monotonic:
        new_fulldx = xnew_hi - xnew_lo
        if new_fulldx.min() <= 0.:
            raise PixSplineError('New pixel boundaries not monotonically \
increasing!')
        # Digitize the new boundaries into the original bins:
        bin_idx = n.digitize(pb_new, self.pixbound) - 1
        bin_lo = bin_idx[:-1].copy()
        bin_hi = bin_idx[1:].copy()
        # Array for accumulating new counts:
        new_counts = n.zeros(npix_new, dtype=self.flux.dtype)
        # Array for accumulating new pixel widths by pieces.
        # Only used for debugging so far, but may be useful in future.
        #new_dxpix = n.zeros(npix_new, dtype=self.flux.dtype)
        # For convenience, we define the following.
        # Careful not to modify them... they are views, not copies!
        xold_lo = self.pixbound[:-1]
        xold_hi = self.pixbound[1:]
        # 4 cases to cover:
        # Case 1: both bin_hi and bin_lo in the same bin:
        wh_this = n.where((bin_hi == bin_lo) * (bin_lo >= 0) * \
            (bin_hi < self.npix))
        if (len(wh_this[0]) > 0):
            dx_this = xnew_hi[wh_this] - xnew_lo[wh_this]
            avgval_this = self.subpixel_average(bin_lo[wh_this],
                                                xnew_lo[wh_this],
                                                xnew_hi[wh_this])
            #new_dxpix[wh_this] += dx_this
            new_counts[wh_this] += avgval_this * dx_this
        # Case 2: more than one bin, lower segment:
        wh_this = n.where((bin_hi > bin_lo) * (bin_lo >= 0))
        if (len(wh_this[0]) > 0):
            dx_this = xold_hi[bin_lo[wh_this]] - xnew_lo[wh_this]
            avgval_this = self.subpixel_average(bin_lo[wh_this],
                                                xnew_lo[wh_this],
                                                xold_hi[bin_lo[wh_this]])
            #new_dxpix[wh_this] += dx_this
            new_counts[wh_this] += avgval_this * dx_this
        # Case 3: more than one bin, upper segment:
        wh_this = n.where((bin_hi > bin_lo) * (bin_hi < self.npix))
        if (len(wh_this[0]) > 0):
            dx_this = xnew_hi[wh_this] - xold_lo[bin_hi[wh_this]]
            avgval_this = self.subpixel_average(bin_hi[wh_this],
                                                xold_lo[bin_hi[wh_this]],
                                                xnew_hi[wh_this])
            #new_dxpix[wh_this] += dx_this
            new_counts[wh_this] += avgval_this * dx_this
        # Case 4: entire bins covered, whole pixels:
        wh_this = n.where(bin_hi > (bin_lo+1))
        nwhole = len(wh_this[0])
        if (nwhole > 0):
            # Integrated counts per original pixel:
            pcounts = self.flux * self.dxpix
            icounts_this = n.array([pcounts[bin_lo[wh_this[0][ii]]+1:\
bin_hi[wh_this[0][ii]]].sum()
                                    for ii in range(nwhole)])
            #new_dxpix[wh_this] += dx_this
            new_counts[wh_this] += icounts_this
        # Divide out for average and return:
        return new_counts / new_fulldx
class WeightedRebinCoadder:
    """
    Object class for weighted rebinning and coaddition of spectra.

    Initialize as follows:
        WRC = WeightedRebinCoadder(fluxes, invvars, pixbounds)
    where
        fluxes = list of arrays of specific flux values
        invvars = list of arrays of associated inverse variances
        pixbounds = list of arrays of pixel boundaries in baseline units
    """
    def __init__(self, fluxes, invvars, pixbounds):
        # Determine minimum and maximum values of independent variable:
        self.min_indep = [this_bound.min() for this_bound in pixbounds]
        self.max_indep = [this_bound.max() for this_bound in pixbounds]
        self._n_input = len(fluxes)
        # Compute pixel widths:
        dpixes = [this_bound[1:] - this_bound[:-1] for this_bound in pixbounds]
        # Compute "specific inverse variances" (inverse variance per
        # unit baseline, so they resample like a specific flux):
        sp_invvars = [invvars[i] / dpixes[i] for i in range(self._n_input)]
        # Compute pixelspline objects for fluxes:
        self._PXS_fluxes = [PixelSpline(pixbounds[i], fluxes[i]) for i in \
            range(self._n_input)]
        # Compute pixelspline objects for specific inverse variances:
        self._PXS_sp_invvars = [PixelSpline(pixbounds[i], sp_invvars[i]) for \
            i in range(self._n_input)]
    def coadd(self, pixbound_out):
        """
        Resample every input spectrum onto the output boundaries
        `pixbound_out` and return the inverse-variance-weighted coadd
        as the tuple (flux_coadd, invvar_coadd).
        """
        # Compute coverage masks (only output pixels fully inside each
        # input's baseline range contribute):
        masks = [(pixbound_out[:-1] > self.min_indep[i]) *
                 (pixbound_out[1:] < self.max_indep[i]) for i in \
                 range(self._n_input)]
        # Compute output pixel widths:
        dpix_out = pixbound_out[1:] - pixbound_out[:-1]
        # Compute interpolated fluxes:
        new_fluxes = [this_PXS.resample(pixbound_out) for this_PXS in \
            self._PXS_fluxes]
        # Compute interpolated specific inverse variances (converted
        # to inverse variances):
        new_invvars = [dpix_out * this_PXS.resample(pixbound_out) for \
            this_PXS in self._PXS_sp_invvars]
        # Compute coadded flux and inverse variance and return:
        flux_coadd = 0.
        invvar_coadd = 0.
        for i in range(self._n_input):
            flux_coadd += new_fluxes[i] * new_invvars[i] * masks[i]
            invvar_coadd += new_invvars[i] * masks[i]
        # Normalize only where there is coverage; elsewhere flux stays 0.
        is_good = n.where(invvar_coadd > 0.)
        flux_coadd[is_good] /= invvar_coadd[is_good]
        return flux_coadd, invvar_coadd
|
desihubREPO_NAMEdesisimPATH_START.@desisim_extracted@desisim-main@py@desisim@pixelsplines.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "ajdittmann/multiNestNotes",
"repo_path": "multiNestNotes_extracted/multiNestNotes-master/README.md",
"type": "Markdown"
}
|
# Nested Sampling test problems
Some simple test problems for nested sampling codes (or other Bayesian sampling methods). These include multidimensional normal distributions, [generalized Rosenbrock](https://arxiv.org/abs/1903.09556) distributions, and multidimensional [log-gamma](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.loggamma.html) distributions.
Python scripts to perform each test using MultiNest can be found in the `multiNest` directory.
The `emcee` directory includes an example MultiNest output, a script to initialize an emcee analysis based on nested sampling outputs, and a script to run the log-gamma problem using [emcee](https://emcee.readthedocs.io/en/stable/).
The `ultraNest` directory includes an example MultiNest output and a script to run the log-gamma problem, utilizing the [warm starting](https://johannesbuchner.github.io/UltraNest/example-warmstart.html) feature of the [UltraNest](https://johannesbuchner.github.io/UltraNest/readme.html) sampler.
Results for these test problems are collected in [https://arxiv.org/abs/2404.16928](https://arxiv.org/abs/2404.16928).
|
ajdittmannREPO_NAMEmultiNestNotesPATH_START.@multiNestNotes_extracted@multiNestNotes-master@README.md@.PATH_END.py
|
{
"filename": "plot_slice.py",
"repo_name": "PrincetonUniversity/athena",
"repo_path": "athena_extracted/athena-master/vis/python/plot_slice.py",
"type": "Python"
}
|
#! /usr/bin/env python
"""
Script for plotting 2D data or 2D slices of 3D data, intended primarily for
Cartesian grids.
Run "plot_slice.py -h" to see description of inputs.
See documentation on athena_read.athdf() for important notes about reading files
with mesh refinement.
Users are encouraged to make their own versions of this script for improved
results by adjusting figure size, spacings, tick locations, axes labels, etc.
The script must also be modified to plot any functions of the quantities in the
file, including combinations of multiple quantities.
"""
# Python standard modules
import argparse
import warnings
# Other Python modules
import numpy as np
# Athena++ modules
import athena_read
# Main function
def main(**kwargs):
    """Plot a 2D slice (or orthogonal average/sum) of Athena++ .athdf data.

    ``kwargs`` is the argparse namespace from the command line expanded to
    keyword arguments; see the argument definitions under ``__main__`` for
    the meaning of each key.  The function draws a pcolormesh of the chosen
    scalar quantity, optionally overlays streamlines of a vector quantity,
    and either displays the figure or saves it to ``kwargs['output_file']``.

    Raises:
        RuntimeError: if a slice location is given while every plotted
            quantity is being averaged or summed over the slice direction.
    """

    # Load Python plotting modules (headless backend unless showing on screen)
    if kwargs['output_file'] != 'show':
        import matplotlib
        matplotlib.use('agg')
    import matplotlib.pyplot as plt
    import matplotlib.colors as colors

    # Verify user inputs: a slice location is meaningless if both the scalar
    # and (when a stream plot is requested) the vectors are reduced along it.
    slice_erroneously_specified = False
    if kwargs['slice_location'] is not None:
        if kwargs['stream'] is not None:
            if (kwargs['average'] or kwargs['sum']) and kwargs['stream_average']:
                slice_erroneously_specified = True
        else:
            if kwargs['average'] or kwargs['sum']:
                slice_erroneously_specified = True
    if slice_erroneously_specified:
        raise RuntimeError('Slice location specified but all quantities are to be '
                           'averaged or summed')

    # Set default slice location (even if averaging or summing)
    if kwargs['slice_location'] is None:
        kwargs['slice_location'] = 0.0

    # Determine refinement level to use (None -> max level in file);
    # simplified from an if/else that copied the value either way.
    level = kwargs['level']

    # Determine if vector quantities should be read: the two stream
    # components lying in the plotted plane for the chosen slice direction.
    quantities = [kwargs['quantity']]
    if kwargs['stream'] is not None:
        if kwargs['direction'] == 1:
            quantities.append(kwargs['stream'] + '2')
            quantities.append(kwargs['stream'] + '3')
        elif kwargs['direction'] == 2:
            quantities.append(kwargs['stream'] + '1')
            quantities.append(kwargs['stream'] + '3')
        else:
            quantities.append(kwargs['stream'] + '1')
            quantities.append(kwargs['stream'] + '2')

    # Read data ('Levels' is synthesized by athena_read rather than stored)
    if quantities[0] == 'Levels':
        data = athena_read.athdf(kwargs['data_file'], quantities=quantities[1:],
                                 level=level, return_levels=True,
                                 num_ghost=kwargs['num_ghost'])
    else:
        data = athena_read.athdf(kwargs['data_file'], quantities=quantities,
                                 level=level, num_ghost=kwargs['num_ghost'])

    # Check that coordinates work with user choices
    coordinates = data['Coordinates'].decode('ascii', 'replace')
    ave_or_sum = kwargs['average'] or kwargs['sum'] or kwargs['stream_average']
    warn_projection = False
    warn_vector = False
    projection_type = None
    if coordinates in ('cartesian', 'minkowski'):
        pass
    elif coordinates == 'cylindrical':
        if ave_or_sum and kwargs['direction'] == 1:
            warn_projection = True
        if kwargs['stream'] and kwargs['direction'] in (1, 3):
            warn_vector = True
        if kwargs['direction'] == 3:
            projection_type = "polar"
    elif coordinates in ('spherical_polar', 'schwarzschild', 'kerr-schild'):
        if ave_or_sum and kwargs['direction'] in (1, 2):
            warn_projection = True
        if kwargs['stream']:
            warn_vector = True
    else:
        warnings.warn('Coordinates not recognized; results may be misleading')
    if warn_projection:
        warnings.warn('Sums/slices are not computed with correct volumes')
    if warn_vector:
        warnings.warn('Vector plot may be misleading')

    # Name coordinates
    if coordinates in ('cartesian', 'minkowski'):
        coord_labels = (r'$x$', r'$y$', r'$z$')
    elif coordinates == 'cylindrical':
        coord_labels = (r'$R$', r'$\phi$', r'$z$')
    elif coordinates in ('spherical_polar', 'schwarzschild', 'kerr-schild'):
        coord_labels = (r'$r$', r'$\theta$', r'$\phi$')
    else:
        coord_labels = (r'$x^1$', r'$x^2$', r'$x^3$')

    # Extract basic coordinate information: (xf, yf) bound the plotted plane,
    # zf runs along the slicing direction.  The last branch was a bare `if`
    # in the original; made an `elif` for consistency with its siblings.
    if kwargs['direction'] == 1:
        xf = data['x2f']
        xv = data['x2v']
        yf = data['x3f']
        yv = data['x3v']
        zf = data['x1f']
        x_label = coord_labels[1]
        y_label = coord_labels[2]
    elif kwargs['direction'] == 2:
        xf = data['x1f']
        xv = data['x1v']
        yf = data['x3f']
        yv = data['x3v']
        zf = data['x2f']
        x_label = coord_labels[0]
        y_label = coord_labels[2]
    elif kwargs['direction'] == 3:
        xf = data['x1f']
        xv = data['x1v']
        yf = data['x2f']
        yv = data['x2v']
        zf = data['x3f']
        x_label = coord_labels[0]
        y_label = coord_labels[1]

    # Create grids (cell faces for pcolormesh, cell centers for streamplot)
    x_grid, y_grid = np.meshgrid(xf, yf)
    x_stream, y_stream = np.meshgrid(xv, yv)

    # Extract scalar data
    vals = data[kwargs['quantity']]
    if kwargs['average'] or kwargs['sum']:
        vals = np.sum(vals, axis=3-kwargs['direction'])
        if kwargs['sum']:
            vals *= zf[-1] - zf[0]
        vals /= len(zf) - 1
    else:
        # Clamp the slice index to the grid; otherwise take the cell whose
        # lower face is the last one at or below the requested location.
        if kwargs['slice_location'] < zf[0]:
            index = 0
        elif kwargs['slice_location'] >= zf[-1]:
            index = -1
        else:
            index = np.where(zf <= kwargs['slice_location'])[0][-1]
        if kwargs['direction'] == 1:
            vals = vals[:, :, index]
        elif kwargs['direction'] == 2:
            vals = vals[:, index, :]
        else:
            vals = vals[index, :, :]

    # Extract vector data (same reduction/slicing logic as the scalar above)
    if kwargs['stream'] is not None:
        if kwargs['direction'] == 1:
            vals_x = data[kwargs['stream'] + '2']
            vals_y = data[kwargs['stream'] + '3']
        elif kwargs['direction'] == 2:
            vals_x = data[kwargs['stream'] + '1']
            vals_y = data[kwargs['stream'] + '3']
        else:
            vals_x = data[kwargs['stream'] + '1']
            vals_y = data[kwargs['stream'] + '2']
        if kwargs['stream_average']:
            vals_x = np.sum(vals_x, axis=3-kwargs['direction'])
            vals_y = np.sum(vals_y, axis=3-kwargs['direction'])
            vals_x /= len(zf) - 1
            vals_y /= len(zf) - 1
        else:
            if kwargs['slice_location'] < zf[0]:
                index = 0
            elif kwargs['slice_location'] >= zf[-1]:
                index = -1
            else:
                index = np.where(zf <= kwargs['slice_location'])[0][-1]
            if kwargs['direction'] == 1:
                vals_x = vals_x[:, :, index]
                vals_y = vals_y[:, :, index]
            elif kwargs['direction'] == 2:
                vals_x = vals_x[:, index, :]
                vals_y = vals_y[:, index, :]
            else:
                vals_x = vals_x[index, :, :]
                vals_y = vals_y[index, :, :]

    # Determine plot limits (user overrides win over data extents)
    x_min = kwargs['x_min'] if kwargs['x_min'] is not None else xf[0]
    x_max = kwargs['x_max'] if kwargs['x_max'] is not None else xf[-1]
    y_min = kwargs['y_min'] if kwargs['y_min'] is not None else yf[0]
    y_max = kwargs['y_max'] if kwargs['y_max'] is not None else yf[-1]
    v_min = kwargs['vmin'] if kwargs['vmin'] is not None else vals.min()
    v_max = kwargs['vmax'] if kwargs['vmax'] is not None else vals.max()

    # Determine colormap norm
    if kwargs['logc']:
        norm = colors.LogNorm(v_min, v_max)
    else:
        norm = colors.Normalize(v_min, v_max)

    # Make plot
    # should make the size editable?
    fig = plt.figure(1, figsize=(12, 12))
    ax = fig.add_subplot(1, 1, 1, projection=projection_type)
    if projection_type == 'polar':
        # switch axis for radial and azimuthal
        im = ax.pcolormesh(y_grid, x_grid, vals, cmap=kwargs['colormap'], norm=norm)
    else:
        im = ax.pcolormesh(x_grid, y_grid, vals, cmap=kwargs['colormap'], norm=norm)
    if kwargs['stream'] is not None:
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore',
                'invalid value encountered in greater_equal',
                RuntimeWarning,
                'numpy')
            if projection_type == 'polar':
                # switch axis for radial and azimuthal and Transpose
                ax.streamplot(y_stream.T, x_stream.T, vals_y.T, vals_x.T,
                              density=kwargs['stream_density'], color='k')
            else:
                ax.streamplot(x_stream, y_stream, vals_x, vals_y,
                              density=kwargs['stream_density'], color='k')
    if projection_type == 'polar':
        ax.set_rmin(x_min)
        ax.set_rmax(x_max)
    else:
        ax.set_xlim((x_min, x_max))
        ax.set_ylim((y_min, y_max))
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    fig.colorbar(im)
    if not kwargs['fill']:
        ax.set_aspect('equal')
    if kwargs['output_file'] == 'show':
        # NOTE(review): fig.show() does not block; plt.show() may be needed
        # for the window to persist outside interactive sessions -- confirm.
        fig.show()
    else:
        fig.savefig(kwargs['output_file'], bbox_inches='tight')
# Execute main function
if __name__ == '__main__':
    # Command-line interface; every option maps 1:1 onto a keyword argument
    # of main() via vars(args).
    parser = argparse.ArgumentParser()
    parser.add_argument('data_file',
                        help='name of input file, possibly including path')
    parser.add_argument('quantity',
                        help='name of quantity to be plotted')
    parser.add_argument('output_file',
                        help=('name of output to be (over)written, possibly including '
                              'path; use "show" to show interactive plot instead'))
    parser.add_argument('-d', '--direction',
                        type=int,
                        choices=(1, 2, 3),
                        default=3,
                        help=('direction orthogonal to slice for 3D data'))
    parser.add_argument('--slice_location',
                        type=float,
                        default=None,
                        help=('coordinate value along which slice is to be taken '
                              '(default: 0)'))
    parser.add_argument('-a', '--average',
                        action='store_true',
                        help=('flag indicating averaging should be done in orthogonal '
                              'direction for 3D data'))
    parser.add_argument('-s', '--sum',
                        action='store_true',
                        help=('flag indicating summation should be done in orthogonal '
                              'direction for 3D data'))
    parser.add_argument('-l',
                        '--level',
                        type=int,
                        default=None,
                        help=('refinement level to be used in plotting (default: max '
                              'level in file)'))
    parser.add_argument('--x_min',
                        type=float,
                        default=None,
                        help='minimum extent of plot in first plotted direction')
    parser.add_argument('--x_max',
                        type=float,
                        default=None,
                        help='maximum extent of plot in first plotted direction')
    parser.add_argument('--y_min',
                        type=float,
                        default=None,
                        help='minimum extent of plot in second plotted direction')
    parser.add_argument('--y_max',
                        type=float,
                        default=None,
                        help='maximum extent of plot in second plotted direction')
    parser.add_argument('-f', '--fill',
                        action='store_true',
                        help='flag indicating image should fill plot area, even if this '
                             'distorts the aspect ratio')
    parser.add_argument('-c',
                        '--colormap',
                        default=None,
                        help=('name of Matplotlib colormap to use instead of default'))
    # vmin/vmax must be given in --opt=value form for negative numbers, since
    # argparse would otherwise parse a leading '-' as a new option.
    parser.add_argument('--vmin',
                        type=float,
                        default=None,
                        help=('data value to correspond to colormap minimum; use '
                              '--vmin=<val> if <val> has negative sign'))
    parser.add_argument('--vmax',
                        type=float,
                        default=None,
                        help=('data value to correspond to colormap maximum; use '
                              '--vmax=<val> if <val> has negative sign'))
    parser.add_argument('--logc',
                        action='store_true',
                        help='flag indicating data should be colormapped logarithmically')
    parser.add_argument('--stream',
                        default=None,
                        help='name of vector quantity to use to make stream plot')
    parser.add_argument('--stream_average',
                        action='store_true',
                        help='flag indicating stream plot should be averaged in '
                             'orthogonal direction for 3D data')
    parser.add_argument('--stream_density',
                        type=float,
                        default=1.0,
                        help='density of stream lines')
    parser.add_argument('--num_ghost',
                        type=int,
                        default=0,
                        help=('Include number of ghost cells in each direction'))
    args = parser.parse_args()
    main(**vars(args))
|
PrincetonUniversityREPO_NAMEathenaPATH_START.@athena_extracted@athena-master@vis@python@plot_slice.py@.PATH_END.py
|
{
"filename": "matrix_gen-bias_real_space.py",
"repo_name": "Michalychforever/CLASS-PT",
"repo_path": "CLASS-PT_extracted/CLASS-PT-master/pt_matrices/compute_matrices_python/bias_real_space/matrix_gen-bias_real_space.py",
"type": "Python"
}
|
import numpy as np
#from whichdict import importdict
from sympy.parsing.mathematica import mathematica
from sympy import *
from mpmath import *
# 32 significant digits for mpmath arithmetic; pretty-print results.
mp.dps = 32
mp.pretty = True

# Symbolic exponents used below when lambdifying the Mathematica kernels.
nu1 = var('nu1')
nu2 = var('nu2')
def J(nu1, nu2):
    """Analytic gamma-function prefactor J(nu1, nu2) used by the M12 kernel."""
    numerator = gamma(1.5 - nu1) * gamma(1.5 - nu2) * gamma(nu1 + nu2 - 1.5)
    denominator = gamma(nu1) * gamma(nu2) * gamma(3. - nu2 - nu1)
    return (numerator / denominator) / (8. * pi**(1.5))
#b=-0.8
#btab={-1.25,-0.8}
# Wavenumber range [k0, kmax] sampled with Nmax points per run below.
kmax = 1.e2
k0 = 0.00005
Nmaxtab=[128,256,512]
#namelist={"b2.txt","G2.txt"}
# Map each Mathematica-kernel file to its bias (power-law offset) parameter.
dictio={"b2.txt":-1.25,"G2.txt":-0.8}
for el in dictio:
    name = el
    b = dictio[el]
    print("[][][][][][][][][][][][][][][][][][][][][][]")
    print(name)
    print("bias -> "+str(b))
    print("[][][][][][][][][][][][][][][][][][][][][][]\n")
    # Parse the Mathematica expression from file and turn it into a numeric
    # mpmath function of (nu1, nu2).
    with open(name,"r") as file:
        expr = file.read()
    mathexpr=mathematica(expr)
    M12temp=lambdify([nu1,nu2],mathexpr,"mpmath")
    def M12(nu1,nu2):
        # Full kernel: analytic J prefactor times the file-specific piece.
        return J(nu1,nu2)*M12temp(nu1,nu2)
    print("{}{}{}{}{}{}{}{}{}{}{}")
    print("test: M12(2.32,-1.84) -> "+str(M12(2.32,-1.84)))
    print("{}{}{}{}{}{}{}{}{}{}{}\n")
    for Nmax in Nmaxtab:
        # Complex exponents eta_m on the vertical line Re = b.
        Delta = log(kmax/k0) / (Nmax - 1)
        jsNm = np.arange(-Nmax/2,Nmax/2+1,1)
        etam = b + 2*1j*pi*(jsNm)/Nmax/Delta
        print("###################")
        print("Nmax is "+str(Nmax))
        print("###################\n")
        m12mat = np.zeros((Nmax+1,Nmax+1),dtype=complex)
        for j1 in range(Nmax+1):
            print(j1)  # progress indicator: each row is expensive
            for j2 in range(Nmax+1):
                if j1 - Nmax/2 < 1:
                    m12mat[j1][j2] = M12(-0.5*etam[j1],-0.5*etam[j2])
                else:
                    # Reuse the conjugate of the mirrored entry instead of
                    # re-evaluating M12 for the upper half of the grid.
                    m12mat[j1][j2] = np.conjugate(m12mat[Nmax - j1][Nmax - j2])
        # Row-major flattening of the full matrix (computed but not saved).
        mout = np.zeros((Nmax+1)*(Nmax+1),dtype=complex)
        for i in range(Nmax+1):
            for j in range(Nmax+1):
                mout[i * (Nmax+1)+j] = m12mat[i][j]
        # Pack only entries with j <= i into a flat vector.
        mout_red = np.zeros(((Nmax+1)*(Nmax+1)+(Nmax+1))//2, dtype=complex)
        for i in range(Nmax+1):
            for j in range(i+1):
                mout_red[i+(2*(Nmax+1)-1-j)*j//2] = m12mat[i][j]
        # One real vector: all real parts followed by all imaginary parts
        # (presumably the layout expected by the CLASS-PT C reader -- confirm).
        moutoneline = np.zeros(((Nmax+1)*(Nmax+1)+(Nmax+1)))
        for i in range((Nmax+1)*(Nmax+2)//2):
            moutoneline[i] = mout_red.real[i]
        for i in range((Nmax+1)*(Nmax+2)//2,(Nmax+1)*(Nmax+2)):
            moutoneline[i] = mout_red.imag[i-(Nmax+1)*(Nmax+2)//2]
        np.savetxt('M12oneline_N'+str(Nmax)+'-bias_real_space-'+name+'.dat',moutoneline)
print("\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
print("PT matrices for matter successfully computed!")
|
MichalychforeverREPO_NAMECLASS-PTPATH_START.@CLASS-PT_extracted@CLASS-PT-master@pt_matrices@compute_matrices_python@bias_real_space@matrix_gen-bias_real_space.py@.PATH_END.py
|
{
"filename": "TestOverride.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/tests/unit/NodeList/TestOverride.py",
"type": "Python"
}
|
from SpheralTestUtilities import *
import NodeList
class DummySphNodeList1d(SphNodeList1d):
    """Trivial subclass used to exercise overriding SphNodeList1d from Python."""
    def __init__(self,
                 numInternal = 100,
                 numGhost = 0):
        # NOTE(review): this constructs a temporary SphNodeList1d and discards
        # it rather than initializing *self* (there is no
        # SphNodeList1d.__init__(self, ...) call) -- presumably intentional
        # for this override test; confirm.
        SphNodeList1d(numInternal, numGhost)
        print("Instantiating dummy sph node list.")
        return
# Build a plain 100-node list and echo it; output() comes from the star
# import above (presumably SpheralTestUtilities -- confirm).
nodes = SphNodeList1d(100)
output("nodes")
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@tests@unit@NodeList@TestOverride.py@.PATH_END.py
|
{
"filename": "tfsa-2021-019.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2021-019.md",
"type": "Markdown"
}
|
## TFSA-2021-019: Heap buffer overflow caused by rounding
### CVE Number
CVE-2021-29529
### Impact
An attacker can trigger a heap buffer overflow in
`tf.raw_ops.QuantizedResizeBilinear` by manipulating input values so that float
rounding results in off-by-one error in accessing image elements:
```python
import tensorflow as tf
l = [256, 328, 361, 17, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 384]
images = tf.constant(l, shape=[1, 1, 15, 1], dtype=tf.qint32)
size = tf.constant([12, 6], shape=[2], dtype=tf.int32)
min = tf.constant(80.22522735595703)
max = tf.constant(80.39215850830078)
tf.raw_ops.QuantizedResizeBilinear(images=images, size=size, min=min, max=max,
align_corners=True, half_pixel_centers=True)
```
This is because the
[implementation](https://github.com/tensorflow/tensorflow/blob/44b7f486c0143f68b56c34e2d01e146ee445134a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc#L62-L66)
computes two integers (representing the upper and lower bounds for
interpolation) by ceiling and flooring a floating point value:
```cc
const float in_f = std::floor(in);
interpolation->lower[i] = std::max(static_cast<int64>(in_f), static_cast<int64>(0));
interpolation->upper[i] = std::min(static_cast<int64>(std::ceil(in)), in_size - 1);
```
For some values of `in`, `interpolation->upper[i]` might be smaller than
`interpolation->lower[i]`. This is an issue if `interpolation->upper[i]` is
capped at `in_size-1` as it means that `interpolation->lower[i]` points outside
of the image. Then, [in the interpolation
code](https://github.com/tensorflow/tensorflow/blob/44b7f486c0143f68b56c34e2d01e146ee445134a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc#L245-L264),
this would result in heap buffer overflow:
```cc
template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC>
inline void OutputLerpForChannels(const InterpolationCache<T_SCALE>& xs,
const int64 x, const T_SCALE ys_ilerp,
const int channels, const float min,
const float max, const T* ys_input_lower_ptr,
const T* ys_input_upper_ptr,
T* output_y_ptr) {
const int64 xs_lower = xs.lower[x];
...
for (int c = 0; c < channels; ++c) {
const T top_left = ys_input_lower_ptr[xs_lower + c];
...
}
}
```
For the other cases where `interpolation->upper[i]` is smaller than
`interpolation->lower[i]`, we can set them to be equal without affecting the
output.
### Patches
We have patched the issue in GitHub commit
[f851613f8f0fb0c838d160ced13c134f778e3ce7](https://github.com/tensorflow/tensorflow/commit/f851613f8f0fb0c838d160ced13c134f778e3ce7).
The fix will be included in TensorFlow 2.5.0. We will also cherrypick this
commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow
2.1.4, as these are also affected and still in supported range.
### For more information
Please consult [our security
guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for
more information regarding the security model and how to contact us with issues
and questions.
### Attribution
This vulnerability has been reported by Ying Wang and Yakun Zhang of Baidu X-Team.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2021-019.md@.PATH_END.py
|
{
"filename": "Data_selector.py",
"repo_name": "astrom-tom/SPARTAN",
"repo_path": "SPARTAN_extracted/SPARTAN-master/spartan/Data_selector.py",
"type": "Python"
}
|
'''
############################
#####
##### The Spartan Project
##### R. THOMAS
##### 2016-18
#####
##### This file contains
##### the code that organizes
##### the data in *Lib.hdf5
##### files
###########################
@License: GPL licence - see LICENCE.txt
'''
#### local imports
from . import Data_phot as phot
from . import Data_spec as spec
from . import Data_comb as comb
def data_selector(CONF):
    """
    This function determines what type of data file we have to make

    Parameter
    ---------
    CONF Configuration from the input configuration file
    """
    spec_flag = CONF.CONF['UseSpec'].lower()
    phot_flag = CONF.CONF['UsePhot'].lower()
    status = 'nok'
    # The three cases are mutually exclusive, so an elif chain is equivalent
    # to the original sequence of independent ifs.
    if spec_flag == 'no' and phot_flag == 'yes':
        # First case: Photometry alone
        status = phot.file_phot(CONF)
    elif spec_flag == 'yes' and phot_flag == 'no':
        # Second case: Spectroscopy alone
        status = spec.file_spec(CONF)
    elif spec_flag == 'yes' and phot_flag == 'yes':
        # Third case: Combined fit
        status = comb.file_comb(CONF)
    return status
|
astrom-tomREPO_NAMESPARTANPATH_START.@SPARTAN_extracted@SPARTAN-master@spartan@Data_selector.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/docstore/__init__.py",
"type": "Python"
}
|
"""**Docstores** are classes to store and load Documents.
The **Docstore** is a simplified version of the Document Loader.
**Class hierarchy:**
.. code-block::
Docstore --> <name> # Examples: InMemoryDocstore, Wikipedia
**Main helpers:**
.. code-block::
Document, AddableMixin
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.docstore.arbitrary_fn import (
DocstoreFn,
)
from langchain_community.docstore.in_memory import (
InMemoryDocstore,
)
from langchain_community.docstore.wikipedia import (
Wikipedia,
)
# Maps each public name to the submodule that defines it; consumed by
# __getattr__ for lazy, on-demand imports.
_module_lookup = {
    "DocstoreFn": "langchain_community.docstore.arbitrary_fn",
    "InMemoryDocstore": "langchain_community.docstore.in_memory",
    "Wikipedia": "langchain_community.docstore.wikipedia",
}
def __getattr__(name: str) -> Any:
    """Lazily resolve ``name`` from its submodule on first attribute access."""
    try:
        module_path = _module_lookup[name]
    except KeyError:
        raise AttributeError(f"module {__name__} has no attribute {name}")
    return getattr(importlib.import_module(module_path), name)
# Public, lazily-loaded API of this package.
__all__ = ["DocstoreFn", "InMemoryDocstore", "Wikipedia"]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@docstore@__init__.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "nespinoza/exonailer",
"repo_path": "exonailer_extracted/exonailer-master/utilities/flicker-noise/setup.py",
"type": "Python"
}
|
from distutils.core import setup, Extension
import numpy
"""
According to GSL documentation (http://www.gnu.org/software/gsl/manual/html_node/Shared-Libraries.html), in order to run the different operations one must include the GSL library, the GSLCBLAS library and the math library. To compile in C one must do:
gcc -Wall -c filename.c
And then:
gcc -static nombredelarchivo.o -lgsl -lgslcblas -lm
The first part is done by Python by this file. The second part (adding "-lgsl -lgslcblas -lm"), obviously isn't. To add any libraries that in C would be called by:
gcc -static nombredelarchivo.o -lname1 -lname2 -lname3...
Is as simple as putting libraries=['name1','name2',...] inside the Extension module. Here we do it with "gsl", "gslcblas" and "m".
"""
module = Extension('FWT', sources = ['FWT.c'],libraries=['m'], include_dirs=[numpy.get_include(),'/usr/local/include'])
setup(name = 'Fast Wavelet Transform, C/Python extension ', version = '1.0', ext_modules = [module])
|
nespinozaREPO_NAMEexonailerPATH_START.@exonailer_extracted@exonailer-master@utilities@flicker-noise@setup.py@.PATH_END.py
|
{
"filename": "_xsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/_xsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``xsrc`` property of the ``funnel`` trace."""

    def __init__(self, plotly_name="xsrc", parent_name="funnel", **kwargs):
        # Pull the edit type out of kwargs first so a caller-supplied value
        # overrides the "none" default.
        edit_type = kwargs.pop("edit_type", "none")
        super(XsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnel@_xsrc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/partners/openai/langchain_openai/output_parsers/__init__.py",
"type": "Python"
}
|
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
__all__ = ["JsonOutputKeyToolsParser", "JsonOutputToolsParser", "PydanticToolsParser"]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@partners@openai@langchain_openai@output_parsers@__init__.py@.PATH_END.py
|
{
"filename": "plot_case_A.py",
"repo_name": "galtay/rabacus",
"repo_path": "rabacus_extracted/rabacus-master/cloudy/plot_case_A.py",
"type": "Python"
}
|
import time

import numpy as np

import rabacus as ra
# Fix: `plt` is used below but matplotlib.pyplot was never imported,
# so the plotting section raised a NameError.
import matplotlib.pyplot as plt

# setup Stromgren sphere
#=================================================================
Nl = 512
T = np.ones(Nl) * 1.0e4 * ra.U.K
Rsphere = 6.6 * ra.U.kpc
Edges = np.linspace( 0.0 * ra.U.kpc, Rsphere, Nl+1 )
nH = np.ones(Nl) * 1.0e-3 / ra.U.cm**3
# NOTE(review): nHe is defined but unused below (nHe_null is passed instead).
nHe = np.ones(Nl) * 8.7e-5 / ra.U.cm**3
nHe_null = np.ones(Nl) * 1.0e-15 / ra.U.cm**3
Ln = 5.0e48 / ra.U.s # set photon luminosity
Nnu = 128
q_mono = 1.2
q_min = q_mono
q_max = q_mono
src_mono = ra.rad_src.PointSource( q_min, q_max, 'monochromatic' )
src_mono.normalize_Ln( Ln )
#=================================================================

# Iliev 06 tests
#=================================================================

# radiative transfer case A mono fix T
#-----------------------------------------------------------------
t1 = time.time()
s = ra.f2py.StromgrenSphere(
    Edges, T, nH, nHe_null, src_mono, fixed_fcA=1.0 )
t2 = time.time()
# Fix: was a Python-2 print statement (`print 'time: ', ...`); the
# concatenated form prints identically under both Python 2 and 3.
print('time: ' + str(t2-t1))

# Plot the neutral hydrogen fraction vs radius against the Cloudy reference.
plt.figure()
s.r_c.units = 'cm'
plt.plot( s.r_c, np.log10( s.xH1 ), color='red', ls='-' )
fname = 'raicevic_case_A.ovr'
dat = np.loadtxt( fname )
plt.plot( dat[:,0], dat[:,6], color='blue', ls='--' )
plt.show()
|
galtayREPO_NAMErabacusPATH_START.@rabacus_extracted@rabacus-master@cloudy@plot_case_A.py@.PATH_END.py
|
{
"filename": "test_large_input.py",
"repo_name": "dmlc/xgboost",
"repo_path": "xgboost_extracted/xgboost-master/tests/python-gpu/test_large_input.py",
"type": "Python"
}
|
import cupy as cp
import numpy as np
import pytest
import xgboost as xgb
# Test for integer overflow or out of memory exceptions
def test_large_input():
    """Train and predict on a >2^31-element matrix to probe overflow/OOM."""
    free_bytes, _ = cp.cuda.runtime.memGetInfo()
    required_bytes = 1.5e10  # 15 GB
    if free_bytes < required_bytes:
        pytest.skip("Not enough memory on this device")

    n_cols = 1000
    n_rows = ((1 << 31) + n_cols - 1) // n_cols  # ceil(2^31 / n_cols)
    assert np.log2(n_rows * n_cols) > 31  # total element count exceeds int32

    X = cp.ones((n_rows, n_cols), dtype=np.float32)
    y = cp.ones(n_rows)
    w = cp.ones(n_rows)
    dmat = xgb.QuantileDMatrix(X, y, weight=w)
    booster = xgb.train({"tree_method": "gpu_hist", "max_depth": 1}, dmat, 1)
    del y
    booster.inplace_predict(X)
|
dmlcREPO_NAMExgboostPATH_START.@xgboost_extracted@xgboost-master@tests@python-gpu@test_large_input.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "glue-viz/glue",
"repo_path": "glue_extracted/glue-main/glue/plugins/data_factories/__init__.py",
"type": "Python"
}
|
glue-vizREPO_NAMEgluePATH_START.@glue_extracted@glue-main@glue@plugins@data_factories@__init__.py@.PATH_END.py
|
|
{
"filename": "python__group_weight__first-sentence.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage/python__group_weight__first-sentence.md",
"type": "Markdown"
}
|
The weights of all objects within the defined groups from the input data in the form of one-dimensional array-like data.
Used for calculating the final values of trees. By default, it is set to 1 for all objects in all groups.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage@python__group_weight__first-sentence.md@.PATH_END.py
|
{
"filename": "container_instance.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/integrations/prefect-azure/prefect_azure/container_instance.py",
"type": "Python"
}
|
"""
Integrations with the Azure Container Instances service.
Note this module is experimental. The interfaces within may change without notice.
The `AzureContainerInstanceJob` infrastructure block in this module is ideally
configured via the Prefect UI and run via a Prefect agent, but it can be called directly
as demonstrated in the following examples.
Examples:
Run a command using an Azure Container Instances container.
```python
AzureContainerInstanceJob(command=["echo", "hello world"]).run()
```
Run a command and stream the container's output to the local terminal.
```python
AzureContainerInstanceJob(
command=["echo", "hello world"],
stream_output=True,
)
```
Run a command with a specific image
```python
AzureContainerInstanceJob(command=["echo", "hello world"], image="alpine:latest")
```
Run a task with custom memory and CPU requirements
```python
AzureContainerInstanceJob(command=["echo", "hello world"], memory=1.0, cpu=1.0)
```
Run a task with custom memory and CPU requirements
```python
AzureContainerInstanceJob(command=["echo", "hello world"], memory=1.0, cpu=1.0)
```
Run a task with custom memory, CPU, and GPU requirements
```python
AzureContainerInstanceJob(command=["echo", "hello world"], memory=1.0, cpu=1.0,
gpu_count=1, gpu_sku="V100")
```
Run a task with custom environment variables
```python
AzureContainerInstanceJob(
command=["echo", "hello $PLANET"],
env={"PLANET": "earth"}
)
```
Run a task that uses a private ACR registry with a managed identity
```python
AzureContainerInstanceJob(
command=["echo", "hello $PLANET"],
image="my-registry.azurecr.io/my-image",
image_registry=ACRManagedIdentity(
registry_url="my-registry.azurecr.io",
identity="/my/managed/identity/123abc"
)
)
```
"""
from enum import Enum
from pydantic import BaseModel, Field
# Default ACI container resource requests (presumably CPU cores, GB of
# memory, and GPU count -- confirm against the Azure container-group API).
ACI_DEFAULT_CPU = 1.0
ACI_DEFAULT_MEMORY = 1.0
ACI_DEFAULT_GPU = 0.0

# Default entrypoint script run inside the container.
DEFAULT_CONTAINER_ENTRYPOINT = "/opt/prefect/entrypoint.sh"

# environment variables that ACI should treat as secure variables so they
# won't appear in logs
ENV_SECRETS = ["PREFECT_API_KEY"]

# The maximum time to wait for container group deletion before giving up and
# moving on. Deletion is usually quick, so exceeding this timeout means something
# has gone wrong and we should raise an exception to inform the user they should
# check their Azure account for orphaned container groups.
CONTAINER_GROUP_DELETION_TIMEOUT_SECONDS = 30
class ContainerGroupProvisioningState(str, Enum):
    """
    Terminal provisioning states for ACI container groups. Per the Azure docs,
    the states in this Enum are the only ones that can be relied on as dependencies.
    """

    # Values are the literal state strings (presumably as reported by the
    # Azure SDK -- confirm against the container-group API response).
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
class ContainerRunState(str, Enum):
    """
    Terminal run states for ACI containers.
    """

    # Literal container-state strings (presumably matching the Azure SDK --
    # confirm).
    RUNNING = "Running"
    TERMINATED = "Terminated"
class ACRManagedIdentity(BaseModel):
    """
    Use a Managed Identity to access Azure Container registry. Requires the
    user-assigned managed identity be available to the ACI container group.
    """

    # Both fields are required (pydantic's `default=...` marks a field as
    # having no default).
    registry_url: str = Field(
        default=...,
        title="Registry URL",
        description=(
            "The URL to the registry, such as myregistry.azurecr.io. Generally, 'http' "
            "or 'https' can be omitted."
        ),
    )
    identity: str = Field(
        default=...,
        description=(
            "The user-assigned Azure managed identity for the private registry."
        ),
    )
class AzureContainerInstanceJobResult:
    """
    The result of an `AzureContainerInstanceJob` run.
    """

    # NOTE(review): intentionally bare -- no attributes are defined in this
    # chunk; confirm whether fields are attached elsewhere before relying on
    # instances of this class.
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@integrations@prefect-azure@prefect_azure@container_instance.py@.PATH_END.py
|
{
"filename": "2-IVFFlat.py",
"repo_name": "facebookresearch/faiss",
"repo_path": "faiss_extracted/faiss-main/tutorial/python/2-IVFFlat.py",
"type": "Python"
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
d = 64 # dimension
nb = 100000 # database size
nq = 10000 # nb of queries
np.random.seed(1234) # make reproducible
xb = np.random.random((nb, d)).astype('float32')
xb[:, 0] += np.arange(nb) / 1000.
xq = np.random.random((nq, d)).astype('float32')
xq[:, 0] += np.arange(nq) / 1000.
import faiss
nlist = 100
k = 4
quantizer = faiss.IndexFlatL2(d) # the other index
index = faiss.IndexIVFFlat(quantizer, d, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
assert not index.is_trained
index.train(xb)
assert index.is_trained
index.add(xb) # add may be a bit slower as well
D, I = index.search(xq, k) # actual search
print(I[-5:]) # neighbors of the 5 last queries
index.nprobe = 10 # default nprobe is 1, try a few more
D, I = index.search(xq, k)
print(I[-5:]) # neighbors of the 5 last queries
|
facebookresearchREPO_NAMEfaissPATH_START.@faiss_extracted@faiss-main@tutorial@python@2-IVFFlat.py@.PATH_END.py
|
{
"filename": "covariance.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/stats/covariance.py",
"type": "Python"
}
|
"""
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import integrate, stats
pi2 = np.pi**2   # pi squared, shared by the variance formulas below
pi2i = 1. / pi2  # 1 / pi^2
def _term_integrate(rho):
    """Evaluate the arcsin-term combination entering the spearman variance."""
    # needs other terms for spearman rho var calculation
    # TODO: streamline calculation and save to linear interpolation, maybe
    sin_r = np.sin(rho)
    sin_2r = np.sin(2 * rho)
    cos_2r = np.cos(2 * rho)
    term1 = np.arcsin(sin_r / (1 + 2 * cos_2r))
    term2 = np.arcsin(sin_2r / np.sqrt(1 + 2 * cos_2r))
    term3 = np.arcsin(sin_2r / (2 * np.sqrt(cos_2r)))
    term4 = np.arcsin((3 * sin_r - np.sin(3 * rho)) / (4 * cos_2r))
    # Weighted combination; note the extra 1/pi^2 factor on the second term,
    # exactly as in the original expression.
    return pi2i * (term1 + 2 * pi2i * term2 + term3 + 0.5 * term4)
def transform_corr_normal(corr, method, return_var=False, possdef=True):
    """transform correlation matrix to be consistent at normal distribution

    Parameters
    ----------
    corr : array_like
        correlation matrix, either Pearson, Gaussian-rank, Spearman, Kendall
        or quadrant correlation matrix
    method : string
        type of covariance matrix
        supported types are 'pearson', 'gauss_rank', 'kendal', 'spearman' and
        'quadrant'
    return_var : bool
        If true, then the asymptotic variance of the normalized correlation
        is also returned. The variance of the spearman correlation requires
        numerical integration which is calculated with scipy's odeint.
    possdef : not implemented yet
        Check whether resulting correlation matrix for positive semidefinite
        and return a positive semidefinite approximation if not.

    Returns
    -------
    corr : ndarray
        correlation matrix, consistent with correlation for a multivariate
        normal distribution
    var : ndarray (optional)
        asymptotic variance of the correlation if requested by `return_var`.

    Notes
    -----
    Pearson and Gaussian-rank correlation are consistent at the normal
    distribution and will be returned without changes.

    The other correlation matrices are not guaranteed to be positive
    semidefinite in small sample after conversion, even if the underlying
    untransformed correlation matrix is positive (semi)definite. Croux and
    Dehon mention that nobs / k_vars should be larger than 3 for kendall and
    larger than 2 for spearman.

    References
    ----------
    .. [1] Boudt, Kris, Jonathan Cornelissen, and Christophe Croux. “The
       Gaussian Rank Correlation Estimator: Robustness Properties.”
       Statistics and Computing 22, no. 2 (April 5, 2011): 471–83.
       https://doi.org/10.1007/s11222-011-9237-0.
    .. [2] Croux, Christophe, and Catherine Dehon. “Influence Functions of the
       Spearman and Kendall Correlation Measures.”
       Statistical Methods & Applications 19, no. 4 (May 12, 2010): 497–515.
       https://doi.org/10.1007/s10260-010-0142-z.
    """
    method = method.lower()
    rho = np.asarray(corr)

    var = None  # initialize
    if method in ['pearson', 'gauss_rank']:
        # already consistent at the normal distribution; pass through
        corr_n = corr
        if return_var:
            var = (1 - rho**2)**2
    elif method.startswith('kendal'):
        # sine transform of Kendall's tau
        corr_n = np.sin(np.pi / 2 * corr)
        if return_var:
            var = (1 - rho**2) * np.pi**2 * (
                1./9 - 4 / np.pi**2 * np.arcsin(rho / 2)**2)
    elif method == 'quadrant':
        corr_n = np.sin(np.pi / 2 * corr)
        if return_var:
            var = (1 - rho**2) * (np.pi**2 / 4 - np.arcsin(rho)**2)
    elif method.startswith('spearman'):
        corr_n = 2 * np.sin(np.pi / 6 * corr)
        # not clear which rho is in formula, should be normalized rho,
        # but original corr coefficient seems to match results in articles
        # rho = corr_n
        if return_var:
            # odeint only works if grid of rho is large, i.e. many points
            # e.g. rho = np.linspace(0, 1, 101)
            rho = np.atleast_1d(rho)
            # integrate cumulatively over sorted rho values, prepending 0 as
            # the lower integration limit; idx scatters results back to the
            # caller's ordering at the end
            idx = np.argsort(rho)
            rhos = rho[idx]
            rhos = np.concatenate(([0], rhos))
            t = np.arcsin(rhos / 2)
            # drop np namespace here
            sin, cos = np.sin, np.cos
            var = (1 - rho**2 / 4) * pi2 / 9  # leading factor
            f1 = lambda t, x: np.arcsin(sin(x) / (1 + 2 * cos(2 * x)))  # noqa
            f2 = lambda t, x: np.arcsin(sin(2 * x) /  # noqa
                                        np.sqrt(1 + 2 * cos(2 * x)))
            f3 = lambda t, x: np.arcsin(sin(2 * x) /  # noqa
                                        (2 * np.sqrt(cos(2 * x))))
            f4 = lambda t, x: np.arcsin(( 3 * sin(x) - sin(3 * x)) /  # noqa
                                        (4 * cos(2 * x)))
            # todo check dimension, odeint return column (n, 1) array
            hmax = 1e-1
            rf1 = integrate.odeint(f1 , 0, t=t, hmax=hmax).squeeze()
            rf2 = integrate.odeint(f2 , 0, t=t, hmax=hmax).squeeze()
            rf3 = integrate.odeint(f3 , 0, t=t, hmax=hmax).squeeze()
            rf4 = integrate.odeint(f4 , 0, t=t, hmax=hmax).squeeze()
            fact = 1 + 144 * (-9 / 4. * pi2i * np.arcsin(rhos / 2)**2 +
                              pi2i * rf1 +
                              2 * pi2i * rf2 + pi2i * rf3 +
                              0.5 * pi2i * rf4)
            # fact = 1 - 9 / 4 * pi2i * np.arcsin(rhos / 2)**2
            # drop the prepended 0 entry and restore original ordering
            fact2 = np.zeros_like(var) * np.nan
            fact2[idx] = fact[1:]
            var *= fact2
    else:
        raise ValueError('method not recognized')

    if return_var:
        return corr_n, var
    else:
        return corr_n
def corr_rank(data):
"""Spearman rank correlation
simplified version of scipy.stats.spearmanr
"""
x = np.asarray(data)
axisout = 0
ar = np.apply_along_axis(stats.rankdata, axisout, x)
corr = np.corrcoef(ar, rowvar=False)
return corr
def corr_normal_scores(data):
"""Gaussian rank (normal scores) correlation
Status: unverified, subject to change
Parameters
----------
data : array_like
2-D data with observations in rows and variables in columns
Returns
-------
corr : ndarray
correlation matrix
References
----------
.. [1] Boudt, Kris, Jonathan Cornelissen, and Christophe Croux. “The
Gaussian Rank Correlation Estimator: Robustness Properties.”
Statistics and Computing 22, no. 2 (April 5, 2011): 471–83.
https://doi.org/10.1007/s11222-011-9237-0.
"""
# TODO: a full version should be same as scipy spearmanr
# I think that's not true the croux et al articles mention different
# results
# needs verification for the p-value calculation
x = np.asarray(data)
nobs = x.shape[0]
axisout = 0
ar = np.apply_along_axis(stats.rankdata, axisout, x)
ar = stats.norm.ppf(ar / (nobs + 1))
corr = np.corrcoef(ar, rowvar=axisout)
return corr
def corr_quadrant(data, transform=np.sign, normalize=False):
"""Quadrant correlation
Status: unverified, subject to change
Parameters
----------
data : array_like
2-D data with observations in rows and variables in columns
Returns
-------
corr : ndarray
correlation matrix
References
----------
.. [1] Croux, Christophe, and Catherine Dehon. “Influence Functions of the
Spearman and Kendall Correlation Measures.”
Statistical Methods & Applications 19, no. 4 (May 12, 2010): 497–515.
https://doi.org/10.1007/s10260-010-0142-z.
"""
# try also with tanh transform, a starting corr for DetXXX
# tanh produces a cov not a corr
x = np.asarray(data)
nobs = x.shape[0]
med = np.median(x, 0)
x_dm = transform(x - med)
corr = x_dm.T.dot(x_dm) / nobs
if normalize:
std = np.sqrt(np.diag(corr))
corr /= std
corr /= std[:, None]
return corr
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@stats@covariance.py@.PATH_END.py
|
{
"filename": "_base.py",
"repo_name": "deepmind/optax",
"repo_path": "optax_extracted/optax-main/optax/second_order/_base.py",
"type": "Python"
}
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base types for the second order sub-package."""
import abc
from typing import Any, Protocol
import jax
class LossFn(Protocol):
"""A loss function to be optimized."""
@abc.abstractmethod
def __call__(
self, params: Any, inputs: jax.Array, targets: jax.Array
) -> jax.Array:
...
|
deepmindREPO_NAMEoptaxPATH_START.@optax_extracted@optax-main@optax@second_order@_base.py@.PATH_END.py
|
{
"filename": "test_multi_gauss_expansion.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_Util/test_multi_gauss_expansion.py",
"type": "Python"
}
|
__author__ = "sibirrer"
import lenstronomy.Util.multi_gauss_expansion as mge
import numpy as np
import numpy.testing as npt
from lenstronomy.LightModel.Profiles.sersic import Sersic
from lenstronomy.LightModel.Profiles.hernquist import Hernquist
from lenstronomy.LightModel.Profiles.gaussian import MultiGaussian
import pytest
class TestMGE(object):
"""Tests the Gaussian methods."""
def setup_method(self):
self.sersic = Sersic()
self.multiGaussian = MultiGaussian()
def test_mge_1d_sersic(self):
n_comp = 30
r_sersic = 1.0
n_sersic = 3.7
I0_sersic = 1.0
rs = np.logspace(-2.0, 1.0, 50) * r_sersic
ss = self.sersic.function(
rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic
)
amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp)
ss_mge = self.multiGaussian.function(
rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas
)
# print((ss - ss_mge)/ss)
for i in range(10, len(ss) - 10):
# print(rs[i])
npt.assert_almost_equal((ss_mge[i] - ss[i]) / ss[i], 0, decimal=1)
amplitudes, sigmas, norm = mge.mge_1d(rs, np.zeros_like(rs), N=n_comp)
assert amplitudes[0] == 0
amplitudes, sigmas, norm = mge.mge_1d(rs, np.zeros_like(rs), N=0)
assert amplitudes[0] == 0
def test_mge_sersic_radius(self):
n_comp = 30
r_sersic = 0.5
n_sersic = 3.7
I0_sersic = 1.0
rs = np.logspace(-2.0, 1.0, 50) * r_sersic
ss = self.sersic.function(
rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic
)
amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp)
ss_mge = self.multiGaussian.function(
rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas
)
print((ss - ss_mge) / (ss + ss_mge))
for i in range(10, len(ss) - 10):
# print(rs[i])
npt.assert_almost_equal((ss_mge[i] - ss[i]) / (ss[i]), 0, decimal=1)
def test_mge_sersic_n_sersic(self):
n_comp = 20
r_sersic = 1.5
n_sersic = 0.5
I0_sersic = 1.0
rs = np.logspace(-2.0, 1.0, 50) * r_sersic
ss = self.sersic.function(
rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic
)
amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp)
ss_mge = self.multiGaussian.function(
rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas
)
for i in range(10, len(ss) - 10):
npt.assert_almost_equal(
(ss_mge[i] - ss[i]) / (ss[i] + ss_mge[i]), 0, decimal=1
)
n_comp = 20
r_sersic = 1.5
n_sersic = 3.5
I0_sersic = 1.0
rs = np.logspace(-2.0, 1.0, 50) * r_sersic
ss = self.sersic.function(
rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic
)
amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp)
ss_mge = self.multiGaussian.function(
rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas
)
for i in range(10, len(ss) - 10):
npt.assert_almost_equal(
(ss_mge[i] - ss[i]) / (ss[i] + ss_mge[i]), 0, decimal=1
)
def test_hernquist(self):
hernquist = Hernquist()
n_comp = 20
sigma0 = 1
r_eff = 1.5
rs = np.logspace(-2.0, 1.0, 50) * r_eff * 0.5
ss = hernquist.function(rs, np.zeros_like(rs), sigma0, Rs=r_eff)
amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp)
ss_mge = self.multiGaussian.function(
rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas
)
for i in range(10, len(ss) - 10):
npt.assert_almost_equal(
(ss_mge[i] - ss[i]) / (ss[i] + ss_mge[i]), 0, decimal=2
)
def test_hernquist_deprojection(self):
hernquist = Hernquist()
n_comp = 20
sigma0 = 1
r_eff = 1.5
rs = np.logspace(-2.0, 1.0, 50) * r_eff * 0.5
ss = hernquist.function(rs, np.zeros_like(rs), sigma0, Rs=r_eff)
amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp)
amplitudes_3d, sigmas_3d = mge.de_projection_3d(amplitudes, sigmas)
ss_3d_mge = self.multiGaussian.function(
rs, np.zeros_like(rs), amp=amplitudes_3d, sigma=sigmas_3d
)
ss_3d_mulit = self.multiGaussian.light_3d(rs, amp=amplitudes, sigma=sigmas)
for i in range(10, len(ss_3d_mge)):
npt.assert_almost_equal(
(ss_3d_mge[i] - ss_3d_mulit[i]) / (ss_3d_mulit[i] + ss_3d_mge[i]),
0,
decimal=1,
)
ss_3d = hernquist.light_3d(rs, sigma0, Rs=r_eff)
for i in range(10, len(ss_3d) - 10):
npt.assert_almost_equal(
(ss_3d_mge[i] - ss_3d[i]) / (ss_3d[i] + ss_3d_mge[i]), 0, decimal=1
)
def test_spemd(self):
from lenstronomy.LensModel.Profiles.spep import SPEP
from lenstronomy.LensModel.Profiles.multi_gaussian import (
MultiGaussian,
)
spep = SPEP()
mge_kappa = MultiGaussian()
n_comp = 8
theta_E = 1.41
kwargs = {"theta_E": theta_E, "e1": 0, "e2": 0, "gamma": 1.61}
rs = np.logspace(-2.0, 1.0, 100) * theta_E
f_xx, f_xy, f_yx, f_yy = spep.hessian(rs, 0, **kwargs)
kappa = 1 / 2.0 * (f_xx + f_yy)
amplitudes, sigmas, norm = mge.mge_1d(rs, kappa, N=n_comp)
kappa_mge = self.multiGaussian.function(
rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas
)
f_xx_mge, f_xy_mge, f_yx_mge, f_yy_mge = mge_kappa.hessian(
rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas
)
for i in range(0, 80):
npt.assert_almost_equal(
kappa_mge[i], 1.0 / 2 * (f_xx_mge[i] + f_yy_mge[i]), decimal=1
)
npt.assert_almost_equal((kappa[i] - kappa_mge[i]) / kappa[i], 0, decimal=1)
f_ = spep.function(theta_E, 0, **kwargs)
f_mge = mge_kappa.function(theta_E, 0, sigma=sigmas, amp=amplitudes)
npt.assert_almost_equal(f_mge / f_, 1, decimal=2)
def test_example(self):
n_comp = 10
rs = np.array(
[
0.01589126,
0.01703967,
0.01827108,
0.01959148,
0.0210073,
0.02252544,
0.02415329,
0.02589879,
0.02777042,
0.02977731,
0.03192923,
0.03423667,
0.03671086,
0.03936385,
0.04220857,
0.04525886,
0.0485296,
0.0520367,
0.05579724,
0.05982956,
0.06415327,
0.06878945,
0.07376067,
0.07909115,
0.08480685,
0.09093561,
0.09750727,
0.10455385,
0.11210966,
0.12021152,
0.12889887,
0.13821403,
0.14820238,
0.15891255,
0.17039672,
0.18271082,
0.19591482,
0.21007304,
0.22525444,
0.24153295,
0.25898787,
0.2777042,
0.29777311,
0.31929235,
0.34236672,
0.36710861,
0.39363853,
0.42208569,
0.45258865,
0.48529597,
0.52036697,
0.55797244,
0.59829556,
0.64153272,
0.6878945,
0.73760673,
0.79091152,
0.8480685,
0.90935605,
0.97507269,
1.04553848,
1.12109664,
1.20211518,
1.28898871,
1.38214034,
1.48202378,
1.58912553,
1.70396721,
1.82710819,
1.95914822,
2.10073042,
2.25254437,
2.4153295,
2.58987865,
2.77704199,
2.9777311,
3.19292345,
3.42366716,
3.67108607,
3.93638527,
4.22085689,
4.5258865,
4.85295974,
5.20366966,
5.57972441,
5.98295559,
6.41532717,
6.87894505,
7.37606729,
7.90911519,
8.48068497,
9.09356051,
9.75072687,
10.45538481,
11.21096643,
12.02115183,
12.88988708,
13.82140341,
14.82023784,
15.89125526,
]
)
kappa = np.array(
[
12.13776067,
11.60484966,
11.09533396,
10.60818686,
10.14242668,
9.69711473,
9.27135349,
8.86428482,
8.47508818,
8.10297905,
7.7472073,
7.40705574,
7.08183863,
6.77090034,
6.47361399,
6.18938022,
5.917626,
5.65780342,
5.40938864,
5.1718808,
4.94480104,
4.72769151,
4.52011448,
4.3216514,
4.13190214,
3.9504841,
3.77703149,
3.61119459,
3.45263901,
3.30104507,
3.1561071,
3.01753287,
2.88504297,
2.75837025,
2.63725931,
2.52146595,
2.41075668,
2.30490829,
2.20370736,
2.10694982,
2.01444058,
1.92599312,
1.84142909,
1.76057799,
1.6832768,
1.60936965,
1.53870751,
1.47114792,
1.40655465,
1.34479745,
1.28575181,
1.22929867,
1.17532421,
1.12371958,
1.07438074,
1.02720821,
0.98210687,
0.93898578,
0.897758,
0.85834039,
0.82065349,
0.78462129,
0.75017114,
0.71723359,
0.68574222,
0.65563353,
0.62684681,
0.59932403,
0.57300967,
0.5478507,
0.52379638,
0.5007982,
0.47880979,
0.45778683,
0.43768691,
0.41846951,
0.40009589,
0.38252899,
0.3657334,
0.34967525,
0.33432216,
0.31964317,
0.30560868,
0.29219041,
0.27936129,
0.26709545,
0.25536817,
0.24415579,
0.23343571,
0.22318631,
0.21338694,
0.20401782,
0.19506006,
0.18649562,
0.17830721,
0.17047832,
0.16299318,
0.15583668,
0.14899441,
0.14245255,
]
)
amplitudes, sigmas, norm = mge.mge_1d(rs, kappa, N=n_comp)
def test_nfw_sersic(self):
kwargs_lens_nfw = {"alpha_Rs": 1.4129647849966354, "Rs": 7.0991113634274736}
kwargs_lens_sersic = {
"k_eff": 0.24100561407593576,
"n_sersic": 1.8058507329346063,
"R_sersic": 1.0371803141813705,
}
from lenstronomy.LensModel.Profiles.nfw import NFW
from lenstronomy.LensModel.Profiles.sersic import Sersic
nfw = NFW()
sersic = Sersic()
theta_E = 1.5
n_comp = 10
rs = np.logspace(-2.0, 1.0, 100) * theta_E
f_xx_nfw, f_xy_nfw, f_yx_nfw, f_yy_nfw = nfw.hessian(rs, 0, **kwargs_lens_nfw)
f_xx_s, f_xy_s, f_yx_s, f_yy_s = sersic.hessian(rs, 0, **kwargs_lens_sersic)
kappa = 1 / 2.0 * (f_xx_nfw + f_xx_s + f_yy_nfw + f_yy_s)
amplitudes, sigmas, norm = mge.mge_1d(rs, kappa, N=n_comp)
kappa_mge = self.multiGaussian.function(
rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas
)
from lenstronomy.LensModel.Profiles.multi_gaussian import (
MultiGaussian,
)
mge_kappa = MultiGaussian()
f_xx_mge, f_xy_mge, f_yx_mge, f_yy_mge = mge_kappa.hessian(
rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas
)
for i in range(0, 80):
npt.assert_almost_equal(
kappa_mge[i], 1.0 / 2 * (f_xx_mge[i] + f_yy_mge[i]), decimal=1
)
npt.assert_almost_equal((kappa[i] - kappa_mge[i]) / kappa[i], 0, decimal=1)
f_nfw = nfw.function(theta_E, 0, **kwargs_lens_nfw)
f_s = sersic.function(theta_E, 0, **kwargs_lens_sersic)
f_mge = mge_kappa.function(theta_E, 0, sigma=sigmas, amp=amplitudes)
npt.assert_almost_equal(f_mge / (f_nfw + f_s), 1, decimal=2)
if __name__ == "__main__":
pytest.main()
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_Util@test_multi_gauss_expansion.py@.PATH_END.py
|
{
"filename": "test_rank.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/groupby/methods/test_rank.py",
"type": "Python"
}
|
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
NaT,
Series,
concat,
)
import pandas._testing as tm
def test_rank_unordered_categorical_typeerror():
# GH#51034 should be TypeError, not NotImplementedError
cat = pd.Categorical([], ordered=False)
ser = Series(cat)
df = ser.to_frame()
msg = "Cannot perform rank with non-ordered Categorical"
gb = ser.groupby(cat, observed=False)
with pytest.raises(TypeError, match=msg):
gb.rank()
gb2 = df.groupby(cat, observed=False)
with pytest.raises(TypeError, match=msg):
gb2.rank()
def test_rank_apply():
lev1 = np.array(["a" * 10] * 100, dtype=object)
lev2 = np.array(["b" * 10] * 130, dtype=object)
lab1 = np.random.default_rng(2).integers(0, 100, size=500, dtype=int)
lab2 = np.random.default_rng(2).integers(0, 130, size=500, dtype=int)
df = DataFrame(
{
"value": np.random.default_rng(2).standard_normal(500),
"key1": lev1.take(lab1),
"key2": lev2.take(lab2),
}
)
result = df.groupby(["key1", "key2"]).value.rank()
expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
result = df.groupby(["key1", "key2"]).value.rank(pct=True)
expected = [
piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
np.array([2, 2, 8, 2, 6], dtype=dtype)
for dtype in ["i8", "i4", "i2", "i1", "u8", "u4", "u2", "u1", "f8", "f4", "f2"]
]
+ [
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
],
[
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-08", tz="US/Pacific"),
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-06", tz="US/Pacific"),
],
[
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-08") - pd.Timestamp(0),
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-06") - pd.Timestamp(0),
],
[
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-08").to_period("D"),
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-06").to_period("D"),
],
],
ids=lambda x: type(x[0]),
)
@pytest.mark.parametrize(
"ties_method,ascending,pct,exp",
[
("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]),
("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]),
("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]),
("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]),
("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]),
("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]),
("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]),
("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]),
("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]),
("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]),
("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]),
("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]),
("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]),
("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]),
],
)
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
orig_vals = vals
vals = list(vals) * len(grps)
if isinstance(orig_vals, np.ndarray):
vals = np.array(vals, dtype=orig_vals.dtype)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]]
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,exp",
[
("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]),
("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]),
("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]),
("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]),
("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]),
("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]),
("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]),
("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]),
("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]),
("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]),
("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]),
("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]),
("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]),
("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]),
("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]),
("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]),
("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]),
("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]),
("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]),
("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]),
("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]),
("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]),
("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]),
("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]),
("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]),
("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]),
("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]),
("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]),
],
)
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
# GH 20561
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option
)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
np.array([2, 2, np.nan, 8, 2, 6, np.nan, np.nan], dtype=dtype)
for dtype in ["f8", "f4", "f2"]
]
+ [
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
np.nan,
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
np.nan,
np.nan,
],
[
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-02", tz="US/Pacific"),
np.nan,
pd.Timestamp("2018-01-08", tz="US/Pacific"),
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-06", tz="US/Pacific"),
np.nan,
np.nan,
],
[
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
np.nan,
pd.Timestamp("2018-01-08") - pd.Timestamp(0),
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-06") - pd.Timestamp(0),
np.nan,
np.nan,
],
[
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-02").to_period("D"),
np.nan,
pd.Timestamp("2018-01-08").to_period("D"),
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-06").to_period("D"),
np.nan,
np.nan,
],
],
ids=lambda x: type(x[0]),
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,pct,exp",
[
(
"average",
True,
"keep",
False,
[2.0, 2.0, np.nan, 5.0, 2.0, 4.0, np.nan, np.nan],
),
(
"average",
True,
"keep",
True,
[0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan],
),
(
"average",
False,
"keep",
False,
[4.0, 4.0, np.nan, 1.0, 4.0, 2.0, np.nan, np.nan],
),
(
"average",
False,
"keep",
True,
[0.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan],
),
("min", True, "keep", False, [1.0, 1.0, np.nan, 5.0, 1.0, 4.0, np.nan, np.nan]),
("min", True, "keep", True, [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
(
"min",
False,
"keep",
False,
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
),
("min", False, "keep", True, [0.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
("max", True, "keep", False, [3.0, 3.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan]),
("max", True, "keep", True, [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
(
"max",
False,
"keep",
False,
[5.0, 5.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
),
("max", False, "keep", True, [1.0, 1.0, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan]),
(
"first",
True,
"keep",
False,
[1.0, 2.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan],
),
(
"first",
True,
"keep",
True,
[0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan],
),
(
"first",
False,
"keep",
False,
[3.0, 4.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
),
(
"first",
False,
"keep",
True,
[0.6, 0.8, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan],
),
(
"dense",
True,
"keep",
False,
[1.0, 1.0, np.nan, 3.0, 1.0, 2.0, np.nan, np.nan],
),
(
"dense",
True,
"keep",
True,
[
1.0 / 3.0,
1.0 / 3.0,
np.nan,
3.0 / 3.0,
1.0 / 3.0,
2.0 / 3.0,
np.nan,
np.nan,
],
),
(
"dense",
False,
"keep",
False,
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
),
(
"dense",
False,
"keep",
True,
[
3.0 / 3.0,
3.0 / 3.0,
np.nan,
1.0 / 3.0,
3.0 / 3.0,
2.0 / 3.0,
np.nan,
np.nan,
],
),
("average", True, "bottom", False, [2.0, 2.0, 7.0, 5.0, 2.0, 4.0, 7.0, 7.0]),
(
"average",
True,
"bottom",
True,
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875],
),
("average", False, "bottom", False, [4.0, 4.0, 7.0, 1.0, 4.0, 2.0, 7.0, 7.0]),
(
"average",
False,
"bottom",
True,
[0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875],
),
("min", True, "bottom", False, [1.0, 1.0, 6.0, 5.0, 1.0, 4.0, 6.0, 6.0]),
(
"min",
True,
"bottom",
True,
[0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75],
),
("min", False, "bottom", False, [3.0, 3.0, 6.0, 1.0, 3.0, 2.0, 6.0, 6.0]),
(
"min",
False,
"bottom",
True,
[0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75],
),
("max", True, "bottom", False, [3.0, 3.0, 8.0, 5.0, 3.0, 4.0, 8.0, 8.0]),
("max", True, "bottom", True, [0.375, 0.375, 1.0, 0.625, 0.375, 0.5, 1.0, 1.0]),
("max", False, "bottom", False, [5.0, 5.0, 8.0, 1.0, 5.0, 2.0, 8.0, 8.0]),
(
"max",
False,
"bottom",
True,
[0.625, 0.625, 1.0, 0.125, 0.625, 0.25, 1.0, 1.0],
),
("first", True, "bottom", False, [1.0, 2.0, 6.0, 5.0, 3.0, 4.0, 7.0, 8.0]),
(
"first",
True,
"bottom",
True,
[0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.0],
),
("first", False, "bottom", False, [3.0, 4.0, 6.0, 1.0, 5.0, 2.0, 7.0, 8.0]),
(
"first",
False,
"bottom",
True,
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.0],
),
("dense", True, "bottom", False, [1.0, 1.0, 4.0, 3.0, 1.0, 2.0, 4.0, 4.0]),
("dense", True, "bottom", True, [0.25, 0.25, 1.0, 0.75, 0.25, 0.5, 1.0, 1.0]),
("dense", False, "bottom", False, [3.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 4.0]),
("dense", False, "bottom", True, [0.75, 0.75, 1.0, 0.25, 0.75, 0.5, 1.0, 1.0]),
],
)
def test_rank_args_missing(grps, vals, ties_method, ascending, na_option, pct, exp):
key = np.repeat(grps, len(vals))
orig_vals = vals
vals = list(vals) * len(grps)
if isinstance(orig_vals, np.ndarray):
vals = np.array(vals, dtype=orig_vals.dtype)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option, pct=pct
)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize(
"pct,exp", [(False, [3.0, 3.0, 3.0, 3.0, 3.0]), (True, [0.6, 0.6, 0.6, 0.6, 0.6])]
)
def test_rank_resets_each_group(pct, exp):
df = DataFrame(
{"key": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], "val": [1] * 10}
)
result = df.groupby("key").rank(pct=pct)
exp_df = DataFrame(exp * 2, columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize(
"dtype", ["int64", "int32", "uint64", "uint32", "float64", "float32"]
)
@pytest.mark.parametrize("upper", [True, False])
def test_rank_avg_even_vals(dtype, upper):
if upper:
# use IntegerDtype/FloatingDtype
dtype = dtype[0].upper() + dtype[1:]
dtype = dtype.replace("Ui", "UI")
df = DataFrame({"key": ["a"] * 4, "val": [1] * 4})
df["val"] = df["val"].astype(dtype)
assert df["val"].dtype == dtype
result = df.groupby("key").rank()
exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"])
if upper:
exp_df = exp_df.astype("Float64")
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("na_option", ["keep", "top", "bottom"])
@pytest.mark.parametrize("pct", [True, False])
@pytest.mark.parametrize(
"vals", [["bar", "bar", "foo", "bar", "baz"], ["bar", np.nan, "foo", np.nan, "baz"]]
)
def test_rank_object_dtype(rank_method, ascending, na_option, pct, vals):
df = DataFrame({"key": ["foo"] * 5, "val": vals})
mask = df["val"].isna()
gb = df.groupby("key")
res = gb.rank(method=rank_method, ascending=ascending, na_option=na_option, pct=pct)
# construct our expected by using numeric values with the same ordering
if mask.any():
df2 = DataFrame({"key": ["foo"] * 5, "val": [0, np.nan, 2, np.nan, 1]})
else:
df2 = DataFrame({"key": ["foo"] * 5, "val": [0, 0, 2, 0, 1]})
gb2 = df2.groupby("key")
alt = gb2.rank(
method=rank_method, ascending=ascending, na_option=na_option, pct=pct
)
tm.assert_frame_equal(res, alt)
@pytest.mark.parametrize("na_option", [True, "bad", 1])
@pytest.mark.parametrize("pct", [True, False])
@pytest.mark.parametrize(
"vals",
[
["bar", "bar", "foo", "bar", "baz"],
["bar", np.nan, "foo", np.nan, "baz"],
[1, np.nan, 2, np.nan, 3],
],
)
def test_rank_naoption_raises(rank_method, ascending, na_option, pct, vals):
df = DataFrame({"key": ["foo"] * 5, "val": vals})
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
df.groupby("key").rank(
method=rank_method, ascending=ascending, na_option=na_option, pct=pct
)
def test_rank_empty_group():
# see gh-22519
column = "A"
df = DataFrame({"A": [0, 1, 0], "B": [1.0, np.nan, 2.0]})
result = df.groupby(column).B.rank(pct=True)
expected = Series([0.5, np.nan, 1.0], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby(column).rank(pct=True)
expected = DataFrame({"B": [0.5, np.nan, 1.0]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"input_key,input_value,output_value",
[
([1, 2], [1, 1], [1.0, 1.0]),
([1, 1, 2, 2], [1, 2, 1, 2], [0.5, 1.0, 0.5, 1.0]),
([1, 1, 2, 2], [1, 2, 1, np.nan], [0.5, 1.0, 1.0, np.nan]),
([1, 1, 2], [1, 2, np.nan], [0.5, 1.0, np.nan]),
],
)
def test_rank_zero_div(input_key, input_value, output_value):
# GH 23666
df = DataFrame({"A": input_key, "B": input_value})
result = df.groupby("A").rank(method="dense", pct=True)
expected = DataFrame({"B": output_value})
tm.assert_frame_equal(result, expected)
def test_rank_min_int():
# GH-32859
df = DataFrame(
{
"grp": [1, 1, 2],
"int_col": [
np.iinfo(np.int64).min,
np.iinfo(np.int64).max,
np.iinfo(np.int64).min,
],
"datetimelike": [NaT, datetime(2001, 1, 1), NaT],
}
)
result = df.groupby("grp").rank()
expected = DataFrame(
{"int_col": [1.0, 2.0, 1.0], "datetimelike": [np.nan, 1.0, np.nan]}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("use_nan", [True, False])
def test_rank_pct_equal_values_on_group_transition(use_nan):
# GH#40518
fill_value = np.nan if use_nan else 3
df = DataFrame(
[
[-1, 1],
[-1, 2],
[1, fill_value],
[-1, fill_value],
],
columns=["group", "val"],
)
result = df.groupby(["group"])["val"].rank(
method="dense",
pct=True,
)
if use_nan:
expected = Series([0.5, 1, np.nan, np.nan], name="val")
else:
expected = Series([1 / 3, 2 / 3, 1, 1], name="val")
tm.assert_series_equal(result, expected)
def test_non_unique_index():
# GH 16577
df = DataFrame(
{"A": [1.0, 2.0, 3.0, np.nan], "value": 1.0},
index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4,
)
result = df.groupby([df.index, "A"]).value.rank(ascending=True, pct=True)
expected = Series(
[1.0, 1.0, 1.0, np.nan],
index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4,
name="value",
)
tm.assert_series_equal(result, expected)
def test_rank_categorical():
cat = pd.Categorical(["a", "a", "b", np.nan, "c", "b"], ordered=True)
cat2 = pd.Categorical([1, 2, 3, np.nan, 4, 5], ordered=True)
df = DataFrame({"col1": [0, 1, 0, 1, 0, 1], "col2": cat, "col3": cat2})
gb = df.groupby("col1")
res = gb.rank()
expected = df.astype(object).groupby("col1").rank()
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize("na_option", ["top", "bottom"])
def test_groupby_op_with_nullables(na_option):
    # GH 54206
    # A single all-NA Float64 group: with na_option "top"/"bottom" the NA
    # is placed and ranked, so the min-rank is 1.0 rather than NA.
    df = DataFrame({"x": [None]}, dtype="Float64")
    result = df.groupby("x", dropna=False)["x"].rank(method="min", na_option=na_option)
    expected = Series([1.0], dtype="Float64", name=result.name)
    tm.assert_series_equal(result, expected)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@groupby@methods@test_rank.py@.PATH_END.py
|
{
"filename": "BADASS3_autocorr_example-checkpoint.ipynb",
"repo_name": "remingtonsexton/BADASS3",
"repo_path": "BADASS3_extracted/BADASS3-master/example_notebooks/.ipynb_checkpoints/BADASS3_autocorr_example-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
## Bayesian AGN Decomposition Analysis for SDSS Spectra (BADASS)
### Example: Autocorrelation Analysis
This example shows how to use the built-in autocorrelation analysis when using MCMC
to automatically stop the fit when the sampler chains of free parameters have sufficiently
converged on a solution.
#### Remington O. Sexton$^{1}$, Sara M. Doan$^{2}$, William Matzko$^{2}$, Michael A. Reefe$^{2}$,
$^{1}$United States Naval Observatory, $^{2}$George Mason University
```python
import glob
import time
import natsort
from IPython.display import clear_output
import os
import sys
import psutil
import pathlib
import natsort
# Import BADASS here
BADASS_DIR = pathlib.Path(os.getcwd()).resolve().parent
sys.path.insert(1,str(BADASS_DIR))
import badass as badass
import badass_check_input
from IPython.display import display, HTML
display(HTML("<style>.container { width:85% !important; }</style>"))
```
### BADASS Options
```python
################################## Fit Options #################################
# Fitting Parameters
fit_options={
"fit_reg" : (6450,6800),# Fitting region; Note: Indo-US Library=(3460,9464)
    "good_thresh": 0.0, # percentage of "good" pixels required in fit_reg for fit.
"mask_bad_pix": False, # mask pixels SDSS flagged as 'bad' (careful!)
"mask_emline" : False, # automatically mask lines for continuum fitting.
"mask_metal": False, # interpolate over metal absorption lines for high-z spectra
"fit_stat": "ML", # fit statistic; ML = Max. Like. , OLS = Ordinary Least Squares
"n_basinhop": 15, # Number of consecutive basinhopping thresholds before solution achieved
"test_lines": False, # Perform line/configuration testing for multiple components
"max_like_niter": 25, # number of maximum likelihood iterations
"output_pars": False, # only output free parameters of fit and stop code (diagnostic)
"cosmology": {"H0":70.0, "Om0": 0.30}, # Flat Lam-CDM Cosmology
}
################################################################################
########################### MCMC algorithm parameters ##########################
mcmc_options={
"mcmc_fit" : True, # Perform robust fitting using emcee
"nwalkers" : 100, # Number of emcee walkers; min = 2 x N_parameters
"auto_stop" : True, # Automatic stop using autocorrelation analysis
"conv_type" : "all", # "median", "mean", "all", or (tuple) of parameters
"min_samp" : 1000, # min number of iterations for sampling post-convergence
"ncor_times" : 10.0, # number of autocorrelation times for convergence
"autocorr_tol": 10.0, # percent tolerance between checking autocorr. times
"write_iter" : 100, # write/check autocorrelation times interval
"write_thresh": 100, # iteration to start writing/checking parameters
"burn_in" : 1500, # burn-in if max_iter is reached
"min_iter" : 1000, # min number of iterations before stopping
"max_iter" : 20000, # max number of MCMC iterations
}
################################################################################
############################# Fit component options ############################
comp_options={
"fit_opt_feii" : False, # optical FeII
"fit_uv_iron" : False, # UV Iron
"fit_balmer" : False, # Balmer continuum (<4000 A)
"fit_losvd" : True, # stellar LOSVD
"fit_host" : False, # host template
"fit_power" : True, # AGN power-law
"fit_poly" : True, # Add polynomial continuum component
"fit_narrow" : True, # narrow lines
"fit_broad" : False, # broad lines
"fit_absorp" : False, # absorption lines
"tie_line_disp" : False, # tie line widths (dispersions)
"tie_line_voff" : False, # tie line velocity offsets
}
# Line options for each narrow, broad, and absorption.
narrow_options = {
# "amp_plim": (0,1), # line amplitude parameter limits; default (0,)
"disp_plim": (0,500), # line dispersion parameter limits; default (0,)
"voff_plim": (-500,500), # line velocity offset parameter limits; default (0,)
"line_profile": "gaussian", # line profile shape*
"n_moments": 4, # number of higher order Gauss-Hermite moments (if line profile is gauss-hermite, laplace, or uniform)
}
broad_options ={
# "amp_plim": (0,40), # line amplitude parameter limits; default (0,)
"disp_plim": (500,3000), # line dispersion parameter limits; default (0,)
"voff_plim": (-1000,1000), # line velocity offset parameter limits; default (0,)
"line_profile": "gauss-hermite", # line profile shape*
"n_moments": 4, # number of higher order Gauss-Hermite moments (if line profile is gauss-hermite, laplace, or uniform)
}
absorp_options = {
# "amp_plim": (-1,0), # line amplitude parameter limits; default (0,)
# "disp_plim": (0,10), # line dispersion parameter limits; default (0,)
# "voff_plim": (-2500,2500), # line velocity offset parameter limits; default (0,)
"line_profile": "gaussian", # line profile shape*
"n_moments": 4, # number of higher order Gauss-Hermite moments (if line profile is gauss-hermite, laplace, or uniform)
}
# Choices for line profile shape include 'gaussian', 'lorentzian', 'voigt',
# 'gauss-hermite', 'laplace', and 'uniform'
################################################################################
########################### Emission Lines & Options ###########################
# If not specified, defaults to SDSS-QSO Emission Lines (http://classic.sdss.org/dr6/algorithms/linestable.html)
################################################################################
# User lines overrides the default line list with a user-input line list!
user_lines = {
"NA_NII_6549" :{"center":6549.859, "amp":"NA_NII_6585_AMP/2.93", "disp":"NA_NII_6585_DISP", "voff":"NA_NII_6585_VOFF", "line_type":"na","label":r"[N II]"},
"NA_H_ALPHA" :{"center":6564.632, "amp":"free" , "disp":"NA_NII_6585_DISP", "voff":"NA_NII_6585_VOFF", "line_type":"na","label":r"H$\alpha$"},
"NA_NII_6585" :{"center":6585.278, "amp":"free" , "disp":"free" , "voff":"free" , "line_type":"na","label":r"[N II]"},
"NA_SII_6718" :{"center":6718.294, "amp":"free" , "disp":"NA_NII_6585_DISP", "voff":"NA_NII_6585_VOFF", "line_type":"na","label":r"[S II]"},
"NA_SII_6732" :{"center":6732.668, "amp":"free" , "disp":"NA_NII_6585_DISP", "voff":"NA_NII_6585_VOFF", "line_type":"na","label":r"[S II]"},
}
user_constraints = [
# ("NA_OIII_5007_AMP","NA_OIII_5007_2_AMP"),
# ("NA_OIII_5007_2_DISP","NA_OIII_5007_DISP"),
]
# User defined masked regions (list of tuples)
user_mask = [
# (4840,5015),
]
# Combined lines; define a composite line and calculate
# its combined parameters. These are automatically
# generated for lines with multiple components (parent+child lines)
combined_lines = {
# "H_BETA_COMP" :["NA_H_BETA","BR_H_BETA"],
}
########################## LOSVD Fitting & Options #############################
# For direct fitting of the stellar kinematics (stellar LOSVD), one can
# specify a stellar template library (Indo-US or Vazdekis 2010).
# One can also hold velocity or dispersion constant to avoid template
# convolution during the fitting process.
################################################################################
losvd_options = {
"library" : "IndoUS", # Options: IndoUS, Vazdekis2010
"vel_const" : {"bool":False, "val":0.0},
"disp_const": {"bool":False, "val":250.0},
}
########################## SSP Host Galaxy Template & Options ##################
# The default is zero velocity, 100 km/s dispersion 10 Gyr template from
# the eMILES stellar library.
################################################################################
host_options = {
"age" : [1.0,5.0,10.0], # Gyr; [0.09 Gyr - 14 Gyr]
"vel_const" : {"bool":False, "val":0.0},
"disp_const": {"bool":False, "val":150.0}
}
########################### AGN power-law continuum & Options ##################
# The default is a simple power law.
################################################################################
power_options = {
"type" : "simple" # alternatively, "broken" for smoothly-broken power-law
}
########################### Polynomial Continuum Options #######################
# Options for an additive legendre polynomial or multiplicative polynomial to be
# included in the fit. NOTE: these polynomials do not include the zeroth-order
# (constant) term to avoid degeneracies with other continuum components.
################################################################################
poly_options = {
"apoly" : {"bool": True , "order": 3}, # Legendre additive polynomial
"mpoly" : {"bool": False, "order": 3}, # Legendre multiplicative polynomial
}
############################### Optical FeII options ###############################
# Below are options for fitting optical FeII. For most objects, you don't need to
# perform detailed fitting on FeII (only fit for amplitudes) use the
# Veron-Cetty 2004 template ('VC04') (2-6 free parameters)
# However in NLS1 objects, FeII is much stronger, and sometimes more detailed
# fitting is necessary, use the Kovacevic 2010 template
# ('K10'; 7 free parameters).
# The options are:
# template : VC04 (Veron-Cetty 2004) or K10 (Kovacevic 2010)
# amp_const : constant amplitude (default False)
# disp_const : constant dispersion (default True)
# voff_const : constant velocity offset (default True)
# temp_const : constant temp ('K10' only)
opt_feii_options={
"opt_template" :{"type":"VC04"},
"opt_amp_const" :{"bool":False, "br_opt_feii_val":1.0 , "na_opt_feii_val":1.0},
"opt_disp_const":{"bool":False, "br_opt_feii_val":3000.0, "na_opt_feii_val":500.0},
"opt_voff_const":{"bool":False, "br_opt_feii_val":0.0 , "na_opt_feii_val":0.0},
}
# or
# opt_feii_options={
# "opt_template" :{"type":"K10"},
# "opt_amp_const" :{"bool":False,"f_feii_val":1.0,"s_feii_val":1.0,"g_feii_val":1.0,"z_feii_val":1.0},
# "opt_disp_const":{"bool":False,"opt_feii_val":1500.0},
# "opt_voff_const":{"bool":False,"opt_feii_val":0.0},
# "opt_temp_const":{"bool":True,"opt_feii_val":10000.0},
# }
################################################################################
############################### UV Iron options ################################
uv_iron_options={
"uv_amp_const" :{"bool":False, "uv_iron_val":1.0},
"uv_disp_const" :{"bool":False, "uv_iron_val":3000.0},
"uv_voff_const" :{"bool":True, "uv_iron_val":0.0},
}
################################################################################
########################### Balmer Continuum options ###########################
# For most purposes, only the ratio R and the overall amplitude are free parameters,
# but if you want to go crazy, you can fit everything.
balmer_options = {
"R_const" :{"bool":True, "R_val":1.0}, # ratio between balmer continuum and higher-order balmer lines
"balmer_amp_const" :{"bool":False, "balmer_amp_val":1.0}, # amplitude of overall balmer model (continuum + higher-order lines)
"balmer_disp_const":{"bool":True, "balmer_disp_val":5000.0}, # broadening of higher-order Balmer lines
"balmer_voff_const":{"bool":True, "balmer_voff_val":0.0}, # velocity offset of higher-order Balmer lines
"Teff_const" :{"bool":True, "Teff_val":15000.0}, # effective temperature
"tau_const" :{"bool":True, "tau_val":1.0}, # optical depth
}
################################################################################
############################### Plotting options ###############################
plot_options={
"plot_param_hist" : False,# Plot MCMC histograms and chains for each parameter
"plot_HTML" : False,# make interactive plotly HTML best-fit plot
}
################################################################################
################################ Output options ################################
output_options={
    "write_chain"  : False, # Write MCMC chains for all parameters, fluxes, and
                            # luminosities to a FITS table. We set this to false
# because MCMC_chains.FITS file can become very large,
# especially if you are running multiple objects.
# You only need this if you want to reconstruct full chains
# and histograms.
"write_options": False, # output restart file
"verbose" : True, # print out all steps of fitting process
}
################################################################################
```
### Run BADASS on a single spectrum
The following shows how to fit a single SDSS spectrum.
#### Directory Structure
```python
nobj = 3 # Object in the spec_dir list
########################## Directory Structure #################################
spec_dir = BADASS_DIR.joinpath("example_spectra") # folder with spectra in it
# print(spec_dir)
# Get full list of spectrum folders; these will be the working directories
spec_loc = natsort.natsorted( glob.glob(str(spec_dir.joinpath("*"))) )[nobj]
################################################################################
print(spec_loc)
```
#### Choose Spectrum
```python
file = glob.glob(str(pathlib.Path(spec_loc).joinpath('*.fits')))[0] # Get name of FITS spectra file
print(file)
```
#### Run IRSA Dust Query
To correct for Galactic extinction. This only needs to be done once so that the data is stored locally.
```python
badass_check_input.fetch_IRSA_dust(spec_loc)
```
#### Run
```python
import importlib
importlib.reload(badass)
# Call the main function in BADASS
badass.run_BADASS(pathlib.Path(file),
# options_file = "BADASS_options",
# restart_file = restart_file,
fit_options = fit_options,
mcmc_options = mcmc_options,
comp_options = comp_options,
# New line options
narrow_options = narrow_options,
broad_options = broad_options,
absorp_options = absorp_options,
#
user_lines = user_lines, # User-lines
user_constraints = user_constraints, # User-constraints
user_mask = user_mask, # User-mask
combined_lines = combined_lines,
losvd_options = losvd_options,
host_options = host_options,
power_options = power_options,
poly_options = poly_options,
opt_feii_options = opt_feii_options,
uv_iron_options = uv_iron_options,
balmer_options = balmer_options,
plot_options = plot_options,
output_options = output_options,
sdss_spec = True,
)
#
```
```python
```
|
remingtonsextonREPO_NAMEBADASS3PATH_START.@BADASS3_extracted@BADASS3-master@example_notebooks@.ipynb_checkpoints@BADASS3_autocorr_example-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "testMassSheets.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/tests/unit/NodeGenerators/testMassSheets.py",
"type": "Python"
}
|
from Spheral3d import *
from GenerateEqualMassSheets3d import *
from VoronoiDistributeNodes import distributeNodes3d as distributeNodes
from SpheralTestUtilities import *
from SpheralVisitDump import *
# Parse optional command-line overrides of the test parameters below.
commandLine(nPerh = 2.01,   # target nodes per smoothing scale
            hmin = 1e-5,    # minimum allowed smoothing length
            hmax = 1e6,     # maximum allowed smoothing length
            rmin = 0.0,     # lower coordinate bound of the generated region
            rmax = 2.0,     # upper coordinate bound of the generated region
            scaler = 0.6,   # scale length fed to densityProf below
            nr = 20,        # sheet resolution (nodes per side)
            seed = "ico")   # seed lattice type -- presumably icosahedral; TODO confirm
class densityProf:
    """Exponential radial density profile: rho(r) = 1e6 * exp(-r / R)."""
    def __init__(self, scaleR):
        # e-folding scale length of the profile
        self.R = scaleR
    def __call__(self, r):
        return 1.0e6 * exp(-r / self.R)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
gamma = 1.4   # ratio of specific heats
mu = 2.0      # mean molecular weight
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
# Tabulated cubic B-spline kernel sampled at 1000 points.
WT = TableKernel(BSplineKernel(), 1000)
output("WT")
#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
nodes = makeFluidNodeList("nodes", eos,
                          hmin = hmin,
                          hmax = hmax,
                          nPerh = nPerh,
                          xmin = Vector.one * -1e20,   # effectively unbounded box
                          xmax = Vector.one * 1e20)
output("nodes")
output("nodes.hmin")
output("nodes.hmax")
output("nodes.nodesPerSmoothingScale")
#-------------------------------------------------------------------------------
# Generate them nodes.
#-------------------------------------------------------------------------------
rhoProfile = densityProf(scaler)
# Equal-mass sheets sampling rhoProfile on the cube [rmin, rmax]^3.
# rhoMin is the profile value at rmax -- presumably a density floor; TODO confirm.
generator = GenerateEqualMassSheets3d(nr,nr,rhoProfile,Vector.one * rmin,Vector.one * rmax,
                                      nNodePerh = nPerh,
                                      rhoMin = rhoProfile(rmax))
nodes.numInternalNodes = generator.localNumNodes()
distributeNodes((nodes, generator))
#-------------------------------------------------------------------------------
# Drop a viz file for inspection.
#-------------------------------------------------------------------------------
# Dump both H and its inverse so ellipsoid extents can be inspected in VisIt.
Hfield = nodes.Hfield()
HfieldInv = SymTensorField("H inverse", nodes)
for i in range(nodes.numNodes):
    HfieldInv[i] = Hfield[i].Inverse()
vizfile = SpheralVisitDump(baseFileName = "icosahedron_test",
                           listOfFields = [nodes.massDensity(),
                                           nodes.mass(),
                                           nodes.velocity(),
                                           nodes.specificThermalEnergy(),
                                           Hfield,
                                           HfieldInv],
                           )
vizfile.dump(0.0, 0)
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@tests@unit@NodeGenerators@testMassSheets.py@.PATH_END.py
|
{
"filename": "auxfuncs.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/f2py/auxfuncs.py",
"type": "Python"
}
|
"""
Auxiliary functions for f2py2e.
Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
Copyright 2011 -- present NumPy Developers.
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) LICENSE.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
import pprint
import sys
import re
import types
from functools import reduce
from . import __version__
from . import cfuncs
from .cfuncs import errmess
# Public API of this helper module.  Note: a few names (e.g.
# 'get_f2py_modulename', 'getuseblocks', 'process_f2cmap_dict') are
# re-exported here but defined elsewhere -- presumably imported by the
# consumers via this module; TODO confirm.
__all__ = [
    'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle',
    'getargs2', 'getcallprotoargument', 'getcallstatement',
    'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode',
    'getusercode1', 'getdimension', 'hasbody', 'hascallstatement', 'hascommon',
    'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote',
    'isallocatable', 'isarray', 'isarrayofstrings',
    'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray',
    'iscomplex',
    'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn',
    'isdouble', 'isdummyroutine', 'isexternal', 'isfunction',
    'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux',
    'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict',
    'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace',
    'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical',
    'islogicalfunction', 'islong_complex', 'islong_double',
    'islong_doublefunction', 'islong_long', 'islong_longfunction',
    'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isvariable',
    'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray',
    'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction',
    'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe',
    'isunsigned', 'isunsigned_char', 'isunsigned_chararray',
    'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short',
    'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace',
    'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks',
    'process_f2cmap_dict', 'containscommon'
]
# Version string of this f2py build.
f2py_version = __version__.version
# Debug helper: pretty-print any object.
show = pprint.pprint
# Global run-time options (e.g. 'verbose'); populated by the f2py driver.
options = {}
# Enabled debug flags; consulted by debugcapi().
debugoptions = []
# Non-zero => generate Fortran wrappers for functions (see isfunction_wrap).
wrapfuncs = 1
def outmess(t):
    # Emit a message to stdout unless the global 'verbose' option is off.
    if options.get('verbose', 1):
        sys.stdout.write(t)
def debugcapi(var):
    # True when C-API debugging is enabled; `var` is accepted so this can
    # be used as a rule predicate, but it is not consulted.
    return 'capi' in debugoptions
def _ischaracter(var):
    # Declared `character` and not an external procedure.
    return 'typespec' in var and var['typespec'] == 'character' and \
        not isexternal(var)
def _isstring(var):
    # Identical test to _ischaracter; kept as a separate name for clarity
    # at the call sites below.
    return 'typespec' in var and var['typespec'] == 'character' and \
        not isexternal(var)
def ischaracter_or_characterarray(var):
    # `character` entity without a length (charselector) specification.
    return _ischaracter(var) and 'charselector' not in var
def ischaracter(var):
    # Scalar single character.
    return ischaracter_or_characterarray(var) and not isarray(var)
def ischaracterarray(var):
    # Array of single characters.
    return ischaracter_or_characterarray(var) and isarray(var)
def isstring_or_stringarray(var):
    # `character` entity carrying a length (charselector) specification.
    return _ischaracter(var) and 'charselector' in var
def isstring(var):
    # Scalar character string.
    return isstring_or_stringarray(var) and not isarray(var)
def isstringarray(var):
    # Array of character strings.
    return isstring_or_stringarray(var) and isarray(var)
def isarrayofstrings(var):  # obsolete?
    # leaving out '*' for now so that `character*(*) a(m)` and `character
    # a(m,*)` are treated differently. Luckily `character**` is illegal.
    return isstringarray(var) and var['dimension'][-1] == '(*)'
def isarray(var):
    # Has a dimension specification and is not an external procedure.
    return 'dimension' in var and not isexternal(var)
def isscalar(var):
    # Neither array, string, nor external procedure.
    return not (isarray(var) or isstring(var) or isexternal(var))
def iscomplex(var):
    return isscalar(var) and \
        var.get('typespec') in ['complex', 'double complex']
def islogical(var):
    return isscalar(var) and var.get('typespec') == 'logical'
def isinteger(var):
    return isscalar(var) and var.get('typespec') == 'integer'
def isreal(var):
    return isscalar(var) and var.get('typespec') == 'real'
def get_kind(var):
    """Return the kind value from a variable's kind selector.

    The star form (``integer*8`` -> '8') takes precedence over the
    keyword form (``integer(kind=8)``).  Returns None when *var* has no
    kind selector, or the selector carries neither entry.
    """
    if 'kindselector' not in var:
        return None
    selector = var['kindselector']
    if '*' in selector:
        return selector['*']
    if 'kind' in selector:
        return selector['kind']
    return None
# Kind-based scalar/array predicates.  NOTE: f2py encodes unsigned integer
# kinds as negative values (-1, -2, -4, -8); signed kinds are positive.
def isint1(var):
    return var.get('typespec') == 'integer' \
        and get_kind(var) == '1' and not isarray(var)
def islong_long(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') not in ['integer', 'logical']:
        return 0
    return get_kind(var) == '8'
def isunsigned_char(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') != 'integer':
        return 0
    return get_kind(var) == '-1'
def isunsigned_short(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') != 'integer':
        return 0
    return get_kind(var) == '-2'
def isunsigned(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') != 'integer':
        return 0
    return get_kind(var) == '-4'
def isunsigned_long_long(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') != 'integer':
        return 0
    return get_kind(var) == '-8'
def isdouble(var):
    if not isscalar(var):
        return 0
    if not var.get('typespec') == 'real':
        return 0
    return get_kind(var) == '8'
def islong_double(var):
    if not isscalar(var):
        return 0
    if not var.get('typespec') == 'real':
        return 0
    return get_kind(var) == '16'
def islong_complex(var):
    if not iscomplex(var):
        return 0
    return get_kind(var) == '32'
def iscomplexarray(var):
    return isarray(var) and \
        var.get('typespec') in ['complex', 'double complex']
def isint1array(var):
    return isarray(var) and var.get('typespec') == 'integer' \
        and get_kind(var) == '1'
def isunsigned_chararray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '-1'
def isunsigned_shortarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '-2'
def isunsignedarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '-4'
def isunsigned_long_longarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '-8'
def issigned_chararray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '1'
def issigned_shortarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '2'
def issigned_array(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '4'
def issigned_long_longarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '8'
def isallocatable(var):
    return 'attrspec' in var and 'allocatable' in var['attrspec']
def ismutable(var):
    # Mutable from C's point of view: dimensioned and not a string.
    return not ('dimension' not in var or isstring(var))
# Routine/block classification predicates.
def ismoduleroutine(rout):
    return 'modulename' in rout
def ismodule(rout):
    return 'block' in rout and 'module' == rout['block']
def isfunction(rout):
    return 'block' in rout and 'function' == rout['block']
def isfunction_wrap(rout):
    # Fortran wrapper is generated for non-C, non-external functions
    # when the global wrapfuncs flag is set.
    if isintent_c(rout):
        return 0
    return wrapfuncs and isfunction(rout) and (not isexternal(rout))
def issubroutine(rout):
    return 'block' in rout and 'subroutine' == rout['block']
def issubroutine_wrap(rout):
    # Subroutines need a wrapper only for assumed-shape arguments.
    if isintent_c(rout):
        return 0
    return issubroutine(rout) and hasassumedshape(rout)
def isattr_value(var):
    return 'value' in var.get('attrspec', [])
def hasassumedshape(rout):
    """True if any argument of *rout* has an assumed-shape dimension (':').

    A positive answer is cached under rout['hasassumedshape'] so repeated
    queries skip the scan.
    """
    if rout.get('hasassumedshape'):
        return True
    for name in rout['args']:
        dims = rout['vars'].get(name, {}).get('dimension', [])
        if ':' in dims:
            rout['hasassumedshape'] = True
            return True
    return False
def requiresf90wrapper(rout):
    # Module routines and routines with assumed-shape args need an F90 wrapper.
    return ismoduleroutine(rout) or hasassumedshape(rout)
def isroutine(rout):
    # Any callable block: function or subroutine.
    return isfunction(rout) or issubroutine(rout)
def _funcresultvar(rout):
    # Shared helper for the is*function predicates below: resolve the
    # ``vars`` entry describing a function's result.  Returns None when
    # *rout* is not a function or its result variable is not declared.
    # Consolidates five previously copy-pasted lookups.
    if not isfunction(rout):
        return None
    if 'result' in rout:
        name = rout['result']
    else:
        name = rout['name']
    return rout['vars'].get(name)
def islogicalfunction(rout):
    # Function whose result is a Fortran logical.
    var = _funcresultvar(rout)
    return islogical(var) if var is not None else 0
def islong_longfunction(rout):
    # Function whose result is a kind=8 integer/logical.
    var = _funcresultvar(rout)
    return islong_long(var) if var is not None else 0
def islong_doublefunction(rout):
    # Function whose result is a long double (kind=16 real).
    var = _funcresultvar(rout)
    return islong_double(var) if var is not None else 0
def iscomplexfunction(rout):
    # Function whose result is complex.
    var = _funcresultvar(rout)
    return iscomplex(var) if var is not None else 0
def iscomplexfunction_warn(rout):
    # Same as iscomplexfunction, but also emit a compatibility warning:
    # returning complex values is fragile across Fortran compiler ABIs.
    if iscomplexfunction(rout):
        outmess("""\
**************************************************************
Warning: code with a function returning complex value
may not work correctly with your Fortran compiler.
When using GNU gcc/g77 compilers, codes should work
correctly for callbacks with:
f2py -c -DF2PY_CB_RETURNCOMPLEX
**************************************************************\n""")
        return 1
    return 0
def isstringfunction(rout):
    # Function whose result is a character string.
    var = _funcresultvar(rout)
    return isstring(var) if var is not None else 0
def hasexternals(rout):
    # Routine has a non-empty list of external (callback) arguments.
    return 'externals' in rout and rout['externals']
def isthreadsafe(rout):
    # User marked the routine `threadsafe` via an f2py directive.
    return 'f2pyenhancements' in rout and \
           'threadsafe' in rout['f2pyenhancements']
def hasvariables(rout):
    return 'vars' in rout and rout['vars']
def isoptional(var):
    # `optional` attribute wins only when `required` is absent and the
    # argument is not hidden.
    return ('attrspec' in var and 'optional' in var['attrspec'] and
            'required' not in var['attrspec']) and isintent_nothide(var)
def isexternal(var):
    return 'attrspec' in var and 'external' in var['attrspec']
def getdimension(var):
    # Extract the parenthesised dimension list from the first matching
    # 'dimension(...)' entry of attrspec, as a list of strings.
    dimpattern = r"\((.*?)\)"
    if 'attrspec' in var.keys():
        if any('dimension' in s for s in var['attrspec']):
            return [re.findall(dimpattern, v) for v in var['attrspec']][0]
def isrequired(var):
    return not isoptional(var) and isintent_nothide(var)
def isintent_in(var):
    """Return 1 when *var* is an input argument.

    No intent specification at all means input (the Fortran default).
    The flag checks below preserve the historical precedence order:
    hide/inplace beat in, which beats out/inout/outin.
    """
    if 'intent' not in var:
        return 1
    intent = var['intent']
    for flag, verdict in (('hide', 0), ('inplace', 0), ('in', 1),
                          ('out', 0), ('inout', 0), ('outin', 0)):
        if flag in intent:
            return verdict
    return 1
def isintent_inout(var):
    # inout/outin without an explicit 'in', and not hidden or inplace.
    return ('intent' in var and ('inout' in var['intent'] or
            'outin' in var['intent']) and 'in' not in var['intent'] and
            'hide' not in var['intent'] and 'inplace' not in var['intent'])
def isintent_out(var):
    return 'out' in var.get('intent', [])
def isintent_hide(var):
    # Hidden explicitly, or implicitly because it is pure output.
    return ('intent' in var and ('hide' in var['intent'] or
            ('out' in var['intent'] and 'in' not in var['intent'] and
                (not l_or(isintent_inout, isintent_inplace)(var)))))
def isintent_nothide(var):
    return not isintent_hide(var)
def isintent_c(var):
    return 'c' in var.get('intent', [])
def isintent_cache(var):
    return 'cache' in var.get('intent', [])
def isintent_copy(var):
    return 'copy' in var.get('intent', [])
def isintent_overwrite(var):
    return 'overwrite' in var.get('intent', [])
def isintent_callback(var):
    return 'callback' in var.get('intent', [])
def isintent_inplace(var):
    return 'inplace' in var.get('intent', [])
def isintent_aux(var):
    return 'aux' in var.get('intent', [])
def isintent_aligned4(var):
    return 'aligned4' in var.get('intent', [])
def isintent_aligned8(var):
    return 'aligned8' in var.get('intent', [])
def isintent_aligned16(var):
    return 'aligned16' in var.get('intent', [])
# Map of intent predicates to the C macro names emitted for them.
isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT',
                 isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE',
                 isintent_cache: 'INTENT_CACHE',
                 isintent_c: 'INTENT_C', isoptional: 'OPTIONAL',
                 isintent_inplace: 'INTENT_INPLACE',
                 isintent_aligned4: 'INTENT_ALIGNED4',
                 isintent_aligned8: 'INTENT_ALIGNED8',
                 isintent_aligned16: 'INTENT_ALIGNED16',
                 }
def isprivate(var):
    return 'attrspec' in var and 'private' in var['attrspec']
def isvariable(var):
    # heuristic to find public/private declarations of filtered subroutines
    if len(var) == 1 and 'attrspec' in var and \
       var['attrspec'][0] in ('public', 'private'):
        is_var = False
    else:
        is_var = True
    return is_var
def hasinitvalue(var):
    # Variable declaration carried an initializer ('=').
    return '=' in var
def hasinitvalueasstring(var):
    if not hasinitvalue(var):
        return 0
    return var['='][0] in ['"', "'"]
def hasnote(var):
    return 'note' in var
def hasresultnote(rout):
    # Whether the function's result variable carries a `note` entry.
    if not isfunction(rout):
        return 0
    if 'result' in rout:
        a = rout['result']
    else:
        a = rout['name']
    if a in rout['vars']:
        return hasnote(rout['vars'][a])
    return 0
def hascommon(rout):
    return 'common' in rout
def containscommon(rout):
    # Recursive: true if this block or any nested body block has a COMMON.
    if hascommon(rout):
        return 1
    if hasbody(rout):
        for b in rout['body']:
            if containscommon(b):
                return 1
    return 0
def containsmodule(block):
    # Recursive: true if this block or any nested body block is a module.
    if ismodule(block):
        return 1
    if not hasbody(block):
        return 0
    for b in block['body']:
        if containsmodule(b):
            return 1
    return 0
def hasbody(rout):
    return 'body' in rout
def hascallstatement(rout):
    return getcallstatement(rout) is not None
# Constant predicates, usable wherever a rule expects a callable.
def istrue(var):
    return 1
def isfalse(var):
    return 0
class F2PYError(Exception):
    # Base error type raised by f2py rule processing.
    pass
class throw_error:
    # Callable predicate that raises F2PYError with a fixed message when
    # invoked; used in rules to flag unsupported constructs.
    def __init__(self, mess):
        self.mess = mess
    def __call__(self, var):
        mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess)
        raise F2PYError(mess)
def l_and(*f):
    """Combine predicates with short-circuit ``and``.

    Returns a callable equivalent to ``lambda v: f[0](v) and f[1](v) and
    ...``: like the ``and`` operator, it yields the first falsy
    intermediate result, otherwise the last result.  Implemented with a
    plain closure instead of the previous eval()-built lambda (clearer,
    and avoids string-assembled code).
    """
    def conjunction(v):
        result = True
        for g in f:
            result = g(v)
            if not result:
                return result
        return result
    return conjunction
def l_or(*f):
    """Combine predicates with short-circuit ``or``.

    Returns a callable equivalent to ``lambda v: f[0](v) or f[1](v) or
    ...``: yields the first truthy intermediate result, otherwise the
    last result.
    """
    def disjunction(v):
        result = False
        for g in f:
            result = g(v)
            if result:
                return result
        return result
    return disjunction
def l_not(f):
    """Return a callable computing ``not f(v)`` (always a bool)."""
    def negation(v):
        return not f(v)
    return negation
def isdummyroutine(rout):
    # A routine declared with an empty `fortranname` directive is a dummy
    # (Python-side only) routine.
    try:
        return rout['f2pyenhancements']['fortranname'] == ''
    except KeyError:
        return 0
def getfortranname(rout):
    # Resolve the Fortran-side name: an explicit non-empty `fortranname`
    # directive wins, otherwise fall back to the routine's own name.
    try:
        name = rout['f2pyenhancements']['fortranname']
        if name == '':
            raise KeyError
        if not name:
            errmess('Failed to use fortranname from %s\n' %
                    (rout['f2pyenhancements']))
            raise KeyError
    except KeyError:
        name = rout['name']
    return name
def getmultilineblock(rout, blockname, comment=1, counter=0):
    # Fetch the counter-th multiline f2py directive block *blockname*,
    # stripping its triple-quote delimiters.  With comment=1 the content
    # is bracketed by C /* start/end */ marker comments.  Returns None
    # when the block is absent or counter is out of range.
    try:
        r = rout['f2pyenhancements'].get(blockname)
    except KeyError:
        return
    if not r:
        return
    if counter > 0 and isinstance(r, str):
        return
    if isinstance(r, list):
        if counter >= len(r):
            return
        r = r[counter]
    if r[:3] == "'''":
        if comment:
            r = '\t/* start ' + blockname + \
                ' multiline (' + repr(counter) + ') */\n' + r[3:]
        else:
            r = r[3:]
        if r[-3:] == "'''":
            if comment:
                r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/'
            else:
                r = r[:-3]
        else:
            errmess("%s multiline block should end with `'''`: %s\n"
                    % (blockname, repr(r)))
    return r
def getcallstatement(rout):
return getmultilineblock(rout, 'callstatement')
def getcallprotoargument(rout, cb_map={}):
    """Build the C prototype argument list for calling routine *rout*.

    Returns a comma-separated string of C types ('void' when empty).
    An explicit ``callprotoargument`` enhancement takes precedence;
    returns None (with a warning) when a ``callstatement`` exists
    without one.  *cb_map* maps argument names to callback typedef
    prefixes.
    """
    r = getmultilineblock(rout, 'callprotoargument', comment=0)
    if r:
        return r
    if hascallstatement(rout):
        outmess(
            'warning: callstatement is defined without callprotoargument\n')
        return
    from .capi_maps import getctype
    arg_types, arg_types2 = [], []
    if l_and(isstringfunction, l_not(isfunction_wrap))(rout):
        # String-valued functions return through hidden leading
        # (char*, size_t) arguments.
        arg_types.extend(['char*', 'size_t'])
    for n in rout['args']:
        var = rout['vars'][n]
        if isintent_callback(var):
            continue
        if n in cb_map:
            ctype = cb_map[n] + '_typedef'
        else:
            ctype = getctype(var)
        if l_and(isintent_c, l_or(isscalar, iscomplex))(var):
            # intent(c) scalars/complex are passed by value.
            pass
        elif isstring(var):
            pass
        else:
            if not isattr_value(var):
                ctype = ctype + '*'
        if (isstring(var)
            or isarrayofstrings(var) # obsolete?
            or isstringarray(var)):
            # Hidden trailing string-length arguments.
            arg_types2.append('size_t')
        arg_types.append(ctype)
    proto_args = ','.join(arg_types + arg_types2)
    if not proto_args:
        proto_args = 'void'
    return proto_args
def getusercode(rout):
    """Return the first ``usercode`` multiline block for *rout*."""
    return getmultilineblock(rout, 'usercode')
def getusercode1(rout):
    """Return the second ``usercode`` multiline block for *rout*."""
    return getmultilineblock(rout, 'usercode', counter=1)
def getpymethoddef(rout):
    """Return the ``pymethoddef`` multiline block for *rout*."""
    return getmultilineblock(rout, 'pymethoddef')
def getargs(rout):
    """Return ``(args, sortargs)`` for routine block *rout*.

    *args* is the declared argument list.  *sortargs* follows the order
    given by ``rout['sortvars']`` (when present) for arguments it
    mentions, with any remaining arguments appended in declaration
    order; without ``sortvars`` it is the declaration order itself.
    """
    args = rout.get('args', [])
    if 'args' in rout and 'sortvars' in rout:
        sortargs = [a for a in rout['sortvars'] if a in args]
        for a in args:
            if a not in sortargs:
                sortargs.append(a)
    elif 'args' in rout:
        sortargs = rout['args']
    else:
        sortargs = []
    return args, sortargs
def getargs2(rout):
    """Return ``(args, sortargs)`` like ``getargs``, but with
    intent(aux) variables prepended to the argument list."""
    declared = rout.get('args', [])
    auxvars = [name for name in rout['vars'].keys()
               if isintent_aux(rout['vars'][name]) and name not in declared]
    args = auxvars + declared
    if 'sortvars' in rout:
        sortargs = [a for a in rout['sortvars'] if a in args]
        for a in args:
            if a not in sortargs:
                sortargs.append(a)
    else:
        sortargs = auxvars + rout['args']
    return args, sortargs
def getrestdoc(rout):
    """Return the multiline documentation attached to a
    ``python module`` block in *rout*, or None when absent."""
    if 'f2pymultilines' not in rout:
        return None
    if rout['block'] == 'python module':
        key = (rout['block'], rout['name'])
    else:
        key = None
    return rout['f2pymultilines'].get(key)
def gentitle(name):
    """Return a C comment banner (~80 columns) with *name* centered
    between runs of asterisks."""
    pad = '*' * ((80 - len(name) - 6) // 2)
    return '/*%s %s %s*/' % (pad, name, pad)
def flatlist(lst):
    """Recursively flatten arbitrarily nested lists into one flat list.

    A non-list argument is wrapped in a single-element list.
    """
    if not isinstance(lst, list):
        return [lst]
    flattened = []
    for item in lst:
        flattened.extend(flatlist(item))
    return flattened
def stripcomma(s):
    """Return *s* without a single trailing comma, if one is present."""
    return s[:-1] if s.endswith(',') else s
def replace(str, d, defaultsep=''):
    """Substitute ``#key#`` placeholders in *str* with values from *d*.

    List values are joined using the separator registered for that key
    in ``d['separatorsfor']`` (falling back to *defaultsep*).  Either
    argument may itself be a list, in which case the substitution maps
    over it element-wise.

    NOTE(review): the parameter name ``str`` shadows the builtin; it is
    kept unchanged for backward compatibility with keyword callers.
    """
    if isinstance(d, list):
        return [replace(str, _m, defaultsep) for _m in d]
    if isinstance(str, list):
        return [replace(_m, d, defaultsep) for _m in str]
    # Keys are walked twice so that replacement values which themselves
    # contain #key# placeholders get one extra round of substitution.
    for k in 2 * list(d.keys()):
        if k == 'separatorsfor':
            continue
        if 'separatorsfor' in d and k in d['separatorsfor']:
            sep = d['separatorsfor'][k]
        else:
            sep = defaultsep
        if isinstance(d[k], list):
            str = str.replace('#%s#' % (k), sep.join(flatlist(d[k])))
        else:
            str = str.replace('#%s#' % (k), d[k])
    return str
def dictappend(rd, ar):
    """Merge rule dictionary (or list of rule dictionaries) *ar* into
    *rd* in place and return *rd*.

    String values in *rd* are promoted to lists before extending;
    nested dicts are merged recursively, except ``'separatorsfor'``
    where entries already present in *rd* win.  Keys starting with
    ``'_'`` are skipped.
    """
    if isinstance(ar, list):
        for a in ar:
            rd = dictappend(rd, a)
        return rd
    for k in ar.keys():
        if k[0] == '_':
            continue
        if k in rd:
            if isinstance(rd[k], str):
                # Promote scalar to list so new values can be appended.
                rd[k] = [rd[k]]
            if isinstance(rd[k], list):
                if isinstance(ar[k], list):
                    rd[k] = rd[k] + ar[k]
                else:
                    rd[k].append(ar[k])
            elif isinstance(rd[k], dict):
                if isinstance(ar[k], dict):
                    if k == 'separatorsfor':
                        # Existing separators take precedence; only add new ones.
                        for k1 in ar[k].keys():
                            if k1 not in rd[k]:
                                rd[k][k1] = ar[k][k1]
                    else:
                        rd[k] = dictappend(rd[k], ar[k])
        else:
            rd[k] = ar[k]
    return rd
def applyrules(rules, d, var={}):
    """Apply rule dictionary (or list of rule dictionaries) *rules*
    against the replacement mapping *d*, returning the expanded results.

    - A list of rules is applied in order, accumulating via
      ``dictappend``; a rule whose result contains ``'_break'`` stops
      the iteration.
    - A rule with a ``'_check'`` predicate is skipped when the predicate
      fails for *var*.
    - String values have their ``#key#`` placeholders expanded with *d*;
      dict values map predicate functions to conditionally included
      snippets; single-element list results are unwrapped and empty
      lists dropped.
    """
    ret = {}
    if isinstance(rules, list):
        for r in rules:
            rr = applyrules(r, d, var)
            ret = dictappend(ret, rr)
            if '_break' in rr:
                break
        return ret
    if '_check' in rules and (not rules['_check'](var)):
        return ret
    if 'need' in rules:
        # Register any C support functions this rule depends on.
        res = applyrules({'needs': rules['need']}, d, var)
        if 'needs' in res:
            cfuncs.append_needs(res['needs'])
    for k in rules.keys():
        if k == 'separatorsfor':
            # Separator config is passed through untouched.
            ret[k] = rules[k]
            continue
        if isinstance(rules[k], str):
            ret[k] = replace(rules[k], d)
        elif isinstance(rules[k], list):
            ret[k] = []
            for i in rules[k]:
                ar = applyrules({k: i}, d, var)
                if k in ar:
                    ret[k].append(ar[k])
        elif k[0] == '_':
            continue
        elif isinstance(rules[k], dict):
            ret[k] = []
            for k1 in rules[k].keys():
                # Dict keys may be predicate functions selecting snippets.
                if isinstance(k1, types.FunctionType) and k1(var):
                    if isinstance(rules[k][k1], list):
                        for i in rules[k][k1]:
                            if isinstance(i, dict):
                                res = applyrules({'supertext': i}, d, var)
                                if 'supertext' in res:
                                    i = res['supertext']
                                else:
                                    i = ''
                            ret[k].append(replace(i, d))
                    else:
                        i = rules[k][k1]
                        if isinstance(i, dict):
                            res = applyrules({'supertext': i}, d)
                            if 'supertext' in res:
                                i = res['supertext']
                            else:
                                i = ''
                        ret[k].append(replace(i, d))
        else:
            errmess('applyrules: ignoring rule %s.\n' % repr(rules[k]))
        if isinstance(ret[k], list):
            # Unwrap single-element results and drop empty ones.
            if len(ret[k]) == 1:
                ret[k] = ret[k][0]
            if ret[k] == []:
                del ret[k]
    return ret
# Matches a `python module <name>` header line; used to pull the module
# name out of a signature (.pyf) file.
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
                                     re.I).match
# Matches only `*__user__*` module names, which declare user callback
# interfaces rather than wrapper modules.
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
                                          r'__user__[\w_]*)', re.I).match
def get_f2py_modulename(source):
    """Return the python module name declared in signature file *source*.

    ``*__user__*`` (callback interface) module declarations are skipped;
    returns None when no suitable declaration is found.
    """
    with open(source) as fh:
        for line in fh:
            match = _f2py_module_name_match(line)
            if not match:
                continue
            if _f2py_user_module_name_match(line):
                # User callback modules are not wrapper modules.
                continue
            return match.group('name')
    return None
def getuseblocks(pymod):
    """Collect the names of all Fortran ``use``-d modules from the
    blocks nested inside python-module block *pymod*.

    Names containing ``__`` (internal/user-callback names) are excluded.
    The original called ``modblock.get("use")`` twice per block; the
    lookup is now done once.
    """
    all_uses = []
    for inner in pymod['body']:
        for modblock in inner['body']:
            used = modblock.get('use')
            if used:
                all_uses.extend(name for name in used if '__' not in name)
    return all_uses
def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False):
    """Merge *new_map* into the Fortran-to-C type map *f2cmap_all*.

    All Fortran type names and kind specifiers are lower-cased before
    merging.  Each candidate C type is validated against *c2py_map*;
    invalid entries are skipped (reported via ``errmess`` when
    *verbose*), and redefinitions of existing entries are reported via
    ``outmess``.

    Parameters
    ----------
    f2cmap_all : dict
        Existing Fortran-to-C map (dict of dicts), updated in place.
    new_map : dict
        New mappings to merge, same structure as *f2cmap_all*.
    c2py_map : dict
        Valid C types (keys) used to validate entries of *new_map*.
    verbose : bool
        When True, report each mapping made and each entry ignored.

    Returns
    -------
    tuple of (dict, list)
        The updated map and the list of C types successfully mapped.
    """
    f2cmap_mapped = []
    # Normalise every key to lowercase up front.
    lowered = {
        ftype.lower(): {spec.lower(): ctype for spec, ctype in specs.items()}
        for ftype, specs in new_map.items()
    }
    for ftype, specs in lowered.items():
        target = f2cmap_all.setdefault(ftype, {})
        for spec, ctype in specs.items():
            if ctype not in c2py_map:
                if verbose:
                    errmess(
                        "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"
                        % (ftype, spec, ctype, ctype, list(c2py_map.keys()))
                    )
                continue
            if spec in target:
                outmess(
                    "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"
                    % (ftype, spec, target[spec], ctype)
                )
            target[spec] = ctype
            if verbose:
                outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (ftype, spec, ctype))
            f2cmap_mapped.append(ctype)
    return f2cmap_all, f2cmap_mapped
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@f2py@auxfuncs.py@.PATH_END.py
|
{
"filename": "CHANGELOG.md",
"repo_name": "mj-will/nessai",
"repo_path": "nessai_extracted/nessai-main/CHANGELOG.md",
"type": "Markdown"
}
|
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- Add experimental support for discrete parameters (https://github.com/mj-will/nessai/pull/401)
- Support remapping to the unit hypercube with the standard sampler (https://github.com/mj-will/nessai/pull/398)
- Support reparameterisations that are not one-to-one (https://github.com/mj-will/nessai/pull/418)
- Support user-defined flow proposal classes via the `nessai.proposals` entry point (https://github.com/mj-will/nessai/pull/411)
- Add an `inverse` method to `FlowModel` (https://github.com/mj-will/nessai/pull/419)
### Changed
- Rework progress bar to no longer use tqdm (https://github.com/mj-will/nessai/pull/422)
- `nessai.proposal.flowproposal.FlowProposal` submodule has been refactored into two classes (https://github.com/mj-will/nessai/pull/419)
- `N` has been renamed to `n_samples` in `FlowProposal.populate`
### Fixed
- Fix outputs of `FlowProposal.backwards_pass` when `discard_nans` and `return_z` are both `True` (https://github.com/mj-will/nessai/pull/419)
### Deprecated
- The `flow_class` argument for `NestedSampler` is deprecated in favour of `flow_proposal_class` (https://github.com/mj-will/nessai/pull/411)
### Removed
- The deprecated `FlowProposal.rejection_sampling` method has been removed (https://github.com/mj-will/nessai/pull/419)
## [0.13.2]
### Fixed
- Handle error when `linear_transform='None'` which occurs when using `bilby_pipe` after the `flow_config` changes (https://github.com/mj-will/nessai/pull/414)
## [0.13.1]
### Changed
- Make tests that require `faiss` are optional in the test suite
(https://github.com/mj-will/nessai/pull/408)
## [0.13.0]
### Added
- Add p-value and additional panel to indices plot
(https://github.com/mj-will/nessai/pull/391)
- Add support for numpy 2.0 (https://github.com/mj-will/nessai/pull/387)
- Add support for arbitrary priors in unit hypercube with the importance sampler
(https://github.com/mj-will/nessai/pull/377)
### Changed
- Simplify rescaling/reparameterisation configuration
(https://github.com/mj-will/nessai/pull/395)
- The default reparameterisation has been changed from rescale-to-bounds to
z-score standardisation (https://github.com/mj-will/nessai/pull/395)
- Change default seaborn style to avoid plotting issues on some systems
(https://github.com/mj-will/nessai/pull/397)
- Rework flow configuration to use `flow_config` and `training_config` keyword
arguments (https://github.com/mj-will/nessai/pull/394)
- Skip nested sampling loop and populating live points if run is already finalised
(https://github.com/mj-will/nessai/pull/393, https://github.com/mj-will/nessai/pull/400)
### Deprecated
- Specifying `model_config` in the `flow_config` dictionary is now deprecated
(https://github.com/mj-will/nessai/pull/394)
- `FlowProposal.names`, `FlowProposal.rescaled_names` and
`FlowProposal.rescaled_dims` are now deprecated
(https://github.com/mj-will/nessai/pull/395)
### Removed
- `rescale_parameters`, `boundary_inversion`, `inversion_type`, `rescale_bounds`,
  `update_bounds`, `detect_edges`, `detect_edges_kwargs`
have all been removed in favour of using the reparameterisations directly
(https://github.com/mj-will/nessai/pull/395)
- Drop support for Python 3.8 (https://github.com/mj-will/nessai/pull/396)
### Experimental
- Add experimental `ClusteringFlowProposal`
(https://github.com/mj-will/nessai/pull/380)
- Add experimental support for using flows directly from `glasflow`
(https://github.com/mj-will/nessai/pull/386)
## [0.12.0]
This release reworks large parts of the importance nested sampler to enable
drawing i.i.d samples during sampling.
The high-level API remains unchanged but the APIs for the
`ImportanceNestedSampler` and `ImportanceFlowProposal` classes have changed.
Existing runs of the importance nested sampler cannot be resumed with this
version.
### Added
- Add option to accumulate weights during rejection sampling ([#358](https://github.com/mj-will/nessai/pull/358))
- Add option to draw i.i.d samples during sampling when using the importance nested sampler ([#362](https://github.com/mj-will/nessai/pull/362))
- Add the `OrderedSamples` class for handling samples in the importance nested sampler ([#362](https://github.com/mj-will/nessai/pull/362))
- Add the `in_unit_hypercube` and `sample_unit_hypercube` methods to the model class `Model` ([#362](https://github.com/mj-will/nessai/pull/362))
- Add `log-posterior-weights` to `nessai.samplers.importance.ImportanceNestedSampler` (https://github.com/mj-will/nessai/pull/382)
- Add explicit support for Python 3.12 (https://github.com/mj-will/nessai/pull/374)
- Add fractional evidence stopping criterion to the importance nested sampler (https://github.com/mj-will/nessai/pull/371)
- Add option to recompute `log_q` when resuming the importance nested sampler instead of saving it (https://github.com/mj-will/nessai/pull/368)
### Changed
- Standardize how sampling history (run statistics) are stored ([#364](https://github.com/mj-will/nessai/pull/364))
- The importance nested sampler no longer requires the `to_unit_hypercube` method to run ([#362](https://github.com/mj-will/nessai/pull/362))
- The `ratio` stopping criterion is now computed using the log-likelihood threshold instead of the live points ([#362](https://github.com/mj-will/nessai/pull/362))
- Change various defaults related to the importance nested sampler ([#362](https://github.com/mj-will/nessai/pull/362))
- Random seed is now randomly set if not specified and saved in the result file (https://github.com/mj-will/nessai/pull/378)
- Rework how weights are handled in the importance nested sampler (https://github.com/mj-will/nessai/pull/376)
### Fixed
- Fix bug with the legend in `nessai.plot.plot_1d_comparison` ([#360](https://github.com/mj-will/nessai/pull/360))
- Fix bug with `truths` argument in `nessai.plot.corner_plot` (https://github.com/mj-will/nessai/pull/375)
### Removed
- Remove the deprecated `max_threads` argument from `nessai.flowsampler.FlowSampler` and `nessai.utils.threading.configure_threads` ([#363](https://github.com/mj-will/nessai/pull/363))
## [0.11.0]
### Added
- Add log-posterior weights to the result dictionary and file ([#341](https://github.com/mj-will/nessai/pull/341))
- Add support for checkpoint callbacks ([#355](https://github.com/mj-will/nessai/pull/355))
### Changed
- Explicitly support and test against Python 3.11 ([#352](https://github.com/mj-will/nessai/pull/352))
## [0.10.1]
### Fixed
- Relax tolerance used when checking if the log-prior is vectorised such that bilby priors are treated as vectorised ([#343](https://github.com/mj-will/nessai/pull/343))
## [0.10.0]
### Added
- `birth_log_likelihoods` to `NestedSampler` and `logL_birth` to the result dictionary ([#318](https://github.com/mj-will/nessai/pull/318))
- Support for non-vectorised log-prior functions ([#330](https://github.com/mj-will/nessai/pull/330))
- Add the live points to the trace plot for the standard nested sampler ([#334](https://github.com/mj-will/nessai/pull/334))
- Add an option to resume from a pickle object rather than a resume file ([#337](https://github.com/mj-will/nessai/pull/337))
### Changed
- Nested samples are now stored as an array in the result object rather than a dictionary ([#318](https://github.com/mj-will/nessai/pull/318))
- Reduce the size of importance nested sampling checkpoints ([#327](https://github.com/mj-will/nessai/pull/327))
- Rename `nessai.utils.bilbyutils` to `nessai.utils.settings` ([#332](https://github.com/mj-will/nessai/pull/332))
- Changed name of `dZ` to `dlogZ`, this does not change how the stopping criterion is calculated ([#333](https://github.com/mj-will/nessai/pull/333))
### Fixed
- Fix a bug with the prior bounds that occurred when `bounds` and `names` had different orders ([#329](https://github.com/mj-will/nessai/pull/329))
- Fix a bug with `close_pool` that lead to the pool being closed irrespective of the value ([#331](https://github.com/mj-will/nessai/pull/331))
### Deprecated
- `nessai.utils.bilbyutils` is deprecated in favour on `nessai.utils.settings` and will be removed in a future release ([#332](https://github.com/mj-will/nessai/pull/332))
## [0.9.1]
### Fixed
- Fix duplicate parameters when adding reparameterisations (see [#320](https://github.com/mj-will/nessai/issues/320) for details) ([#321](https://github.com/mj-will/nessai/pull/321))
## [0.9.0]
### Added
- Add importance nested sampler ([#285](https://github.com/mj-will/nessai/pull/285))
- Add support for using regex for specifying parameters in the reparametersations dictionary ([#312](https://github.com/mj-will/nessai/pull/312))
### Changed
- Enable constant volume mode with uniform nball latent prior ([#306](https://github.com/mj-will/nessai/pull/306))
- Pass kwargs in RealNVP to the coupling class ([#307](https://github.com/mj-will/nessai/pull/307))
- Use log-scale on state plot ([#308](https://github.com/mj-will/nessai/pull/308))
- Support `forkserver` and `spawn` multiprocessing start methods ([#313](https://github.com/mj-will/nessai/pull/313))
### Fixed
- Fix resume bug with fallback reparameterisation ([#302](https://github.com/mj-will/nessai/pull/302))
- Fix bugs caused by numpy 1.25 ([#311](https://github.com/mj-will/nessai/pull/311))
## [0.8.1]
### Fixed
- Fix incorrect sign in delta phase reparameterisation ([#292](https://github.com/mj-will/nessai/pull/292))
- Remove maximum scipy version ([#295](https://github.com/mj-will/nessai/pull/295))
- Specify three quantiles in default corner kwargs as required by corner 2.2.2 ([#298](https://github.com/mj-will/nessai/pull/298))
## [0.8.0]
### Added
- Add `DeltaPhaseReparameterisation` for GW analyses. ([#244](https://github.com/mj-will/nessai/pull/244))
- Add `nessai.utils.sorting`. ([#244](https://github.com/mj-will/nessai/pull/244))
- Add `log_posterior_weights` and `effective_n_posterior_samples` to the integral state object. ([#248](https://github.com/mj-will/nessai/pull/248))
- Add a check for the multiprocessing start method when using `n_pool`. ([#250](https://github.com/mj-will/nessai/pull/250))
- Add option to reverse reparameterisations in `FlowProposal`.
- Add `disable_vectorisation` to `FlowSampler`. ([#254](https://github.com/mj-will/nessai/pull/254))
- Add `likelihood_chunksize` which allows the user to limit how many points are passed to a vectorised likelihood function at once. ([#256](https://github.com/mj-will/nessai/pull/256))
- Add `allow_multi_valued_likelihood` which allows for multi-valued likelihoods, e.g. that include numerical integration. ([#257](https://github.com/mj-will/nessai/pull/257))
- Add `parameters` keyword argument to `nessai.plot.plot_trace` and pass additional keyword arguments to the plotting function. ([#259](https://github.com/mj-will/nessai/pull/259))
- Add option to construct live points without non-sampling parameters. ([#266](https://github.com/mj-will/nessai/pull/266))
- Add option to use a different estimate of the shrinkage. Default remains unchanged. ([#248](https://github.com/mj-will/nessai/pull/248), [#269](https://github.com/mj-will/nessai/pull/269))
- Add `ScaleAndShift` reparameterisation which includes Z-score normalisation. ([#273](https://github.com/mj-will/nessai/pull/273))
- Add option to specify default result file extension. ([#274](https://github.com/mj-will/nessai/pull/274))
### Changed
- Refactor `nessai.reparameterisations` into a submodule. ([#241](https://github.com/mj-will/nessai/pull/241))
- Use `torch.inference_mode` instead of `torch.no_grad`. ([#245](https://github.com/mj-will/nessai/pull/245))
- Changed `CombinedReparameterisations` to sort and add reparameterisations based on their requirements. ([#244](https://github.com/mj-will/nessai/pull/244), [#253](https://github.com/mj-will/nessai/pull/253))
- Refactor `nessai.evidence._NSIntegralState` to inherit from a base class. ([#248](https://github.com/mj-will/nessai/pull/248))
- Revert default logging level to `INFO`. ([#249](https://github.com/mj-will/nessai/pull/249))
- Rework logging statements to reduce the amount of information printed by default. ([#249](https://github.com/mj-will/nessai/pull/249))
- Refactor `nessai.proposal.FlowProposal.verify_rescaling` to be stricter. ([#253](https://github.com/mj-will/nessai/pull/253))
- Truth input in `nessai.plot.corner_plot` can now be an iterable or a dictionary. ([#255](https://github.com/mj-will/nessai/pull/255))
- Tweak how the prior volume is computed for the final nested sample. This will also change the evidence and posterior weights. ([#248](https://github.com/mj-will/nessai/pull/248), [#269](https://github.com/mj-will/nessai/pull/269))
- Stricter handling of keyword arguments passed to `NestedSampler`. Unknown keyword arguments will now raise an error. ([#270](https://github.com/mj-will/nessai/pull/270))
- Rework `nessai.config` to have `config.livepoints` and `config.plot` which contain global settings. Some of the setting names have also changed. ([#272](https://github.com/mj-will/nessai/pull/272))
- `Rescale` reparameterisation is now an alias for `ScaleAndShift`. ([#273](https://github.com/mj-will/nessai/pull/273))
- Change the default result file extension to `hdf5`, old result file format can be recovered by setting it to `json`. ([#274](https://github.com/mj-will/nessai/pull/274))
- Optimisations to `FlowProposal.populate`, including changes to `Model.in_bounds` and how sampling from the latent prior is handled. ([#277](https://github.com/mj-will/nessai/pull/277))
- Add a maximum figure size (`nessai.config.plotting.max_figsize`) to prevent very large trace plots when the number of dimensions is very high. ([#282](https://github.com/mj-will/nessai/pull/282))
### Fixed
- Fix a bug where setting the livepoint precision (e.g. `f16`) did not work. ([#272](https://github.com/mj-will/nessai/pull/272))
- Fix plotting failing when sampling large number of parameters. ([#281](https://github.com/mj-will/nessai/pull/281), [#282](https://github.com/mj-will/nessai/pull/282))
### Removed
- Removed `nessai._NSIntegralState.reset`. ([#248](https://github.com/mj-will/nessai/pull/248))
- Removed `nessai.gw.legacy`. ([#267](https://github.com/mj-will/nessai/pull/267))
- Removed support for changing the variance of the latent distribution via `draw_latent_kwargs` from `FlowProposal`. ([#277](https://github.com/mj-will/nessai/pull/277))
## [0.7.1]
### Fixed
- Fix bug that led to the multiprocessing pool not being used when resuming. ([#261](https://github.com/mj-will/nessai/pull/261))
## [0.7.0]
**Important:** in this release the flow backend changed from `nflows` to `glasflow` which increased the minimum version of PyTorch to 1.11.0.
### Added
- Add explicit support for Python 3.10. ([#224](https://github.com/mj-will/nessai/pull/224))
- Add more structure utils (`get_subset_arrays`, `isfinite_struct`). ([#209](https://github.com/mj-will/nessai/pull/209))
- Add `nessai.sampler.base.BaseNestedSampler` class. ([#210](https://github.com/mj-will/nessai/pull/210))
- Add option to use multinomial resampling to `nessai.posterior.draw_posterior_samples`. ([#213](https://github.com/mj-will/nessai/pull/213), [#214](https://github.com/mj-will/nessai/pull/214))
- Add features (`log_prob`, `sample`, `end_iteration`, `finalise`, training with weights) to `FlowModel`. ([#216](https://github.com/mj-will/nessai/pull/216))
- Add option to checkpoint based on elapsed time. ([#225](https://github.com/mj-will/nessai/pull/225))
- Add `stream` option to `setup_logger` for setting the stream for `logging.StreamHandler`. ([#229](https://github.com/mj-will/nessai/pull/229))
- Add configurable periodic logging based on either the iteration or elapsed time. ([#229](https://github.com/mj-will/nessai/pull/229))
- Add `glasflow` dependency. ([#228](https://github.com/mj-will/nessai/pull/228))
- Add `posterior_sampling_method` to `FlowSampler.run`. ([#233](https://github.com/mj-will/nessai/pull/233))
- Add options `plot_{indices, posterior, logXlogL}` for disabling plots in `FlowSampler.run`. ([#233](https://github.com/mj-will/nessai/pull/233))
- Add `FlowSampler.terminate_run`. ([#233](https://github.com/mj-will/nessai/pull/233))
- Add `FlowSampler.log_evidence` and `FlowSampler.log_evidence_error`. ([#233](https://github.com/mj-will/nessai/pull/233))
- Add `nessai.utils.bilbyutils`. ([#236](https://github.com/mj-will/nessai/pull/236))
- Add a warning for when the final p-value for the insertion indices is less than 0.05. ([#235](https://github.com/mj-will/nessai/pull/235))
- Add `reset_flow` to `NestedSampler` for resetting the entire flow. ([#238](https://github.com/mj-will/nessai/pull/238))
### Changed
- Change how threading is handled to no longer use `max_threads`. ([#208](https://github.com/mj-will/nessai/pull/208))
- Refactor `nessai.nestedsampler` into the `nessai.samplers` submodule. ([#210](https://github.com/mj-will/nessai/pull/210))
- Refactor `nessai.flowmodel` into a submodule with `nessai.flowmodel.{base, utils, config}`. ([#216](https://github.com/mj-will/nessai/pull/216))
- Change how `noise_scale` is configured `FlowModel`. User can now specify `noise_type` and `noise_scale`. ([#216](https://github.com/mj-will/nessai/pull/216))
- Change `nessai.utils.rescaling.{logit, sigmoid}` to match `torch.{logit, sigmoid}`. ([#218](https://github.com/mj-will/nessai/pull/218))
- Change default checkpoint interval to 10 minutes rather than after training. ([#225](https://github.com/mj-will/nessai/pull/225))
- Change flows to use `glasflow.nflows` instead of `nflows`. ([#228](https://github.com/mj-will/nessai/pull/228))
- Change `close_pool` to be called at the end of `FlowSampler.run` rather than at the end of `NestedSampler.nested_sampling_loop`. ([#233](https://github.com/mj-will/nessai/pull/233))
- Bump minimum PyTorch version to 1.11.0. ([#230](https://github.com/mj-will/nessai/pull/230))
### Fixed
- Fixed a bug in `nessai.flows.utils.configure_model` that only occurred when the specified `device_tag` is invalid. ([#216](https://github.com/mj-will/nessai/pull/216))
- Fixed a bug in `nessai.utils.sampling.draw_truncated_gaussian` where the input was being changed by an in-place operation. ([#217](https://github.com/mj-will/nessai/pull/217))
- Fixed an infinite loop when resuming a run that was interrupted when switching proposal. ([#237](https://github.com/mj-will/nessai/pull/237))
### Deprecated
- Setting `max_threads` is deprecated and will be removed in a future release. ([#208](https://github.com/mj-will/nessai/pull/208))
- `nessai.nestedsampler` is deprecated and will be removed in a future release. Use `nessai.samplers.nestedsampler` instead. ([#226](https://github.com/mj-will/nessai/pull/226))
- `nessai.flows.transforms.LULinear` is deprecated in favour of `glasflow.nflows.transforms.LULinear` and will be removed in a future release. ([#228](https://github.com/mj-will/nessai/pull/228))
### Removed
- Removed unused code for saving live points in `NestedSampler`. ([#210](https://github.com/mj-will/nessai/pull/210))
- Removed `nflows` dependency. ([#228](https://github.com/mj-will/nessai/pull/228))
## [0.6.0] - 2022-08-24
### Added
- Add a warning in `Model.verify_model` when `Model.log_prior` returns an array that has `float16` precision. ([#175](https://github.com/mj-will/nessai/pull/175))
- Add more functionality for configuring live point fields and defaults. ([#170](https://github.com/mj-will/nessai/pull/170))
- Record iteration at which live points are drawn in `it` field of live points. ([#170](https://github.com/mj-will/nessai/pull/170))
- Add `nessai.config` for storing package wide defaults. ([#170](https://github.com/mj-will/nessai/pull/170))
- Add `nessai.utils.testing` submodule which contains functions to use during testing. ([#170](https://github.com/mj-will/nessai/pull/170))
- Add `nessai.livepoint.unstructured_view` and `nessai.model.Model.unstructured_view` for constructing unstructured views of live points. ([#178](https://github.com/mj-will/nessai/pull/178))
- Add `nessai.plot.corner_plot` as an alternative to `plot_live_points` that uses `corner` instead of `seaborn`. ([#189](https://github.com/mj-will/nessai/pull/189))
- Add new examples. ([#195](https://github.com/mj-will/nessai/pull/195), [#198](https://github.com/mj-will/nessai/pull/198))
- Add `filehandler_kwargs` to `nessai.utils.logging.setup_logger` which allows the user to configure the `FileHandler` in the logger. ([#204](https://github.com/mj-will/nessai/pull/204))
- Add `final_p_value` and `final_ks_statistic` to `NestedSampler` and the result file.
### Changed
- Change default values for log-likelihood and log-prior in empty live points to be `np.nan` instead of zero. ([#170](https://github.com/mj-will/nessai/pull/170))
- `nessai.livepoint.get_dtype` now returns an instance of `numpy.dtype`. ([#170](https://github.com/mj-will/nessai/pull/170))
- Style for plots is no longer set globally and can be disabled completely. ([#194](https://github.com/mj-will/nessai/pull/194))
- Update examples. ([#190](https://github.com/mj-will/nessai/pull/190))
- Changed behaviour of `from nessai import *` to no longer imports any modules. ([#201](https://github.com/mj-will/nessai/pull/201))
### Fixed
- Fixed a bug in `FlowProposal.populate` which occurred when the pool of samples was not empty (closes [#176](https://github.com/mj-will/nessai/issues/176)) ([#177](https://github.com/mj-will/nessai/pull/177))
- Fixed a bug in `nessai.model.Model.new_point` where the incorrect number of points were returned. ([#200](https://github.com/mj-will/nessai/pull/200))
### Removed
- Drop support for Python 3.6. ([#188](https://github.com/mj-will/nessai/pull/188))
- Remove a temporary fix for [#46](https://github.com/mj-will/nessai/issues/46) that was introduced in [#47](https://github.com/mj-will/nessai/pull/47). ([#202](https://github.com/mj-will/nessai/pull/202))
## [0.5.1] - 2022-06-20
### Fixed
- Fixed a bug where live points were added to the initial points with incorrect log-likelihood and log-prior. ([#171](https://github.com/mj-will/nessai/pull/171))
## [0.5.0] - 2022-06-14
### Added
- Add `dataframe_to_live_points` function to `nessai.livepoint` for converting from a `pandas.DataFrame` to live points. ([#133](https://github.com/mj-will/nessai/pull/133))
- Add `fallback_reparameterisation` to `FlowProposal`. This allows the user to specify which reparameterisation to use for parameters that are not included in the reparameterisations dictionary. Default behaviour remains unchanged (defaults to no reparameterisation). ([#134](https://github.com/mj-will/nessai/pull/134))
- Add `rolling_mean` to `nessai.utils.stats`. ([#136](https://github.com/mj-will/nessai/pull/136))
- Add `nessai.flows.utils.create_linear_transform` as a common function for creating linear transforms in the flows. ([#137](https://github.com/mj-will/nessai/pull/137))
- Add `nessai.flows.transforms.LULinear` to address a [bug in nflows](https://github.com/bayesiains/nflows/pull/38) that has not been patched and prevents the use of CUDA with `LULinear`. ([#138](https://github.com/mj-will/nessai/pull/138))
- Add `calibration_example.py` to the gravitational wave examples. ([#139](https://github.com/mj-will/nessai/pull/139))
- Add `defaults` keyword argument to `nessai.reparameterisations.get_reparameterisation` for overriding the dictionary of default reparameterisations. ([#142](https://github.com/mj-will/nessai/pull/142))
- Add explicit tests for `nessai.flowsampler` ([#143](https://github.com/mj-will/nessai/pull/143))
- Add more tests for `nessai.reparameterisations` ([#145](https://github.com/mj-will/nessai/pull/145))
- Add more tests for `nessai.gw` ([#144](https://github.com/mj-will/nessai/pull/144))
- Add support for vectorised likelihoods and automatically detect if the likelihood is vectorised. ([#148](https://github.com/mj-will/nessai/pull/148), [#166](https://github.com/mj-will/nessai/pull/166))
- Add support for passing a user-defined pool instead of using `n_pool`. ([#148](https://github.com/mj-will/nessai/pull/148))
- Add an option to disable signal handling in `FlowSampler`. ([#159](https://github.com/mj-will/nessai/pull/159))
- Add support for `ray.util.multiprocessing.Pool` (fixes [#162](https://github.com/mj-will/nessai/issues/162)). ([#163](https://github.com/mj-will/nessai/pull/163))
### Changed
- `NestedSampler.plot_state` now includes the log-prior volume in one of the subplots and the rolling mean of the gradient (|dlogL/dLogX|) is plotted instead of the gradient directly. ([#136](https://github.com/mj-will/nessai/pull/136))
- The figure produced by `NestedSampler.plot_state` now includes a legend for the different vertical lines that can appear in the subplots. ([#136](https://github.com/mj-will/nessai/pull/136))
- `RealNVP` and `NeuralSplineFlow` now use `nessai.flows.utils.create_linear_transform`. ([#137](https://github.com/mj-will/nessai/pull/137))
- Updated all of the examples to reflect the new defaults. ([#139](https://github.com/mj-will/nessai/pull/139))
- Rework `nessai.gw.reparameterisations.get_gw_reparameterisation` to use `get_reparameterisation` with the `defaults` keyword argument. ([#142](https://github.com/mj-will/nessai/pull/142))
- Switch to `os.path.join` for joining paths. ([#143](https://github.com/mj-will/nessai/pull/143), [#161](https://github.com/mj-will/nessai/pull/161))
- Context is now passed to the transform in `nessai.flows.base.NFlow` enabling the use of flows with conditional transforms. ([#146](https://github.com/mj-will/nessai/pull/146))
- Add `context_features` to RealNVP and NeuralSplineFlows ([#146](https://github.com/mj-will/nessai/pull/146))
- Rework `MaskedAutoregressiveFlow` to add `context_features` ([#146](https://github.com/mj-will/nessai/pull/146))
- Rework how likelihood parallelisation is handled. The model now contains the pool instead of the sampler and proposals. ([#148](https://github.com/mj-will/nessai/pull/148))
- Update `parallelisation_example.py` to show use of `n_pool` and `pool` for parallelisation. ([#148](https://github.com/mj-will/nessai/pull/148))
- Simplify how the normalising flow is reset in `FlowModel` and `NestedSampler`. ([#150](https://github.com/mj-will/nessai/pull/150))
- Reduce logging level of some statements in `FlowProposal`. ([#160](https://github.com/mj-will/nessai/pull/160))
### Fixed
- Fixed a bug in `RescaleToBounds` when using `pre_rescaling` without boundary inversion. ([#145](https://github.com/mj-will/nessai/pull/145))
- Fixed slow integration tests not running if a quick integration test is reran after failing. ([#153](https://github.com/mj-will/nessai/pull/153))
- Fixed a bug that prevented the use of `prior_sampling=True` with `FlowSampler`. ([#156](https://github.com/mj-will/nessai/pull/156))
- Fix issue when creating multiple instances of `FlowSampler` with the same output directory when resuming is enabled as reported in [#155](https://github.com/mj-will/nessai/issues/155). ([#157](https://github.com/mj-will/nessai/pull/157))
- Fixed missing square-root in `nessai.flows.distributions.MultivariateGaussian._sample` and fix the corresponding unit test. ([#158](https://github.com/mj-will/nessai/pull/158))
- Fix issue with cosmology in `ComovingDistanceConverter` caused by changes to `astropy.cosmology` in version 5.1. ([#168](https://github.com/mj-will/nessai/pull/168))
- Fixed bug with caching in `LULinear` transform when reloading a checkpointed flow. ([#167](https://github.com/mj-will/nessai/pull/167))
### Removed
- Removed `legacy_gw_example.py` ([#139](https://github.com/mj-will/nessai/pull/139))
- Removed `keep_samples` from `FlowProposal`. ([#140](https://github.com/mj-will/nessai/pull/140))
## [0.4.0] - 2021-11-23
### Added
- Add a constant volume mode to `FlowProposal`. In this mode the radius of the latent contour is fixed to the q'th quantile, which by default is `0.95`. ([#125](https://github.com/mj-will/nessai/pull/125))
- Add a check for `resume_file` when `resume=True`. ([#126](https://github.com/mj-will/nessai/pull/126))
- Change default logging level to `WARNING`. ([#126](https://github.com/mj-will/nessai/pull/126))
- Add `angle-cosine` reparameterisation. ([#126](https://github.com/mj-will/nessai/pull/126))
- Added an explicit check for one-dimensional models that raises a custom exception `OneDimensionalModelError`. ([#121](https://github.com/mj-will/nessai/pull/121))
- `RealNVP` and `NeuralSplineFlow` now raise an error if `features<=1`. ([#121](https://github.com/mj-will/nessai/pull/121))
- Add option in `nessai.reparameterisations.Angle` to set `scale=None`, the scale is then set as `2 * pi / angle_prior_range`. ([#127](https://github.com/mj-will/nessai/pull/127))
- Add `'periodic'` reparameterisation that uses `scale=None` in `nessai.reparameterisations.Angle`. ([#127](https://github.com/mj-will/nessai/pull/127))
- Add the `use_default_reparameterisations` option to `FlowProposal` to allow the use of the default reparameterisations in `GWFlowProposal` without specifying any reparameterisations. ([#129](https://github.com/mj-will/nessai/pull/129))
- Add `chi_1`, `chi_2` and `time_jitter` to known parameters in `GWFlowProposal` with corresponding defaults. ([#130](https://github.com/mj-will/nessai/pull/130))
### Changed
- Reparameterisation `angle-sine` is now an alias for `RescaledToBounds` instead of `Angle` with specific keyword arguments. ([#126](https://github.com/mj-will/nessai/pull/126))
- `maximum_uninformed=None` now defaults to 2 times `nlive` instead of `np.inf`. ([#126](https://github.com/mj-will/nessai/pull/126))
- `nlive=2000` by default. ([#126](https://github.com/mj-will/nessai/pull/126))
- Default `batch_size` is now 1000. ([#126](https://github.com/mj-will/nessai/pull/126))
- Default `n_neurons` is now 2 times the dimensions of the normalising flow. ([#126](https://github.com/mj-will/nessai/pull/126))
- Default mode for `FlowProposal` is `constant_volume_mode=True`. ([#126](https://github.com/mj-will/nessai/pull/126))
- Proposal plots are now disabled by default. ([#126](https://github.com/mj-will/nessai/pull/126))
- `cooldown` now defaults to `200` to reflect the change in `nlive`. ([#126](https://github.com/mj-will/nessai/pull/126))
- Default optimiser is now `adamw`. ([#126](https://github.com/mj-will/nessai/pull/126))
- Rework `AugmentedFlowProposal` to work with the new defaults. ([#126](https://github.com/mj-will/nessai/pull/126))
- `Model.names` and `Model.bounds` are now properties by default and their setters include checks to verify the values provided are valid and raise errors if not. ([#121](https://github.com/mj-will/nessai/pull/121))
- Logger now has propagation enabled by default. ([#128](https://github.com/mj-will/nessai/pull/128))
- `FlowProposal.configure_reparameterisations` can now handle an input of `None`. In this case only the default reparameterisations will be added. ([#129](https://github.com/mj-will/nessai/pull/129))
- Changed default reparameterisation for gravitational-wave parameters `a_1` and `a_2` to `'default'`. ([#130](https://github.com/mj-will/nessai/pull/130))
### Fixed
- Fixed a bug where the parameters list passed to `Reparameterisation` (or its child classes) wasn't being copied and changes made within the reparameterisation would change the original list. ([#127](https://github.com/mj-will/nessai/pull/127))
### Deprecated
- `keep_samples` in `FlowProposal` will be removed in the next minor release.
## [0.3.3] - 2021-11-04
### Fixed
- Fixed a bug in `nessai.livepoint.dict_to_live_points` when passing a dictionary where the entries contained floats instead of objects with a length raised an error. ([#119](https://github.com/mj-will/nessai/pull/119))
## [0.3.2] - 2021-10-12
### Added
- Added more checks to the init method for `nessai.reparameterisations.AnglePair` to catch invalid combinations of priors and/or angle conventions. Now supports RA or azimuth defined on [-pi, pi] in addition to [0, 2pi]. ([#114](https://github.com/mj-will/nessai/pull/114))
- Add a check in `nessai.flowmodel.update_config` for `'noise_scale'`, a `ValueError` is now raised if `noise_scale` is not a float or `'adaptive'`. ([#115](https://github.com/mj-will/nessai/pull/115))
- Add `codespell` to the pre-commit checks. ([#116](https://github.com/mj-will/nessai/pull/116))
### Changed
- The dtype for tensors passed to the flow is now set using `torch.get_default_dtype()` rather than always using `float32`. ([#108](https://github.com/mj-will/nessai/pull/108))
- Incorrect values for `mask` in `nessai.flows.realnvp.RealNVP` now raise `ValueError` and improved the error messages returned by all the exceptions in the class. ([#109](https://github.com/mj-will/nessai/pull/109))
- Change scale of y-axis of the log-prior volume vs. log-likelihood plot from `symlog` to the default linear axis. ([#110](https://github.com/mj-will/nessai/pull/110))
- `nessai.plot.plot_trace` now includes additional parameters such as `logL` and `logP` by default; previously the last two parameters (assumed to be `logL` and `logP`) were always excluded. ([#111](https://github.com/mj-will/nessai/pull/111))
### Fixed
- Fixed an issue where `nessai.reparameterisations.AnglePair` would silently break when the prior range for RA or azimuth was set to a range that wasn't [0, 2pi]. It now correctly handles both [0, 2pi] and [-pi, pi] and raises an error for any other ranges. ([#114](https://github.com/mj-will/nessai/pull/114))
- Fixed various spelling mistakes throughout the source code and documentation. ([#116](https://github.com/mj-will/nessai/pull/116))
## [0.3.1] Minor improvements and bug fixes - 2021-08-23
This release has a few minor improvements and bug fixes. It also explicitly adds support for python 3.9, which worked previously but was not tested.
### Added
- Add `in_bounds`, `parameter_in_bounds` and `sample_parameter` methods to `nessai.model.Model`. ([#90](https://github.com/mj-will/nessai/pull/90))
- Implemented the option to specify the cosmology in `nessai.gw.utils.ComovingDistanceConverter` using `astropy`. Previously changing the value had no effect on the transformation. ([#91](https://github.com/mj-will/nessai/pull/91))
- Improve test coverage for `nessai.proposal.base.Proposal` ([#92](https://github.com/mj-will/nessai/pull/92))
- Add `'logit'` to the default reparameterisations ([#98](https://github.com/mj-will/nessai/pull/98))
- Add example using the Rosenbrock likelihood in two dimensions ([#99](https://github.com/mj-will/nessai/pull/99))
- Add a `colours` argument to `nessai.plot.plot_1d_comparison` ([#102](https://github.com/mj-will/nessai/pull/102))
- Explicitly support Python 3.9 (Added Python 3.9 to unit tests) ([#103](https://github.com/mj-will/nessai/pull/103))
### Changed
- `nessai.gw.utils.DistanceConverter` now inherits from `abc.ABC` and `to_uniform_parameter` and `from_uniform_parameter` are both abstract methods. ([#91](https://github.com/mj-will/nessai/pull/91))
- `nessai.proposal.base.Proposal` now inherits from `abc.ABC` and `draw` is an abstract method. ([#92](https://github.com/mj-will/nessai/pull/92))
- `nessai.proposal.rejection.RejectionProposal` now inherits from `nessai.proposal.analytic.AnalyticProposal`. Functionality is the same but the code will be easier to maintain since this removes several methods that were identical. ([#93](https://github.com/mj-will/nessai/pull/93))
- `noise_scale='adaptive'` option in `FlowModel` now correctly uses a standard deviation of 0.2 times the mean nearest neighbour separation as described in [Moss 2019](https://arxiv.org/abs/1903.10860). Note that this feature is disabled by default, so this does not change the default behaviour. ([#95](https://github.com/mj-will/nessai/pull/95))
- Refactor `nessai.utils` into a submodule. ([#96](https://github.com/mj-will/nessai/pull/96))
- Change behaviour of `determine_rescaled_bounds` so that `rescale_bounds` is ignored when `inversion=True`. This matches the behaviour in `RescaledToBounds` where when boundary inversion is enabled, values are rescaled to [0, 1] and then, if no inversion is applied, changed to [-1, 1]. ([#96](https://github.com/mj-will/nessai/pull/96))
- Tweaked `detect_edges` so that `both` is returned in cases where the lower and upper regions contain zero probability. ([#96](https://github.com/mj-will/nessai/pull/96))
- `NestedSampler` no longer checks capitalisation of `flow_class` when determining which proposal class to use. E.g. `'FlowProposal'` and `'flowproposal'` are now both valid values. ([#100](https://github.com/mj-will/nessai/pull/100))
- `NestedSampler.configure_flow_proposal` now raises `ValueError` instead of `RuntimeError` if `flow_class` is an invalid string. ([#100](https://github.com/mj-will/nessai/pull/100))
- Raise a `ValueError` if `nessai.plot.plot_1d_comparison` is called with a labels list and the length does not match the number of sets of live points being compared. ([#102](https://github.com/mj-will/nessai/pull/102))
- `nessai.flow.base.BaseFlow` now also inherits from `abc.ABC` and methods that should be defined by the user are abstract methods. ([#104](https://github.com/mj-will/nessai/pull/104))
- Changed default to `fuzz=1e-12` in `nessai.utils.rescaling.logit` and `nessai.utils.rescaling.sigmoid` and improved stability. ([#105](https://github.com/mj-will/nessai/pull/105))
### Fixed
- Fixed a typo in `nessai.gw.utils.NullDistanceConverter.from_uniform_parameter` that broke the method. ([#91](https://github.com/mj-will/nessai/pull/91))
- Fixed a bug in `nessai.reparameterisations.RescaleToBounds` when using `offset=True` and `pre_rescaling` where the prime prior bounds were incorrectly set. ([#97](https://github.com/mj-will/nessai/pull/97))
- Fixed a bug that prevented disabling periodic checkpointing. ([#101](https://github.com/mj-will/nessai/pull/101))
- Fixed a bug when calling `nessai.plot.plot_1d_comparison` with live points that contain a field with only infinite values. ([#102](https://github.com/mj-will/nessai/pull/102))
- Fixed the log Jacobian determinant for `nessai.utils.rescaling.logit` and `nessai.utils.rescaling.sigmoid` which previously did not include the Jacobian for the fuzz when it was used. ([#105](https://github.com/mj-will/nessai/pull/105))
## [0.3.0] Testing, testing and more testing - 2021-07-05
This release contains a large number of changes related to bugs and issues that were discovered when writing more tests for `nessai`.
It also adds a number of feature and examples.
**Note:** Runs produced with previous releases are incompatible with this release and cannot be resumed without manual intervention.
### Added
- Added code to catch errors when calling `plot_live_points` when `gwpy` is installed.
- Added tests for `_NSIntegralState`.
- Add code coverage upload
- Added an example of using unbounded priors, `bilby_unbounded_priors.py`
- Added `Rescale` reparameterisation that just rescales by a constant and does not require prior bounds. Also add
tests for this reparameterisation.
- Added more GW examples.
- Added tests for `AugmentedFlowProposal`.
- Added an example using `AugmentedFlowProposal`.
- Added eggbox example.
- Added an error if calling `FlowProposal.rejection_sampling` with `FlowProposal.truncate=True` but `worst_q=None`.
- Add option to train using dataloaders or directly with tensors. This is faster when using CUDA.
- Add options to train with different optimisers: Adam, AdamW, SGD
- Add tests for `NestedSampler`
- Explicitly check prior bounds when using reparameterisations. This catches cases where infinite bounds are used and break some reparameterisations. (#82)
- Add error when calling `FlowProposal.populate` without initialising the proposal.
- Add `NestedSampler.plot_insertion_indices` to allow for easier plotting of insertion indices.
- Add `filename` keyword argument to `NestedSampler.plot_trace`.
- Added `batch_norm_within_layers` to `NeuralSplineFlow`
### Changed
- Plotting logX vs logL now returns the figure if `filename=None`
- `NestedSampler.plot_state` now has the keyword argument `filename` and the figure is only saved if it is specified.
- Changed name from `_NSintegralState` to `_NSIntegralState`.
- `nessai.model.Model` now inherits from `abc.ABC` and `log_prior` and `log_likelihood` are now `abstractmethods`. This prevents the class from being used without redefining those methods.
- Updated `AugmentedFlowProposal` to work with the current version of `FlowProposal`
- Fix random seed unit tests.
- Improved `FlowProposal.reset` so that all attributes that are changed by calling `draw` are reset.
- Move `_NSIntegralState` and some functions from `posterior.py` to `evidence.py`
- `NestedSampler.check_flow_model_reset` will now NOT reset the flow if it has never been trained (i.e. `proposal.training_count==0`)
- Moved all legacy gw functions to `nessai/gw/legacy.py` and removed them from the coverage report.
- Minor improvements to `NestedSampler`
- Better handling on NaNs in `NestedSampler.populate_live_points`
- Minor improvements to plotting in `FlowProposal` and moved plotting to separate methods in `FlowProposal`.
- Switch to using `os.path.join` when joining paths.
- Improved `FlowProposal.reset`
- Renamed `FlexibleRealNVP` to `RealNVP`, shouldn't affect most uses since the default way to specify a flow is via strings in `configure_model`.
- Renamed `nessai.flows.utils.setup_model` to `configure_model`.
- Renamed `nessai.flows.utils.CustomMLP` to `MLP`
- Changed default value for `tail_bound` in `NeuralSplineFlow` to 5.
### Fixed
- Fixed a bug when plotting the state plot from a saved instance of the sampler where the sampling time was changed based on the current time.
- Fixed a bug when using `plot_trace`, `plot_1d_comparison` or `plot_live_points` with a single parameter
- Total sampling time is now correctly displayed when producing a state plot from a saved sampler.
- Fixed a bug when using unbounded priors related to `Model.verify_model`
- Fix inversion-split with `RescaleToBounds`
- Fixed `AugmentedGWFlowProposal`.
- Fixed a bug with `plot_live_points` when the hue parameter (`c`) was constant.
- Fixed a bug with the reparameterisation `Rescale` when `scale` was set to a negative number.
- Fixed a bug where `scale` could not be changed in `ToCartesian`.
- Fixed an error when specifying `NullReparameterisation` (!82)
- Fix typo in `FlowProposal.set_poolsize_scale` when `acceptance=0`
- Fixed unintended behaviour when `rescale_parameters` is a list and `boundary_inversion=True`, where the code would try to apply inversion to all parameters in `Model.names`.
- Fixed bug where `z` returned by `FlowProposal.rejection_sampling` was incorrect when using truncation (which is not recommended).
- Fix `prior_sampling`
- Fixed minor typos in `nessai.proposal.flowproposal.py`
### Removed
- Remove "clip" option in `FlowProposal`, this was unused and untested.
## [0.2.4] - 2021-03-08
This release includes a number of bug fixes, changes to make the `GWFlowProposal` consistent with `LegacyGWFlowProposal` and a number of new unit tests to improve test coverage.
### Added
- Add poolsize to `AnalyticProposal`
- Add a test for sampling with multiprocessing.
- Add a test for sampling with `AnalyticProposal` and `RejectionProposal`.
- Add a test for using the proposal methods with `n_pool`
- Add tests for reparameterisations.
- Add a test for comparing `GWFlowProposal` and `LegacyGWFlowProposal`.
### Changed
- Changed prime priors in `LegacyGWFlowProposal` to not update. This improves efficiency.
- Changes to the reparameterisations to make the proposal consistent with `LegacyGWFlowProposal`:
- Use [-1, 1] when inversion is enabled but not applied
- Improved errors when reparameterisations are configured incorrectly.
### Fixed
- Fixed a bug with saving results when multiprocessing is enabled.
- Fixed a bug with `AnalyticProposal` introduced in the last release.
- Fixed a bug with resuming when using certain reparameterisations.
## [0.2.3] - 2021-02-24
Add support for Python >= 3.6 and other minor changes and bug fixes
### Added
- Badges for DOI and PyPI versions.
- Add support for Python >= 3.6.
- Improve doc-strings and tweak settings for doc-strings in the documentation.
- Add tests for plotting functions.
- Added sections to README and docs on citing `nessai`.
### Changed
- Remove `:=` operator to enable support for Python >= 3.6.
- Plotting functions are now more consistent and all return the figure if `filename=None`.
### Fixed
- Fixed bug when plotting non-structured arrays with `plot_1d_comparison` and specifying `parameters`.
- Fixed bug where `plot_indices` failed if using an empty array but worked with an empty list.
### Removed
- Remove `plot_posterior` because functionality is include in `plot_live_points`.
- Remove `plot_likelihood_evaluations` because information is already contained in the state plot.
- Remove `plot_acceptance` as it is only by augmented proposal which is subject to change.
- Remove `plot_flow`.
## [0.2.2] - 2021-02-19
This release was added to trigger Zenodo for producing a DOI.
### Added
- Docs badge
## [0.2.1] - 2021-02-18
Minor repository related fixes. Core code remains unchanged.
### Added
- PyPI workflow to automatically release package to PyPI
### Fixed
- Fixed issue with README not rendering on PyPI
## [0.2.0] - 2021-02-18
First public release.
### Added
- Complete documentation
- Use `setup.cfg` and `pyproject.toml` for installing package
- `reparameterisations` submodule for more specific reparameterisations
- `half_gaussian.py` example
### Changed
- Change to use `main` instead of `master`
- Default `GWFlowProposal` changed to use `reparameterisations`
- Split `proposal.py` into various submodules
- Minor updates to examples
- `max_threads` default changed to 1.
### Fixed
- Fix a bug where `maximum_uninformed` did not have the expected behaviour.
### Deprecated
- Original `GWFlowProposal` method renamed to `LegacyGWFlowProposal`. Will be removed in the next release.
[Unreleased]: https://github.com/mj-will/nessai/compare/v0.13.2...HEAD
[0.13.2]: https://github.com/mj-will/nessai/compare/v0.13.1...v0.13.2
[0.13.1]: https://github.com/mj-will/nessai/compare/v0.13.0...v0.13.1
[0.13.0]: https://github.com/mj-will/nessai/compare/v0.12.0...v0.13.0
[0.12.0]: https://github.com/mj-will/nessai/compare/v0.11.0...v0.12.0
[0.11.0]: https://github.com/mj-will/nessai/compare/v0.10.1...v0.11.0
[0.10.1]: https://github.com/mj-will/nessai/compare/v0.10.0...v0.10.1
[0.10.0]: https://github.com/mj-will/nessai/compare/v0.9.1...v0.10.0
[0.9.1]: https://github.com/mj-will/nessai/compare/v0.9.0...v0.9.1
[0.9.0]: https://github.com/mj-will/nessai/compare/v0.8.1...v0.9.0
[0.8.1]: https://github.com/mj-will/nessai/compare/v0.8.0...v0.8.1
[0.8.0]: https://github.com/mj-will/nessai/compare/v0.7.1...v0.8.0
[0.7.1]: https://github.com/mj-will/nessai/compare/v0.7.0...v0.7.1
[0.7.0]: https://github.com/mj-will/nessai/compare/v0.6.0...v0.7.0
[0.6.0]: https://github.com/mj-will/nessai/compare/v0.5.1...v0.6.0
[0.5.1]: https://github.com/mj-will/nessai/compare/v0.5.0...v0.5.1
[0.5.0]: https://github.com/mj-will/nessai/compare/v0.4.0...v0.5.0
[0.4.0]: https://github.com/mj-will/nessai/compare/v0.3.3...v0.4.0
[0.3.3]: https://github.com/mj-will/nessai/compare/v0.3.2...v0.3.3
[0.3.2]: https://github.com/mj-will/nessai/compare/v0.3.1...v0.3.2
[0.3.1]: https://github.com/mj-will/nessai/compare/v0.3.0...v0.3.1
[0.3.0]: https://github.com/mj-will/nessai/compare/v0.2.4...v0.3.0
[0.2.4]: https://github.com/mj-will/nessai/compare/v0.2.3...v0.2.4
[0.2.3]: https://github.com/mj-will/nessai/compare/v0.2.2...v0.2.3
[0.2.2]: https://github.com/mj-will/nessai/compare/v0.2.1...v0.2.2
[0.2.1]: https://github.com/mj-will/nessai/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/mj-will/nessai/compare/v0.1.1...v0.2.0
|
mj-willREPO_NAMEnessaiPATH_START.@nessai_extracted@nessai-main@CHANGELOG.md@.PATH_END.py
|
{
"filename": "getattr_static.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/jedi/py3/jedi/evaluate/compiled/getattr_static.py",
"type": "Python"
}
|
"""
A static version of getattr.
This is a backport of the Python 3 code with a little bit of additional
information returned to enable Jedi to make decisions.
"""
import types
from jedi._compatibility import py_version
_sentinel = object()
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
    """Search the static MRO of ``klass`` for ``attr``.

    Each class ``__dict__`` is read directly so that metaclass attributes
    and descriptors are never triggered.  Entries whose ``__dict__`` has
    been shadowed are skipped.  Returns ``_sentinel`` when not found.
    """
    for base in _static_getmro(klass):
        # Only trust this entry's __dict__ if it is the genuine slot
        # descriptor (i.e. not shadowed by a user-defined '__dict__').
        if _shadowed_dict(type(base)) is not _sentinel:
            continue
        base_dict = base.__dict__
        if attr in base_dict:
            return base_dict[attr]
    return _sentinel
def _is_type(obj):
    """Return ``True`` when ``obj`` looks like a class (has a static MRO)."""
    try:
        _static_getmro(obj)
    except TypeError:
        # Non-class objects make the mro descriptor lookup fail.
        return False
    else:
        return True
def _shadowed_dict_newstyle(klass):
    """Check whether any class in ``klass``'s MRO shadows ``__dict__``.

    Returns the shadowing object when one exists, otherwise ``_sentinel``
    (meaning the real ``__dict__`` slot is intact everywhere).
    """
    dict_descriptor = type.__dict__["__dict__"]
    for entry in _static_getmro(klass):
        try:
            class_dict = dict_descriptor.__get__(entry)["__dict__"]
        except KeyError:
            continue
        # The genuine __dict__ slot is a getset descriptor named
        # '__dict__' owned by this very class; anything else means the
        # attribute was shadowed by user code.
        is_genuine_slot = (
            type(class_dict) is types.GetSetDescriptorType
            and class_dict.__name__ == "__dict__"
            and class_dict.__objclass__ is entry
        )
        if not is_genuine_slot:
            return class_dict
    return _sentinel
def _static_getmro_newstyle(klass):
return type.__dict__['__mro__'].__get__(klass)
# Select implementations for the running interpreter.  ``py_version`` comes
# from ``jedi._compatibility``; on Python 3 the "newstyle" helpers above can
# be used as-is, while Python 2 must additionally cope with old-style
# classes (classes not derived from ``object``).
if py_version >= 30:
    _shadowed_dict = _shadowed_dict_newstyle
    _get_type = type
    _static_getmro = _static_getmro_newstyle
else:
    def _shadowed_dict(klass):
        """
        In Python 2 __dict__ is not overwritable:
            class Foo(object): pass
            setattr(Foo, '__dict__', 4)
            Traceback (most recent call last):
              File "<stdin>", line 1, in <module>
            TypeError: __dict__ must be a dictionary object
        It applies to both newstyle and oldstyle classes:
            class Foo(object): pass
            setattr(Foo, '__dict__', 4)
            Traceback (most recent call last):
              File "<stdin>", line 1, in <module>
            AttributeError: attribute '__dict__' of 'type' objects is not writable
        It also applies to instances of those objects. However to keep things
        straight forward, newstyle classes always use the complicated way of
        accessing it while oldstyle classes just use getattr.
        """
        if type(klass) is _oldstyle_class_type:
            # Old-style classes cannot shadow __dict__ (see docstring), so a
            # plain getattr is safe here.
            return getattr(klass, '__dict__', _sentinel)
        return _shadowed_dict_newstyle(klass)

    # Reference class used only to capture the Python 2 types of old-style
    # classes and their instances, for identity checks below.
    class _OldStyleClass:
        pass
    _oldstyle_instance_type = type(_OldStyleClass())
    _oldstyle_class_type = type(_OldStyleClass)

    def _get_type(obj):
        # Read __class__ via object.__getattribute__ so user-defined
        # __getattribute__ overrides are not triggered.
        type_ = object.__getattribute__(obj, '__class__')
        if type_ is _oldstyle_instance_type:
            # Somehow for old style classes we need to access it directly.
            return obj.__class__
        return type_

    def _static_getmro(klass):
        if type(klass) is _oldstyle_class_type:
            def oldstyle_mro(klass):
                """
                Oldstyle mro is a really simplistic way of look up mro:
                https://stackoverflow.com/questions/54867/what-is-the-difference-between-old-style-and-new-style-classes-in-python
                """
                # Depth-first, left-to-right traversal of __bases__, which
                # is how old-style attribute lookup actually resolves.
                yield klass
                for base in klass.__bases__:
                    for yield_from in oldstyle_mro(base):
                        yield yield_from
            return oldstyle_mro(klass)
        return _static_getmro_newstyle(klass)
def _safe_hasattr(obj, name):
    """``hasattr`` replacement that never triggers dynamic attribute lookup."""
    found = _check_class(_get_type(obj), name)
    return found is not _sentinel
def _safe_is_data_descriptor(obj):
    """Return ``True`` if ``obj`` defines ``__set__`` or ``__delete__``,
    i.e. it is a data descriptor (checked without dynamic lookup)."""
    return any(_safe_hasattr(obj, slot) for slot in ('__set__', '__delete__'))
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
    descriptor protocol, __getattr__ or __getattribute__.

    Note: this function may not be able to retrieve all attributes
    that getattr can fetch (like dynamically created attributes)
    and may find attributes that getattr can't (like descriptors
    that raise AttributeError). It can also return descriptor objects
    instead of instance members in some cases. See the
    documentation for details.

    Returns a tuple `(attr, is_get_descriptor)`. is_get_descriptor means that
    the attribute is a descriptor that has a `__get__` attribute.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        klass = _get_type(obj)
        dict_attr = _shadowed_dict(klass)
        # Only consult the instance __dict__ when the class does not shadow
        # it; a MemberDescriptorType means __slots__-style storage, which is
        # still the genuine per-instance dict slot.
        if (dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        # ``obj`` is itself a class, so only class-level lookup applies.
        klass = obj
    klass_result = _check_class(klass, attr)
    if instance_result is not _sentinel and klass_result is not _sentinel:
        if _safe_hasattr(klass_result, '__get__') \
                and _safe_is_data_descriptor(klass_result):
            # A get/set descriptor has priority over everything.
            return klass_result, True
    # Instance attribute wins over a plain (non-data-descriptor) class
    # attribute, matching normal attribute lookup order.
    if instance_result is not _sentinel:
        return instance_result, False
    if klass_result is not _sentinel:
        return klass_result, _safe_hasattr(klass_result, '__get__')
    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr], False
                except KeyError:
                    pass
    if default is not _sentinel:
        return default, False
    raise AttributeError(attr)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@jedi@py3@jedi@evaluate@compiled@getattr_static.py@.PATH_END.py
|
{
"filename": "google_drive.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/document_loaders/google_drive.ipynb",
"type": "Jupyter Notebook"
}
|
# Google Drive
>[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) is a file storage and synchronization service developed by Google.
This notebook covers how to load documents from `Google Drive`. Currently, only `Google Docs` are supported.
## Prerequisites
1. Create a Google Cloud project or use an existing project
1. Enable the [Google Drive API](https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com)
1. [Authorize credentials for desktop app](https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application)
1. `pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib`
## 🧑 Instructions for ingesting your Google Docs data
Set the environmental variable `GOOGLE_APPLICATION_CREDENTIALS` to an empty string (`""`).
By default, the `GoogleDriveLoader` expects the `credentials.json` file to be located at `~/.credentials/credentials.json`, but this is configurable using the `credentials_path` keyword argument. Same thing with `token.json` - default path: `~/.credentials/token.json`, constructor param: `token_path`.
The first time you use GoogleDriveLoader, you will be displayed with the consent screen in your browser for user authentication. After authentication, `token.json` will be created automatically at the provided or the default path. Also, if there is already a `token.json` at that path, then you will not be prompted for authentication.
`GoogleDriveLoader` can load from a list of Google Docs document ids or a folder id. You can obtain your folder and document id from the URL:
* Folder: https://drive.google.com/drive/u/0/folders/1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5 -> folder id is `"1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5"`
* Document: https://docs.google.com/document/d/1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw/edit -> document id is `"1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw"`
```python
%pip install --upgrade --quiet langchain-google-community[drive]
```
```python
from langchain_google_community import GoogleDriveLoader
```
```python
loader = GoogleDriveLoader(
folder_id="1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5",
token_path="/path/where/you/want/token/to/be/created/google_token.json",
# Optional: configure whether to recursively fetch files from subfolders. Defaults to False.
recursive=False,
)
```
```python
docs = loader.load()
```
When you pass a `folder_id` by default all files of type document, sheet and pdf are loaded. You can modify this behaviour by passing a `file_types` argument
```python
loader = GoogleDriveLoader(
folder_id="1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5",
file_types=["document", "sheet"],
recursive=False,
)
```
## Passing in Optional File Loaders
When processing files other than Google Docs and Google Sheets, it can be helpful to pass an optional file loader to `GoogleDriveLoader`. If you pass in a file loader, that file loader will be used on documents that do not have a Google Docs or Google Sheets MIME type. Here is an example of how to load an Excel document from Google Drive using a file loader.
```python
from langchain_community.document_loaders import UnstructuredFileIOLoader
from langchain_google_community import GoogleDriveLoader
```
```python
file_id = "1x9WBtFPWMEAdjcJzPScRsjpjQvpSo_kz"
loader = GoogleDriveLoader(
file_ids=[file_id],
file_loader_cls=UnstructuredFileIOLoader,
file_loader_kwargs={"mode": "elements"},
)
```
```python
docs = loader.load()
```
```python
docs[0]
```
You can also process a folder with a mix of files and Google Docs/Sheets using the following pattern:
```python
folder_id = "1asMOHY1BqBS84JcRbOag5LOJac74gpmD"
loader = GoogleDriveLoader(
folder_id=folder_id,
file_loader_cls=UnstructuredFileIOLoader,
file_loader_kwargs={"mode": "elements"},
)
```
```python
docs = loader.load()
```
```python
docs[0]
```
```python
```
## Extended usage
An external (unofficial) component can manage the complexity of Google Drive : `langchain-googledrive`
It's compatible with the `langchain_community.document_loaders.GoogleDriveLoader` and can be used
in its place.
To be compatible with containers, the authentication uses an environment variable `GOOGLE_ACCOUNT_FILE` pointing to the credential file (for user or service).
```python
%pip install --upgrade --quiet langchain-googledrive
```
```python
folder_id = "root"
# folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'
```
```python
# Use the advanced version.
from langchain_googledrive.document_loaders import GoogleDriveLoader
```
```python
loader = GoogleDriveLoader(
folder_id=folder_id,
recursive=False,
num_results=2, # Maximum number of file to load
)
```
By default, all files with these mime-type can be converted to `Document`.
- text/text
- text/plain
- text/html
- text/csv
- text/markdown
- image/png
- image/jpeg
- application/epub+zip
- application/pdf
- application/rtf
- application/vnd.google-apps.document (GDoc)
- application/vnd.google-apps.presentation (GSlide)
- application/vnd.google-apps.spreadsheet (GSheet)
- application/vnd.google.colaboratory (Notebook colab)
- application/vnd.openxmlformats-officedocument.presentationml.presentation (PPTX)
- application/vnd.openxmlformats-officedocument.wordprocessingml.document (DOCX)
It's possible to update or customize this. See the documentation of `GDriveLoader`.
But, the corresponding packages must be installed.
```python
%pip install --upgrade --quiet unstructured
```
```python
for doc in loader.load():
print("---")
print(doc.page_content.strip()[:60] + "...")
```
### Loading auth Identities
Authorized identities for each file ingested by Google Drive Loader can be loaded along with metadata per Document.
```python
from langchain_google_community import GoogleDriveLoader
loader = GoogleDriveLoader(
folder_id=folder_id,
load_auth=True,
# Optional: configure whether to load authorized identities for each Document.
)
doc = loader.load()
```
You can pass load_auth=True, to add Google Drive document access identities to metadata.
```python
doc[0].metadata
```
### Loading extended metadata
Following extra fields can also be fetched within metadata of each Document:
- full_path - Full path of the file/s in google drive.
- owner - owner of the file/s.
- size - size of the file/s.
```python
from langchain_google_community import GoogleDriveLoader
loader = GoogleDriveLoader(
folder_id=folder_id,
load_extended_matadata=True,
# Optional: configure whether to load extended metadata for each Document.
)
doc = loader.load()
```
You can pass load_extended_matadata=True, to add Google Drive document extended details to metadata.
```python
doc[0].metadata
```
### Customize the search pattern
All parameter compatible with Google [`list()`](https://developers.google.com/drive/api/v3/reference/files/list)
API can be set.
To specify the new pattern of the Google request, you can use a `PromptTemplate()`.
The variables for the prompt can be set with `kwargs` in the constructor.
Some pre-formatted requests are proposed (use `{query}`, `{folder_id}` and/or `{mime_type}`):
You can customize the criteria to select the files. A set of predefined filters is proposed:
| template | description |
| -------------------------------------- | --------------------------------------------------------------------- |
| gdrive-all-in-folder | Return all compatible files from a `folder_id` |
| gdrive-query | Search `query` in all drives |
| gdrive-by-name | Search file with name `query` |
| gdrive-query-in-folder | Search `query` in `folder_id` (and sub-folders if `recursive=true`) |
| gdrive-mime-type | Search a specific `mime_type` |
| gdrive-mime-type-in-folder | Search a specific `mime_type` in `folder_id` |
| gdrive-query-with-mime-type | Search `query` with a specific `mime_type` |
| gdrive-query-with-mime-type-and-folder | Search `query` with a specific `mime_type` and in `folder_id` |
```python
loader = GoogleDriveLoader(
folder_id=folder_id,
recursive=False,
template="gdrive-query", # Default template to use
query="machine learning",
num_results=2, # Maximum number of file to load
supportsAllDrives=False, # GDrive `list()` parameter
)
for doc in loader.load():
print("---")
print(doc.page_content.strip()[:60] + "...")
```
You can customize your pattern.
```python
from langchain_core.prompts.prompt import PromptTemplate
loader = GoogleDriveLoader(
folder_id=folder_id,
recursive=False,
template=PromptTemplate(
input_variables=["query", "query_name"],
template="fullText contains '{query}' and name contains '{query_name}' and trashed=false",
), # Default template to use
query="machine learning",
query_name="ML",
num_results=2, # Maximum number of file to load
)
for doc in loader.load():
print("---")
print(doc.page_content.strip()[:60] + "...")
```
The conversion can manage in Markdown format:
- bullet
- link
- table
- titles
Set the attribute `return_link` to `True` to export links.
#### Modes for GSlide and GSheet
The parameter mode accepts different values:
- "document": return the body of each document
- "snippets": return the description of each file (set in metadata of Google Drive files).
The parameter `gslide_mode` accepts different values:
- "single" : one document with <PAGE BREAK>
- "slide" : one document by slide
- "elements" : one document for each elements.
```python
loader = GoogleDriveLoader(
template="gdrive-mime-type",
mime_type="application/vnd.google-apps.presentation", # Only GSlide files
gslide_mode="slide",
num_results=2, # Maximum number of file to load
)
for doc in loader.load():
print("---")
print(doc.page_content.strip()[:60] + "...")
```
The parameter `gsheet_mode` accepts different values:
- `"single"`: Generate one document by line
- `"elements"` : one document with markdown array and <PAGE BREAK> tags.
```python
loader = GoogleDriveLoader(
template="gdrive-mime-type",
mime_type="application/vnd.google-apps.spreadsheet", # Only GSheet files
gsheet_mode="elements",
num_results=2, # Maximum number of file to load
)
for doc in loader.load():
print("---")
print(doc.page_content.strip()[:60] + "...")
```
### Advanced usage
All Google File have a 'description' in the metadata. This field can be used to memorize a summary of the document or others indexed tags (See method `lazy_update_description_with_summary()`).
If you use the `mode="snippet"`, only the description will be used for the body. Else, the `metadata['summary']` has the field.
Sometimes, a specific filter can be used to extract some information from the filename, to select some files with specific criteria. You can use a filter.
Sometimes, many documents are returned. It's not necessary to have all documents in memory at the same time. You can use the lazy versions of methods, to get one document at a time. It's better to use a complex query in place of a recursive search. For each folder, a query must be applied if you activate `recursive=True`.
```python
import os
loader = GoogleDriveLoader(
gdrive_api_file=os.environ["GOOGLE_ACCOUNT_FILE"],
num_results=2,
template="gdrive-query",
filter=lambda search, file: "#test" not in file.get("description", ""),
query="machine learning",
supportsAllDrives=False,
)
for doc in loader.load():
print("---")
print(doc.page_content.strip()[:60] + "...")
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@document_loaders@google_drive.ipynb@.PATH_END.py
|
{
"filename": "ragged_to_sparse_op_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/ops/ragged/ragged_to_sparse_op_test.py",
"type": "Python"
}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.to_sparse op."""
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase):
# Tests for RaggedTensor.to_sparse(): checks the (indices, values,
# dense_shape) produced for ragged tensors of various ragged ranks,
# static shape inference, kernel-level validation errors for malformed
# row splits, and gradient flow through the conversion.
# Verifies the example shown in the RaggedTensor.to_sparse docstring.
def testDocStringExample(self):
rt = ragged_factory_ops.constant([[1, 2, 3], [4], [], [5, 6]])
st = self.evaluate(rt.to_sparse())
# Row 2 is empty, so no indices are emitted for it.
self.assertAllEqual(st.indices,
                    [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]])
self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6])
# dense_shape is (num_rows, longest_row).
self.assertAllEqual(st.dense_shape, [4, 3])
# 2-D string tensor with one ragged dimension (rows of unequal length).
def test2DRaggedTensorWithOneRaggedDimension(self):
rt = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'], [],
                                  ['g']])
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(
    st.indices, [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [2, 0], [4, 0]])
self.assertAllEqual(st.values, b'a b c d e f g'.split())
self.assertAllEqual(st.dense_shape, [5, 3])
# 3-D tensor: only the middle dimension is ragged; innermost is uniform.
def test3DRaggedTensorWithOneRaggedDimension(self):
rt = ragged_factory_ops.constant(
    [[[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]], [[11, 12]], [], [[13, 14]]
    ],
    ragged_rank=1)
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(st.indices,
                    [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],
                     [1, 0, 1], [1, 1, 0], [1, 1, 1], [1, 2, 0], [1, 2, 1],
                     [2, 0, 0], [2, 0, 1], [4, 0, 0], [4, 0, 1]])
self.assertAllEqual(st.values,
                    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
self.assertAllEqual(st.dense_shape, [5, 3, 2])
# 4-D tensor with a single ragged dimension; row 1 is entirely empty.
def test4DRaggedTensorWithOneRaggedDimension(self):
rt = ragged_factory_ops.constant(
    [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [], [[[9, 10], [11, 12]]]],
    ragged_rank=1)
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
self.assertAllEqual(
    st.indices,
    [
        [0, 0, 0, 0],  # index for value=1
        [0, 0, 0, 1],  # index for value=2
        [0, 0, 1, 0],  # index for value=3
        [0, 0, 1, 1],  # index for value=4
        [0, 1, 0, 0],  # index for value=5
        [0, 1, 0, 1],  # index for value=6
        [0, 1, 1, 0],  # index for value=7
        [0, 1, 1, 1],  # index for value=8
        [2, 0, 0, 0],  # index for value=9
        [2, 0, 0, 1],  # index for value=10
        [2, 0, 1, 0],  # index for value=11
        [2, 0, 1, 1],  # index for value=12
    ])
self.assertAllEqual(st.dense_shape, [3, 2, 2, 2])
# 4-D tensor where both the second and third dimensions are ragged.
def test4DRaggedTensorWithTwoRaggedDimensions(self):
rt = ragged_factory_ops.constant(
    [[[[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]]],
     [[[11, 12]], [], [[13, 14]]], []],
    ragged_rank=2)
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(
    st.indices,
    [
        [0, 0, 0, 0],  # index for value=1
        [0, 0, 0, 1],  # index for value=2
        [0, 0, 1, 0],  # index for value=3
        [0, 0, 1, 1],  # index for value=4
        [0, 1, 0, 0],  # index for value=5
        [0, 1, 0, 1],  # index for value=6
        [0, 1, 1, 0],  # index for value=7
        [0, 1, 1, 1],  # index for value=8
        [0, 1, 2, 0],  # index for value=9
        [0, 1, 2, 1],  # index for value=10
        [1, 0, 0, 0],  # index for value=11
        [1, 0, 0, 1],  # index for value=12
        [1, 2, 0, 0],  # index for value=13
        [1, 2, 0, 1],  # index for value=14
    ])
self.assertAllEqual(st.values,
                    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
self.assertAllEqual(st.dense_shape, [3, 3, 3, 2])
# Static shape inference: indices is [nnz, rank], values is [nnz],
# dense_shape is [rank], all known at graph-construction time here.
def testShape(self):
rt = ragged_factory_ops.constant([[1, 2], [3, 4, 5], [6], [], [7]])
st = rt.to_sparse()
self.assertEqual(st.indices.shape.as_list(), [7, 2])
self.assertEqual(st.values.shape.as_list(), [7])
self.assertEqual(st.dense_shape.shape.as_list(), [2])
rt = ragged_factory_ops.constant([[[1, 2]], [], [[3, 4]], []],
                                 ragged_rank=1)
st = rt.to_sparse()
self.assertEqual(st.indices.shape.as_list(), [4, 3])
self.assertEqual(st.values.shape.as_list(), [4])
self.assertEqual(st.dense_shape.shape.as_list(), [3])
rt = ragged_factory_ops.constant([[[1], [2, 3, 4, 5, 6, 7]], [[]]])
st = rt.to_sparse()
self.assertEqual(st.indices.shape.as_list(), [7, 3])
self.assertEqual(st.values.shape.as_list(), [7])
self.assertEqual(st.dense_shape.shape.as_list(), [3])
# Malformed row splits (built with validate=False) must be rejected by
# the kernel at execution time with InvalidArgumentError.
def testKernelErrors(self):
# An empty vector, defined using a placeholder to ensure that we can't
# determine that it's invalid at graph-construction time.
empty_vector = array_ops.placeholder_with_default(
    array_ops.zeros([0], dtypes.int64), shape=None)
bad_rt1 = ragged_tensor.RaggedTensor.from_row_splits(
    row_splits=[2, 3], values=[1, 2, 3], validate=False)
bad_split0 = r'First value of ragged splits must be 0.*'
with self.assertRaisesRegex(errors.InvalidArgumentError, bad_split0):
  self.evaluate(bad_rt1.to_sparse())
bad_rt2 = ragged_tensor.RaggedTensor.from_row_splits(
    row_splits=[0, 5], values=empty_vector, validate=False)
bad_rt3 = ragged_tensor.RaggedTensor.from_row_splits(
    row_splits=[0, 1],
    values=ragged_tensor.RaggedTensor.from_row_splits(
        row_splits=[0, 5], values=empty_vector, validate=False),
    validate=False)
split_mismatch1_error = r'Final value of ragged splits must match.*'
for rt in [bad_rt2, bad_rt3]:
  with self.assertRaisesRegex(errors.InvalidArgumentError,
                              split_mismatch1_error):
    self.evaluate(rt.to_sparse())
bad_rt4 = ragged_tensor.RaggedTensor.from_row_splits(
    row_splits=[0, 5],
    values=ragged_tensor.RaggedTensor.from_row_splits(
        row_splits=[0], values=empty_vector, validate=False),
    validate=False)
split_mismatch2_error = r'Final value of ragged splits must match.*'
with self.assertRaisesRegex(errors.InvalidArgumentError,
                            split_mismatch2_error):
  self.evaluate(bad_rt4.to_sparse())
bad_rt5 = ragged_tensor.RaggedTensor.from_row_splits(
    row_splits=empty_vector, values=[], validate=False)
empty_splits_error = (r'ragged splits may not be empty.*')
with self.assertRaisesRegex(errors.InvalidArgumentError,
                            empty_splits_error):
  self.evaluate(bad_rt5.to_sparse())
# Gradients of the sparse values w.r.t. the ragged flat_values should be
# all ones (times the chain-rule factor 2.0 for rt2). Graph-mode only:
# gradients_impl.gradients is unavailable under eager execution.
def testGradient(self):
if context.executing_eagerly():
  return
# rt1.shape == rt2.shape == [2, (D2), (D3), 2].
rt1 = ragged_factory_ops.constant(
    [[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0]]]], ragged_rank=2)
rt2 = ragged_factory_ops.constant(
    [[[[9.0, 8.0], [7.0, 6.0]], [[5.0, 4.0]]]], ragged_rank=2)
rt = ragged_functional_ops.map_flat_values(math_ops.add, rt1, rt2 * 2.0)
st = rt.to_sparse()
g1, g2 = gradients_impl.gradients(st.values,
                                  [rt1.flat_values, rt2.flat_values])
self.assertAllEqual(g1, [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
self.assertAllEqual(g2, [[2.0, 2.0], [2.0, 2.0], [2.0, 2.0]])
# Standard test entry point: run every test case defined in this module.
if __name__ == '__main__':
googletest.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@ops@ragged@ragged_to_sparse_op_test.py@.PATH_END.py
|
{
"filename": "test_colorlist_validator.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/_plotly_utils/tests/validators/test_colorlist_validator.py",
"type": "Python"
}
|
import pytest
import numpy as np
from _plotly_utils.basevalidators import ColorlistValidator
# Fixtures
# --------
@pytest.fixture()
def validator():
    """Fixture: a ColorlistValidator bound to dummy prop/parent names."""
    instance = ColorlistValidator("prop", "parent")
    return instance
# Rejection
# ---------
@pytest.mark.parametrize("val", [set(), 23, 0.5, {}, "redd"])
def test_rejection_value(validator, val):
    """Inputs that are not valid color lists must raise ValueError."""
    with pytest.raises(ValueError) as excinfo:
        validator.validate_coerce(val)
    message = str(excinfo.value)
    assert "Invalid value" in message
@pytest.mark.parametrize("val", [[set()], [23, 0.5], [{}, "red"], ["blue", "redd"]])
def test_rejection_element(validator, val):
    """Lists containing at least one invalid color must report bad elements."""
    with pytest.raises(ValueError) as excinfo:
        validator.validate_coerce(val)
    message = str(excinfo.value)
    assert "Invalid element(s)" in message
# Acceptance
# ----------
@pytest.mark.parametrize(
    "val",
    [
        ["blue"],
        ["red", "rgb(255, 0, 0)"],
        np.array(["red", "rgb(255, 0, 0)"]),
        ["hsl(0, 100%, 50%)", "hsla(0, 100%, 50%, 100%)", "hsv(0, 100%, 100%)"],
        np.array(
            ["hsl(0, 100%, 50%)", "hsla(0, 100%, 50%, 100%)", "hsv(0, 100%, 100%)"]
        ),
        ["hsva(0, 100%, 100%, 50%)"],
    ],
)
def test_acceptance_aok(val, validator):
    """Valid color lists (plain lists or ndarrays) coerce to a Python list,
    and present() returns the same colors as a tuple."""
    result = validator.validate_coerce(val)
    assert isinstance(result, list)
    presented = validator.present(result)
    assert presented == tuple(val)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@_plotly_utils@tests@validators@test_colorlist_validator.py@.PATH_END.py
|
{
"filename": "peft_lora_seq2seq_accelerate_big_model_inference.ipynb",
"repo_name": "huggingface/peft",
"repo_path": "peft_extracted/peft-main/examples/conditional_generation/peft_lora_seq2seq_accelerate_big_model_inference.ipynb",
"type": "Jupyter Notebook"
}
|
```python
from transformers import AutoModelForSeq2SeqLM
from peft import PeftModel, PeftConfig
import torch
from datasets import load_dataset
import os
from transformers import AutoTokenizer
from torch.utils.data import DataLoader
from transformers import default_data_collator, get_linear_schedule_with_warmup
from tqdm import tqdm
from datasets import load_dataset
dataset_name = "twitter_complaints"
text_column = "Tweet text"
label_column = "text_label"
batch_size = 8
peft_model_id = "smangrul/twitter_complaints_bigscience_T0_3B_LORA_SEQ_2_SEQ_LM"
config = PeftConfig.from_pretrained(peft_model_id)
```
```python
peft_model_id = "smangrul/twitter_complaints_bigscience_T0_3B_LORA_SEQ_2_SEQ_LM"
max_memory = {0: "6GIB", 1: "0GIB", 2: "0GIB", 3: "0GIB", 4: "0GIB", "cpu": "30GB"}
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path, device_map="auto", max_memory=max_memory)
model = PeftModel.from_pretrained(model, peft_model_id, device_map="auto", max_memory=max_memory)
```
```python
from datasets import load_dataset
dataset = load_dataset("ought/raft", dataset_name)
classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names]
print(classes)
dataset = dataset.map(
lambda x: {"text_label": [classes[label] for label in x["Label"]]},
batched=True,
num_proc=1,
)
print(dataset)
dataset["train"][0]
```
```python
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes])
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[label_column]
model_inputs = tokenizer(inputs, truncation=True)
labels = tokenizer(
targets, max_length=target_max_length, padding="max_length", truncation=True, return_tensors="pt"
)
labels = labels["input_ids"]
labels[labels == tokenizer.pad_token_id] = -100
model_inputs["labels"] = labels
return model_inputs
processed_datasets = dataset.map(
preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=True,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["train"]
test_dataset = processed_datasets["test"]
def collate_fn(examples):
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True)
test_dataloader = DataLoader(test_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True)
```
```python
model.eval()
i = 15
inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt")
print(dataset["test"][i]["Tweet text"])
print(inputs)
with torch.no_grad():
outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=10)
print(outputs)
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
```
@NYTsupport i have complained a dozen times & yet my papers are still thrown FAR from my door. Why is this so hard to resolve?
{'input_ids': tensor([[25335, 1499, 3, 10, 3320, 12056, 382, 20390, 3, 23,
43, 25932, 3, 9, 9611, 648, 3, 184, 4624, 117,
780, 82, 5778, 33, 341, 3, 12618, 377, 4280, 45,
82, 1365, 5, 1615, 19, 48, 78, 614, 12, 7785,
58, 16229, 3, 10, 3, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}
tensor([[ 0, 10394, 1]], device='cuda:0')
['complaint']
```python
model.eval()
eval_preds = []
for _, batch in enumerate(tqdm(eval_dataloader)):
batch = {k: v.to("cuda") for k, v in batch.items() if k != "labels"}
with torch.no_grad():
outputs = model.generate(**batch, max_new_tokens=10)
preds = outputs.detach().cpu().numpy()
eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))
```
0%| | 0/7 [00:00<?, ?it/s]You're using a T5TokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
100%|████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:10<00:00, 1.48s/it]
```python
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["train"][label_column]):
if pred.strip() == true.strip():
correct += 1
total += 1
accuracy = correct / total * 100
print(f"{accuracy=}")
print(f"{eval_preds[:10]=}")
print(f"{dataset['train'][label_column][:10]=}")
```
accuracy=100.0
eval_preds[:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint']
dataset['train'][label_column][:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint']
```python
model.eval()
test_preds = []
for _, batch in enumerate(tqdm(test_dataloader)):
batch = {k: v for k, v in batch.items() if k != "labels"}
with torch.no_grad():
outputs = model.generate(**batch, max_new_tokens=10)
preds = outputs.detach().cpu().numpy()
test_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))
if len(test_preds) > 100:
break
test_preds
```
|
huggingfaceREPO_NAMEpeftPATH_START.@peft_extracted@peft-main@examples@conditional_generation@peft_lora_seq2seq_accelerate_big_model_inference.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/__init__.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import axes_size as Size
from .axes_divider import Divider, SubplotDivider, LocatableAxes, \
make_axes_locatable
from .axes_grid import Grid, ImageGrid, AxesGrid
#from axes_divider import make_axes_locatable
from matplotlib.cbook import warn_deprecated
# Emit a deprecation warning on import: the whole ``mpl_toolkits.axes_grid``
# package was deprecated in Matplotlib 2.1 in favor of
# ``mpl_toolkits.axes_grid1`` and ``mpl_toolkits.axisartist``.
# Fixed message typo/grammar: "provies" -> "provide".
warn_deprecated(since='2.1',
                name='mpl_toolkits.axes_grid',
                alternative='mpl_toolkits.axes_grid1 and'
                            ' mpl_toolkits.axisartist provide the same'
                            ' functionality',
                obj_type='module')
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py2@mpl_toolkits@axes_grid@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "ericagol/TRAPPIST1_Spitzer",
"repo_path": "TRAPPIST1_Spitzer_extracted/TRAPPIST1_Spitzer-master/src/v09_eps0.1_2k/README.md",
"type": "Markdown"
}
|
2/11/2020
Okay, setting up a 10-day, 2000-step x 112 CPU
run. Let's see how this goes!
This seems to be the run I'm going with.
6/18/2020
Here is some description of how to run an HMC chain, how to set up the slurm
runs on Hyak Mox, and what the output files contain:
0. First, you will need to install Julia v1.0.1 (although other Julia v1.*
ought to work as well). Then install Julia packages: ForwardDiff,
DelimitedFiles, Printf, SpecialFunctions, LinearAlgebra, IterativeSolvers
& JLD2. The code was run with the following versions:
"ForwardDiff" => v"0.10.9"
"DiffResults" => v???
"IterativeSolvers" => v"0.8.1"
"JLD2" => v"0.1.11"
"SpecialFunctions" => v"0.8.0"
1. To run a single 10-step chain, from the prompt, type:
v09_eps0.1_2k$ julia trappist1_run_hmc.jl 000 &> trappist1_hmc_000.txt &
(Right now the 2000 step chains are commented out in trappist1_run_hmc.jl
and an example of a 10-step chain will be run, which takes about one hour
on my MacBook. To run a 2000 step chain, the commented lines should be
uncommented, and the 10-step lines should be commented; this took about
9 days for each chain on Hyak.)
To run the trappist1_run_hmc.jl script requires two input files:
- ../../data/T1_timings_20191203.txt # Transit times and uncertainties
- ../../data/elements_noprior_students.txt # Initial orbital elements
Also required is source code:
-trappist1_run_hmc.jl: This is a script called by the slurm
scripts which initializes one of the Markov chains.
-run_hmc_background_student_rescale.jl: This is the workhorse
script which calls NbodyGradient (via ttv.jl).
-log_students_prob.jl: Student likelihood function (called
in run_hmc_background_student_rescale.jl).
-../compute_grad_num.jl:
Computes gradients numerically with finite differences to check that the code
is working properly (commented out - this takes a long time to run since it
computes finite differences at BigFloat precision).
-../CGS.jl:
Defines some fundamental constants.
-../extract_planet.jl:
Extracts transit times for individual planets.
-../nlog_prior.jl:
Prior function which places bounds on parameters and corrects for eccentricity
vector bias with 1/e.
-../loglinspace.jl:
Function to define a vector of linear or logarithmically spaced values.
-../regress.jl:
Function which carries out linear regression.
-../../NbodyGradient/src/ttv.jl:
This is the transit-time N-body integrator which computes gradients of each
transit time with respect to the initial orbital elements and mass ratios.
It has several dependencies in that source directory (this has been copied
from the NbodyGradient repository).
2. The 2000-step chains were run with slurm on Hyak Mox with the command:
sbatch -p astro -A astro trappist1_run_hmc_05.slurm
etc. for the four slurm scripts run on 4 Hyak nodes
of 28 threads each, giving a total of 112 chains.
Note that the slurm scripts contain PATH definitions which need
to point to the local Julia installation.
Each chain was run for 2000 steps using epsilon0 = 0.1 (hence the name of this directory)
and nleap0 = 20.
3. Each job run has an output file trappist1_hmc_***.txt
which have been placed into a zip file trappist1_hmc_output.zip
The results are output to JLD2 compressed HDF files
which have been moved to the subdirectory ../../data/output_files/
The contents of each jld2 file are:
```Hyak$ julia
_
_ _ _(_)_ | Documentation: https://docs.julialang.org
(_) | (_) (_) |
_ _ _| |_ __ _ | Type "?" for help, "]?" for Pkg help.
| | | | | | |/ _` | |
| | |_| | | | (_| | | Version 1.0.1 (2018-09-29)
_/ |\__'_|_|_|\__'_| | Official https://julialang.org/ release
|__/ |
julia> using JLD2
julia> @load "T1_run_hmc_student_ecc_lgndof_V1exp2nuinv_nstep2000_eps0.1_nleap20_318.jld2"
26-element Array{Symbol,1}:
:n # Number of bodies = Number of planets + 1 (for the star).
:cov_save # Covariance matrix used in HMC.
:hess_save # Hessian matrix.
:fname # File name for the input data (T1_timings_20191203.txt in this case).
:felements # File name for the initial orbital elements (elements_noprior_students.txt).
:nparam # Number of Markov chain parameters (5*(n-1)+2 in this case - 5 parameters
for each plane-parallel planet, and 2 Student's t distribution parameters).
:t0 # Initial time of integration.
:h # Integration time step.
:tmax # Duration of integration (this needs increasing as more transit times are added).
:ntrans # Total number of observed transits (447 in this case).
:iplanet # Vector of bodies associated with each transit (1=star; 2=b; 3=c; ...; 8=h).
:indx # Index of each observed transit time associated in the array of computed transits.
:tobs_tot # Vector holding transit times.
:sobs_tot # Vector holding timing uncertainties.
:data # Array of input data (planet, transit ephemeris, transit time, uncertainty).
:count1 # Vector length n of the total number of simulated transits for each planet (and 0 for the star).
:state # Results of the HMC chain. Size is (10*(n-1)+6,nstep) (twice number of parameters for momenta
plus the log hamiltonian and log of the posterior function (i.e. without kinetic energy term).
:hessian # Hessian matrix.
:cholh # Cholesky decomposition of Hessian - used for choosing a momentum vector.
:nleap0 # Maximum length of each leapfrog HMC integration (chosen between 0.8*nleap0 and nleap0).
:nacc # Number of accepted steps.
:nstep # Number of steps in the Markov chain.
:stats # Five statistics which are saved for each leapfrog step: [epsilon,nleap,alpha,uni,accepted],
where epsilon is leapfrog timestep (chosen from absolute value of a Gaussian of width epsilon0).
nleap is the number of integration steps for leapfrog integration, alpha is the ratio of
the log posterios of the prior and trial steps, uni is uniform deviate for metropolis
rejection, and accepted is 1 if accepted, zero if not.
:elements # Initial orbital element array (n x 7) contains
mass, period, t0, e*cos(omega), e*sin(omega), inclination, Omega for each planet in Jacobi coordinates.
First line is for the star, which only has a mass value specified (usually 1.0).
:chi_opt # Optimized initial log likelihood (without log prior).
:x_opt # Values of initial optimized likelihood.
```
(Forgot to save the value of epsilon0=0.1)
The results given in the paper were taken from the state variable for the
112 chains run for 2000 leapfrog steps. The state variables were combined
into a large array "state_total" which is saved in the path ../../tex/figures/julia/
for use in creating figures and tables for the paper.
|
ericagolREPO_NAMETRAPPIST1_SpitzerPATH_START.@TRAPPIST1_Spitzer_extracted@TRAPPIST1_Spitzer-master@src@v09_eps0.1_2k@README.md@.PATH_END.py
|
{
"filename": "_smoothing.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/contour/line/_smoothing.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SmoothingValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``contour.line.smoothing`` (bounded to [0, 1.3])."""

    def __init__(self, plotly_name="smoothing", parent_name="contour.line", **kwargs):
        # Pull overridable defaults out of kwargs, then forward the rest.
        edit_type = kwargs.pop("edit_type", "plot")
        upper_bound = kwargs.pop("max", 1.3)
        lower_bound = kwargs.pop("min", 0)
        super(SmoothingValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=upper_bound,
            min=lower_bound,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@contour@line@_smoothing.py@.PATH_END.py
|
{
"filename": "5-Chempy_function.ipynb",
"repo_name": "oliverphilcox/ChempyMulti",
"repo_path": "ChempyMulti_extracted/ChempyMulti-master/Chempy_tutorials/5-Chempy_function.ipynb",
"type": "Jupyter Notebook"
}
|
## Chempy
we will now introduce the Chempy function which will calculate the chemical evolution of a one-zone open box model
```python
%pylab inline
```
Populating the interactive namespace from numpy and matplotlib
```python
# loading the default parameters
from Chempy.parameter import ModelParameters
a = ModelParameters()
```
## Loading all the input
- solar abundances
- SFR
- infall
- initial abundances and inflowing abundances
```python
# Initialising sfr, infall, elements to trace, solar abundances
from Chempy.wrapper import initialise_stuff
basic_solar, basic_sfr, basic_infall = initialise_stuff(a)
elements_to_trace = a.elements_to_trace
```
## Elemental abundances at start
We need to define the abundances of:
- The ISM at beginning
- The corona gas at beginning
- The cosmic inflow into the corona for all times.
For all we chose primordial here.
```python
# Setting the abundance fractions at the beginning to primordial
from Chempy.infall import INFALL, PRIMORDIAL_INFALL
basic_primordial = PRIMORDIAL_INFALL(list(elements_to_trace),np.copy(basic_solar.table))
basic_primordial.primordial()
basic_primordial.fractions
```
array([ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0.76, 0.24, 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ])
## Initialising the element evolution matrix
We now feed everything into the abundance matrix and check its entries
```python
# Initialising the ISM instance
from Chempy.time_integration import ABUNDANCE_MATRIX
cube = ABUNDANCE_MATRIX(np.copy(basic_sfr.t),np.copy(basic_sfr.sfr),np.copy(basic_infall.infall),list(elements_to_trace),list(basic_primordial.symbols),list(basic_primordial.fractions),float(a.gas_at_start),list(basic_primordial.symbols),list(basic_primordial.fractions),float(a.gas_reservoir_mass_factor),float(a.outflow_feedback_fraction),bool(a.check_processes),float(a.starformation_efficiency),float(a.gas_power), float(a.sfr_factor_for_cosmic_accretion), list(basic_primordial.symbols), list(basic_primordial.fractions))
# All the entries of the ISM instance
print(list(cube.cube.dtype.names))
# Helium at start
print('Primordial ratio of H to He: ',cube.cube['H'][0]/cube.cube['He'][0])
print('Helium over time: ',cube.cube['He'])
```
['sfr', 'infall', 'time', 'feedback', 'mass_in_remnants', 'stars', 'gas', 'Z', 'alpha', 'sn1a', 'sn2', 'pn', 'bh', 'hn', 'Al', 'Ar', 'B', 'Be', 'C', 'Ca', 'Cl', 'Co', 'Cr', 'Cu', 'F', 'Fe', 'Ga', 'Ge', 'H', 'He', 'K', 'Li', 'Mg', 'Mn', 'N', 'Na', 'Ne', 'Ni', 'O', 'P', 'S', 'Sc', 'Si', 'Ti', 'V', 'Zn']
('Primordial ratio of H to He: ', 3.1666666666666665)
('Helium over time: ', array([ 9.26869415e-05, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]))
## Time integration
With the advance_one_step method we can evolve the matrix in time, given that we provide the feedback from each step's previous SSP.
```python
# Now we run the time integration
from Chempy.wrapper import SSP_wrap
basic_ssp = SSP_wrap(a)
for i in range(len(basic_sfr.t)-1):
j = len(basic_sfr.t)-i
# The metallicity needs to be passed for the yields to be calculated as well as the initial elemental abundances
element_fractions = []
for item in elements_to_trace:
element_fractions.append(float(np.copy(cube.cube[item][max(i-1,0)]/cube.cube['gas'][max(i-1,0)])))## gas element fractions from one time step before
metallicity = float(cube.cube['Z'][i])
time_steps = np.copy(basic_sfr.t[:j])
basic_ssp.calculate_feedback(float(metallicity), list(elements_to_trace), list(element_fractions), np.copy(time_steps))
cube.advance_one_step(i+1,np.copy(basic_ssp.table),np.copy(basic_ssp.sn2_table),np.copy(basic_ssp.agb_table),np.copy(basic_ssp.sn1a_table))
print(cube.cube['He'])
```
[ 9.26869415e-05 1.40860453e-02 2.46201078e-02 3.22716659e-02
3.75769269e-02 4.09880689e-02 4.28975911e-02 4.36286864e-02
4.34475615e-02 4.25757031e-02 4.11957803e-02 3.94498921e-02
3.74567135e-02 3.53087975e-02 3.30824748e-02 3.08305729e-02
2.86006517e-02 2.64208026e-02 2.43259121e-02 2.23255178e-02
2.04333626e-02 1.86576398e-02 1.69950788e-02 1.54481327e-02
1.40162342e-02 1.26941237e-02 1.14762096e-02 1.03656268e-02]
## Making abundances from element fractions
The cube stores everything as elemental mass fractions; we use a tool to convert these to abundances scaled to solar:
```python
# Turning the fractions into dex values (normalised to solar [X/H])
from Chempy.making_abundances import mass_fraction_to_abundances
abundances,elements,numbers = mass_fraction_to_abundances(np.copy(cube.cube),np.copy(basic_solar.table))
print(abundances['He'])
```
[-0.02951008 -0.02944417 -0.02371412 -0.01804385 -0.01281405 -0.00807708
-0.0036858 0.00039861 0.00418825 0.00773171 0.0110975 0.01425566
0.01724532 0.02006999 0.02279457 0.02534465 0.02780392 0.03007043
0.03243048 0.03472625 0.03703849 0.0394542 0.04178142 0.04408782
0.04643867 0.04874673 0.05094251 0.05349851]
/home/jan/anaconda2/lib/python2.7/site-packages/Chempy-0.1-py2.7.egg/Chempy/making_abundances.py:49: RuntimeWarning: divide by zero encountered in log10
cube_abundances[item] = np.where(cube_abundances[item] == 0. , -np.inf, np.log10(cube_abundances[item]) + 12.)
```python
## Alpha enhancement over time
plot(cube.cube['time'][1:],abundances['O'][1:]-abundances['Fe'][1:])
plt.xlabel('time in Gyr')
plt.ylabel('[O/Fe]')
```
<matplotlib.text.Text at 0x7f09c30bfc50>

```python
# [X/Fe] vs. [Fe/H]
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], label = 'O')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], label = 'Mn')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], label = 'N')
plt.xlabel('[Fe/H]')
plt.ylabel('[X/Fe]')
plt.legend()
```
<matplotlib.legend.Legend at 0x7f09c0cbe890>

## Likelihood calculation
There are a few built-in functions (actually representing the observational constraints from the Chempy paper) which return a likelihood. One of these is called sol_norm and compares the proto-solar abundances with the Chempy ISM abundances 4.5 Gyr ago.
```python
# Here we load a likelihood test for the solar abundances
# This is how it looks for the prior parameters with the default yield set
from Chempy.data_to_test import sol_norm
probabilities, abundance_list, element_names = sol_norm(True,a.name_string,np.copy(abundances),np.copy(cube.cube),elements_to_trace,a.element_names,np.copy(basic_solar.table),a.number_of_models_overplotted,a.produce_mock_data,a.use_mock_data,a.error_inflation)
```
<matplotlib.figure.Figure at 0x7f09c30bebd0>

## Net vs. total yield
Now we will change a little detail in the time-integration. Instead of letting unprocessed material that is expelled from the stars ('unprocessed_mass_in_winds' in the yield tables) being composed of the stellar birth material, which would be consistent (and is what I call 'net' yield), we now use solar-scaled material which only has the same metallicity as the stellar birth material (This is what happens if yield tables are giving the total yield including the unprocessed material, which means that the author usually uses solar-scaled material which is then expelled by the star, but might not even be produced by it). Therefore we see a difference in the likelihood which is better for the total yields case (-180.05 vs -198.30). We see the difference especially well in K and Ti.
```python
cube = ABUNDANCE_MATRIX(np.copy(basic_sfr.t),np.copy(basic_sfr.sfr),np.copy(basic_infall.infall),list(elements_to_trace),list(basic_primordial.symbols),list(basic_primordial.fractions),float(a.gas_at_start),list(basic_primordial.symbols),list(basic_primordial.fractions),float(a.gas_reservoir_mass_factor),float(a.outflow_feedback_fraction),bool(a.check_processes),float(a.starformation_efficiency),float(a.gas_power), float(a.sfr_factor_for_cosmic_accretion), list(basic_primordial.symbols), list(basic_primordial.fractions))
basic_ssp = SSP_wrap(a)
for i in range(len(basic_sfr.t)-1):
j = len(basic_sfr.t)-i
metallicity = float(cube.cube['Z'][i])
# Instead of using the ISM composition we use solar scaled material
solar_scaled_material = PRIMORDIAL_INFALL(list(elements_to_trace),np.copy(basic_solar.table))
solar_scaled_material.solar(np.log10(metallicity/basic_solar.z))
time_steps = np.copy(basic_sfr.t[:j])
basic_ssp.calculate_feedback(float(metallicity), list(elements_to_trace), list(solar_scaled_material.fractions), np.copy(time_steps))
cube.advance_one_step(i+1,np.copy(basic_ssp.table),np.copy(basic_ssp.sn2_table),np.copy(basic_ssp.agb_table),np.copy(basic_ssp.sn1a_table))
abundances,elements,numbers = mass_fraction_to_abundances(np.copy(cube.cube),np.copy(basic_solar.table))
# We do the solar abundance test again and see that the likelihood improves
probabilities, abundance_list, element_names = sol_norm(True,a.name_string,np.copy(abundances),np.copy(cube.cube),elements_to_trace,a.element_names,np.copy(basic_solar.table),a.number_of_models_overplotted,a.produce_mock_data,a.use_mock_data,a.error_inflation)
```
/home/jan/anaconda2/lib/python2.7/site-packages/ipykernel/__main__.py:10: RuntimeWarning: divide by zero encountered in log10
<matplotlib.figure.Figure at 0x7f09c3135b90>

## Making chemical evolution modelling fast and flexible
Now we have all ingredients at hand. We use a wrapper function where we only need to pass the ModelParameters.
```python
# This is a convenience function
from Chempy.wrapper import Chempy
a = ModelParameters()
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], label = 'O')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], label = 'Mn')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], label = 'N')
plt.xlabel('[Fe/H]')
plt.ylabel('[X/Fe]')
plt.legend()
```
<matplotlib.legend.Legend at 0x7f09c0a71a90>

## IMF effect
now we can easily check the effect of the IMF on the chemical evolution
```python
# prior IMF
a = ModelParameters()
a.imf_parameter= (22.8978, 716.4, 0.25,-2.29)
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], label = 'O', color = 'b')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], label = 'Mn', color = 'orange')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], label = 'N', color = 'g')
# top-heavy IMF
a = ModelParameters()
a.imf_parameter = (22.8978, 716.4, 0.25,-2.09)
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], color = 'b', linestyle = ':')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], color = 'orange', linestyle = ':')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], color = 'g', linestyle = ':')
# bottom-heavy IMF
a = ModelParameters()
a.imf_parameter = (22.8978, 716.4, 0.25,-2.49)
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], color = 'b', linestyle = '--')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], color = 'orange', linestyle = '--')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], color = 'g', linestyle = '--')
plt.xlabel('[Fe/H]')
plt.ylabel('[X/Fe]')
plt.title('IMF effect: top-heavy as dotted line, bottom-heavy as dashed line')
plt.legend()
```
<matplotlib.legend.Legend at 0x7f09c09d2710>

## SFR effect
We can do the same for the peak of the SFR etc...
```python
# Prior SFR
a = ModelParameters()
a.sfr_scale = 3.5
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], label = 'O', color = 'b')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], label = 'Mn', color = 'orange')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], label = 'N', color = 'g')
# Early peak in the SFR
a = ModelParameters()
a.sfr_scale = 1.5
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], color = 'b', linestyle = ':')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], color = 'orange', linestyle = ':')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], color = 'green', linestyle = ':')
# late peak in the SFR
a = ModelParameters()
a.sfr_scale = 6.5
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], color = 'b', linestyle = '--')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], color = 'orange', linestyle = '--')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], color = 'green', linestyle = '--')
plt.xlabel('[Fe/H]')
plt.ylabel('[X/Fe]')
plt.title('SFR effect: early peak as dotted line, late peak as dashed line')
plt.legend()
```
<matplotlib.legend.Legend at 0x7f09c08eed90>

## Time resolution
The time steps are equidistant and the resolution is flexible. Even with coarse 0.5Gyr resolution the results are quite good, saving a lot of computational time. Here we test different time resolution of 0.5, 0.1 and 0.025 Gyr.
All results converge after metallicity increases above -1. The shorter time sampling allows more massive stars to explode first which generally have alpha enhanced yields, therefore the [O/Fe] is higher in the beginning.
```python
## 0.5 Gyr resolution
a = ModelParameters()
a.time_steps = 28 # default
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], label = 'O', color = 'b')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], label = 'Mn', color = 'orange')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], label = 'N', color = 'g')
# 0.1 Gyr resolution
a = ModelParameters()
a.time_steps = 136
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], color = 'b', linestyle = ':')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], color = 'orange', linestyle = ':')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], color = 'green', linestyle = ':')
# 25 Myr resolution
a = ModelParameters()
a.time_steps = 541
cube, abundances = Chempy(a)
plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], color = 'b', linestyle = '--')
plot(abundances['Fe'][1:],abundances['Mn'][1:]-abundances['Fe'][1:], color = 'orange', linestyle = '--')
plot(abundances['Fe'][1:],abundances['N'][1:]-abundances['Fe'][1:], color = 'green', linestyle = '--')
plt.xlabel('[Fe/H]')
plt.ylabel('[X/Fe]')
plt.title('Time resolution effect: 0.5 solid, 0.1 dotted, 0.025Gyr dashed line')
plt.legend()
```
/home/jan/anaconda2/lib/python2.7/site-packages/Chempy-0.1-py2.7.egg/Chempy/making_abundances.py:49: RuntimeWarning: invalid value encountered in log10
cube_abundances[item] = np.where(cube_abundances[item] == 0. , -np.inf, np.log10(cube_abundances[item]) + 12.)
<matplotlib.legend.Legend at 0x7f09c0803190>

# A note on chemical evolution tracks and 'by eye' fit
Sometimes astronomers like to show that their chemical evolution track runs through some stellar abundance data points. But if we want the computer to assess the fit objectively, we need to know the selection function of the stars that we try to match, and we need to take our star formation history into account (maybe there are almost no stars formed after 8 Gyr).
- We assume that we have an unbiased sample of red clump stars
- We reproduce its selection function by multiplying their age-distribution for a flat SFR with the SFR.
(for the age distribution I have included a cut from a mock catalogue according to [Just&Rybizki 2016](http://adsabs.harvard.edu/abs/2016AN....337..880J) but you could also use the analytic formula from [Bovy+2014](http://adsabs.harvard.edu/abs/2014ApJ...790..127B))
- Then we sample some synthetic stars (with observational errors) along the chemical evolutionary track
```python
# Default model parameters
from Chempy import localpath
a = ModelParameters()
cube, abundances = Chempy(a)
# Red clump age distribution
selection = np.load(localpath + "input/selection/red_clump_new.npy")
time_selection = np.load(localpath + "input/selection/time_red_clump_new.npy")
plt.plot(time_selection,selection)
plt.xlabel('Age in Gyr')
plt.title('Age distribution of Red clump stars')
plt.show()
# We need to put the age distribution on the same time-steps as our model
selection = np.interp(cube.cube['time'], time_selection[::-1], selection)
plt.plot(cube.cube['time'],selection)
plt.xlabel('time in Gyr')
plt.title('Normalisation for a population of Red clump stars')
plt.show()
# Comparing to the SFR
plt.plot(cube.cube['time'],cube.cube['sfr'])
plt.xlabel('time in Gyr')
plt.title('SFR')
plt.show()
# Convolution of SFR and Red clump age distribution
weight = cube.cube['sfr']*selection
plt.plot(cube.cube['time'],weight)
plt.xlabel('time in Gyr')
plt.title('Weight to reproduce red clump stellar sample')
plt.show()
```




```python
# Here we sample 1000 stars with this age-distribution
from Chempy.data_to_test import sample_stars
sample_size = 1000
x,y = sample_stars(cube.cube['sfr'][1:],selection[1:],abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:],float(basic_solar.table['error'][np.where(basic_solar.table['Symbol']=='Fe')]),float(basic_solar.table['error'][np.where(basic_solar.table['Symbol']=='O')]),int(sample_size))
plt.plot(x,y,"g.", alpha = 0.2, label = '(%d) synthesized red clum stars' %(int(sample_size)))
plt.plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:], 'r', label = 'evolutionary track')
plt.xlabel('[Fe/H]')
plt.ylabel('[O/Fe]')
plt.title("Sampling from SFH and red clump age distribution")
plt.legend(bbox_to_anchor = [1,1.5])
plt.show()
```

```python
# And we plot the 2d histogramm where we see that our model prediction for a red clump population
plt.hist2d(x,y,20)
plt.plot(abundances['Fe'][1:],abundances['O'][1:]-abundances['Fe'][1:],'r')
plt.xlabel('[Fe/H]')
plt.ylabel('[O/Fe]')
plt.title("Sampling from SFH and red clump age distribution")
plt.show
```
<function matplotlib.pyplot.show>

This PDF can then be compared to real data to get a realistic likelihood.
## The nucleosynthetic feedback per element
With the plot_processes routine we can plot the total feedback of each element and the fractional contribution from each nucleosynthetic feedback for a specific Chempy run.
```python
# Loading the routine and plotting the process contribution into the current folder
# Total enrichment mass in gray to the right, single process fractional contribution to the left
from Chempy.data_to_test import plot_processes
plot_processes(True,a.name_string,cube.sn2_cube,cube.sn1a_cube,cube.agb_cube,a.element_names,np.copy(cube),a.number_of_models_overplotted)
```
[0]
<matplotlib.figure.Figure at 0x7f09c2e3a910>

|
oliverphilcoxREPO_NAMEChempyMultiPATH_START.@ChempyMulti_extracted@ChempyMulti-master@Chempy_tutorials@5-Chempy_function.ipynb@.PATH_END.py
|
{
"filename": "geo.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/matplotlib/projections/geo.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
import matplotlib.spines as mspines
import matplotlib.axis as maxis
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
    """
    An abstract base class for geographic projections.

    Concrete subclasses supply the actual coordinate transform via
    ``_get_core_transform``; this base class wires up the fixed
    longitude/latitude limits, the circular map boundary and the
    tick/grid machinery shared by all projections.
    """
    class ThetaFormatter(Formatter):
        """
        Used to format the theta tick labels.  Converts the native
        unit of radians into degrees and adds a degree symbol.
        """
        def __init__(self, round_to=1.0):
            # Tick values are rounded to the nearest multiple of
            # *round_to* (in degrees) before rendering.
            self._round_to = round_to

        def __call__(self, x, pos=None):
            degrees = (x / np.pi) * 180.0
            degrees = np.round(degrees / self._round_to) * self._round_to
            if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
                return r"$%0.0f^\circ$" % degrees
            else:
                return "%0.0f\N{DEGREE SIGN}" % degrees

    # Number of interpolation steps used to approximate curved segments.
    RESOLUTION = 75

    def _init_axis(self):
        """Create the x- and y-axis artists for this Axes."""
        self.xaxis = maxis.XAxis(self)
        self.yaxis = maxis.YAxis(self)
        # Do not register xaxis or yaxis with spines -- as done in
        # Axes._init_axis() -- until GeoAxes.xaxis.cla() works.
        # self.spines['geo'].register_axis(self.yaxis)
        self._update_transScale()

    def cla(self):
        """Reset the axes to their initial geographic state."""
        Axes.cla(self)

        self.set_longitude_grid(30)
        self.set_latitude_grid(15)
        self.set_longitude_grid_ends(75)
        self.xaxis.set_minor_locator(NullLocator())
        self.yaxis.set_minor_locator(NullLocator())
        self.xaxis.set_ticks_position('none')
        self.yaxis.set_ticks_position('none')
        self.yaxis.set_tick_params(label1On=True)
        # Why do we need to turn on yaxis tick labels, but
        # xaxis tick labels are already on?

        self.grid(rcParams['axes.grid'])

        # Data limits are fixed: longitude in [-pi, pi], latitude in
        # [-pi/2, pi/2].  Public set_xlim/set_ylim are disabled below.
        Axes.set_xlim(self, -np.pi, np.pi)
        Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)

    def _set_lim_and_transforms(self):
        # A (possibly non-linear) projection on the (already scaled) data
        self.transProjection = self._get_core_transform(self.RESOLUTION)

        self.transAffine = self._get_affine_transform()

        self.transAxes = BboxTransformTo(self.bbox)

        # The complete data transformation stack -- from data all the
        # way to display coordinates
        self.transData = \
            self.transProjection + \
            self.transAffine + \
            self.transAxes

        # This is the transform for longitude ticks.
        self._xaxis_pretransform = \
            Affine2D() \
            .scale(1.0, self._longitude_cap * 2.0) \
            .translate(0.0, -self._longitude_cap)
        self._xaxis_transform = \
            self._xaxis_pretransform + \
            self.transData
        self._xaxis_text1_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, 4.0)
        self._xaxis_text2_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, -4.0)

        # This is the transform for latitude ticks.
        yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
        yaxis_space = Affine2D().scale(1.0, 1.1)
        self._yaxis_transform = \
            yaxis_stretch + \
            self.transData
        yaxis_text_base = \
            yaxis_stretch + \
            self.transProjection + \
            (yaxis_space + \
             self.transAffine + \
             self.transAxes)
        self._yaxis_text1_transform = \
            yaxis_text_base + \
            Affine2D().translate(-8.0, 0.0)
        self._yaxis_text2_transform = \
            yaxis_text_base + \
            Affine2D().translate(8.0, 0.0)

    def _get_affine_transform(self):
        # Scale the projected extent so the whole globe fits into the
        # unit square of axes coordinates, centered at (0.5, 0.5).
        transform = self._get_core_transform(1)
        xscale, _ = transform.transform_point((np.pi, 0))
        _, yscale = transform.transform_point((0, np.pi / 2.0))
        return Affine2D() \
            .scale(0.5 / xscale, 0.5 / yscale) \
            .translate(0.5, 0.5)

    def get_xaxis_transform(self,which='grid'):
        """Return the longitude grid/tick transform."""
        if which not in ['tick1','tick2','grid']:
            # Fixed typo in the message ("on of" -> "one of"); now
            # consistent with get_yaxis_transform below.
            msg = "'which' must be one of [ 'tick1' | 'tick2' | 'grid' ]"
            raise ValueError(msg)
        return self._xaxis_transform

    def get_xaxis_text1_transform(self, pad):
        return self._xaxis_text1_transform, 'bottom', 'center'

    def get_xaxis_text2_transform(self, pad):
        return self._xaxis_text2_transform, 'top', 'center'

    def get_yaxis_transform(self,which='grid'):
        """Return the latitude grid/tick transform."""
        if which not in ['tick1','tick2','grid']:
            msg = "'which' must be one of [ 'tick1' | 'tick2' | 'grid' ]"
            raise ValueError(msg)
        return self._yaxis_transform

    def get_yaxis_text1_transform(self, pad):
        return self._yaxis_text1_transform, 'center', 'right'

    def get_yaxis_text2_transform(self, pad):
        return self._yaxis_text2_transform, 'center', 'left'

    def _gen_axes_patch(self):
        # The map outline is a circle in axes coordinates.
        return Circle((0.5, 0.5), 0.5)

    def _gen_axes_spines(self):
        return {'geo':mspines.Spine.circular_spine(self,
                                                   (0.5, 0.5), 0.5)}

    def set_yscale(self, *args, **kwargs):
        # Only linear scaling makes sense for angular data.
        if args[0] != 'linear':
            raise NotImplementedError
    set_xscale = set_yscale

    def set_xlim(self, *args, **kwargs):
        """Not supported: geographic projections have fixed limits."""
        raise TypeError("It is not possible to change axes limits "
                        "for geographic projections. Please consider "
                        "using Basemap or Cartopy.")

    set_ylim = set_xlim

    def format_coord(self, lon, lat):
        """Return a string formatting the (lon, lat) coordinate in degrees."""
        lon, lat = np.rad2deg([lon, lat])
        if lat >= 0.0:
            ns = 'N'
        else:
            ns = 'S'
        if lon >= 0.0:
            ew = 'E'
        else:
            ew = 'W'
        return ('%f\N{DEGREE SIGN}%s, %f\N{DEGREE SIGN}%s'
                % (abs(lat), ns, abs(lon), ew))

    def set_longitude_grid(self, degrees):
        """
        Set the number of degrees between each longitude grid.
        """
        # Skip -180 and 180, which are the fixed limits.
        grid = np.arange(-180 + degrees, 180, degrees)
        self.xaxis.set_major_locator(FixedLocator(np.deg2rad(grid)))
        self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))

    def set_latitude_grid(self, degrees):
        """
        Set the number of degrees between each latitude grid.
        """
        # Skip -90 and 90, which are the fixed limits.
        grid = np.arange(-90 + degrees, 90, degrees)
        self.yaxis.set_major_locator(FixedLocator(np.deg2rad(grid)))
        self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))

    def set_longitude_grid_ends(self, degrees):
        """
        Set the latitude(s) at which to stop drawing the longitude grids.
        """
        self._longitude_cap = degrees * (np.pi / 180.0)
        self._xaxis_pretransform \
            .clear() \
            .scale(1.0, self._longitude_cap * 2.0) \
            .translate(0.0, -self._longitude_cap)

    def get_data_ratio(self):
        '''
        Return the aspect ratio of the data itself.
        '''
        return 1.0

    ### Interactive panning

    def can_zoom(self):
        """
        Return *True* if this axes supports the zoom box button functionality.

        This axes object does not support interactive zoom box.
        """
        return False

    def can_pan(self) :
        """
        Return *True* if this axes supports the pan/zoom button functionality.

        This axes object does not support interactive pan/zoom.
        """
        return False

    def start_pan(self, x, y, button):
        pass

    def end_pan(self):
        pass

    def drag_pan(self, button, key, x, y):
        pass
class AitoffAxes(GeoAxes):
    # Registered projection name.
    name = 'aitoff'

    class AitoffTransform(Transform):
        """
        The base Aitoff transform: (longitude, latitude) in radians to
        projected (x, y).
        """
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            """
            Create a new Aitoff transform.  Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Aitoff space.
            """
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, ll):
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]

            # Pre-compute some values
            half_long = longitude / 2.0
            cos_latitude = np.cos(latitude)

            alpha = np.arccos(cos_latitude * np.cos(half_long))
            # Mask this array or we'll get divide-by-zero errors
            alpha = ma.masked_where(alpha == 0.0, alpha)
            # The numerators also need to be masked so that masked
            # division will be invoked.
            # We want unnormalized sinc.  numpy.sinc gives us normalized
            sinc_alpha = ma.sin(alpha) / alpha

            x = (cos_latitude * ma.sin(half_long)) / sinc_alpha
            y = (ma.sin(latitude) / sinc_alpha)

            # Masked entries (alpha == 0, the map center) project to 0.
            return np.concatenate((x.filled(0), y.filled(0)), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def transform_path_non_affine(self, path):
            # Densify the path so straight input segments follow the
            # projection's curvature.  (Removed an unused local that
            # fetched path.vertices without using it.)
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__

        def inverted(self):
            return AitoffAxes.InvertedAitoffTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    class InvertedAitoffTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, xy):
            # MGDTODO: Math is hard ;(
            # NOTE: the true inverse is not implemented -- this is an
            # identity pass-through.
            return xy
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def inverted(self):
            return AitoffAxes.AitoffTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    def __init__(self, *args, **kwargs):
        # Longitude grid lines stop at +/- 90 degrees latitude by default.
        self._longitude_cap = np.pi / 2.0
        GeoAxes.__init__(self, *args, **kwargs)
        # World maps are twice as wide as they are tall.
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()

    def _get_core_transform(self, resolution):
        return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
    # Registered projection name.
    name = 'hammer'

    class HammerTransform(Transform):
        """
        The base Hammer transform: (longitude, latitude) in radians to
        projected (x, y).
        """
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            """
            Create a new Hammer transform.  Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Hammer space.
            """
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, ll):
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]

            # Pre-compute some values
            half_long = longitude / 2.0
            cos_latitude = np.cos(latitude)
            sqrt2 = np.sqrt(2.0)

            alpha = np.sqrt(1.0 + cos_latitude * np.cos(half_long))
            x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
            y = (sqrt2 * np.sin(latitude)) / alpha
            return np.concatenate((x, y), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def transform_path_non_affine(self, path):
            # Densify the path so straight input segments follow the
            # projection's curvature.  (Removed an unused local that
            # fetched path.vertices without using it.)
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__

        def inverted(self):
            return HammerAxes.InvertedHammerTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    class InvertedHammerTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, xy):
            x, y = xy.T
            # Closed-form inverse of the Hammer projection.
            z = np.sqrt(1 - (x / 4) ** 2 - (y / 2) ** 2)
            longitude = 2 * np.arctan((z * x) / (2 * (2 * z ** 2 - 1)))
            latitude = np.arcsin(y*z)
            return np.column_stack([longitude, latitude])
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def inverted(self):
            return HammerAxes.HammerTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    def __init__(self, *args, **kwargs):
        # Longitude grid lines stop at +/- 90 degrees latitude by default.
        self._longitude_cap = np.pi / 2.0
        GeoAxes.__init__(self, *args, **kwargs)
        # World maps are twice as wide as they are tall.
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()

    def _get_core_transform(self, resolution):
        return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
    # Registered projection name.
    name = 'mollweide'

    class MollweideTransform(Transform):
        """
        The base Mollweide transform: (longitude, latitude) in radians to
        projected (x, y).
        """
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            """
            Create a new Mollweide transform.  Resolution is the number of
            steps to interpolate between each input line segment to
            approximate its path in curved Mollweide space.
            """
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, ll):
            def d(theta):
                # One Newton-Raphson step toward solving
                # theta + sin(theta) = pi * sin(latitude); also reports
                # which entries have not yet converged (|delta| > 0.001).
                delta = -(theta + np.sin(theta) - pi_sin_l) / (1 + np.cos(theta))
                return delta, np.abs(delta) > 0.001

            longitude = ll[:, 0]
            latitude = ll[:, 1]

            clat = np.pi/2 - np.abs(latitude)
            ihigh = clat < 0.087  # within 5 degrees of the poles
            ilow = ~ihigh
            aux = np.empty(latitude.shape, dtype=float)

            if ilow.any():  # Newton-Raphson iteration
                pi_sin_l = np.pi * np.sin(latitude[ilow])
                theta = 2.0 * latitude[ilow]
                delta, large_delta = d(theta)
                while np.any(large_delta):
                    theta[large_delta] += delta[large_delta]
                    delta, large_delta = d(theta)
                aux[ilow] = theta / 2

            if ihigh.any():  # Taylor series-based approx. solution
                # Near the poles Newton-Raphson is ill-conditioned
                # (1 + cos(theta) -> 0), so use a series expansion instead.
                e = clat[ihigh]
                d = 0.5 * (3 * np.pi * e**2) ** (1.0/3)
                aux[ihigh] = (np.pi/2 - d) * np.sign(latitude[ihigh])

            xy = np.empty(ll.shape, dtype=float)
            xy[:,0] = (2.0 * np.sqrt(2.0) / np.pi) * longitude * np.cos(aux)
            xy[:,1] = np.sqrt(2.0) * np.sin(aux)

            return xy
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def transform_path_non_affine(self, path):
            # Densify the path so straight input segments follow the
            # projection's curvature.  (Removed an unused local that
            # fetched path.vertices without using it.)
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__

        def inverted(self):
            return MollweideAxes.InvertedMollweideTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    class InvertedMollweideTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, xy):
            x = xy[:, 0:1]
            y = xy[:, 1:2]

            # from Equations (7, 8) of
            # http://mathworld.wolfram.com/MollweideProjection.html
            theta = np.arcsin(y / np.sqrt(2))
            lon = (np.pi / (2 * np.sqrt(2))) * x / np.cos(theta)
            lat = np.arcsin((2 * theta + np.sin(2 * theta)) / np.pi)

            return np.concatenate((lon, lat), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def inverted(self):
            return MollweideAxes.MollweideTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    def __init__(self, *args, **kwargs):
        # Longitude grid lines stop at +/- 90 degrees latitude by default.
        self._longitude_cap = np.pi / 2.0
        GeoAxes.__init__(self, *args, **kwargs)
        # World maps are twice as wide as they are tall.
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()

    def _get_core_transform(self, resolution):
        return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
    # Axes for the Lambert azimuthal equal-area projection.
    name = 'lambert'
    class LambertTransform(Transform):
        """
        The base Lambert transform.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, center_longitude, center_latitude, resolution):
            """
            Create a new Lambert transform. Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Lambert space.
            """
            Transform.__init__(self)
            self._resolution = resolution
            self._center_longitude = center_longitude
            self._center_latitude = center_latitude
        def transform_non_affine(self, ll):
            # Project (longitude, latitude) pairs (radians) about the
            # configured center point.
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]
            clong = self._center_longitude
            clat = self._center_latitude
            cos_lat = np.cos(latitude)
            sin_lat = np.sin(latitude)
            diff_long = longitude - clong
            cos_diff_long = np.cos(diff_long)
            inner_k = (1.0 +
                       np.sin(clat)*sin_lat +
                       np.cos(clat)*cos_lat*cos_diff_long)
            # Prevent divide-by-zero problems
            inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
            k = np.sqrt(2.0 / inner_k)
            x = k*cos_lat*np.sin(diff_long)
            y = k*(np.cos(clat)*sin_lat -
                   np.sin(clat)*cos_lat*cos_diff_long)
            return np.concatenate((x, y), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
        def transform_path_non_affine(self, path):
            # Densify the path so straight segments follow the curved
            # projection. (Removed an unused ``vertices`` local.)
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
        def inverted(self):
            return LambertAxes.InvertedLambertTransform(
                self._center_longitude,
                self._center_latitude,
                self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    class InvertedLambertTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False
        def __init__(self, center_longitude, center_latitude, resolution):
            Transform.__init__(self)
            self._resolution = resolution
            self._center_longitude = center_longitude
            self._center_latitude = center_latitude
        def transform_non_affine(self, xy):
            # Inverse azimuthal equal-area mapping back to lon/lat.
            x = xy[:, 0:1]
            y = xy[:, 1:2]
            clong = self._center_longitude
            clat = self._center_latitude
            p = np.sqrt(x*x + y*y)
            p = np.where(p == 0.0, 1e-9, p)
            c = 2.0 * np.arcsin(0.5 * p)
            sin_c = np.sin(c)
            cos_c = np.cos(c)
            lat = np.arcsin(cos_c*np.sin(clat) +
                            ((y*sin_c*np.cos(clat)) / p))
            lon = clong + np.arctan(
                (x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
            return np.concatenate((lon, lat), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
        def inverted(self):
            return LambertAxes.LambertTransform(
                self._center_longitude,
                self._center_latitude,
                self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__
    def __init__(self, *args, **kwargs):
        self._longitude_cap = np.pi / 2.0
        self._center_longitude = kwargs.pop("center_longitude", 0.0)
        self._center_latitude = kwargs.pop("center_latitude", 0.0)
        GeoAxes.__init__(self, *args, **kwargs)
        self.set_aspect('equal', adjustable='box', anchor='C')
        self.cla()
    def cla(self):
        GeoAxes.cla(self)
        # Latitude tick labels are not meaningful on this projection.
        self.yaxis.set_major_formatter(NullFormatter())
    def _get_core_transform(self, resolution):
        return self.LambertTransform(
            self._center_longitude,
            self._center_latitude,
            resolution)
    def _get_affine_transform(self):
        # Scale the projected disk into the unit square centered at
        # (0.5, 0.5) for display.
        return Affine2D() \
            .scale(0.25) \
            .translate(0.5, 0.5)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@matplotlib@projections@geo.py@.PATH_END.py
|
{
"filename": "records.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/core/records.py",
"type": "Python"
}
|
"""
Record Arrays
=============
Record arrays expose the fields of structured arrays as properties.
Most commonly, ndarrays contain elements of a single type, e.g. floats,
integers, bools etc. However, it is possible for elements to be combinations
of these using structured types, such as::
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
>>> a
array([(1, 2.0), (1, 2.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Here, each element consists of two fields: x (an int), and y (a float).
This is known as a structured array. The different fields are analogous
to columns in a spread-sheet. The different fields can be accessed as
one would a dictionary::
>>> a['x']
array([1, 1])
>>> a['y']
array([ 2., 2.])
Record arrays allow us to access fields as properties::
>>> ar = np.rec.array(a)
>>> ar.x
array([1, 1])
>>> ar.y
array([ 2., 2.])
"""
from __future__ import division, absolute_import, print_function
import sys
import os
from . import numeric as sb
from . import numerictypes as nt
from numpy.compat import isfileobj, bytes, long
from .arrayprint import get_printoptions
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']
# Convenience alias for the plain ndarray type from numeric.
ndarray = sb.ndarray
# Map one-character byte-order specifiers accepted by this module to the
# canonical characters understood by dtype.newbyteorder().
_byteorderconv = {'b':'>',
                  'l':'<',
                  'n':'=',
                  'B':'>',
                  'L':'<',
                  'N':'=',
                  'S':'s',
                  's':'s',
                  '>':'>',
                  '<':'<',
                  '=':'=',
                  '|':'|',
                  'I':'|',
                  'i':'|'}
# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
# NOTE(review): the regex described above does not appear in this module;
# format parsing is delegated to sb.dtype -- comment kept for history.
numfmt = nt.typeDict
def find_duplicate(list):
    """Find duplication in a list, return a list of duplicated elements.
    Duplicated elements are reported once each, in order of their first
    appearance, matching the previous O(n**2) implementation while running
    in O(n). Assumes the elements are hashable (they are always field-name
    strings in this module). The parameter keeps its historical name
    (shadowing the builtin) for interface compatibility.
    """
    counts = {}
    order = []
    for item in list:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
            order.append(item)
    return [item for item in order if counts[item] > 1]
class format_parser(object):
    """
    Class to convert formats, names, titles description to a dtype.
    After constructing the format_parser object, the dtype attribute is
    the converted data-type:
    ``dtype = format_parser(formats, names, titles).dtype``
    Attributes
    ----------
    dtype : dtype
        The converted data-type.
    Parameters
    ----------
    formats : str or list of str
        The format description, either specified as a string with
        comma-separated format descriptions in the form ``'f8, i4, a5'``, or
        a list of format description strings in the form
        ``['f8', 'i4', 'a5']``.
    names : str or list/tuple of str
        The field names, either specified as a comma-separated string in the
        form ``'col1, col2, col3'``, or as a list or tuple of strings in the
        form ``['col1', 'col2', 'col3']``.
        An empty list can be used, in that case default field names
        ('f0', 'f1', ...) are used.
    titles : sequence
        Sequence of title strings. An empty list can be used to leave titles
        out.
    aligned : bool, optional
        If True, align the fields by padding as the C-compiler would.
        Default is False.
    byteorder : str, optional
        If specified, all the fields will be changed to the
        provided byte-order. Otherwise, the default byte-order is
        used. For all available string specifiers, see `dtype.newbyteorder`.
    See Also
    --------
    dtype, typename, sctype2char
    Examples
    --------
    >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
    ...                  ['T1', 'T2', 'T3']).dtype
    dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'),
           (('T3', 'col3'), '|S5')])
    `names` and/or `titles` can be empty lists. If `titles` is an empty list,
    titles will simply not appear. If `names` is empty, default field names
    will be used.
    >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
    ...                  []).dtype
    dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')])
    >>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype
    dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])
    """
    def __init__(self, formats, names, titles, aligned=False, byteorder=None):
        # The three steps below populate self._descr, exposed as .dtype.
        self._parseFormats(formats, aligned)
        self._setfieldnames(names, titles)
        self._createdescr(byteorder)
        self.dtype = self._descr
    def _parseFormats(self, formats, aligned=0):
        """ Parse the field formats """
        if formats is None:
            raise ValueError("Need formats argument")
        if isinstance(formats, list):
            if len(formats) < 2:
                # A single format would otherwise join to a string that
                # parses as a plain (unstructured) dtype; the extra ''
                # yields a trailing comma, forcing a structured result.
                formats.append('')
            formats = ','.join(formats)
        dtype = sb.dtype(formats, aligned)
        fields = dtype.fields
        if fields is None:
            # Still unstructured: wrap it into a one-field structured dtype.
            dtype = sb.dtype([('f1', dtype)], aligned)
            fields = dtype.fields
        keys = dtype.names
        self._f_formats = [fields[key][0] for key in keys]
        self._offsets = [fields[key][1] for key in keys]
        self._nfields = len(keys)
    def _setfieldnames(self, names, titles):
        """convert input field names into a list and assign to the _names
        attribute """
        if (names):
            if (type(names) in [list, tuple]):
                pass
            elif isinstance(names, str):
                names = names.split(',')
            else:
                raise NameError("illegal input names %s" % repr(names))
            self._names = [n.strip() for n in names[:self._nfields]]
        else:
            self._names = []
        # if the names are not specified, they will be assigned as
        # "f0, f1, f2,..."
        # if not enough names are specified, they will be assigned as "f[n],
        # f[n+1],..." etc. where n is the number of specified names..."
        self._names += ['f%d' % i for i in range(len(self._names),
                                                 self._nfields)]
        # check for redundant names
        _dup = find_duplicate(self._names)
        if _dup:
            raise ValueError("Duplicate field names: %s" % _dup)
        if (titles):
            self._titles = [n.strip() for n in titles[:self._nfields]]
        else:
            self._titles = []
            titles = []
        # Pad with None so every field has a (possibly empty) title slot.
        if (self._nfields > len(titles)):
            self._titles += [None] * (self._nfields - len(titles))
    def _createdescr(self, byteorder):
        # Assemble the final structured dtype from the parsed pieces.
        descr = sb.dtype({'names':self._names,
                          'formats':self._f_formats,
                          'offsets':self._offsets,
                          'titles':self._titles})
        if (byteorder is not None):
            byteorder = _byteorderconv[byteorder[0]]
            descr = descr.newbyteorder(byteorder)
        self._descr = descr
class record(nt.void):
    """A data-type scalar that allows field access as attribute lookup.
    """
    # manually set name and module so that this class's type shows up
    # as numpy.record when printed
    __name__ = 'record'
    __module__ = 'numpy'
    def __repr__(self):
        # Scalar records print the same via repr() and str().
        return self.__str__()
    def __str__(self):
        return str(self.item())
    def __getattribute__(self, attr):
        # The three core accessors are resolved directly so they can be
        # used below without recursing through this method; any other real
        # attribute/method also wins over a同-named field.
        if attr in ['setfield', 'getfield', 'dtype']:
            return nt.void.__getattribute__(self, attr)
        try:
            return nt.void.__getattribute__(self, attr)
        except AttributeError:
            pass
        # Fall back to interpreting ``attr`` as a field name.
        fielddict = nt.void.__getattribute__(self, 'dtype').fields
        res = fielddict.get(attr, None)
        if res:
            obj = self.getfield(*res[:2])
            # if it has fields return a record,
            # otherwise return the object
            try:
                dt = obj.dtype
            except AttributeError:
                #happens if field is Object type
                return obj
            if dt.fields:
                return obj.view((self.__class__, obj.dtype.fields))
            return obj
        else:
            raise AttributeError("'record' object has no "
                    "attribute '%s'" % attr)
    def __setattr__(self, attr, val):
        # The core accessors must never be shadowed by assignment.
        if attr in ['setfield', 'getfield', 'dtype']:
            raise AttributeError("Cannot set '%s' attribute" % attr)
        fielddict = nt.void.__getattribute__(self, 'dtype').fields
        res = fielddict.get(attr, None)
        if res:
            return self.setfield(val, *res[:2])
        else:
            if getattr(self, attr, None):
                return nt.void.__setattr__(self, attr, val)
            else:
                raise AttributeError("'record' object has no "
                        "attribute '%s'" % attr)
    def __getitem__(self, indx):
        obj = nt.void.__getitem__(self, indx)
        # copy behavior of record.__getattribute__,
        if isinstance(obj, nt.void) and obj.dtype.fields:
            return obj.view((self.__class__, obj.dtype.fields))
        else:
            # return a single element
            return obj
    def pprint(self):
        """Pretty-print all fields."""
        # pretty-print all fields
        names = self.dtype.names
        maxlen = max(len(name) for name in names)
        rows = []
        fmt = '%% %ds: %%s' % maxlen
        for name in names:
            rows.append(fmt % (name, getattr(self, name)))
        return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already) The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
    """Construct an ndarray that allows field access using attributes.
    Arrays may have a data-types containing fields, analogous
    to columns in a spread sheet. An example is ``[(x, int), (y, float)]``,
    where each entry in the array is a pair of ``(int, float)``. Normally,
    these attributes are accessed using dictionary lookups such as ``arr['x']``
    and ``arr['y']``. Record arrays allow the fields to be accessed as members
    of the array, using ``arr.x`` and ``arr.y``.
    Parameters
    ----------
    shape : tuple
        Shape of output array.
    dtype : data-type, optional
        The desired data-type. By default, the data-type is determined
        from `formats`, `names`, `titles`, `aligned` and `byteorder`.
    formats : list of data-types, optional
        A list containing the data-types for the different columns, e.g.
        ``['i4', 'f8', 'i4']``. `formats` does *not* support the new
        convention of using types directly, i.e. ``(int, float, int)``.
        Note that `formats` must be a list, not a tuple.
        Given that `formats` is somewhat limited, we recommend specifying
        `dtype` instead.
    names : tuple of str, optional
        The name of each column, e.g. ``('x', 'y', 'z')``.
    buf : buffer, optional
        By default, a new array is created of the given shape and data-type.
        If `buf` is specified and is an object exposing the buffer interface,
        the array will use the memory from the existing buffer. In this case,
        the `offset` and `strides` keywords are available.
    Other Parameters
    ----------------
    titles : tuple of str, optional
        Aliases for column names. For example, if `names` were
        ``('x', 'y', 'z')`` and `titles` is
        ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
        ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
    byteorder : {'<', '>', '='}, optional
        Byte-order for all fields.
    aligned : bool, optional
        Align the fields in memory as the C-compiler would.
    strides : tuple of ints, optional
        Buffer (`buf`) is interpreted according to these strides (strides
        define how many bytes each array element, row, column, etc.
        occupy in memory).
    offset : int, optional
        Start reading buffer (`buf`) from this offset onwards.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) order.
    Returns
    -------
    rec : recarray
        Empty array of the given shape and type.
    See Also
    --------
    rec.fromrecords : Construct a record array from data.
    record : fundamental data-type for `recarray`.
    format_parser : determine a data-type from formats, names, titles.
    Notes
    -----
    This constructor can be compared to ``empty``: it creates a new record
    array but does not fill it with data. To create a record array from data,
    use one of the following methods:
    1. Create a standard ndarray and convert it to a record array,
       using ``arr.view(np.recarray)``
    2. Use the `buf` keyword.
    3. Use `np.rec.fromrecords`.
    Examples
    --------
    Create an array with two fields, ``x`` and ``y``:
    >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
    >>> x
    array([(1.0, 2), (3.0, 4)],
          dtype=[('x', '<f8'), ('y', '<i4')])
    >>> x['x']
    array([ 1.,  3.])
    View the array as a record array:
    >>> x = x.view(np.recarray)
    >>> x.x
    array([ 1.,  3.])
    >>> x.y
    array([2, 4])
    Create a new, empty record array:
    >>> np.recarray((2,),
    ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
    rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
           (3471280, 1.2134086255804012e-316, 0)],
          dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
    """
    # manually set name and module so that this class's type shows
    # up as "numpy.recarray" when printed
    __name__ = 'recarray'
    __module__ = 'numpy'
    def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
                formats=None, names=None, titles=None,
                byteorder=None, aligned=False, order='C'):
        # An explicit dtype wins; otherwise build one from the
        # formats/names/titles description.
        if dtype is not None:
            descr = sb.dtype(dtype)
        else:
            descr = format_parser(formats, names, titles, aligned, byteorder)._descr
        if buf is None:
            self = ndarray.__new__(subtype, shape, (record, descr), order=order)
        else:
            # Reuse the caller-provided buffer instead of allocating.
            self = ndarray.__new__(subtype, shape, (record, descr),
                                      buffer=buf, offset=offset,
                                      strides=strides, order=order)
        return self
    def __array_finalize__(self, obj):
        if self.dtype.type is not record and self.dtype.fields:
            # if self.dtype is not np.record, invoke __setattr__ which will
            # convert it to a record if it is a void dtype.
            self.dtype = self.dtype
    def __getattribute__(self, attr):
        # See if ndarray has this attr, and return it if so. (note that this
        # means a field with the same name as an ndarray attr cannot be
        # accessed by attribute).
        try:
            return object.__getattribute__(self, attr)
        except AttributeError: # attr must be a fieldname
            pass
        # look for a field with this name
        fielddict = ndarray.__getattribute__(self, 'dtype').fields
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError("recarray has no attribute %s" % attr)
        obj = self.getfield(*res)
        # At this point obj will always be a recarray, since (see
        # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
        # non-structured, convert it to an ndarray. Then if obj is structured
        # with void type convert it to the same dtype.type (eg to preserve
        # numpy.record type if present), since nested structured fields do not
        # inherit type. Don't do this for non-void structures though.
        if obj.dtype.fields:
            if issubclass(obj.dtype.type, nt.void):
                return obj.view(dtype=(self.dtype.type, obj.dtype))
            return obj
        else:
            return obj.view(ndarray)
    # Save the dictionary.
    # If the attr is a field name and not in the saved dictionary
    # Undo any "setting" of the attribute and do a setfield
    # Thus, you can't create attributes on-the-fly that are field names.
    def __setattr__(self, attr, val):
        # Automatically convert (void) structured types to records
        # (but not non-void structures, subarrays, or non-structured voids)
        if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields:
            val = sb.dtype((record, val))
        newattr = attr not in self.__dict__
        try:
            ret = object.__setattr__(self, attr, val)
        except Exception:
            # Setting a plain attribute failed; if it is not a field name
            # either, re-raise the original error.
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            if attr not in fielddict:
                exctype, value = sys.exc_info()[:2]
                raise exctype(value)
        else:
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            if attr not in fielddict:
                return ret
            if newattr:
                # We just added this one or this setattr worked on an
                # internal attribute.
                try:
                    object.__delattr__(self, attr)
                except Exception:
                    return ret
        # ``attr`` names a field: store through setfield instead.
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError("record array has no attribute %s" % attr)
        return self.setfield(val, *res)
    def __getitem__(self, indx):
        obj = super(recarray, self).__getitem__(indx)
        # copy behavior of getattr, except that here
        # we might also be returning a single element
        if isinstance(obj, ndarray):
            if obj.dtype.fields:
                obj = obj.view(type(self))
                if issubclass(obj.dtype.type, nt.void):
                    return obj.view(dtype=(self.dtype.type, obj.dtype))
                return obj
            else:
                return obj.view(type=ndarray)
        else:
            # return a single element
            return obj
    def __repr__(self):
        repr_dtype = self.dtype
        if (self.dtype.type is record
                or (not issubclass(self.dtype.type, nt.void))):
            # If this is a full record array (has numpy.record dtype),
            # or if it has a scalar (non-void) dtype with no records,
            # represent it using the rec.array function. Since rec.array
            # converts dtype to a numpy.record for us, convert back
            # to non-record before printing
            if repr_dtype.type is record:
                repr_dtype = sb.dtype((nt.void, repr_dtype))
            prefix = "rec.array("
            fmt = 'rec.array(%s,%sdtype=%s)'
        else:
            # otherwise represent it using np.array plus a view
            # This should only happen if the user is playing
            # strange games with dtypes.
            prefix = "array("
            fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'
        # get data/shape string. logic taken from numeric.array_repr
        if self.size > 0 or self.shape == (0,):
            lst = sb.array2string(
                self, separator=', ', prefix=prefix, suffix=',')
        else:
            # show zero-length shape unless it is (0,)
            lst = "[], shape=%s" % (repr(self.shape),)
        lf = '\n'+' '*len(prefix)
        if get_printoptions()['legacy'] == '1.13':
            lf = ' ' + lf # trailing space
        return fmt % (lst, lf, repr_dtype)
    def field(self, attr, val=None):
        # Get (val is None) or set a field, selected by index or name.
        if isinstance(attr, int):
            names = ndarray.__getattribute__(self, 'dtype').names
            attr = names[attr]
        fielddict = ndarray.__getattribute__(self, 'dtype').fields
        res = fielddict[attr][:2]
        if val is None:
            obj = self.getfield(*res)
            if obj.dtype.fields:
                return obj
            return obj.view(ndarray)
        else:
            return self.setfield(val, *res)
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
               names=None, titles=None, aligned=False, byteorder=None):
    """ create a record array from a (flat) list of arrays
    >>> x1=np.array([1,2,3,4])
    >>> x2=np.array(['a','dd','xyz','12'])
    >>> x3=np.array([1.1,2,3,4])
    >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
    >>> print(r[1])
    (2, 'dd', 2.0)
    >>> x1[1]=34
    >>> r.a
    array([1, 2, 3, 4])
    """
    arrayList = [sb.asarray(x) for x in arrayList]
    # Default the record-array shape to that of the first input array.
    if shape is None or shape == 0:
        shape = arrayList[0].shape
    if isinstance(shape, int):
        shape = (shape,)
    if formats is None and dtype is None:
        # go through each object in the list to see if it is an ndarray
        # and determine the formats.
        formats = []
        for obj in arrayList:
            if not isinstance(obj, ndarray):
                raise ValueError("item in the array list must be an ndarray.")
            formats.append(obj.dtype.str)
        formats = ','.join(formats)
    if dtype is not None:
        descr = sb.dtype(dtype)
        _names = descr.names
    else:
        parsed = format_parser(formats, names, titles, aligned, byteorder)
        _names = parsed._names
        descr = parsed._descr
    # Determine shape from data-type.
    if len(descr) != len(arrayList):
        raise ValueError("mismatch between the number of fields "
                "and the number of arrays")
    d0 = descr[0].shape
    nn = len(d0)
    # When the fields are themselves subarrays, the trailing dimensions of
    # ``shape`` belong to the field, not to the record array.
    if nn > 0:
        shape = shape[:-nn]
    for k, obj in enumerate(arrayList):
        nn = descr[k].ndim
        testshape = obj.shape[:obj.ndim - nn]
        if testshape != shape:
            raise ValueError("array-shape mismatch in array %d" % k)
    _array = recarray(shape, descr)
    # populate the record array (makes a copy)
    for i in range(len(arrayList)):
        _array[_names[i]] = arrayList[i]
    return _array
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
                titles=None, aligned=False, byteorder=None):
    """ create a recarray from a list of records in text form
    The data in the same field can be heterogeneous, they will be promoted
    to the highest data type. This method is intended for creating
    smaller record arrays. If used to create large array without formats
    defined
    r=fromrecords([(2,3.,'abc')]*100000)
    it can be slow.
    If formats is None, then this will auto-detect formats. Use list of
    tuples rather than list of lists for faster processing.
    >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
    ... names='col1,col2,col3')
    >>> print(r[0])
    (456, 'dbe', 1.2)
    >>> r.col1
    array([456,   2])
    >>> r.col2
    array(['dbe', 'de'],
          dtype='|S3')
    >>> import pickle
    >>> print(pickle.loads(pickle.dumps(r)))
    [(456, 'dbe', 1.2) (2, 'de', 1.3)]
    """
    if formats is None and dtype is None: # slower
        # Auto-detect: transpose the records into per-field columns and
        # let fromarrays infer one format per column.
        obj = sb.array(recList, dtype=object)
        arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])]
        return fromarrays(arrlist, formats=formats, shape=shape, names=names,
                          titles=titles, aligned=aligned, byteorder=byteorder)
    if dtype is not None:
        descr = sb.dtype((record, dtype))
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr
    try:
        retval = sb.array(recList, dtype=descr)
    except TypeError: # list of lists instead of list of tuples
        if (shape is None or shape == 0):
            shape = len(recList)
        if isinstance(shape, (int, long)):
            shape = (shape,)
        if len(shape) > 1:
            raise ValueError("Can only deal with 1-d array.")
        _array = recarray(shape, descr)
        # Assign record-by-record; tuple() makes each row acceptable as a
        # structured scalar.
        for k in range(_array.size):
            _array[k] = tuple(recList[k])
        return _array
    else:
        if shape is not None and retval.shape != shape:
            retval.shape = shape
    res = retval.view(recarray)
    return res
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
               names=None, titles=None, aligned=False, byteorder=None):
    """Create a (read-only) record array from binary data contained in
    a string.
    Either ``dtype`` or ``formats`` must describe the record layout; when
    ``shape`` is omitted (or 0 / -1) the number of records is inferred
    from the length of the data after ``offset``.
    """
    if dtype is None and formats is None:
        raise ValueError("Must have dtype= or formats=")
    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr
    recsize = descr.itemsize
    # Infer the record count from the remaining bytes when not given.
    if shape is None or shape in (0, -1):
        shape = (len(datastring) - offset) // recsize
    return recarray(shape, descr, buf=datastring, offset=offset)
def get_remaining_size(fd):
    """Return the number of bytes between the file's current position and
    its end.
    Uses fstat on the underlying descriptor when one exists; file-like
    objects without fileno() are stat'ed by name instead.
    """
    pos = fd.tell()
    try:
        fileno = fd.fileno()
    except AttributeError:
        # No OS-level descriptor: fall back to a stat of the named path.
        return os.path.getsize(fd.name) - pos
    return os.fstat(fileno).st_size - pos
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
             names=None, titles=None, aligned=False, byteorder=None):
    """Create an array from binary file data
    If file is a string then that file is opened, else it is assumed
    to be a file object. The file object must support random access
    (i.e. it must have tell and seek methods).
    >>> from tempfile import TemporaryFile
    >>> a = np.empty(10,dtype='f8,i4,a5')
    >>> a[5] = (0.5,10,'abcde')
    >>>
    >>> fd=TemporaryFile()
    >>> a = a.newbyteorder('<')
    >>> a.tofile(fd)
    >>>
    >>> fd.seek(0)
    >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
    ... byteorder='<')
    >>> print(r[5])
    (0.5, 10, 'abcde')
    >>> r.shape
    (10,)
    """
    if (shape is None or shape == 0):
        shape = (-1,)
    elif isinstance(shape, (int, long)):
        shape = (shape,)
    # Remember whether we opened the file ourselves so we can close it.
    name = 0
    if isinstance(fd, str):
        name = 1
        fd = open(fd, 'rb')
    if (offset > 0):
        fd.seek(offset, 1)
    size = get_remaining_size(fd)
    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr
    itemsize = descr.itemsize
    shapeprod = sb.array(shape).prod()
    shapesize = shapeprod * itemsize
    if shapesize < 0:
        # A -1 entry in shape means "as many records as fit in the file".
        # BUG FIX: this module enables true division (``from __future__
        # import division``), so the old ``size / -shapesize`` produced a
        # float dimension; floor division keeps it an integer.
        shape = list(shape)
        shape[shape.index(-1)] = size // -shapesize
        shape = tuple(shape)
        shapeprod = sb.array(shape).prod()
    nbytes = shapeprod * itemsize
    if nbytes > size:
        raise ValueError(
                "Not enough bytes left in file for specified shape and type")
    # create the array
    _array = recarray(shape, descr)
    nbytesread = fd.readinto(_array.data)
    if nbytesread != nbytes:
        raise IOError("Didn't read as many bytes as expected")
    if name:
        # Only close files this function opened itself.
        fd.close()
    return _array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
          names=None, titles=None, aligned=False, byteorder=None, copy=True):
    """Construct a record array from a wide-variety of objects.
    """
    # None/str/file inputs carry no dtype of their own, so an explicit
    # description is mandatory for them.
    if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
           (formats is None) and (dtype is None)):
        raise ValueError("Must define formats (or dtype) if object is "
                         "None, string, or an open file")
    kwds = {}
    if dtype is not None:
        dtype = sb.dtype(dtype)
    elif formats is not None:
        dtype = format_parser(formats, names, titles,
                              aligned, byteorder)._descr
    else:
        # Defer dtype resolution to whichever from* helper gets called.
        kwds = {'formats': formats,
                'names': names,
                'titles': titles,
                'aligned': aligned,
                'byteorder': byteorder
                }
    # Dispatch on the type of ``obj``.
    if obj is None:
        if shape is None:
            raise ValueError("Must define a shape if obj is None")
        return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
    elif isinstance(obj, bytes):
        return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
    elif isinstance(obj, (list, tuple)):
        # Nested sequences are rows; a flat sequence is per-field columns.
        if isinstance(obj[0], (tuple, list)):
            return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
        else:
            return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
    elif isinstance(obj, recarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        return new
    elif isfileobj(obj):
        return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
    elif isinstance(obj, ndarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        return new.view(recarray)
    else:
        # Last resort: anything exposing __array_interface__.
        interface = getattr(obj, "__array_interface__", None)
        if interface is None or not isinstance(interface, dict):
            raise ValueError("Unknown input type")
        obj = sb.array(obj)
        if dtype is not None and (obj.dtype != dtype):
            obj = obj.view(dtype)
        return obj.view(recarray)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@core@records.py@.PATH_END.py
|
{
"filename": "_stream.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/heatmap/_stream.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
    """Streaming configuration for a heatmap trace.
    Generated plotly graph-object holding the two stream properties,
    'maxpoints' and 'token'.
    """
    # class properties
    # --------------------
    _parent_path_str = "heatmap"
    _path_str = "heatmap.stream"
    _valid_props = {"maxpoints", "token"}
    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Sets the maximum number of points to keep on the plots from an
        incoming stream. If `maxpoints` is set to 50, only the newest
        50 points will be displayed on the plot.
        The 'maxpoints' property is a number and may be specified as:
          - An int or float in the interval [0, 10000]
        Returns
        -------
        int|float
        """
        return self["maxpoints"]
    @maxpoints.setter
    def maxpoints(self, val):
        self["maxpoints"] = val
    # token
    # -----
    @property
    def token(self):
        """
        The stream id number links a data trace on a plot with a
        stream. See https://chart-studio.plotly.com/settings for more
        details.
        The 'token' property is a string and must be specified as:
          - A non-empty string
        Returns
        -------
        str
        """
        return self["token"]
    @token.setter
    def token(self, val):
        self["token"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.
        """
    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.heatmap.Stream`
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.
        Returns
        -------
        Stream
        """
        super(Stream, self).__init__("stream")
        # NOTE(review): _parent appears to be an internal re-parenting
        # fast path -- confirm against basedatatypes before relying on it.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.heatmap.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.Stream`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("maxpoints", None)
        _v = maxpoints if maxpoints is not None else _v
        if _v is not None:
            self["maxpoints"] = _v
        _v = arg.pop("token", None)
        _v = token if token is not None else _v
        if _v is not None:
            self["token"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@heatmap@_stream.py@.PATH_END.py
|
{
"filename": "predictor.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/ensemble/_hist_gradient_boosting/predictor.py",
"type": "Python"
}
|
"""
This module contains the TreePredictor class which is used for prediction.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from ._predictor import (
_compute_partial_dependence,
_predict_from_binned_data,
_predict_from_raw_data,
)
from .common import PREDICTOR_RECORD_DTYPE, Y_DTYPE
class TreePredictor:
    """Fitted tree structure used exclusively at prediction time.

    Parameters
    ----------
    nodes : ndarray of PREDICTOR_RECORD_DTYPE
        The nodes of the tree.
    binned_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
        Array of bitsets for binned categories used in predict_binned when a
        split is categorical.
    raw_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
        Array of bitsets for raw categories used in predict when a split is
        categorical.
    """

    def __init__(self, nodes, binned_left_cat_bitsets, raw_left_cat_bitsets):
        self.nodes = nodes
        self.binned_left_cat_bitsets = binned_left_cat_bitsets
        self.raw_left_cat_bitsets = raw_left_cat_bitsets

    def get_n_leaf_nodes(self):
        """Return the number of leaf nodes."""
        return int(self.nodes["is_leaf"].sum())

    def get_max_depth(self):
        """Return the depth of the deepest leaf."""
        return int(self.nodes["depth"].max())

    def predict(self, X, known_cat_bitsets, f_idx_map, n_threads):
        """Predict raw values for non-binned (raw) data.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The input samples.
        known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
            Array of bitsets of known categories, for each categorical feature.
        f_idx_map : ndarray of shape (n_features,)
            Map from original feature index to the corresponding index in the
            known_cat_bitsets array.
        n_threads : int
            Number of OpenMP threads to use.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            The raw predicted values.
        """
        # The compiled routine fills this buffer in place.
        raw_values = np.empty(X.shape[0], dtype=Y_DTYPE)
        _predict_from_raw_data(
            self.nodes, X, self.raw_left_cat_bitsets,
            known_cat_bitsets, f_idx_map, n_threads, raw_values,
        )
        return raw_values

    def predict_binned(self, X, missing_values_bin_idx, n_threads):
        """Predict raw values for already-binned data.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The input samples.
        missing_values_bin_idx : uint8
            Index of the bin reserved for missing values; this is the index
            of the last bin and always equals max_bins (as passed to the
            GBDT classes), i.e. n_bins - 1.
        n_threads : int
            Number of OpenMP threads to use.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            The raw predicted values.
        """
        raw_values = np.empty(X.shape[0], dtype=Y_DTYPE)
        _predict_from_binned_data(
            self.nodes, X, self.binned_left_cat_bitsets,
            missing_values_bin_idx, n_threads, raw_values,
        )
        return raw_values

    def compute_partial_dependence(self, grid, target_features, out):
        """Fast partial dependence computation.

        Parameters
        ----------
        grid : ndarray, shape (n_samples, n_target_features)
            The grid points on which the partial dependence should be
            evaluated.
        target_features : ndarray, shape (n_target_features)
            The set of target features for which the partial dependence
            should be evaluated.
        out : ndarray, shape (n_samples)
            The value of the partial dependence function on each grid
            point.
        """
        _compute_partial_dependence(self.nodes, grid, target_features, out)

    def __setstate__(self, state):
        try:
            super().__setstate__(state)
        except AttributeError:
            self.__dict__.update(state)

        # np.intp is platform dependent: int64 on a 64-bit Python runtime,
        # int32 on a 32-bit one.  Cast the nodes array when a pickle produced
        # on a different bitness is loaded, so saving/loading across systems
        # works without errors.
        #
        # TODO: consider always using platform agnostic dtypes for fitted
        # estimator attributes, e.g. an int32 field instead of intp in
        # PREDICTOR_RECORD_DTYPE, consistently throughout scikit-learn.
        if self.nodes.dtype != PREDICTOR_RECORD_DTYPE:
            self.nodes = self.nodes.astype(PREDICTOR_RECORD_DTYPE, casting="same_kind")
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@ensemble@_hist_gradient_boosting@predictor.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "mnicholl/superbol",
"repo_path": "superbol_extracted/superbol-master/example/README.md",
"type": "Markdown"
}
|
# Superbol input data
This directory contains real supernova data demonstrating the input format for superbol.
For *all* Superbol input:
- Sloan, PanSTARRS, Gaia, ATLAS, GALEX in AB mags;
- Johnson, NIR and Swift in Vega mags
# Example 1: SN2015bn (Nicholl et al. 2016, ApJ, 826, 39)
- Input has multiple filters per file
- Already in rest-frame absolute magnitudes (no cosmological corrections required)
- Covers UV-NIR (blackbody correction negligible if run with all filters)
- Play with effects of leaving out different filters to see which are most important and test accuracy of blackbody corrections
# Example 2: Gaia16apd (Nicholl et al. 2017, ApJL, 835, 8)
- One filter per input file
- MJD, apparent magnitudes (only extinction correction applied)
- Some mismatch in timing of different filters: useful to experiment with interpolations/extrapolations
- Can run multiple times with different reference bands to stitch together light curve
- No NIR so BB correction important
|
mnichollREPO_NAMEsuperbolPATH_START.@superbol_extracted@superbol-master@example@README.md@.PATH_END.py
|
{
"filename": "BinnedWCosmology.py",
"repo_name": "igomezv/simplemc_tests",
"repo_path": "simplemc_tests_extracted/simplemc_tests-main/simplemc/models/BinnedWCosmology.py",
"type": "Python"
}
|
from simplemc.models.LCDMCosmology import LCDMCosmology
from simplemc.cosmo.Parameter import Parameter
from scipy.interpolate import interp1d
from scipy.integrate import quad
import numpy as np
## Binned cosmology, where the DE eqn of state is assumed to be a set of bins
# with varying amplitudes and fix positions.
# Nevertheless, this class may be deprecated and substituted by the code use
# for writing the paper https://arxiv.org/abs/2111.10457
##
class BinnedWCosmology(LCDMCosmology):
    # Dark-energy equation of state w(z) represented as Nb constant bins of
    # width dz between z=0 and zmax; each bin amplitude w_i is a free parameter.
    def __init__(self, dz=0.2, zmax=1.0):
        """
        CDM cosmology with the dark-energy equation of state w(z) binned
        in redshift.

        Parameters
        ----------
        dz : float
            Step size (bin width) for the position of the bins.
        zmax : float
            Maximum redshift to use for the reconstruction.
        """
        # Bunch of parameters for amplitudes and positions of the bins.
        self.zbins = np.arange(0, zmax, dz)
        self.Nb = len(self.zbins)
        # All bins start at w = -1 (cosmological constant).
        self.wvals = np.ones(self.Nb)*-1.0
        self.pnames = ["w%i" % i for i in range(self.Nb)]
        LCDMCosmology.__init__(self)
        self.integrateOmega()
    # My free parameters. We use a flat cosmology.
    def freeParameters(self):
        # One Parameter per w-bin, appended to the base LCDM free parameters.
        wpars = [Parameter(name, self.wvals[i], err=0.05)
                 for i, name in enumerate(self.pnames)]
        return LCDMCosmology.freeParameters(self) + wpars
    def updateParams(self, pars):
        # Let the base class consume its own parameters first.
        ok = LCDMCosmology.updateParams(self, pars)
        if not ok:
            return False
        gotone = False
        for p in pars:
            ## Something's happening here, check it later.
            # NOTE(review): list.index raises ValueError for any parameter not
            # in pnames (e.g. base LCDM parameters) -- confirm pars only ever
            # contains w-bin parameters when it reaches this loop.
            print ('**', p.name, self.pnames)
            i = self.pnames.index(p.name)
            # NOTE(review): `i > 0` never updates the first bin (w0); verify
            # whether `i >= 0` was intended.
            if i > 0:
                self.wvals[i] = p.value
                gotone = True
        if gotone:
            # Recompute the dark-energy density interpolator with new bins.
            self.integrateOmega()
        return True
    def integrateOmega(self):
        # Bin edges in scale factor, padded with a ~ 0 (a=1e-4) so the
        # interpolator covers the full integration range.
        abins = np.hstack((1./(1+self.zbins), [1e-4]))
        # Extend the last bin amplitude down to a -> 0.
        w = np.hstack((self.wvals, [self.wvals[-1]]))
        # rho_DE(a)/rho_DE(1) = exp( integral_a^1 3(1+w) dln a' )
        itg = interp1d(np.log(abins), 3*(1 + w))
        oabins = np.hstack((np.logspace(-4, -1, 10), np.linspace(0.1, 1, 100)))
        olnrho = [quad(itg, np.log(a), 0)[0] for a in oabins]
        # NOTE(review): debug output -- consider removing.
        print(1/oabins**4)
        print(np.exp(olnrho))
        self.DEomega = interp1d(oabins, np.exp(olnrho))
    # This is relative hsquared as a function of a
    ## i.e. H(z)^2/H(z=0)^2.
    def RHSquared_a(self, a):
        # Massive-neutrino contribution, normalized by h^2.
        NuContrib = self.NuDensity.rho(a)/self.h**2
        return (self.Ocb/a**3+self.Omrad/a**4+NuContrib+(1.0-self.Om)*self.DEomega(a))
|
igomezvREPO_NAMEsimplemc_testsPATH_START.@simplemc_tests_extracted@simplemc_tests-main@simplemc@models@BinnedWCosmology.py@.PATH_END.py
|
{
"filename": "_padding.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/newshape/label/_padding.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class PaddingValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `layout.newshape.label.padding` number property."""

    def __init__(
        self, plotly_name="padding", parent_name="layout.newshape.label", **kwargs
    ):
        # Defaults used unless explicitly overridden by the caller.
        edit_type = kwargs.pop("edit_type", "none")
        minimum = kwargs.pop("min", 0)
        super(PaddingValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@newshape@label@_padding.py@.PATH_END.py
|
{
"filename": "photom.py",
"repo_name": "FRBs/FRB",
"repo_path": "FRB_extracted/FRB-main/frb/galaxies/photom.py",
"type": "Python"
}
|
""" Methods related to galaxy photometry """
import os
import warnings
import dust_extinction.parameter_averages
import numpy as np
import importlib_resources
from IPython import embed
from astropy.io import fits
from astropy.table import Table, hstack, vstack, join
from astropy.coordinates import SkyCoord
from astropy.coordinates import match_coordinates_sky
from astropy import units
from astropy.wcs import utils as wcs_utils
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from astropy import stats
from photutils.aperture import aperture_photometry, SkyCircularAperture
from frb.galaxies import defs
import dust_extinction
# Photometry globals
# ASCII format used for all photometry tables read/written by this module.
table_format = 'ascii.fixed_width'
# Strings that mark missing entries; astropy masks them on read.
fill_values_list = [('-999', '0'), ('-999.0', '0')]
# Sentinel written for masked entries on output.
fill_value = -999.
def merge_photom_tables(new_tbl, old_file, tol=1*units.arcsec, debug=False):
    """
    Merge a new photometry table into an existing one.

    Rows are matched on sky position.  Either *all* rows of new_tbl must
    match rows of the old table (columns are merged side-by-side) or *none*
    may match (rows are stacked).

    Args:
        new_tbl (astropy.table.Table):
            New table of photometry
        old_file (str or Table):
            Path to the old table, or the old table itself
        tol (Quantity, optional):
            Sky-separation tolerance for declaring a match
        debug (bool, optional):
            Currently unused

    Returns:
        astropy.table.Table:
            Merged tables
    """
    # File or tbl?
    if isinstance(old_file, str):
        # New file?
        if not os.path.isfile(old_file):
            return new_tbl
        # Load me
        old_tbl = Table.read(old_file, format=table_format)
    elif isinstance(old_file, Table):
        old_tbl = old_file
    else:
        # NOTE(review): drops into an IPython shell on bad input instead of
        # raising; old_tbl would be undefined afterwards -- confirm intent.
        embed(header='42 of photom')
    # Coords
    new_coords = SkyCoord(ra=new_tbl['ra'], dec=new_tbl['dec'], unit='deg')
    old_coords = SkyCoord(ra=old_tbl['ra'], dec=old_tbl['dec'], unit='deg')
    idx, d2d, _ = match_coordinates_sky(new_coords, old_coords, nthneighbor=1)
    match = d2d < tol
    # All rows matched -> merge columns; no rows matched -> stack rows.
    if np.sum(match) == len(new_coords):
        # Insist on the same RA, DEC
        new_tbl['ra'] = old_tbl['ra'][idx[0]]
        new_tbl['dec'] = old_tbl['dec'][idx[0]]
        # Join; masked entries are filled with -999.
        merge_tbl = hstack([old_tbl.filled(-999.), new_tbl.filled(-999.)])
        merge_tbl.remove_columns(['ra_2', 'dec_2'])
        merge_tbl.rename_columns(['ra_1', 'dec_1'], ['ra', 'dec'])
        #merge_tbl = join(old_tbl.filled(-999.), new_tbl, join_type='left').filled(-999.)
    elif np.sum(match) == 0:
        merge_tbl = vstack([old_tbl, new_tbl]).filled(-999.)
    else:
        embed(header='50 of photom') # Best to avoid!! Use photom_by_name
    # Return
    return merge_tbl
def photom_by_name(name, filelist):
    """
    Generate a Table for a given galaxy from a list of photom files

    Warning: Order matters! Use best data last -- unmasked values from later
    files overwrite those from earlier files.

    Args:
        name (str): Galaxy name matched against the 'Name' column
        filelist (list): Photometry files, read in order

    Returns:
        astropy.table.Table: Single-row table with masked entries filled
            with -999.
    """
    # Loop on tables
    final_tbl = None
    for ifile in filelist:
        # Load and ensure it is a masked Table
        tbl = Table(Table.read(ifile, format=table_format, fill_values=fill_values_list), masked=True)
        idx = tbl['Name'] == name
        if np.sum(idx) == 1:
            sub_tbl = tbl[idx]
            if final_tbl is None:
                final_tbl = sub_tbl
            else:
                # Overwrite any column that has an unmasked value
                for key in sub_tbl.keys():
                    if sub_tbl[key].mask != True: # Cannot use "is"
                        final_tbl[key] = sub_tbl[key]
    # NOTE(review): if no file contained `name`, final_tbl is None and this
    # raises AttributeError -- confirm callers guarantee a match.
    return final_tbl.filled(fill_value)
def extinction_correction(filt, EBV, RV=3.1, max_wave=None, required=True):
    """
    calculate MW extinction correction for given filter

    Uses the Gordon 2024 (G23) extinction model, integrated over the filter
    transmission curve assuming a flat source spectrum.

    Args:
        filt (str):
            filter name (name of file without .dat extension)
        EBV (float):
            E(B-V) (can get from frb.galaxies.nebular.get_ebv which uses IRSA Dust extinction query
        RV (float, optional):
            from gbrammer/threedhst eazyPy.py -- characterizes MW dust
        max_wave (float, optional):
            If set, cut off the calculation at this maximum wavelength.
            A bit of a hack for the near-IR, in large part because the
            MW extinction curve ends at 1.4 microns.
        required (bool, optional):
            Crash out if the transmission curve is not present;
            otherwise warn and return 1. (no correction)

    Returns:
        float: linear extinction correction (multiply fluxes by this)
    """
    # Read in filter in Table
    path_to_filters = importlib_resources.files('frb.data.analysis.CIGALE')
    # Hack for LRIS which does not differentiate between cameras
    if 'LRIS' in filt:
        _filter = 'LRIS_{}'.format(filt[-1])
    elif 'NSC' in filt:
        _filter = filt.replace("NSC_","DECam_")
    else:
        _filter = filt
    # NOTE(review): files() returns a Traversable; os.path.isfile assumes it
    # is a real filesystem path -- confirm the package is never zip-installed.
    filter_file = path_to_filters/f'{_filter}.dat'
    if not os.path.isfile(filter_file):
        msg = "Filter {} is not in the Repo. Add it!!".format(filter_file)
        if required:
            raise IOError(msg)
        else:
            warnings.warn(msg)
            return 1.
    filter_tbl = Table.read(filter_file, format='ascii')
    #get wave and transmission (file should have these headers in first row)
    wave = filter_tbl['col1'].data
    throughput = filter_tbl['col2'].data
    # Optionally truncate the red end of the transmission curve
    if max_wave:
        warnings.warn("Cutting off the extinction correction calculation at {} Ang".format(max_wave))
        gdwv = wave < max_wave
        wave = wave[gdwv]
        throughput = throughput[gdwv]
    #get MW extinction correction
    AV = EBV * RV
    #AlAV = nebular.load_extinction('MW')
    #Alambda = extinction.fm07(wave, AV)
    # Gordon 2024
    extmod = dust_extinction.parameter_averages.G23(Rv=RV)
    AlAV = extmod(wave*units.AA)
    Alambda = AlAV * AV
    # Flat source spectrum assumed for the filter-averaged correction
    source_flux = 1.
    #calculate linear correction
    # NOTE(review): np.trapz is deprecated in NumPy 2.0 (np.trapezoid) --
    # confirm the supported NumPy versions.
    delta = np.trapz(throughput * source_flux *
                     10 ** (-0.4 * Alambda), wave) / np.trapz(
        throughput * source_flux, wave)
    correction = 1./delta
    return correction
def correct_photom_table(photom, EBV, name, max_wave=None, required=True):
    """
    Correct the input photometry table for Galactic extinction

    Table is modified in place.

    If there is SDSS photometry, we look for the extinction values
    provided by the Survey itself.

    Uses extinction_correction()

    Args:
        photom (astropy.table.Table):
            Photometry table; filter columns are corrected in place
        EBV (float):
            E(B-V) (can get from frb.galaxies.nebular.get_ebv which uses IRSA Dust extinction query
        name (str):
            Name of the object to correct
        max_wave (float, optional):
            Passed to extinction_correction()
        required (bool, optional):
            Crash out if the transmission curve is not present

    Returns:
        int: Return code
            -1: No matches to the input name
            0: One match
    """
    # Cut the table to the single row for `name`
    mt_name = photom['Name'] == name
    if not np.any(mt_name):
        print("No matches to input name={}. Returning".format(name))
        return -1
    elif np.sum(mt_name) > 1:
        raise ValueError("More than 1 match to input name={}. Bad idea!!".format(name))
    idx = np.where(mt_name)[0][0]
    cut_photom = photom[idx] # This is a Row
    # Dust correct each filter column
    for key in photom.keys():
        # Skip non-photometric columns and error columns
        if key in ['Name', 'ra', 'dec', 'extinction', 'SDSS_ID',
                   'run', 'rerun'] or 'err' in key:
            continue
        filt = key
        if filt not in defs.valid_filters:
            print("Assumed filter {} is not in our valid list. Skipping extinction".format(
                filt))
            continue
        # -999? -- Not even measured
        try:
            if cut_photom[filt] <= -999.:
                continue
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed into the debug shell.
            embed(header='187 in photom')
        # SDSS provides its own per-band extinction values; prefer those
        if 'SDSS' in filt:
            if 'extinction_{}'.format(filt[-1]) in photom.keys():
                print("Applying SDSS-provided extinction correction")
                cut_photom[key] -= cut_photom['extinction_{}'.format(filt[-1])]
                continue
        # Hack for LRIS which does not differentiate between cameras
        if 'LRIS' in filt:
            _filter = 'LRIS_{}'.format(filt[-1])
        elif 'DELVE' in filt:
            _filter = filt.replace("DELVE_","DECam_")
        else:
            _filter = filt
        # Apply the filter-averaged correction, in magnitudes
        dust_correct = extinction_correction(_filter, EBV, max_wave=max_wave,
                                             required=required)
        mag_dust = 2.5 * np.log10(1. / dust_correct)
        cut_photom[key] += mag_dust
    # Add the corrected row back in
    photom[idx] = cut_photom
    return 0
def sb_at_frb(host, cut_dat:np.ndarray, cut_err:np.ndarray, wcs:WCS,
              fwhm=3., physical=False, min_uncert=2):
    """ Measure the surface brightness at an FRB location
    in a host galaxy

    The surface brightness is averaged over the FRB localization ellipse,
    weighted by the localization probability at each pixel.

    Args:
        host (Host object): host galaxy object from frb repo
        cut_dat (np.ndarray): data (data from astorpy 2D Cutout object)
        cut_err (np.ndarray): inverse variance of data (from astropy 2D Cutout object)
        wcs (WCS): WCS for the cutout
        fwhm (float, optional): FWHM of the PSF of the image in either
            pixels or kpc. Defaults to 3 [pix].
        physical (bool, optional): If True, FWHM is in kpc. Defaults to False.
        min_uncert (int, optional): Minimum localization unceratainty
            for the FRB, in pixels. Defaults to 2.

    Returns:
        tuple: sb_average, sb_average_err [counts/sqarcsec]
    """
    # Generate the x,y grid of coordinates
    x = np.arange(np.shape(cut_dat)[0])
    y = np.arange(np.shape(cut_dat)[1])
    xx, yy = np.meshgrid(x, y)
    coords = wcs_utils.pixel_to_skycoord(xx, yy, wcs)
    xfrb, yfrb = wcs_utils.skycoord_to_pixel(host.frb.coord, wcs)
    # Separation between adjacent pixels -> arcsec per pixel
    plate_scale = coords[0, 0].separation(coords[0, 1]).to('arcsec').value
    # Calculate total a, b uncertainty (FRB frame)
    uncerta, uncertb = host.calc_tot_uncert()
    # Put in pixel space
    uncerta /= plate_scale
    uncertb /= plate_scale
    # Set a minimum threshold
    uncerta = max(uncerta, min_uncert)
    uncertb = max(uncertb, min_uncert)
    # check if in ellipse -- pixel space!
    theta = host.frb.eellipse['theta']
    in_ellipse = ((xx - xfrb.item()) * np.cos(theta) +
                  (yy - yfrb.item()) * np.sin(theta)) ** 2 / (uncerta ** 2) + (
        (xx - xfrb.item()) * np.sin(theta) - (
            yy - yfrb.item()) * np.cos(theta)) ** 2 / (uncertb ** 2) <= 1
    idx = np.where(in_ellipse)
    xval = xx[idx]
    yval = yy[idx]
    # x, y gal on the tilted grid (same for frb coords)
    xp = yval * np.cos(theta) - xval * np.sin(theta)
    yp = xval * np.cos(theta) + yval * np.sin(theta)
    xpfrb = yfrb.item() * np.cos(theta) - xfrb.item() * np.sin(theta)
    ypfrb = xfrb.item() * np.cos(theta) + yfrb.item() * np.sin(theta)
    # convert fwhm from pixels to arcsec or kpc to arcsec
    if physical:
        fwhm_as = fwhm * units.kpc * defs.frb_cosmo.arcsec_per_kpc_proper(host.z)
    else:
        fwhm_as = fwhm * plate_scale * units.arcsec
    # Aperture photometry at every pixel in the ellipse
    photom = []
    photom_var = []
    for i in np.arange(np.shape(idx)[1]):
        aper = SkyCircularAperture(coords[idx[0][i], idx[1][i]], fwhm_as)
        apermap = aper.to_pixel(wcs)
        # aperture photometry for psf-size within the galaxy
        photo_frb = aperture_photometry(cut_dat, apermap)
        # Summing 1/ivar over the aperture gives the aperture variance
        photo_err = aperture_photometry(1 / cut_err, apermap)
        photom.append(photo_frb['aperture_sum'][0])
        photom_var.append(photo_err['aperture_sum'][0])
    # Localization probability (Gaussian in the tilted frame) at each pixel
    p_ff = np.exp(-(xp - xpfrb) ** 2 / (2 * uncerta ** 2)) * np.exp(
        -(yp - ypfrb) ** 2 / (2 * uncertb ** 2))
    f_weight = (photom / (np.pi * fwhm_as.value ** 2)) * p_ff # weighted photometry
    fvar_weight = (photom_var / (np.pi * fwhm_as.value ** 2)) * p_ff # weighted sigma
    weight_avg = np.sum(f_weight) / np.sum(p_ff) # per unit area (arcsec^2)
    # Errors
    weight_var_avg = np.sum(fvar_weight) / np.sum(p_ff)
    weight_err_avg = np.sqrt(weight_var_avg)
    return weight_avg, weight_err_avg
def fractional_flux(cutout, frbdat, hg, nsig=3.):
    """Calculate the fractional flux at the FRB location

    The fractional flux of a pixel is the fraction of the (zero-shifted)
    galaxy light in pixels fainter than it.  The statistic is averaged over
    the FRB localization ellipse, weighted by localization probability.

    Args:
        cutout (WCS Cutout2D): astropy 2D Cutout of data around host galaxy
        frbdat (frb.FRB): frb object loaded from frb repo
        hg (frb.galaxies.frbgalaxy.FRBHost): host galaxy object loaded from frb repo
        nsig (float, optional): sigma for FRB localization within which the measurement should be made. Defaults to 3.

    Returns:
        tuple: med_ff, sig_ff, f_weight [no units]
            Median weighted fractional flux, its uncertainty, and the list
            of weighted fractional fluxes for each pixel in the ellipse.
    """
    # get image data from cutout
    cut_data = cutout.data
    frbcoord = frbdat.coord
    # shift the data to above zero (all positive values)
    shift_data = cut_data - np.min(cut_data)
    # make mesh grid
    if np.shape(cut_data)[0] != np.shape(cut_data)[1]:
        # NOTE(review): np.resize repeats/truncates data to force a square
        # array -- confirm this is the intended handling of non-square cutouts.
        cut_data = np.resize(cut_data, (np.shape(cut_data)[1], np.shape(cut_data)[1]))
    x = np.arange(np.shape(cut_data)[0])
    y = np.arange(np.shape(cut_data)[1])
    xx, yy = np.meshgrid(x, y)
    coords = wcs_utils.pixel_to_skycoord(xx, yy, cutout.wcs)
    xfrb, yfrb = wcs_utils.skycoord_to_pixel(frbcoord, cutout.wcs)
    # Calc plate scale
    plate_scale = coords[0, 0].separation(coords[0, 1]).to('arcsec').value
    # get a, b, and theta from frb object -- convert to pixel space
    sig_a, sig_b = hg.calc_tot_uncert()
    sig_a /= plate_scale
    sig_b /= plate_scale
    # nsig ellipse semi-axes, with a floor of 3 pixels
    a = nsig * sig_a
    if a < 1:
        print('a is less than 1!')
        a = 3
    b = nsig * sig_b
    if b < 1:
        print('b is less than 1!')
        b = 3
    # check if in ellipse -- pixel space!
    theta = hg.frb.eellipse['theta'] * units.deg
    in_ellipse = ((xx - xfrb.item()) * np.cos(theta).value + (yy - yfrb.item()) * np.sin(theta).value) ** 2 / (
            a ** 2) + (
                         (xx - xfrb.item()) * np.sin(theta).value - (yy - yfrb.item()) * np.cos(
                     theta).value) ** 2 / (
                         b ** 2) <= 1
    idx = np.where(in_ellipse)
    xval = xx[idx]
    yval = yy[idx]
    # x, y rotated into the ellipse frame (same rotation for the FRB coords)
    xp = yval * np.cos(theta).value - xval * np.sin(theta).value
    yp = xval * np.cos(theta).value + yval * np.sin(theta).value
    xpfrb = yfrb.item() * np.cos(theta).value - xfrb.item() * np.sin(theta).value
    ypfrb = xfrb.item() * np.cos(theta).value + yfrb.item() * np.sin(theta).value
    # fractional flux for all values in ellipse
    fprime_inlocal = []
    for dat in shift_data[idx]:
        fprime = np.sum(shift_data[shift_data < dat]) / np.sum(shift_data)
        fprime_inlocal.append(fprime)
    # localization probability at each pixel (Gaussian in the ellipse frame)
    p_ff = np.exp(-(xp - xpfrb) ** 2 / (2 * a ** 2)) * np.exp(-(yp - ypfrb) ** 2 / (2 * b ** 2))
    f_weight = fprime_inlocal * p_ff  # weighted fractional fluxes
    avg_ff = np.sum(fprime_inlocal * p_ff) / np.sum(p_ff)
    var_ff = np.sum((fprime_inlocal - avg_ff) ** 2 * p_ff) / np.sum(p_ff)
    sig_ff = np.sqrt(var_ff)
    med_ff = np.percentile(f_weight, 50)
    # make array into list for writing out
    f_weight = np.array(f_weight).tolist()
    return med_ff, sig_ff, f_weight
|
FRBsREPO_NAMEFRBPATH_START.@FRB_extracted@FRB-main@frb@galaxies@photom.py@.PATH_END.py
|
{
"filename": "exceptions.py",
"repo_name": "astropy/pyvo",
"repo_path": "pyvo_extracted/pyvo-main/pyvo/utils/xml/exceptions.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.exceptions import AstropyWarning
# Public API of this module
__all__ = ['XMLWarning', 'UnknownElementWarning']
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ('?', '?')
filename = config.get('filename', '?')
return '{}:{}:{}: {}: {}'.format(filename, pos[0], pos[1], name, message)
class XMLWarning(AstropyWarning):
    """
    Base warning for violations of XML specifications
    """
    def __init__(self, args, config=None, pos=None):
        # Accept a single format argument or a tuple of them.
        if not isinstance(args, tuple):
            args = (args, )
        if config is None:
            config = {}
        # Build "file:line:col: WarningName: message" once and reuse it.
        msg = self.message_template.format(*args)
        self.formatted_message = _format_message(
            msg, type(self).__name__, config, pos)
        Warning.__init__(self, self.formatted_message)
class UnknownElementWarning(XMLWarning):
    """
    Warning for missing xml elements
    """
    # Substituted into the base-class message via str.format.
    message_template = "Unknown element {}"
    default_args = ('x',)
|
astropyREPO_NAMEpyvoPATH_START.@pyvo_extracted@pyvo-main@pyvo@utils@xml@exceptions.py@.PATH_END.py
|
{
"filename": "TransferGeckoFiles.ipynb",
"repo_name": "SilverRon/gppy",
"repo_path": "gppy_extracted/gppy-main/TransferGeckoFiles.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import os, glob
from astropy.io import fits
from util import tool
from astropy.table import Table
```
```python
path_data = "/data4/gecko/factory/gecko"
imlist = sorted(glob.glob(f"{path_data}/C*m.fits"))
print(imlist)
```
['/data4/gecko/factory/gecko/Calib-LOAO-NGC6555-20230421-114409-B-180.com.fits', '/data4/gecko/factory/gecko/Calib-LOAO-NGC6814-20230421-115414-B-180.com.fits']
```python
obs = 'LOAO'
filte = 'B'
```
- Edit the test images, setting the OBJECT header keyword to MS230425z
```python
for inim in imlist:
tool.puthdr(inim, 'OBJECT', 'MS230425z', "")
```
```python
image_table = Table()
image_table['image'] = imlist
image_table['object'] = " "*20
image_table['filter'] = " "*5
image_table['project'] = " "*10
image_table['obsmode'] = " "*10
```
```python
objlist = []
for ii, inim in enumerate(imlist):
_hdr = fits.getheader(inim)
_obj = _hdr['OBJECT']
_filte = _hdr['FILTER']
if _obj[:3] == "MS2":
project = "GECKO"
obsmode = "MockGW"
elif _obj[:2] == "S2":
project = "GECKO"
obsmode = "GW"
elif "GRB" in _obj[:3]:
project = "GECKO"
obsmode = "GRB"
elif _obj[:3] == "FRB":
project = "GECKO"
obsmode = "GRB"
elif _obj[:2] == "SN":
project = "GECKO"
obsmode = "SN"
elif _obj[:2] == "AT":
project = "GECKO"
obsmode = "ATel"
else:
project = "IMSNG"
obsmode = "Monitor"
image_table['object'][ii] = _obj
image_table['filter'][ii] = _filte
image_table['project'][ii] = project
image_table['obsmode'][ii] = obsmode
if _obj not in objlist:
objlist.append(_obj)
print(f"[{ii}] {os.path.basename(inim)}: {_obj} {project}-{obsmode}")
# objlist = list(set(objlist))
objlist = []
```
[0] Calib-LOAO-NGC6555-20230421-114409-B-180.com.fits: MS230425z GECKO-MockGW
[1] Calib-LOAO-NGC6814-20230421-115414-B-180.com.fits: MS230425z GECKO-MockGW
```python
image_table
```
<div><i>Table length=2</i>
<table id="table140355635633104" class="table-striped table-bordered table-condensed">
<thead><tr><th>image</th><th>object</th><th>filter</th><th>project</th><th>obsmode</th></tr></thead>
<thead><tr><th>str76</th><th>str20</th><th>str5</th><th>str10</th><th>str10</th></tr></thead>
<tr><td>/data4/gecko/factory/gecko/Calib-LOAO-NGC6555-20230421-114409-B-180.com.fits</td><td>MS230425z</td><td>B</td><td>GECKO</td><td>MockGW</td></tr>
<tr><td>/data4/gecko/factory/gecko/Calib-LOAO-NGC6814-20230421-115414-B-180.com.fits</td><td>MS230425z</td><td>B</td><td>GECKO</td><td>MockGW</td></tr>
</table></div>
- Save the file
```python
# if project == "GECKO":
# for obj in objlist:
# print(path_save)
path_save = f"/data7/GECKO"
for ii, image in enumerate(image_table['image']):
path_goal = f"{path_save}/{image_table['object'][ii]}/{obs}/{image_table['filter'][ii]}"
path_transient = f"{path_goal}/transients"
# Main image
mvcom = f"mv {image} {path_goal}"
print(mvcom)
# os.makedirs(path_goal)
# os.system(mvcom)
transientkey = image.replace("Calib", "hdCalib").replace("fits", "*fits")
tmvcom = f"mv {transientkey} {path_transient}"
print(tmvcom)
# os.makedirs(path_transient)
    # os.system(tmvcom)
```
mv /data4/gecko/factory/gecko/Calib-LOAO-NGC6555-20230421-114409-B-180.com.fits /data7/GECKO/MS230425z/LOAO/B
mv /data4/gecko/factory/gecko/hdCalib-LOAO-NGC6555-20230421-114409-B-180.com.*fits /data7/GECKO/MS230425z/LOAO/B/transients
mv /data4/gecko/factory/gecko/Calib-LOAO-NGC6814-20230421-115414-B-180.com.fits /data7/GECKO/MS230425z/LOAO/B
mv /data4/gecko/factory/gecko/hdCalib-LOAO-NGC6814-20230421-115414-B-180.com.*fits /data7/GECKO/MS230425z/LOAO/B/transients
|
SilverRonREPO_NAMEgppyPATH_START.@gppy_extracted@gppy-main@TransferGeckoFiles.ipynb@.PATH_END.py
|
{
"filename": "panel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/curses/panel.py",
"type": "Python"
}
|
"""curses.panel
Module for using panels with curses.
"""
from _curses_panel import *
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@curses@panel.py@.PATH_END.py
|
{
"filename": "plot_sed.py",
"repo_name": "KarlenS/swift-uvot-analysis-tools",
"repo_path": "swift-uvot-analysis-tools_extracted/swift-uvot-analysis-tools-master/plot_sed.py",
"type": "Python"
}
|
#!/Users/karlen/anaconda2/envs/astroconda/bin/python
from astropy.io import fits
import matplotlib.pyplot as plt
import argparse
import numpy as np
def readData(filename):
    """Return the data array/table from the first data HDU of a FITS file."""
    return fits.getdata(filename)
def plotSED(dat, axs, color='black', label=None):
    """Plot the extinction-corrected SED of one source on *axs*.

    Args:
        dat: FITS table with columns 'filter', 'FluxExtCorr' and
            'FluxExtCorrErr'.
        axs (matplotlib.axes.Axes): axes to draw on.
        color (str, optional): marker color.
        label (str, optional): legend label for this source.
    """
    # Central wavelengths of the Swift/UVOT filters, in Angstrom
    central_wav = {'uu':3465.,'w1':2600.,'m2':2246.,'w2':1928.,'bb':4392.,'vv':5468.}
    c = 2.9979E8*1E10 #speed of light in angstroms (per second)
    # Invisible point registers the legend entry with the chosen color
    axs.errorbar(None,None,fmt='o',color=color,label=label,alpha=0.7)
    for filt, wav in central_wav.items():
        ind = dat['filter'] == filt
        if True in ind:
            # Only the extinction-corrected fluxes are plotted; the raw
            # FluxDensity columns were read but unused and have been dropped.
            fluxcorr = dat[ind]['FluxExtCorr']
            ferrcorr = dat[ind]['FluxExtCorrErr']
            # x-axis is frequency: nu = c / lambda
            axs.errorbar(c/wav, fluxcorr, yerr=ferrcorr, fmt='o', color=color, alpha=0.7)
    axs.set_ylabel(r'Flux (ExtCorr) [ erg cm$^{-2}$ s$^{-1}$ ]')
    axs.set_xlabel(r'Frequency [ Hz ]')
    axs.legend(ncol=4)
def main():
    """Command-line entry point: plot SED(s) from one file or a list of files."""
    parser = argparse.ArgumentParser(description='Quick plotter for UVOT SEDs.')
    parser.add_argument('-f', default=None, help='Name of fits file to plot.')
    parser.add_argument('-l', default=None, help='List of fits files to plot.')
    args = parser.parse_args()

    # At least one input source is required.
    if not (args.f or args.l):
        raise parser.error('Either provide a fits file or a file with a list of fits files to plot')

    if args.f:
        # Single file: one set of axes, default styling.
        dat = readData(args.f)
        fig, axs = plt.subplots(1, sharex=True, figsize=(10, 6))
        plotSED(dat, axs)

    if args.l:
        # Multiple files: one color per source, labeled by a filename slice.
        files = np.genfromtxt(args.l, dtype=str)
        colors = plt.cm.rainbow(np.linspace(0, 1, np.size(files)))
        fig, axs = plt.subplots(1, sharex=True, figsize=(10, 6))
        for fname, col in zip(files, colors):
            plotSED(readData(fname), axs, color=col, label=fname[6:21])

    plt.show()
# Run only when executed as a script
if __name__ == '__main__':
    main()
|
KarlenSREPO_NAMEswift-uvot-analysis-toolsPATH_START.@swift-uvot-analysis-tools_extracted@swift-uvot-analysis-tools-master@plot_sed.py@.PATH_END.py
|
{
"filename": "Needham_problems.py",
"repo_name": "wmpg/Supracenter",
"repo_path": "Supracenter_extracted/Supracenter-master/supra/Yields/examples/Needham_problems.py",
"type": "Python"
}
|
import numpy as np
from supra.Atmosphere.Pressure import *
from supra.Yields.YieldFuncs import *
from supra.Yields.YieldCalcs import *
# Worked examples of blast-wave scaling (Section 12.3).
print("Section 12.3 Examples of Scaling")
# Cube-root scaling: range to a given overpressure scales as (W/W_0)**(1/3).
W_0 = 1 # in pounds
W = 1000 # in pounds
d = 3.61  # ft: range at which W_0 produces 60 psi
print("We know that {:} ft away from a {:} pound charge produces 60 psi".format(d, W_0))
print("Therefore, a {:.2f} pound charge will produce the same overpressure at {:.2f} ft".format(W, (W/W_0)**(1/3)*d))
print("To find the range at which 110 kPa occurs for a 1 g charge")
# Scale factor going from a 1 lb charge down to a 1 g charge
f = (1/454)**(1/3)
print("Our radius is multiplied by a factor of {:.3f}".format(f))
# 1 pound = 454 grams
print("So if 110 kPa occurs at 1.91 m for a 1 pound charge, it will occur at {:.4f} m for a 1 g charge".format(f*1.91))
print("")
# Altitude scaling: compare ambient pressure at burst height vs. the ground.
print("Atmospheric Scaling")
H1 = 6500/3.281 # meters
H2 = 0  # meters (ground level)
# Ambient pressures at the two altitudes
P1 = estPressure(H1)
P2 = estPressure(H2)
print("Blast from a height of {:.2f} km ({:.2f} Pa) to the ground ({:.2f} Pa)".format(H1/1000, P1, P2))
print("Atmospheric Pressure Ratio: {:.3f}".format(P1/P2))
print("The distance to 15.4 psi is 6.4 ft from the table (interpolated)")
print("This is also the distance to 12 psi at 6500 ft")
|
wmpgREPO_NAMESupracenterPATH_START.@Supracenter_extracted@Supracenter-master@supra@Yields@examples@Needham_problems.py@.PATH_END.py
|
{
"filename": "_showexponent.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/cone/colorbar/_showexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `cone.colorbar.showexponent` enumerated property."""

    def __init__(
        self, plotly_name="showexponent", parent_name="cone.colorbar", **kwargs
    ):
        # Defaults used unless explicitly overridden by the caller.
        edit_type = kwargs.pop("edit_type", "colorbars")
        allowed_values = kwargs.pop("values", ["all", "first", "last", "none"])
        super(ShowexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@cone@colorbar@_showexponent.py@.PATH_END.py
|
{
"filename": "three-cornered-hat-demo.ipynb",
"repo_name": "aewallin/allantools",
"repo_path": "allantools_extracted/allantools-master/examples/three-cornered-hat-demo.ipynb",
"type": "Jupyter Notebook"
}
|
# Three-cornered-hat test
See http://www.wriley.com/3-CornHat.htm
We test ADEV and related deviations by computing them on synthetic data
with known ADEV slopes.
#### Import packages and setup notebook
```python
%matplotlib inline
```
```python
import numpy
import matplotlib.pyplot as plt
import allantools
from allantools import noise
```
```python
def plotallan_phase(plt,y,rate,taus, style):
(t2, ad, ade,adn) = allantools.mdev(y,rate=rate,taus=taus)
plt.loglog(t2, ad, style)
# plot a line with the slope alpha
def plotline(plt, alpha, taus,style):
y = [ pow(tt,alpha) for tt in taus]
plt.loglog(taus,y,style)
```
Generate some example data
```python
t = numpy.logspace( 0 ,4,50) # tau values from 1 to 1000
N=10000
rate = 1.0
# white phase noise => 1/tau ADEV
d = numpy.random.randn(4*N)
phaseA = d[0:N] # numpy.random.randn(N) #pink(N)
phaseA = [1*x for x in phaseA]
phaseB = d[N:2*N] #numpy.random.randn(N) #noise.pink(N)
phaseB = [5*x for x in phaseB]
phaseC = d[2*N:3*N] #numpy.random.randn(N) #noise.pink(N)
phaseC = [5*x for x in phaseC]
phaseAB = [a-b for (a,b) in zip(phaseA,phaseB)]
phaseBC = [b-c for (b,c) in zip(phaseB,phaseC)]
phaseCA = [c-a for (c,a) in zip(phaseC,phaseA)]
```
Now, run three-cornered hat phase calculation
```python
(taus,devA,err_a,ns_ab) = allantools.three_cornered_hat_phase(phaseAB,phaseBC,phaseCA,rate,t, allantools.mdev)
```
Plot results:
```python
plt.subplot(111, xscale="log", yscale="log")
plotallan_phase(plt, phaseA, 1, t, 'ro')
plotallan_phase(plt, phaseB, 1, t, 'go')
plotallan_phase(plt, phaseC, 1, t, 'bo')
plotallan_phase(plt, phaseAB, 1, t, 'r.')
plotallan_phase(plt, phaseBC, 1, t, 'g.')
plotallan_phase(plt, phaseCA, 1, t, 'b.')
plt.loglog(taus, devA, 'rv')
plt.grid()
plt.show()
```

```python
```
```python
```
|
aewallinREPO_NAMEallantoolsPATH_START.@allantools_extracted@allantools-master@examples@three-cornered-hat-demo.ipynb@.PATH_END.py
|
{
"filename": "casatools_vla_pipe.py",
"repo_name": "interferopy/interferopy",
"repo_path": "interferopy_extracted/interferopy-master/interferopy/casatools_vla_pipe.py",
"type": "Python"
}
|
# """VLA pipeline helper functions"""
import numpy as np
import os
import scipy.constants
# import some stuff from CASA
# Only available when running inside a CASA session (CASAPATH is set).
# Outside CASA, `msmd` is left undefined, so functions that use it
# (e.g. build_cont_dat) can only be called from within CASA.
if os.getenv('CASAPATH') is not None:
    # import casadef
    from taskinit import *
    msmd = msmdtool() # need for metadata
def flagtemplate_add(sdm="", flagcmds=None, outfile=""):
    """
    Append additional flagging commands to the flagtemplate if they are not present already.
    Useful for L-band VLA HI obs: e.g. flagcmds=["mode='manual' spw='1,2:0~64,4'"]
    :param sdm: path to visibility data, will append .flagtemplate.txt to it
    :param flagcmds: list of commands to append into the template file (None means no commands)
    :param outfile: override naming to custom filename
    :return: None
    """
    # Avoid a mutable default argument ([]), which is shared across calls.
    if flagcmds is None:
        flagcmds = []
    if outfile == "":
        outfile = sdm + ".flagtemplate.txt"
    # Read existing commands (if any) so we never append duplicates.
    if os.path.exists(outfile):
        with open(outfile, "r") as f:
            content = f.read().splitlines()
    else:
        content = [""]
    # Open in append mode (creates the file if it does not exist yet).
    with open(outfile, "a") as f:
        for cmd in flagcmds:
            if cmd not in content:
                f.write("\n" + cmd + "\n")
    return
def partition_cont_range(line_freqs=(), line_widths=(), spw_start=1, spw_end=2):
    """
    Cuts one continuum range into smaller ones to avoid lines.
    :param line_freqs: line frequencies in GHz
    :param line_widths: widths of lines in GHz to cut from the continuum
    :param spw_start: start of the SPW in GHz
    :param spw_end: end of the SPW in GHz
    :return: list of continuum chunks, each defined as a dictionary with start and end freqs in GHz.
    """
    # Immutable defaults (tuples) instead of mutable [] defaults.
    # Make sure lists are treated as float vectors.
    line_freqs = np.array(line_freqs)
    line_widths = np.array(line_widths)
    # define line ranges that will be excluded
    line_starts = line_freqs - 0.5 * line_widths
    line_ends = line_freqs + 0.5 * line_widths
    # start with the whole spw as one continuum chunk
    cont_chunks = [dict(start=spw_start, end=spw_end)]
    for i in range(len(line_freqs)):
        # for each line loop over the continuum chunk collection and modify it in the process
        j = 0
        while j < len(cont_chunks):
            # line entirely outside this chunk: nothing to do
            if line_ends[i] < cont_chunks[j]["start"] or line_starts[i] > cont_chunks[j]["end"]:
                pass
            # line covers the whole chunk: delete it (and re-examine this index)
            elif line_starts[i] <= cont_chunks[j]["start"] and line_ends[i] >= cont_chunks[j]["end"]:
                cont_chunks.pop(j)
                j = j - 1
            # line covers left edge only: move the chunk start up
            elif line_starts[i] < cont_chunks[j]["start"] and line_ends[i] >= cont_chunks[j]["start"]:
                cont_chunks[j]["start"] = line_ends[i]
            # line covers right edge only: move the chunk end down
            elif line_starts[i] <= cont_chunks[j]["end"] and line_ends[i] > cont_chunks[j]["end"]:
                cont_chunks[j]["end"] = line_starts[i]
            # line in the middle: split the chunk into two
            elif line_starts[i] > cont_chunks[j]["start"] and line_ends[i] < cont_chunks[j]["end"]:
                cont_chunks.insert(j + 1, dict(start=line_ends[i], end=cont_chunks[j]["end"]))
                cont_chunks[j]["end"] = line_starts[i]
                # skip over the newly inserted chunk (already line-free for this line)
                j = j + 1
            # other non-implemented scenarios (should be unreachable; kept for safety)
            else:
                pass
            # progress to the next chunk
            j = j + 1
    return cont_chunks
def build_cont_dat(vis="", line_freqs=None, line_widths=None, fields=None, outfile="cont.dat", overwrite=False, append=False):
    """
    Creates a cont.dat file for the VLA pipeline. Must be run in CASA (uses msmetadata).
    It currently reads SPW edges in the original observed frame (usually TOPO),
    but writes them down as LSRK. Should not matter much, edges should be flagged anyway.
    Example of cont.dat content from NRAO online documentation:
    https://science.nrao.edu/facilities/vla/data-processing/pipeline/#section-25
    :param vis: path to the measurement set
    :param line_freqs: line frequencies (obs frame, LSRK) in GHz
    :param line_widths: widths of lines (obs frame, LSRK) in GHz to cut from the continuum
    :param fields: science target fields. If empty, TARGET intent fields are used.
    :param outfile: path to the output cont.dat file
    :param overwrite: if True and the outfile exists, it will be overwritten
    :param append: add at the end of existing cont.dat file, useful for optimising lines per field
    :return: None
    """
    # Avoid mutable default arguments ([]), which are shared across calls.
    line_freqs = [] if line_freqs is None else line_freqs
    line_widths = [] if line_widths is None else line_widths
    fields = [] if fields is None else fields
    # if no fields are provided use observe_target intent
    # I saw once a calibrator also has this intent so check carefully
    msmd.open(vis)
    try:
        if len(fields) < 1:
            # fields = msmd.fieldsforintent("*OBSERVE_TARGET*", True)
            fields = msmd.fieldsforintent("*TARGET*", True)
        if len(fields) < 1:
            print("ERROR: no fields!")
            return
        if os.path.exists(outfile) and not overwrite and not append:
            print("ERROR: file already exists!")
            return
        # generate a dictionary containing continuum chunks for every spw of every field
        cont_dat = {}
        for field in fields:
            spws = msmd.spwsforfield(field)
            cont_dat_field = {}
            for spw in spws:
                # Get freq range of the SPW
                chan_freqs = msmd.chanfreqs(spw)
                # SPW edges are reported in whichever frame was used for observing (usually TOPO)
                # TODO: implement some transformations to LSRK for the edges?
                spw_start = np.min(chan_freqs) * 1e-9  # GHz
                spw_end = np.max(chan_freqs) * 1e-9  # GHz
                cont_chunks = partition_cont_range(line_freqs, line_widths, spw_start, spw_end)
                cont_dat_field.update({spw: cont_chunks})
            cont_dat.update({field: cont_dat_field})
    finally:
        # Always release the metadata tool, even on the early error returns
        # above (the original leaked the open msmd handle in those paths).
        msmd.close()
    # write the dictionary into a file usable by the CASA VLA pipeline
    access_mode = "a" if append else "w"
    with open(outfile, access_mode) as f:
        for field in cont_dat.keys():
            f.write("\nField: " + field + "\n")
            for spw in cont_dat[field].keys():
                if len(cont_dat[field][spw]) > 0:
                    f.write("\nSpectralWindow: " + str(spw) + "\n")
                    for chunk in cont_dat[field][spw]:
                        f.write(str(chunk["start"]) + "~" + str(chunk["end"]) + "GHz LSRK\n")
            f.write("\n")
    print("DONE: written in " + outfile)
    return
def lines_rest2obs(line_freqs_rest, line_widths_kms, vrad0):
    """
    Get observed frame frequencies and widths of the lines.
    :param line_freqs_rest: list of rest-frame line frequencies
    :param line_widths_kms: list of rest-frame linewidths in km/s
    :param vrad0: systemic velocity of the galaxy in km/s
    :return: line_freqs, line_widths, both in GHz
    """
    # speed of light in km/s
    c_kms = scipy.constants.c / 1000.
    # first-order (radio convention) Doppler shift factor
    doppler_factor = 1 - vrad0 / c_kms
    line_freqs = np.array(line_freqs_rest) * doppler_factor
    # convert the velocity widths to frequency widths at the shifted frequencies
    line_widths = np.array(line_freqs) * line_widths_kms / c_kms
    return line_freqs, line_widths
|
interferopyREPO_NAMEinterferopyPATH_START.@interferopy_extracted@interferopy-master@interferopy@casatools_vla_pipe.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/chart-studio/chart_studio/__init__.py",
"type": "Python"
}
|
from __future__ import absolute_import
from chart_studio import plotly, dashboard_objs, grid_objs, session, tools
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@chart-studio@chart_studio@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/src/chrono_swig/chrono_python/README.md",
"type": "Markdown"
}
|
# PyChrono Sensor Module
## Extra/special dependencies
- numpy
## Reasons why the sensor+python interface was setup like it was
#### Numpy
Here is why we wanted to use numpy and what that gives us performance-wise:
- cmake point to numpy include directory
- an additional CMake flag was added when sensor and python are both enabled. The cmake flag is called "NUMPY_INCLUDE_DIR" and should be pointed to the include directory of numpy. The user will have to do this manually, as there is no clear way to find the directory automatically. (TODO: maybe change with "because findPYTHON::Numpy would require CMake >= 3.14"?)
- data wrapping (typemap, array shape, Argout View Arrays)
- typemap: Numpy uses SWIG typemaps to wrap arrays. SWIG typemaps look for functions with the same name and type the typemap was declared for and operates the argin and argout operations described in the typemap (in this case the typemap operations are inside numpy.i)
- array shape
- Cameras and Lidar: since python packages (such as opencv) image shapes are typically BxWx3 we adopted the same rule
- GPS and IMU: using the same rule, GPS and IMU data would have had 1x1xN shape. For this reason they are mapped to a 1D array
- Argout View Arrays
- numpy.i offers different ways of wrapping arrays. This one in particular does not instantiate memory, but instead uses the memory allocated by the C++ code. This approach is the most efficient, and can be used since:
- the memory is moved to the user, so only the user retains ownership of the array.
- the memory does not get deleted since it does not go out of scope, due to the fact that the array is returned to the python process.
#### UserBufferHostPtr
Because swig does not support std::unique_ptr, an alternative approach was required transferring data from the render threads to the master thread on which the user requests sensor data. The final solution was to switch from unique to shared pointer for all the sensor buffers. Because these would give non-threadsafe access to underlying memory to the user, the following setup is used. The underlying filter-graph buffers are not accessible to the user, and thus can be shared_ptrs. The previously named "LockedBufferXXPtr", which is now named "UserBufferXXPtr", is a pointer to moved memory. That is the underlying buffers owndership has been moved from the filter graph's point to the UserBufferXXPtr. This operation is performed with std::move() when the user calls GetMostRecentBuffer(). This means that if the user is calling this every simulation step, they should check if the data is null before trying to use the values. In the python and C++ demos, this operation is shown.
#### Function Specialization/Extension
- HasData()
- To check if the data is null as mentioned in the previous section, we extended each UserBufferXXPtr, adding a method called "HasData". It returns True if the pointer to the data is not NULL.
- GetMostRecentXXBuffer()
- Since GetMostRecentBuffer is templated, we instantiated it for each type of Buffer, i. e. to get a buffer of PixelDI the user will call GetMostRecentDIBuffer.
- GetXXData()
- We extended each Buffer type, providing them with a GetXXData (i. e. GetDIData for Pixel DI) to wrap their data raw arrays as Numpy arrays.
#### Miscellaneous
- user accessing device pointer (nightmare)
- user can only access memory on the CPU. Getting a pointer to memory on the device was not seriously considered when setting up the interface, for 2 reasons. 1) It was deemed an unlikely use case for a user to want a device pointer to sensor data when using Python (or C++). 2) That would complicate the swig wrapping for no clear reason.
- cudamemcpy of data directly for UserBufferHostPtr vs optix::map and memcpy
- A couple places in the sensor module, we apply a filter that copies memory from the filter graph into host memory that will be moved into a UserBufferPtr. The can be done via mapping the optix::buffer, then calling memcpy, or can be done via cudaMemcpy from device to host directly into space that will be moved to UserBufferPtr. The cudaMemcpy was deemed faster, but this should be further analyzed, and a consistent solution should be implemented.
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@src@chrono_swig@chrono_python@README.md@.PATH_END.py
|
{
"filename": "Example_5_Inverse_Compton_Scattering.ipynb",
"repo_name": "hongwanliu/DarkHistory",
"repo_path": "DarkHistory_extracted/DarkHistory-master/examples/Example_5_Inverse_Compton_Scattering.ipynb",
"type": "Jupyter Notebook"
}
|
# Example 5: Inverse Compton Scattering
DarkHistory comes with the module [*darkhistory.electrons.ics*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/electrons/darkhistory.electrons.ics.html) to compute the inverse Compton scattering (ICS) scattered photon spectrum in the Thomson limit and in the relativistic limit, as well as the scattered electron energy-loss spectrum, which can be used to obtain the scattered electron spectrum. See our paper for more details and definitions for these terms.
All of our calculations assume ICS off a **blackbody spectrum**.
## Initialization
```python
%load_ext autoreload
import sys
sys.path.append("..")
```
```python
%matplotlib inline
```
```python
%autoreload
import matplotlib
matplotlib.rc_file('matplotlibrc')
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import numpy as np
import darkhistory.physics as phys
import darkhistory.utilities as utils
import darkhistory.spec.spectools as spectools
import darkhistory.spec.transferfunction as tf
from darkhistory.spec.spectrum import Spectrum
from darkhistory.electrons.ics.ics_spectrum import ics_spec
from darkhistory.electrons.ics.ics_spectrum import thomson_spec
from darkhistory.electrons.ics.ics_spectrum import rel_spec
from darkhistory.electrons.ics.ics_engloss_spectrum import engloss_spec
from darkhistory.electrons.ics.ics_cooling import get_ics_cooling_tf
```
## Computing the ICS Spectra
Before we begin, the user should note that DarkHistory provides the transfer functions for inverse Compton scattering off the CMB for immediate use with the default electron and photon binning for spectra found in [*config*](https://darkhistory.readthedocs.io/en/latest/_autosummary/config.html). The user will not have to worry about computing ICS spectra separately if this is the only way in which the ICS results are used.
However, some users may want to specifically use our ICS code to compute their own spectra, with their own binning in electron and photon energies. This example will illustrate how to do that.
### Scattered Photon Spectrum - Thomson Regime
First, we'll compute the scattered photon spectrum in the Thomson regime. ICS occurs in the Thomson regime when
$$ \frac{4\epsilon \gamma_e}{m_e} \ll 1 \,,$$
where $\epsilon$ is the initial energy of the photon, $\gamma_e$ is the Lorentz boost of the injected electron, and $m_e$ is the electron mass. For the range of redshifts we consider in DarkHistory $1+z < 3000$, $\gamma_e \lesssim 10^4$ can be considered to lie well within the Thomson regime. Note that the Thomson and relativistic regime (where $\gamma_e \gg 1$) have a large overlap, and calculations in both regimes should yield almost identical results.
The relevant function that we will call is [*ics.thomson_spec()*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/electrons/ics/ics_spectrum/darkhistory.electrons.ics.ics_spectrum.thomson_spec.html). This function takes an abscissa for the kinetic energy of electrons, the energy of photons, and the temperature at which we want the spectrum.
The successful calculation of the spectrum looks like this:
```
Initializing...
***** Computing Spectra by Expansion in beta ...... Complete! *****
***** Computing Spectra by Analytic Series... *****
Series 1/12...
Series 2/12...
Series 3/12...
Series 4/12...
Series 5/12...
Series 6/12...
Series 7/12...
Series 8/12...
Series 9/12...
Series 10/12...
Series 11/12...
Series 12/12...
***** Analytic Series Computation Complete! *****
########### Spectrum computed! ###########
```
```python
nEe = 500
nEp = 500
Emax = 1.1e10
Emin = 0.9e-8
dlnEp = np.log(Emax/Emin)/nEp
lowengEp = Emin*np.exp((np.arange(nEp)+0.5)*dlnEp)
dlnEe = np.log(Emax/Emin)/nEe
lowengEe = Emin*np.exp((np.arange(nEe)+0.5)*dlnEe)
ics_thomson_tf = thomson_spec(lowengEe, lowengEp, 0.25)
```
Let's make a plot of the spectrum produced. The matrix corresponding to the transfer function can be accessed directly by the attribute [*grid_vals*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/spec/transferfunction/darkhistory.spec.transferfunction.TransFuncAtRedshift.html), and is indexed by (``eleceng``, ``photeng``).
```python
plt.figure(figsize=(7.8, 6.2))
plt.contourf(
lowengEe, lowengEp, np.transpose(ics_thomson_tf.grid_vals),
levels=10.**np.array([-25, -22, -18, -15, -12, -10, -8, -6, -4, -2, 0]), cmap = 'inferno',
norm = LogNorm()
)
plt.colorbar(label=r'$\frac{dN_\gamma}{dE_\gamma \, dt}$ [eV$^{-1}$ s$^{-1}$ ]')
ax = plt.gca()
ax.set_xscale('log')
ax.set_yscale('log')
plt.title(r'\textbf{ICS Scattered Photon Spectrum}')
plt.xlabel('Electron Kinetic Energy [eV]')
plt.ylabel('Scattered Photon Energy [eV]')
plt.text(1e-7, 3e13, 'Thomson Regime', fontsize=20)
plt.text(1e-7, 2e11, r'$T_{\mathrm{CMB}} = $ 0.25 eV', fontsize=20)
plt.axis([1e-8, 1e8, 1e-8, 1e16])
```
### Scattered Photon Spectrum - Relativistic Regime
We can also get the spectrum in the relativistic regime, i.e. when $\gamma_e \gg 1$. In DarkHistory, the transition occurs by default at $\gamma_e = 20$. The function that does this is [*ics.rel_spec()*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/electrons/ics/ics_spectrum/darkhistory.electrons.ics.ics_spectrum.rel_spec.html). Note that this function takes the *total electron energy* as input, rather than the kinetic energy. The inputs are otherwise the same as for `thomson_spec()`.
`rel_spec` also has a flag `inf_upp_bound`, which determines the level of approximation to apply to the spectrum. If `inf_upp_bound` is True, only downscattering of electrons is included among other approximations, and returns a spectrum that is unreliable below a final photon energy of $E_1 < T_\text{CMB}$. However, this part of the spectrum accounts for a minute portion of the total energy deposited into photons, and the produced transfer function can be reused for different redshifts.
With `inf_upp_bound` set to False, upscattering of electrons is included, and the spectrum is accurate up to $\mathcal{O}(1/\gamma^2)$ terms.
The successful execution of this calculation will produce the following printout:
```
Initializing...
Computing series 1/4...
Computing series 2/4...
Computing series 3/4...
Computing series 4/4...
Relativistic Computation Complete!
```
```python
Emax_rel = 1e20
Emin_rel = 1e-8
nEe = 500
nEp = 500
dlnEp = np.log(Emax_rel/Emin_rel)/nEp
lowengEp_rel = Emin_rel*np.exp((np.arange(nEp)+0.5)*dlnEp)
dlnEe = np.log(Emax_rel/Emin_rel)/nEe
lowengEe_rel = Emin_rel*np.exp((np.arange(nEe)+0.5)*dlnEe)
ics_rel_tf = rel_spec(phys.me+lowengEe_rel, lowengEp_rel, 0.25, inf_upp_bound=False)
```
And now we'll plot the spectrum. Observe that the relativistic spectrum at the low energy end of this plot is exactly the same as the Thomson spectrum at the same energy: we have chosen parameters such that both the Thomson and relativistic approximations are valid in this region.
```python
plt.figure(figsize=(7.8, 6.2))
plt.contourf(
phys.me+lowengEe_rel, lowengEp_rel, np.transpose(ics_rel_tf.grid_vals),
levels=10.**np.array([-25, -22, -18,-15, -12, -10, -8, -6, -4, -2, 0]), cmap = 'inferno',
norm = LogNorm()
)
plt.colorbar(label=r'$\frac{dN_\gamma}{dE_\gamma \, dt}$ [eV$^{-1}$ s$^{-1}$]')
ax = plt.gca()
ax.set_xscale('log')
ax.set_yscale('log')
plt.title(r'\textbf{ICS Scattered Photon Spectrum}')
plt.xlabel('Electron Kinetic Energy [eV]')
plt.ylabel('Scattered Photon Energy [eV]')
plt.text(3e8, 3e13, 'Relativistic Regime', fontsize=20)
plt.text(3e8, 2e11, r'$T_{\mathrm{CMB}} = $ 0.25 eV', fontsize=20)
plt.axis([1e8, 1e14, 1e-8, 1e16])
```
### Scattered Photon Spectrum - All Regimes
The user may simply use [*ics.ics_spec()*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/electrons/ics/ics_spectrum/darkhistory.electrons.ics.ics_spectrum.ics_spec.html) to switch between the two regimes automatically. There are two ways to use this function: either by passing it the electron kinetic energy and photon energy abscissae, or by passing it transfer functions, over which a very fast interpolation can be done to get the spectrum at other temperatures.
Here, let's use `ics.ics_spec` to calculate the spectrum from scratch.
```python
Emax = 1e14
Emin = 1e-8
nEe = 500
nEp = 500
dlnEp = np.log(Emax/Emin)/nEp
Ep = Emin*np.exp((np.arange(nEp)+0.5)*dlnEp)
dlnEe = np.log(Emax/Emin)/nEe
Ee = Emin*np.exp((np.arange(nEe)+0.5)*dlnEe)
ics_tf = ics_spec(Ee, Ep, 0.25, inf_upp_bound=False)
```
And now let's make the plot! Observe the smooth transition at $\gamma_e = 20 \sim $ 10 MeV in electron energy. We've demonstrated a way of obtaining the ICS scattering spectrum of electrons off the CMB over a wide range of regimes.
```python
plt.figure(figsize=(7.8, 6.2))
plt.contourf(
Ee, Ep, np.transpose(ics_tf.grid_vals),
levels=10.**np.array([-25, -22, -18,-15, -12, -10, -8, -6, -4, -2, 0]), cmap = 'inferno',
norm = LogNorm()
)
plt.axvline(19*phys.me, color='gray', linestyle=':')
plt.colorbar(label=r'$\frac{dN_\gamma}{dE_\gamma \, dt}$ [eV$^{-1}$ s$^{-1}$]')
ax = plt.gca()
ax.set_xscale('log')
ax.set_yscale('log')
plt.title(r'\textbf{ICS Scattered Photon Spectrum}')
plt.xlabel('Electron Kinetic Energy [eV]')
plt.ylabel('Scattered Photon Energy [eV]')
plt.text(3e-7, 3e13, r'$T_{\mathrm{CMB}} = $ 0.25 eV', fontsize=20)
plt.text(5e5, 3e8, 'Transition', fontsize=20, rotation=90, color='gray')
plt.axis([1e-8, 1e14, 1e-8, 1e16])
```
The other way to use `ics_spec()` is to pass it two transfer functions, one corresponding to a Thomson regime transfer function, the other a relativistic regime transfer function, both evaluated at some reference temperature $T_\text{ref}$. We can now interpolate over these two transfer functions to obtain the spectrum at different temperatures. First, we need to produce the relativistic regime transfer function with `inf_upp_bound` set to True. This is the correct setting for using the transfer function for interpolation.
```python
Emax_rel = 1e20
Emin_rel = 1e-8
nEe = 500
nEp = 500
dlnEp = np.log(Emax_rel/Emin_rel)/nEp
lowengEp_rel = Emin_rel*np.exp((np.arange(nEp)+0.5)*dlnEp)
dlnEe = np.log(Emax_rel/Emin_rel)/nEe
lowengEe_rel = Emin_rel*np.exp((np.arange(nEe)+0.5)*dlnEe)
ics_rel_interp_tf = rel_spec(phys.me+lowengEe_rel, lowengEp_rel, 0.25, inf_upp_bound=True)
```
And now here's the code to get the ICS spectrum at $T = 10^{-2}$ eV from the transfer functions that we have already evaluated at $T = 0.25 $ eV. It's important that the transfer functions are evaluated over a large enough range of electron and photon energies.
```python
Emax = 1e20
Emin = 1e-5
nEe = 500
nEp = 500
dlnEp = np.log(Emax/Emin)/nEp
Ep = Emin*np.exp((np.arange(nEp)+0.5)*dlnEp)
dlnEe = np.log(Emax/Emin)/nEe
Ee = Emin*np.exp((np.arange(nEe)+0.5)*dlnEe)
# The function arguments are
#(electron kinetic energy, photon energy, temperature, Thomson transfer function,
# relativistic transfer function, reference temperature)
ics_interp_tf = ics_spec(Ee, Ep, 1e-2, thomson_tf = ics_thomson_tf, rel_tf = ics_rel_interp_tf, T_ref = 0.25)
```
And the plot of the spectra at $T = 10^{-2}$ eV. Notice the poor transition at the low end of the spectrum. This however does not affect the bulk of the spectrum (as measured in energy).
```python
plt.figure(figsize=(7.8, 6.2))
plt.contourf(
Ee, Ep, np.transpose(ics_interp_tf.grid_vals),
levels=10.**np.array([-25, -22, -18,-15, -12, -10, -8, -6, -4, -2, 0]), cmap = 'inferno',
norm = LogNorm()
)
plt.axvline(19*phys.me, color='gray', linestyle=':')
plt.colorbar(label=r'$\frac{dN_\gamma}{dE_\gamma \, dt}$ [eV$^{-1}$ s$^{-1}$]')
ax = plt.gca()
ax.set_xscale('log')
ax.set_yscale('log')
plt.title(r'\textbf{ICS Scattered Photon Spectrum}')
plt.xlabel('Electron Kinetic Energy [eV]')
plt.ylabel('Scattered Photon Energy [eV]')
plt.text(1e-4, 3e13, r'$T_{\mathrm{CMB}} = $ 0.25 eV', fontsize=20)
plt.text(5e5, 3e8, 'Transition', fontsize=20, rotation=90, color='gray')
plt.axis([1e-5, 1e14, 1e-5, 1e16])
```
### Mean Electron Energy Loss Spectrum
We can also compute the scattered electron net energy loss spectrum in a similar fashion. The function of interest is [*ics.ics_engloss_spectrum.engloss_spec()*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/electrons/ics/ics_engloss_spectrum/darkhistory.electrons.ics.ics_engloss_spectrum.engloss_spec.html), and works in a very similar way, although the calculation is more complicated. A successful execution will print the following:
```
###### THOMSON ENERGY LOSS SPECTRUM ######
****** Energy Loss Spectrum by beta Expansion ******
Computing integrals 1/6...
Computing integrals 2/6...
Computing integrals 3/6...
Computing integrals 4/6...
Computing integrals 5/6...
Computing integrals 6/6...
Integrals computed!
****** Complete! ******
****** Energy Loss Spectrum by Analytic Series ******
Computing upscattering loss spectra...
Computing series 1/8...
Computing series 2/8...
Computing series 3/8...
Computing series 4/8...
Computing series 5/8...
Computing series 6/8...
Computing series 7/8...
Computing series 8/8...
Computing downscattering loss spectra...
Computing series 1/8...
Computing series 2/8...
Computing series 3/8...
Computing series 4/8...
Computing series 5/8...
Computing series 6/8...
Computing series 7/8...
Computing series 8/8...
****** Complete! ******
###### COMPLETE! ######
```
We can choose to use the Thomson expressions to compute the energy loss transfer function for all electron energies, by setting the flag `thomson_only` to True. If this flag is set to False, then as previously we switch into the relativistic regime for $\gamma_e > 20$, and in this regime we take the energy loss spectrum to be identical to the scattered photon spectrum, since the photons are upscattered to an energy much greater than their initial energy.
```python
Emax = 1e10
Emin = 1e-8
nEe = 500
nEp = 500
dlnEp = np.log(Emax/Emin)/nEp
Ep = Emin*np.exp((np.arange(nEp)+0.5)*dlnEp)
dlnEe = np.log(Emax/Emin)/nEe
Ee = Emin*np.exp((np.arange(nEe)+0.5)*dlnEe)
ics_engloss_tf = engloss_spec(Ee, Ep, 0.25, thomson_only=True)
```
And the plot:
```python
plt.figure(figsize=(7.8, 6.2))
plt.contourf(
Ee, Ep, np.transpose(ics_engloss_tf.grid_vals),
levels=10.**np.array([-18, -12, -10, -8, -6, -4, -2, 0]), cmap = 'inferno',
norm = LogNorm()
)
plt.colorbar(label=r'$\frac{dN_e}{d\Delta \, dt}$ [eV$^{-1}$ s$^{-1}$]')
ax = plt.gca()
ax.set_xscale('log')
ax.set_yscale('log')
plt.title(r'\textbf{ICS Electron Net Energy Loss Spectrum}')
plt.xlabel(r'Electron Kinetic Energy [eV]')
plt.ylabel('Net Energy Loss $\Delta$ [eV]')
plt.text(1e-7, 3e8, 'Thomson Regime', fontsize=20)
plt.text(1e-7, 1e7, r'$T_{\mathrm{CMB}} = $ 0.25 eV', fontsize=20)
plt.axis([1e-8, 1e10, 1e-8, 1e10])
```
## Secondary Photon Spectrum
With these scattered photon transfer functions, we can now compute the secondary photon spectrum transfer function, which takes injected electrons through its complete cooling process, and produces a final secondary photon spectrum and low energy (sub 3 keV) electron spectrum. This is computed through the function [*electrons.ics.ics_cooling.get_ics_cooling_tf()*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/electrons/ics/ics_cooling/darkhistory.electrons.ics.ics_cooling.get_ics_cooling_tf.html).
We will perform this computation by interpolation of a reference transfer function, evaluated at a temperature of $400 T_{\mathrm{CMB},0}$. We first define the required energy binning:
```python
Emax = 1e20
Emin = 1e-8
nEe = 500
nEp = 500
dlnEp = np.log(Emax/Emin)/nEp
lowengEp_rel = Emin*np.exp((np.arange(nEp)+0.5)*dlnEp)
dlnEe = np.log(Emax/Emin)/nEe
lowengEe_rel = Emin*np.exp((np.arange(nEe)+0.5)*dlnEe)
Emax = 1e10
Emin = 1e-8
nEe = 500
nEp = 500
dlnEp = np.log(Emax/Emin)/nEp
lowengEp_nonrel = Emin*np.exp((np.arange(nEp)+0.5)*dlnEp)
dlnEe = np.log(Emax/Emin)/nEe
lowengEe_nonrel = Emin*np.exp((np.arange(nEe)+0.5)*dlnEe)
```
Next, we compute all of the reference scattered photon spectra (Thomson and relativistic) and the energy loss spectrum, which will be used as the baseline for the interpolation:
```python
print('********* Thomson regime scattered photon spectrum *********')
ics_thomson_ref_tf = thomson_spec(lowengEe_nonrel, lowengEp_nonrel, phys.TCMB(400))
print('********* Relativistic regime scattered photon spectrum *********')
ics_rel_ref_tf = rel_spec(lowengEe_rel, lowengEp_rel, phys.TCMB(400), inf_upp_bound=True)
print('********* Thomson regime energy loss spectrum *********')
engloss_ref_tf = engloss_spec(lowengEe_nonrel, lowengEp_nonrel, phys.TCMB(400), thomson_only=True)
```
Now we compute the full ICS secondary photon spectrum, as well as the low energy secondary electron spectrum, which are produced after the electrons cool completely, at a redshift of $1+z = 1000$.
The outputs of `get_ics_cooling_tf()` are a 2D transfer function for the ICS secondary photons (`ics_sec_phot_tf`, whose grid values are indexed by (`eleceng`, `photeng`), a 2D transfer function for the low-energy electrons (`ics_sec_elec_tf`, indexed by (`eleceng` (in), `eleceng` (out)), a 1D array (indexed by initial electron energy) describing the amount of energy that has been removed from the CMB by scattering, and a 1D array (indexed by initial electron energy) storing the energy nonconservation as a result of numerical errors during the calculation (should all be much smaller than `eleceng`). These quantities are all normalized to a single electron completely cooling through only ICS.
```python
eleceng = 10**np.arange(2, 12, 0.025)
photeng = 10**np.arange(-4, 12, 0.025)
(ics_sec_phot_tf, ics_sec_elec_tf, cont_loss, eng_discrep) = get_ics_cooling_tf(
ics_thomson_ref_tf, ics_rel_ref_tf, engloss_ref_tf,
eleceng, photeng, 1000
)
```
And here's the plot of the result:
```python
plt.figure(figsize=(7.8, 6.2))
ax = plt.gca()
if ics_sec_phot_tf.spec_type == 'N':
ics_sec_phot_tf.switch_spec_type()
plt.contourf(
eleceng, photeng, np.transpose(ics_sec_phot_tf.grid_vals),
levels=10.**np.array([ -12, -8, -4, 0, 4, 6, 8]), cmap = 'inferno',
norm = LogNorm()
)
plt.text(9e3, 1e10, r'$T_{\mathrm{CMB}} = $ 0.25 eV', fontsize=20)
plt.text(9e3, 1e9, 'Single Electron', fontsize=20)
plt.colorbar(label=r'$\frac{dN_\gamma}{dE_\gamma}$ [eV$^{-1}$]')
plt.title(r'\textbf{ICS Secondary Photon Spectrum}')
plt.xlabel('Electron Kinetic Energy [eV]')
plt.ylabel('Secondary Photon Energy [eV]')
ax.set_xscale('log')
ax.set_yscale('log')
plt.axis([3e3, 1e12, 1e-4, 1e12])
```
To use this transfer function, let's take an injected electron spectrum, and work out the spectrum of photons after the spectrum cools entirely through ICS (to include atomic processes, see [*electrons.elec_cooling*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/electrons/darkhistory.electrons.elec_cooling.html). Here, we consider a spectrum produced by annihilation of dark matter into a $W^+ W^-$ pair. This can be obtained from the [*darkhistory.spec.pppc*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/spec/darkhistory.spec.pppc.html), which returns the same flux produced by PPPC4DMID [[1]](#cite_PPPC)[[2]](#cite_weakCorrections). We'll pick $m_\text{DM}$ = 500 GeV, and obtain the spectrum per annihilation.
```python
%autoreload
from darkhistory.spec.pppc import get_pppc_spec
mDM = 500e9
elec_spec = get_pppc_spec(mDM, eleceng, 'W', 'elec')
elec_spec.switch_spec_type('N')
```
`elec_spec` here is a [`Spectrum`](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/spec/spectrum/darkhistory.spec.spectrum.Spectrum.html) object, and contains the abscissa (`eleceng`) and the spectrum, stored as dN/dE or N. Here, the data is stored as type `'N'`. To contract this spectrum with `ics_sec_phot_tf`, we use `TransferFuncAtRedshift.sum_specs`, which acts with the matrix stored in `ics_sec_phot_tf` on the spectrum. Keep in mind that `ics_sec_phot_tf` is normalized to a single electron, so multiplying by `elec_spec` of type `'N'` is appropriate.
```python
phot_spec = ics_sec_phot_tf.sum_specs(elec_spec)
```
This produces a photon `Spectrum` object, with the abscissa given by `photeng`.
We can also produce the initial spectrum of photons that got scattered. This is a blackbody spectrum, since we are only tracking photons that get upscattered in the nonrelativistic, Thomson regime (see the paper for more details). The CMB spectrum at any temperature can be obtained using [`physics.CMB_spec`](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/physics/darkhistory.physics.CMB_spec.html). This returns a spectrum containing $dN/(dE\, dV)$ of the CMB. We simply have to rescale this spectrum to the correct initial energy, and this information is stored in `cont_loss`.
First, let's put the spectrum in a `Spectrum` object.
```python
from darkhistory.spec.spectrum import Spectrum
# Use physics.TCMB() to get the temperature at the right redshift.
CMB_spec = Spectrum(photeng, phys.CMB_spec(photeng, phys.TCMB(1000)), spec_type='dNdE')
```
Now, we want to rescale this spectrum so that it has the energy of the photons that got upscattered. This energy is given by `np.dot(cont_loss, elec_spec.N)`, since `cont_loss` has each bin normalized to one injected electron at some energy.
We can do the rescaling simply by multiplying the `Spectrum` object itself.
```python
CMB_spec *= np.dot(cont_loss, elec_spec.N)/CMB_spec.toteng()
```
The curves below show the initial electron spectrum, the initial CMB photon spectrum prior to upscattering, and the upscattered photon spectrum.
```python
plt.figure()
plt.loglog()
elec_spec_plot, = plt.plot(eleceng, elec_spec.dNdE*eleceng**2, label=r'Injected $e^+e^-$ Spectrum')
phot_spec_plot, = plt.plot(photeng, phot_spec.dNdE*photeng**2, label='ICS Secondary Photon Spectrum')
CMB_plot, = plt.plot(photeng, CMB_spec.dNdE*photeng**2, 'k--', label='CMB')
plt.xlabel('Energy [eV]')
plt.ylabel(r'$E^2\, dN/dE$ [eV]')
plt.title('ICS Secondary Spectrum')
plt.text(0.1, 0.92, r'$\chi \chi \to W^+W^-$, $m_\chi$ = 500 GeV', fontsize=20, transform=ax.transAxes)
plt.legend(handles=[elec_spec_plot, phot_spec_plot, CMB_plot], loc=(0.1, 0.65))
plt.axis([1e-4, 5e12, 1e-10, 1e23])
```
## Bibliography
[1]<a id='cite_PPPC'></a> M.Cirelli, G.Corcella, A.Hektor, G.Hütsi, M.Kadastik, P.Panci, M.Raidal, F.Sala, A.Strumia, "PPPC 4 DM ID: A Poor Particle Physicist Cookbook for Dark Matter Indirect Detection," arXiv 1012.4515, JCAP 1103 (2011) 051. Erratum: JCAP 1210 (2012) E01.
[2]<a id='cite_weakCorrections'></a> P. Ciafaloni, D. Comelli, A. Riotto, F. Sala, A. Strumia, A. Urbano, "Weak corrections are relevant for dark matter indirect detection," arXiv 1009.0224, JCAP 1103 (2011) 019
```python
```
|
hongwanliuREPO_NAMEDarkHistoryPATH_START.@DarkHistory_extracted@DarkHistory-master@examples@Example_5_Inverse_Compton_Scattering.ipynb@.PATH_END.py
|
{
"filename": "Plot.Converge.py",
"repo_name": "alexrhowe/APOLLO",
"repo_path": "APOLLO_extracted/APOLLO-master/Plot.Converge.py",
"type": "Python"
}
|
from __future__ import print_function
import sys
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv)>1:
fin = open(sys.argv[1],'r')
else:
'Input file not specified.'
sys.exit()
line = fin.readline().split()
nwalkers = int(line[0])
nsteps = int(line[1])
ndim = int(line[2])
pnames = fin.readline().split()
lines = fin.readlines()
samples = np.zeros((nsteps,nwalkers,ndim))
nsamp = np.linspace(1,nsteps,nsteps)
for i in range(0,nsteps):
if i%100==0: print(i)
for j in range(0,nwalkers):
for k in range(0,ndim):
samples[i,j,k] = lines[i*nwalkers+j].split()[k]
for n in range(0,ndim-1):
fig = plt.figure()
ax1 = fig.add_subplot(111)
plt.xlabel('Steps',fontsize=14)
plt.ylabel(pnames[n],fontsize=14)
for i in range(0,ndim):
ax1.plot(nsamp,samples[:,i,n])
plt.show()
|
alexrhoweREPO_NAMEAPOLLOPATH_START.@APOLLO_extracted@APOLLO-master@Plot.Converge.py@.PATH_END.py
|
{
"filename": "iterateIdealHInst.cc.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/Utilities/iterateIdealHInst.cc.py",
"type": "Python"
}
|
# Code-generation snippet for Spheral's build system: `text` is a C++ source
# template whose %(ndim)s placeholders are filled via Python %-substitution
# (e.g. text % {"ndim": "1"}) to emit an explicit template instantiation of
# iterateIdealH for each compiled spatial dimension.
text = """
//------------------------------------------------------------------------------
// Explicit instantiation.
//------------------------------------------------------------------------------
#include "Utilities/iterateIdealH.cc"
#include "Geometry/Dimension.hh"
namespace Spheral {
template void iterateIdealH<Dim< %(ndim)s > >(DataBase<Dim< %(ndim)s > >&,
const vector<Boundary<Dim< %(ndim)s > >*>&,
const TableKernel<Dim< %(ndim)s > >&,
const SmoothingScaleBase<Dim< %(ndim)s > >&,
const int,
const double,
const double,
const bool,
const bool);
}
"""
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@Utilities@iterateIdealHInst.cc.py@.PATH_END.py
|
{
"filename": "fit_simulation_suite.ipynb",
"repo_name": "steven-murray/mrpy",
"repo_path": "mrpy_extracted/mrpy-master/docs/examples/fit_simulation_suite.ipynb",
"type": "Jupyter Notebook"
}
|
# Fit MRP parameters to a suite of simulation data simultaneously
In this example, we grab haloes from the publicly available $\nu^2$GC simulation suite and show how MRP can be fit to the haloes of 4 simulations simultaneously. In this case, the 4 simulations have different box sizes, so they probe different parts of the mass function more or less effectively. By combining them, we can get a good handle on a wide range of the mass function.
Do note that this example is not quick. It takes a while to *get* the data, let alone run the MCMC on it. You may want to generate some smaller fake datasets to have a play.
**The plots from this example are used in MRP as Figures 3 and 4.**
```python
# General imports
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pickle
from os.path import join, splitext, exists, expanduser
# Mrpy imports
from mrpy.fitting.fit_sample import SimFit
from mrpy import MRP
from chainconsumer import ChainConsumer
```
```python
fig_folder = expanduser("~")
data_folder = expanduser("~")
```
## Preparing the data
First you'll need to get the data. These are the files you'll need to download (beware, at least one of them is 12Gb alone):
http://www2.ccs.tsukuba.ac.jp/Astro/Members/ishiyama/nngc/Data/n2gc-m_z0.fof.bz2
http://www2.ccs.tsukuba.ac.jp/Astro/Members/ishiyama/nngc/Data/n2gc-h1_z0.fof.bz2
http://www2.ccs.tsukuba.ac.jp/Astro/Members/ishiyama/nngc/Data/n2gc-m_z0.rockstar.bz2
Then unzip them. **NOTE: you don't need to run this section if you've already got the data and compactified it**
First of all, we need to pare down the huge files. We can do this in a few ways:
* We only care about the Mass column, so we can delete everything else
* We only care about unique halo masses (and the quantity of each), so we can "tabulate" the data
* We keep only haloes with 40 or more particles (this limit is taken from the paper accompanying the catalogue)
These operations reduce the file size by about a factor of 100-1000, and make the subsequent MCMC runs much faster.
Something else to consider is that the fastest way to read in the data files and reduce them is to do it in one big chunk with numpy. However, this takes a lot of memory. So instead we read them line by line.
With these considerations, we implement the following functions.
### Compress Data
```python
def strip_and_compress(fname, fout, mpart=None, Nmin=0, Nmax=np.inf, force=False):
    """Reduce a raw halo catalogue to a compact (mass, count) table.

    Reads `fname` line by line (keeping memory bounded for multi-GB files),
    keeps only haloes whose particle number satisfies Nmin <= npart <= Nmax,
    and tabulates the number of haloes at each unique mass.  The result is
    written to `fout` as two columns: mass and number of occurrences.

    Parameters
    ----------
    fname : str
        Input catalogue path; its extension ("fof" or "rockstar") selects
        which columns hold the particle count and the mass.
    fout : str
        Output path for the compacted table.
    mpart : float, optional
        Particle mass; required for FOF catalogues, where halo mass is
        mpart * npart.  Unused for rockstar catalogues.
    Nmin, Nmax : int, optional
        Inclusive particle-number cuts.
    force : bool, optional
        If False (default) and `fout` already exists, do nothing.
    """
    # Early exit before any work if the output already exists.
    if not force and exists(fout):
        return
    unique_masses = {}
    ftype = splitext(fname)[1][1:]  # "fof" or "rockstar"
    with open(fname) as fin:
        for line in fin:
            l = line.strip()
            # Skip comment/header lines.
            if l.startswith("#"):
                continue
            cols = l.split()
            if ftype == "fof":
                npart = int(cols[-1])
            elif ftype == "rockstar":
                npart = int(cols[7])
            # Reject entries outside the particle-number cuts.
            if npart < Nmin or npart > Nmax:
                continue
            # Calculate the mass of the halo.
            if ftype == "fof":
                mvir = mpart * npart
            elif ftype == "rockstar":
                mvir = float(cols[21])  # corresponds to M200b
            unique_masses[mvir] = unique_masses.get(mvir, 0) + 1
    # Convert the dict into a 2D array of [mass, count]; .items() replaces
    # the Python-2-only .iteritems() of the original (print() likewise).
    out = np.array([[k, v] for k, v in unique_masses.items()])
    print("Compressed {} to {} percent".format(
        fname, 100 * len(out[:, 1]) / sum(out[:, 1])))
    # Save the data to a table file.
    np.savetxt(fout, out)
Now actually do the stripping and compressing of the files. We save the data in new files with an appended ".compact". Note also we limit the size of the halos, to be in line with the quoted values from the I15 paper. There is in fact at least 1 outlier beyond these limits.
```python
FORCE = False
strip_and_compress(join(data_folder,"n2gc-h1_z0.fof"),
join(data_folder, "n2gc-h1_z0.fof.compact"),2.75e7,100,17476256)
strip_and_compress(join(data_folder,"n2gc-m_z0.fof"),
join(data_folder, "n2gc-m_z0.fof.compact"),2.2e8,100,12120576)
strip_and_compress(join(data_folder,"n2gc-m_z0.rockstar"),
join(data_folder, "n2gc-m_z0.rockstar.compact"),force=FORCE,
Nmin=100)
strip_and_compress(join(data_folder,"n2gc-h1_z0.rockstar"),
join(data_folder, "n2gc-h1_z0.rockstar.compact"), force=FORCE,
Nmin=100)
```
Compressed /home/steven/Documents/DataSets/n2gc/n2gc-m_z0.rockstar to 0.219503952011 percent
Compressed /home/steven/Documents/DataSets/n2gc/n2gc-h1_z0.rockstar to 0.949670196418 percent
### Read in Tabulated Data
First up, read in the compact data we just created.
```python
# Read in the data from file
def get_raw_data(folder, sims=('h1', 'm'), ftype="fof", mmin=None, mmax=None):
    """Load compacted (mass, count) tables for a set of simulations.

    Parameters
    ----------
    folder : str
        Directory containing the "n2gc-<sim>_z0.<ftype>.compact" files.
    sims : sequence of str, optional
        Simulation tags to load.  Fix: the default was a mutable list; a
        tuple default is iterated identically and cannot be shared-mutated.
    ftype : str, optional
        Catalogue type, "fof" or "rockstar".
    mmin, mmax : sequence of float or None, optional
        Per-simulation lower/upper mass cuts (exclusive), aligned with `sims`.

    Returns
    -------
    m, nm : list of np.ndarray
        For each simulation, the unique halo masses and their counts.
    """
    m = []
    nm = []
    for sim in sims:
        data = np.genfromtxt(join(folder, "n2gc-{}_z0.{}.compact".format(sim, ftype)))
        m.append(data[:, 0])
        nm.append(data[:, 1])
    # Apply the mass cuts with a single boolean mask per simulation so that
    # nm and m stay aligned.
    if mmin is not None:
        for i, lo in enumerate(mmin):
            keep = m[i] > lo
            nm[i] = nm[i][keep]
            m[i] = m[i][keep]
    if mmax is not None:
        for i, hi in enumerate(mmax):
            keep = m[i] < hi
            nm[i] = nm[i][keep]
            m[i] = m[i][keep]
    return m, nm
```
We read in both FOF and SO halos with similar parameters, and store everything in the ``data`` dictionary.
```python
data = {'fof':{}, "so":{}}
# FOF halos
data['fof']['m'], data['fof']['nm'] = get_raw_data(data_folder, ['h1','m'],
mmin=[2.75e9,2.2e10],mmax=[2e13,7e14])
data['fof']['weights'] = [data['fof']['nm'][0]/140.0**3, data['fof']['nm'][1]/560.0**3]
print "Total number of FOF haloes: ", np.sum([np.sum(x) for x in data['fof']['nm']])
print "Total number of *unique* FOF haloes: ", np.sum([len(x) for x in data['fof']['m']])
print "-"*40
# SO halos
data['so']['m'], data['so']['nm'] = get_raw_data(data_folder, ['h1','m'], ftype='rockstar',
mmin=[2.75e9,2.2e10],mmax=[2e13,7e14])
data['so']['weights'] = [data['so']['nm'][0]/140.0**3, data['so']['nm'][1]/560.0**3]
print "Total number of SO haloes: ", np.sum([np.sum(x) for x in data['so']['nm']])
print "Total number of *unique* SO haloes: ", np.sum([len(x) for x in data['so']['m']])
```
Total number of FOF haloes: 24640920.0
Total number of *unique* FOF haloes: 141278
----------------------------------------
Total number of SO haloes: 24245670.0
Total number of *unique* SO haloes: 109419
## Running the fits
We'll run the fits with the ``emcee`` package (via a routine built in to ``mrpy``), but also with an optimization solver. The in-built function is able to utilise the tabulation of data we have performed already, and can do the suites simultaneously.
### Fitting with MCMC
```python
# Create the fitting class instance. This will have uniform priors.
fitobj_fof = SimFit(data['fof']['m'],data['fof']['nm'],
V=[140.0**3,560.0**3],
alpha_bounds = (-1.99,-1.5), hs_bounds=(12,16),
beta_bounds=(0.2,1.5),lnA_bounds=(-50,-10))
fitobj_so = SimFit(data['so']['m'],data['so']['nm'],
V=[140.0**3,560.0**3],
alpha_bounds = (-1.99,-1.5), hs_bounds=(12,16),
beta_bounds=(0.2,1.5),lnA_bounds=(-50,-10))
```
```python
# We don't use these, but they can be useful if something goes wrong.
downhill_res_fof = fitobj_fof.run_downhill(lnA0=-40.0)
downhill_res_so = fitobj_so.run_downhill(lnA0=-40.0)
```
```python
# Run the mcmc.
# We set 300 chains to warmup, but we can extend this later if we need to manually.
# Also, we start the chains in a small ball around the best (downhill) optimization solution using opt_init=True.
#fitobj_fof.run_mcmc(nchains=50,warmup=200,iterations=500,opt_init=True,threads=8)
fitobj_so.run_mcmc(nchains=50,warmup=200,iterations=500,opt_init=True,threads=8)
```
<emcee.ensemble.EnsembleSampler at 0x7fc6704bd350>
First off we want to look at a few key diagnostics of the chains to check whether everything's okay.
```python
print "Acceptance fraction for FOF (min, max, mean): ", fitobj_fof.mcmc_res.acceptance_fraction.min(), fitobj_fof.mcmc_res.acceptance_fraction.max(), fitobj_fof.mcmc_res.acceptance_fraction.mean()
print "Acceptance fraction for SO (min, max, mean): ", fitobj_so.mcmc_res.acceptance_fraction.min(), fitobj_so.mcmc_res.acceptance_fraction.max(), fitobj_so.mcmc_res.acceptance_fraction.mean()
```
Acceptance fraction for FOF (min, max, mean): 0.518 0.622 0.57288
Acceptance fraction for SO (min, max, mean): 0.542 0.648 0.58912
These acceptance fractions are somewhat high, but probably okay. We'll check burnin as well soon.
```python
def gelman_rubin(chain):
    """Gelman-Rubin potential-scale-reduction statistic (R-hat).

    `chain` has shape (n_chains, n_steps, ...); the statistic is computed
    per trailing dimension.  Values near 1 indicate converged chains.
    """
    n_chains, n_steps = chain.shape[0], chain.shape[1]
    # Within-chain variance: average of each chain's sample variance.
    within = np.mean(np.var(chain, axis=1, ddof=1), axis=0)
    # Between-chain variance from the spread of the per-chain means about
    # the grand mean.
    chain_means = np.mean(chain, axis=1)
    grand_mean = np.mean(chain_means, axis=0)
    between = n_steps / (n_chains - 1) * np.sum((grand_mean - chain_means) ** 2, axis=0)
    # Pooled posterior-variance estimate, then the scale-reduction ratio.
    pooled = (n_steps - 1.) / n_steps * within + between / n_steps
    return np.sqrt(pooled / within)
```
```python
ChainConsumer().add_chain(fitobj_fof.mcmc_res.chain.reshape((-1,4)), walkers = 50).diagnostic.gelman_rubin(threshold=0.1)
ChainConsumer().add_chain(fitobj_so.mcmc_res.chain.reshape((-1,4)), walkers = 50).diagnostic.gelman_rubin(threshold=0.1)
```
Gelman-Rubin Statistic values for chain 0
Param 0: 1.04796 (Passed)
Param 1: 1.04777 (Passed)
Param 2: 1.09876 (Passed)
Param 3: 1.05742 (Passed)
Gelman-Rubin Statistic values for chain 0
Param 0: 1.06547 (Passed)
Param 1: 1.06030 (Passed)
Param 2: 1.06584 (Passed)
Param 3: 1.06507 (Passed)
True
We see that the chains have converged (R < 1.1).
Since the fitting takes some time, we save the main results, i.e. the chain, to file here so that we can begin again at any time without running the MCMC. Thus the following analysis only uses the chains as written to file, rather than the full fit objects just created.
```python
np.savez("n2gc_analysis/n2gc_mcmc_chain_fof",chain=fitobj_fof.mcmc_res.chain)
np.savez("n2gc_analysis/n2gc_mcmc_chain_so",chain=fitobj_so.mcmc_res.chain)
```
```python
chain_so = np.load("n2gc_analysis/n2gc_mcmc_chain_so.npz")['chain']
chain_fof = np.load("n2gc_analysis/n2gc_mcmc_chain_fof.npz")['chain']
```
## Analysis
### Traceplot
The first thing we might want to do with each fit is to check its traceplot, and determine if the burnin was sufficient.
```python
def traceplot(keys,chains):
    """Plot MCMC traces, one stacked panel per parameter.

    Parameters
    ----------
    keys : sequence of str
        y-axis labels, one per parameter/panel.
    chains : np.ndarray
        Chain array whose last axis runs over the parameters; ``chains.T``
        is iterated so each panel plots one parameter's (steps, walkers)
        traces.

    Returns
    -------
    matplotlib.figure.Figure
        The figure holding the stacked trace panels.
    """
    f, ax = plt.subplots(len(keys), 1, sharex=True, figsize=(8, 2.5 * len(keys)))
    for i, (key, chain) in enumerate(zip(keys,chains.T)):
        # Low alpha keeps the many overlapping walker traces readable.
        ax[i].plot(chain, color="black", alpha=0.2)
        ax[i].set_ylabel(key,fontsize=16)
    # Butt the panels together and hide x tick labels on all but the bottom.
    f.subplots_adjust(hspace=0)
    plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
    return f
```
Plot both cases:
```python
fig0 = traceplot([r"$ \log \mathcal{H}_\star$",r"$\alpha$",r'$\beta$',r"$\ln A$"],chain_fof)
plt.show()
```

It seems like a reasonable burn-in time has been met for the FOF halos, so we're happy we trust our sample.
```python
fig0 = traceplot([r"$ \log \mathcal{H}_\star$",r"$\alpha$",r'$\beta$',r"$\ln A$"],chain_so)
plt.show()
```

The SO halos, however, have moved significantly, possibly due to a poor downhill-gradient optimization. We remove the first part of the chain:
```python
chain_so = chain_so[:,100:,:]
```
### Basic Results
We'd like to know the pure basic results: mean, median, mode, standard deviation etc.
```python
print "Mean: ", np.mean(chain_fof,axis=(0,1))
print "Median: ", np.median(chain_fof,axis=(0,1))
print "Mode: ", fitobj_fof.mcmc_res.flatchain[np.argmax(fitobj_fof.mcmc_res.flatlnprobability),:]
print "Std Dev.: ", np.std(chain_fof,axis=(0,1))
print "Covariance: ", np.cov(chain_fof.reshape((-1,4)).T)
print "Relative Uncertainty: ", np.std(chain_fof,axis=(0,1))*100/np.mean(chain_fof,axis=(0,1))
from mrpy.base.core import log_mass_mode
print "Log Mass Mode: ", np.log10(log_mass_mode(*np.mean(chain_fof[:,:,:3],axis=(0,1))))
```
Mean: [ 14.51918594 -1.89972215 1.19198794 -44.41676165]
Median: [ 14.51921005 -1.89971928 1.19135641 -44.41679216]
Mode: [ 14.51964269 -1.89972233 1.19189829 -44.41885557]
Std Dev.: [ 0.00715219 0.00016809 0.0194396 0.03367548]
Covariance: [[ 5.11558477e-05 -1.68702393e-07 -1.92940482e-05 -2.09231989e-04]
[ -1.68702393e-07 2.82549206e-08 -1.01344465e-06 1.83532519e-06]
[ -1.92940482e-05 -1.01344465e-06 3.77913140e-04 -2.41123515e-04]
[ -2.09231989e-04 1.83532519e-06 -2.41123515e-04 1.13408347e-03]]
Relative Uncertainty: [ 0.04926026 -0.00884806 1.63085537 -0.07581706]
Log Mass Mode: 13.617275083
```python
print "Mean: ", np.mean(chain_so,axis=(0,1))
print "Median: ", np.median(chain_so,axis=(0,1))
print "Mode: ", fitobj_so.mcmc_res.flatchain[np.argmax(fitobj_so.mcmc_res.flatlnprobability),:]
print "Std Dev.: ", np.std(chain_so,axis=(0,1))
print "Covariance: ", np.cov(chain_so.reshape((-1,4)).T)
print "Relative Uncertainty: ", np.std(chain_so,axis=(0,1))*100/np.mean(chain_so,axis=(0,1))
from mrpy.base.core import log_mass_mode
print "Log Mass Mode: ", np.log10(log_mass_mode(*np.mean(chain_so[:,:,:3],axis=(0,1))))
```
Mean: [ 14.10470648 -1.88130297 0.74993505 -42.00895938]
Median: [ 14.10465981 -1.88130181 0.7497424 -42.00886146]
Mode: [ 14.10427937 -1.88133126 0.75064434 -42.00835854]
Std Dev.: [ 0.00592467 0.00024254 0.00635023 0.02659803]
Covariance: [[ 3.51034875e-05 -4.02509937e-07 -6.81369166e-06 -1.46285387e-04]
[ -4.02509937e-07 5.88305367e-08 -9.58809971e-07 3.51176102e-06]
[ -6.81369166e-06 -9.58809971e-07 4.03274777e-05 -3.28788684e-05]
[ -1.46285387e-04 3.51176102e-06 -3.28788684e-05 7.07490346e-04]]
Relative Uncertainty: [ 0.04200493 -0.01289234 0.84677108 -0.06331513]
Log Mass Mode: 13.0371689695
### Corner plot
This produces a "corner" plot which shows the covariance between parameters.
```python
c = ChainConsumer().add_chain(chain_fof.reshape((-1,4)),
parameters=[r'$h_\star$',r'$\alpha$',r"$\beta$",r'$\ln A$'],
walkers=50)
fig = c.plot(figsize="PAGE")
if fig_folder:
fig.savefig(join(fig_folder,"n2gc_triangle.pdf"))
```
WARNING:chainconsumer.chain:This method is deprecated. Please use chainConsumer.plotter.plot instead

```python
c = ChainConsumer().add_chain(chain_so.reshape((-1,4)),
parameters=[r'$h_\star$',r'$\alpha$',r"$\beta$",r'$\ln A$'],
walkers=50)
fig = c.plot(figsize="PAGE")
```
WARNING:chainconsumer.chain:This method is deprecated. Please use chainConsumer.plotter.plot instead

### Residual Plot
Importantly, we want to check if the actual results look good against the data, when binned.
```python
# A function to create histograms from raw masses, and convert them to dn/dm.
# It also sets edge values in which a whole bin is not sampled to nan for visual purposes.
def bin_masses(masses, nm, V, bins=50):
    """Histogram tabulated halo masses and convert counts to dn/dm.

    Bins log10(masses), weighted by the per-mass counts `nm`, then converts
    the histogram to a number density per unit mass in a volume `V`.  The
    first and last occupied bins are zeroed (edge bins are generally only
    partially sampled), and dn/dm in every empty bin is set to NaN so those
    bins are not drawn when plotted.

    Returns
    -------
    centres, dn, hist, poisson_error : np.ndarray
        Bin centres (in log10 mass), dn/dm, weighted counts per bin, and the
        Poisson error on dn/dm.
    """
    hist, edges = np.histogram(np.log10(masses), bins, weights=nm)
    centres = 0.5 * (edges[1:] + edges[:-1])
    dx = centres[1] - centres[0]
    # dn/dm = counts / (m * V * dlog10(m) * ln 10); hoist the shared factor.
    norm = 10 ** centres * float(V) * dx * np.log(10)
    counts = hist.astype("float")
    dn = counts / norm
    poisson_error = np.sqrt(counts) / norm
    # Zero the first, then the (recomputed) last, occupied bin.  An
    # IndexError means no occupied bin remained for that pick.
    for pick in (lambda occ: occ[0], lambda occ: occ[-1]):
        try:
            edge_bin = pick(np.where(hist != 0)[0])
        except IndexError:
            continue
        dn[edge_bin] = 0
        hist[edge_bin] = 0
        poisson_error[edge_bin] = 0
    dn[hist == 0] = np.nan
    return centres, dn, hist, poisson_error
resids = {}
for jj, ftype in enumerate(['fof','so']):
resids[ftype] = {}
m,nm = data[ftype]['m'], data[ftype]['nm']
# Generate total density of each sim
resids[ftype]['rho'] = [np.sum(x*nx)/L**3 for x,nx,L in zip(m,nm,[140.0,560.0])]
# Calculate the total mmin and mmax for all sims in the suite
mmin = np.min([x.min() for x in m])
mmax = np.max([x.max() for x in m])
# Generate the bin structure
bins = np.linspace(np.log10(mmin), np.log10(mmax),50)
bin_centres = (bins[1:] + bins[:-1])/2
# Generate the dn/dm from the sims
resids[ftype]["dndm"] = []
resids[ftype]["hist"] = []
resids[ftype]["err"] = []
for mi,nmi,L in zip(m,nm,[140.0,560.0]):
_,dn,h_, err = bin_masses(mi,nmi,L**3,bins)
resids[ftype]["dndm"].append(dn)
resids[ftype]["hist"].append(h_)
resids[ftype]["err"].append(err)
# The final best-fit object.
parms = np.mean([chain_fof, chain_so][jj], axis=(0,1))
norm = parms[3] # downhill_res[0].x[3]
resids[ftype]['fit'] = MRP(logm = bin_centres,logHs=parms[0],alpha=parms[1],beta=parms[2],norm=norm)
```
Along with the best-fit MRP, we want to show the published mass function of the data, which we get from the hmf package.
```python
from hmf import MassFunction
h = MassFunction(hmf_model="Ishiyama", cosmo_params={"Om0":0.31, "Ob0":0.048, "H0":68.0},
sigma_8=0.83, n=0.96,lnk_min=-15, lnk_max=15, dlnk=0.01,Mmin=bin_centres[0],Mmax=bin_centres[-1]+0.001,
dlog10m=bin_centres[1]-bin_centres[0])
```
Finally we draw the actual plot.
```python
fig, ax = plt.subplots(1,2, figsize=(9,4), sharex=True, sharey=True,
subplot_kw={"xscale":'log', 'ylim':(-0.2,0.2)},
gridspec_kw={"wspace":0.05})
ftypes = ['fof','so']
for jj, ftype in enumerate(ftypes):
for i,(dn,hst,err, label,col) in enumerate(zip(resids[ftype]['dndm'],
resids[ftype]['hist'],
resids[ftype]['err'],
["H1","M"],
["C0",'C2'])):
fit = resids[ftype]['fit']
# Plot alternative type in grey
if jj==1:
dn_, err_ = resids[ftypes[(jj+1)%2]]['dndm'][i], resids[ftypes[(jj+1)%2]]['err'][i]
frac = (dn_/fit.dndm()) - 1
err_ = err_/fit.dndm()
mask = np.abs(frac)<0.3
#ax[jj].plot(10**bin_centres[mask],frac[mask],label=ftypes[(jj+1)%2].upper() if i else "",
# color='k',lw=2, alpha=0.1)
ax[jj].fill_between(10**bin_centres[mask],
frac[mask] - err_[mask], frac[mask]+err_[mask],
label=ftypes[(jj+1)%2].upper() if i else "",
color='k', alpha=0.2, facecolor=None, edgecolor=None, lw=0)
# Residuals to MRP. Mask trailing bits so that poisson noise doesn't dominate the view
frac = (dn/fit.dndm()) - 1
err_ = err/fit.dndm()
mask = np.abs(frac)<0.3
ax[jj].plot(10**bin_centres[mask],frac[mask],label=label if not jj else "",color=col,lw=2)
ax[jj].fill_between(10**bin_centres[mask],
frac[mask] - err_[mask], frac[mask]+err_[mask],
color=col, alpha=0.2)
if ftype=="fof":
frac = (dn/h.dndm) - 1
err_ = err/h.dndm
mask = np.abs(frac)<0.3
ax[jj].plot(10**bin_centres[mask],frac[mask],color=col,lw=2, ls='--')
ax[jj].fill_between(10**bin_centres[mask],
frac[mask] - err_[mask], frac[mask]+err_[mask],
color=col, alpha=0.2, hatch='/')
ax[jj].set_xlabel(r"Halo Mass, [$h^{-1}M_\odot$]",fontsize=15)
ax[jj].grid(True)
# Residual of Rockstar to MRP
#frac = dndm_rock/fit.dndm() -1
#plt.plot(10**bin_centres[np.abs(frac)<0.3],frac[np.abs(frac)<0.3], color="C3",label="SO")
# Legend item for I15 fit
ax[0].plot([0],[0],label="Residual to I15",ls="--",color="k")
ax[0].set_title("FOF Halos")
ax[1].set_title("SO Halos")
# PLOT STYLING
#ax[0].xscale('log')
#plt.grid(True)
#plt.ylim((-0.2,0.2))
#plt.ylim((-0.05,0.05))
ax[0].set_ylabel("Sim/Fit - 1",fontsize=15)
for jj in range(2):
ax[jj].legend(loc=0,ncol=2)
# Save for the paper!
if fig_folder:
plt.savefig(join(fig_folder,"n2gc_fof_simul.pdf"))
```
/home/steven/miniconda3/envs/mrpy/lib/python2.7/site-packages/ipykernel/__main__.py:31: RuntimeWarning: invalid value encountered in less
/home/steven/miniconda3/envs/mrpy/lib/python2.7/site-packages/ipykernel/__main__.py:41: RuntimeWarning: invalid value encountered in less
/home/steven/miniconda3/envs/mrpy/lib/python2.7/site-packages/ipykernel/__main__.py:19: RuntimeWarning: invalid value encountered in less

We notice that the residuals from MRP are very similar in magnitude to those from the full EPS-based fit, over a fairly wide range of masses. Note that it seems that the MRP will diverge more significantly below the mass threshold than the EPS fit. In any case, both diverge significantly less than the *same simulation* with haloes found with a spherical overdensity technique.
|
steven-murrayREPO_NAMEmrpyPATH_START.@mrpy_extracted@mrpy-master@docs@examples@fit_simulation_suite.ipynb@.PATH_END.py
|
{
"filename": "solid_solid.py",
"repo_name": "jrenaud90/TidalPy",
"repo_path": "TidalPy_extracted/TidalPy-main/TidalPy/radial_solver/numerical/interfaces/solid_solid.py",
"type": "Python"
}
|
""" Functions to calculate the initial conditions for an overlying solid layer above another solid layer.
For solid-solid layer interfaces, all radial functions are continuous.
Since the solid solutions do not lose a y or an independent solution when moving from dynamic to static: Then the
interfaces between static-dynamic or dynamic-static will also be fully continuous.
In all likely-hood you probably will not want to use these as true interfaces. Instead, just combining all adjacent
"solid" layers into one super solid layer.
References
----------
S74 : Saito (1974; J. Phy. Earth; DOI: 10.4294/jpe1952.22.123)
TS72 : Takeuchi, H., and M. Saito (1972), Seismic surface waves, Methods Comput. Phys., 11, 217–295.
"""
import numpy as np
from TidalPy.utilities.performance import njit
@njit(cacheable=False)
def both_dynamic(solid_layer_ys: np.ndarray) -> np.ndarray:
    """Starting radial-function values at the base of a solid layer that sits
    above another solid layer, with dynamic tides assumed in both layers.

    All radial functions are continuous across a solid-solid interface, so the
    three independent starting solutions of the upper layer are direct copies
    of the lower layer's solutions at the interface.

    Parameters
    ----------
    solid_layer_ys : np.ndarray
        Radial-function solutions of the layer below (solid, dynamic).

    Returns
    -------
    initial_solutions : np.ndarray
        Three independent starting solutions, shape (3, 6), complex.

    References
    ----------
    TS72
    """
    # Continuity: copy every y of every solution straight across.
    initial_solutions = np.empty((3, 6), dtype=np.complex128)
    initial_solutions[:, :] = solid_layer_ys[0:3, 0:6]
    return initial_solutions
@njit(cacheable=False)
def static_dynamic(solid_layer_ys: np.ndarray) -> np.ndarray:
    """Starting radial-function values at the base of a solid layer that sits
    above another solid layer: static tides below, dynamic tides above.

    All radial functions are continuous across a solid-solid interface, so the
    three independent starting solutions of the upper layer are direct copies
    of the lower layer's solutions at the interface.

    Parameters
    ----------
    solid_layer_ys : np.ndarray
        Radial-function solutions of the layer below (solid, static).

    Returns
    -------
    initial_solutions : np.ndarray
        Three independent starting solutions, shape (3, 6), complex.

    References
    ----------
    TS72
    """
    # Continuity: copy every y of every solution straight across.
    initial_solutions = np.empty((3, 6), dtype=np.complex128)
    initial_solutions[:, :] = solid_layer_ys[0:3, 0:6]
    return initial_solutions
@njit(cacheable=False)
def dynamic_static(solid_layer_ys: np.ndarray) -> np.ndarray:
    """Starting radial-function values at the base of a solid layer that sits
    above another solid layer: dynamic tides below, static tides above.

    All radial functions are continuous across a solid-solid interface, so the
    three independent starting solutions of the upper layer are direct copies
    of the lower layer's solutions at the interface.

    Parameters
    ----------
    solid_layer_ys : np.ndarray
        Radial-function solutions of the layer below (solid, dynamic).

    Returns
    -------
    initial_solutions : np.ndarray
        Three independent starting solutions, shape (3, 6), complex.

    References
    ----------
    TS72
    """
    # Continuity: copy every y of every solution straight across.
    initial_solutions = np.empty((3, 6), dtype=np.complex128)
    initial_solutions[:, :] = solid_layer_ys[0:3, 0:6]
    return initial_solutions
@njit(cacheable=False)
def both_static(solid_layer_ys: np.ndarray) -> np.ndarray:
    """Starting radial-function values at the base of a solid layer that sits
    above another solid layer, with static tides assumed in both layers.

    All radial functions are continuous across a solid-solid interface, so the
    three independent starting solutions of the upper layer are direct copies
    of the lower layer's solutions at the interface.

    Parameters
    ----------
    solid_layer_ys : np.ndarray
        Radial-function solutions of the layer below (solid, static).

    Returns
    -------
    initial_solutions : np.ndarray
        Three independent starting solutions, shape (3, 6), complex.

    References
    ----------
    TS72
    """
    # Continuity: copy every y of every solution straight across.
    initial_solutions = np.empty((3, 6), dtype=np.complex128)
    initial_solutions[:, :] = solid_layer_ys[0:3, 0:6]
    return initial_solutions
|
jrenaud90REPO_NAMETidalPyPATH_START.@TidalPy_extracted@TidalPy-main@TidalPy@radial_solver@numerical@interfaces@solid_solid.py@.PATH_END.py
|
{
"filename": "test_cosmology.py",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/pyccl/tests/test_cosmology.py",
"type": "Python"
}
|
import pickle
import tempfile
import pytest
import numpy as np
import pyccl as ccl
import copy
import warnings
from .test_cclobject import check_eq_repr_hash
def test_Cosmology_eq_repr_hash():
    """Check __eq__, __repr__, and __hash__ for Cosmology and
    CosmologyCalculator objects built from equal and unequal parameters."""
    # 1. Using a complicated Cosmology object.
    extras = {"camb": {"halofit_version": "mead2020", "HMCode_logT_AGN": 7.8}}
    kwargs = {"transfer_function": "bbks",
              "matter_power_spectrum": "linear",
              "extra_parameters": extras}
    COSMO1 = ccl.CosmologyVanillaLCDM(**kwargs)
    COSMO2 = ccl.CosmologyVanillaLCDM(**kwargs)
    # Identical construction parameters -> equal, same repr, same hash.
    assert check_eq_repr_hash(COSMO1, COSMO2)
    # 2. Now make a copy and change it.
    # deepcopy so mutating the nested extra_parameters dict cannot affect
    # the kwargs COSMO1/COSMO2 were built from.
    kwargs = copy.deepcopy(kwargs)
    kwargs["extra_parameters"]["camb"]["halofit_version"] = "mead2020_feedback"
    COSMO3 = ccl.CosmologyVanillaLCDM(**kwargs)
    assert check_eq_repr_hash(COSMO1, COSMO3, equal=False)
    # 3. Using a CosmologyCalculator.
    # Build the calculator inputs from COSMO1's own linear power splines.
    COSMO1.compute_linear_power()
    a_arr, lk_arr, pk_arr = COSMO1.get_linear_power().get_spline_arrays()
    pk_linear = {"a": a_arr,
                 "k": np.exp(lk_arr),
                 "delta_matter:delta_matter": pk_arr}
    COSMO4 = ccl.CosmologyCalculator(
        Omega_c=0.25, Omega_b=0.05, h=0.67, n_s=0.96, sigma8=0.81,
        pk_linear=pk_linear, pk_nonlin=pk_linear)
    COSMO5 = ccl.CosmologyCalculator(
        Omega_c=0.25, Omega_b=0.05, h=0.67, n_s=0.96, sigma8=0.81,
        pk_linear=pk_linear, pk_nonlin=pk_linear)
    assert check_eq_repr_hash(COSMO4, COSMO5)
    # Doubling the power-spectrum values must break equality.
    pk_linear = {"a": a_arr,
                 "k": np.exp(lk_arr),
                 "delta_matter:delta_matter": 2*pk_arr}
    COSMO6 = ccl.CosmologyCalculator(
        Omega_c=0.25, Omega_b=0.05, h=0.67, n_s=0.96, sigma8=0.81,
        pk_linear=pk_linear, pk_nonlin=pk_linear)
    assert check_eq_repr_hash(COSMO4, COSMO6, equal=False)
def test_cosmo_methods():
    """ Check that all pyccl functions that take cosmo
    as their first argument are methods of the Cosmology object.
    """
    from inspect import getmembers, isfunction, signature
    from pyccl import background, boltzmann, \
        cells, correlations, covariances, neutrinos, \
        pk2d, power, tk3d, tracers, halos, nl_pt
    cosmo = ccl.CosmologyVanillaLCDM()
    subs = [background, boltzmann, cells, correlations, covariances,
            neutrinos, pk2d, power, tk3d, tracers, halos, nl_pt]
    # Collect and flatten the [(name, function), ...] pairs of every
    # submodule into one list.
    funcs = [getmembers(sub, isfunction) for sub in subs]
    funcs = [func for sub in funcs for func in sub]
    for name, func in funcs:
        if name.startswith("_"):  # no private functions
            continue
        pars = signature(func).parameters
        # If the first positional parameter is `cosmo`, a method of the same
        # name must exist on the instance (getattr raises otherwise).
        if pars and list(pars)[0] == "cosmo":
            _ = getattr(cosmo, name)
    # quantitative: spot-check that function and method forms agree.
    assert ccl.sigma8(cosmo) == cosmo.sigma8()
    assert ccl.rho_x(cosmo, 1., "matter", is_comoving=False) == \
        cosmo.rho_x(1., "matter", is_comoving=False)
    assert ccl.get_camb_pk_lin(cosmo)(1., 1., cosmo) == \
        cosmo.get_camb_pk_lin()(1., 1., cosmo)
    prof = ccl.halos.HaloProfilePressureGNFW(mass_def="200m")
    hmf = ccl.halos.MassFuncTinker08(mass_def="200m")
    hbf = ccl.halos.HaloBiasTinker10(mass_def="200m")
    hmc = ccl.halos.HMCalculator(mass_function=hmf, halo_bias=hbf,
                                 mass_def="200m")
    assert ccl.halos.halomod_power_spectrum(cosmo, hmc, 1., 1., prof) == \
        cosmo.halomod_power_spectrum(hmc, 1., 1., prof)
def test_cosmology_critical_init():
    """A critical-density cosmology (no radiation, no curvature, no
    massive neutrinos) must normalize the growth factor to 1 today."""
    params = dict(
        Omega_c=0.25,
        Omega_b=0.05,
        h=0.7,
        sigma8=0.8,
        n_s=0.96,
        Neff=0,
        m_nu=0.0,
        w0=-1.0,
        wa=0.0,
        mass_split='normal',
        Omega_g=0,
        Omega_k=0)
    cosmo = ccl.Cosmology(**params)
    assert np.allclose(cosmo.cosmo.data.growth0, 1)
def test_cosmology_As_sigma8_populates():
    """Calling ``cosmo.sigma8()`` populates the sigma8 parameter when the
    cosmology was constructed with A_s only."""
    cosmo = ccl.Cosmology(Omega_c=0.265, Omega_b=0.045, h=0.675,
                          n_s=0.965, A_s=2e-9)
    # sigma8 starts out unset.
    assert np.isnan(cosmo["sigma8"])
    cosmo.sigma8()
    # After the first call, the stored value matches the computed one.
    assert cosmo["sigma8"] == cosmo.sigma8()
def test_cosmology_init():
    """Cosmology objects can only be constructed from valid arguments."""
    base = dict(Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96)
    # (expected exception, invalid override) pairs.
    bad_cases = [
        (KeyError, {'matter_power_spectrum': 'x'}),
        (KeyError, {'transfer_function': 'x'}),
        (ValueError, {'matter_power_spectrum': None}),
        (ValueError, {'transfer_function': None}),
        # More than three neutrino masses is invalid.
        (ValueError, {'m_nu': np.array([0.1, 0.1, 0.1, 0.1])}),
        # A non-numeric m_nu (here, an arbitrary module) is invalid.
        (ValueError, {'m_nu': ccl}),
    ]
    for exc_type, override in bad_cases:
        with pytest.raises(exc_type):
            ccl.Cosmology(**base, **override)
def test_cosmology_output():
    """Status flags on a Cosmology flip from False to True once the
    corresponding quantities have been computed."""
    cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9,
                          n_s=0.96)
    # A fresh object reports a clean status.
    assert cosmo.cosmo.status == 0

    flags = ("has_distances", "has_growth", "has_linear_power",
             "has_nonlin_power", "has_sigma")
    # Nothing precomputed yet.
    for flag in flags:
        assert not getattr(cosmo, flag)

    cosmo.compute_distances()
    cosmo.compute_growth()
    cosmo.compute_linear_power()
    cosmo.compute_nonlin_power()
    cosmo.compute_sigma()

    # Everything precomputed now.
    for flag in flags:
        assert getattr(cosmo, flag)
def test_cosmology_pickles():
    """A Cosmology object round-trips through pickle unchanged."""
    original = ccl.Cosmology(
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        m_nu=[0.02, 0.1, 0.05], mass_split='list')
    with tempfile.TemporaryFile() as stream:
        pickle.dump(original, stream)
        stream.seek(0)
        restored = pickle.load(stream)
    # Distances computed from the restored object must match exactly.
    assert np.allclose(ccl.comoving_radial_distance(original, 0.5),
                       ccl.comoving_radial_distance(restored, 0.5),
                       atol=0, rtol=0)
def test_cosmology_lcdm():
    """CosmologyVanillaLCDM matches an explicitly constructed vanilla
    LCDM cosmology."""
    explicit = ccl.Cosmology(Omega_c=0.25,
                             Omega_b=0.05,
                             h=0.67, n_s=0.96,
                             sigma8=0.81)
    vanilla = ccl.CosmologyVanillaLCDM()
    # Exact agreement is required (atol=0, rtol=0).
    assert np.allclose(ccl.comoving_radial_distance(explicit, 0.5),
                       ccl.comoving_radial_distance(vanilla, 0.5),
                       atol=0, rtol=0)
def test_cosmology_p18lcdm_raises():
    """Overriding a fixed vanilla-LCDM density parameter is an error."""
    with pytest.raises(ValueError):
        ccl.CosmologyVanillaLCDM(**{'Omega_c': 0.1})
def test_cosmology_context():
    """Exiting a Cosmology context manager frees the C-level resources."""
    with ccl.Cosmology(
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            m_nu=np.array([0.02, 0.1, 0.05]), mass_split='list') as cosmo:
        # Inside the context the object is fully functional.
        assert not cosmo.has_distances
        ccl.comoving_radial_distance(cosmo, 0.5)
        assert cosmo.has_distances
    # Outside, the low-level state must have been torn down.
    assert not hasattr(cosmo, "cosmo")
    assert not hasattr(cosmo, "_params")
    with pytest.raises(AttributeError):
        cosmo.has_growth
def test_pyccl_default_params():
    """The Python layer for the gsl and spline parameters behaves on par
    with the C layer."""
    default_eps = ccl.gsl_params["EPS_SCALEFAC_GROWTH"]
    # The parameter we exercise throughout this test.
    assert default_eps == 1e-6

    # Attribute access and item access are interchangeable.
    assert ccl.gsl_params.EPS_SCALEFAC_GROWTH == \
        ccl.gsl_params["EPS_SCALEFAC_GROWTH"]

    # Assignment works through either interface.
    ccl.gsl_params.EPS_SCALEFAC_GROWTH = 1e-5
    assert ccl.gsl_params["EPS_SCALEFAC_GROWTH"] == 1e-5  # cross-check
    ccl.gsl_params["EPS_SCALEFAC_GROWTH"] = 2e-6
    assert ccl.gsl_params.EPS_SCALEFAC_GROWTH == 2e-6

    # Unknown parameters are rejected through either interface.
    with pytest.raises(KeyError):
        ccl.gsl_params.test = "hello_world"
    with pytest.raises(KeyError):
        ccl.gsl_params["test"] = "hello_world"

    # A_SPLINE_MAX may only ever be 1.0.
    ccl.spline_params.A_SPLINE_MAX = 1.0
    with pytest.raises(ValueError):
        ccl.spline_params.A_SPLINE_MAX = 0.9

    # The spline type cannot be switched.
    ccl.spline_params.A_SPLINE_TYPE = None
    with pytest.raises(TypeError):
        ccl.spline_params.A_SPLINE_TYPE = "something_else"

    # Physical constants are frozen by default ...
    with pytest.raises(AttributeError):
        ccl.physical_constants.CLIGHT = 1
    # ... but can be changed after an explicit unfreeze
    # (no warnings may be emitted while doing so).
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        ccl.physical_constants.unfreeze()
        ccl.physical_constants.CLIGHT = 1
    assert ccl.physical_constants.CLIGHT == 1
    ccl.physical_constants.freeze()
    ccl.physical_constants.reload()

    # The earlier gsl assignment is still in effect ...
    assert ccl.gsl_params.EPS_SCALEFAC_GROWTH != default_eps
    # ... until the defaults are reloaded.
    ccl.gsl_params.reload()
    assert ccl.gsl_params.EPS_SCALEFAC_GROWTH == default_eps
def test_cosmology_default_params():
    """New Cosmology objects pick up the current global default params."""
    cosmo_ref = ccl.CosmologyVanillaLCDM()
    baseline = cosmo_ref.cosmo.gsl_params.EPS_SCALEFAC_GROWTH

    # Bump the global default; a fresh object must see the new value.
    ccl.gsl_params.EPS_SCALEFAC_GROWTH = baseline * 10
    cosmo_mod = ccl.CosmologyVanillaLCDM()
    modified = cosmo_mod.cosmo.gsl_params.EPS_SCALEFAC_GROWTH
    assert modified == baseline * 10
    assert modified != baseline

    # Reloading the defaults restores the original value.
    ccl.gsl_params.reload()
    cosmo_back = ccl.CosmologyVanillaLCDM()
    assert cosmo_back.cosmo.gsl_params.EPS_SCALEFAC_GROWTH == baseline
def test_ccl_physical_constants_smoke():
    """The Python-level speed of light matches the C-level constant."""
    python_clight = ccl.physical_constants.CLIGHT
    c_level_clight = ccl.ccllib.cvar.constants.CLIGHT
    assert python_clight == c_level_clight
def test_ccl_global_parameters_repr():
    """``repr()`` of the spline parameters round-trips through ``eval``."""
    ccl.spline_params.reload()
    round_tripped = eval(repr(ccl.spline_params))
    assert round_tripped == ccl.spline_params._bak
def test_camb_sigma8_input():
    """CAMB honours a sigma8 normalization given in place of A_s."""
    target_sigma8 = 0.85
    cosmo = ccl.Cosmology(
        Omega_c=0.25, Omega_b=0.05, h=0.67, n_s=0.96, sigma8=target_sigma8,
        transfer_function="boltzmann_camb"
    )
    assert np.isclose(cosmo.sigma8(), target_sigma8)
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@pyccl@tests@test_cosmology.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "franpoz/SHERLOCK",
"repo_path": "SHERLOCK_extracted/SHERLOCK-master/setup.py",
"type": "Python"
}
|
import setuptools

# The PyPI long description is taken verbatim from the repository README.
with open("README.md", "r") as fh:
    long_description = fh.read()

version = "0.47.3"

# Pinned runtime dependencies. Pins are deliberate: several of these
# packages (allesfitter, triceratops, pytransit) are mutually sensitive
# to version changes.
requirements = [
    'arviz==0.12.1',  # Validation required (pytransit, from triceratops)
    'astroplan==0.8',
    "astroquery==0.4.6",
    "alexfitter==1.2.15",  # Fit
    'argparse==1.4.0',  # All modules
    'beautifulsoup4==4.9.3',  # Parsing HTML and XML, for OIs extraction
    "celerite==0.4.0",  # Allesfitter dependency
    "corner==2.2.2",  # Allesfitter dependency
    "dearwatson==0.10.9",  # Vetting
    "dynesty==1.0.1",  # Allesfitter dependency
    "emcee==3.0.2",  # Allesfitter dependency
    "h5py==3.10.0",  # Allesfitter dependency
    "mock==4.0.3",
    'pdf2image==1.16.2',
    'pytransit==2.5.21',  # Validation
    'pytz',  # Observation plan: Not using version because it gets the DB updated with each release
    "requests==2.25.1",  # OIs management
    "rebound==4.4.1",  # Stability
    "reproject==0.13.0",
    "seaborn==0.11.1",
    'setuptools>=41.0.0',
    # NOTE(review): the PyPI 'sklearn' shim is deprecated and now fails
    # to install; consider switching to 'scikit-learn' with a pin.
    "sklearn==0.0",
    'statsmodels==0.13.5',  # Allesfitter dependency, might conflict with lcbuilder dependency for autocorrelation
    'timezonefinder==5.2.0',  # Observation plan
    'tqdm==4.56.0',
    'triceratops==1.0.17',  # Validation
    'uncertainties==3.1.5'  # Observation plan
]

setuptools.setup(
    name="sherlockpipe",  # Replace with your own username
    version=version,
    author="M. Dévora-Pajares & F.J. Pozuelos",
    author_email="mdevorapajares@protonmail.com",
    description="Search for Hints of Exoplanets fRom Lightcurves Of spaCe based seeKers",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/franpoz/SHERLOCK",
    packages=setuptools.find_packages(),
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.10',
    install_requires=requirements,
)
|
franpozREPO_NAMESHERLOCKPATH_START.@SHERLOCK_extracted@SHERLOCK-master@setup.py@.PATH_END.py
|
{
"filename": "ALMAPipe.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/Obit/python/ALMAPipe.py",
"type": "Python"
}
|
#! /usr/bin/env ObitTalk
"""
The ALMA Pipeline. The pipeline can be invoked from the command line
as, ::
ObitTalk ALMAPipe.py AipsSetupScript PipelineParamScript
where the required arguments are
* *AipsSetupScript* = an AIPS setup script (an example of this file is stored in
``Obit/share/scripts``)
* *PipelineParamScript* = the ALMA continuum pipeline input parameters script
(a template is in ``Obit/share/scripts``)
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, pydoc
from optparse import OptionParser
from six.moves.configparser import NoSectionError
import OErr, OSystem, UV, AIPS, FITS, OTObit
import ObitTalkUtil
from AIPS import AIPSDisk
from FITS import FITSDisk
from PipeUtil import *
from ALMACal import *
from six.moves import range
def pipeline( aipsSetup, parmFile):
"""
ALMA Continuum pipeline.
* *aipsSetup* = AIPS setup file
* *parmFile* = pipeline input parameters file
"""
############################# Initialize OBIT ##########################################
noScrat = []
exec(open(aipsSetup).read())
ALMAAddOutFile( aipsSetup, 'project', "Obit's AIPS setup file" )
############################# Default parameters ##########################################
# Initialize parameters
parms = ALMAInitContParms()
############################# Set Project Processing parameters ##################
print("parmFile",parmFile)
exec(open(parmFile).read())
ALMAAddOutFile( parmFile, 'project', 'Pipeline input parameters' )
# frequency/configuration dependent default parameters
ALMAInitContFQParms(parms)
# General data parameters
band = parms["band"] # Observing band
dataClass = ("UVDa"+band)[0:6] # AIPS class of raw uv data
project = parms["project"][0:12] # Project name (12 char or less, used as AIPS Name)
session = parms["session"] # Project session code
################################## Process #####################################
fileRoot = parms["project"]+"_"+parms["session"]+"_"+parms["band"] # root of file name
logFile = fileRoot +".log" # Processing log file
uv = None
uvc = None
avgClass = ("UVAv"+band)[0:6] # Averaged data AIPS class
outIClass = parms["outIClass"] # image AIPS class
outCClass = parms["outCClass"] # image AIPS class
# Load the outputs pickle jar
ALMAFetchOutFiles()
# Logging directly to logFile
OErr.PInit(err, parms["prtLv"], logFile)
OSystem.PAllowThreads(nThreads) # Allow threads in Obit/oython
retCode = 0
ALMAAddOutFile( logFile, 'project', 'Pipeline log file' )
mess = "Start project "+parms["project"]+" session "+parms["session"]+\
" "+parms["band"]+" Band"+" AIPS user no. "+str(AIPS.userno)+\
", ALMA Max. baseline "+str(parms["ALMAMaxBl"])
printMess(mess, logFile)
if debug:
pydoc.ttypager = pydoc.plainpager # don't page task input displays
mess = "Using Debug mode "
printMess(mess, logFile)
if check:
mess = "Only checking script"
printMess(mess, logFile)
# Log parameters
if parms['doLogParms']:
printMess("Parameter settings", logFile)
for p in parms:
mess = " "+p+": "+str(parms[p])
printMess(mess, logFile)
# Save parameters to pickle jar, manifest
ParmsPicklefile = project+"_"+session+"_"+band+".Parms.pickle" # Where results saved
SaveObject(parms, ParmsPicklefile, True)
ALMAAddOutFile( ParmsPicklefile, 'project', 'Processing parameters used' )
# Are we going to be doing Hanning?
if parms["doHann"]:
loadClass = parms["band"]+"Raw"
else:
loadClass = dataClass
# Load Data from Archive directory
if parms["doLoadArchive"]:
uv = ALMAUVLoadArch(parms["archRoots"], ALMAAIPSName(project, session), loadClass, disk, parms["seq"], err, \
selConfig=parms["selConfig"], selBand=parms["selBand"], selChan=parms["selChan"], \
selChBW=parms["selChBW"], selNIF=parms["selNIF"], calInt=parms["calInt"], \
logfile=logFile, Compress=parms["Compress"], check=check, debug=debug)
if uv==None and not check:
raise RuntimeError("Cannot load "+parms["archRoots"])
# Hanning
if parms["doHann"]:
# Set uv if not done
if uv==None and not check:
uv = UV.newPAUV("AIPS UV DATA", ALMAAIPSName(project, session), loadClass[0:6], disk, parms["seq"], True, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating AIPS data")
uv = ALMAHann(uv, ALMAAIPSName(project, session), dataClass, disk, parms["seq"], err, \
doDescm=parms["doDescm"], logfile=logFile, check=check, debug=debug)
if uv==None and not check:
raise RuntimeError("Cannot Hann data ")
# Set uv if not done
if uv==None and not check:
uv = UV.newPAUV("AIPS UV DATA", ALMAAIPSName(project, session), dataClass[0:6], \
disk, parms["seq"], True, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating AIPS data")
# Clear any old calibration/editing
if parms["doClearTab"]:
mess = "Clear previous calibration"
printMess(mess, logFile)
ALMAClearCal(uv, err, doGain=parms["doClearGain"], doFlag=parms["doClearFlag"], doBP=parms["doClearBP"], check=check)
OErr.printErrMsg(err, "Error resetting calibration")
# Copy FG 1 to FG 2
if parms["doCopyFG"]:
mess = "Copy FG 1 to FG 2"
printMess(mess, logFile)
retCode = ALMACopyFG (uv, err, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error Copying FG table")
# Drop end channels of spectra? Only if new FG 2
if parms["doCopyFG"] and (parms["BChDrop"]>0) or (parms["EChDrop"]>0):
# Channels based on original number, reduced if Hanning
nchan = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocf"]]
fact = max (1,parms["selChan"]/nchan) # Hanning reduction factor
BChDrop = parms["BChDrop"]/fact
EChDrop = parms["EChDrop"]/fact
mess = "Trim %d channels from start and %d from end of each spectrum"%(BChDrop,EChDrop)
printMess(mess, logFile)
retCode = ALMADropChan (uv, BChDrop, EChDrop, err, flagVer=parms["editFG"], \
logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error Copying FG table")
# Get list of source in data
AllSource = ALMAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
# Edit source lists to remove sources not present
for c in parms["PCInsCals"]:
if c not in AllSource:
parms["PCInsCals"].remove(c)
for t in parms["targets"]:
if t not in AllSource:
parms["targets"].remove(t)
if parms["XYGainSource"] not in AllSource:
parms["XYGainSource"] = None
if parms["XYDelaySource"] not in AllSource:
parms["XYDelaySource"] = None
for c in parms["DCals"]:
if c['Source'] not in AllSource:
c['Source'] = None
for c in parms["BPCals"]:
if c['Source'] not in AllSource:
c['Source'] = None
for c in parms["PCals"]:
if c['Source'] not in AllSource:
c['Source'] = None
for c in parms["ACals"]:
if c['Source'] not in AllSource:
c['Source'] = None
# Special editing
if parms["doEditList"] and not check:
mess = "Special editing"
printMess(mess, logFile)
for edt in parms["editList"]:
UV.PFlag(uv,err,timeRange=[dhms2day(edt["timer"][0]),dhms2day(edt["timer"][1])], \
flagVer=parms["editFG"], Ants=edt["Ant"], Chans=edt["Chans"], IFs=edt["IFs"], \
Stokes=edt["Stokes"], Reason=edt["Reason"])
OErr.printErrMsg(err, "Error Flagging")
# Quack to remove data from start and end of each scan
if parms["doQuack"]:
retCode = ALMAQuack (uv, err, begDrop=parms["quackBegDrop"], endDrop=parms["quackEndDrop"], \
Reason=parms["quackReason"], \
logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error Quacking data")
# Flag antennas shadowed by others?
if parms["doShad"]:
retCode = ALMAShadow (uv, err, shadBl=parms["shadBl"], \
logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error Shadow flagging data")
# Apply online calibration
if parms["doOnlineCal"]:
retCode = ALMAOnlineCal (uv,err, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error applying online calibration")
# Median window time editing, for RFI impulsive in time
if parms["doMedn"]:
mess = "Median window time editing, for RFI impulsive in time:"
printMess(mess, logFile)
retCode = ALMAMedianFlag (uv, " ", err, noScrat=noScrat, nThreads=nThreads, \
avgTime=parms["avgTime"], avgFreq=parms["avgFreq"], chAvg= parms["chAvg"], \
timeWind=parms["timeWind"], flagVer=2,flagSig=parms["mednSigma"], \
logfile=logFile, check=check, debug=False)
if retCode!=0:
raise RuntimeError("Error in MednFlag")
# Median window frequency editing, for RFI impulsive in frequency
if parms["doFD1"]:
mess = "Median window frequency editing, for RFI impulsive in frequency:"
printMess(mess, logFile)
retCode = ALMAAutoFlag (uv, " ", err, flagVer=2, doCalib=-1, doBand=-1, \
timeAvg=parms["FD1TimeAvg"], \
doFD=True, FDmaxAmp=1.0e20, FDmaxV=1.0e20, FDwidMW=parms["FD1widMW"], \
FDmaxRMS=[1.0e20,0.1], FDmaxRes=parms["FD1maxRes"], \
FDmaxResBL= parms["FD1maxRes"], \
nThreads=nThreads, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error in AutoFlag")
# RMS/Mean editing for calibrators
if parms["doRMSAvg"]:
mess = "RMS/Mean editing for calibrators:"
printMess(mess, logFile)
clist = ALMACombineCals(parms["ACals"], parms["PCals"], parms["DCals"]) # Calibrator list
retCode = ALMAAutoFlag (uv, clist, err, flagVer=2, doCalib=-1, doBand=-1, \
RMSAvg=parms["RMSAvg"], timeAvg=parms["RMSTimeAvg"], \
nThreads=nThreads, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error in AutoFlag")
# Need to find a reference antenna? See if we have saved it?
if (parms["refAnt"]<=0):
refAnt = FetchObject(project+"_"+session+"_"+band+".refAnt.pickle")
if refAnt:
parms["refAnt"] = refAnt
# Use bandpass calibrator and center half of each spectrum
if parms["refAnt"]<=0:
mess = "Find best reference antenna: run Calib on BP Cal(s) "
printMess(mess, logFile)
parms["refAnt"] = ALMAGetRefAnt(uv, parms["BPCals"], err, flagVer=2, \
solInt=parms["bpsolint1"], nThreads=nThreads, \
logfile=logFile, check=check, debug=debug)
if err.isErr:
raise RuntimeError("Error finding reference antenna")
if parms["refAnts"][0]<=0:
parms["refAnts"][0] = parms["refAnt"]
mess = "Picked reference antenna "+str(parms["refAnt"])
printMess(mess, logFile)
# Save it
ParmsPicklefile = project+"_"+session+"_"+band+".Parms.pickle" # Where results saved
SaveObject(parms, ParmsPicklefile, True)
refAntPicklefile = project+"_"+session+"_"+band+".refAnt.pickle" # Where results saved
SaveObject(parms["refAnt"], refAntPicklefile, True)
# Plot Raw, edited data?
if parms["doRawSpecPlot"] and parms["plotSource"]:
mess = "Raw Spectral plot for: "+parms["plotSource"]
printMess(mess, logFile)
plotFile = "./"+fileRoot+"RawSpec.ps"
retCode = ALMASpectrum(uv, parms["plotSource"], parms["plotTime"], plotFile, parms["refAnt"], err, \
flagVer=2, Stokes=["XX","YY"], doband=-1, \
check=check, debug=debug, logfile=logFile )
if retCode!=0:
raise RuntimeError("Error in Plotting spectrum")
ALMAAddOutFile( plotFile, 'project', 'Pipeline log file' )
# delay calibration
if parms["doDelayCal"] and parms["DCals"] and not check:
plotFile = "./"+fileRoot+"DelayCal.ps"
retCode = ALMADelayCal(uv, parms["DCals"], err, \
BChan=parms["delayBChan"], EChan=parms["delayEChan"], \
doCalib=2, flagVer=2, doBand=-1, \
solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"], \
refAnts=[parms["refAnt"]], doTwo=parms["doTwo"],
doZeroPhs=parms["delayZeroPhs"], \
doPlot=parms["doSNPlot"], plotFile=plotFile, \
nThreads=nThreads, noScrat=noScrat, \
logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error in delay calibration")
# Plot corrected data?
if parms["doSpecPlot"] and parms["plotSource"]:
plotFile = "./"+fileRoot+"DelaySpec.ps"
retCode = ALMASpectrum(uv, parms["plotSource"], parms["plotTime"], \
plotFile, parms["refAnt"], err, \
flagVer=2, Stokes=["XX","YY"], doband=-1, \
check=check, debug=debug, logfile=logFile )
if retCode!=0:
raise RuntimeError("Error in Plotting spectrum")
# Bandpass calibration
if parms["doBPCal"] and parms["BPCals"]:
plotFile = "./"+fileRoot+"BPCal.ps"
retCode = ALMABPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \
solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \
BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \
BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \
doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \
UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=2, doPlot=False, \
doAmpEdit=parms["doAmpEdit"], ampSigma=parms["ampSigma"], \
doBPPlot=parms["doBPPlot"], plotBPFile=plotFile, \
nThreads=nThreads, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error in Bandpass calibration")
# Plot corrected data?
if parms["doSpecPlot"] and parms["plotSource"]:
plotFile = "./"+fileRoot+"BPSpec.ps"
retCode = ALMASpectrum(uv, parms["plotSource"], parms["plotTime"], plotFile, \
parms["refAnt"], err, flagVer=2, Stokes=["XX","YY"], doband=1, \
check=check, debug=debug, logfile=logFile )
if retCode!=0:
raise RuntimeError("Error in Plotting spectrum")
# set X/Y gains and initial calibration
if parms["doXYFixGain"] and parms["XYGainSource"] and not check:
mess = "Fix X/Y gain ratios"
printMess(mess, logFile)
retCode = ALMAXYGain(uv, err, \
XYCal=parms["XYGainSource"],timerange=parms["XYGainTime"], \
doCalib=2, gainUse=0, doBand=1, flagVer=2, refAnt=parms["refAnt"], \
nThreads=nThreads, noScrat=noScrat, logfile=logFile, \
check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error in X-Y gain fix")
# Self calibrate calibrators
if parms["doImgCal"] and not check:
mess = "SelfCalibrate/Image calibrators"
printMess(mess, logFile)
src = ALMACombineCals(parms["ACals"], parms["PCals"], parms["APCals"]) # Source list
ALMAImageCals(uv, err, Sources=src, seq=parms["seq"], \
sclass=parms["outCClass"], doCalib=2, flagVer=2, doBand=1, FOV=parms["CalFOV"], \
maxPSCLoop=parms["maxPSCLoop"], minFluxPSC=parms["minFluxPSC"], solPInt=parms["solPInt"], \
maxASCLoop=parms["maxASCLoop"], minFluxASC=parms["minFluxASC"],\
solAInt=parms["solAInt"], avgPol=parms["avgPol"], Niter=1000, \
avgIF=parms["avgIF"], minSNR=parms["minSNR"],\
refAnt=parms["refAnt"], nThreads=nThreads, noScrat=noScrat,\
logfile=logFile, check=check, debug=debug)
# Self calibrated models now available
ALMAImageModel(parms["ACals"], parms["outCClass"], disk, parms["seq"], err)
ALMAImageModel(parms["PCals"], parms["outCClass"], disk, parms["seq"], err)
ALMAImageModel(parms["APCals"], parms["outCClass"], disk, parms["seq"], err)
# Phase Calibrate
if parms["doPhaseCal"]:
plotFile = "./"+fileRoot+"PhaseCal.ps"
retCode = ALMAPhaseCal (uv, parms["PCals"], err, ACals=parms["ACals"], \
doCalib=2, doBand=1, BPVer=1, flagVer=2, refAnt=parms["refAnt"], \
BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
solInt=parms["solPInt"], avgIF = parms['avgIF'], \
ampScalar=parms["ampScalar"], doPlot=parms["doSNPlot"], plotFile=plotFile, \
nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error calibrating")
# Amp & phase Calibrate
if parms["doAmpPhaseCal"]:
plotFile = "./"+fileRoot+"APCal.ps"
retCode = ALMACalAP (uv, [], parms["ACals"], err, PCals=parms["APCals"], \
doCalib=2, doBand=1, BPVer=1, flagVer=2, \
BChan=parms["ampBChan"], EChan=parms["ampEChan"], \
solInt=parms["solAInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \
doAmpEdit=parms["doAmpEdit"], ampSigma=parms["ampSigma"], \
ampEditFG=parms["ampEditFG"], \
doPlot=parms["doSNPlot"], plotFile=plotFile, refAnt=parms["refAnt"], \
nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error calibrating")
# More editing
if parms["doAutoFlag"]:
mess = "Post calibration editing:"
printMess(mess, logFile)
clist = []
retCode = ALMAAutoFlag (uv, clist, err, flagVer=2, \
doCalib=2, gainUse=0, doBand=1, BPVer=1, \
IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \
doFD=parms["doAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \
FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \
FDmaxRes=parms["FDmaxRes"], FDmaxResBL=parms["FDmaxResBL"], \
FDbaseSel=parms["FDbaseSel"], \
nThreads=nThreads, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error in AutoFlag")
# Calibrate and average data
if parms["doCalAvg"]:
retCode = ALMACalAvg (uv, avgClass, parms["seq"], parms["CalAvgTime"], err, \
flagVer=2, doCalib=2, gainUse=0, doBand=1, BPVer=1, doPol=False, \
avgFreq=parms["CAavgFreq"], chAvg=parms["CAchAvg"], \
BChan=parms["CABChan"], EChan=parms["CAEChan"], \
BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \
nThreads=nThreads, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error in CalAvg")
# Get calibrated/averaged data
if not check:
try:
uv = UV.newPAUV("AIPS UV DATA", ALMAAIPSName(project, session), avgClass[0:6], \
disk, parms["seq"], True, err)
except Exception as exception:
mess = "No Averaged/calibrated datafile"
printMess(mess, logFile)
return
# XClip
if parms["XClip"] and parms["XClip"]>0.0:
mess = "Cross Pol clipping:"
printMess(mess, logFile)
retCode = ALMAAutoFlag (uv, [], err, flagVer=-1, flagTab=1, \
doCalib=2, gainUse=0, doBand=-1, maxBad=1.0, \
XClip=parms["XClip"], timeAvg=1./60., \
nThreads=nThreads, logfile=logFile, check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error in AutoFlag")
# X/Y Delay calibration
if parms["doXYDelay"] and parms["XYDelaySource"]!=None:
retCode = ALMAXYDelay(uv, err, \
XYDCal=parms["XYDelaySource"], timerange=parms["XYDelayTime"], \
BChan=parms["xyBChan"], EChan=parms["xyEChan"], UVRange=parms["xyUVRange"], \
doCalib=parms["xyDoCal"], gainUse=parms["xygainUse"], numIFs=parms["xynumIFs"], \
flagVer=parms["xyflagVer"], refAnt=parms["refAnt"], doPol=False, \
nThreads=nThreads, noScrat=noScrat, logfile=logFile, \
check=check, debug=debug)
if retCode!=0:
raise RuntimeError("Error in X-Y delay calibration")
# Instrumental polarization calibration
if parms["doPolCal"]:
if parms["PCRefAnt"]==-10:
parms["PCRefAnt"] = parms["refAnt"]
retCode = ALMAPolCal(uv, parms["PCInsCals"], err, InsCalPoln=parms["PCCalPoln"], \
doCalib=2, gainUse=0, doBand=-1, flagVer=0, doFitXY=parms["doFitXY"], \
solInt=parms["PCSolInt"], refAnt=parms["PCRefAnt"], solType=parms["PCSolType"], \
ChInc=parms["PCChInc"], ChWid=parms["PCChWid"], doFitOri=parms["doFitOri"], \
nThreads=nThreads, check=check, debug=debug, noScrat=noScrat, logfile=logFile)
if retCode!=0 and (not check):
raise RuntimeError("Error in polarization calibration: "+str(retCode))
# end poln cal.
# Plot corrected data? W/ polcal, i.e. in RR,LL...
if parms["doSpecPlot"] and parms["plotSource"]:
plotFile = "./"+fileRoot+"Spec.ps"
retCode = ALMASpectrum(uv, parms["plotSource"], parms["plotTime"], \
plotFile, parms["refAnt"], err, \
doPol=parms["doPol"], PDVer=parms["PDVer"], \
flagVer=1, Stokes=["RR","LL"], doband=1, \
check=check, debug=debug, logfile=logFile )
if retCode!=0:
raise RuntimeError("Error in Plotting parallel spectrum")
plotFile = "./"+fileRoot+"RL_Spec.ps"
retCode = ALMASpectrum(uv, parms["plotSource"], parms["plotTime"], \
plotFile, parms["refAnt"], err, \
doPol=parms["doPol"], PDVer=parms["PDVer"], \
flagVer=1, Stokes=["RL","LR"], doband=1, \
check=check, debug=debug, logfile=logFile )
if retCode!=0:
raise RuntimeError("Error in Plotting calibrated cross spectrum")
plotFile = "./"+fileRoot+"XY_Spec.ps"
retCode = ALMASpectrum(uv, parms["plotSource"], parms["plotTime"], \
plotFile, parms["refAnt"], err, \
flagVer=1, Stokes=["XY","YX"], doband=-1, \
check=check, debug=debug, logfile=logFile )
if retCode!=0:
raise RuntimeError("Error in Plotting uncalibrated cross spectrum")
# Image targets
if parms["doImage"]:
# If targets not specified, image all
if len(parms["targets"])<=0:
slist = ALMAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
else:
slist = parms["targets"]
ALMAImageTargets (uv, err, Sources=slist, seq=parms["seq"], sclass=outIClass, \
doCalib=2, doBand=1, flagVer=1, doPol=parms["doPol"], PDVer=parms["PDVer"], \
Stokes=parms["Stokes"], FOV=parms["FOV"], Robust=parms["Robust"], Niter=parms["Niter"], \
CleanRad=parms["CleanRad"], minFlux=parms["minFlux"], UVRange=parms["UVRange"], \
maxPSCLoop=parms["maxPSCLoop"], minFluxPSC=parms["minFluxPSC"], \
solPInt=parms["solPInt"], solPMode=parms["solPMode"], solPType=parms["solPType"], \
maxASCLoop=parms["maxASCLoop"], minFluxASC=parms["minFluxASC"], \
solAInt=parms["solAInt"], solAMode=parms["solAMode"], solAType=parms["solAType"], \
avgPol=parms["avgPol"], avgIF=parms["avgIF"], minSNR = 4.0, refAnt=parms["refAnt"], \
do3D=parms["do3D"], BLFact=parms["BLFact"], BLchAvg=parms["BLchAvg"], \
doMB=parms["doMB"], norder=parms["MBnorder"], maxFBW=parms["MBmaxFBW"], \
PBCor=parms["PBCor"],antSize=parms["antSize"], \
nTaper=parms["nTaper"], Tapers=parms["Tapers"], \
doOutlier=parms["doOutlier"], OutlierDist=parms["OutlierDist"], OutlierFlux=parms["OutlierFlux"], \
nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug)
# End image
# Get report on sources
if parms["doReport"]:
# If targets not specified, do all
if len(parms["targets"])<=0:
slist = ALMAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
else:
slist = parms["targets"]
Report = ALMAReportTargets(uv, err, Sources=slist, seq=parms["seq"], sclass=outIClass, \
Stokes=parms["Stokes"], logfile=logFile, check=check, debug=debug)
# Save to pickle jar
ReportPicklefile = "./"+fileRoot+"Report.pickle" # Where results saved
SaveObject(Report, ReportPicklefile, True)
# Write results, cleanup
# Save cal/average UV data?
if parms["doSaveUV"] and (not check):
Aname = ALMAAIPSName(project, session)
cno = AIPSDir.PTestCNO(disk, user, Aname, avgClass[0:6], "UV", parms["seq"], err)
if cno>0:
uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, avgClass, disk, parms["seq"], True, err)
filename = parms["project"]+parms["session"]+parms["band"]+"Cal.uvtab"
fuv = ALMAUVFITS (uvt, filename, 0, err, compress=parms["Compress"], logfile=logFile)
ALMAAddOutFile( filename, 'project', "Calibrated Averaged UV data" )
# Save list of output files
ALMASaveOutFiles()
del uvt
# Save raw UV data tables?
if parms["doSaveTab"] and (not check):
Aname = ALMAAIPSName(project, session)
cno = AIPSDir.PTestCNO(disk, user, Aname, dataClass[0:6], "UV", parms["seq"], err)
if cno>0:
uvt = UV.newPAUV("AIPS RAW UV DATA", Aname, dataClass[0:6], disk, parms["seq"], True, err)
filename = parms["project"]+parms["session"]+parms["band"]+"CalTab.uvtab"
fuv = ALMAUVFITSTab (uvt, filename, 0, err, logfile=logFile)
ALMAAddOutFile( filename, 'project', "Calibrated AIPS tables" )
del uvt
# Write History
filename = project+'_'+session+'_'+band+".History.text"
OTObit.PrintHistory(uv, file=filename)
ALMAAddOutFile( filename, 'project', "Processing history of calibrated data" )
# Save list of output files
ALMASaveOutFiles()
# Imaging results
# If targets not specified, save all
if len(parms["targets"])<=0:
slist = ALMAAllSource(uv,err,logfile=logFile,check=check,debug=debug)
else:
slist = parms["targets"]
for target in slist:
if parms["doSaveImg"] and (not check):
for s in parms["Stokes"]:
oclass = s+outIClass[1:]
outname = target
# Test if image exists
cno = AIPSDir.PTestCNO(disk, user, outname, oclass, "MA", parms["seq"], err)
if cno <= 0 :
continue
x = Image.newPAImage("out", outname, oclass, disk, parms["seq"], True, err)
outfile = "./"+fileRoot+target+"."+oclass+".fits"
xf = ALMAImFITS (x, outfile, 0, err, logfile=logFile)
ALMAAddOutFile( outfile, target, 'Image of '+ target)
# Statistics
zz=imstat(x, err, logfile=logFile)
# end writing loop
# Save list of output files
ALMASaveOutFiles()
OErr.printErrMsg(err, "Writing output")
# Contour plots
if parms["doKntrPlots"]:
mess = "INFO --> Contour plots (doKntrPlots)"
printMess(mess, logFile)
ALMAKntrPlots( err, imName=parms["targets"], project=project,
session=session, band=band, disk=disk, debug=debug )
# Save list of output files
ALMASaveOutFiles()
elif debug:
mess = "Not creating contour plots ( doKntrPlots = "+str(parms["doKntrPlots"])+ " )"
printMess(mess, logFile)
# Source uv plane diagnostic plots
if parms["doDiagPlots"]:
mess = "INFO --> Diagnostic plots (doDiagPlots)"
printMess(mess, logFile)
# Get the highest number avgClass catalog file
Aname = ALMAAIPSName( project, session )
uvc = None
if not check:
uvname = project+"_"+session+"_"+band+"_Cal"
uvc = UV.newPAUV(uvname, Aname, avgClass, disk, parms["seq"], True, err)
ALMADiagPlots( uvc, err, cleanUp=parms["doCleanup"], \
project=project, session=session, band=band, \
logfile=logFile, check=check, debug=debug )
# Save list of output files
ALMASaveOutFiles()
elif debug:
mess = "Not creating diagnostic plots ( doDiagPlots = "+str(parms["doDiagPlots"])+ " )"
printMess(mess, logFile)
# Save metadata
srcMetadata = None
projMetadata = None
if parms["doMetadata"]:
mess = "INFO --> Save metadata (doMetadata)"
printMess(mess, logFile)
uvc = None
if not uvc:
# Get calibrated/averaged data
Aname = ALMAAIPSName(project, session)
uvname = project+"_"+session+"_"+band+"_Cal"
uvc = UV.newPAUV(uvname, Aname, avgClass, disk, parms["seq"], True, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
# Get source metadata; save to pickle file
srcMetadata = ALMASrcMetadata( uvc, err, Sources=parms["targets"], seq=parms["seq"], \
sclass=outIClass, Stokes=parms["Stokes"],\
logfile=logFile, check=check, debug=debug )
picklefile = "./"+fileRoot+".SrcReport.pickle"
SaveObject( srcMetadata, picklefile, True )
ALMAAddOutFile( picklefile, 'project', 'All source metadata' )
# Get project metadata; save to pickle file
projMetadata = ALMAProjMetadata( uvc, AIPS_VERSION, err, \
PCals=parms["PCals"], ACals=parms["ACals"], \
BPCals=parms["BPCals"], DCals=parms["DCals"], \
project = project, session = session, band = band, \
dataInUVF = parms["archRoots"], archFileID = 12345 )
picklefile = "./"+fileRoot+".ProjReport.pickle"
SaveObject(projMetadata, picklefile, True)
ALMAAddOutFile( picklefile, 'project', 'Project metadata' )
else:
# Fetch from pickle jar
picklefile = "./"+fileRoot+".SrcReport.pickle"
srcMetadata = FetchObject(picklefile)
picklefile = "./"+fileRoot+".ProjReport.pickle"
projMetadata = FetchObject(picklefile)
# Write report
if parms["doHTML"]:
mess = "INFO --> Write HTML report (doHTML)"
printMess(mess, logFile)
ALMAHTMLReport( projMetadata, srcMetadata, \
outfile="./"+fileRoot+".report.html", \
logFile=logFile )
# Write VOTable
if parms["doVOTable"]:
mess = "INFO --> Write VOTable (doVOTable)"
printMess(mess, logFile)
ALMAAddOutFile( 'VOTable.xml', 'project', 'VOTable report' )
ALMAWriteVOTable( projMetadata, srcMetadata, filename='VOTable.xml' )
# Save list of output files
ALMASaveOutFiles()
# Cleanup - delete AIPS files
if parms["doCleanup"] and (not check):
mess = "INFO --> Clean up (doCleanup)"
printMess(mess, logFile)
# Delete target images
# How many Stokes images
nstok = len(parms["Stokes"])
for istok in range(0,nstok):
oclass = parms["Stokes"][istok:istok+1]+outIClass[1:]
AllDest(err, disk=disk,Aseq=parms["seq"],Aclass=oclass)
oclass = parms["Stokes"][istok:istok+1]+outCClass[1:]
AllDest(err, disk=disk,Aseq=parms["seq"],Aclass=oclass)
# Delete initial UV data
Aname = ALMAAIPSName(project, session)
# Test if data exists
cno = AIPSDir.PTestCNO(disk, user, Aname, dataClass[0:6], "UV", parms["seq"], err)
if cno>0:
uvt = UV.newPAUV("AIPS RAW UV DATA", Aname, dataClass[0:6], disk, parms["seq"], True, err)
uvt.Zap(err)
del uvt
if err.isErr:
OErr.printErrMsg(err, "Error deleting raw AIPS data")
# Zap calibrated/averaged data
# Test if data exists
cno = AIPSDir.PTestCNO(disk, user, Aname, avgClass[0:6], "UV", parms["seq"], err)
if cno>0:
uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, avgClass[0:6], disk, parms["seq"], True, err)
uvt.Zap(err)
del uvt
if err.isErr:
OErr.printErrMsg(err, "Error deleting cal/avg AIPS data")
# Zap UnHanned data if present
loadClass = parms["band"]+"Raw"
# Test if image exists
cno = AIPSDir.PTestCNO(disk, user, Aname, loadClass[0:6], "UV", parms["seq"], err)
if cno>0:
uvt = UV.newPAUV("AIPS CAL UV DATA", Aname, loadClass[0:6], disk, parms["seq"], True, err)
uvt.Zap(err)
del uvt
if err.isErr:
OErr.printErrMsg(err, "Error deleting cal/avg AIPS data")
OErr.printErrMsg(err, "Writing output/cleanup")
# Shutdown
mess = "Finished project "+parms["project"]+" session "+parms["session"]+ \
" "+parms["band"]+" Band"+" AIPS user no. "+str(AIPS.userno)
printMess(mess, logFile)
OErr.printErr(err)
OSystem.Shutdown(ObitSys)
# end pipeline
class DataProductError(Exception):
    """Raised when an output data product (file) cannot be produced or written."""
if __name__ == '__main__':
    # Command-line entry point for the ALMA pipeline driver.
    # The usage string follows optparse's %prog convention.
    usage = """usage: %prog [options] AIPSSetup PipelineParms
AIPSSetup = pipeline AIPS setup file
PipelineParms = pipeline input parameters file"""
    parser = OptionParser( usage=usage )
    (options, args) = parser.parse_args()
    # Unset LD_PRELOAD to avoid ld.so warnings from binary installation
    os.unsetenv('LD_PRELOAD')
    if len(args) != 2:
        # Exactly two positional arguments are required; otherwise show help.
        parser.print_help()
        sys.exit()
    try:
        # args[0] = AIPS setup file, args[1] = pipeline parameter file
        pipeline( args[0] , args[1])
    finally:
        # No extra cleanup: the pipeline shuts Obit down itself.
        pass
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@Obit@python@ALMAPipe.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/ohlc/legendgrouptitle/font/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``family`` property of ``ohlc.legendgrouptitle.font``."""

    def __init__(
        self, plotly_name="family", parent_name="ohlc.legendgrouptitle.font", **kwargs
    ):
        # Fill in the defaults only where the caller did not override them.
        defaults = {"edit_type": "style", "no_blank": True, "strict": True}
        for key, value in defaults.items():
            kwargs.setdefault(key, value)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@ohlc@legendgrouptitle@font@_family.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/experimental/pallas/ops/gpu/__init__.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@experimental@pallas@ops@gpu@__init__.py@.PATH_END.py
|
{
"filename": "test_fixEB.py",
"repo_name": "liuhao-cn/fastSHT",
"repo_path": "fastSHT_extracted/fastSHT-main/scripts/obsolete/test_fixEB.py",
"type": "Python"
}
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys as sys
import os
# Default run parameters; may be overridden on the command line below.
nside = 64        # HEALPix resolution parameter
nsim = 1000       # number of simulated maps
n_proc = 8        # thread count for the linear-algebra backends
niter = 3         # number of map2alm iterations
compare = False   # also run the (slow) HEALPix reference implementation?

# the command line input will overwrite the defaults
if len(sys.argv)>1:
    nside = int(sys.argv[1])
if len(sys.argv)>2:
    nsim = int(sys.argv[2])
if len(sys.argv)>3:
    n_proc = int(sys.argv[3])
if len(sys.argv)>4:
    niter = int(sys.argv[4])
if len(sys.argv)>5:
    compare = sys.argv[5].lower() == "true"

print(" ")
print("Working with the following parameters:")
print("Nside = %i, Nsim = %i, n_proc = %i, Niter = %i, comparison with HEALPix = %s"
      %(nside, nsim, n_proc, niter, compare))
print(" ")

# Pin the thread counts of the common BLAS/threading backends; this must
# happen before numpy/healpy are imported to take effect.
os.environ["OMP_NUM_THREADS"] = str(n_proc) # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = str(n_proc) # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = str(n_proc) # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = str(n_proc) # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = str(n_proc) # export NUMEXPR_NUM_THREADS=6
sys.path.append('..')
import SHT
import fastSHT
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
import time
import importlib
importlib.reload(SHT)
def make_mask(nside, upper_lat = 5):
    """Build a binary HEALPix mask that zeroes an equatorial strip.

    Parameters
    ----------
    nside : int
        HEALPix resolution parameter.
    upper_lat : float, optional
        Half-width of the masked latitude band in degrees; pixels with
        |lat| < upper_lat are set to 0.  (The original hard-coded 5 here,
        silently ignoring this parameter -- fixed.)

    Returns
    -------
    np.ndarray of shape (12 * nside**2,) with 1 = keep, 0 = masked.
    """
    npix = nside**2 * 12
    mask = np.ones(npix)
    # pix2ang with lonlat=True returns (lon, lat) in degrees; vectorized
    # over all pixels instead of a per-pixel Python loop.
    lon, lat = hp.pix2ang(nside, np.arange(npix), lonlat=True)
    mask[np.abs(lat) < upper_lat] = 0
    return mask
def fix_EB_hp(T, Q, U, mask, seed=23333):
    """HEALPix (healpy) reference implementation of the E->B leakage fix.

    Relies on the module-level globals ``nsim``, ``nside``, ``lmax`` and
    ``niter``.  ``seed`` is accepted for interface symmetry but unused.
    Returns the template-corrected B maps, shape (nsim, npix).
    """
    # Pixel indices of the observed (unmasked) region.
    vid = (np.arange(len(mask))[mask == 1])
    nv = len(vid)
    maps_in = np.array( [T, Q, U ] )
    # Flag masked pixels so healpy treats them as missing in map2alm.
    maps_in[:,mask==0,:] = hp.UNSEEN
    start = time.time()
    # SHT of the masked TQU maps; BO is the "observed" B map per simulation.
    alms2_hp = np.array([hp.map2alm(maps_in[:,:,i], lmax=lmax, iter=niter) for i in range(nsim)])
    BO = np.array([hp.alm2map(alms2_hp[i,2,:], nside=nside, lmax=lmax) for i in range(nsim)])
    # Zero the T (index 0) and B (index 2) alms, keeping only E.
    alms2_hp[:,2,:] = 0
    alms2_hp[:,0,:] = 0
    # Re-synthesize E-only maps and mask them again.
    maps = np.array([hp.alm2map(alms2_hp[i,:,:], nside=nside, lmax=lmax) for i in range(nsim)])
    maps[:,:,mask==0] = 0
    # B alms of the masked E-only maps give the leakage template BT.
    alms2_hp = np.array([hp.map2alm(maps[i,:,:], lmax=lmax, iter=niter) for i in range(nsim)])
    alms2_hp = alms2_hp[:,2,:]
    BT = np.array([hp.alm2map(alms2_hp[i,:], nside=nside, lmax=lmax) for i in range(nsim)])
    BC = np.zeros(BT.shape)
    for i in range(nsim):
        # Linear fit of the template to the observed B over unmasked pixels,
        # then subtract the fitted template to get the corrected map.
        x = BT[i, vid]
        y = BO[i, vid]
        coe = np.polyfit(x, y, 1)
        BC[i, vid] = BO[i, vid] - BT[i, vid] * coe[0] - coe[1]
    print('Time cost for Healpy is ' + str(time.time() - start))
    return BC
def test_fix_EB(seed=23333):
    """Benchmark fastSHT's fix_eb and (optionally) compare against healpy.

    Uses the module-level globals ``npix``, ``nsim``, ``nside``, ``lmax``,
    ``niter`` and ``compare``.  Returns (Bmap, BC) when the comparison is
    enabled, otherwise None.
    """
    print('Testing fix_EB...')
    np.random.seed(seed)
    # Random input maps in Fortran order, as required by the fastSHT backend.
    T = np.asfortranarray(np.random.rand(npix, nsim))
    Q = np.asfortranarray(np.random.rand(npix, nsim))
    U = np.asfortranarray(np.random.rand(npix, nsim))
    mask = make_mask(nside)
    sht = SHT.SHT(nside, lmax, nsim, niter, pol=True)
    start = time.time()
    Bmap = sht.fix_eb(Q, U, mask)
    print('Time cost for fastSHT is ' + str(time.time() - start))
    if(compare == False):
        return
    # Slow reference computation with healpy, then compare B power spectra.
    BC = fix_EB_hp(T, Q, U, mask, seed=seed)
    cl = np.array([hp.anafast(Bmap[:,i]) for i in range(nsim)])
    cl2 = np.array([hp.anafast(BC[i,:]) for i in range(nsim)])
    # Relative error normalized by the mean spectrum amplitude.
    max_err = (np.abs(cl2 - cl) / cl.mean()).max()
    print('Max relative error in clB is: ' + str(max_err))
    return (Bmap, BC)
# Derived quantities used as module globals by the functions above.
npix = 12*nside**2
lmax = 3*nside - 1
test_fix_EB()
|
liuhao-cnREPO_NAMEfastSHTPATH_START.@fastSHT_extracted@fastSHT-main@scripts@obsolete@test_fixEB.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/volume/legendgrouptitle/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``font`` property of ``volume.legendgrouptitle``.

    Auto-generated plotly validator: delegates validation to the ``Font``
    data class named in ``data_class_str``.
    """

    def __init__(
        self, plotly_name="font", parent_name="volume.legendgrouptitle", **kwargs
    ):
        # kwargs.pop(...) lets callers override the generated defaults while
        # still forwarding any remaining options to the base validator.
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size
            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@volume@legendgrouptitle@_font.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "glue-viz/glue",
"repo_path": "glue_extracted/glue-main/glue/plugins/wcs_autolinking/tests/__init__.py",
"type": "Python"
}
|
glue-vizREPO_NAMEgluePATH_START.@glue_extracted@glue-main@glue@plugins@wcs_autolinking@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "main.py",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/example/fortran/pm_mathGammaGil/getGammaIncUppGil/main.py",
"type": "Python"
}
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import glob
import sys
fontsize = 17
kind = "RK"
# Legend labels for the three gamma shape parameters in the data file.
label = [ r"shape: $\kappa = 1.0$"
        , r"shape: $\kappa = 2.5$"
        , r"shape: $\kappa = 5.0$"
        ]

pattern = "*." + kind + ".txt"
fileList = glob.glob(pattern)
if len(fileList) == 1:
    # Column 0 is the abscissa; every remaining column is one curve.
    df = pd.read_csv(fileList[0], delimiter = " ")

    fig = plt.figure(figsize = 1.25 * np.array([6.4, 4.8]), dpi = 200)
    ax = plt.subplot()

    # BUGFIX: the original upper bound was ``len(df.values[0,:]+1)`` -- the
    # ``+1`` sat inside the call, adding 1 elementwise to the row (a no-op
    # for len()).  The intended, equivalent bound is the column count.
    for i in range(1, len(df.values[0, :])):
        plt.plot( df.values[:, 0]
                , df.values[:, i]
                , linewidth = 2
                )

    plt.xticks(fontsize = fontsize - 2)
    plt.yticks(fontsize = fontsize - 2)
    ax.set_xlabel("x", fontsize = fontsize)
    ax.set_ylabel("Regularized Upper\nIncomplete Gamma Function", fontsize = fontsize)
    plt.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
    ax.tick_params(axis = "y", which = "minor")
    ax.tick_params(axis = "x", which = "minor")
    ax.legend ( label
              , fontsize = fontsize
              #, loc = "center left"
              #, bbox_to_anchor = (1, 0.5)
              )
    # Save the figure next to the data file, swapping the extension.
    plt.savefig(fileList[0].replace(".txt",".png"))
else:
    # Zero or multiple matching files: refuse to guess which one to plot.
    sys.exit("Ambiguous file list exists.")
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@example@fortran@pm_mathGammaGil@getGammaIncUppGil@main.py@.PATH_END.py
|
{
"filename": "test_cnfw_ellipse_potential.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_LensModel/test_Profiles/test_cnfw_ellipse_potential.py",
"type": "Python"
}
|
__author__ = "sibirrer"
from lenstronomy.LensModel.Profiles.cnfw import CNFW
from lenstronomy.LensModel.Profiles.cnfw_ellipse_potential import CNFWEllipsePotential
import lenstronomy.Util.param_util as param_util
import numpy as np
import numpy.testing as npt
import pytest
class TestCNFWELLIPSE(object):
    """Tests for the elliptical cored-NFW potential (CNFWEllipsePotential),
    checked against the spherical CNFW profile in the round (q=1) limit and
    against hard-coded regression values for q=0.8."""

    def setup_method(self):
        # Fresh profile instances for every test method.
        self.nfw = CNFW()
        self.nfw_e = CNFWEllipsePotential()

    def test_function(self):
        """Lensing potential: spherical limit, origin, and regression values."""
        # q=1: elliptical potential must reduce to the spherical CNFW.
        x = np.array([1])
        y = np.array([2])
        Rs = 1.0
        alpha_Rs = 1.0
        q = 1.0
        phi_G = 0
        r_core = 0.5
        e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
        values = self.nfw.function(x, y, Rs, alpha_Rs, r_core=r_core)
        values_e = self.nfw_e.function(x, y, Rs, alpha_Rs, r_core, e1, e2)
        npt.assert_almost_equal(values[0], values_e[0], decimal=5)
        # The potential is defined to vanish at the origin.
        x = np.array([0])
        y = np.array([0])
        q = 0.8
        phi_G = 0
        e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
        values = self.nfw_e.function(x, y, Rs, alpha_Rs, r_core, e1, e2)
        npt.assert_almost_equal(values[0], 0, decimal=4)
        # Regression values for an elliptical (q=0.8) configuration.
        x = np.array([2, 3, 4])
        y = np.array([1, 1, 1])
        values = self.nfw_e.function(x, y, Rs, alpha_Rs, r_core, e1, e2)
        npt.assert_almost_equal(values[0], 1.8550220596738973, decimal=5)
        npt.assert_almost_equal(values[1], 2.7684470762303537, decimal=5)
        npt.assert_almost_equal(values[2], 3.7076606717487586, decimal=5)

    def test_derivatives(self):
        """Deflection angles: spherical limit, zero amplitude, regression."""
        x = np.array([1])
        y = np.array([2])
        Rs = 1.0
        alpha_Rs = 1.0
        q = 1.0
        phi_G = 0
        r_core = 0.5
        e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
        f_x, f_y = self.nfw.derivatives(x, y, Rs, alpha_Rs, r_core)
        f_x_e, f_y_e = self.nfw_e.derivatives(x, y, Rs, alpha_Rs, r_core, e1, e2)
        npt.assert_almost_equal(f_x[0], f_x_e[0], decimal=5)
        npt.assert_almost_equal(f_y[0], f_y_e[0], decimal=5)
        # alpha_Rs=0 must give zero deflection everywhere.
        x = np.array([0])
        y = np.array([0])
        alpha_Rs = 0
        f_x, f_y = self.nfw_e.derivatives(x, y, Rs, alpha_Rs, r_core, e1, e2)
        npt.assert_almost_equal(f_x[0], 0, decimal=5)
        npt.assert_almost_equal(f_y[0], 0, decimal=5)
        # Regression values for q=0.8.
        x = np.array([1, 3, 4])
        y = np.array([2, 1, 1])
        alpha_Rs = 1.0
        q = 0.8
        phi_G = 0
        e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
        values = self.nfw_e.derivatives(x, y, Rs, alpha_Rs, r_core, e1, e2)
        npt.assert_almost_equal(values[0][0], 0.3867896894988756, decimal=5)
        npt.assert_almost_equal(values[1][0], 1.1603690684966268, decimal=5)
        npt.assert_almost_equal(values[0][1], 0.9371571936062841, decimal=5)
        npt.assert_almost_equal(values[1][1], 0.46857859680314207, decimal=5)

    def test_hessian(self):
        """Second derivatives: spherical limit and regression values."""
        x = np.array([1])
        y = np.array([2])
        Rs = 1.0
        alpha_Rs = 1.0
        q = 1.0
        phi_G = 0
        r_core = 0.5
        e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
        f_xx, f_xy, f_yx, f_yy = self.nfw.hessian(x, y, Rs, alpha_Rs, r_core)
        f_xx_e, f_xy_e, f_yx_e, f_yy_e = self.nfw_e.hessian(
            x, y, Rs, alpha_Rs, r_core, e1, e2
        )
        npt.assert_almost_equal(f_xx[0], f_xx_e[0], decimal=5)
        npt.assert_almost_equal(f_yy[0], f_yy_e[0], decimal=5)
        npt.assert_almost_equal(f_xy[0], f_xy_e[0], decimal=5)
        npt.assert_almost_equal(f_yx[0], f_yx_e[0], decimal=5)
        # Regression values for q=0.8; hessian returns (f_xx, f_xy, f_yx, f_yy).
        x = np.array([1, 3, 4])
        y = np.array([2, 1, 1])
        q = 0.8
        phi_G = 0
        e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
        values = self.nfw_e.hessian(x, y, Rs, alpha_Rs, r_core, e1, e2)
        npt.assert_almost_equal(values[0][0], 0.3306510620859626, decimal=5)
        npt.assert_almost_equal(values[3][0], 0.07493437759187316, decimal=5)
        npt.assert_almost_equal(values[1][0], -0.1684167189042185, decimal=5)
        npt.assert_almost_equal(values[0][1], 0.020280774837289073, decimal=5)
        npt.assert_almost_equal(values[3][1], 0.3955523575349673, decimal=5)
        npt.assert_almost_equal(values[1][1], -0.14605247788956888, decimal=5)

    def test_mass_3d(self):
        """3D enclosed mass must agree between the density (rho0) and the
        lensing (alpha_Rs) parameterizations."""
        Rs = 10.0
        rho0 = 1.0
        r_core = 7.0
        R = np.linspace(0.1 * Rs, 4 * Rs, 1000)
        # Convert the density normalization to the lensing normalization.
        alpha_Rs = self.nfw._rho2alpha(rho0, Rs, r_core)
        m3d = self.nfw.mass_3d(R, Rs, rho0, r_core)
        m3d_lens = self.nfw_e.mass_3d_lens(R, Rs, alpha_Rs, r_core)
        npt.assert_almost_equal(m3d, m3d_lens, decimal=8)
if __name__ == "__main__":
    # Allow running this test module directly with the pytest runner.
    pytest.main()
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_LensModel@test_Profiles@test_cnfw_ellipse_potential.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "i4Ds/sdo-cli",
"repo_path": "sdo-cli_extracted/sdo-cli-main/setup.py",
"type": "Python"
}
|
from setuptools import setup, find_packages
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Packages live under src/ (src-layout).
pkgs = find_packages(where='src')
setup(
    name="sdo-cli",
    version="0.0.21",
    author="Marius Giger",
    author_email="marius.giger@fhnw.ch",
    description="An ML practitioner's utility for working with SDO data.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=pkgs,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    include_package_data=True,
    python_requires='>=3.8',
    install_requires=["beautifulsoup4>=4.11.1",
                      "click>=8.1.2",
                      "dask>=2022.5.0",
                      "drms>=0.6.2",
                      "h5netcdf>=1.0.0",
                      "matplotlib>=3.5.1",
                      "munch>=2.5.0",
                      "opencv-python>=4.5.5.64",
                      "python-dotenv>=0.20.0",
                      "pandas>=1.4.2",
                      "pytorch-lightning>=1.6.1",
                      "scikit-learn>=1.0.2",
                      "Shapely>=1.7.1",
                      "SQLAlchemy>=1.4.17",
                      "sunpy>=3.1.6",
                      "torch>=1.11.0",
                      "tqdm>=4.64.0",
                      "torchmetrics>=0.8.2",
                      "torchvision>=0.12.0",
                      "wandb>=0.12.15",
                      "zeep>=4.1.0",
                      "zarr>=2.11.3"],
    # Console entry point: the `sdo-cli` command maps to sdo.cli:cli.
    entry_points="""
        [console_scripts]
        sdo-cli=sdo.cli:cli
    """,
    url="https://github.com/i4DS/sdo-cli",
    package_dir={'': 'src'}
)
|
i4DsREPO_NAMEsdo-cliPATH_START.@sdo-cli_extracted@sdo-cli-main@setup.py@.PATH_END.py
|
{
"filename": "test_k2sff.py",
"repo_name": "lightkurve/lightkurve",
"repo_path": "lightkurve_extracted/lightkurve-main/tests/io/test_k2sff.py",
"type": "Python"
}
|
import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve.io.k2sff import read_k2sff_lightcurve
from lightkurve import search_lightcurve
@pytest.mark.remote_data
def test_read_k2sff():
    """Can we read K2SFF files?"""
    # Fetch a real K2SFF HLSP file from MAST (remote-data test).
    url = "http://archive.stsci.edu/hlsps/k2sff/c16/212100000/00236/hlsp_k2sff_k2_lightcurve_212100236-c16_kepler_v1_llc.fits"
    f = fits.open(url)
    # Verify different extensions
    fluxes = []
    for ext in ["BESTAPER", "CIRC_APER9"]:
        lc = read_k2sff_lightcurve(url, ext=ext)
        assert type(lc).__name__ == "KeplerLightCurve"
        # Are `time` and `flux` consistent with the FITS file?
        assert_array_equal(f[ext].data["T"], lc.time.value)
        assert_array_equal(f[ext].data["FCOR"], lc.flux.value)
        fluxes.append(lc.flux)
    # Different extensions should show different fluxes
    assert not np.array_equal(fluxes[0], fluxes[1])
@pytest.mark.remote_data
def test_search_k2sff():
    """Can we search and download a K2SFF light curve?"""
    # Try an early campaign
    search = search_lightcurve("K2-18", author="K2SFF", campaign=1)
    assert len(search) == 1
    assert search.table["author"][0] == "K2SFF"
    lc = search.download()
    assert type(lc).__name__ == "KeplerLightCurve"
    assert lc.campaign == 1
    # Try a late campaign; also check the resolved EPIC target id.
    lc = search_lightcurve("GJ 9827", author="K2SFF", campaign=19).download()
    assert type(lc).__name__ == "KeplerLightCurve"
    assert lc.targetid == 246389858
    assert lc.campaign == 19
|
lightkurveREPO_NAMElightkurvePATH_START.@lightkurve_extracted@lightkurve-main@tests@io@test_k2sff.py@.PATH_END.py
|
{
"filename": "add_densities.py",
"repo_name": "phil-mansfield/gotetra",
"repo_path": "gotetra_extracted/gotetra-master/render/scripts/add_densities.py",
"type": "Python"
}
|
import numpy as np
import sys
# Command line: add_densities.py <width> <out_file> <in_file> [<in_file> ...]
# Sums per-snapshot density grids element-wise into a single output grid.
width = int(sys.argv[1])   # grid is width**3 cells
out = sys.argv[2]          # output path for the summed grid
inputs = sys.argv[3:]      # input grid files to accumulate

n_cells = width * width * width
grid = np.zeros(n_cells)
for fname in inputs:
    # np.fromfile defaults to float64; assumes the writer used the same
    # dtype -- TODO confirm against the gotetra render output format.
    data = np.fromfile(fname)
    # Fail with a clear message instead of a cryptic broadcast error when
    # an input file does not match the requested grid size.
    if data.size != n_cells:
        sys.exit("%s: expected %d values, found %d" % (fname, n_cells, data.size))
    grid += data
grid.tofile(out)
|
phil-mansfieldREPO_NAMEgotetraPATH_START.@gotetra_extracted@gotetra-master@render@scripts@add_densities.py@.PATH_END.py
|
{
"filename": "alpaca_chat.py",
"repo_name": "OpenAccess-AI-Collective/axolotl",
"repo_path": "axolotl_extracted/axolotl-main/src/axolotl/prompt_strategies/alpaca_chat.py",
"type": "Python"
}
|
"""Module for Alpaca prompt strategy classes"""
from typing import Any, Dict, Optional, Tuple
from axolotl.prompt_tokenizers import (
AlpacaPromptTokenizingStrategy,
InstructionPromptTokenizingStrategy,
)
from axolotl.prompters import AlpacaPrompter, PromptStyle, UnpromptedPrompter
def load(tokenizer, cfg, ds_cfg: Optional[Dict[str, Any]] = None):
    """Build the default chat-style Alpaca tokenizing strategy.

    The conversation/prompt style may be overridden per-dataset via the
    ``conversation`` key in ``ds_cfg``; otherwise CHAT is used.
    """
    if ds_cfg and "conversation" in ds_cfg:
        style = ds_cfg["conversation"]
    else:
        style = PromptStyle.CHAT.value
    prompter = AlpacaPrompter(style)
    return AlpacaPromptTokenizingStrategy(
        prompter,
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )
class AlpacaConcisePrompter(AlpacaPrompter):
    """
    Alpaca Prompter extending the system prompt to ask for concise chat-instruct answers
    """

    # NOTE(review): these two prompt strings are currently identical to the
    # ones on the "chat" variant elsewhere in this module -- confirm whether
    # the concise variant was meant to diverge.
    system_prompt = "Below is an instruction from a USER that describes a task, paired with an input that provides further context. The ASSISTANT writes a response that concisely and appropriately completes the request.\n\n"
    system_no_input_prompt = "Below is an instruction from a USER that describes a task. The ASSISTANT writes a response that appropriately and concisely completes the request.\n\n"
class AlpacaChatPrompter(AlpacaPrompter):
    """
    Alpaca Chat Prompter extending the system prompt to for chat-instruct answers
    """

    system_prompt = "Below is an instruction from a USER that describes a task, paired with an input that provides further context. The ASSISTANT writes a response that concisely and appropriately completes the request.\n\n"
    system_no_input_prompt = "Below is an instruction from a USER that describes a task. The ASSISTANT writes a response that appropriately and concisely completes the request.\n\n"

    def __init__(self):  # pylint: disable=super-init-not-called
        # Hard-codes the CHAT prompt style instead of taking it as an
        # argument like the parent class does.
        self.prompt_style = PromptStyle.CHAT.value
        self.match_prompt_style()
class NoSystemPrompter(AlpacaPrompter):
    """
    Null Prompter with no system prompts
    """

    # Empty system prompts; turns are rendered as bare instruction/input text.
    system_prompt = ""
    system_no_input_prompt = ""
    turn_format = "{instruction} {input} "
    turn_no_input_format = "{instruction} "

    def __init__(self):  # pylint: disable=super-init-not-called
        # Intentionally skips the parent initializer: the formats above are
        # fixed and no prompt-style matching is needed.
        pass
class AlpacaQAPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
    """Tokenizing strategy for question/answer style Alpaca records."""

    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
        # Map question -> instruction and answer -> response; QA records
        # carry no separate input segment.
        instruction = prompt["question"]
        response = prompt["answer"]
        return instruction, "", response
class CamelAIPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
    """Tokenizing strategy for CamelAI datasets."""

    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
        # CamelAI rows store the two conversation turns as message_1 and
        # message_2; there is no separate input segment.
        instruction = prompt["message_1"]
        response = prompt["message_2"]
        return instruction, "", response
def load_concise(tokenizer, cfg):
    """Tokenizing strategy using the concise-answer Alpaca system prompt."""
    prompter = AlpacaConcisePrompter(PromptStyle.CHAT.value)
    return AlpacaPromptTokenizingStrategy(
        prompter, tokenizer, cfg.train_on_inputs, cfg.sequence_len
    )
def load_qa(tokenizer, cfg):
    """Tokenizing strategy for question/answer records with the chat prompter."""
    prompter = AlpacaChatPrompter()
    return AlpacaQAPromptTokenizingStrategy(
        prompter, tokenizer, cfg.train_on_inputs, cfg.sequence_len
    )
def load_camel_ai(tokenizer, cfg):
    """Tokenizing strategy for CamelAI records with the chat prompter."""
    prompter = AlpacaChatPrompter()
    return CamelAIPromptTokenizingStrategy(
        prompter, tokenizer, cfg.train_on_inputs, cfg.sequence_len
    )
def load_no_prompt(tokenizer, cfg):
    """Tokenizing strategy with all system prompting stripped."""
    prompter = UnpromptedPrompter(PromptStyle.CHAT.value)
    return AlpacaPromptTokenizingStrategy(
        prompter, tokenizer, cfg.train_on_inputs, cfg.sequence_len
    )
|
OpenAccess-AI-CollectiveREPO_NAMEaxolotlPATH_START.@axolotl_extracted@axolotl-main@src@axolotl@prompt_strategies@alpaca_chat.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.