metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "sigpsingle.py",
"repo_name": "jpcoles/glass",
"repo_path": "glass_extracted/glass-master/glass/misc/sigpsingle.py",
"type": "Python"
}
|
# Compute the projected (line-of-sight) velocity dispersion for a single
# system using the spherical deprojection machinery.
# NOTE: Python 2 script (uses the `print` statement).
import sys
sys.path.append('..')
from spherical_deproject import sigpsingle
from numpy import loadtxt

# Data files may be supplied on the command line; otherwise fall back to a
# hard-coded test data set (only valid on the original author's machine).
files = sys.argv[1:]
if not files:
    # NOTE(review): `dir` shadows the builtin of the same name.
    dir = '/smaug/data/theorie/justin/Backup/Mylaptop/Scratch/Lensing/Cuspcore/CMerger1'
    files.append(dir + '/cmerger_1_sigpx.txt')
for f in files:
    # Columns: projected radius R, velocity dispersion sigp, and its error.
    data = loadtxt(f,
                   dtype = {'names': ('R', 'sigp', 'err'),
                            'formats': ('f8', 'f8', 'f8')})
    # Hernquist light profile used to weight the deprojection.
    import massmodel.hernquist as light
    from scipy.integrate.quadrature import simps as integrator
    intpnts = 100               # number of integration points
    lpars = [1,25,1,intpnts]    # light-profile parameters (see massmodel.hernquist)
    beta = 0                    # velocity anisotropy (isotropic) -- set but unused here
    aperture = 400              # aperture radius for the dispersion integral
    print sigpsingle(data['R'],data['sigp'],light,lpars,aperture,integrator)
|
jpcolesREPO_NAMEglassPATH_START.@glass_extracted@glass-master@glass@misc@sigpsingle.py@.PATH_END.py
|
{
"filename": "setup_d.py",
"repo_name": "mhammond/pywin32",
"repo_path": "pywin32_extracted/pywin32-main/win32/scripts/setup_d.py",
"type": "Python"
}
|
# Install and register pythonXX_d.dll, pywintypesXX_d.dll and pythoncomXX_d.dll
#
# Assumes the _d files can be found in the same directory as this script
# or in the cwd.
import os
import shutil
import sys
import winreg
import win32api
def usage_and_die(rc):
    """Print usage instructions for the debug-binary installer and exit.

    Parameters
    ----------
    rc : int
        Process exit code passed to ``sys.exit``.
    """
    print()
    print("This script is designed to copy and register the Python debug")
    print("binaries. It looks for pythonXX_d.dll, pythoncomXX_d.dll etc,")
    print("and installs them to work correctly with Python debug builds.")
    print()
    # Fixed typo in the message: "the. zip" -> "the .zip".
    print("You will generally find this script in the .zip file that")
    print("included these _d files. Please run this script from")
    print("that directory")
    sys.exit(rc)
# Refuse to run under a debug interpreter: win32api's module filename ends in
# "_d" only when the debug build of the extension was loaded.
if os.path.splitext(os.path.basename(win32api.__file__))[0].endswith("_d"):
    print("This scripts appears to be running a DEBUG version of Python.")
    print("Please run it using a normal release build (python.exe)")
    usage_and_die(1)
# The release pythoncom/pywintypes must import cleanly, since their installed
# locations are used below to decide where the _d copies belong.
try:
    import pythoncom
except ImportError as details:
    print("Could not import the release version of pythoncom")
    print(f"The error details are: {details}")
    print("Please correct this error and rerun the script")
    usage_and_die(2)
try:
    import pywintypes
except ImportError as details:
    print("Could not import the release version of pywintypes")
    print(f"The error details are: {details}")
    print("Please correct this error and rerun the script")
    usage_and_die(2)
def _docopy(src, dest):
orig_src = src
if not os.path.isfile(src):
src = os.path.join(os.path.split(sys.argv[0])[0], src)
print(
"Can not find {} or {} to copy".format(
os.path.abspath(orig_src), os.path.abspath(src)
)
)
return 0
try:
shutil.copy(src, dest)
print(f"Copied {src} -> {dest}")
return 1
except:
print(f"Error copying '{src}' -> '{dest}'")
print(str(sys.exc_info()[1]))
usage_and_die(3)
def _doregister(mod_name, dll_name):
    """Register *dll_name* as the Debug variant of module *mod_name*.

    Looks up the existing module registration (machine-wide first, then
    per-user) and writes a "Debug" subkey pointing at the _d DLL.
    """
    assert os.path.isfile(dll_name), "Shouldn't get here if the file doesn't exist!"
    try:
        key = winreg.OpenKey(
            winreg.HKEY_LOCAL_MACHINE,
            f"Software\\Python\\PythonCore\\{sys.winver}\\Modules\\{mod_name}",
        )
    except winreg.error:
        try:
            # BUG FIX: the fallback was a verbatim duplicate of the
            # HKEY_LOCAL_MACHINE lookup; per-user installs register the
            # module under HKEY_CURRENT_USER instead.
            key = winreg.OpenKey(
                winreg.HKEY_CURRENT_USER,
                f"Software\\Python\\PythonCore\\{sys.winver}\\Modules\\{mod_name}",
            )
        except winreg.error:
            print(
                "Could not find the existing '{}' module registered in the registry".format(
                    mod_name
                )
            )
            usage_and_die(4)
    # Create the debug key.
    sub_key = winreg.CreateKey(key, "Debug")
    winreg.SetValue(sub_key, None, winreg.REG_SZ, dll_name)
    print(f"Registered '{dll_name}' in the registry")
def _domodule(mod_name, release_mod_filename):
    """Copy the _d variant of a release extension module and register it."""
    directory, release_name = os.path.split(release_mod_filename)
    stem, extension = os.path.splitext(release_name)
    debug_name = stem + "_d" + extension
    if _docopy(debug_name, directory):
        debug_path = os.path.abspath(os.path.join(directory, debug_name))
        _doregister(mod_name, debug_path)
# First the main Python DLL.
# BUG FIX: removed the duplicated assignment target
# ("path, fname = path, fname = ..."), which assigned the same tuple twice.
path, fname = os.path.split(win32api.GetModuleFileName(sys.dllhandle))
base, ext = os.path.splitext(fname)
_docopy(base + "_d" + ext, path)
# Then pythoncom and pywintypes.
_domodule("pythoncom", pythoncom.__file__)
_domodule("pywintypes", pywintypes.__file__)
print("System _d files were setup.")
|
mhammondREPO_NAMEpywin32PATH_START.@pywin32_extracted@pywin32-main@win32@scripts@setup_d.py@.PATH_END.py
|
{
"filename": "gp.py",
"repo_name": "pyro-ppl/numpyro",
"repo_path": "numpyro_extracted/numpyro-master/examples/gp.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: Gaussian Process
=========================
In this example we show how to use NUTS to sample from the posterior
over the hyperparameters of a gaussian process.
.. image:: ../_static/img/examples/gp.png
:align: center
"""
import argparse
import os
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import jax
from jax import vmap
import jax.numpy as jnp
import jax.random as random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import (
MCMC,
NUTS,
init_to_feasible,
init_to_median,
init_to_sample,
init_to_uniform,
init_to_value,
)
# Select the non-interactive Agg backend so the example runs headless.
matplotlib.use("Agg")  # noqa: E402
# squared exponential kernel with diagonal noise term
def kernel(X, Z, var, length, noise, jitter=1.0e-6, include_noise=True):
deltaXsq = jnp.power((X[:, None] - Z) / length, 2.0)
k = var * jnp.exp(-0.5 * deltaXsq)
if include_noise:
k += (noise + jitter) * jnp.eye(X.shape[0])
return k
def model(X, Y):
    """GP regression model: log-normal priors on the kernel hyperparameters
    and a zero-mean multivariate-normal likelihood over the targets."""
    # set uninformative log-normal priors on our three kernel hyperparameters
    hyper_prior = dist.LogNormal(0.0, 10.0)
    var = numpyro.sample("kernel_var", hyper_prior)
    noise = numpyro.sample("kernel_noise", hyper_prior)
    length = numpyro.sample("kernel_length", hyper_prior)

    # covariance of the latent function at the training inputs
    cov = kernel(X, X, var, length, noise)

    # sample Y according to the standard gaussian process formula
    likelihood = dist.MultivariateNormal(
        loc=jnp.zeros(X.shape[0]), covariance_matrix=cov
    )
    numpyro.sample("Y", likelihood, obs=Y)
# helper function for doing hmc inference
def run_inference(model, args, rng_key, X, Y):
start = time.time()
# demonstrate how to use different HMC initialization strategies
if args.init_strategy == "value":
init_strategy = init_to_value(
values={"kernel_var": 1.0, "kernel_noise": 0.05, "kernel_length": 0.5}
)
elif args.init_strategy == "median":
init_strategy = init_to_median(num_samples=10)
elif args.init_strategy == "feasible":
init_strategy = init_to_feasible()
elif args.init_strategy == "sample":
init_strategy = init_to_sample()
elif args.init_strategy == "uniform":
init_strategy = init_to_uniform(radius=1)
kernel = NUTS(model, init_strategy=init_strategy)
mcmc = MCMC(
kernel,
num_warmup=args.num_warmup,
num_samples=args.num_samples,
num_chains=args.num_chains,
thinning=args.thinning,
progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True,
)
mcmc.run(rng_key, X, Y)
mcmc.print_summary()
print("\nMCMC elapsed time:", time.time() - start)
return mcmc.get_samples()
# do GP prediction for a given set of hyperparameters. this makes use of the well-known
# formula for Gaussian process predictions
# do GP prediction for a given set of hyperparameters. this makes use of the well-known
# formula for Gaussian process predictions
def predict(rng_key, X, Y, X_test, var, length, noise, use_cholesky=True):
    """Return the GP posterior mean at X_test and one noisy predictive draw."""
    # kernels between test/test, test/train and train/train points
    k_pp = kernel(X_test, X_test, var, length, noise, include_noise=True)
    k_pX = kernel(X_test, X, var, length, noise, include_noise=False)
    k_XX = kernel(X, X, var, length, noise, include_noise=True)

    if use_cholesky:
        # K_XX is symmetric positive-definite, so a Cholesky solve is cheaper
        # and more stable than explicit inversion.
        cho = jax.scipy.linalg.cho_factor(k_XX)
        K = k_pp - k_pX @ jax.scipy.linalg.cho_solve(cho, k_pX.T)
        mean = k_pX @ jax.scipy.linalg.cho_solve(cho, Y)
    else:
        K_inv = jnp.linalg.inv(k_XX)
        K = k_pp - k_pX @ (K_inv @ k_pX.T)
        mean = k_pX @ (K_inv @ Y)

    # one draw of predictive noise from the (clipped) diagonal of K
    predictive_std = jnp.sqrt(jnp.clip(jnp.diag(K), 0.0))
    sigma_noise = predictive_std * jax.random.normal(rng_key, X_test.shape[:1])

    # mean function and a single posterior-predictive sample
    return mean, mean + sigma_noise
# create artificial regression dataset
def get_data(N=30, sigma_obs=0.15, N_test=400):
np.random.seed(0)
X = jnp.linspace(-1, 1, N)
Y = X + 0.2 * jnp.power(X, 3.0) + 0.5 * jnp.power(0.5 + X, 2.0) * jnp.sin(4.0 * X)
Y += sigma_obs * np.random.randn(N)
Y -= jnp.mean(Y)
Y /= jnp.std(Y)
assert X.shape == (N,)
assert Y.shape == (N,)
X_test = jnp.linspace(-1.3, 1.3, N_test)
return X, Y, X_test
def main(args):
    """End-to-end driver: generate data, infer hyperparameters, predict, plot."""
    X, Y, X_test = get_data(N=args.num_data)

    # infer the posterior over the kernel hyperparameters
    rng_key, rng_key_predict = random.split(random.PRNGKey(0))
    samples = run_inference(model, args, rng_key, X, Y)

    # one predictive draw per posterior sample, vectorized with vmap
    n_draws = samples["kernel_var"].shape[0]
    pred_keys = random.split(rng_key_predict, n_draws)

    def _single_prediction(rng_key, var, length, noise):
        return predict(
            rng_key, X, Y, X_test, var, length, noise, use_cholesky=args.use_cholesky
        )

    means, predictions = vmap(_single_prediction)(
        pred_keys,
        samples["kernel_var"],
        samples["kernel_length"],
        samples["kernel_noise"],
    )

    mean_prediction = np.mean(means, axis=0)
    percentiles = np.percentile(predictions, [5.0, 95.0], axis=0)

    # make plots
    fig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True)
    ax.plot(X, Y, "kx")  # training data
    # 90% band of the posterior-predictive draws
    ax.fill_between(X_test, percentiles[0, :], percentiles[1, :], color="lightblue")
    ax.plot(X_test, mean_prediction, "blue", ls="solid", lw=2.0)  # mean prediction
    ax.set(xlabel="X", ylabel="Y", title="Mean predictions with 90% CI")
    plt.savefig("gp_plot.pdf")
if __name__ == "__main__":
    # The example is pinned to the numpyro release it ships with.
    assert numpyro.__version__.startswith("0.16.1")
    parser = argparse.ArgumentParser(description="Gaussian Process example")
    parser.add_argument("-n", "--num-samples", nargs="?", default=1000, type=int)
    parser.add_argument("--num-warmup", nargs="?", default=1000, type=int)
    parser.add_argument("--num-chains", nargs="?", default=1, type=int)
    parser.add_argument("--thinning", nargs="?", default=2, type=int)
    parser.add_argument("--num-data", nargs="?", default=25, type=int)
    parser.add_argument("--device", default="cpu", type=str, help='use "cpu" or "gpu".')
    parser.add_argument(
        "--init-strategy",
        default="median",
        type=str,
        choices=["median", "feasible", "value", "uniform", "sample"],
    )
    # --no-cholesky flips use_cholesky (default True) to False in predict().
    parser.add_argument("--no-cholesky", dest="use_cholesky", action="store_false")
    args = parser.parse_args()

    numpyro.set_platform(args.device)
    numpyro.set_host_device_count(args.num_chains)

    main(args)
|
pyro-pplREPO_NAMEnumpyroPATH_START.@numpyro_extracted@numpyro-master@examples@gp.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/bar/hoverlabel/__init__.py",
"type": "Python"
}
|
import sys

# On Python < 3.7 eagerly import the submodule contents; on newer versions
# defer to a PEP 562 module-level __getattr__ for lazy relative imports.
if sys.version_info < (3, 7):
    from ._font import Font
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@bar@hoverlabel@__init__.py@.PATH_END.py
|
{
"filename": "0005_dbquery_queries.py",
"repo_name": "mavrix93/LightCurvesClassifier",
"repo_path": "LightCurvesClassifier_extracted/LightCurvesClassifier-master/lcc_web/web/interface/migrations/0005_dbquery_queries.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-15 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer ``queries`` field to the ``dbquery`` model.

    ``preserve_default=False`` means the ``default=None`` here is a one-off
    value for this migration only and is not kept on the model field.
    """

    dependencies = [
        ('interface', '0004_auto_20170313_1952'),
    ]

    operations = [
        migrations.AddField(
            model_name='dbquery',
            name='queries',
            field=models.IntegerField(default=None),
            preserve_default=False,
        ),
    ]
|
mavrix93REPO_NAMELightCurvesClassifierPATH_START.@LightCurvesClassifier_extracted@LightCurvesClassifier-master@lcc_web@web@interface@migrations@0005_dbquery_queries.py@.PATH_END.py
|
{
"filename": "Gas.py",
"repo_name": "psheehan/pdspy",
"repo_path": "pdspy_extracted/pdspy-master/pdspy/gas/Gas.py",
"type": "Python"
}
|
from pdspy.constants.physics import m_p, c, h
import numpy
import h5py
class Gas:
    """Molecular gas data: energy levels, radiative transitions and
    collisional rate coefficients, loadable from LAMDA-format text files
    or HDF5 files, and writable back to HDF5."""

    def set_properties_from_lambda(self, filename):
        """Parse a LAMDA (Leiden Atomic and Molecular Database) format file.

        Populates: mass, level data (J, E, g), radiative transitions
        (J_u, J_l, A_ul, nu, E_u, B_ul) and per-partner collision tables
        (partners, temp, J_u_coll, J_l_coll, gamma).
        """
        f = open(filename)
        # Skip the 3 header lines preceding the molecular weight.
        for i in range(3):
            f.readline()
        self.mass = float(f.readline())
        f.readline()
        nlev = int(f.readline())  # number of energy levels
        f.readline()
        # Per-level quantum label J, energy E and statistical weight g;
        # the first column (level index) is discarded into `temp`.
        self.J = numpy.empty(nlev, dtype="<U6")
        self.E = numpy.empty(nlev, dtype=float)
        self.g = numpy.empty(nlev, dtype=float)
        for i in range(nlev):
            temp, self.E[i], self.g[i], self.J[i] = tuple(f.readline().split())
        f.readline()
        ntrans = int(f.readline())  # number of radiative transitions
        f.readline()
        self.J_u = numpy.empty(ntrans, dtype=int)
        self.J_l = numpy.empty(ntrans, dtype=int)
        self.A_ul = numpy.empty(ntrans, dtype=float)
        self.nu = numpy.empty(ntrans, dtype=float)
        self.E_u = numpy.empty(ntrans, dtype=float)
        for i in range(ntrans):
            temp, self.J_u[i], self.J_l[i], self.A_ul[i], self.nu[i], \
                    self.E_u[i] = tuple(f.readline().split())
        # Frequencies are tabulated in GHz; convert to Hz.
        self.nu *= 1.0e9
        # Einstein B coefficient from A: B_ul = c^2 A_ul / (2 h nu^3).
        self.B_ul = c**2 * self.A_ul / (2*h*self.nu**3)
        f.readline()
        npartners = int(f.readline())  # number of collisional partners
        self.partners = []
        self.temp = []
        self.J_u_coll = []
        self.J_l_coll = []
        self.gamma = []
        for i in range(npartners):
            f.readline()
            # Partner name line (kept verbatim, including trailing newline).
            self.partners.append(f.readline())
            f.readline()
            ncolltrans = int(f.readline())  # collisional transitions
            f.readline()
            ncolltemps = int(f.readline())  # tabulated temperatures
            f.readline()
            self.temp.append(numpy.array(f.readline().split(), dtype=float))
            f.readline()
            self.J_u_coll.append(numpy.empty(ncolltrans, dtype=int))
            self.J_l_coll.append(numpy.empty(ncolltrans, dtype=int))
            self.gamma.append(numpy.empty((ncolltrans,ncolltemps), \
                    dtype=float))
            for j in range(ncolltrans):
                # Split off the first three columns; the remainder (temp2)
                # holds the rate coefficients at each temperature.
                temp, self.J_u_coll[i][j], self.J_l_coll[i][j], temp2 = \
                        tuple(f.readline().split(None,3))
                self.gamma[i][j,:] = numpy.array(temp2.split())
        f.close()

    def set_properties_from_file(self, filename=None, usefile=None):
        """Read molecular data from an HDF5 file.

        Either *filename* (opened and closed here) or *usefile* (an already
        open h5py handle, left open) must be supplied.
        """
        if (usefile == None):
            f = h5py.File(filename, "r")
        else:
            f = usefile

        self.mass = f['mass'][()]
        self.J = f["J"][...]
        self.E = f['E'][...]
        self.g = f['g'][...]
        self.J_u = f['J_u'][...]
        self.J_l = f['J_l'][...]
        self.A_ul = f['A_ul'][...]
        self.nu = f['nu'][...]
        self.E_u = f['E_u'][...]
        # B_ul is derived rather than stored: B_ul = c^2 A_ul / (2 h nu^3).
        self.B_ul = c**2 * self.A_ul / (2*h*self.nu**3)

        self.partners = []
        self.temp = []
        self.J_u_coll = []
        self.J_l_coll = []
        self.gamma = []
        for name in f["CollisionalPartners"]:
            self.partners.append(name)
            self.temp.append(f["CollisionalPartners"][name] \
                    ["Temperature"][...])
            self.J_u_coll.append(f["CollisionalPartners"][name] \
                    ["J_u_coll"][...])
            self.J_l_coll.append(f["CollisionalPartners"][name] \
                    ["J_l_coll"][...])
            self.gamma.append(f["CollisionalPartners"][name]["Gamma"][...])

        if (usefile == None):
            f.close()

    def write(self, filename=None, usefile=None):
        """Write the molecular data to an HDF5 file.

        Either *filename* (opened and closed here) or *usefile* (an already
        open h5py handle, left open) must be supplied.
        """
        if (usefile == None):
            f = h5py.File(filename, "w")
        else:
            f = usefile

        f['mass'] = self.mass

        J_dset = f.create_dataset("J", (self.J.size,), dtype=h5py.special_dtype(vlen=str))
        J_dset[...] = self.J
        E_dset = f.create_dataset("E", (self.E.size,), dtype='f')
        E_dset[...] = self.E
        g_dset = f.create_dataset("g", (self.g.size,), dtype='f')
        g_dset[...] = self.g
        # NOTE(review): J_u/J_l are integer level indices but are stored with
        # dtype='f' here — confirm downstream readers expect floats.
        J_u_dset = f.create_dataset("J_u", (self.J_u.size,), dtype='f')
        J_u_dset[...] = self.J_u
        J_l_dset = f.create_dataset("J_l", (self.J_l.size,), dtype='f')
        J_l_dset[...] = self.J_l
        A_ul_dset = f.create_dataset("A_ul", (self.A_ul.size,), dtype='f')
        A_ul_dset[...] = self.A_ul
        nu_dset = f.create_dataset("nu", (self.nu.size,), dtype='f')
        nu_dset[...] = self.nu
        E_u_dset = f.create_dataset("E_u", (self.E_u.size,), dtype='f')
        E_u_dset[...] = self.E_u

        # One subgroup per collisional partner, each holding its temperature
        # grid, transition indices, and rate-coefficient table.
        collisions = f.create_group("CollisionalPartners")
        partners = []
        temp = []
        J_u_coll = []
        J_l_coll = []
        gamma = []
        for i in range(len(self.partners)):
            partners.append(collisions.create_group("{0:s}".format( \
                    self.partners[i])))
            temp.append(partners[i].create_dataset("Temperature", \
                    (self.temp[i].size,), dtype='f'))
            temp[i][...] = self.temp[i]
            J_u_coll.append(partners[i].create_dataset("J_u_coll", \
                    (self.J_u_coll[i].size,), dtype='f'))
            J_u_coll[i][...] = self.J_u_coll[i]
            J_l_coll.append(partners[i].create_dataset("J_l_coll", \
                    (self.J_l_coll[i].size,), dtype='f'))
            J_l_coll[i][...] = self.J_l_coll[i]
            gamma.append(partners[i].create_dataset("Gamma", \
                    self.gamma[i].shape, dtype='f'))
            gamma[i][...] = self.gamma[i]

        if (usefile == None):
            f.close()
|
psheehanREPO_NAMEpdspyPATH_START.@pdspy_extracted@pdspy-master@pdspy@gas@Gas.py@.PATH_END.py
|
{
"filename": "plot_ska_fs8.py",
"repo_name": "philbull/RadioFisher",
"repo_path": "RadioFisher_extracted/RadioFisher-master/plotting/plot_ska_fs8.py",
"type": "Python"
}
|
#!/usr/bin/python
"""
Plot f.sigma_8 as a function of z
"""
# NOTE: Python 2 script (uses `print` statements).
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm
import matplotlib.ticker
from radiofisher import euclid
import copy

cosmo = rf.experiments.cosmo

#------------------------------------------------
# SKA RSD chapter: All distance measures (f.sigma_8)
#------------------------------------------------
# NOTE(review): this first set of names/labels/colours is immediately
# overwritten by the assignments below.
names = ['fSKA1SUR350_25000', 'EuclidRef', 'SKA1MID350_25000', 'gSKA2']
labels = ['SKA1-SUR B1 (IM)', 'Euclid (gal.)', 'SKA1-MID B1 (IM)', 'SKA2 (gal.)']
fig_name = "ska-fs8.pdf"
colours = ['#FFA728', '#6B6B6B', '#1619A1', '#CC0000']

# Experiments to plot: one Fisher-matrix output directory per name.
names = ['SKA1MID350_25000', 'SKA1MID900_25000', 'fSKA1SUR350_25000', 'fSKA1SUR650_25000',
         'gSKAMIDMKB2', 'gSKASURASKAP', 'gSKA2', 'EuclidRef']
labels = ['SKA1-MID B1 (IM)', 'SKA1-MID B2 (IM)', 'SKA1-SUR B1 (IM)',
          'SKA1-SUR B2 (IM)', 'SKA1-MID (gal.)', 'SKA1-SUR (gal.)',
          'Full SKA (gal.)', 'Euclid (gal.)']
colours = ['#8082FF', '#1619A1', '#FFB928', '#ff6600', '#95CD6D', '#007A10', '#CC0000',
           '#000000', '#858585', '#c1c1c1', 'y']
linestyle = [[], [], [], [], [], [], [], [], []]
marker = ['D', 'D', 'D', 'D', 's', 's', 'o', 'o', 'o']
ms = [6., 6., 6., 6., 6., 6., 5., 5., 5.]

# Axis limits for the final figure.
XLIM = (-0.02, 2.2)
YLIM = (0., 0.045)

################################################################################
# Fiducial value and plotting

fig = P.figure()
ax = fig.add_subplot(111)

_k = range(len(names)) #[::-1]
for k in _k:
    root = "output/" + names[k]

    # Load cosmo fns.
    dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
    zc, Hc, dAc, Dc, fc = dat
    zs, Hs, dAs, Ds, fs = np.genfromtxt(root+"-cosmofns-smooth.dat").T
    kc = np.genfromtxt(root+"-fisher-kc.dat").T

    # Load Fisher matrices as fn. of z
    Nbins = zc.size
    F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]

    # EOS FISHER MATRIX
    pnames = rf.load_param_names(root+"-fisher-full-0.dat")
    # Parameters expanded per-redshift-bin; the rest are marginalized away.
    zfns = ['A', 'bs8', 'fs8', 'H', 'DA',]
    excl = ['Tb', 'n_s', 'sigma8', 'omegak', 'omegaDE', 'w0', 'wa', 'h',
            'gamma', 'N_eff', 'pk*', 'f', 'b_HI']
    F, lbls = rf.combined_fisher_matrix( F_list, expand=zfns, names=pnames,
                                         exclude=excl )
    cov = np.linalg.inv(F)
    errs = np.sqrt(np.diag(cov))
    print lbls

    # Identify functions of z
    pfs8 = rf.indices_for_param_names(lbls, 'fs8*')

    print ""
    print "#", names[k]
    # NOTE(review): the header says "z, fsigma8, sigma(fsigma8)" but the
    # values printed are (z, sigma(fsigma8), fsigma8) — confirm intended order.
    print "# z, fsigma8, sigma(fsigma8)"
    for j in range(zc.size):
        print "%4.4f %5.5e %5.5e" % (zc[j], errs[pfs8][j], (cosmo['sigma_8']*fc*Dc)[j])

    # Plot errors as fn. of redshift (fractional error on f.sigma_8)
    err = errs[pfs8] / (cosmo['sigma_8']*fc*Dc)
    line = P.plot( zc, err, color=colours[k], lw=1.8, label=labels[k],
                   marker=marker[k], markersize=ms[k], markeredgecolor=colours[k] )
    line[0].set_dashes(linestyle[k])

P.tick_params(axis='both', which='major', labelsize=20, width=1.5, size=8., pad=5)
P.tick_params(axis='both', which='minor', labelsize=20, width=1.5, size=8.)

# Add label to panel
#P.figtext(l0 + 0.02, b0 + hh*(0.86+i), ax_lbls[i],
#          fontdict={'size':'x-large'}) #, bbox=dict(ec='k', fc='none', lw=1.2))

P.xlabel('$z$', labelpad=10., fontdict={'fontsize':'xx-large'})
P.ylabel('$\sigma(f \sigma_8) / (f \sigma_8)$', labelpad=15., fontdict={'fontsize':'xx-large'})

# Set tick locations
P.gca().yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(0.01))
P.gca().yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.01))

leg = P.legend(prop={'size':'large'}, loc='upper right', frameon=True, ncol=2)
leg.get_frame().set_edgecolor('w')
leg.get_frame().set_alpha(0.8)

P.xlim(XLIM)
P.ylim(YLIM)

# Set size
P.tight_layout()
P.gcf().set_size_inches(8.,6.)
P.savefig(fig_name, transparent=True)
print "Figure saved to: %s" % fig_name
P.show()
|
philbullREPO_NAMERadioFisherPATH_START.@RadioFisher_extracted@RadioFisher-master@plotting@plot_ska_fs8.py@.PATH_END.py
|
{
"filename": "VERSION.md",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/src/python/VERSION.md",
"type": "Markdown"
}
|
3.0.0
ParaMonte Python library version file.
This project uses semantic versioning.
For details, see http://semver.org
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@src@python@VERSION.md@.PATH_END.py
|
{
"filename": "bimod_sats_fithelp.py",
"repo_name": "ArgonneCPAC/diffmah",
"repo_path": "diffmah_extracted/diffmah-main/diffmah/diffmahpop_kernels/bimod_sats_fithelp.py",
"type": "Python"
}
|
"""
"""
from jax import jit as jjit
from jax import numpy as jnp
from jax import random as jran
from jax import value_and_grad, vmap
from . import mc_bimod_sats as mcs
# Lower cutoff on observation time used when fitting (presumably Gyr, per the
# conventions of mc_bimod_sats — TODO confirm).
T_OBS_FIT_MIN = 0.5
@jjit
def _mse(x, y):
d = y - x
return jnp.mean(d * d)
@jjit
def _loss_mah_moments_singlebin(
    diffmahpop_params,
    tarr,
    lgm_obs,
    t_obs,
    ran_key,
    lgt0,
    target_mean_log_mah,
    target_std_log_mah,
    target_frac_peaked,
):
    """MSE loss comparing predicted MAH moments to targets in a single bin."""
    mean_log_mah, std_log_mah, frac_peaked = mcs.predict_mah_moments_singlebin(
        diffmahpop_params, tarr, lgm_obs, t_obs, ran_key, lgt0
    )
    # The frac_peaked term is intentionally disabled (kept for reference):
    # loss += _mse(frac_peaked, target_frac_peaked)
    return _mse(mean_log_mah, target_mean_log_mah) + _mse(
        std_log_mah, target_std_log_mah
    )
# vmap over per-bin axes: every argument is mapped along axis 0 except the
# shared diffmahpop_params (arg 0) and lgt0 (arg 5).
_U = (None, 0, 0, 0, 0, None, 0, 0, 0)
_loss_mah_moments_multibin_vmap = jjit(vmap(_loss_mah_moments_singlebin, in_axes=_U))
@jjit
def _loss_mah_moments_multibin_kern(
    diffmahpop_params,
    tarr_matrix,
    lgm_obs_arr,
    t_obs_arr,
    ran_key,
    lgt0,
    target_mean_log_mahs,
    target_std_log_mahs,
    target_frac_peaked,
):
    """Per-bin losses; the RNG key is split so each bin draws independently."""
    n_bins = tarr_matrix.shape[0]
    bin_keys = jran.split(ran_key, n_bins)
    losses = _loss_mah_moments_multibin_vmap(
        diffmahpop_params,
        tarr_matrix,
        lgm_obs_arr,
        t_obs_arr,
        bin_keys,
        lgt0,
        target_mean_log_mahs,
        target_std_log_mahs,
        target_frac_peaked,
    )
    return losses
@jjit
def loss_mah_moments_multibin(
    diffmahpop_params,
    tarr_matrix,
    lgm_obs_arr,
    t_obs_arr,
    ran_key,
    lgt0,
    target_mean_log_mahs,
    target_std_log_mahs,
    target_frac_peaked,
):
    """Scalar loss: the mean of the per-bin MAH-moment losses."""
    per_bin_losses = _loss_mah_moments_multibin_kern(
        diffmahpop_params,
        tarr_matrix,
        lgm_obs_arr,
        t_obs_arr,
        ran_key,
        lgt0,
        target_mean_log_mahs,
        target_std_log_mahs,
        target_frac_peaked,
    )
    return jnp.mean(per_bin_losses)
# Differentiable wrapper: returns (loss, gradient w.r.t. diffmahpop_params).
loss_and_grads_mah_moments_multibin = value_and_grad(loss_mah_moments_multibin)
|
ArgonneCPACREPO_NAMEdiffmahPATH_START.@diffmah_extracted@diffmah-main@diffmah@diffmahpop_kernels@bimod_sats_fithelp.py@.PATH_END.py
|
{
"filename": "sghmc.py",
"repo_name": "blackjax-devs/blackjax",
"repo_path": "blackjax_extracted/blackjax-main/blackjax/sgmcmc/sghmc.py",
"type": "Python"
}
|
# Copyright 2020- The Blackjax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public API for the Stochastic gradient Hamiltonian Monte Carlo kernel."""
from typing import Callable
import jax
import blackjax.sgmcmc.diffusions as diffusions
from blackjax.base import SamplingAlgorithm
from blackjax.types import ArrayLikeTree, ArrayTree, PRNGKey
from blackjax.util import generate_gaussian_noise
__all__ = ["init", "build_kernel", "as_top_level_api"]
def init(position: ArrayLikeTree) -> ArrayLikeTree:
    """Return the initial SGHMC state, which is simply the position pytree."""
    return position
def build_kernel(alpha: float = 0.01, beta: float = 0) -> Callable:
    """Stochastic gradient Hamiltonian Monte Carlo (SgHMC) algorithm."""
    integrator = diffusions.sghmc(alpha, beta)

    def kernel(
        rng_key: PRNGKey,
        position: ArrayLikeTree,
        grad_estimator: Callable,
        minibatch: ArrayLikeTree,
        step_size: float,
        num_integration_steps: int,
        temperature: float = 1.0,
    ) -> ArrayTree:
        """One SGHMC update: draw a fresh momentum, then run the sghmc
        diffusion for ``num_integration_steps`` steps and return the final
        position (momentum is discarded)."""

        def body_fn(state, rng_key):
            # One integrator step using the stochastic gradient at the
            # current position.
            position, momentum = state
            logdensity_grad = grad_estimator(position, minibatch)
            position, momentum = integrator(
                rng_key, position, momentum, logdensity_grad, step_size, temperature
            )
            return ((position, momentum), position)

        momentum = generate_gaussian_noise(rng_key, position)
        # NOTE(review): rng_key is reused for both the momentum draw above and
        # this split — confirm the reuse is intended upstream.
        keys = jax.random.split(rng_key, num_integration_steps)
        (position, momentum), _ = jax.lax.scan(body_fn, (position, momentum), keys)
        return position

    return kernel
def as_top_level_api(
    grad_estimator: Callable,
    num_integration_steps: int = 10,
    alpha: float = 0.01,
    beta: float = 0,
) -> SamplingAlgorithm:
    """Implements the (basic) user interface for the SGHMC kernel.

    The general sghmc kernel builder (:meth:`blackjax.sgmcmc.sghmc.build_kernel`, alias
    `blackjax.sghmc.build_kernel`) can be cumbersome to manipulate. Since most users
    only need to specify the kernel parameters at initialization time, we
    provide a helper function that specializes the general kernel.

    Example
    -------

    To initialize a SGHMC kernel one needs to specify a schedule function, which
    returns a step size at each sampling step, and a gradient estimator
    function. Here for a constant step size, and `data_size` data samples:

    .. code::

        grad_estimator = blackjax.sgmcmc.gradients.grad_estimator(logprior_fn, loglikelihood_fn, data_size)

    We can now initialize the sghmc kernel and the state. Like HMC, SGHMC needs the user to specify a number of integration steps.

    .. code::

        sghmc = blackjax.sghmc(grad_estimator, num_integration_steps)

    Assuming we have an iterator `batches` that yields batches of data we can
    perform one step:

    .. code::

        step_size = 1e-3
        minibatch = next(batches)
        new_position = sghmc.step(rng_key, position, minibatch, step_size)

    Kernels are not jit-compiled by default so you will need to do it manually:

    .. code::

        step = jax.jit(sghmc.step)
        new_position, info = step(rng_key, position, minibatch, step_size)

    Parameters
    ----------
    grad_estimator
        A function that takes a position, a batch of data and returns an estimation
        of the gradient of the log-density at this position.
    num_integration_steps
        Number of integrator steps taken per call to ``step``.
    alpha
        Friction coefficient passed to the sghmc diffusion.
    beta
        Noise-estimate coefficient passed to the sghmc diffusion.

    Returns
    -------
    A ``SamplingAlgorithm``.
    """
    kernel = build_kernel(alpha, beta)

    def init_fn(position: ArrayLikeTree, rng_key=None):
        # The RNG key is accepted only for interface uniformity.
        del rng_key
        return init(position)

    def step_fn(
        rng_key: PRNGKey,
        state: ArrayLikeTree,
        minibatch: ArrayLikeTree,
        step_size: float,
        temperature: float = 1,
    ) -> ArrayTree:
        # Specialize the general kernel with the fixed estimator and step count.
        return kernel(
            rng_key,
            state,
            grad_estimator,
            minibatch,
            step_size,
            num_integration_steps,
            temperature,
        )

    return SamplingAlgorithm(init_fn, step_fn)  # type: ignore[arg-type]
|
blackjax-devsREPO_NAMEblackjaxPATH_START.@blackjax_extracted@blackjax-main@blackjax@sgmcmc@sghmc.py@.PATH_END.py
|
{
"filename": "_surface.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/_surface.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SurfaceValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound-property validator for the plotly ``surface`` trace.

    NOTE(review): this module is auto-generated by the plotly code
    generator; fix the generator rather than editing this file by hand.
    The large ``data_docs`` string below is user-facing help text and is
    reproduced verbatim.
    """

    def __init__(self, plotly_name="surface", parent_name="", **kwargs):
        # Delegate to CompoundValidator; callers may override the data
        # class name and docs via kwargs (codegen convention).
        super(SurfaceValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Surface"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `colorscale`. In case
                `colorscale` is unspecified or `autocolorscale`
                is true, the default palette will be chosen
                according to whether numbers in the `color`
                array are all positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here z
                or surfacecolor) or the bounds set in `cmin`
                and `cmax` Defaults to `false` when `cmin` and
                `cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Value
                should have the same units as z or surfacecolor
                and if set, `cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `cmin` and/or `cmax` to be equidistant
                to this point. Value should have the same units
                as z or surfacecolor. Has no effect when
                `cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Value
                should have the same units as z or surfacecolor
                and if set, `cmax` must be set as well.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`plotly.graph_objects.surface.ColorBar`
                instance or dict with compatible properties
            colorscale
                Sets the colorscale. The colorscale must be an
                array containing arrays mapping a normalized
                value to an rgb, rgba, hex, hsl, hsv, or named
                color string. At minimum, a mapping for the
                lowest (0) and highest (1) values are required.
                For example, `[[0, 'rgb(0,0,255)'], [1,
                'rgb(255,0,0)']]`. To control the bounds of the
                colorscale in color space, use`cmin` and
                `cmax`. Alternatively, `colorscale` may be a
                palette name string of the following list: Grey
                s,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,
                Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth
                ,Electric,Viridis,Cividis.
            connectgaps
                Determines whether or not gaps (i.e. {nan} or
                missing values) in the `z` data are filled in.
            contours
                :class:`plotly.graph_objects.surface.Contours`
                instance or dict with compatible properties
            customdata
                Assigns extra data each datum. This may be
                useful when listening to hover, click and
                selection events. Note that, "scatter" traces
                also appends customdata items in the markers
                DOM elements
            customdatasrc
                Sets the source reference on Chart Studio Cloud
                for customdata .
            hidesurface
                Determines whether or not a surface is drawn.
                For example, set `hidesurface` to False
                `contours.x.show` to True and `contours.y.show`
                to True to draw a wire frame plot.
            hoverinfo
                Determines which trace information appear on
                hover. If `none` or `skip` are set, no
                information is displayed upon hovering. But, if
                `none` is set, click and hover events are still
                fired.
            hoverinfosrc
                Sets the source reference on Chart Studio Cloud
                for hoverinfo .
            hoverlabel
                :class:`plotly.graph_objects.surface.Hoverlabel
                ` instance or dict with compatible properties
            hovertemplate
                Template string used for rendering the
                information that appear on hover box. Note that
                this will override `hoverinfo`. Variables are
                inserted using %{variable}, for example "y:
                %{y}". Numbers are formatted using d3-format's
                syntax %{variable:d3-format}, for example
                "Price: %{y:$.2f}".
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
                for details on the formatting syntax. Dates are
                formatted using d3-time-format's syntax
                %{variable|d3-time-format}, for example "Day:
                %{2019-01-01|%A}".
                https://github.com/d3/d3-time-
                format#locale_format for details on the date
                formatting syntax. The variables available in
                `hovertemplate` are the ones emitted as event
                data described at this link
                https://plotly.com/javascript/plotlyjs-
                events/#event-data. Additionally, every
                attributes that can be specified per-point (the
                ones that are `arrayOk: true`) are available.
                Anything contained in tag `<extra>` is
                displayed in the secondary box, for example
                "<extra>{fullData.name}</extra>". To hide the
                secondary box completely, use an empty tag
                `<extra></extra>`.
            hovertemplatesrc
                Sets the source reference on Chart Studio Cloud
                for hovertemplate .
            hovertext
                Same as `text`.
            hovertextsrc
                Sets the source reference on Chart Studio Cloud
                for hovertext .
            ids
                Assigns id labels to each datum. These ids for
                object constancy of data points during
                animation. Should be an array of strings, not
                numbers or any other type.
            idssrc
                Sets the source reference on Chart Studio Cloud
                for ids .
            legendgroup
                Sets the legend group for this trace. Traces
                part of the same legend group hide/show at the
                same time when toggling legend items.
            lighting
                :class:`plotly.graph_objects.surface.Lighting`
                instance or dict with compatible properties
            lightposition
                :class:`plotly.graph_objects.surface.Lightposit
                ion` instance or dict with compatible
                properties
            meta
                Assigns extra meta information associated with
                this trace that can be used in various text
                attributes. Attributes such as trace `name`,
                graph, axis and colorbar `title.text`,
                annotation `text` `rangeselector`,
                `updatemenues` and `sliders` `label` text all
                support `meta`. To access the trace `meta`
                values in an attribute in the same trace,
                simply use `%{meta[i]}` where `i` is the index
                or key of the `meta` item in question. To
                access trace `meta` in layout attributes, use
                `%{data[n[.meta[i]}` where `i` is the index or
                key of the `meta` and `n` is the trace index.
            metasrc
                Sets the source reference on Chart Studio Cloud
                for meta .
            name
                Sets the trace name. The trace name appear as
                the legend item and on hover.
            opacity
                Sets the opacity of the surface. Please note
                that in the case of using high `opacity` values
                for example a value greater than or equal to
                0.5 on two surfaces (and 0.25 with four
                surfaces), an overlay of multiple transparent
                surfaces may not perfectly be sorted in depth
                by the webgl API. This behavior may be improved
                in the near future and is subject to change.
            opacityscale
                Sets the opacityscale. The opacityscale must be
                an array containing arrays mapping a normalized
                value to an opacity value. At minimum, a
                mapping for the lowest (0) and highest (1)
                values are required. For example, `[[0, 1],
                [0.5, 0.2], [1, 1]]` means that higher/lower
                values would have higher opacity values and
                those in the middle would be more transparent
                Alternatively, `opacityscale` may be a palette
                name string of the following list: 'min',
                'max', 'extremes' and 'uniform'. The default is
                'uniform'.
            reversescale
                Reverses the color mapping if true. If true,
                `cmin` will correspond to the last color in the
                array and `cmax` will correspond to the first
                color.
            scene
                Sets a reference between this trace's 3D
                coordinate system and a 3D scene. If "scene"
                (the default value), the (x,y,z) coordinates
                refer to `layout.scene`. If "scene2", the
                (x,y,z) coordinates refer to `layout.scene2`,
                and so on.
            showlegend
                Determines whether or not an item corresponding
                to this trace is shown in the legend.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace.
            stream
                :class:`plotly.graph_objects.surface.Stream`
                instance or dict with compatible properties
            surfacecolor
                Sets the surface color values, used for setting
                a color scale independent of `z`.
            surfacecolorsrc
                Sets the source reference on Chart Studio Cloud
                for surfacecolor .
            text
                Sets the text elements associated with each z
                value. If trace `hoverinfo` contains a "text"
                flag and "hovertext" is not set, these elements
                will be seen in the hover labels.
            textsrc
                Sets the source reference on Chart Studio Cloud
                for text .
            uid
                Assign an id to this trace, Use this to provide
                object constancy between traces during
                animations and transitions.
            uirevision
                Controls persistence of some user-driven
                changes to the trace: `constraintrange` in
                `parcoords` traces, as well as some `editable:
                true` modifications such as `name` and
                `colorbar.title`. Defaults to
                `layout.uirevision`. Note that other user-
                driven trace attribute changes are controlled
                by `layout` attributes: `trace.visible` is
                controlled by `layout.legend.uirevision`,
                `selectedpoints` is controlled by
                `layout.selectionrevision`, and
                `colorbar.(x|y)` (accessible with `config:
                {editable: true}`) is controlled by
                `layout.editrevision`. Trace changes are
                tracked by `uid`, which only falls back on
                trace index if no `uid` is provided. So if your
                app can add/remove traces before the end of the
                `data` array, such that the same trace has a
                different index, you can still preserve user-
                driven changes if you give each trace a `uid`
                that stays with it as it moves.
            visible
                Determines whether or not this trace is
                visible. If "legendonly", the trace is not
                drawn, but can appear as a legend item
                (provided that the legend itself is visible).
            x
                Sets the x coordinates.
            xcalendar
                Sets the calendar system to use with `x` date
                data.
            xsrc
                Sets the source reference on Chart Studio Cloud
                for x .
            y
                Sets the y coordinates.
            ycalendar
                Sets the calendar system to use with `y` date
                data.
            ysrc
                Sets the source reference on Chart Studio Cloud
                for y .
            z
                Sets the z coordinates.
            zcalendar
                Sets the calendar system to use with `z` date
                data.
            zsrc
                Sets the source reference on Chart Studio Cloud
                for z .
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@_surface.py@.PATH_END.py
|
{
"filename": "acweSaveSeg_v5.py",
"repo_name": "jalanderos/STRIDE-CH",
"repo_path": "STRIDE-CH_extracted/STRIDE-CH-main/acwe_lib/acweSaveSeg_v5.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 13:44:16 2020
Updated on Mon Feb 20 16:19:33 2023
@author: jgra
"""
# In[1]
# Import Libraries and Tools
import numpy as np
# In[2]
# Define Save function
def saveSeg(filename,seg,h,correct_limb_brightening,resize_param,
            foreground_weight,background_weight,init_mask,init_mask_method,
            fill_init_holes,init_alpha,alpha,narrowband,N,
            image_preprocess=None):
    '''
    Save an ACWE segmentation produced by acweFunctions_v4 or later,
    together with the original .fits header and a full record of the
    ACWE parameters, as a compressed .npz archive.

    Parameters
    ----------
    filename : str
        Full path where the final segmentation will be stored.
    seg : [bool] OR [float]
        [MxN] boolean array for a single segmentation, or [IxMxN]
        floating point array of I stacked segmentations in the case of
        a confidence map.
    h : dict
        Full header of the original .fits file. A header produced by
        astropy.io's ``fits.open`` is also accepted; it is converted to
        a plain dictionary before being stored.
    correct_limb_brightening : bool
        True if the limb-brightening correction of [2] was performed on
        the .fits file.
    resize_param : int
        Factor by which the image was downsampled prior to ACWE.
    foreground_weight : float
        Weight of the foreground (CH) homogeneity term in the ACWE
        energy functional.
    background_weight : float
        Weight of the background (quiet Sun and all remaining on-disk
        features) homogeneity term.
    init_mask : [bool]
        Initial mask used to seed the ACWE evolution.
    init_mask_method : str
        Description of how the initial mask was generated. For the
        traditional method of [1], use the pseudo-code string
        "alpha*mean(qs)"; otherwise provide a name or written
        description of the procedure.
    fill_init_holes : bool
        True if holes in the initial mask were filled before ACWE.
    init_alpha : float
        Initial threshold (alpha) used to build the first mask. Use NaN
        when the mask-generation method does not threshold.
    alpha : float
        Alpha actually used for the final mask; with the traditional
        method the initial mask can fail and alpha is incrementally
        updated until a successful segmentation is produced. Equal to
        ``init_alpha`` when no adjustment was needed; NaN when not
        applicable.
    narrowband : int
        Narrow-band constraint on the contour evolution, preventing
        overcorrection of the contour boundary.
    N : int
        Number of ACWE iterations between convergence checks.
    image_preprocess : str, optional
        Description of any additional processing applied to the solar
        EUV image prior to ACWE. Default: None.

    Outputs
    -------
    Writes a compressed .npz file at ``filename`` containing, in order:
    the original header (as a dict), a header describing the ACWE
    process, and the segmentation(s). Because two of the entries are
    pickled dictionaries, reopening the file requires
    ``numpy.load(..., allow_pickle=True)``.

    References
    ----------
    [1]
    L. E. Boucheron, M. Valluri, and R. T. J. McAteer, "Segmentation
    of Coronal Holes Using Active Contours Without Edges," Solar
    Physics, vol. 291, pp. 2353-2372, 2016.
    [2]
    C. Verbeeck, V. Delouille, B. Mampaey, & R. De Visscher, "The
    SPoCA-suite: Software for extraction, characterization and
    tracking of active regions and coronal holes on EUV images,"
    Astronomy & Astrophysics, vol. 561, pp. A29, 2014.
    '''
    # The .fits header may be an astropy header object; a plain dict
    # keeps the archive loadable without astropy installed.
    fits_header = dict(h)
    # Complete record of the ACWE configuration. The key spellings
    # 'Fill_INIT_HOLES' and 'ITTER_BETWEEN_CHK' are kept exactly as-is
    # for backward compatibility with existing archives and readers.
    acwe_header = {
        'CORRECT_LIMB_BRIGHTENING': correct_limb_brightening,
        'IMAGE_PREPROCESS': image_preprocess,
        'RESIZE_PARAM': resize_param,
        'FOREGROUND_WEIGHT': foreground_weight,
        'BACKGROUND_WEIGHT': background_weight,
        'INIT_MASK': init_mask,
        'INIT_MASK_METHOD': init_mask_method,
        'Fill_INIT_HOLES': fill_init_holes,
        'INIT_ALPHA': init_alpha,
        'ALPHA': alpha,
        'NARROWBAND': narrowband,
        'ITTER_BETWEEN_CHK': N,
    }
    # Positional order (fits header, ACWE header, segmentation) is the
    # on-disk contract that openSeg relies on.
    np.savez_compressed(filename, fits_header, acwe_header, seg)
# In[3]:
# Define open function
def openSeg(filename):
    '''
    Load a segmentation archive written by ``saveSeg``. Fully
    compatible with archives produced by acweSaveSeg_v2.py and later.

    Parameters
    ----------
    filename : str
        Full path of the stored segmentation archive.

    Returns
    -------
    FITSHEADER : dict
        Full header of the original .fits file used to generate the
        segmentation.
    ACWEHEADER : dict
        Header describing the ACWE process (see ``saveSeg``).
    SEG : [bool] OR [float]
        The ACWE segmentation(s).
    '''
    # allow_pickle is required because the two headers are stored as
    # pickled Python dictionaries inside the .npz archive.
    archive = np.load(filename, allow_pickle=True)
    keys = archive.files
    # Positional contract established by saveSeg:
    # (fits header, ACWE header, segmentation).
    fits_header = archive[keys[0]].tolist()
    acwe_header = archive[keys[1]].tolist()
    segmentation = archive[keys[2]]
    return fits_header, acwe_header, segmentation
|
jalanderosREPO_NAMESTRIDE-CHPATH_START.@STRIDE-CH_extracted@STRIDE-CH-main@acwe_lib@acweSaveSeg_v5.py@.PATH_END.py
|
{
"filename": "test_Acspy_Common_TimeHelper.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/acspycommon/test/test_Acspy_Common_TimeHelper.py",
"type": "Python"
}
|
#! /usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) National Research Council of Canada, 2008
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# "@(#) $Id: test_Acspy_Common_TimeHelper.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $"
#
# who when what
# -------- -------- ----------------------------------------------
# arne 2008-04-08 created
#
#------------------------------------------------------------------------------
__revision__ = "$Id: test_Acspy_Common_TimeHelper.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $"
#--REGULAR IMPORTS-------------------------------------------------------------
import unittest
import time
import acstime
#--ACS IMPORTS____-------------------------------------------------------------
import Acspy.Common.TimeHelper as TimeHelper
#------------------------------------------------------------------------------
def fixedTime():
    """Deterministic stand-in for time.time() used by the test fixtures."""
    # A fixed POSIX timestamp so the ACS-timestamp assertions are stable.
    frozen_instant = 1207768989.9850370
    return frozen_instant
class GetTimeStampCheck(unittest.TestCase):
    """Test that the getTimeStamp function is working correctly."""
    def setUp(self):
        # Monkey-patch time.time with the deterministic fixedTime clock
        # so getTimeStamp produces a reproducible ACS timestamp.
        self.savetime = time.time
        time.time = fixedTime
    def tearDown(self):
        # Restore the real clock so other tests see genuine time again.
        time.time = self.savetime
    def testKnownDate(self):
        '''getTimeStamp reports correct ACS timestamp for a known value'''
        rtn = TimeHelper.getTimeStamp()
        self.assertEqual(True, isinstance(rtn, acstime.Epoch))
        # Expected value: the fixed POSIX time converted to the ACS
        # epoch scale (100 ns units offset by acstime.ACE_BEGIN).
        # NOTE: the long literal below is Python 2 syntax.
        self.assertEqual(134270617899850370L, rtn.value)
class TimeUtilCheck(unittest.TestCase):
    """Test that the TimeUtil class works correctly."""
    # Conversions under test: Python POSIX seconds <-> ACS Epoch
    # (100 ns units offset by acstime.ACE_BEGIN) and Python second
    # spans <-> ACS Duration (100 ns units).
    def setUp(self):
        # Fresh converter per test; mirrors how client code uses it.
        self.th = TimeHelper.TimeUtil()
        pass
    def tearDown(self):
        pass
    def testPy2EpochEpoch(self):
        '''TimeUtil.py2epoch handles Python epoch correctly'''
        # Python epoch (0 s) must land exactly on acstime.ACE_BEGIN.
        rtn = self.th.py2epoch(0)
        self.assertEqual(True, isinstance(rtn, acstime.Epoch))
        self.assertEqual(acstime.ACE_BEGIN, rtn.value)
    def testPy2EpochNegative(self):
        '''TimeUtil.py2epoch handles negative values correctly '''
        # -1 s is one second (10^7 hundred-ns units) before ACE_BEGIN.
        rtn = self.th.py2epoch(-1)
        self.assertEqual(10000000L, acstime.ACE_BEGIN - rtn.value)
    def testPy2EpochACSEpoch(self):
        '''TimeUtil.py2epoch handles ACS epoch correctly '''
        # Feeding the (negative) ACS epoch offset must yield value 0.
        rtn = self.th.py2epoch(-acstime.ACE_BEGIN / 10000000L)
        self.assertEqual(0, rtn.value)
    def testEpoch2PyEpoch(self):
        '''TimeUtil.epoch2py handles ACS epoch correctly'''
        rtn = self.th.epoch2py(acstime.Epoch(0))
        self.assertEqual(True, isinstance(rtn, long))
        self.assertEqual(-acstime.ACE_BEGIN / 10000000L, rtn)
    def testEpoch2PyPyEpoch(self):
        '''TimeUtil.epoch2py handles Python epoch correctly'''
        rtn = self.th.epoch2py(acstime.Epoch(acstime.ACE_BEGIN))
        self.assertEqual(0L, rtn)
    def testEpoch2PyNegative(self):
        '''TimeUtil.epoch2py handles negative values correctly '''
        rtn = self.th.epoch2py(acstime.Epoch(acstime.ACE_BEGIN - 10000000L))
        self.assertEqual(-1L, rtn)
        pass
    def testEpoch2PyLong(self):
        '''TimeUtil.epoch2py handles long values correctly '''
        # epoch2py also accepts a bare long instead of an Epoch object.
        rtn = self.th.epoch2py(long(acstime.ACE_BEGIN))
        self.assertEqual(0L, rtn)
    def testPy2DurationZero(self):
        '''TimeUtil.py2duration handles 0 value correctly'''
        rtn = self.th.py2duration(0)
        self.assertEqual(True, isinstance(rtn, acstime.Duration))
        self.assertEqual(0, rtn.value)
    def testPy2DurationNegative(self):
        '''TimeUtil.py2duration handles negative values correctly '''
        # -1 s maps to -10^7 hundred-ns units.
        rtn = self.th.py2duration(-1)
        self.assertEqual(-10000000L, rtn.value)
    def testDuration2PyZero(self):
        '''TimeUtil.duration2py handles 0 value correctly'''
        rtn = self.th.duration2py(acstime.Duration(0))
        self.assertEqual(True, isinstance(rtn, long))
        self.assertEqual(0, rtn)
    def testDuration2PyNegative(self):
        '''TimeUtil.duration2py handles negative values correctly '''
        rtn = self.th.duration2py(acstime.Duration(-1))
        self.assertEqual(-1, rtn)
    def testDuration2PyLong(self):
        '''TimeUtil.duration2py handles long values correctly '''
        # duration2py also accepts a bare long instead of a Duration.
        rtn = self.th.duration2py(0L)
        self.assertEqual(0L, rtn)
def suite():
    """Assemble and return this module's complete unittest suite."""
    result = unittest.TestSuite()
    for case in (GetTimeStampCheck, TimeUtilCheck):
        result.addTest(unittest.makeSuite(case))
    return result
# Run the full suite when this file is executed directly as a script.
if __name__ == "__main__":
    unittest.main(defaultTest='suite')
#
# ___oOo___
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@acspycommon@test@test_Acspy_Common_TimeHelper.py@.PATH_END.py
|
{
"filename": "online_storage.py",
"repo_name": "renecotyfanboy/jaxspec",
"repo_path": "jaxspec_extracted/jaxspec-main/src/jaxspec/util/online_storage.py",
"type": "Python"
}
|
import pooch
# Pooch downloader/cache for jaxspec's reference tables and example data.
# Files are fetched lazily from the companion GitHub repository and
# verified against the sha256 hashes below.
table_manager = pooch.create(
    # Use the default cache folder for the operating system
    path=pooch.os_cache("jaxspec"),
    base_url="https://github.com/renecotyfanboy/jaxspec-database/raw/main/",
    # The registry specifies the files that can be fetched
    registry={
        "abundances.dat": "sha256:6a7826331f0de308af4631eed5c3b65accda99cd1aa8766f54119dd285b57992",
        "apec.nc": "sha256:52e10e1e4147453890dac68845a1a629954283579eac602419634d43d3c101f9",
        "xsect_tbabs_wilm.fits": "sha256:3cf45e45c9d671c4c4fc128314b7c3a68b30f096eede6b3eb08bf55224a44935",
        "xsect_phabs_aspl.fits": "sha256:3eaffba2a62e3a611e0a4e1ff4a57342d7d576f023d7bbb632710dc75b9a5019",
        "xsect_wabs_angr.fits": "sha256:9b3073a477a30b52e207f2c4bf79afc6ae19abba8f207190ac4c697024f74073",
        "nsatmosdata.fits": "sha256:faca712f87710ecb866b4ab61be593a6813517c44f6e8e92d689b38d42e1b6dc",
        "example_data/NGC7793_ULX4/MOS2background_spectrum.fits": "sha256:5387923be0bf39229f4390dd5e85095a3d534b43a69d6d3179b832ebb366d173",
        "example_data/NGC7793_ULX4/MOS1background_spectrum.fits": "sha256:265fd7465fb1a355f915d9902443ba2fd2be9aface04723056a8376971e3cf14",
        "example_data/NGC7793_ULX4/MOS2.rmf": "sha256:b6af00603dece33dcda35d093451c947059af2e1e45c31c5a0ffa223b7fb693d",
        "example_data/NGC7793_ULX4/PN.arf": "sha256:0ee897a63b6de80589c2da758d7477c54ba601b788bf32d4d16bbffa839acb73",
        "example_data/NGC7793_ULX4/MOS1.rmf": "sha256:2d1138d22c31c5398a4eed1170b0b88b07350ecfec7a7aef4f550871cb4309ae",
        "example_data/NGC7793_ULX4/PN_spectrum_grp20.fits": "sha256:a985e06076bf060d3a5331f20413afa9208a8d771fa6c671e8918a1860577c90",
        "example_data/NGC7793_ULX4/MOS2_spectrum_grp.fits": "sha256:dccc7eda9d3d2e4aac2af4ca13d41ab4acc621265004d50a1586a187f7a04ffc",
        "example_data/NGC7793_ULX4/MOS1_spectrum_grp.fits": "sha256:7e1ff664545bab4fdce1ef94768715b4d87a39b252b61e070e71427e5c8692ac",
        "example_data/NGC7793_ULX4/MOS1.arf": "sha256:9017ada6a391d46f9b569b8d0338fbabb62a5397e7c29eb0a16e4e02d4868159",
        "example_data/NGC7793_ULX4/PN.rmf": "sha256:91ba9ef82da8b9f73e6a799dfe097b87c68a7020ac6c5aa0dcd4067bf9cb4287",
        "example_data/NGC7793_ULX4/MOS2.arf": "sha256:a126ff5a95a5f4bb93ed846944cf411d6e1c448626cb73d347e33324663d8b3f",
        "example_data/NGC7793_ULX4/PNbackground_spectrum.fits": "sha256:55e017e0c19b324245fef049dff2a7a2e49b9a391667ca9c4f667c4f683b1f49",
    },
)
|
renecotyfanboyREPO_NAMEjaxspecPATH_START.@jaxspec_extracted@jaxspec-main@src@jaxspec@util@online_storage.py@.PATH_END.py
|
{
"filename": "exceptions.py",
"repo_name": "astropy/astroplan",
"repo_path": "astroplan_extracted/astroplan-main/astroplan/exceptions.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.exceptions import AstropyWarning
# Public warning categories exported by astroplan.exceptions.
__all__ = ["TargetAlwaysUpWarning", "TargetNeverUpWarning",
           "OldEarthOrientationDataWarning", "PlotWarning",
           "PlotBelowHorizonWarning", "AstroplanWarning",
           "MissingConstraintWarning"]
class AstroplanWarning(AstropyWarning):
    """Base class for every warning category emitted by astroplan."""
class TargetAlwaysUpWarning(AstroplanWarning):
    """Warning issued when a target is circumpolar (never sets)."""
class TargetNeverUpWarning(AstroplanWarning):
    """Warning issued when a target never rises above the horizon."""
class OldEarthOrientationDataWarning(AstroplanWarning):
    """Warning issued when stale IERS Earth-rotation data is in use."""
class PlotWarning(AstroplanWarning):
    """Base category for warnings raised by astroplan's plotting code."""
class PlotBelowHorizonWarning(PlotWarning):
    """Warning that a plotted item is hidden because it is below the horizon."""
class MissingConstraintWarning(AstroplanWarning):
    """Warning issued when an expected constraint was not supplied."""
|
astropyREPO_NAMEastroplanPATH_START.@astroplan_extracted@astroplan-main@astroplan@exceptions.py@.PATH_END.py
|
{
"filename": "_opacity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/box/unselected/marker/_opacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``box.unselected.marker.opacity`` (range 0-1)."""

    def __init__(
        self, plotly_name="opacity", parent_name="box.unselected.marker", **kwargs
    ):
        # Resolve the overridable defaults before delegating upward.
        edit_type = kwargs.pop("edit_type", "style")
        upper = kwargs.pop("max", 1)
        lower = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=upper,
            min=lower,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@box@unselected@marker@_opacity.py@.PATH_END.py
|
{
"filename": "_weight.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choropleth/legendgrouptitle/font/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for ``choropleth.legendgrouptitle.font.weight``.

    Accepts 1-1000 or the keywords "normal"/"bold".
    """

    def __init__(
        self,
        plotly_name="weight",
        parent_name="choropleth.legendgrouptitle.font",
        **kwargs,
    ):
        # Resolve the overridable defaults before delegating upward.
        edit_type = kwargs.pop("edit_type", "style")
        extras = kwargs.pop("extras", ["normal", "bold"])
        upper = kwargs.pop("max", 1000)
        lower = kwargs.pop("min", 1)
        super(WeightValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            extras=extras,
            max=upper,
            min=lower,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choropleth@legendgrouptitle@font@_weight.py@.PATH_END.py
|
{
"filename": "interface_generator.py",
"repo_name": "JulianBMunoz/RelicFast",
"repo_path": "RelicFast_extracted/RelicFast-master/CLASS_Current/python/interface_generator.py",
"type": "Python"
}
|
"""
Automatically reads header files to generate an interface
"""
from __future__ import division, print_function
import sys
import logging
try:
from collections import OrderedDict as od
except ImportError:
try:
from ordereddict import OrderedDict as od
except ImportError:
raise ImportError(
"If you are running with Python v2.5 or 2.6"
" you need to manually install the ordereddict"
" package.")
try:
import colorlog
except ImportError:
raise ImportError(
"You have to install the colorlog module"
" with pip, or easy-install.")
# Indentation unit emitted into the generated classy.pyx file.
SPACING = ' '
# Maps each CLASS C struct name to (a) the attribute name exposed on the
# Python side and (b) the prefix of the corresponding C module functions
# (e.g. struct `thermo` -> `thermodynamics_init`).
NAMING_CONVENTION = {
    'precision': {'python': 'precision',
                  'function': 'precision'},
    'background': {'python': 'background',
                   'function': 'background'},
    'thermo': {'python': 'thermodynamics',
               'function': 'thermodynamics'},
    'perturbs': {'python': 'perturbations',
                 'function': 'perturb'},
    'transfers': {'python': 'transfer',
                  'function': 'transfer'},
    'primordial': {'python': 'primordial',
                   'function': 'primordial'},
    'spectra': {'python': 'spectra',
                'function': 'spectra'},
    'lensing': {'python': 'lensing',
                'function': 'lensing'},
    'nonlinear': {'python': 'nonlinear',
                  'function': 'nonlinear'},
    'output': {'python': 'output',
               'function': 'output'},
}
def main():
    """Generate classy.pyx by parsing the CLASS public header files.

    Reads ../include/class.h to discover the per-module headers, writes
    the Cython declarations for each struct found, then emits a Python
    wrapper class per struct.
    """
    # create logger
    logger = create_logger()
    # Recover all sub-header files
    main_header = '../include/class.h'
    headers = []
    with open(main_header, 'r') as header_file:
        in_modules = False
        for line in header_file:
            if in_modules:
                # A blank line terminates the "class modules" include list.
                if line.strip() == '':
                    in_modules = False
                    continue
                # common.h and input.h are handled manually, not generated.
                if line.find('common') == -1 and line.find('input') == -1:
                    headers.append(
                        '../include/%s' % line.split()[-1].strip('"'))
            if line.find('class modules') != -1:
                in_modules = True
    logger.info('Extracted the following headers: %s', ', '.join(headers))
    output = 'classy.pyx'
    logger.info('Creating %s', output)
    # Ordered so generated wrappers follow the CLASS module ordering.
    structs = od()
    output_file = open(output, 'w')
    write_imports(output_file)
    output_file.write('cdef extern from "class.h":\n')
    # First write the first non automatic bits
    output_file.write(
        SPACING+'ctypedef char FileArg[40]\n' +
        SPACING+'ctypedef char* ErrorMsg\n' +
        SPACING+'cdef struct precision:\n' +
        2*SPACING+'ErrorMsg error_message\n\n' +
        SPACING+'cdef int _FAILURE_\n' +
        SPACING+'cdef int _FALSE_\n' +
        SPACING+'cdef int _TRUE_\n')
    for header in headers:
        extract_headers(header, structs, output_file, logger)
    logger.info("Finished extracting headers")
    for struct_name, struct in structs.items():
        create_wrapper_class(struct_name, struct, output_file, logger)
    return
def extract_headers(header, structs, output_file, logger):
"""toto"""
# Initialise the two flags controlling the exploration of the main
# structure
in_struct, main_struct_finished = False, False
# Flags for exploring enums (only the ones before the struct)
in_enum = False
# flag dealing with extracting docstrings
comment_partially_recovered = False
# Flag keeping track of multiple variables
multiple_var = False
# Flag recovering the functions
in_function_definitions, in_function, in_init = False, False, False
with open(header, 'r') as header_file:
logger.info("reading %s" % header)
for line in header_file:
# First case, recover the enums
if not main_struct_finished and not in_struct:
if line.find("enum ") != -1 and line.find("{") != -1:
enum_members = []
if line.find(";") == -1:
in_enum = True
enum_name = line.strip("enum").strip().strip('{')
else:
in_enum = False
line = line.strip("enum").strip().strip(';')
enum_name, enum_sign = line.split(' ', 1)
enum_sign = enum_sign.strip('}').strip('{')
for elem in enum_sign.split(','):
enum_members.append(elem.strip())
output_file.write(
SPACING + 'cdef enum %s:\n' % enum_name)
for elem in enum_members:
output_file.write(2*SPACING + elem + '\n')
output_file.write('\n')
elif in_enum:
if line.find('};') != -1:
in_enum = False
output_file.write(
SPACING + 'cdef enum %s:\n' % enum_name)
for elem in enum_members:
output_file.write(2*SPACING + elem + '\n')
output_file.write('\n')
else:
if line.strip() != '':
enum_members.append(line.split()[0].strip().strip(','))
if line.find("struct ") != -1 and not main_struct_finished:
in_struct = True
# Recover the name
logger.debug("in struct: %s" % line)
struct_name = line.strip().split()[1]
logger.debug("struct name: %s" % struct_name)
structs[struct_name] = {}
structs[struct_name].update(
NAMING_CONVENTION[struct_name])
output_file.write("%scdef struct %s:\n" % (
SPACING, struct_name))
continue
elif in_struct:
if line.find("};\n") != -1:
output_file.write('\n')
in_struct, main_struct_finished = False, True
else:
# if the line is not empty or does not contain only a
# comment:
if line.strip() == '' or line.strip()[:2] == '/*':
continue
logger.debug(
"potentially non empty line: %s" % line.strip())
#elif line.find('/**') != -1 or line.find('*/') != -1:
#continue
if line.find(';') == -1 and not comment_partially_recovered:
logger.debug("--> Discarded")
continue
elif line.find(';') != -1 and not comment_partially_recovered:
var_doc = ''
var_part, begin_comment = line.strip().split(';', 1)
var_doc += begin_comment.strip()[4:].strip()
# 2 things can happen: there can be arrays, and there
# can be several variables defined in one line...
# If array, slightly more complex
if var_part.find('*') != -1:
# if no comma is found, it means it is a single
# variable: good !
if var_part.find(',') == -1:
# remove if commented (starts with /*)
if var_part[:2] in ['/*', '//']:
continue
multiple_var = False
var_type, var_stars, var_name = var_part.strip().split()
structs[struct_name][var_name] = [
var_type, var_stars]
else:
# Count how many variables are defined
multiple_var = True
all_vars = [elem.strip() for elem in
var_part.split('*')[-1].split(',')]
var_type, var_stars = (var_part.strip().
split()[:2])
for var in all_vars:
structs[struct_name][var] = [
var_type, var_stars]
else:
# Again check for more than one variable
var_stars = ''
if var_part.find(',') == -1:
multiple_var = False
var_type, var_name = var_part.strip().split(' ', 1)
# Check if enum
if var_type == 'enum':
enum_name, var_name = var_name.split()
var_type += ' '+enum_name
structs[struct_name][var_name] = [
var_type, var_stars]
else:
multiple_var = True
all_vars = [elem.strip() for elem in
var_part.split()[2:].split(',')]
var_type = (var_part.strip().split()[0])
for var in all_vars:
structs[struct_name][var] = [
var_type, var_stars]
# If the comment is finished, pass
if var_doc[-2:] != '*/':
comment_partially_recovered = True
else:
var_doc = var_doc[:-2].replace('\\f$', '$').strip()
structs[struct_name][var_name].append(var_doc)
logger.debug(
"extracted the variable %s, " % var_name +
"of type %s, with docstring: %s" % (
''.join([var_stars, var_type]), var_doc))
if not multiple_var:
output_file.write(2*SPACING+' '.join(
[elem for elem in [var_type, var_stars, var_name]
if elem])+'\n')
else:
for var in all_vars:
output_file.write(2*SPACING+' '.join(
[elem for elem in [var_type, var_stars, var]
if elem])+'\n')
if comment_partially_recovered:
logger.debug("--> Accepted")
var_doc += ' '+line.strip()
if var_doc[-2:] == '*/':
comment_partially_recovered = False
var_doc = var_doc[:-2].replace('\\f$', '$').strip()
structs[struct_name][var_name].append(var_doc)
logger.debug(
"extracted the variable %s, " % var_name +
"of type %s, with docstring: %s" % (
''.join([var_stars, var_type]), var_doc))
elif main_struct_finished:
if line.find('extern "C"') != -1:
in_function_definitions = True
if not in_function_definitions:
continue
else:
if line.find('(') != -1:
in_function = True
logger.debug("Found a function")
func_type, func_name = line.split('(')[0].strip().split()
logger.debug('%s %s' % (func_name, func_type))
func_param = []
if func_name == structs[struct_name]['function']+'_init':
logger.info("found the init function")
in_init = True
structs[struct_name]['init'] = [func_name]
output_file.write(SPACING+'%s %s(' % (
func_type, func_name))
elif in_function:
# recover the signature of the function
line = line.strip().strip(',')
if line.find('struct') != -1:
if in_init:
name = line.split('*')[0].strip()[7:]
structs[struct_name]['init'].append(name)
func_param.append('void *')
elif line.find('*') != -1:
# Taking into account with or without spaces
temp = ''.join(line.strip(',').split())
last_star = len(temp)-temp[::-1].find('*')
func_param.append(temp[:last_star])
elif line.find(')') == -1:
if line != '':
func_param.append(line.split()[0])
else:
logger.debug('signature extracted')
in_function = False
if in_init:
in_init = False
output_file.write(', '.join(func_param) + ')\n')
elif line.find('}') != -1:
output_file.write('\n')
in_function_definitions = False
#print line.strip()
def create_wrapper_class(struct_name, struct, of, logger):
    """Emit a Cython ``cdef class`` wrapper for one C struct.

    Writes to the open output stream *of* a Python-visible class that
    holds a ``_<struct_name>`` C struct, calls ``<struct_name>_init`` in
    ``__init__``, and exposes every struct member as a property (scalar
    members directly, pointer members through an ``ArrayWrapper``-backed
    numpy array).

    Parameters
    ----------
    struct_name : str
        Name of the C struct being wrapped.
    struct : dict
        Maps each member name to ``[c_type, stars(, docstring)]``; also
        carries the special keys ``'init'`` (init-function name followed
        by the companion struct names it takes) and the keys merged in
        from ``NAMING_CONVENTION`` (e.g. ``'python'``, ``'function'``).
    of : file-like
        Writable stream receiving the generated Cython source.
    logger : logging.Logger
        Logger instance (only referenced by the commented-out debug
        lines; kept for interface compatibility).
    """
    of.write('# Defining wrapper around struct %s\n' % struct_name)
    of.write('cdef class %s:\n' % (
        NAMING_CONVENTION[struct_name]['python'].capitalize()))
    # Recover the companion structs required by the init function
    # (struct['init'] is [init_function_name, companion, companion, ...]).
    argument_names = struct['init'][1:]
    for companion in argument_names:
        of.write(SPACING + 'cdef %s _%s\n' % (companion, companion))
    of.write('\n')
    # Declare an ndarray holder for every pointer member; remember which
    # members are arrays (have stars) and which are plain scalars.
    array_variables = []
    variables = []
    for key, value in struct.items():
        if key != 'init':
            if value[1]:
                array_variables.append(key)
                variables.append(key)
                of.write(SPACING + 'cdef np.ndarray %s_arr\n' % key)
            else:
                variables.append(key)
    of.write('\n')
    # Write the __init__, taking the already-wrapped companion objects.
    of.write(SPACING + 'def __init__(self')
    for companion in argument_names:
        of.write(", %s py_%s" % (
            NAMING_CONVENTION[companion]['python'].capitalize(), companion))
    of.write('):\n\n')
    # Point the companion struct members at the caller-owned structs.
    for companion in argument_names:
        of.write(2 * SPACING + "self._%s = py_%s._%s\n" % (
            companion, companion, companion))
    # Emit the call to <struct_name>_init(companions..., &self._struct).
    of.write(2 * SPACING + '%s_init(\n' % struct_name)
    for companion in argument_names:
        of.write(3 * SPACING + '&(self._%s),\n' % companion)
    of.write(3 * SPACING + '&(self._%s))\n\n' % struct_name)
    for array in array_variables:
        of.write(2 * SPACING + '# Wrapping %s\n' % array)
        of.write(2 * SPACING + '%s_wrapper = ArrayWrapper()\n' % array)
        # BUG FIX: struct[array] is a [c_type, stars(, doc)] list, so the
        # element type string is struct[array][0]; the previous code called
        # .strip('*') on the list itself, raising AttributeError.
        # NOTE(review): the array length passed to set_data is still a
        # hard-coded 2 — confirm the intended length against the C side.
        of.write(
            2 * SPACING + "%s_wrapper.set_data(%d, '%s', "
            "<void*> self._%s.%s)\n" % (
                array, 2, struct[array][0].strip('*'), struct_name, array))
        of.write(
            2 * SPACING + 'self.%s_arr = np.array(%s_wrapper, '
            'copy=False)\n' % (
                array, array))
        of.write(2 * SPACING + 'self.%s_arr.base = '
                 '<PyObject*> %s_wrapper\n' % (
                     array, array))
        of.write(2 * SPACING + 'Py_INCREF(%s_wrapper)\n\n' % array)
    # Write one property per struct member: scalars forward to the C
    # struct field, arrays forward to the numpy view created above.
    for key in variables:
        of.write(SPACING + 'property %s:\n' % key)
        if key not in array_variables:
            of.write(2 * SPACING + 'def __get__(self):\n')
            of.write(3 * SPACING + 'return self._%s.%s\n' % (struct_name, key))
            of.write(2 * SPACING + 'def __set__(self, rhs):\n')
            of.write(3 * SPACING + 'self._%s.%s = rhs\n' % (struct_name, key))
        else:
            of.write(2 * SPACING + 'def __get__(self):\n')
            of.write(3 * SPACING + 'return self.%s_arr\n' % key)
            of.write(2 * SPACING + 'def __set__(self, rhs):\n')
            of.write(3 * SPACING + 'self.%s_arr[:] = rhs\n' % key)
        of.write('\n')
    # Add blank lines between generated classes.
    of.write('\n\n')
def write_imports(output_file):
    """Write the boilerplate header of the generated .pyx file.

    Emits the C-level/Python-level imports, the mandatory
    ``np.import_array()`` call, and the ``ArrayWrapper`` helper class
    that exposes a C array to numpy without copying.

    Parameters
    ----------
    output_file : file-like
        Open, writable stream for the generated Cython source.
    """
    boilerplate = '''# Author: Gael Varoquaux
# License: BSD

from libc.stdlib cimport free
from cpython cimport PyObject, Py_INCREF

# Import the Python-level symbols of numpy
import numpy as np

# Import the C-level symbols of numpy
cimport numpy as np

# Numpy must be initialized. When using numpy from C or Cython you must
# _always_ do that, or you will have segfaults
np.import_array()


cdef class ArrayWrapper:
    cdef void* data_ptr
    cdef int size
    cdef int type

    cdef set_data(self, int size, char* type, void* data_ptr):
        """ Set the data of the array

        This cannot be done in the constructor as it must recieve C-level
        arguments.

        Parameters:
        -----------
        size: int
            Length of the array.
        data_ptr: void*
            Pointer to the data
        """
        self.data_ptr = data_ptr
        self.size = size
        if type.find('int') != -1:
            self.type = np.NPY_INT
        elif type.find('float') != -1:
            self.type = np.NPY_FLOAT
        elif type.find('double') != -1:
            self.type = np.NPY_DOUBLE
        elif type.find('long') != -1:
            self.type = np.NPY_LONG

    def __array__(self):
        """ Here we use the __array__ method, that is called when numpy
        tries to get an array from the object."""
        cdef np.npy_intp shape[1]
        shape[0] = <np.npy_intp> self.size
        # Create a 1D array, of length 'size'
        ndarray = np.PyArray_SimpleNewFromData(1, shape,
                                               self.type, self.data_ptr)
        return ndarray

    def __dealloc__(self):
        """ Frees the array. This is called by Python when all the
        references to the object are gone. """
        free(<void*>self.data_ptr)\n\n'''
    output_file.write(boilerplate)
def create_logger():
    """Create and configure the console logger used by this script.

    Returns a logger named 'simple_example' that writes INFO-and-above
    records to the console through a ``colorlog.ColoredFormatter`` so
    the level name and function name are colour-coded.

    :return: configured ``logging.Logger`` instance
    """
    logger = logging.getLogger('simple_example')
    #logger.setLevel(logging.DEBUG)
    logger.setLevel(logging.INFO)

    # create console handler and set level to debug
    console_handler = logging.StreamHandler()
    #console_handler.setLevel(logging.DEBUG)
    console_handler.setLevel(logging.INFO)

    # create formatter
    # (plain, colourless variant kept below for reference)
    #formatter = logging.Formatter(
    #    "%(asctime)s %(module)s: L%(lineno) 4s %(funcName) 15s"
    #    " | %(levelname) -10s --> %(message)s")
    # NOTE: relies on the third-party 'colorlog' package imported at
    # module level.
    formatter = colorlog.ColoredFormatter(
        "%(asctime)s %(module)s: L%(lineno) 4s %(blue)s%(funcName) 15s%(reset)s"
        " | %(log_color)s%(levelname) -10s --> %(message)s%(reset)s",
        datefmt=None,
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red',
        })

    # add formatter to console_handler
    console_handler.setFormatter(formatter)

    # add console_handler to logger
    logger.addHandler(console_handler)
    return logger
# Script entry point: run main() and propagate its return code to the shell.
if __name__ == "__main__":
    sys.exit(main())
|
JulianBMunozREPO_NAMERelicFastPATH_START.@RelicFast_extracted@RelicFast-master@CLASS_Current@python@interface_generator.py@.PATH_END.py
|
{
"filename": "bare_rock.ipynb",
"repo_name": "nichollsh/AGNI",
"repo_path": "AGNI_extracted/AGNI-main/misc/bare_rock.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import root_scalar
from scipy.interpolate import PchipInterpolator
```
```python
# Constants (SI units)
AU = 1.495979e+11
sigma = 5.670374419e-8
h_pl = 6.62607015e-34
k_B = 1.380649e-23
c_vac = 299792458.0
```
```python
# Parameters
a = 0.1 # Orbital distance [AU]
wave_min = 100.0 # nm
wave_max = 1e5 # nm
wave_bin = 3000 # integer
star_path = "../res/stellar_spectra/sun.txt"
alb_path = "../res/surface_albedos/c9mb29.dat"
```
```python
# Setup wavelength grid for calculation
obs_pts = np.logspace(np.log10(wave_min), np.log10(wave_max), wave_bin)
obs_bc = (obs_pts[1:] + obs_pts[:-1]) * 0.5 # bin centres
obs_bw = obs_pts[1:] - obs_pts[:-1] # bin widths
```
```python
# Load stellar spectrum
star_data = np.loadtxt(star_path).T
star_wl = star_data[0]
star_fl = star_data[1] * 0.001 # W m-2 nm-1
star_itp = PchipInterpolator(star_wl, star_fl) # At 1 AU
```
```python
# Load albedo data
alb_data = np.loadtxt(alb_path).T
alb_wl = list(alb_data[0])
alb_al = list(alb_data[1])
# extend range left
alb_wl = [0.0] + alb_wl
alb_al = [alb_al[0]] + alb_al
# extend range right
alb_wl.append(1e9)
alb_al.append(alb_al[-1])
# create interpolator
alb_itp = PchipInterpolator(alb_wl, alb_al)
# plot
plt.close("all")
fig,ax = plt.subplots(figsize=(5,2))
ax.plot(obs_bc, alb_itp(obs_bc))
ax.set_xlim(left=100, right=5e3)
ax.set(xlabel="Wavelength [nm]", ylabel=r"Surface albedo")
plt.show()
```

```python
def evaluate_planck(wav:float, tmp:float):
'''
Evaluate the planck function at a given wavelength and temperature
'''
# Output value
flx = 0.0
# Convert nm to m
wav = wav * 1.0e-9
# Calculate planck function value [W m-2 sr-1 m-1]
# http://spiff.rit.edu/classes/phys317/lectures/planck.html
flx = 2.0 * h_pl * c_vac * (c_vac / wav**5.0) / ( np.exp(h_pl * c_vac / (wav * k_B * tmp)) - 1.0)
# Integrate solid angle (hemisphere), convert units
flx = flx * np.pi * 1.0e-9 # [W m-2 nm-1]
return flx
```
```python
def fluxes(wav:float, wid:float, tmp:float):
'''
Calculate up/down fluxes at the surface of the planet at a given wavelength bin
'''
alb_s = alb_itp(wav)
eps_s = 1-alb_s
out = {} # W m-2
thermal = evaluate_planck(wav, tmp) * wid
out["LW_UP"] = thermal * eps_s
out["LW_DN"] = 0.0
stellar = star_itp(wav) * wid * (1/a)**2
out["SW_DN"] = stellar
out["SW_UP"] = stellar * alb_s
return out
```
```python
def residual(tmp:float):
'''
Calculate bolometric residual flux at the surface
'''
up = 0.0
dn = 0.0
for i in range(len(obs_bc)):
bc = obs_bc[i]
bw = obs_bw[i]
f = fluxes(bc, bw, tmp)
up += f["LW_UP"] + f["SW_UP"]
dn += f["LW_DN"] + f["SW_DN"]
return dn-up
```
```python
# Solve for surface temperature which satisfies radiative equilibrium
sol = root_scalar(residual, x0=2000.0)
sol
```
converged: True
flag: converged
function_calls: 14
iterations: 7
root: 1300.8563410349468
method: newton
```python
def emission_spectrum(tmp:float):
'''
Calculate emission spectrum (components) at a given temperature
'''
out = {
"LW_UP":[],
"SW_UP":[],
"SW_DN":[],
"UP":[]
}
for i in range(len(obs_bw)):
bc = obs_bc[i]
bw = obs_bw[i]
f = fluxes(bc, bw, tmp)
out["LW_UP"].append(f["LW_UP"]/bw)
out["SW_UP"].append(f["SW_UP"]/bw)
out["SW_DN"].append(f["SW_DN"]/bw)
out["UP"].append((f["LW_UP"] + f["SW_UP"])/bw)
for k in out.keys():
out[k] = np.array(out[k], dtype=float)
return out
```
```python
# Plot emission spectrum
plt.close("all")
fig,ax = plt.subplots(figsize=(7,4))
tmp = float(sol.root)
emiss = emission_spectrum(tmp)
for k in ["LW_UP","SW_UP","UP"]:
ax.plot(obs_bc, emiss[k], label=k)
# ax.plot(obs_bc, emiss["SW_UP"]/emiss["SW_DN"])
ax.set(title=r"T$_s$ = %.2f K"%tmp, xlabel="Wavelength [nm]", ylabel=r"Flux density [W m$^{-2}$ nm$^{-1}$]")
ax.set_xscale("log")
ax.legend()
plt.show()
```

|
nichollshREPO_NAMEAGNIPATH_START.@AGNI_extracted@AGNI-main@misc@bare_rock.ipynb@.PATH_END.py
|
{
"filename": "_dash.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/ohlc/increasing/line/_dash.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DashValidator(_plotly_utils.basevalidators.DashValidator):
    """Validator for the 'dash' property of 'ohlc.increasing.line'.

    Auto-generated plotly boilerplate: restricts the default value set
    to the named dash patterns below, with edit_type 'style'.
    """

    def __init__(
        self, plotly_name="dash", parent_name="ohlc.increasing.line", **kwargs
    ):
        super(DashValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "style"),
            values=kwargs.pop(
                "values", ["solid", "dot", "dash", "longdash", "dashdot", "longdashdot"]
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@ohlc@increasing@line@_dash.py@.PATH_END.py
|
{
"filename": "_xpad.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_xpad.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the 'xpad' property of 'scatter3d.marker.colorbar'.

    Auto-generated plotly boilerplate: a number constrained to min=0,
    with edit_type 'calc'.
    """

    def __init__(
        self, plotly_name="xpad", parent_name="scatter3d.marker.colorbar", **kwargs
    ):
        super(XpadValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@marker@colorbar@_xpad.py@.PATH_END.py
|
{
"filename": "cninv.py",
"repo_name": "toshiyan/cmblensplus",
"repo_path": "cmblensplus_extracted/cmblensplus-master/wrap/curvedsky/cninv.py",
"type": "Python"
}
|
import libcurvedsky
import numpy
def cnfilter_freq(n,mn,nside,lmax,cl,bl,iNcov,maps,chn=1,lmaxs=[0],nsides=[0],itns=[1],eps=[1e-6],filter='W',verbose=False,ro=50,stat='',inl=None):
    """
    Combining multiple frequency CMB maps optimally.
    The filtering would work if the noise variance is not significantly varied with scale (multipole).
    Please make sure that your input maps are beam-convolved.
    This code deconvolves the beam during filtering and the output are the filtered alms after the beam-deconvolution.

    Args:
      :n (*int*): Number of maps, i.e., temperature only (n=1), polarization only (n=2) or both (n=3)
      :mn (*int*): Number of frequencies
      :nside (*int*): Nside of input map
      :lmax (*int*): Maximum multipole of the input cl
      :cl[*n,l*] (*double*): Theory signal power spectrum, with bounds (0:n-1,0:lmax)
      :bl[*mn,l*] (*double*): Beam spectrum, with bounds (0:mn-1,0:lmax)
      :iNcov[*n,mn,pix*] (*double*): Inverse of the noise variance at each pixel, with bounds (0:n-1,0:mn-1,0:npix-1)
      :maps[*n,mn,pix*] (*double*): Beam-convolved T, Q, U maps, with bounds (0:n-1,0:mn-1,0:npix-1)

    Args(optional):
      :chn (*int*): Number of grids for preconditioner (chn=1 for diagonal preconditioner, default)
      :lmaxs[*chain*] (*int*): Maximum multipole(s) at each preconditioning and lmaxs[*0*] is the input maximum multipole of cl
      :nsides[*chain*] (*int*): Nside(s) of preconditioner and nsides[*0*] should be consistent with the input map's nside.
      :eps[*chain*] (*double*): Parameter to finish the iteration (i.e. terminate if the residual fraction becomes smaller than eps). Default to 1e-6.
      :itns[*chain*] (*int*): Number of iterations.
      :filter (*str*): C-inverse ('') or Wiener filter (W), default to the Wiener filter.
      :inl[*n,mn,l*] (*double*): Inverse noise spectrum (0 for white noise case, default).
      :verbose (*bool*): Output messages, default to False
      :ro (*int*): the residual fraction is output for every ro iteration (e.g. ro=2 means 1 output per 2 iterations). Default to 50. Useful for convergence speed.
      :stat (*str*): Real-time status filename which contains the residual fraction, default to no output file

    Returns:
      :xlm[*n,l,m*] (*dcmplx*): C-inverse / Wiener filtered multipoles, with bounds (0:n-1,0:lmax,0:lmax)

    Usage:
      :xlm = curvedsky.cninv.cnfilter_freq(n,mn,nside,lmax,cl,bl,iNcov,maps,chn,lmaxs,nsides,itns,eps,filter,inl,verbose,ro,stat):
    """
    # Total number of HEALPix pixels for the given nside.
    npix = 12*nside**2
    # Default inl: vanishing inverse noise spectrum (pure white-noise case).
    if inl is None: inl = 0*iNcov[:,:,:lmax+1]
    return libcurvedsky.cninv.cnfilter_freq(n,mn,npix,lmax,cl,bl,iNcov,maps,chn,lmaxs,nsides,itns,eps,filter,inl,verbose,ro,stat)
def cnfilter_kappa(n,nside,lmax,cov,iNcov,maps,chn=1,lmaxs=[0],nsides=[0],itns=[1],eps=[1e-6],verbose=False,ro=50,stat='',inl=None):
    """
    Computing the inverse-variance weighted multipole, (C+N)^-1 x kappa, for multiple mass-tracers of kappa maps.

    Args:
      :n (*int*): Number of input kappa maps to be combined
      :nside (*int*): Nside of input maps
      :lmax (*int*): Maximum multipole of the input cl
      :cov[*n,n,l*] (*double*): Signal covariance matrix for each multipole, with bounds (0:n-1,0:n-1,0:lmax)
      :iNcov[*n,pix*] (*double*): Inverse of the noise variance at each pixel, with bounds (0:n-1,0:npix-1)
      :maps[*n,pix*] (*double*): Input kappa maps, with bounds (0:n-1,0:npix-1)

    Args(optional):
      :chn (*int*): Number of grids for preconditioner (chn=1 for diagonal preconditioner, default)
      :lmaxs[*chain*] (*int*): Maximum multipole(s) at each preconditioning and lmaxs[*0*] is the input maximum multipole of cl
      :nsides[*chain*] (*int*): Nside(s) of preconditioner and nsides[*0*] should be consistent with the input map's nside.
      :eps[*chain*] (*double*): Parameter to finish the iteration (i.e. terminate if the residual fraction becomes smaller than eps). Default to 1e-6.
      :itns[*chain*] (*int*): Number of iterations.
      :inl[*n,l*] (*double*): Inverse noise spectrum for each mass map (0 for white noise case, default).
      :verbose (*bool*): Output messages, default to False
      :ro (*int*): the residual fraction is output for every ro iteration (e.g. ro=2 means 1 output per 2 iterations). Default to 50. Useful for convergence speed.
      :stat (*str*): Real-time status filename which contains the residual fraction, default to no output file

    Returns:
      :xlm[*n,l,m*] (*dcmplx*): Wiener filtered multipoles, with bounds (n,0:lmax,0:lmax)

    Usage:
      :xlm = curvedsky.cninv.cnfilter_kappa(n,nside,lmax,cov,iNcov,maps,chn,lmaxs,nsides,itns,eps,inl,verbose,ro,stat):
    """
    # Total number of HEALPix pixels for the given nside.
    npix = 12*nside**2
    # Default inl: vanishing inverse noise spectrum (pure white-noise case).
    if inl is None: inl = 0*iNcov[:,:lmax+1]
    return libcurvedsky.cninv.cnfilter_kappa(n,npix,lmax,cov,iNcov,maps,chn,lmaxs,nsides,itns,eps,inl,verbose,ro,stat)
def cnfilter_freq_nside(n,mn0,mn1,nside0,nside1,lmax,cl,bl0,bl1,iNcov0,iNcov1,maps0,maps1,chn=1,lmaxs=[0],nsides0=[0],nsides1=[0],itns=[1],eps=[1e-6],filter='W',verbose=False,reducmn=0,ro=50,stat='',inl=None):
    """
    Same as cnfilter_freq but for the maps with two different Nsides.
    Please make sure that your input maps are beam-convolved.
    This code deconvolves the beam during filtering and the output are the filtered alms after the beam-deconvolution.

    Args:
      :n (*int*): Number of maps, i.e., temperature only (n=1), polarization only (n=2) or both (n=3)
      :mn0/1 (*int*): Number of frequencies
      :nside0/1 (*int*): Nsides of input maps
      :lmax (*int*): Maximum multipole of the input cl
      :cl[*n,l*] (*double*): Theory signal power spectrum, with bounds (0:n-1,0:lmax)
      :bl0/1[*mn,l*] (*double*): Beam function, with bounds (0:mn-1,0:lmax)
      :iNcov0/1[*n,mn,pix*] (*double*): Inverse of the noise variance at each pixel, with bounds (0:n-1,0:mn-1,0:npix-1)
      :maps0/1[*n,mn,pix*] (*double*): Beam-convolved T, Q, U maps, with bounds (0:n-1,0:mn-1,0:npix-1)

    Args(optional):
      :chn (*int*): Number of grids for preconditioner (chn=1 for diagonal preconditioner, default)
      :lmaxs[*chain*] (*int*): Maximum multipole(s) at each preconditioning and lmaxs[*0*] is the input maximum multipole of cl
      :nsides0/1[*chain*] (*int*): Nside(s) of preconditioner and nsides[*0*] should be consistent with the input map's nside.
      :eps[*chain*] (*double*): Parameter to finish the iteration (i.e. terminate if the residual fraction becomes smaller than eps). Default to 1e-6.
      :itns[*chain*] (*int*): Number of iterations.
      :filter (*str*): C-inverse ('') or Wiener filter (W), default to the Wiener filter.
      :inl[*n,mn,l*] (*double*): Inverse noise spectrum, 0 for white noise case.
      :verbose (*bool*): Output messages, default to False
      :ro (*int*): the residual fraction is output for every ro iteration (e.g. ro=2 means 1 output per 2 iterations). Default to 50. Useful for convergence speed.
      :stat (*str*): Real-time status filename which contains the residual fraction, default to no output file
      :reducmn (*int*): Reducing number of maps per chain (1,2) or not (0, default). If 1, the maps are combined for the same nside inside the multigrid chain. If 2, in addition to the procedure of 1, the each nside maps are further combined into a single map inside the second chain (chain>=3).

    Returns:
      :xlm[*n,l,m*] (*dcmplx*): C-inverse or Wiener filtered multipoles, with bounds (0:n-1,0:lmax,0:lmax)

    Usage:
      :xlm = curvedsky.cninv.cnfilter_freq_nside(n,mn0,mn1,nside0,nside1,lmax,cl,bl0,bl1,iNcov0,iNcov1,maps0,maps1,chn,lmaxs,nsides0,nsides1,itns,eps,filter,inl,verbose,reducmn,ro,stat):
    """
    # Total numbers of HEALPix pixels for the two input resolutions.
    npix0 = 12*nside0**2
    npix1 = 12*nside1**2
    # Default inl: vanishing inverse noise spectrum (pure white-noise case).
    if inl is None: inl = 0*iNcov0[:,:,:lmax+1]
    return libcurvedsky.cninv.cnfilter_freq_nside(n,mn0,mn1,npix0,npix1,lmax,cl,bl0,bl1,iNcov0,iNcov1,maps0,maps1,chn,lmaxs,nsides0,nsides1,itns,eps,filter,inl,verbose,reducmn,ro,stat)
|
toshiyanREPO_NAMEcmblensplusPATH_START.@cmblensplus_extracted@cmblensplus-master@wrap@curvedsky@cninv.py@.PATH_END.py
|
{
"filename": "_xpad.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcats/line/colorbar/_xpad.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the 'xpad' property of 'parcats.line.colorbar'.

    Auto-generated plotly boilerplate: a number constrained to min=0,
    with edit_type 'colorbars'.
    """

    def __init__(
        self, plotly_name="xpad", parent_name="parcats.line.colorbar", **kwargs
    ):
        super(XpadValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@parcats@line@colorbar@_xpad.py@.PATH_END.py
|
{
"filename": "quickstart.md",
"repo_name": "ebellm/pyraf-dbsp",
"repo_path": "pyraf-dbsp_extracted/pyraf-dbsp-master/docs/quickstart.md",
"type": "Markdown"
}
|
### Quick Start/Command Summary
Complete night reduction:
`cd` to your data directory.
Save a copy of your files in a "raw" directory--this code overwrites the originals!
mkdir raw
cp *.fits raw
Start `ipython` and load the script:
%run /path/to/dbsp.py
For users on the Caltech astro network, log in to soroban and execute:
export PATH="/scr/ebellm/anaconda/bin:$PATH"
source activate iraf27
mkiraf # choose xgterm
ipython
%run /home/ebellm/observing/reduction/dbsp/dbsp.py
Exclude any images that you don't want to analyze (use your log; especially focus/test exposures from the beginning of the night):
mark_bad([47,49,50],side='blue')
mark_bad([35],side='red')
Create arcs and dome flats (run this after you have at least one science exposure):
create_arc_dome()
Process flux calibration standards, if desired. If not, skip this step and set `flux=False` in `extract1D()`.
store_standards([41,42,43], side='blue')
Extract data:
extract1D(61,side='blue')
For basic telluric correction on the red side, first extract an appropriate telluric calibrator, then pass it to `store_standards` and `extract1D`:
extract1D(77,side='red', flux=False)
store_standards([41,42,43], side='red', telluric_cal_id = 77)
extract1D(63,side='red',flux=True, telluric_cal_id = 77)
To process a large number of science spectra in a row:
batch_process(20, 45, side='blue', quicklook='no')
Finally, join spectra from the blue and red sides, identify pairs (or more) of images:
combine_sides([61],[63,64])
|
ebellmREPO_NAMEpyraf-dbspPATH_START.@pyraf-dbsp_extracted@pyraf-dbsp-master@docs@quickstart.md@.PATH_END.py
|
{
"filename": "test_rambo.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/test/rambo/test_rambo.py",
"type": "Python"
}
|
import unittest
import pytest
import numpy as np
from numpy.testing import assert_allclose
from hazma.field_theory_helper_functions.common_functions import minkowski_dot as MDot
from hazma.parameters import GF, alpha_em
from hazma.parameters import electron_mass as me
from hazma.parameters import muon_mass as mmu
from hazma.parameters import qe
from hazma.rambo import compute_annihilation_cross_section, compute_decay_width
from hazma.utils import RealArray
mw = 80.385 * 10**3  # W-mass (80.385 GeV expressed in the code's 1e3 units — presumably MeV; confirm against hazma.parameters)
mz = 91.1876 * 10**3  # Z-Mass (same convention as mw)


@pytest.mark.skip(reason="Deprecated")
class TestRambo(unittest.TestCase):
    """Checks the RAMBO phase-space integrator against closed-form
    cross sections and decay widths (entire class currently skipped
    as deprecated)."""

    def setUp(self):
        pass

    def test_compute_annihilation_cross_section(self):
        # Monte-Carlo sigma(e+ e- -> mu+ mu-) compared with the analytic
        # expression 4*pi*alpha^2 / (3*cme^2).
        def msqrd_ee_to_mumu(momenta: RealArray):
            # momenta holds the final-state four-vectors; the initial
            # e+/e- momenta p1, p2 are reconstructed back-to-back along z
            # from the total invariant mass Q.
            p3 = momenta[0]
            p4 = momenta[1]
            P = np.sum(momenta, axis=1)
            Q = P[0]
            pi_mag = np.sqrt(Q**2 / 4.0 - me**2)
            p1 = np.array([Q / 2.0, 0.0, 0.0, pi_mag])
            p2 = np.array([Q / 2.0, 0.0, 0.0, -pi_mag])
            return (
                2.0
                * qe**4
                * (
                    MDot(p1, p4) * MDot(p2, p3)
                    + MDot(p1, p3) * MDot(p2, p4)
                    + MDot(p1, p2) * me**2
                    + MDot(p3, p4) * mmu**2
                    + 2.0 * me**2 * mmu**2
                )
            ) / (MDot(p3, p4) + mmu**2) ** 2

        isp_masses = np.array([me, me])
        fsp_masses = np.array([mmu, mmu])
        cme = 1000.0
        rambo = compute_annihilation_cross_section(
            isp_masses, fsp_masses, cme, num_ps_pts=5000, mat_elem_sqrd=msqrd_ee_to_mumu
        )
        analytic = 4.0 * np.pi * alpha_em**2 / (3.0 * cme**2)
        # rambo[0] is the cross-section estimate; 0.5% relative tolerance.
        assert_allclose(rambo[0], analytic, rtol=5e-3)

    def test_compute_decay_width_muon(self):
        """
        Test rambo decay width function on mu -> e nu nu.
        """

        def msqrd_mu_to_enunu(momenta):
            """
            Matrix element squared for mu -> e nu nu.
            """
            pe = momenta[0]
            pve = momenta[1]
            pvmu = momenta[2]
            pmu = sum(momenta)
            return 64.0 * GF**2 * MDot(pe, pvmu) * MDot(pmu, pve)

        fsp_masses = np.array([me, 0.0, 0.0])
        rambo = compute_decay_width(
            fsp_masses, mmu, num_ps_pts=50000, mat_elem_sqrd=msqrd_mu_to_enunu
        )
        # Analytic width including the electron-mass correction factor
        # corr_fac, with r = (me/mmu)^2.
        r = me**2 / mmu**2
        corr_fac = 1.0 - 8.0 * r + 8 * r**3 - r**4 - 12.0 * r**2 * np.log(r)
        analytic = GF**2 * mmu**5 / (192.0 * np.pi**3) * corr_fac
        assert_allclose(rambo[0], analytic, rtol=5e-3)

    def test_compute_decay_width_Zee(self):
        """
        Test rambo decay width function on Z -> e e.
        """
        # sw/cw: sine/cosine of the weak mixing angle (sin^2 = 0.2223).
        sw = np.sqrt(0.2223)
        cw = np.sqrt(1.0 - sw**2)
        fsp_masses = np.array([me, me])

        def msqd_Z_to_ee(momenta):
            p1 = momenta[0]
            p2 = momenta[1]
            return (
                qe**2
                * (
                    2 * (1 - 4 * sw**2 + 8 * sw**4) * MDot(p1, p2) ** 2
                    + 2 * (1 - 4 * sw**2 + 8 * sw**4) * me**4
                    + 12 * sw**2 * (-1 + 2 * sw**2) * me**2 * mz**2
                    + (1 - 4 * sw**2 + 8 * sw**4)
                    * MDot(p1, p2)
                    * (4 * me**2 + mz**2)
                )
            ) / (6.0 * cw**2 * sw**2 * mz**2)

        # Only 10 phase-space points: a 1 -> 2 decay has no phase-space
        # freedom, so few points suffice.
        rambo = compute_decay_width(
            fsp_masses, mz, num_ps_pts=10, mat_elem_sqrd=msqd_Z_to_ee
        )
        num = qe**2 * (8.0 * sw**4 - 4.0 * sw**2 + 1) * mz
        den = 96.0 * np.pi * cw**2 * sw**2
        analytic = num / den
        assert_allclose(rambo[0], analytic, rtol=5e-3)
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@test@rambo@test_rambo.py@.PATH_END.py
|
{
"filename": "test_take.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/test_take.py",
"type": "Python"
}
|
from datetime import datetime
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas import array
import pandas._testing as tm
import pandas.core.algorithms as algos
@pytest.fixture(
    params=[
        # Each triple is (input dtype, fill value, expected result dtype
        # of take_nd when the fill value must be representable).
        (np.int8, np.int16(127), np.int8),
        (np.int8, np.int16(128), np.int16),
        (np.int32, 1, np.int32),
        (np.int32, 2.0, np.float64),
        (np.int32, 3.0 + 4.0j, np.complex128),
        (np.int32, True, np.object_),
        (np.int32, "", np.object_),
        (np.float64, 1, np.float64),
        (np.float64, 2.0, np.float64),
        (np.float64, 3.0 + 4.0j, np.complex128),
        (np.float64, True, np.object_),
        (np.float64, "", np.object_),
        (np.complex128, 1, np.complex128),
        (np.complex128, 2.0, np.complex128),
        (np.complex128, 3.0 + 4.0j, np.complex128),
        (np.complex128, True, np.object_),
        (np.complex128, "", np.object_),
        (np.bool_, 1, np.object_),
        (np.bool_, 2.0, np.object_),
        (np.bool_, 3.0 + 4.0j, np.object_),
        (np.bool_, True, np.bool_),
        (np.bool_, "", np.object_),
    ]
)
def dtype_fill_out_dtype(request):
    """Parametrized (dtype, fill_value, out_dtype) triples for take tests."""
    return request.param
class TestTake:
def test_1d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.default_rng(2).integers(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, fill_value=fill_value)
assert (result[[0, 1, 2]] == data[[2, 1, 0]]).all()
assert result[3] == fill_value
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, fill_value=fill_value)
assert (result[[0, 1, 2, 3]] == data[indexer]).all()
assert result.dtype == dtype
def test_2d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.default_rng(2).integers(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2], :] == data[[2, 1, 0], :]).all()
assert (result[3, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all()
assert (result[:, 3] == fill_value).all()
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2, 3], :] == data[indexer, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2, 3]] == data[:, indexer]).all()
assert result.dtype == dtype
def test_3d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.default_rng(2).integers(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all()
assert (result[3, :, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all()
assert (result[:, 3, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert (result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all()
assert (result[:, :, 3] == fill_value).all()
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert (result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all()
assert result.dtype == dtype
def test_1d_other_dtypes(self):
arr = np.random.default_rng(2).standard_normal(10).astype(np.float32)
indexer = [1, 2, 3, -1]
result = algos.take_nd(arr, indexer)
expected = arr.take(indexer)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_other_dtypes(self):
arr = np.random.default_rng(2).standard_normal((10, 5)).astype(np.float32)
indexer = [1, 2, 3, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, -1] = np.nan
tm.assert_almost_equal(result, expected)
def test_1d_bool(self):
arr = np.array([0, 1, 0], dtype=bool)
result = algos.take_nd(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1])
tm.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, -1])
assert result.dtype == np.object_
def test_2d_bool(self):
    """take_nd on a 2D bool array.

    Without -1 it matches ndarray.take along both axes; with a -1 (NA fill)
    the bool dtype cannot hold the fill, so the result upcasts to object.
    """
    arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool)

    result = algos.take_nd(arr, [0, 2, 2, 1])
    expected = arr.take([0, 2, 2, 1], axis=0)
    tm.assert_numpy_array_equal(result, expected)

    result = algos.take_nd(arr, [0, 2, 2, 1], axis=1)
    expected = arr.take([0, 2, 2, 1], axis=1)
    tm.assert_numpy_array_equal(result, expected)

    # -1 requests NA fill -> upcast to object
    result = algos.take_nd(arr, [0, 2, -1])
    assert result.dtype == np.object_
def test_2d_float32(self):
arr = np.random.default_rng(2).standard_normal((4, 3)).astype(np.float32)
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_datetime64(self):
    """take_nd on a 2D datetime64[ns] array.

    By default -1 slots become NaT (written through an int64 view as iNaT);
    with an explicit datetime fill_value they take that value instead.
    """
    # 2005/01/01 - 2006/01/01
    arr = (
        np.random.default_rng(2).integers(11_045_376, 11_360_736, (5, 3))
        * 100_000_000_000
    )
    arr = arr.view(dtype="datetime64[ns]")
    indexer = [0, 2, -1, 1, -1]

    # axis=0
    result = algos.take_nd(arr, indexer, axis=0)
    expected = arr.take(indexer, axis=0)
    # positions 2 and 4 were taken with -1 -> NaT via the int64 view
    expected.view(np.int64)[[2, 4], :] = iNaT
    tm.assert_almost_equal(result, expected)

    result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1))
    expected = arr.take(indexer, axis=0)
    expected[[2, 4], :] = datetime(2007, 1, 1)
    tm.assert_almost_equal(result, expected)

    # axis=1
    result = algos.take_nd(arr, indexer, axis=1)
    expected = arr.take(indexer, axis=1)
    expected.view(np.int64)[:, [2, 4]] = iNaT
    tm.assert_almost_equal(result, expected)

    result = algos.take_nd(arr, indexer, axis=1, fill_value=datetime(2007, 1, 1))
    expected = arr.take(indexer, axis=1)
    expected[:, [2, 4]] = datetime(2007, 1, 1)
    tm.assert_almost_equal(result, expected)
def test_take_axis_0(self):
arr = np.arange(12).reshape(4, 3)
result = algos.take(arr, [0, -1])
expected = np.array([[0, 1, 2], [9, 10, 11]])
tm.assert_numpy_array_equal(result, expected)
# allow_fill=True
result = algos.take(arr, [0, -1], allow_fill=True, fill_value=0)
expected = np.array([[0, 1, 2], [0, 0, 0]])
tm.assert_numpy_array_equal(result, expected)
def test_take_axis_1(self):
    """algos.take along axis=1, with and without allow_fill.

    Also checks that out-of-bounds indices are validated against the take
    axis, not axis 0 (GH#26976).
    """
    arr = np.arange(12).reshape(4, 3)
    result = algos.take(arr, [0, -1], axis=1)
    expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]])
    tm.assert_numpy_array_equal(result, expected)

    # allow_fill=True
    result = algos.take(arr, [0, -1], axis=1, allow_fill=True, fill_value=0)
    expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]])
    tm.assert_numpy_array_equal(result, expected)

    # GH#26976 make sure we validate along the correct axis
    with pytest.raises(IndexError, match="indices are out-of-bounds"):
        algos.take(arr, [0, 3], axis=1, allow_fill=True, fill_value=0)
def test_take_non_hashable_fill_value(self):
    """A non-scalar fill_value is rejected, except for object dtype.

    With object dtype any Python object (here a list) is a legal fill value
    and appears verbatim in the filled slot.
    """
    arr = np.array([1, 2, 3])
    indexer = np.array([1, -1])
    with pytest.raises(ValueError, match="fill_value must be a scalar"):
        algos.take(arr, indexer, allow_fill=True, fill_value=[1])

    # with object dtype it is allowed
    arr = np.array([1, 2, 3], dtype=object)
    result = algos.take(arr, indexer, allow_fill=True, fill_value=[1])
    expected = np.array([2, [1]], dtype=object)
    tm.assert_numpy_array_equal(result, expected)
class TestExtensionTake:
# The take method found in pd.api.extensions
def test_bounds_check_large(self):
arr = np.array([1, 2])
msg = "indices are out-of-bounds"
with pytest.raises(IndexError, match=msg):
algos.take(arr, [2, 3], allow_fill=True)
msg = "index 2 is out of bounds for( axis 0 with)? size 2"
with pytest.raises(IndexError, match=msg):
algos.take(arr, [2, 3], allow_fill=False)
def test_bounds_check_small(self):
arr = np.array([1, 2, 3], dtype=np.int64)
indexer = [0, -1, -2]
msg = r"'indices' contains values less than allowed \(-2 < -1\)"
with pytest.raises(ValueError, match=msg):
algos.take(arr, indexer, allow_fill=True)
result = algos.take(arr, indexer)
expected = np.array([1, 3, 2], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("allow_fill", [True, False])
def test_take_empty(self, allow_fill):
arr = np.array([], dtype=np.int64)
# empty take is ok
result = algos.take(arr, [], allow_fill=allow_fill)
tm.assert_numpy_array_equal(arr, result)
msg = "|".join(
[
"cannot do a non-empty take from an empty axes.",
"indices are out-of-bounds",
]
)
with pytest.raises(IndexError, match=msg):
algos.take(arr, [0], allow_fill=allow_fill)
def test_take_na_empty(self):
result = algos.take(np.array([]), [-1, -1], allow_fill=True, fill_value=0.0)
expected = np.array([0.0, 0.0])
tm.assert_numpy_array_equal(result, expected)
def test_take_coerces_list(self):
# GH#52981 coercing is deprecated, disabled in 3.0
arr = [1, 2, 3]
msg = (
"pd.api.extensions.take requires a numpy.ndarray, ExtensionArray, "
"Index, Series, or NumpyExtensionArray got list"
)
with pytest.raises(TypeError, match=msg):
algos.take(arr, [0, 0])
def test_take_NumpyExtensionArray(self):
# GH#59177
arr = array([1 + 1j, 2, 3]) # NumpyEADtype('complex128') (NumpyExtensionArray)
assert algos.take(arr, [2]) == 2
arr = array([1, 2, 3]) # Int64Dtype() (ExtensionArray)
assert algos.take(arr, [2]) == 2
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@test_take.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/tickfont/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``variant`` property of
    ``histogram2dcontour.colorbar.tickfont``."""

    def __init__(
        self,
        plotly_name="variant",
        parent_name="histogram2dcontour.colorbar.tickfont",
        **kwargs,
    ):
        # Pull overridable defaults out of kwargs before forwarding the rest.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop(
            "values",
            [
                "normal",
                "small-caps",
                "all-small-caps",
                "all-petite-caps",
                "petite-caps",
                "unicase",
            ],
        )
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram2dcontour@colorbar@tickfont@_variant.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcoords/tickfont/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``shadow`` property of ``parcoords.tickfont``."""

    def __init__(
        self, plotly_name="shadow", parent_name="parcoords.tickfont", **kwargs
    ):
        # "plot" is the default edit type unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "plot")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@parcoords@tickfont@_shadow.py@.PATH_END.py
|
{
"filename": "pgvector.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/vectorstores/pgvector.ipynb",
"type": "Jupyter Notebook"
}
|
# PGVector
> An implementation of LangChain vectorstore abstraction using `postgres` as the backend and utilizing the `pgvector` extension.
The code lives in an integration package called: [langchain_postgres](https://github.com/langchain-ai/langchain-postgres/).
## Status
This code has been ported over from `langchain_community` into a dedicated package called `langchain-postgres`. The following changes have been made:
* langchain_postgres works only with psycopg3. Please update your connection strings from `postgresql+psycopg2://...` to `postgresql+psycopg://langchain:langchain@...` (yes, the driver name is `psycopg`, not `psycopg3` — but it will use `psycopg3` under the hood).
* The schema of the embedding store and collection have been changed to make add_documents work correctly with user specified ids.
* One has to pass an explicit connection object now.
Currently, there is **no mechanism** that supports easy data migration on schema changes. So any schema changes in the vectorstore will require the user to recreate the tables and re-add the documents.
If this is a concern, please use a different vectorstore. If not, this implementation should be fine for your use case.
## Setup
First, download the partner package:
```python
pip install -qU langchain_postgres
```
You can run the following command to spin up a postgres container with the `pgvector` extension:
```python
%docker run --name pgvector-container -e POSTGRES_USER=langchain -e POSTGRES_PASSWORD=langchain -e POSTGRES_DB=langchain -p 6024:5432 -d pgvector/pgvector:pg16
```
### Credentials
There are no credentials needed to run this notebook, just make sure you downloaded the `langchain_postgres` package and correctly started the postgres container.
If you want to get best in-class automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:
```python
# os.environ["LANGSMITH_API_KEY"] = getpass.getpass("Enter your LangSmith API key: ")
# os.environ["LANGSMITH_TRACING"] = "true"
```
## Instantiation
import EmbeddingTabs from "@theme/EmbeddingTabs";
<EmbeddingTabs/>
```python
# | output: false
# | echo: false
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings(model="text-embedding-3-large")
```
```python
from langchain_core.documents import Document
from langchain_postgres import PGVector
from langchain_postgres.vectorstores import PGVector
# See docker command above to launch a postgres instance with pgvector enabled.
connection = "postgresql+psycopg://langchain:langchain@localhost:6024/langchain" # Uses psycopg3!
collection_name = "my_docs"
vector_store = PGVector(
embeddings=embeddings,
collection_name=collection_name,
connection=connection,
use_jsonb=True,
)
```
## Manage vector store
### Add items to vector store
Note that adding documents by ID will over-write any existing documents that match that ID.
```python
docs = [
Document(
page_content="there are cats in the pond",
metadata={"id": 1, "location": "pond", "topic": "animals"},
),
Document(
page_content="ducks are also found in the pond",
metadata={"id": 2, "location": "pond", "topic": "animals"},
),
Document(
page_content="fresh apples are available at the market",
metadata={"id": 3, "location": "market", "topic": "food"},
),
Document(
page_content="the market also sells fresh oranges",
metadata={"id": 4, "location": "market", "topic": "food"},
),
Document(
page_content="the new art exhibit is fascinating",
metadata={"id": 5, "location": "museum", "topic": "art"},
),
Document(
page_content="a sculpture exhibit is also at the museum",
metadata={"id": 6, "location": "museum", "topic": "art"},
),
Document(
page_content="a new coffee shop opened on Main Street",
metadata={"id": 7, "location": "Main Street", "topic": "food"},
),
Document(
page_content="the book club meets at the library",
metadata={"id": 8, "location": "library", "topic": "reading"},
),
Document(
page_content="the library hosts a weekly story time for kids",
metadata={"id": 9, "location": "library", "topic": "reading"},
),
Document(
page_content="a cooking class for beginners is offered at the community center",
metadata={"id": 10, "location": "community center", "topic": "classes"},
),
]
vector_store.add_documents(docs, ids=[doc.metadata["id"] for doc in docs])
```
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
### Delete items from vector store
```python
vector_store.delete(ids=["3"])
```
## Query vector store
Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent.
### Filtering Support
The vectorstore supports a set of filters that can be applied against the metadata fields of the documents.
| Operator | Meaning/Category |
|----------|-------------------------|
| \$eq | Equality (==) |
| \$ne | Inequality (!=) |
| \$lt | Less than (<) |
| \$lte | Less than or equal (<=) |
| \$gt | Greater than (>) |
| \$gte | Greater than or equal (>=) |
| \$in | Special Cased (in) |
| \$nin | Special Cased (not in) |
| \$between | Special Cased (between) |
| \$like | Text (like) |
| \$ilike | Text (case-insensitive like) |
| \$and | Logical (and) |
| \$or | Logical (or) |
### Query directly
Performing a simple similarity search can be done as follows:
```python
results = vector_store.similarity_search(
"kitty", k=10, filter={"id": {"$in": [1, 5, 2, 9]}}
)
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
```
* there are cats in the pond [{'id': 1, 'topic': 'animals', 'location': 'pond'}]
* the library hosts a weekly story time for kids [{'id': 9, 'topic': 'reading', 'location': 'library'}]
* ducks are also found in the pond [{'id': 2, 'topic': 'animals', 'location': 'pond'}]
* the new art exhibit is fascinating [{'id': 5, 'topic': 'art', 'location': 'museum'}]
If you provide a dict with multiple fields, but no operators, the top level will be interpreted as a logical **AND** filter
```python
vector_store.similarity_search(
"ducks",
k=10,
filter={"id": {"$in": [1, 5, 2, 9]}, "location": {"$in": ["pond", "market"]}},
)
```
[Document(metadata={'id': 1, 'topic': 'animals', 'location': 'pond'}, page_content='there are cats in the pond'),
Document(metadata={'id': 2, 'topic': 'animals', 'location': 'pond'}, page_content='ducks are also found in the pond')]
```python
vector_store.similarity_search(
"ducks",
k=10,
filter={
"$and": [
{"id": {"$in": [1, 5, 2, 9]}},
{"location": {"$in": ["pond", "market"]}},
]
},
)
```
[Document(metadata={'id': 1, 'topic': 'animals', 'location': 'pond'}, page_content='there are cats in the pond'),
Document(metadata={'id': 2, 'topic': 'animals', 'location': 'pond'}, page_content='ducks are also found in the pond')]
If you want to execute a similarity search and receive the corresponding scores you can run:
```python
results = vector_store.similarity_search_with_score(query="cats", k=1)
for doc, score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
```
* [SIM=0.763449] there are cats in the pond [{'id': 1, 'topic': 'animals', 'location': 'pond'}]
For a full list of the different searches you can execute on a `PGVector` vector store, please refer to the [API reference](https://python.langchain.com/api_reference/postgres/vectorstores/langchain_postgres.vectorstores.PGVector.html).
### Query by turning into retriever
You can also transform the vector store into a retriever for easier usage in your chains.
```python
retriever = vector_store.as_retriever(search_type="mmr", search_kwargs={"k": 1})
retriever.invoke("kitty")
```
[Document(metadata={'id': 1, 'topic': 'animals', 'location': 'pond'}, page_content='there are cats in the pond')]
## Usage for retrieval-augmented generation
For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:
- [Tutorials](/docs/tutorials/)
- [How-to: Question and answer with RAG](https://python.langchain.com/docs/how_to/#qa-with-rag)
- [Retrieval conceptual docs](https://python.langchain.com/docs/concepts/retrieval)
## API reference
For detailed documentation of all `PGVector` features and configurations head to the API reference: https://python.langchain.com/api_reference/postgres/vectorstores/langchain_postgres.vectorstores.PGVector.html
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@vectorstores@pgvector.ipynb@.PATH_END.py
|
{
"filename": "styles.py",
"repo_name": "hopehhchen/Droplets",
"repo_path": "Droplets_extracted/Droplets-master/Droplets/styles.py",
"type": "Python"
}
|
"""Project-wide matplotlib style: 538-like color cycle, large fonts, inward ticks."""
from matplotlib import rcParams
from cycler import cycler

# FiveThirtyEight color palette, used for the axes property cycle below.
colors_538 = ["#30a2da",
              "#fc4f30",
              "#e5ae38",
              "#6d904f",
              "#8b8b8b"]

### Hope's implementation ###
# lines
rcParams['lines.linewidth'] = 1
rcParams['lines.markersize'] = 20
# image
rcParams['image.cmap'] = 'viridis'
rcParams['image.interpolation'] = 'none'
# BUG FIX: valid values are 'upper'/'lower'; 'bottom' is rejected by
# matplotlib's rcParams validation ('lower' is what was intended).
rcParams['image.origin'] = 'lower'
# ticks
rcParams['xtick.top'] = True
rcParams['xtick.major.size'] = 10.
rcParams['xtick.minor.size'] = 5.
rcParams['xtick.direction'] = 'in'
rcParams['ytick.right'] = True
rcParams['xtick.major.pad'] = 3.6
rcParams['ytick.major.size'] = 10.
rcParams['ytick.minor.size'] = 5.
rcParams['ytick.direction'] = 'in'
# patch
rcParams['patch.facecolor'] = 'yellow'
rcParams['patch.edgecolor'] = 'none'
# font
rcParams['font.size'] = 28
rcParams['font.family'] = 'StixGeneral'  # font matching is case-insensitive
# mathtext
rcParams['mathtext.fontset'] = 'stix'
# legend
rcParams['legend.frameon'] = False
rcParams['legend.scatterpoints'] = 1
rcParams['legend.numpoints'] = 1
rcParams['legend.fontsize'] = 26
# axes
rcParams['axes.prop_cycle'] = cycler('color', colors_538)
rcParams['axes.facecolor'] = 'none'
# figure
rcParams['figure.figsize'] = (14, 14)
rcParams['figure.dpi'] = 180
rcParams['figure.subplot.left'] = .1
rcParams['figure.subplot.right'] = .97
rcParams['figure.subplot.bottom'] = .1
rcParams['figure.subplot.top'] = .96
rcParams['figure.subplot.wspace'] = .02
rcParams['figure.subplot.hspace'] = .02
# savefig
try:
    # Removed in matplotlib >= 3.6; set it best-effort for older versions
    # instead of crashing on import under a modern matplotlib.
    rcParams['savefig.jpeg_quality'] = 100
except KeyError:
    pass
rcParams['savefig.dpi'] = 180
|
hopehhchenREPO_NAMEDropletsPATH_START.@Droplets_extracted@Droplets-master@Droplets@styles.py@.PATH_END.py
|
{
"filename": "test_diskdf.py",
"repo_name": "jobovy/galpy",
"repo_path": "galpy_extracted/galpy-main/tests/test_diskdf.py",
"type": "Python"
}
|
# Tests of the diskdf module: distribution functions from Dehnen (1999)
import os
import numpy
import pytest
from scipy import stats
from galpy.df import dehnendf, schwarzschilddf, shudf
# Skip the slowest of the skew/kurtosis integrals below (the checks at R=1
# and R=2 are only run when this is False) to keep the suite's runtime down.
_FEWERLONGINTEGRALS = True
# So we can reuse the following
# Module-level caches for corrected DFs — presumably filled by later tests
# in the full file so expensive corrections are computed only once.
ddf_correct_flat = None
ddf_correct2_flat = None
ddf_correct_powerrise = None
sdf_correct_flat = None
# First some tests of surfaceSigmaProfile and expSurfaceSigmaProfile
def test_expSurfaceSigmaProfile_surfacemass():
    """Exponential surface-density profile: surfacemass and its log form."""
    from galpy.df import expSurfaceSigmaProfile

    hR = 0.25
    essp = expSurfaceSigmaProfile(params=(hR, 0.75, 0.1))
    tol = 10.0**-8.0
    # Sigma(R) = exp(-R/hR)
    assert (
        numpy.fabs(essp.surfacemass(0.5) - numpy.exp(-0.5 / hR)) < tol
    ), "expSurfaceSigmaProfile's surfacemass does not work as expected"
    # log Sigma(R) = -R/hR
    assert (
        numpy.fabs(essp.surfacemass(1.5, log=True) + 1.5 / hR) < tol
    ), "expSurfaceSigmaProfile's surfacemass does not work as expected"
    return None
def test_expSurfaceSigmaProfile_surfacemassDerivative():
    """dSigma/dR of the exponential profile, linear and logarithmic."""
    from galpy.df import expSurfaceSigmaProfile

    hR = 0.25
    essp = expSurfaceSigmaProfile(params=(hR, 0.75, 0.1))
    tol = 10.0**-8.0
    # dSigma/dR = -exp(-R/hR)/hR
    assert (
        numpy.fabs(essp.surfacemassDerivative(0.5) + numpy.exp(-0.5 / hR) / hR) < tol
    ), "expSurfaceSigmaProfile's surfacemassDerivative does not work as expected"
    # dln(Sigma)/dR = -1/hR
    assert (
        numpy.fabs(essp.surfacemassDerivative(1.5, log=True) + 1.0 / hR) < tol
    ), "expSurfaceSigmaProfile's surfacemassDerivative does not work as expected"
    return None
def test_expSurfaceSigmaProfile_sigma2():
    """sigma2: sigma_R^2(R) = sigma0^2 * exp(-2 (R - 1)/hsigma), and its log."""
    from galpy.df import expSurfaceSigmaProfile

    essp = expSurfaceSigmaProfile(params=(0.25, 0.75, 0.1))
    assert (
        numpy.fabs(essp.sigma2(0.5) - 0.1**2.0 * numpy.exp(-(0.5 - 1.0) / 0.75 * 2.0))
        < 10.0**-8.0
    ), "expSurfaceSigmaProfile's sigma2 does not work as expected"
    assert (
        numpy.fabs(
            essp.sigma2(1.5, log=True) - 2.0 * numpy.log(0.1) + (1.5 - 1.0) / 0.75 * 2.0
        )
        < 10.0**-8.0
    ), "expSurfaceSigmaProfile's sigma2 does not work as expected"
    return None
def test_expSurfaceSigmaProfile_sigma2Derivative():
    """d(sigma_R^2)/dR of the exponential profile, linear and logarithmic."""
    from galpy.df import expSurfaceSigmaProfile

    essp = expSurfaceSigmaProfile(params=(0.25, 0.75, 0.1))
    assert (
        numpy.fabs(
            essp.sigma2Derivative(0.5)
            + 2.0 * 0.1**2.0 / 0.75 * numpy.exp(-(0.5 - 1.0) / 0.75 * 2.0)
        )
        < 10.0**-8.0
    ), "expSurfaceSigmaProfile's sigma2Derivative does not work as expected"
    assert (
        numpy.fabs(essp.sigma2Derivative(1.5, log=True) + 2.0 / 0.75) < 10.0**-8.0
    ), "expSurfaceSigmaProfile's sigma2 does not work as expected"
    return None
def test_surfaceSigmaProfile_outputParams():
    """outputParams round-trips the parameter tuple given at construction."""
    from galpy.df import expSurfaceSigmaProfile

    essp = expSurfaceSigmaProfile(params=(0.25, 0.75, 0.1))
    assert (
        numpy.fabs(essp.outputParams()[0] - 0.25) < 10.0**-8.0
    ), "surfaceSigmaProfile's outputParams does not behave as expected"
    assert (
        numpy.fabs(essp.outputParams()[1] - 0.75) < 10.0**-8.0
    ), "surfaceSigmaProfile's outputParams does not behave as expected"
    assert (
        numpy.fabs(essp.outputParams()[2] - 0.1) < 10.0**-8.0
    ), "surfaceSigmaProfile's outputParams does not behave as expected"
    return None
def test_surfaceSigmaProfile_formatStringParams():
    """formatStringParams yields a '%6.4f' format string for each parameter."""
    from galpy.df import expSurfaceSigmaProfile

    essp = expSurfaceSigmaProfile(params=(0.25, 0.75, 0.1))
    assert (
        essp.formatStringParams()[0] == r"%6.4f"
    ), "surfaceSigmaProfile's formatStringParams does not behave as expected"
    assert (
        essp.formatStringParams()[1] == r"%6.4f"
    ), "surfaceSigmaProfile's formatStringParams does not behave as expected"
    assert (
        essp.formatStringParams()[2] == r"%6.4f"
    ), "surfaceSigmaProfile's formatStringParams does not behave as expected"
    return None
def test_dfsetup_surfaceSigmaProfile():
    """dehnendf built from a parameter tuple matches one built from an
    explicit expSurfaceSigmaProfile instance with the same parameters."""
    df = dehnendf(profileParams=(0.25, 0.75, 0.1), beta=0.0, correct=False)
    from galpy.df import expSurfaceSigmaProfile

    essp = expSurfaceSigmaProfile(params=(0.25, 0.75, 0.1))
    df_alt = dehnendf(surfaceSigma=essp, beta=0.0, correct=False)
    assert numpy.all(
        numpy.fabs(
            numpy.array(df._surfaceSigmaProfile._params)
            - numpy.array(df_alt._surfaceSigmaProfile._params)
        )
        < 10.0**-10.0
    ), "diskdf setup with explicit surfaceSigmaProfile class does not give the same profile as with parameters only"
    return None
# Tests for cold population, flat rotation curve: <vt> =~ v_c
def test_dehnendf_cold_flat_vt():
    """Cold dehnendf, flat rotation curve: <v_T> ~ v_c (= 1) at several radii."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    assert (
        numpy.fabs(df.meanvT(1.0) - 1.0) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=1"
    assert (
        numpy.fabs(df.meanvT(0.5) - 1.0) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=0.5"
    assert (
        numpy.fabs(df.meanvT(2.0) - 1.0) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=2"
    # Really close to the center
    # BUG FIX: the failure message previously said "R=0.5" (copy-paste) for
    # this R=0.0001 check, which would mislead anyone debugging a failure.
    assert (
        numpy.fabs(df.meanvT(0.0001) - 1.0) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a flat rotation curve is not close to V_c at R=0.0001"
    return None
# Tests for cold population, power-law rotation curve: <vt> =~ v_c
def test_dehnendf_cold_powerrise_vt():
    """Cold dehnendf, rising power-law rotation curve: <v_T> ~ v_c = R**beta."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    assert (
        numpy.fabs(df.meanvT(1.0) - 1.0) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=1"
    assert (
        numpy.fabs(df.meanvT(0.5) - (0.5) ** beta) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=0.5"
    assert (
        numpy.fabs(df.meanvT(2.0) - (2.0) ** beta) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=2"
def test_dehnendf_cold_powerfall_vt():
    """Cold dehnendf, falling power-law rotation curve: <v_T> ~ v_c = R**beta."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    assert (
        numpy.fabs(df.meanvT(1.0) - 1.0) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=1"
    assert (
        numpy.fabs(df.meanvT(0.5) - (0.5) ** beta) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=0.5"
    assert (
        numpy.fabs(df.meanvT(2.0) - (2.0) ** beta) < 10.0**-3.0
    ), "mean vT of cold dehnendf in a power-law rotation curve is not close to V_c at R=2"
    return None
# Tests for cold population, flat rotation curve: <vt> =~ v_c
def test_dehnendf_cold_flat_skewvt():
    """Cold dehnendf, flat rotation curve: v_T skew ~ 0 (slow checks gated)."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    # R=1 and R=2 involve long integrals, so they are gated by the flag
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvT(1.0)) < 1.0 / 20.0
        ), "skew vT of cold dehnendf in a flat rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.skewvT(0.5)) < 1.0 / 20.0
    ), "skew vT of cold dehnendf in a flat rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvT(2.0)) < 1.0 / 20.0
        ), "skew vT of cold dehnendf in a flat rotation curve is not close to zero at R=2"
    return None
# Tests for cold population, power-law rotation curve: <vt> =~ v_c
def test_dehnendf_cold_powerrise_skewvt():
    """Cold dehnendf, rising power-law curve: v_T skew ~ 0 (slow checks gated)."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvT(1.0)) < 1.0 / 20.0
        ), "skew vT of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.skewvT(0.5)) < 1.0 / 20.0
    ), "skew vT of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvT(2.0)) < 1.0 / 20.0
        ), "skew vT of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
    return None
def test_dehnendf_cold_powerfall_skewvt():
    """Cold dehnendf, falling power-law curve: v_T skew ~ 0 (slow checks gated)."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvT(1.0)) < 1.0 / 20.0
        ), "skew vT of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.skewvT(0.5)) < 1.0 / 20.0
    ), "skew vT of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvT(2.0)) < 1.0 / 20.0
        ), "skew vT of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
    return None
# Tests for cold population, flat rotation curve: <vr> = 0
def test_dehnendf_cold_flat_vr():
    """Cold dehnendf, flat rotation curve: <v_R> ~ 0 at several radii."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    assert (
        numpy.fabs(df.meanvR(1.0) - 0.0) < 10.0**-3.0
    ), "mean vR of cold dehnendf in a flat rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.meanvR(0.5) - 0.0) < 10.0**-3.0
    ), "mean vR of cold dehnendf in a flat rotation curve is not close to zero at R=0.5"
    assert (
        numpy.fabs(df.meanvR(2.0) - 0.0) < 10.0**-3.0
    ), "mean vR of cold dehnendf in a flat rotation curve is not close to zero at R=2"
    return None
# Tests for cold population, flat rotation curve: kurtosis = 0
def test_dehnendf_cold_flat_kurtosisvt():
    """Cold dehnendf, flat rotation curve: v_T kurtosis ~ 0 (slow checks gated)."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvT(1.0)) < 1.0 / 20.0
        ), "kurtosis vT of cold dehnendf in a flat rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.kurtosisvT(0.5)) < 1.0 / 20.0
    ), "kurtosis vT of cold dehnendf in a flat rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvT(2.0)) < 1.0 / 20.0
        ), "kurtosis vT of cold dehnendf in a flat rotation curve is not close to zero at R=2"
    return None
# Tests for cold population, power-law rotation curve: kurtosis = 0
def test_dehnendf_cold_powerrise_kurtosisvt():
    """Cold dehnendf, rising power-law curve: v_T kurtosis ~ 0 (gated checks)."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvT(1.0)) < 1.0 / 20.0
        ), "kurtosis vT of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.kurtosisvT(0.5)) < 1.0 / 20.0
    ), "kurtosis vT of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvT(2.0)) < 1.0 / 20.0
        ), "kurtosis vT of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
def test_dehnendf_cold_powerfall_kurtosisvt():
    """Cold dehnendf, falling power-law curve: v_T kurtosis ~ 0 (gated checks)."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvT(1.0)) < 1.0 / 20.0
        ), "kurtosis vT of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.kurtosisvT(0.5)) < 1.0 / 20.0
    ), "kurtosis vT of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvT(2.0)) < 1.0 / 20.0
        ), "kurtosis vT of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
    return None
# Tests for cold population, power-law rotation curve: <vr> = 0
def test_dehnendf_cold_powerrise_vr():
    """Cold dehnendf, rising power-law rotation curve: <v_R> ~ 0."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    assert (
        numpy.fabs(df.meanvR(1.0) - 0.0) < 10.0**-3.0
    ), "mean vR of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.meanvR(0.5) - 0.0) < 10.0**-3.0
    ), "mean vR of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
    assert (
        numpy.fabs(df.meanvR(2.0) - 0.0) < 10.0**-3.0
    ), "mean vR of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
def test_dehnendf_cold_powerfall_vr():
    """Cold dehnendf, falling power-law rotation curve: <v_R> ~ 0."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    assert (
        numpy.fabs(df.meanvR(1.0) - 0.0) < 10.0**-3.0
    ), "mean vR of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.meanvR(0.5) - 0.0) < 10.0**-3.0
    ), "mean vR of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
    assert (
        numpy.fabs(df.meanvR(2.0) - 0.0) < 10.0**-3.0
    ), "mean vR of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
    return None
# Tests for cold population, flat rotation curve: <vr> = 0
def test_dehnendf_cold_flat_skewvr():
    """Cold dehnendf, flat rotation curve: v_R skew ~ 0 (slow checks gated)."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvR(1.0) - 0.0) < 10.0**-3.0
        ), "skew vR of cold dehnendf in a flat rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.skewvR(0.5) - 0.0) < 10.0**-3.0
    ), "skew vR of cold dehnendf in a flat rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvR(2.0) - 0.0) < 10.0**-3.0
        ), "skew vR of cold dehnendf in a flat rotation curve is not close to zero at R=2"
    return None
# Tests for cold population, power-law rotation curve: <vr> = 0
def test_dehnendf_cold_powerrise_skewvr():
    """Cold dehnendf, rising power-law curve: v_R skew ~ 0 (gated checks)."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvR(1.0) - 0.0) < 10.0**-3.0
        ), "skew vR of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.skewvR(0.5) - 0.0) < 10.0**-3.0
    ), "skew vR of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.skewvR(2.0) - 0.0) < 10.0**-3.0
        ), "skew vR of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
def test_dehnendf_cold_powerfall_skewvr():
# Falling rotation curve
beta = -0.2
df = dehnendf(
profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
)
if not _FEWERLONGINTEGRALS:
assert (
numpy.fabs(df.skewvR(1.0) - 0.0) < 10.0**-3.0
), "skew vR of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
assert (
numpy.fabs(df.skewvR(0.5) - 0.0) < 10.0**-3.0
), "skew vR of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
if not _FEWERLONGINTEGRALS:
assert (
numpy.fabs(df.skewvR(2.0) - 0.0) < 10.0**-3.0
), "skew vR of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
return None
# Tests for cold population, flat rotation curve: kurtosis = 0
def test_dehnendf_cold_flat_kurtosisvr():
    """The vR kurtosis of a cold dehnendf vanishes for a flat rotation curve."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    # R=1 and R=2 involve long integrals and can be skipped
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvR(1.0)) < 1.0 / 20.0
        ), "kurtosis vR of cold dehnendf in a flat rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.kurtosisvR(0.5)) < 1.0 / 20.0
    ), "kurtosis vR of cold dehnendf in a flat rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvR(2.0)) < 1.0 / 20.0
        ), "kurtosis vR of cold dehnendf in a flat rotation curve is not close to zero at R=2"
    return None


# Tests for cold population, power-law rotation curve: kurtosis = 0
def test_dehnendf_cold_powerrise_kurtosisvr():
    """The vR kurtosis of a cold dehnendf vanishes for a rising rotation curve."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvR(1.0)) < 1.0 / 20.0
        ), "kurtosis vR of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.kurtosisvR(0.5)) < 1.0 / 20.0
    ), "kurtosis vR of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvR(2.0)) < 1.0 / 20.0
        ), "kurtosis vR of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
    # Explicit return for consistency with the sibling tests
    return None


def test_dehnendf_cold_powerfall_kurtosisvr():
    """The vR kurtosis of a cold dehnendf vanishes for a falling rotation curve."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvR(1.0)) < 1.0 / 20.0
        ), "kurtosis vR of cold dehnendf in a power-law rotation curve is not close to zero at R=1"
    assert (
        numpy.fabs(df.kurtosisvR(0.5)) < 1.0 / 20.0
    ), "kurtosis vR of cold dehnendf in a power-law rotation curve is not close to zero at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.kurtosisvR(2.0)) < 1.0 / 20.0
        ), "kurtosis vR of cold dehnendf in a power-law rotation curve is not close to zero at R=2"
    return None
# Tests for cold population, flat rotation curve: A = 0.5
def test_dehnendf_cold_flat_oortA():
    """Oort A of a cold dehnendf matches A = 0.5 vc/R for a flat rotation curve."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    assert (
        numpy.fabs(df.oortA(1.0) - 0.5 * 1.0 / 1.0) < 10.0**-3.0
    ), "Oort A of cold dehnendf in a flat rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortA(0.5) - 0.5 * 1.0 / 0.5) < 10.0**-3.0
    ), "Oort A of cold dehnendf in a flat rotation curve is not close to expected at R=0.5"
    # one w/ Romberg
    assert (
        numpy.fabs(df.oortA(2.0, romberg=True) - 0.5 * 1.0 / 2.0) < 10.0**-3.0
    ), "Oort A of cold dehnendf in a flat rotation curve is not close to expected at R=2"
    return None


# Tests for cold population, power-law rotation curve: A
def test_dehnendf_cold_powerrise_oortA():
    """Oort A of a cold dehnendf matches A = 0.5 vc/R (1-beta) for a rising curve."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    # The R=1 check is a long integral and can be skipped
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortA(1.0) - 0.5 * 1.0 / 1.0 * (1.0 - beta)) < 10.0**-3.0
        ), "Oort A of cold dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortA(0.5) - 0.5 * (0.5) ** beta / 0.5 * (1.0 - beta))
        < 10.0**-3.0
    ), "Oort A of cold dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    # one w/ Romberg
    assert (
        numpy.fabs(
            df.oortA(2.0, romberg=True) - 0.5 * (2.0) ** beta / 2.0 * (1.0 - beta)
        )
        < 10.0**-3.0
    ), "Oort A of cold dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None


def test_dehnendf_cold_powerfall_oortA():
    """Oort A of a cold dehnendf matches A = 0.5 vc/R (1-beta) for a falling curve."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortA(1.0) - 0.5 * 1.0 / 1.0 * (1.0 - beta)) < 10.0**-3.0
        ), "Oort A of cold dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortA(0.5) - 0.5 * (0.5) ** beta / 0.5 * (1.0 - beta))
        < 10.0**-3.0
    ), "Oort A of cold dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    # One w/ Romberg
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(
                df.oortA(2.0, romberg=True) - 0.5 * (2.0) ** beta / 2.0 * (1.0 - beta)
            )
            < 10.0**-3.0
        ), "Oort A of cold dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None


# Tests for cold population, flat rotation curve: B = -0.5
def test_dehnendf_cold_flat_oortB():
    """Oort B of a cold dehnendf matches B = -0.5 vc/R for a flat rotation curve."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    assert (
        numpy.fabs(df.oortB(1.0) + 0.5 * 1.0 / 1.0) < 10.0**-3.0
    ), "Oort B of cold dehnendf in a flat rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortB(0.5) + 0.5 * 1.0 / 0.5) < 10.0**-3.0
    ), "Oort B of cold dehnendf in a flat rotation curve is not close to expected at R=0.5"
    assert (
        numpy.fabs(df.oortB(2.0) + 0.5 * 1.0 / 2.0) < 10.0**-3.0
    ), "Oort B of cold dehnendf in a flat rotation curve is not close to expected at R=2"
    return None


# Tests for cold population, power-law rotation curve: B
def test_dehnendf_cold_powerrise_oortB():
    """Oort B of a cold dehnendf matches B = -0.5 vc/R (1+beta) for a rising curve."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    # R=1 and R=2 involve long integrals and can be skipped
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortB(1.0) + 0.5 * 1.0 / 1.0 * (1.0 + beta)) < 10.0**-3.0
        ), "Oort B of cold dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortB(0.5) + 0.5 * (0.5) ** beta / 0.5 * (1.0 + beta))
        < 10.0**-3.0
    ), "Oort B of cold dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortB(2.0) + 0.5 * (2.0) ** beta / 2.0 * (1.0 + beta))
            < 10.0**-3.0
        ), "Oort B of cold dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None


def test_dehnendf_cold_powerfall_oortB():
    """Oort B of a cold dehnendf matches B = -0.5 vc/R (1+beta) for a falling curve."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortB(1.0) + 0.5 * 1.0 / 1.0 * (1.0 + beta)) < 10.0**-3.0
        ), "Oort B of cold dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortB(0.5) + 0.5 * (0.5) ** beta / 0.5 * (1.0 + beta))
        < 10.0**-3.0
    ), "Oort B of cold dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortB(2.0) + 0.5 * (2.0) ** beta / 2.0 * (1.0 + beta))
            < 10.0**-3.0
        ), "Oort B of cold dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None


# Tests for cold population, flat rotation curve: C = 0
def test_dehnendf_cold_flat_oortC():
    """Oort C of a cold dehnendf vanishes for a flat rotation curve."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    assert (
        numpy.fabs(df.oortC(1.0)) < 10.0**-3.0
    ), "Oort C of cold dehnendf in a flat rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortC(0.5)) < 10.0**-3.0
    ), "Oort C of cold dehnendf in a flat rotation curve is not close to expected at R=0.5"
    assert (
        numpy.fabs(df.oortC(2.0)) < 10.0**-3.0
    ), "Oort C of cold dehnendf in a flat rotation curve is not close to expected at R=2"
    return None


# Tests for cold population, power-law rotation curve: C
def test_dehnendf_cold_powerrise_oortC():
    """Oort C of a cold dehnendf vanishes for a rising power-law rotation curve."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    # R=1 and R=2 involve long integrals and can be skipped
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortC(1.0)) < 10.0**-3.0
        ), "Oort C of cold dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortC(0.5)) < 10.0**-3.0
    ), "Oort C of cold dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortC(2.0)) < 10.0**-3.0
        ), "Oort C of cold dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None


def test_dehnendf_cold_powerfall_oortC():
    """Oort C of a cold dehnendf vanishes for a falling power-law rotation curve."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortC(1.0)) < 10.0**-3.0
        ), "Oort C of cold dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortC(0.5)) < 10.0**-3.0
    ), "Oort C of cold dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortC(2.0)) < 10.0**-3.0
        ), "Oort C of cold dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None


# Tests for cold population, flat rotation curve: K = 0
def test_dehnendf_cold_flat_oortK():
    """Oort K of a cold dehnendf vanishes for a flat rotation curve."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    assert (
        numpy.fabs(df.oortK(1.0)) < 10.0**-3.0
    ), "Oort K of cold dehnendf in a flat rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortK(0.5)) < 10.0**-3.0
    ), "Oort K of cold dehnendf in a flat rotation curve is not close to expected at R=0.5"
    assert (
        numpy.fabs(df.oortK(2.0)) < 10.0**-3.0
    ), "Oort K of cold dehnendf in a flat rotation curve is not close to expected at R=2"
    return None


# Tests for cold population, power-law rotation curve: K
def test_dehnendf_cold_powerrise_oortK():
    """Oort K of a cold dehnendf vanishes for a rising power-law rotation curve."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    # R=1 and R=2 involve long integrals and can be skipped
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortK(1.0)) < 10.0**-3.0
        ), "Oort K of cold dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortK(0.5)) < 10.0**-3.0
    ), "Oort K of cold dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortK(2.0)) < 10.0**-3.0
        ), "Oort K of cold dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None


def test_dehnendf_cold_powerfall_oortK():
    """Oort K of a cold dehnendf vanishes for a falling power-law rotation curve."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortK(1.0)) < 10.0**-3.0
        ), "Oort K of cold dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.oortK(0.5)) < 10.0**-3.0
    ), "Oort K of cold dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    if not _FEWERLONGINTEGRALS:
        assert (
            numpy.fabs(df.oortK(2.0)) < 10.0**-3.0
        ), "Oort K of cold dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None
# Tests for cold population, flat rotation curve: sigma_R^2 / sigma_T^2 = kappa^2 / Omega^2
def test_dehnendf_cold_flat_srst():
    """sigma_R^2/sigma_T^2 equals kappa^2/Omega^2 = 2 for a flat rotation curve."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    assert (
        numpy.fabs(df.sigmaR2(1.0) / df.sigmaT2(1.0) - 2.0) < 10.0**-2.0
    ), "sigma_R^2 / sigma_T^2 of cool dehnendf in a flat rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.sigmaR2(0.5) / df.sigmaT2(0.5) - 2.0) < 10.0**-2.0
    ), "sigma_R^2 / sigma_T^2 of cool dehnendf in a flat rotation curve is not close to expected at R=0.5"
    assert (
        numpy.fabs(df.sigmaR2(2.0) / df.sigmaT2(2.0) - 2.0) < 10.0**-2.0
    ), "sigma_R^2 / sigma_T^2 of cool dehnendf in a flat rotation curve is not close to expected at R=2"
    return None


# Tests for cold population, power-law rotation curve: sigma_R^2 / sigma_T^2 = kappa^2 / Omega^2
def test_dehnendf_cold_powerrise_srst():
    """sigma_R^2/sigma_T^2 equals 2/(1+beta) for a rising power-law rotation curve."""
    # Rising rotation curve
    beta = 0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    assert (
        numpy.fabs(df.sigmaR2(1.0) / df.sigmaT2(1.0) - 2.0 / (1.0 + beta)) < 10.0**-2.0
    ), "sigma_R^2 / sigma_T^2 of cool dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.sigmaR2(0.5) / df.sigmaT2(0.5) - 2.0 / (1.0 + beta)) < 10.0**-2.0
    ), "sigma_R^2 / sigma_T^2 of cool dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    assert (
        numpy.fabs(df.sigmaR2(2.0) / df.sigmaT2(2.0) - 2.0 / (1.0 + beta)) < 10.0**-2.0
    ), "sigma_R^2 / sigma_T^2 of cool dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None


def test_dehnendf_cold_powerfall_srst():
    """sigma_R^2/sigma_T^2 equals 2/(1+beta) for a falling power-law rotation curve."""
    # Falling rotation curve
    beta = -0.2
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=beta, correct=False
    )
    assert (
        numpy.fabs(df.sigmaR2(1.0) / df.sigmaT2(1.0) - 2.0 / (1.0 + beta)) < 10.0**-2.0
    ), "sigma_R^2 / sigma_T^2 of cool dehnendf in a power-law rotation curve is not close to expected at R=1"
    assert (
        numpy.fabs(df.sigmaR2(0.5) / df.sigmaT2(0.5) - 2.0 / (1.0 + beta)) < 10.0**-2.0
    ), "sigma_R^2 / sigma_T^2 of cool dehnendf in a power-law rotation curve is not close to expected at R=0.5"
    assert (
        numpy.fabs(df.sigmaR2(2.0) / df.sigmaT2(2.0) - 2.0 / (1.0 + beta)) < 10.0**-2.0
    ), "sigma_R^2 / sigma_T^2 of cool dehnendf in a power-law rotation curve is not close to expected at R=2"
    return None
def test_targetSigma2():
    """targetSigma2 reproduces the input radial-dispersion profile of dehnendf."""
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.1), beta=0.0, correct=False
    )
    tol = 10.0**-8.0
    msg = "targetSigma2 for dehnendf does not agree with input"
    # At the reference radius R=1, sigma_R^2 equals the input value squared
    assert numpy.fabs(df.targetSigma2(1.0) - 0.1**2.0) < tol, msg
    # Exponential dispersion profile with scale length 0.5
    expected = 0.1**2.0 * numpy.exp(-(0.3 - 1.0) / 0.5)
    assert numpy.fabs(df.targetSigma2(0.3) - expected) < tol, msg
    # Logarithmic form of the same profile
    logexpected = numpy.log(0.1) * 2.0 - (3.0 - 1.0) / 0.5
    assert numpy.fabs(df.targetSigma2(3.0, log=True) - logexpected) < tol, msg
    return None
def test_targetSurfacemass():
    """targetSurfacemass reproduces the input exponential surface-density profile."""
    beta = 0.0
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.1), beta=beta, correct=False
    )
    # Exponential disk with scale length 1/3
    assert (
        numpy.fabs(df.targetSurfacemass(1.0) - numpy.exp(-1.0 / 0.3333333333333333))
        < 10.0**-8.0
    ), "targetSurfacemass for dehnendf does not agree with input"
    assert (
        numpy.fabs(df.targetSurfacemass(0.3) - numpy.exp(-0.3 / 0.3333333333333333))
        < 10.0**-8.0
    ), "targetSurfacemass for dehnendf does not agree with input"
    # Logarithmic form
    assert (
        numpy.fabs(df.targetSurfacemass(3.0, log=True) + 3.0 / 0.3333333333333333)
        < 10.0**-8.0
    ), "targetSurfacemass for dehnendf does not agree with input"
    return None
def test_targetSurfacemassLOS():
    """targetSurfacemassLOS agrees with the input profile along easy directions."""
    beta = 0.0
    df = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.1), beta=beta, correct=False
    )
    # Some easy directions in l
    # l=0: the point at distance d=0.2 lies at R = 1 - 0.2 = 0.8
    assert (
        numpy.fabs(
            df.targetSurfacemassLOS(0.2, l=0.0, deg=True)
            - 0.2 * numpy.exp(-0.8 / 0.3333333333333333)
        )
        < 10.0**-8.0
    ), "targetSurfacemassLOS for dehnendf does not agree with input"
    # l=180 degrees: R = 1 + 0.2 = 1.2
    assert (
        numpy.fabs(
            df.targetSurfacemassLOS(0.2, l=180.0, deg=True)
            - 0.2 * numpy.exp(-1.2 / 0.3333333333333333)
        )
        < 10.0**-8.0
    ), "targetSurfacemassLOS for dehnendf does not agree with input"
    # Same direction specified in radians
    assert (
        numpy.fabs(
            df.targetSurfacemassLOS(0.2, l=numpy.pi, deg=False)
            - 0.2 * numpy.exp(-1.2 / 0.3333333333333333)
        )
        < 10.0**-8.0
    ), "targetSurfacemassLOS for dehnendf does not agree with input"
    # Logarithmic form at l=90 degrees; R from the law of cosines
    assert (
        numpy.fabs(
            df.targetSurfacemassLOS(0.2, l=numpy.pi / 2.0, log=True, deg=False)
            - numpy.log(0.2)
            + numpy.sqrt(1.0 + 0.2**2.0 - 2.0 * 0.2 * numpy.cos(numpy.pi / 2.0))
            / 0.3333333333333333
        )
        < 10.0**-8.0
    ), "targetSurfacemassLOS for dehnendf does not agree with input"
    return None
def test_cold_surfacemass():
    """For a nearly cold dehnendf the true surface mass matches the target profile."""
    dfc = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    assert (
        numpy.fabs(
            numpy.log(dfc.surfacemass(0.9)) - numpy.log(dfc.targetSurfacemass(0.9))
        )
        < 0.01
    ), "True surfacemass deviates more from target surfacemass for cold Dehnen DF than expected"
    assert (
        numpy.fabs(
            numpy.log(dfc.surfacemass(0.5)) - numpy.log(dfc.targetSurfacemass(0.5))
        )
        < 0.01
    ), "True surfacemass deviates more from target surfacemass for cold Dehnen DF than expected"
    assert (
        numpy.fabs(
            numpy.log(dfc.surfacemass(2.0)) - numpy.log(dfc.targetSurfacemass(2.0))
        )
        < 0.01
    ), "True surfacemass deviates more from target surfacemass for cold Dehnen DF than expected"
    return None


def test_surfacemass():
    """For a warm dehnendf the true surface mass stays near the target profile."""
    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    assert (
        numpy.fabs(
            numpy.log(dfc.surfacemass(0.9)) - numpy.log(dfc.targetSurfacemass(0.9))
        )
        < 0.05
    ), "True surfacemass deviates more from target surfacemass for Dehnen DF with documentation-example parameters than expected"
    # Larger deviation tolerated near the center
    assert (
        numpy.fabs(
            numpy.log(dfc.surfacemass(0.05)) - numpy.log(dfc.targetSurfacemass(0.05))
        )
        < 0.5
    ), "True surfacemass deviates more from target surfacemass for Dehnen DF with documentation-example parameters than expected"
    # relative=True returns surfacemass/targetSurfacemass, so its log should be ~0
    assert (
        numpy.fabs(numpy.log(dfc.surfacemass(4.0, romberg=True, relative=True))) < 0.05
    ), "True surfacemass deviates more from target surfacemass for Dehnen DF with documentation-example parameters than expected"
    return None
def test_cold_sigma2surfacemass():
    """For a nearly cold dehnendf, sigma2surfacemass matches targetSigma2*targetSurfacemass."""
    dfc = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    assert (
        numpy.fabs(
            numpy.log(dfc.sigma2surfacemass(0.9))
            - numpy.log(dfc.targetSigma2(0.9) * dfc.targetSurfacemass(0.9))
        )
        < 0.01
    ), "True sigma2surfacemass deviates more from target sigma2surfacemass for cold Dehnen DF than expected"
    assert (
        numpy.fabs(
            numpy.log(dfc.sigma2surfacemass(0.5))
            - numpy.log(dfc.targetSigma2(0.5) * dfc.targetSurfacemass(0.5))
        )
        < 0.01
    ), "True sigma2surfacemass deviates more from target sigma2surfacemass for cold Dehnen DF than expected"
    assert (
        numpy.fabs(
            numpy.log(dfc.sigma2surfacemass(2.0))
            - numpy.log(dfc.targetSigma2(2.0) * dfc.targetSurfacemass(2.0))
        )
        < 0.01
    ), "True sigma2surfacemass deviates more from target sigma2surfacemass for cold Dehnen DF than expected"
    return None


def test_sigma2surfacemass():
    """For a warm dehnendf, sigma2surfacemass stays near targetSigma2*targetSurfacemass."""
    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    assert (
        numpy.fabs(
            numpy.log(dfc.sigma2surfacemass(0.9))
            - numpy.log(dfc.targetSigma2(0.9) * dfc.targetSurfacemass(0.9))
        )
        < 0.05
    ), "True sigma2surfacemass deviates more from target sigma2surfacemass for Dehnen DF with documentation-example parameters than expected"
    # Larger deviation tolerated near the center
    assert (
        numpy.fabs(
            numpy.log(dfc.sigma2surfacemass(0.3))
            - numpy.log(dfc.targetSigma2(0.3) * dfc.targetSurfacemass(0.3))
        )
        < 0.2
    ), "True sigma2surfacemass deviates more from target sigma2surfacemass for Dehnen DF with documentation-example parameters than expected"
    # relative=True divides by the target, so its log should be ~0
    assert (
        numpy.fabs(numpy.log(dfc.sigma2surfacemass(3.0, relative=True, romberg=True)))
        < 0.1
    ), "True sigma2surfacemass deviates more from target sigma2surfacemass for Dehnen DF with documentation-example parameters than expected"
    return None
def test_vmomentsurfacemass():
    """Check vmomentsurfacemass against its known special cases.

    (n,m)=(0,0) is the surface mass, (2,0) is sigma2surfacemass, and any odd
    power of vR integrates to zero.
    """
    # Test that vmomentsurfacemass gives reasonable results
    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.4))
    assert (
        numpy.fabs(dfc.vmomentsurfacemass(0.9, 0.0, 0.0) - dfc.surfacemass(0.9))
        < 10.0**-8.0
    ), "vmomentsurfacemass with (n,m) = (0,0) is not equal to surfacemass"
    assert (
        numpy.fabs(
            dfc.vmomentsurfacemass(0.9, 0.0, 0.0, relative=True)
            - dfc.surfacemass(0.9) / dfc.targetSurfacemass(0.9)
        )
        < 10.0**-8.0
    ), "vmomentsurfacemass with (n,m) = (0,0) and relative=True is not equal to surfacemass/targetSurfacemass"
    assert (
        numpy.fabs(dfc.vmomentsurfacemass(0.9, 2.0, 0.0) - dfc.sigma2surfacemass(0.9))
        < 10.0**-8.0
    ), "vmomentsurfacemass with (n,m) = (2,0) is not equal to sigma2surfacemass"
    # Float exponents exercise the generic integration path
    assert (
        numpy.fabs(dfc.vmomentsurfacemass(0.9, 1.0, 1.0, romberg=True)) < 10.0**-8.0
    ), "vmomentsurfacemass with (n,m) = (1.,1.) is not equal to zero (not automatically zero)"
    # Integer exponents can hit the analytic shortcut
    assert (
        numpy.fabs(dfc.vmomentsurfacemass(0.9, 1, 1)) < 10.0**-8.0
    ), "vmomentsurfacemass with (n,m) = (1,1) is not equal to zero"
    return None
def test_vmomentsurfacemass_physical():
    """Physical-output mode scales vmomentsurfacemass by surfdens * vo^n.

    The (n,m) velocity moment of the surface density picks up one factor of vo
    per power of velocity on top of the surface-density conversion.
    """
    # Test that vmomentsurfacemass gives correct physical results
    from galpy.util import conversion

    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    ro, vo = 7.0, 230.0
    # (0,0): plain surface density conversion
    assert (
        numpy.fabs(
            dfc.vmomentsurfacemass(0.9, 0.0, 0.0, use_physical=True, ro=ro, vo=vo)
            - dfc.vmomentsurfacemass(0.9, 0.0, 0.0)
            * conversion.surfdens_in_msolpc2(vo, ro)
        )
        < 10.0**-8.0
    ), "vmomentsurfacemass with (n,m) = (0,0) does not return the correct physical output"
    # (1,0): one extra factor of vo
    assert (
        numpy.fabs(
            dfc.vmomentsurfacemass(0.9, 1.0, 0.0, use_physical=True, ro=ro, vo=vo)
            - dfc.vmomentsurfacemass(0.9, 1.0, 0.0)
            * vo
            * conversion.surfdens_in_msolpc2(vo, ro)
        )
        < 10.0**-8.0
    ), "vmomentsurfacemass with (n,m) = (1,0) does not return the correct physical output"
    # (1,2): three powers of velocity -> vo^3
    assert (
        numpy.fabs(
            dfc.vmomentsurfacemass(0.9, 1.0, 2.0, use_physical=True, ro=ro, vo=vo)
            - dfc.vmomentsurfacemass(0.9, 1.0, 2.0)
            * vo**3.0
            * conversion.surfdens_in_msolpc2(vo, ro)
        )
        < 10.0**-8.0
    ), "vmomentsurfacemass with (n,m) = (1,2) does not return the correct physical output"
    return None
def test_cold_surfacemassLOS():
    """surfacemassLOS of a nearly cold dehnendf matches d * Sigma_target(R(d,l))."""
    dfc = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.01), beta=0.0, correct=False
    )
    # l=0, d=0.1 -> R = 0.9
    assert (
        numpy.fabs(
            numpy.log(dfc.surfacemassLOS(0.1, 0.0, target=False))
            - numpy.log(0.1 * dfc.targetSurfacemass(0.9))
        )
        < 0.01
    ), "True surfacemassLOS deviates more from target surfacemassLOS for cold Dehnen DF than expected"
    # l=30 deg, d=cos(30 deg) -> R = cos(60 deg) by geometry
    assert (
        numpy.fabs(
            numpy.log(
                dfc.surfacemassLOS(
                    numpy.cos(numpy.pi / 6.0), numpy.pi / 6.0, target=False, deg=False
                )
            )
            - numpy.log(
                numpy.cos(numpy.pi / 6.0)
                * dfc.targetSurfacemass(numpy.cos(numpy.pi / 3.0))
            )
        )
        < 0.01
    ), "True surfacemassLOS deviates more from target surfacemassLOS for cold Dehnen DF than expected"
    # target=True evaluates the target profile directly
    assert (
        numpy.fabs(
            numpy.log(
                dfc.surfacemassLOS(
                    numpy.cos(numpy.pi / 3.0), numpy.pi / 3.0, deg=False, target=True
                )
            )
            - numpy.log(
                numpy.cos(numpy.pi / 3.0)
                * dfc.targetSurfacemass(numpy.cos(numpy.pi / 6.0))
            )
        )
        < 0.01
    ), "True surfacemassLOS deviates more from target surfacemassLOS for cold Dehnen DF than expected"
    # relative=True divides out the target surface density, leaving just d
    assert (
        numpy.fabs(
            numpy.log(
                dfc.surfacemassLOS(
                    numpy.cos(numpy.pi / 3.0),
                    numpy.pi / 3.0,
                    deg=False,
                    relative=True,
                    target=True,
                )
            )
            - numpy.log(numpy.cos(numpy.pi / 3.0))
        )
        < 0.01
    ), "True surfacemassLOS deviates more from target surfacemassLOS for cold Dehnen DF than expected"
    return None


def test_warm_surfacemassLOS():
    """surfacemassLOS of a warm dehnendf stays close to d * Sigma(R(d,l))."""
    dfc = dehnendf(
        profileParams=(0.3333333333333333, 1.0, 0.1), beta=0.0, correct=False
    )
    # l=0, d=0.1 -> R = 0.9; compare against the true (not target) surfacemass
    assert (
        numpy.fabs(
            numpy.log(dfc.surfacemassLOS(0.1, 0.0, target=False))
            - numpy.log(0.1 * dfc.surfacemass(0.9))
        )
        < 10.0**-6.0
    ), "surfacemassLOS deviates more from surfacemass for warm Dehnen DF than expected"
    assert (
        numpy.fabs(
            numpy.log(
                dfc.surfacemassLOS(
                    numpy.cos(numpy.pi / 6.0), numpy.pi / 6.0, target=False, deg=False
                )
            )
            - numpy.log(
                numpy.cos(numpy.pi / 6.0) * dfc.surfacemass(numpy.cos(numpy.pi / 3.0))
            )
        )
        < 0.01
    ), "surfacemassLOS deviates more from target surfacemass for warm Dehnen DF than expected"
    # target=True compares to the target profile instead
    assert (
        numpy.fabs(
            numpy.log(
                dfc.surfacemassLOS(
                    numpy.cos(numpy.pi / 3.0), numpy.pi / 3.0, deg=False, target=True
                )
            )
            - numpy.log(
                numpy.cos(numpy.pi / 3.0)
                * dfc.targetSurfacemass(numpy.cos(numpy.pi / 6.0))
            )
        )
        < 0.01
    ), "surfacemassLOS w/ target deviates more from target surfacemassLOS for warm Dehnen DF than expected"
    # relative=True divides out the target surface density, leaving just d
    assert (
        numpy.fabs(
            numpy.log(
                dfc.surfacemassLOS(
                    numpy.cos(numpy.pi / 3.0),
                    numpy.pi / 3.0,
                    deg=False,
                    relative=True,
                    target=True,
                )
            )
            - numpy.log(numpy.cos(numpy.pi / 3.0))
        )
        < 0.01
    ), "surfacemassLOS w/ target deviates more from target surfacemass for warm Dehnen DF than expected"
    return None
def test_dehnendf_call_sanity():
    """The dehnendf pdf should peak near (vR, vT) = (0, <vT>)."""
    # Sanity checking of dehnendf's call function
    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    meanvt = dfc.meanvT(0.7)
    peakval = dfc(numpy.array([0.7, 0.0, meanvt]))
    offpeak = [
        numpy.array([0.7, 0.0, meanvt / 2.0]),
        numpy.array([0.7, 0.0, meanvt * 2.0]),
        numpy.array([0.7, -0.1, meanvt]),
        numpy.array([0.7, 0.1, meanvt]),
    ]
    for point in offpeak:
        assert peakval > dfc(point), "dehnendf does not peak near (vR,vT) = (0,meanvT)"
    return None
def test_shudf_call_sanity_flat():
    """The shudf pdf peaks near (vR, vT) = (0, <vT>) for a flat rotation curve."""
    # Sanity checking of shudf's call function
    dfc = shudf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    meanvt = dfc.meanvT(0.7)
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, 0.0, meanvt / 2.0])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, 0.0, meanvt * 2.0])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, -0.1, meanvt])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, 0.1, meanvt])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    # The Shu DF assigns zero probability to counter-rotating (vT < 0) orbits
    assert (
        dfc(numpy.array([0.7, 0.0, -0.1])) == 0.0
    ), "shudf not zero for counter-rotating orbits"
    return None


def test_shudf_call_sanity_powerfall():
    """The shudf pdf peaks near (vR, vT) = (0, <vT>) for a falling rotation curve."""
    # Sanity checking of shudf's call function
    dfc = shudf(beta=-0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    meanvt = dfc.meanvT(0.7)
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, 0.0, meanvt / 2.0])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, 0.0, meanvt * 2.0])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, -0.1, meanvt])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, 0.1, meanvt])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    return None


def test_shudf_call_sanity_powerrise():
    """The shudf pdf peaks near (vR, vT) = (0, <vT>) for a rising rotation curve."""
    # Sanity checking of shudf's call function
    dfc = shudf(beta=0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    # Wider integration range for the mean in the rising-curve case
    meanvt = dfc.meanvT(0.7, nsigma=3.0)
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, 0.0, meanvt / 2.0])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, 0.0, meanvt * 2.0])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, -0.1, meanvt])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    assert dfc(numpy.array([0.7, 0.0, meanvt])) > dfc(
        numpy.array([0.7, 0.1, meanvt])
    ), "shudf does not peak near (vR,vT) = (0,meanvT)"
    return None
def test_call_diffinputs():
    """diskdf.__call__ accepts arrays, Orbits, and (E, L) pairs interchangeably."""
    from galpy.orbit import Orbit

    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT, phi = 0.8, 0.4, 1.1, 2.0
    to = Orbit([R, vR, vT, phi])
    tao = Orbit([R, vR, vT])
    # R,vR,vT,phi vs R,vR,vT
    assert (
        numpy.fabs(dfc(numpy.array([R, vR, vT, phi])) - dfc(numpy.array([R, vR, vT])))
        < 10.0**-10.0
    ), "diskdf __call__ w/ array R,vR,vT,phi neq w/ array R,vR,vT"
    # orbit vs R,vR,vT
    assert (
        numpy.fabs(dfc(to) - dfc(numpy.array([R, vR, vT]))) < 10.0**-10.0
    ), "diskdf __call__ w/ orbit neq w/ array R,vR,vT"
    # axi orbit vs R,vR,vT
    assert (
        numpy.fabs(dfc(tao) - dfc(numpy.array([R, vR, vT]))) < 10.0**-10.0
    ), "diskdf __call__ w/ axi orbit neq w/ array R,vR,vT"
    # orbit w/ t vs R,vR,vT
    assert (
        numpy.fabs(dfc(to, 0.0) - dfc(numpy.array([R, vR, vT]))) < 10.0**-10.0
    ), "diskdf __call__ w/ orbit and t neq w/ array R,vR,vT"
    # axi orbit w/ t vs R,vR,vT
    assert (
        numpy.fabs(dfc(tao, 0.0) - dfc(numpy.array([R, vR, vT]))) < 10.0**-10.0
    ), "diskdf __call__ w/ axi orbit and t neq w/ array R,vR,vT"
    # list of orbit vs R,vR,vT
    assert (
        numpy.fabs(dfc([to]) - dfc(numpy.array([R, vR, vT]))) < 10.0**-10.0
    ), "diskdf __call__ w/ list of orbit neq w/ array R,vR,vT"
    # E,L vs R,vR,vT; E for the logarithmic (flat-rotation-curve) potential
    assert (
        numpy.fabs(
            dfc(numpy.log(R) + vR**2.0 / 2.0 + vT**2.0 / 2.0, R * vT)
            - dfc(numpy.array([R, vR, vT]))
        )
        < 10.0**-10.0
    ), "diskdf __call__ w/ E,L and t neq w/ array R,vR,vT"
    return None
def test_call_marginalizevperp():
    """marginalizeVperp agrees with a direct Riemann sum over the tangential velocity."""
    from galpy.orbit import Orbit

    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.4))
    # l=0
    R, vR = 1.8, 0.4
    vts = numpy.linspace(0.0, 1.5, 51)
    pvts = numpy.array([dfc(numpy.array([R, vR, vt])) for vt in vts])
    assert (
        numpy.fabs(
            numpy.sum(pvts) * (vts[1] - vts[0])
            - dfc(Orbit([R, vR, 0.0, 0.0]), marginalizeVperp=True)
        )
        < 10.0**-4.0
    ), "diskdf call w/ marginalizeVperp does not work"
    # Another l=0, where va > sigmaR1
    R, vR = 1.25, 0.4
    vts = numpy.linspace(0.0, 1.5, 51)
    pvts = numpy.array([dfc(numpy.array([R, vR, vt])) for vt in vts])
    assert (
        numpy.fabs(
            numpy.sum(pvts) * (vts[1] - vts[0])
            - dfc(Orbit([R, vR, 0.0, 0.0]), marginalizeVperp=True)
        )
        < 10.0**-4.0
    ), "diskdf call w/ marginalizeVperp does not work"
    # l=270
    R, vT = numpy.sin(numpy.pi / 6.0), 0.7  # l=30 degree
    vrs = numpy.linspace(-2.0, 2.0, 101)
    pvrs = numpy.array([dfc(numpy.array([R, vr, vT])) for vr in vrs])
    assert (
        numpy.fabs(
            numpy.sum(pvrs) * (vrs[1] - vrs[0])
            - dfc(Orbit([R, 0.0, vT, -numpy.pi / 3.0]), marginalizeVperp=True, nsigma=4)
        )
        < 10.0**-4.0
    ), "diskdf call w/ marginalizeVperp does not work"
    return None


def test_call_marginalizevlos():
    """marginalizeVlos agrees with a direct Riemann sum over the line-of-sight velocity."""
    from galpy.orbit import Orbit

    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.4))
    # l=0
    R, vT = 0.8, 0.7
    vrs = numpy.linspace(-2.0, 2.0, 101)
    pvrs = numpy.array([dfc(numpy.array([R, vr, vT])) for vr in vrs])
    assert (
        numpy.fabs(
            numpy.sum(pvrs) * (vrs[1] - vrs[0])
            - dfc(Orbit([R, 0.0, vT, 0.0]), marginalizeVlos=True)
        )
        < 10.0**-4.0
    ), "diskdf call w/ marginalizeVlos does not work"
    # l=270
    R, vR = numpy.sin(numpy.pi / 6.0), 0.4  # l=30 degree
    vts = numpy.linspace(-2.5, 2.5, 101)
    pvts = numpy.array([dfc(numpy.array([R, vR, vt])) for vt in vts])
    assert (
        numpy.fabs(
            numpy.sum(pvts) * (vts[1] - vts[0])
            - dfc(Orbit([R, vR, 0.0, -numpy.pi / 3.0]), marginalizeVlos=True, nsigma=4)
        )
        < 10.0**-4.0
    ), "diskdf call w/ marginalizeVlos does not work"
    return None
def test_dehnendf_dlnfdR_flat():
dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
dR = 10**-8.0
R, vR, vT = 0.8, 0.1, 0.9
Rn = R + dR
dR = Rn - R # representable number
dlnf = (
numpy.log(dfc(numpy.array([R + dR, vR, vT])))
- numpy.log(dfc(numpy.array([R, vR, vT])))
) / dR
assert (
numpy.fabs(dlnf - dfc._dlnfdR(R, vR, vT)) < 10.0**-6.0
), "dehnendf's dlnfdR does not work"
return None
def test_dehnendf_dlnfdR_powerfall():
dfc = dehnendf(beta=-0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
dR = 10**-6.0
R, vR, vT = 0.8, 0.1, 0.9
Rn = R + dR
dR = Rn - R # representable number
dlnf = (
numpy.log(dfc(numpy.array([R + dR, vR, vT])))
- numpy.log(dfc(numpy.array([R, vR, vT])))
) / dR
assert (
numpy.fabs(dlnf - dfc._dlnfdR(R, vR, vT)) < 10.0**-6.0
), "dehnendf's dlnfdR does not work"
return None
def test_dehnendf_dlnfdR_powerrise():
dfc = dehnendf(beta=0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
dR = 10**-8.0
R, vR, vT = 0.8, 0.1, 0.9
Rn = R + dR
dR = Rn - R # representable number
dlnf = (
numpy.log(dfc(numpy.array([R + dR, vR, vT])))
- numpy.log(dfc(numpy.array([R, vR, vT])))
) / dR
assert (
numpy.fabs(dlnf - dfc._dlnfdR(R, vR, vT)) < 10.0**-6.0
), "dehnendf's dlnfdR does not work"
return None
def test_dehnendf_dlnfdvR_flat():
dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
dvR = 10**-8.0
R, vR, vT = 0.8, 0.1, 0.9
vRn = vR + dvR
dvR = vRn - vR # representable number
dlnf = (
numpy.log(dfc(numpy.array([R, vR + dvR, vT])))
- numpy.log(dfc(numpy.array([R, vR, vT])))
) / dvR
assert (
numpy.fabs(dlnf - dfc._dlnfdvR(R, vR, vT)) < 10.0**-6.0
), "dehnendf's dlnfdvR does not work"
return None
def test_dehnendf_dlnfdvR_powerfall():
dfc = dehnendf(beta=-0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
dvR = 10**-8.0
R, vR, vT = 0.8, 0.1, 0.9
vRn = vR + dvR
dvR = vRn - vR # representable number
dlnf = (
numpy.log(dfc(numpy.array([R, vR + dvR, vT])))
- numpy.log(dfc(numpy.array([R, vR, vT])))
) / dvR
assert (
numpy.fabs(dlnf - dfc._dlnfdvR(R, vR, vT)) < 10.0**-6.0
), "dehnendf's dlnfdvR does not work"
return None
def test_dehnendf_dlnfdvR_powerrise():
    """Compare dehnendf's analytic dlnf/dvR to a finite difference (rising power-law curve)."""
    df = dehnendf(beta=0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vR + 10**-8.0) - vR
    numderiv = (
        numpy.log(df(numpy.array([R, vR + h, vT])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvR(R, vR, vT)) < 10.0**-6.0
    ), "dehnendf's dlnfdvR does not work"
    return None
def test_dehnendf_dlnfdvT_flat():
    """Compare dehnendf's analytic dlnf/dvT to a finite difference (flat rotation curve)."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vT + 10**-8.0) - vT
    numderiv = (
        numpy.log(df(numpy.array([R, vR, vT + h])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvT(R, vR, vT)) < 10.0**-6.0
    ), "dehnendf's dlnfdvT does not work"
    return None
def test_dehnendf_dlnfdvT_powerfall():
    """Compare dehnendf's analytic dlnf/dvT to a finite difference (falling power-law curve)."""
    df = dehnendf(beta=-0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vT + 10**-8.0) - vT
    numderiv = (
        numpy.log(df(numpy.array([R, vR, vT + h])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvT(R, vR, vT)) < 10.0**-6.0
    ), "dehnendf's dlnfdvT does not work"
    return None
def test_dehnendf_dlnfdvT_powerrise():
    """Compare dehnendf's analytic dlnf/dvT to a finite difference (rising power-law curve)."""
    df = dehnendf(beta=0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vT + 10**-8.0) - vT
    numderiv = (
        numpy.log(df(numpy.array([R, vR, vT + h])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvT(R, vR, vT)) < 10.0**-6.0
    ), "dehnendf's dlnfdvT does not work"
    return None
def test_dehnendf_dlnfdRe_flat():
    """Check dehnendf._dlnfdRe via the chain rule and finite differences (flat rotation curve)."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9

    def lnf_of(tR, tvR, tvT):
        return numpy.log(df(numpy.array([tR, tvR, tvT])))

    def RE_of(tR, tvR, tvT):
        # Radius of the circular orbit with this orbit's energy (flat rotation curve)
        return numpy.exp(tvR**2.0 / 2.0 + tvT**2.0 / 2.0 + numpy.log(tR) - 0.5)

    lnf0 = lnf_of(R, vR, vT)
    RE = RE_of(R, vR, vT)
    # Finite differences w.r.t. R, vR, vT; steps made exactly representable
    hR = (R + 10**-6.0) - R
    dlnfdR = (lnf_of(R + hR, vR, vT) - lnf0) / hR
    dRedR = (RE_of(R + hR, vR, vT) - RE) / hR
    hvR = (vR + 10**-6.0) - vR
    dlnfdvR = (lnf_of(R, vR + hvR, vT) - lnf0) / hvR
    dRedvR = (RE_of(R, vR + hvR, vT) - RE) / hvR
    hvT = (vT + 10**-6.0) - vT
    dlnfdvT = (lnf_of(R, vR, vT + hvT) - lnf0) / hvT
    dRedvT = (RE_of(R, vR, vT + hvT) - RE) / hvT
    # Invert the Jacobian of (RE, L, vR) w.r.t. (R, vR, vT) to pick out d/dRe
    inv = numpy.linalg.inv(
        numpy.array([[dRedR, dRedvR, dRedvT], [vT, 0.0, R], [0.0, 1.0, 0.0]])
    )
    chain = dlnfdR * inv[0, 0] + dlnfdvR * inv[1, 0] + dlnfdvT * inv[2, 0]
    assert (
        numpy.fabs(chain - df._dlnfdRe(R, vR, vT)) < 10.0**-5.0
    ), "dehnendf's dlnfdRe does not work"
    return None
def test_dehnendf_dlnfdRe_powerfall():
    """Check dehnendf._dlnfdRe via the chain rule and finite differences (falling power-law curve)."""
    beta = -0.2
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9

    def lnf_of(tR, tvR, tvT):
        return numpy.log(df(numpy.array([tR, tvR, tvT])))

    def RE_of(tR, tvR, tvT):
        # Radius of the circular orbit with this orbit's energy (power-law curve)
        E = tvR**2.0 / 2.0 + tvT**2.0 / 2.0 + 1.0 / 2.0 / beta * tR ** (2.0 * beta)
        return (2.0 * E / (1.0 + 1.0 / beta)) ** (1.0 / 2.0 / beta)

    lnf0 = lnf_of(R, vR, vT)
    RE = RE_of(R, vR, vT)
    # Finite differences w.r.t. R, vR, vT; steps made exactly representable
    hR = (R + 10**-6.0) - R
    dlnfdR = (lnf_of(R + hR, vR, vT) - lnf0) / hR
    dRedR = (RE_of(R + hR, vR, vT) - RE) / hR
    hvR = (vR + 10**-6.0) - vR
    dlnfdvR = (lnf_of(R, vR + hvR, vT) - lnf0) / hvR
    dRedvR = (RE_of(R, vR + hvR, vT) - RE) / hvR
    hvT = (vT + 10**-6.0) - vT
    dlnfdvT = (lnf_of(R, vR, vT + hvT) - lnf0) / hvT
    dRedvT = (RE_of(R, vR, vT + hvT) - RE) / hvT
    # Invert the Jacobian of (RE, L, vR) w.r.t. (R, vR, vT) to pick out d/dRe
    inv = numpy.linalg.inv(
        numpy.array([[dRedR, dRedvR, dRedvT], [vT, 0.0, R], [0.0, 1.0, 0.0]])
    )
    chain = dlnfdR * inv[0, 0] + dlnfdvR * inv[1, 0] + dlnfdvT * inv[2, 0]
    assert (
        numpy.fabs(chain - df._dlnfdRe(R, vR, vT)) < 10.0**-5.0
    ), "dehnendf's dlnfdRe does not work"
    return None
def test_dehnendf_dlnfdRe_powerrise():
    """Check dehnendf._dlnfdRe via the chain rule and finite differences (rising power-law curve)."""
    beta = 0.2
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9

    def lnf_of(tR, tvR, tvT):
        return numpy.log(df(numpy.array([tR, tvR, tvT])))

    def RE_of(tR, tvR, tvT):
        # Radius of the circular orbit with this orbit's energy (power-law curve)
        E = tvR**2.0 / 2.0 + tvT**2.0 / 2.0 + 1.0 / 2.0 / beta * tR ** (2.0 * beta)
        return (2.0 * E / (1.0 + 1.0 / beta)) ** (1.0 / 2.0 / beta)

    lnf0 = lnf_of(R, vR, vT)
    RE = RE_of(R, vR, vT)
    # Finite differences w.r.t. R, vR, vT; steps made exactly representable
    hR = (R + 10**-8.0) - R
    dlnfdR = (lnf_of(R + hR, vR, vT) - lnf0) / hR
    dRedR = (RE_of(R + hR, vR, vT) - RE) / hR
    hvR = (vR + 10**-8.0) - vR
    dlnfdvR = (lnf_of(R, vR + hvR, vT) - lnf0) / hvR
    dRedvR = (RE_of(R, vR + hvR, vT) - RE) / hvR
    hvT = (vT + 10**-8.0) - vT
    dlnfdvT = (lnf_of(R, vR, vT + hvT) - lnf0) / hvT
    dRedvT = (RE_of(R, vR, vT + hvT) - RE) / hvT
    # Invert the Jacobian of (RE, L, vR) w.r.t. (R, vR, vT) to pick out d/dRe
    inv = numpy.linalg.inv(
        numpy.array([[dRedR, dRedvR, dRedvT], [vT, 0.0, R], [0.0, 1.0, 0.0]])
    )
    chain = dlnfdR * inv[0, 0] + dlnfdvR * inv[1, 0] + dlnfdvT * inv[2, 0]
    assert (
        numpy.fabs(chain - df._dlnfdRe(R, vR, vT)) < 10.0**-5.0
    ), "dehnendf's dlnfdRe does not work"
    return None
def test_dehnendf_dlnfdl_flat():
    """Check dehnendf._dlnfdl via the chain rule and finite differences (flat rotation curve)."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9

    def lnf_of(tR, tvR, tvT):
        return numpy.log(df(numpy.array([tR, tvR, tvT])))

    def RE_of(tR, tvR, tvT):
        # Radius of the circular orbit with this orbit's energy (flat rotation curve)
        return numpy.exp(tvR**2.0 / 2.0 + tvT**2.0 / 2.0 + numpy.log(tR) - 0.5)

    lnf0 = lnf_of(R, vR, vT)
    RE = RE_of(R, vR, vT)
    # Finite differences w.r.t. R, vR, vT; steps made exactly representable
    hR = (R + 10**-6.0) - R
    dlnfdR = (lnf_of(R + hR, vR, vT) - lnf0) / hR
    dRedR = (RE_of(R + hR, vR, vT) - RE) / hR
    hvR = (vR + 10**-6.0) - vR
    dlnfdvR = (lnf_of(R, vR + hvR, vT) - lnf0) / hvR
    dRedvR = (RE_of(R, vR + hvR, vT) - RE) / hvR
    hvT = (vT + 10**-6.0) - vT
    dlnfdvT = (lnf_of(R, vR, vT + hvT) - lnf0) / hvT
    dRedvT = (RE_of(R, vR, vT + hvT) - RE) / hvT
    # Invert the Jacobian of (RE, L, vR) w.r.t. (R, vR, vT) to pick out d/dl
    inv = numpy.linalg.inv(
        numpy.array([[dRedR, dRedvR, dRedvT], [vT, 0.0, R], [0.0, 1.0, 0.0]])
    )
    chain = dlnfdR * inv[0, 1] + dlnfdvR * inv[1, 1] + dlnfdvT * inv[2, 1]
    assert (
        numpy.fabs(chain - df._dlnfdl(R, vR, vT)) < 10.0**-5.0
    ), "dehnendf's dlnfdl does not work"
    return None
def test_dehnendf_dlnfdl_powerfall():
    """Check dehnendf._dlnfdl via the chain rule and finite differences (falling power-law curve)."""
    beta = -0.2
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9

    def lnf_of(tR, tvR, tvT):
        return numpy.log(df(numpy.array([tR, tvR, tvT])))

    def RE_of(tR, tvR, tvT):
        # Radius of the circular orbit with this orbit's energy (power-law curve)
        E = tvR**2.0 / 2.0 + tvT**2.0 / 2.0 + 1.0 / 2.0 / beta * tR ** (2.0 * beta)
        return (2.0 * E / (1.0 + 1.0 / beta)) ** (1.0 / 2.0 / beta)

    lnf0 = lnf_of(R, vR, vT)
    RE = RE_of(R, vR, vT)
    # Finite differences w.r.t. R, vR, vT; steps made exactly representable
    hR = (R + 10**-6.0) - R
    dlnfdR = (lnf_of(R + hR, vR, vT) - lnf0) / hR
    dRedR = (RE_of(R + hR, vR, vT) - RE) / hR
    hvR = (vR + 10**-6.0) - vR
    dlnfdvR = (lnf_of(R, vR + hvR, vT) - lnf0) / hvR
    dRedvR = (RE_of(R, vR + hvR, vT) - RE) / hvR
    hvT = (vT + 10**-6.0) - vT
    dlnfdvT = (lnf_of(R, vR, vT + hvT) - lnf0) / hvT
    dRedvT = (RE_of(R, vR, vT + hvT) - RE) / hvT
    # Invert the Jacobian of (RE, L, vR) w.r.t. (R, vR, vT) to pick out d/dl
    inv = numpy.linalg.inv(
        numpy.array([[dRedR, dRedvR, dRedvT], [vT, 0.0, R], [0.0, 1.0, 0.0]])
    )
    chain = dlnfdR * inv[0, 1] + dlnfdvR * inv[1, 1] + dlnfdvT * inv[2, 1]
    assert (
        numpy.fabs(chain - df._dlnfdl(R, vR, vT)) < 10.0**-5.0
    ), "dehnendf's dlnfdl does not work"
    return None
def test_dehnendf_dlnfdl_powerrise():
    """Check dehnendf._dlnfdl via the chain rule and finite differences (rising power-law curve)."""
    beta = 0.2
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9

    def lnf_of(tR, tvR, tvT):
        return numpy.log(df(numpy.array([tR, tvR, tvT])))

    def RE_of(tR, tvR, tvT):
        # Radius of the circular orbit with this orbit's energy (power-law curve)
        E = tvR**2.0 / 2.0 + tvT**2.0 / 2.0 + 1.0 / 2.0 / beta * tR ** (2.0 * beta)
        return (2.0 * E / (1.0 + 1.0 / beta)) ** (1.0 / 2.0 / beta)

    lnf0 = lnf_of(R, vR, vT)
    RE = RE_of(R, vR, vT)
    # Finite differences w.r.t. R, vR, vT; steps made exactly representable
    hR = (R + 10**-8.0) - R
    dlnfdR = (lnf_of(R + hR, vR, vT) - lnf0) / hR
    dRedR = (RE_of(R + hR, vR, vT) - RE) / hR
    hvR = (vR + 10**-8.0) - vR
    dlnfdvR = (lnf_of(R, vR + hvR, vT) - lnf0) / hvR
    dRedvR = (RE_of(R, vR + hvR, vT) - RE) / hvR
    hvT = (vT + 10**-8.0) - vT
    dlnfdvT = (lnf_of(R, vR, vT + hvT) - lnf0) / hvT
    dRedvT = (RE_of(R, vR, vT + hvT) - RE) / hvT
    # Invert the Jacobian of (RE, L, vR) w.r.t. (R, vR, vT) to pick out d/dl
    inv = numpy.linalg.inv(
        numpy.array([[dRedR, dRedvR, dRedvT], [vT, 0.0, R], [0.0, 1.0, 0.0]])
    )
    chain = dlnfdR * inv[0, 1] + dlnfdvR * inv[1, 1] + dlnfdvT * inv[2, 1]
    assert (
        numpy.fabs(chain - df._dlnfdl(R, vR, vT)) < 10.0**-5.0
    ), "dehnendf's dlnfdl does not work"
    return None
def test_shudf_dlnfdR_flat():
    """Compare shudf's analytic dlnf/dR to a finite difference (flat rotation curve)."""
    df = shudf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (R + 10**-8.0) - R
    numderiv = (
        numpy.log(df(numpy.array([R + h, vR, vT])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdR(R, vR, vT)) < 10.0**-6.0
    ), "shudf's dlnfdR does not work"
    return None
def test_shudf_dlnfdR_powerfall():
    """Compare shudf's analytic dlnf/dR to a finite difference (falling power-law curve)."""
    dfc = shudf(beta=-0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    dR = 10**-6.0
    R, vR, vT = 0.8, 0.1, 0.9
    Rn = R + dR
    dR = Rn - R  # representable number
    # One-sided finite difference of ln f with respect to R
    dlnf = (
        numpy.log(dfc(numpy.array([R + dR, vR, vT])))
        - numpy.log(dfc(numpy.array([R, vR, vT])))
    ) / dR
    assert (
        numpy.fabs(dlnf - dfc._dlnfdR(R, vR, vT)) < 10.0**-6.0
    ), "shudf's dlnfdR does not work"
    return None
def test_shudf_dlnfdR_powerrise():
    """Compare shudf's analytic dlnf/dR to a finite difference (rising power-law curve)."""
    df = shudf(beta=0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (R + 10**-8.0) - R
    numderiv = (
        numpy.log(df(numpy.array([R + h, vR, vT])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdR(R, vR, vT)) < 10.0**-6.0
    ), "shudf's dlnfdR does not work"
    return None
def test_shudf_dlnfdvR_flat():
    """Compare shudf's analytic dlnf/dvR to a finite difference (flat rotation curve)."""
    df = shudf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vR + 10**-8.0) - vR
    numderiv = (
        numpy.log(df(numpy.array([R, vR + h, vT])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvR(R, vR, vT)) < 10.0**-6.0
    ), "shudf's dlnfdvR does not work"
    return None
def test_shudf_dlnfdvR_powerfall():
    """Compare shudf's analytic dlnf/dvR to a finite difference (falling power-law curve)."""
    df = shudf(beta=-0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vR + 10**-8.0) - vR
    numderiv = (
        numpy.log(df(numpy.array([R, vR + h, vT])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvR(R, vR, vT)) < 10.0**-6.0
    ), "shudf's dlnfdvR does not work"
    return None
def test_shudf_dlnfdvR_powerrise():
    """Compare shudf's analytic dlnf/dvR to a finite difference (rising power-law curve)."""
    df = shudf(beta=0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vR + 10**-8.0) - vR
    numderiv = (
        numpy.log(df(numpy.array([R, vR + h, vT])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvR(R, vR, vT)) < 10.0**-6.0
    ), "shudf's dlnfdvR does not work"
    return None
def test_shudf_dlnfdvT_flat():
    """Compare shudf's analytic dlnf/dvT to a finite difference (flat rotation curve)."""
    df = shudf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vT + 10**-8.0) - vT
    numderiv = (
        numpy.log(df(numpy.array([R, vR, vT + h])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvT(R, vR, vT)) < 10.0**-6.0
    ), "shudf's dlnfdvT does not work"
    return None
def test_shudf_dlnfdvT_powerfall():
    """Compare shudf's analytic dlnf/dvT to a finite difference (falling power-law curve)."""
    df = shudf(beta=-0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vT + 10**-8.0) - vT
    numderiv = (
        numpy.log(df(numpy.array([R, vR, vT + h])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvT(R, vR, vT)) < 10.0**-6.0
    ), "shudf's dlnfdvT does not work"
    return None
def test_shudf_dlnfdvT_powerrise():
    """Compare shudf's analytic dlnf/dvT to a finite difference (rising power-law curve)."""
    df = shudf(beta=0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    R, vR, vT = 0.8, 0.1, 0.9
    # Make the step exactly representable in floating point
    h = (vT + 10**-8.0) - vT
    numderiv = (
        numpy.log(df(numpy.array([R, vR, vT + h])))
        - numpy.log(df(numpy.array([R, vR, vT])))
    ) / h
    assert (
        numpy.fabs(numderiv - df._dlnfdvT(R, vR, vT)) < 10.0**-6.0
    ), "shudf's dlnfdvT does not work"
    return None
def test_estimatemeanvR():
    """_estimatemeanvR should agree with the full meanvR calculation at R=0.8."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    expected = df.meanvR(0.8)
    assert (
        numpy.fabs(df._estimatemeanvR(0.8) - expected) < 0.02
    ), "_estimatemeanvR does not agree with meanvR to the expected level"
    return None
def test_asymmetricdrift_flat():
    """asymmetricdrift and _estimatemeanvT vs the numerically computed meanvT (flat curve)."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    meanvt = df.meanvT(0.8)
    # vc - asymmetricdrift should equal meanvT (vc = 1 for a flat rotation curve)
    assert (
        numpy.fabs(df.asymmetricdrift(0.8) - 1.0 + meanvt) < 0.02
    ), "asymmetricdrift does not agree with meanvT for flat rotation curve to the expected level"
    assert (
        numpy.fabs(df.asymmetricdrift(1.2) - 1.0 + df.meanvT(1.2)) < 0.02
    ), "asymmetricdrift does not agree with meanvT for flat rotation curve to the expected level"
    # The quick estimate should agree with the full calculation too
    assert (
        numpy.fabs(df._estimatemeanvT(0.8) - meanvt) < 0.02
    ), "_estimatemeanvT does not agree with meanvT for flat rotation curve to the expected level"
    return None
def test_asymmetricdrift_powerfall():
    """asymmetricdrift vs the numerically computed meanvT (falling power-law curve)."""
    beta = -0.2
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    # vc(R) - asymmetricdrift(R) should equal meanvT(R), with vc(R) = R**beta
    for r in [0.8, 1.2]:
        assert (
            numpy.fabs(df.asymmetricdrift(r) - r**beta + df.meanvT(r)) < 0.02
        ), "asymmetricdrift does not agree with meanvT for flat rotation curve to the expected level"
    return None
def test_asymmetricdrift_powerrise():
    """asymmetricdrift vs the numerically computed meanvT (rising power-law curve)."""
    beta = 0.2
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    # vc(R) - asymmetricdrift(R) should equal meanvT(R), with vc(R) = R**beta
    for r in [0.8, 1.2]:
        assert (
            numpy.fabs(df.asymmetricdrift(r) - r**beta + df.meanvT(r)) < 0.02
        ), "asymmetricdrift does not agree with meanvT for flat rotation curve to the expected level"
    return None
def test_estimateSigmaR2():
    """_estimateSigmaR2 should reproduce targetSigma2 at R=0.8 (flat rotation curve)."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    ratio = df._estimateSigmaR2(0.8) / df.targetSigma2(0.8)
    assert (
        numpy.fabs(ratio - 1.0) < 0.02
    ), "_estimateSigmaR2 does not agree with targetSigma2 to the expected level"
    return None
def test_estimateSigmaT2():
    """_estimateSigmaT2 should equal targetSigma2/2 (flat curve), in both linear and log form."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.02))
    # Linear form: sigmaT2 ~ sigmaR2 / 2 for a flat rotation curve
    assert (
        numpy.fabs(df._estimateSigmaT2(0.8) / df.targetSigma2(0.8) * 2.0 - 1.0) < 0.02
    ), "_estimateSigmaT2 does not agree with targetSigma2 to the expected level"
    # Log form of the same relation
    logdiff = (
        df._estimateSigmaT2(0.8, log=True)
        - numpy.log(df.targetSigma2(0.8))
        + numpy.log(2.0)
    )
    assert (
        numpy.fabs(logdiff) < 0.02
    ), "_estimateSigmaT2 does not agree with targetSigma2 to the expected level"
    return None
def test_vmomentsurfacedensity_deriv():
    """The azimuthal derivative of the surface mass of an axisymmetric DF must vanish."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.02))
    phideriv = df.vmomentsurfacemass(0.9, 0, 0, deriv="phi")
    assert (
        numpy.fabs(phideriv) < 10.0**-6.0
    ), "surfacemass phi derivative is not zero"
    return None
def test_ELtowRRapRperi_flat():
    """Radial frequency from _ELtowRRapRperi for a near-circular orbit (flat rotation curve)."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    Rc = 0.8
    Lc = Rc  # circular angular momentum at Rc for vc = 1
    # Energy of a nearly circular orbit with a small radial velocity
    Ec = numpy.log(Rc) + Lc**2.0 / 2.0 / Rc**2.0 + 0.01**2.0 / 2.0
    wr = df._ELtowRRapRperi(Ec, Lc)[0]
    assert (
        numpy.fabs(wr - numpy.sqrt(2.0) / Rc) < 10.0**-3.0
    ), "diskdf's _ELtowRRapRperi's radial frequency for close to circular orbit is wrong"
    return None
def test_ELtowRRapRperi_powerfall():
    """Radial frequency from _ELtowRRapRperi for a near-circular orbit (falling power-law curve)."""
    beta = -0.2
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    Rc = 0.8
    Lc = Rc * Rc**beta  # circular angular momentum at Rc
    # Energy of a nearly circular orbit with a small radial velocity
    Ec = (
        1.0 / 2.0 / beta * Rc ** (2.0 * beta)
        + Lc**2.0 / 2.0 / Rc**2.0
        + 0.01**2.0 / 2.0
    )
    gamma = numpy.sqrt(2.0 / (1.0 + beta))
    wr = df._ELtowRRapRperi(Ec, Lc)[0]
    assert (
        numpy.fabs(wr - 2.0 * Rc ** (beta - 1.0) / gamma) < 10.0**-3.0
    ), "diskdf's _ELtowRRapRperi's radial frequency for close to circular orbit is wrong"
    return None
def test_ELtowRRapRperi_powerrise():
    """Radial frequency from _ELtowRRapRperi for a near-circular orbit (rising power-law curve)."""
    beta = 0.2
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    Rc = 0.8
    Lc = Rc * Rc**beta  # circular angular momentum at Rc
    # Energy of a nearly circular orbit with a small radial velocity
    Ec = (
        1.0 / 2.0 / beta * Rc ** (2.0 * beta)
        + Lc**2.0 / 2.0 / Rc**2.0
        + 0.01**2.0 / 2.0
    )
    gamma = numpy.sqrt(2.0 / (1.0 + beta))
    wr = df._ELtowRRapRperi(Ec, Lc)[0]
    assert (
        numpy.fabs(wr - 2.0 * Rc ** (beta - 1.0) / gamma) < 10.0**-3.0
    ), "diskdf's _ELtowRRapRperi's radial frequency for close to circular orbit is wrong"
    return None
def test_sampledSurfacemassLOS_target():
    """Moments of LOS-distance samples vs the target surface-mass pdf at l=45 deg."""
    numpy.random.seed(1)
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    samples = df.sampledSurfacemassLOS(numpy.pi / 4.0, n=10000, target=True)
    # Analytic pdf of distance along the same line of sight
    grid = numpy.linspace(0.001, 4.0, 201)
    pdf = numpy.array([df.surfacemassLOS(d, 45.0, deg=True, target=True) for d in grid])
    norm = numpy.sum(pdf)
    mean_d = numpy.sum(grid * pdf) / norm
    std_d = numpy.sqrt(numpy.sum(grid**2.0 * pdf) / norm - mean_d**2.0)
    assert (
        numpy.fabs(numpy.mean(samples) - mean_d) < 10.0**-2.0
    ), "mean of surfacemassLOS for target surfacemass is not equal to the mean of the samples"
    assert (
        numpy.fabs(numpy.std(samples) - std_d) < 10.0**-2.0
    ), "stddev of surfacemassLOS for target surfacemass is not equal to the mean of the samples"
    assert (
        numpy.fabs(skew_samples(samples) - skew_pdist(grid, pdf)) < 10.0**-1
    ), "skew of surfacemassLOS for target surfacemass is not equal to the mean of the samples"
    return None
def test_sampledSurfacemassLOS():
    """Moments of LOS-distance samples (full surface mass) vs the target pdf at l=45 deg."""
    numpy.random.seed(1)
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    samples = df.sampledSurfacemassLOS(numpy.pi / 4.0, n=10000, target=False)
    # Compare against the target pdf, because that is easy to calculate
    grid = numpy.linspace(0.001, 4.0, 101)
    pdf = numpy.array([df.surfacemassLOS(d, 45.0, deg=True, target=True) for d in grid])
    norm = numpy.sum(pdf)
    mean_d = numpy.sum(grid * pdf) / norm
    std_d = numpy.sqrt(numpy.sum(grid**2.0 * pdf) / norm - mean_d**2.0)
    assert (
        numpy.fabs(numpy.mean(samples) - mean_d) < 10.0**-2.0
    ), "mean of surfacemassLOS surfacemass is not equal to the mean of the samples"
    assert (
        numpy.fabs(numpy.std(samples) - std_d) < 10.0**-2.0
    ), "stddev of surfacemassLOS surfacemass is not equal to the mean of the samples"
    assert (
        numpy.fabs(skew_samples(samples) - skew_pdist(grid, pdf)) < 10.0**-1
    ), "skew of surfacemassLOS surfacemass is not equal to the mean of the samples"
    return None
def test_sampleVRVT_target_flat():
    """Low-order moments of (vR, vT) samples at R=0.7 vs numerical DF moments (target, flat)."""
    numpy.random.seed(1)
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    vs = df.sampleVRVT(0.7, n=500, target=True)
    vr, vt = vs[:, 0], vs[:, 1]
    assert numpy.fabs(numpy.mean(vr)) < 0.05, "mean vr of vrvt samples is not zero"
    assert (
        numpy.fabs(numpy.mean(vt) - df.meanvT(0.7)) < 10.0**-2.0
    ), "mean vt of vrvt samples is not equal to numerical calculation"
    assert (
        numpy.fabs(numpy.std(vr) - numpy.sqrt(df.sigmaR2(0.7))) < 10.0**-1.5
    ), "std dev vr of vrvt samples is not equal to the expected valueo"
    assert (
        numpy.fabs(numpy.std(vt) - numpy.sqrt(df.sigmaT2(0.7))) < 10.0**-1.5
    ), "std dev vr of vrvt samples is not equal to the expected valueo"
    return None
def test_sampleVRVT_flat():
    """Low-order moments of (vR, vT) samples at R=0.7 vs numerical DF moments (full DF, flat)."""
    numpy.random.seed(1)
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    vs = df.sampleVRVT(0.7, n=500, target=False)
    vr, vt = vs[:, 0], vs[:, 1]
    assert numpy.fabs(numpy.mean(vr)) < 0.05, "mean vr of vrvt samples is not zero"
    assert (
        numpy.fabs(numpy.mean(vt) - df.meanvT(0.7)) < 10.0**-2.0
    ), "mean vt of vrvt samples is not equal to numerical calculation"
    assert (
        numpy.fabs(numpy.std(vr) - numpy.sqrt(df.sigmaR2(0.7))) < 10.0**-1.5
    ), "std dev vr of vrvt samples is not equal to the expected valueo"
    assert (
        numpy.fabs(numpy.std(vt) - numpy.sqrt(df.sigmaT2(0.7))) < 10.0**-1.5
    ), "std dev vr of vrvt samples is not equal to the expected valueo"
    return None
def test_sampleVRVT_target_powerfall():
    """Low-order moments of (vR, vT) samples at R=0.7 vs numerical DF moments (target, falling curve)."""
    numpy.random.seed(1)
    df = dehnendf(beta=-0.2, profileParams=(1.0 / 4.0, 1.0, 0.2))
    vs = df.sampleVRVT(0.7, n=500, target=True)
    vr, vt = vs[:, 0], vs[:, 1]
    assert numpy.fabs(numpy.mean(vr)) < 0.05, "mean vr of vrvt samples is not zero"
    assert (
        numpy.fabs(numpy.mean(vt) - df.meanvT(0.7)) < 10.0**-2.0
    ), "mean vt of vrvt samples is not equal to numerical calculation"
    assert (
        numpy.fabs(numpy.std(vr) - numpy.sqrt(df.sigmaR2(0.7))) < 10.0**-1.5
    ), "std dev vr of vrvt samples is not equal to the expected valueo"
    assert (
        numpy.fabs(numpy.std(vt) - numpy.sqrt(df.sigmaT2(0.7))) < 10.0**-1.5
    ), "std dev vr of vrvt samples is not equal to the expected valueo"
    return None
def test_sampleLOS_target():
    """Distance moments of orbits sampled along l=45 deg vs the target LOS pdf."""
    numpy.random.seed(1)
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    orbits = df.sampleLOS(
        numpy.pi / 4.0, n=1000, targetSurfmass=True, targetSigma2=True, deg=False
    )
    dists = numpy.array([o.dist(ro=1.0, obs=[1.0, 0.0, 0.0]) for o in orbits])
    # Analytic pdf of distance along the same line of sight
    grid = numpy.linspace(0.001, 4.0, 201)
    pdf = numpy.array([df.surfacemassLOS(d, 45.0, deg=True, target=True) for d in grid])
    norm = numpy.sum(pdf)
    mean_d = numpy.sum(grid * pdf) / norm
    std_d = numpy.sqrt(numpy.sum(grid**2.0 * pdf) / norm - mean_d**2.0)
    assert (
        numpy.fabs(numpy.mean(dists) - mean_d) < 10.0**-2.0
    ), "mean of distance in sampleLOS for target surfacemass is not equal to the mean of the distribution"
    assert (
        numpy.fabs(numpy.std(dists) - std_d) < 10.0**-1.0
    ), "stddev of distance in sampleLOS for target surfacemass is not equal to the mean of the distribution"
    assert (
        numpy.fabs(skew_samples(dists) - skew_pdist(grid, pdf)) < 0.3
    ), "skew of distance in sampleLOS for target surfacemass is not equal to the mean of the distribution"
    return None
def test_sampleLOS():
    """Distance moments of orbits sampled along l=45 deg (full surface mass) vs the target pdf."""
    numpy.random.seed(1)
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    orbits = df.sampleLOS(45.0, n=1000, targetSurfmass=False, deg=True)
    dists = numpy.array([o.dist(ro=1.0, obs=[1.0, 0.0, 0.0]) for o in orbits])
    # Compare against the target pdf, because that is easy to calculate
    grid = numpy.linspace(0.001, 4.0, 101)
    pdf = numpy.array([df.surfacemassLOS(d, 45.0, deg=True, target=True) for d in grid])
    norm = numpy.sum(pdf)
    mean_d = numpy.sum(grid * pdf) / norm
    std_d = numpy.sqrt(numpy.sum(grid**2.0 * pdf) / norm - mean_d**2.0)
    assert (
        numpy.fabs(numpy.mean(dists) - mean_d) < 10.0**-2.0
    ), "mean of ds of sampleLOS is not equal to the mean of the distribution"
    assert (
        numpy.fabs(numpy.std(dists) - std_d) < 0.05
    ), "stddev of ds of sampleLOS is not equal to the mean of the distribution"
    assert (
        numpy.fabs(skew_samples(dists) - skew_pdist(grid, pdf)) < 0.3
    ), "skew of ds of sampleLOS is not equal to the mean of the distribution"
    return None
def test_dehnendf_sample_sampleLOS():
    """dehnendf.sample(los=...) must reproduce sampleLOS draw-for-draw under the same seed."""
    df = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    via_sampleLOS = df.sampleLOS(45.0, n=2, targetSurfmass=False, deg=True)
    rs1 = numpy.array([o.R() for o in via_sampleLOS])
    vrs1 = numpy.array([o.vR() for o in via_sampleLOS])
    vts1 = numpy.array([o.vT() for o in via_sampleLOS])
    # Reset the RNG so both code paths consume the same random stream
    numpy.random.seed(1)
    via_sample = df.sample(los=45.0, n=2, targetSurfmass=False, losdeg=True)
    rs2 = numpy.array([o.R() for o in via_sample])
    vrs2 = numpy.array([o.vR() for o in via_sample])
    vts2 = numpy.array([o.vT() for o in via_sample])
    for first, second in [(rs1, rs2), (vrs1, vrs2), (vts1, vts2)]:
        assert numpy.all(
            numpy.fabs(first - second) < 10.0**-10.0
        ), "Samples returned from dehnendf.sample with los set are not the same as those returned with sampleLOS"
    return None
def test_shudf_sample_sampleLOS():
    """shudf.sample(los=...) must reproduce sampleLOS draw-for-draw under the same seed."""
    df = shudf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    via_sampleLOS = df.sampleLOS(45.0, n=2, targetSurfmass=False, deg=True)
    rs1 = numpy.array([o.R() for o in via_sampleLOS])
    vrs1 = numpy.array([o.vR() for o in via_sampleLOS])
    vts1 = numpy.array([o.vT() for o in via_sampleLOS])
    # Reset the RNG so both code paths consume the same random stream
    numpy.random.seed(1)
    via_sample = df.sample(los=45.0, n=2, targetSurfmass=False, losdeg=True)
    rs2 = numpy.array([o.R() for o in via_sample])
    vrs2 = numpy.array([o.vR() for o in via_sample])
    vts2 = numpy.array([o.vT() for o in via_sample])
    for first, second in [(rs1, rs2), (vrs1, vrs2), (vts1, vts2)]:
        assert numpy.all(
            numpy.fabs(first - second) < 10.0**-10.0
        ), "Samples returned from dehnendf.sample with los set are not the same as those returned with sampleLOS"
    return None
def test_dehnendf_sample_flat_returnROrbit():
    """Spatial and velocity moments of ROrbit samples from a flat-curve dehnendf."""
    beta = 0.0
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    orbits = df.sample(n=300, returnROrbit=True)
    # Spatial moments: exponential disk with scale length 1/4
    radii = numpy.array([o.R() for o in orbits])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.05
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.03
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # Velocity moments
    radvels = numpy.array([o.vR() for o in orbits])
    assert (
        numpy.fabs(numpy.mean(radvels)) < 0.05
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    tanvels = numpy.array([o.vT() for o in orbits])
    dvts = numpy.array(
        [vt - r**beta + df.asymmetricdrift(r) for (r, vt) in zip(radii, tanvels)]
    )
    assert (
        numpy.fabs(numpy.mean(dvts)) < 0.1
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_dehnendf_sample_flat_returnROrbit_rrange():
    """Spatial and velocity moments of ROrbit samples restricted to R in [0, 1]."""
    beta = 0.0
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    orbits = df.sample(n=100, returnROrbit=True, rrange=[0.0, 1.0])
    # Spatial moments of the truncated exponential disk
    radii = numpy.array([o.R() for o in orbits])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.419352) < 0.05
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - 0.240852) < 0.05
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # Velocity moments
    radvels = numpy.array([o.vR() for o in orbits])
    assert (
        numpy.fabs(numpy.mean(radvels)) < 0.075
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    tanvels = numpy.array([o.vT() for o in orbits])
    dvts = numpy.array(
        [vt - r**beta + df.asymmetricdrift(r) for (r, vt) in zip(radii, tanvels)]
    )
    assert (
        numpy.fabs(numpy.mean(dvts)) < 0.1
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_dehnendf_sample_powerrise_returnROrbit():
    """Spatial and velocity moments of ROrbit samples from a rising power-law dehnendf."""
    beta = 0.2
    df = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    orbits = df.sample(n=300, returnROrbit=True)
    # Spatial moments: exponential disk with scale length 1/4
    radii = numpy.array([o.R() for o in orbits])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.1
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.06
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # Velocity moments
    radvels = numpy.array([o.vR() for o in orbits])
    assert (
        numpy.fabs(numpy.mean(radvels)) < 0.05
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    tanvels = numpy.array([o.vT() for o in orbits])
    dvts = numpy.array(
        [vt - r**beta + df.asymmetricdrift(r) for (r, vt) in zip(radii, tanvels)]
    )
    assert (
        numpy.fabs(numpy.mean(dvts)) < 0.2
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_dehnendf_sample_flat_returnOrbit():
    """Sample full planar orbits from a flat-rotation-curve Dehnen DF."""
    beta = 0.0
    dfc = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    sampled = dfc.sample(n=100, returnOrbit=True)
    # Spatial distribution: R moments vs. the target profile, phi uniform on [0, 2pi)
    radii = numpy.array([orb.R() for orb in sampled])
    azimuths = numpy.array([orb.phi() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.05
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.mean(azimuths) - numpy.pi) < 0.2
    ), "mean phi of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.03
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(azimuths) - numpy.pi / numpy.sqrt(3.0)) < 0.1
    ), "stddev phi of sampled points does not agree with that of the input surface profile"
    # Velocity distribution: vR averages to zero
    radial_vels = numpy.array([orb.vR() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radial_vels)) < 0.05
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    # vT: circular speed minus the asymmetric drift
    tangential_vels = numpy.array([orb.vT() for orb in sampled])
    drift_devs = numpy.array(
        [
            vt - r**beta + dfc.asymmetricdrift(r)
            for r, vt in zip(radii, tangential_vels)
        ]
    )
    assert (
        numpy.fabs(numpy.mean(drift_devs)) < 0.1
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_dehnendf_sample_flat_EL():
    """Sample (E, L) pairs from a flat-rotation-curve Dehnen DF."""
    beta = 0.0
    dfc = dehnendf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    EL = dfc.sample(n=50, returnROrbit=False, returnOrbit=False)
    energies = [pair[0] for pair in EL]
    angmoms = [pair[1] for pair in EL]
    # Radii of circular orbits with these energies; for a flat rotation curve
    # E = log(R) + 1/2, so R = exp(E - 1/2) and these should follow the
    # exponential target profile.
    radii = numpy.exp(numpy.array(energies) - 0.5)
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.05
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.03
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # BOVY: Could use another test
    return None
def test_shudf_sample_flat_returnROrbit():
    """Sample radial orbits from a flat-rotation-curve Shu DF and check moments."""
    beta = 0.0
    dfc = shudf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    sampled = dfc.sample(n=50, returnROrbit=True)
    # Spatial distribution
    radii = numpy.array([orb.R() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.1
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.1
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # Velocity distribution
    radial_vels = numpy.array([orb.vR() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radial_vels)) < 0.05
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    tangential_vels = numpy.array([orb.vT() for orb in sampled])
    drift_devs = numpy.array(
        [
            vt - r**beta + dfc.asymmetricdrift(r)
            for r, vt in zip(radii, tangential_vels)
        ]
    )
    assert (
        numpy.fabs(numpy.mean(drift_devs)) < 0.1
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_shudf_sample_flat_returnROrbit_rrange():
    """Sample a Shu DF restricted to R in [0, 1] and check the truncated moments."""
    beta = 0.0
    dfc = shudf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    sampled = dfc.sample(n=100, returnROrbit=True, rrange=[0.0, 1.0])
    # Spatial distribution: moments of the exponential profile truncated at R=1
    radii = numpy.array([orb.R() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.419352) < 0.05
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - 0.240852) < 0.05
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # Velocity distribution
    radial_vels = numpy.array([orb.vR() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radial_vels)) < 0.075
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    tangential_vels = numpy.array([orb.vT() for orb in sampled])
    drift_devs = numpy.array(
        [
            vt - r**beta + dfc.asymmetricdrift(r)
            for r, vt in zip(radii, tangential_vels)
        ]
    )
    assert (
        numpy.fabs(numpy.mean(drift_devs)) < 0.13
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_shudf_sample_powerrise_returnROrbit():
    """Sample a Shu DF in a rising (beta=0.2) rotation curve and check moments."""
    beta = 0.2
    dfc = shudf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    sampled = dfc.sample(n=100, returnROrbit=True)
    # Spatial distribution
    radii = numpy.array([orb.R() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.1
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.06
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # Velocity distribution
    radial_vels = numpy.array([orb.vR() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radial_vels)) < 0.05
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    tangential_vels = numpy.array([orb.vT() for orb in sampled])
    drift_devs = numpy.array(
        [
            vt - r**beta + dfc.asymmetricdrift(r)
            for r, vt in zip(radii, tangential_vels)
        ]
    )
    assert (
        numpy.fabs(numpy.mean(drift_devs)) < 0.2
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_shudf_sample_flat_returnOrbit():
    """Sample full planar orbits from a flat-rotation-curve Shu DF."""
    beta = 0.0
    dfc = shudf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    sampled = dfc.sample(n=100, returnOrbit=True)
    # Spatial distribution: R moments vs. the target profile, phi uniform
    radii = numpy.array([orb.R() for orb in sampled])
    azimuths = numpy.array([orb.phi() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.05
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.mean(azimuths) - numpy.pi) < 0.2
    ), "mean phi of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.03
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(azimuths) - numpy.pi / numpy.sqrt(3.0)) < 0.2
    ), "stddev phi of sampled points does not agree with that of the input surface profile"
    # Velocity distribution
    radial_vels = numpy.array([orb.vR() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radial_vels)) < 0.05
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    tangential_vels = numpy.array([orb.vT() for orb in sampled])
    drift_devs = numpy.array(
        [
            vt - r**beta + dfc.asymmetricdrift(r)
            for r, vt in zip(radii, tangential_vels)
        ]
    )
    assert (
        numpy.fabs(numpy.mean(drift_devs)) < 0.1
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_shudf_sample_flat_EL():
    """Sample (E, L) pairs from a flat-rotation-curve Shu DF."""
    beta = 0.0
    dfc = shudf(beta=beta, profileParams=(1.0 / 4.0, 1.0, 0.2))
    numpy.random.seed(1)
    EL = dfc.sample(n=50, returnROrbit=False, returnOrbit=False)
    energies = [pair[0] for pair in EL]
    angmoms = [pair[1] for pair in EL]
    # For a flat rotation curve (vc=1), the radius of the circular orbit with
    # angular momentum L is simply R=L; these should follow the exponential profile.
    radii = numpy.array(angmoms)
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.05
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.03
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # BOVY: Could use another test
    return None
def test_schwarzschild_vs_shu_flat():
    """Schwarzschild DF should approach the Shu DF for small sigma (flat curve)."""
    params = (0.3333333333333333, 1.0, 0.05)
    df_shu = shudf(profileParams=params, beta=0.0, correct=False)
    df_schw = schwarzschilddf(profileParams=params, beta=0.0, correct=False)
    assert (
        numpy.fabs(df_shu.meanvT(0.97) - df_schw.meanvT(0.97)) < 10.0**-3.0
    ), "Shu and Schwarschild DF differ more than expected for small sigma"
    assert (
        numpy.fabs(df_shu.oortA(0.97) - df_schw.oortA(0.97)) < 10.0**-2.9
    ), "Shu and Schwarschild DF differ more than expected for small sigma"
    return None
def test_schwarzschild_vs_shu_powerfall():
    """Schwarzschild DF should approach the Shu DF for small sigma (falling curve)."""
    beta = -0.2
    params = (0.3333333333333333, 1.0, 0.05)
    df_shu = shudf(profileParams=params, beta=beta, correct=False)
    df_schw = schwarzschilddf(profileParams=params, beta=beta, correct=False)
    assert (
        numpy.fabs(df_shu.meanvT(0.97) - df_schw.meanvT(0.97)) < 10.0**-3.0
    ), "Shu and Schwarschild DF differ more than expected for small sigma"
    assert (
        numpy.fabs(df_shu.oortA(0.97) - df_schw.oortA(0.97)) < 10.0**-3.0
    ), "Shu and Schwarschild DF differ more than expected for small sigma"
    return None
def test_schwarzschild_vs_shu_powerrise():
    """Schwarzschild DF should approach the Shu DF for small sigma (rising curve)."""
    beta = 0.2
    params = (0.3333333333333333, 1.0, 0.05)
    df_shu = shudf(profileParams=params, beta=beta, correct=False)
    df_schw = schwarzschilddf(profileParams=params, beta=beta, correct=False)
    assert (
        numpy.fabs(df_shu.meanvT(0.97) - df_schw.meanvT(0.97)) < 10.0**-3.0
    ), "Shu and Schwarschild DF differ more than expected for small sigma"
    assert (
        numpy.fabs(df_shu.oortA(0.97) - df_schw.oortA(0.97)) < 10.0**-2.8
    ), "Shu and Schwarschild DF differ more than expected for small sigma"
    return None
###############################################################################
# Tests of DFcorrection
###############################################################################
def test_dehnendf_flat_DFcorrection_setup():
    """Build the corrected Dehnen DFs (1 and 2 iterations) used by later tests."""
    global ddf_correct_flat
    global ddf_correct2_flat
    # Shared construction keywords; only the number of iterations differs.
    common = dict(
        beta=0.0,
        profileParams=(1.0 / 4.0, 1.0, 0.2),
        correct=True,
        npoints=21,
        savedir=".",
    )
    ddf_correct_flat = dehnendf(niter=1, **common)
    ddf_correct2_flat = dehnendf(niter=2, **common)
    return None
def test_dehnendf_flat_DFcorrection_mag():
    """Corrections should be modest at intermediate R and negligible at large R."""
    corr = ddf_correct2_flat._corr
    msg = "dehnendf correction is larger than expected"
    # intermediate R, log output requested directly
    logcorr = corr.correct(1.1, log=True)
    assert numpy.fabs(logcorr[0]) < 0.15, msg
    assert numpy.fabs(logcorr[1]) < 0.1, msg
    # small R
    logcorr = numpy.log(corr.correct(10.0**-12.0))
    assert numpy.fabs(logcorr[0]) < 0.4, msg
    assert numpy.fabs(logcorr[1]) < 1.0, msg
    # large R
    logcorr = numpy.log(corr.correct(12.0))
    assert numpy.fabs(logcorr[0]) < 0.01, msg
    assert numpy.fabs(logcorr[1]) < 0.01, msg
    # small R, array input
    logcorr = numpy.log(corr.correct(10.0**-12.0 * numpy.ones(2)))
    assert (numpy.fabs(logcorr[0]) < 0.4).all(), msg
    assert (numpy.fabs(logcorr[1]) < 1.0).all(), msg
    # large R, array input
    logcorr = numpy.log(corr.correct(12.0 * numpy.ones(2)))
    assert (numpy.fabs(logcorr[0]) < 0.01).all(), msg
    assert (numpy.fabs(logcorr[1]) < 0.01).all(), msg
    # large R, array input, log=True
    logcorr = corr.correct(12.0 * numpy.ones(2), log=True)
    assert (numpy.fabs(logcorr[0]) < 0.01).all(), msg
    assert (numpy.fabs(logcorr[1]) < 0.01).all(), msg
    return None
def test_dehnendf_flat_DFcorrection_deriv_mag():
    """Derivative of the log corrections: small at moderate/large R, large near R=0.

    Bug fix: the second small-R assertion checks that the derivative is *large*
    (``> 1.0``) but its failure message previously read "larger than expected";
    it now reads "smaller than expected", matching the sibling assertion.
    """
    corr = ddf_correct2_flat._corr
    # moderate R: the corrections vary slowly
    dcorr = corr.derivLogcorrect(2.0)
    assert (
        numpy.fabs(dcorr[0]) < 0.1
    ), "dehnendf correction derivative is larger than expected"
    assert (
        numpy.fabs(dcorr[1]) < 0.1
    ), "dehnendf correction derivative is larger than expected"
    # small R: the corrections vary rapidly, so the derivative should be very large
    dcorr = corr.derivLogcorrect(10.0**-12.0)
    assert (
        numpy.fabs(dcorr[0]) > 1.0
    ), "dehnendf correction derivative is smaller than expected"
    assert (
        numpy.fabs(dcorr[1]) > 1.0
    ), "dehnendf correction derivative is smaller than expected"
    # large R: the corrections go to unity, so the derivative should vanish
    dcorr = corr.derivLogcorrect(12.0)
    assert (
        numpy.fabs(dcorr[0]) < 0.01
    ), "dehnendf correction derivative is larger than expected"
    assert (
        numpy.fabs(dcorr[1]) < 0.01
    ), "dehnendf correction derivative is larger than expected"
    return None
def test_dehnendf_flat_DFcorrection_surfacemass():
    """Each correction iteration should bring surfacemass closer to the target."""
    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2), correct=False)
    # Target is deterministic, so compute its log once.
    log_target = numpy.log(dfc.targetSurfacemass(0.8))
    diff_uncorr = numpy.fabs(numpy.log(dfc.surfacemass(0.8)) - log_target)
    diff_corr = numpy.fabs(numpy.log(ddf_correct_flat.surfacemass(0.8)) - log_target)
    diff_corr2 = numpy.fabs(numpy.log(ddf_correct2_flat.surfacemass(0.8)) - log_target)
    assert (
        diff_corr < diff_uncorr
    ), "surfacemass w/ corrected dehnenDF is does not agree better with target than with uncorrected dehnenDF"
    assert (
        diff_corr2 < diff_corr
    ), "surfacemass w/ corrected dehnenDF w/ 2 iterations is does not agree better with target than with 1 iteration"
    return None
def test_dehnendf_flat_DFcorrection_sigmaR2():
    """Each correction iteration should bring sigmaR2 closer to the target."""
    dfc = dehnendf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2), correct=False)
    # Target is deterministic, so compute its log once.
    log_target = numpy.log(dfc.targetSigma2(0.8))
    diff_uncorr = numpy.fabs(numpy.log(dfc.sigmaR2(0.8)) - log_target)
    diff_corr = numpy.fabs(numpy.log(ddf_correct_flat.sigmaR2(0.8)) - log_target)
    diff_corr2 = numpy.fabs(numpy.log(ddf_correct2_flat.sigmaR2(0.8)) - log_target)
    assert (
        diff_corr < diff_uncorr
    ), "sigmaR2 w/ corrected dehnenDF is does not agree better with target than with uncorrected dehnenDF"
    assert (
        diff_corr2 < diff_corr
    ), "sigmaR2 w/ corrected dehnenDF w/ 2 iterations is does not agree better with target than with 1 iteration"
    return None
def test_dehnendf_flat_DFcorrection_reload():
    """Previously-saved corrections should be re-loaded from disk, not re-computed.

    Re-computing the niter=1 corrections takes many seconds; re-loading the
    savefile written by test_dehnendf_flat_DFcorrection_setup should be fast.
    Fix: the constructed object was bound to an unused local (``reddf``); only
    the elapsed construction time matters, so the binding is dropped.
    """
    import time

    start = time.time()
    dehnendf(
        beta=0.0,
        profileParams=(1.0 / 4.0, 1.0, 0.2),
        correct=True,
        niter=1,
        npoints=21,
        savedir=".",
    )
    assert (
        time.time() - start < 1.0
    ), "Setup w/ correct=True, but already computed corrections takes too long"
    return None
def test_dehnendf_flat_DFcorrection_cleanup():
    """Remove the correction savefiles written by the earlier setup tests.

    Fix: the original used bare ``except:`` clauses, which would also swallow
    ``KeyboardInterrupt``/``SystemExit``; only ``OSError`` (the exception
    ``os.remove`` raises) is caught now, and the cause is chained.
    """
    # This should run quickly, because the corrections are re-loaded from disk.
    dfc = dehnendf(
        beta=0.0,
        profileParams=(1.0 / 4.0, 1.0, 0.2),
        correct=True,
        niter=1,
        npoints=21,
        savedir=".",
    )
    # Both the niter=1 and niter=2 savefiles were written by the setup tests.
    for niter in (1, 2):
        try:
            os.remove(dfc._corr._createSavefilename(niter))
        except OSError as e:
            raise AssertionError(
                "removing DFcorrection's savefile did not work"
            ) from e
    return None
def test_DFcorrection_setup():
    """Exercise DFcorrection keyword handling, reuse of corrections, defaults,
    and the error paths (mismatched corrections shape, missing
    surfaceSigmaProfile)."""
    # Test that the keywords are setup correctly and that exceptions are raised
    dfc = dehnendf(
        beta=0.1,
        profileParams=(1.0 / 3.0, 1.0, 0.2),
        correct=True,
        rmax=4.0,
        niter=2,
        npoints=5,
        interp_k=3,
        savedir=".",
    )
    # The DFcorrection attached to the DF should carry the keywords through.
    assert (
        numpy.fabs(dfc._corr._rmax - 4.0) < 10.0**-10.0
    ), "rmax not set up correctly in DFcorrection"
    assert (
        numpy.fabs(dfc._corr._npoints - 5) < 10.0**-10.0
    ), "npoints not set up correctly in DFcorrection"
    assert (
        numpy.fabs(dfc._corr._interp_k - 3) < 10.0**-10.0
    ), "interp_k not set up correctly in DFcorrection"
    assert (
        numpy.fabs(dfc._corr._beta - 0.1) < 10.0**-10.0
    ), "beta not set up correctly in DFcorrection"
    # setup w/ corrections: re-use the computed correction grid directly
    corrs = dfc._corr._corrections
    dfc = dehnendf(
        beta=0.1,
        profileParams=(1.0 / 3.0, 1.0, 0.2),
        correct=True,
        rmax=4.0,
        niter=2,
        interp_k=3,
        savedir=".",
        corrections=corrs,
    )
    assert numpy.all(
        numpy.fabs(corrs - dfc._corr._corrections) < 10.0**-10.0
    ), "DFcorrection initialized w/ corrections does not work properly"
    # If corrections.shape[0] neq npoints, should raise error
    from galpy.df.diskdf import DFcorrectionError

    try:
        dfc = dehnendf(
            beta=0.1,
            profileParams=(1.0 / 3.0, 1.0, 0.2),
            correct=True,
            rmax=4.0,
            niter=2,
            npoints=6,  # deliberately inconsistent with corrs (npoints=5)
            interp_k=3,
            savedir=".",
            corrections=corrs,
        )
    except DFcorrectionError:
        pass
    else:
        raise AssertionError(
            "DFcorrection setup with corrections.shape[0] neq npoints did not raise DFcorrectionError"
        )
    # rm savefile: re-create the corrected DF once more so we know which
    # savefile exists, then delete it to leave the directory clean
    dfc = dehnendf(
        beta=0.1,
        profileParams=(1.0 / 3.0, 1.0, 0.2),
        correct=True,
        rmax=4.0,
        niter=2,
        npoints=5,
        interp_k=3,
        savedir=".",
    )
    try:
        os.remove(dfc._corr._createSavefilename(2))
    except:
        raise AssertionError("removing DFcorrection's savefile did not work")
    # Also explicily setup a DFcorrection, to test for other stuff
    from galpy.df import DFcorrection
    from galpy.df.diskdf import DFcorrectionError

    # Should raise DFcorrectionError bc surfaceSigmaProfile is not set
    try:
        dfc = DFcorrection(npoints=2, niter=2, rmax=4.0, beta=-0.1, interp_k=3)
    except DFcorrectionError as e:
        print(e)
    else:
        raise AssertionError(
            "DFcorrection setup with no surfaceSigmaProfile set did not raise DFcorrectionError"
        )
    # Now w/ surfaceSigmaProfile to test default dftype
    from galpy.df import expSurfaceSigmaProfile

    essp = expSurfaceSigmaProfile(params=(0.25, 0.75, 0.1))
    dfc = DFcorrection(
        npoints=5, niter=1, rmax=4.0, surfaceSigmaProfile=essp, interp_k=3
    )
    # With no dftype/beta given, DFcorrection should default to dehnendf, beta=0
    assert issubclass(
        dfc._dftype, dehnendf
    ), "DFcorrection w/ no dftype set does not default to dehnendf"
    assert (
        numpy.fabs(dfc._beta) < 10.0**-10.0
    ), "DFcorrection w/ no beta does not default to zero"
    try:
        os.remove(dfc._createSavefilename(1))
    except:
        raise AssertionError("removing DFcorrection's savefile did not work")
    return None
def test_dehnendf_sample_flat_returnROrbit_wcorrections():
    """Sampling from the corrected Dehnen DF should still match the target moments."""
    beta = 0.0
    dfc = ddf_correct2_flat
    numpy.random.seed(1)
    sampled = dfc.sample(n=100, returnROrbit=True)
    # Spatial distribution
    radii = numpy.array([orb.R() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.05
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.03
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # Velocity distribution
    radial_vels = numpy.array([orb.vR() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radial_vels)) < 0.1
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    tangential_vels = numpy.array([orb.vT() for orb in sampled])
    drift_devs = numpy.array(
        [
            vt - r**beta + dfc.asymmetricdrift(r)
            for r, vt in zip(radii, tangential_vels)
        ]
    )
    assert (
        numpy.fabs(numpy.mean(drift_devs)) < 0.1
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_shudf_flat_DFcorrection_setup():
    """Build the corrected Shu DF used by the Shu correction tests below."""
    global sdf_correct_flat
    setup_kwargs = dict(
        beta=0.0,
        profileParams=(1.0 / 4.0, 1.0, 0.2),
        correct=True,
        niter=1,
        npoints=21,
        savedir=".",
    )
    sdf_correct_flat = shudf(**setup_kwargs)
    return None
def test_shudf_flat_DFcorrection_surfacemass():
    """The corrected Shu DF should reproduce the target surface mass better."""
    dfc = shudf(beta=0.0, profileParams=(1.0 / 4.0, 1.0, 0.2), correct=False)
    # Target is deterministic, so compute its log once.
    log_target = numpy.log(dfc.targetSurfacemass(0.8))
    diff_uncorr = numpy.fabs(numpy.log(dfc.surfacemass(0.8)) - log_target)
    diff_corr = numpy.fabs(numpy.log(sdf_correct_flat.surfacemass(0.8)) - log_target)
    assert (
        diff_corr < diff_uncorr
    ), "surfacemass w/ corrected shuDF is does not agree better with target than with uncorrected shuDF"
    return None
def test_shudf_sample_flat_returnROrbit_wcorrections():
    """Sampling from the corrected Shu DF should still match the target moments."""
    beta = 0.0
    dfc = sdf_correct_flat
    numpy.random.seed(1)
    sampled = dfc.sample(n=100, returnROrbit=True)
    # Spatial distribution
    radii = numpy.array([orb.R() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radii) - 0.5) < 0.05
    ), "mean R of sampled points does not agree with that of the input surface profile"
    assert (
        numpy.fabs(numpy.std(radii) - numpy.sqrt(2.0) / 4.0) < 0.035
    ), "stddev R of sampled points does not agree with that of the input surface profile"
    # Velocity distribution
    radial_vels = numpy.array([orb.vR() for orb in sampled])
    assert (
        numpy.fabs(numpy.mean(radial_vels)) < 0.05
    ), "mean vR of sampled points does not agree with that of the input surface profile (i.e., it is not zero)"
    tangential_vels = numpy.array([orb.vT() for orb in sampled])
    drift_devs = numpy.array(
        [
            vt - r**beta + dfc.asymmetricdrift(r)
            for r, vt in zip(radii, tangential_vels)
        ]
    )
    assert (
        numpy.fabs(numpy.mean(drift_devs)) < 0.1
    ), "mean vT of sampled points does not agree with an estimate based on asymmetric drift"
    return None
def test_shudf_flat_DFcorrection_cleanup():
    """Remove the Shu correction savefile written by the setup test.

    Fix: the original used a bare ``except:``, which would also swallow
    ``KeyboardInterrupt``/``SystemExit``; only ``OSError`` (the exception
    ``os.remove`` raises) is caught now, and the cause is chained.
    """
    # This should run quickly, because the corrections are re-loaded from disk.
    dfc = shudf(
        beta=0.0,
        profileParams=(1.0 / 4.0, 1.0, 0.2),
        correct=True,
        niter=1,
        npoints=21,
        savedir=".",
    )
    try:
        os.remove(dfc._corr._createSavefilename(1))
    except OSError as e:
        raise AssertionError("removing DFcorrection's savefile did not work") from e
    return None
def test_axipotential():
    """Check the axisymmetric power-law/log potential used by diskdf.

    For a rotation curve vc = R^beta the potential is R^(2 beta)/(2 beta)
    (or log R for beta=0, and R=0 is evaluated at _RMIN).
    Fix: the failure message for the beta=-0.2 case previously said
    "beta=0.2"; it now names the beta value actually tested.
    """
    from galpy.df.diskdf import _RMIN, axipotential

    # flat rotation curve: potential is log(R)
    assert (
        numpy.fabs(axipotential(numpy.array([0.5]), beta=0.0) - numpy.log(0.5))
        < 10.0**-8
    ), "axipotential w/ beta=0.0 does not work as expected"
    # rising rotation curve: R^(2 beta)/(2 beta) with 2 beta = 0.4
    assert (
        numpy.fabs(axipotential(numpy.array([0.5]), beta=0.2) - 1.0 / 0.4 * 0.5**0.4)
        < 10.0**-8
    ), "axipotential w/ beta=0.2 does not work as expected"
    # falling rotation curve: note the sign flip, hence the `+` below
    assert (
        numpy.fabs(axipotential(numpy.array([0.5]), beta=-0.2) + 1.0 / 0.4 * 0.5**-0.4)
        < 10.0**-8
    ), "axipotential w/ beta=-0.2 does not work as expected"
    # special case of R=0 should be evaluated at _RMIN
    assert (
        numpy.fabs(axipotential(numpy.array([0.0]), beta=0.0) - numpy.log(_RMIN))
        < 10.0**-8
    ), "axipotential w/ beta=0.0 does not work as expected"
    return None
def test_dlToRphi():
    """_dlToRphi at distance 1 toward l=0 should land (almost) at the center."""
    from galpy.df.diskdf import _dlToRphi

    radius, angle = _dlToRphi(1.0, 0.0)
    assert (
        numpy.fabs(radius) < 10.0**-3.0
    ), "_dlToRphi close to center does not behave properly"
    # The azimuth is degenerate at the center; it should be ~0 modulo pi.
    assert (
        numpy.fabs(angle % numpy.pi) < 10.0**-3.0
    ), "_dlToRphi close to center does not behave properly"
    return None
def skew_samples(s):
    """Return the sample skewness (third standardized moment) of the array *s*."""
    mean = numpy.mean(s)
    deviations = s - mean
    variance = numpy.mean(deviations**2.0)
    third_moment = numpy.mean(deviations**3.0)
    return third_moment / variance**1.5
def skew_pdist(s, ps):
    """Return the skewness of the discrete distribution with values *s* and weights *ps*."""
    total_weight = numpy.sum(ps)
    mean = numpy.sum(s * ps) / total_weight
    deviations = s - mean
    variance = numpy.sum(deviations**2.0 * ps) / total_weight
    third_moment = numpy.sum(deviations**3.0 * ps) / total_weight
    return third_moment / variance**1.5
|
jobovyREPO_NAMEgalpyPATH_START.@galpy_extracted@galpy-main@tests@test_diskdf.py@.PATH_END.py
|
{
"filename": "_enabled.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/yaxis/tickformatstop/_enabled.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class EnabledValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="enabled", parent_name="layout.yaxis.tickformatstop", **kwargs
):
super(EnabledValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@yaxis@tickformatstop@_enabled.py@.PATH_END.py
|
{
"filename": "_show.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/surface/contours/x/_show.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="show", parent_name="surface.contours.x", **kwargs):
super(ShowValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@surface@contours@x@_show.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergl/error_x/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="scattergl.error_x", **kwargs):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergl@error_x@_color.py@.PATH_END.py
|
{
"filename": "shell.py",
"repo_name": "xraypy/xraylarch",
"repo_path": "xraylarch_extracted/xraylarch-master/larch/shell.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
Larch command-line shell
"""
import cmd
import os
import sys
import signal
from .interpreter import Interpreter
from .site_config import history_file
from .version import make_banner
from .larchlib import StdWriter
from .utils import uname
# Optional readline support: when available, command history is persisted
# across sessions (see Shell.__init__).
HAS_READLINE = False
try:
    import readline
    HAS_READLINE = True
except ImportError:
    pass

# Optional wxPython support: enables the GUI event-loop integration.
# `wx` is set to None so later `wx`-based checks fail gracefully.
HAS_WXPYTHON = False
try:
    import wx
    HAS_WXPYTHON = True
except ImportError:
    wx = None

# Libraries whose versions are reported in the startup banner
# (passed to make_banner(show_libraries=...)).
SHOW_LIBS = ['numpy', 'scipy', 'matplotlib', 'h5py',
             'lmfit', 'xraydb', 'wx', 'wxmplot']
class Shell(cmd.Cmd):
    """Interactive command shell for Larch.

    Wraps a ``larch.Interpreter`` in a ``cmd.Cmd`` read-eval-print loop:
    every input line is forwarded to the interpreter (see :meth:`default`),
    with optional readline history and optional wxPython integration.
    """
    def __init__(self, completekey='tab', debug=False, quiet=False,
                 stdin=None, stdout=None, banner_msg=None,
                 maxhist=25000, with_wx=False):
        # completekey: readline completion key passed to cmd.Cmd
        # debug:       stored for callers/subclasses; not used directly here
        # quiet:       suppress the startup banner when True
        # stdin/stdout: replacement streams; note these rebind sys.stdin/sys.stdout
        # banner_msg:  custom banner text; defaults to make_banner() output
        # maxhist:     maximum number of interpreter history entries
        # with_wx:     install the wxPython input hook when wx is available
        self.debug = debug
        cmd.Cmd.__init__(self, completekey='tab')
        if stdin is not None:
            sys.stdin = stdin
        if stdout is not None:
            sys.stdout = stdout
        self.stdin = sys.stdin
        self.stdout = sys.stdout
        # Restore saved command history, if readline is available.
        if HAS_READLINE:
            try:
                readline.read_history_file(history_file)
            except IOError:
                print(f'could not read history from {history_file}')

        # The embedded Larch interpreter that actually evaluates input.
        self.larch = Interpreter(historyfile=history_file,
                                 maxhistory=maxhist)
        self.larch.writer = StdWriter(_larch=self.larch)
        if with_wx and HAS_WXPYTHON:
            symtable = self.larch.symtable
            try:
                from .wxlib import LarchWxApp, inputhook
                app = LarchWxApp(redirect=False, clearSigInt=False)
                # Publish the wx app and input hook in the Larch symbol table
                # so Larch code (and plotting) can find them.
                symtable.set_symbol('_sys.wx.wxapp', app)
                symtable.set_symbol('_sys.wx.force_wxupdate', False)
                symtable.set_symbol('_sys.wx.parent', None)
                symtable.set_symbol('_sys.wx.inputhook', inputhook)
                # macOS needs a platform-specific event-loop "ping".
                if uname == 'darwin':
                    symtable.set_symbol('_sys.wx.ping', inputhook.ping_darwin)
                else:
                    symtable.set_symbol('_sys.wx.ping', inputhook.ping)
                inputhook.ON_INTERRUPT = self.onCtrlC
                inputhook.WXLARCH_SYM = symtable
            except:
                # NOTE(review): wx integration is best-effort; any failure here
                # silently falls back to a plain console shell.
                pass
        # Route Ctrl-C to the handler below instead of raising KeyboardInterrupt.
        signal.signal(signal.SIGINT, self.onCtrlC)
        self.prompt = self.larch.input.prompt
        writer = self.larch.writer
        # Colored output only when not on Windows and the writer supports styles.
        self.color_writer = (uname != 'win' and hasattr(writer, 'set_textstyle'))
        if not quiet:
            if banner_msg is None:
                banner_msg = make_banner(show_libraries=SHOW_LIBS)
            if self.color_writer:
                writer.set_textstyle('error')
            writer.write(banner_msg)
            writer.write("\n")
            if self.color_writer:
                writer.set_textstyle('text')

        self.larch_execute = self.default
        self.larch.run_init_scripts()

    def onCtrlC(self, *args, **kws):
        """SIGINT handler: flag the interrupt in the Larch symbol table."""
        self.larch.symtable.set_symbol('_sys.wx.keyboard_interrupt', True)
        return 0

    def on_exit(self, text=None):
        """Save history, close plot displays, and exit the process."""
        trim_last = False
        if text is not None:
            # Do not record the 'quit'/'exit' command itself in the history.
            trim_last = text.strip() in ('quit', 'exit')
        try:
            self.larch.input.history.save(trim_last=trim_last)
        except PermissionError:
            print("Warning: could not save session history -- permission denied")
        self.larch.symtable._plotter.close_all_displays()
        sys.exit()

    def do_exit(self, text):
        "exit"
        self.on_exit(text=text)

    def do_quit(self, text):
        "quit"
        self.on_exit(text=text)

    def emptyline(self):
        # Override cmd.Cmd: do nothing (do not repeat the last command).
        pass

    def onecmd(self, line):
        """Forward every input line to the Larch interpreter."""
        return self.default(line)

    def do_help(self, arg):
        """Evaluate help(arg) in Larch, stripping optional parens/quotes."""
        if arg.startswith('(') and arg.endswith(')'):
            arg = arg[1:-1]
        elif arg.startswith("'") and arg.endswith("'"):
            arg = arg[1:-1]
        elif arg.startswith('"') and arg.endswith('"'):
            arg = arg[1:-1]
        self.default(f"help({arg})")

    def do_shell(self, txt):
        """Run a system shell command (the ``!cmd`` escape)."""
        os.system(txt)

    def default(self, line):
        """Evaluate *line* with the Larch interpreter and print the result."""
        if line.strip() in ('quit', 'exit', 'quit()', 'exit()', 'EOF'):
            self.on_exit(line)
        ret = self.larch.eval(line, fname='<stdin>', lineno=0)
        if self.larch.error:
            # On error: clear pending input and show the traceback(s).
            self.larch.input.clear()
            if self.color_writer:
                self.larch.writer.set_textstyle('error')
            self.larch.show_errors()
            if self.color_writer:
                self.larch.writer.set_textstyle('line')
        if ret is not None:
            self.larch.writer.write("%s\n" % repr(ret))
        self.larch.writer.flush()
        # The interpreter decides the next prompt (e.g. continuation lines).
        self.prompt = self.larch.input.next_prompt
|
xraypyREPO_NAMExraylarchPATH_START.@xraylarch_extracted@xraylarch-master@larch@shell.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/scattergeo/selected/__init__.py",
"type": "Python"
}
|
import sys

# On Python 3.7+ use plotly's lazy relative-import machinery; otherwise
# import the submodule classes eagerly.
if sys.version_info >= (3, 7):
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._marker.Marker", "._textfont.Textfont"]
    )
else:
    from ._marker import Marker
    from ._textfont import Textfont
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@scattergeo@selected@__init__.py@.PATH_END.py
|
{
"filename": "FRB_Cat.ipynb",
"repo_name": "FRBs/FRB",
"repo_path": "FRB_extracted/FRB-main/docs/nb/FRB_Cat.ipynb",
"type": "Jupyter Notebook"
}
|
# Reading FRB Cat [v1.3]
v1.2 -- Update in 2018
v1.3 -- Update on 07 Dec 2019
http://frbcat.org/
```python
# imports
from imp import reload
import numpy as np
from matplotlib import pyplot as plt
from astropy.table import Table
from astropy.time import Time
from frb import frbcat as ffcat
from frb import experiment
```
## Load with 2018 format
```python
reload(ffcat)
obs = ffcat.FRBCat()
```
Using /home/xavier/Projects/FRB/frb/data/FRBs/frbcat_2019-12-07.csv for the FRB catalog
```python
obs.frbcat.sort('frb_name')
```
```python
obs.frbcat[-5:]
```
<i>Table length=5</i>
<table id="table140302596695096" class="table-striped table-bordered table-condensed">
<thead><tr><th>frb_name</th><th>utc</th><th>telescope</th><th>rop_raj</th><th>rop_decj</th><th>rop_gl</th><th>rop_gb</th><th>rmp_dm</th><th>rmp_width</th><th>rmp_snr</th><th>RA</th><th>DEC</th></tr></thead>
<thead><tr><th>str18</th><th>str23</th><th>str9</th><th>str12</th><th>str12</th><th>float64</th><th>float64</th><th>str19</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th></tr></thead>
<tr><td>FRB181228</td><td>2018-12-28 13:48:50.100</td><td>UTMOST</td><td>06:09:23.64</td><td>-45:58:02.4</td><td>253.3915</td><td>-26.0633</td><td>354.2&plusmn0.9</td><td>1.24</td><td>12.0</td><td>92.47723726394653</td><td>-45.98330613177865</td></tr>
<tr><td>FRB190116.J1249+27</td><td>2019-01-16 13:07:33.833</td><td>CHIME/FRB</td><td>12:49</td><td>27:09</td><td>210.5</td><td>89.5</td><td>444&plusmn0.6</td><td>4.0</td><td>0.0</td><td>192.29807580812846</td><td>27.14835236872834</td></tr>
<tr><td>FRB190209.J0937+77</td><td>2019-02-09 08:20:20.977</td><td>CHIME/FRB</td><td>09:37</td><td>77:40</td><td>134.2</td><td>34.8</td><td>424.6&plusmn0.6</td><td>3.7</td><td>0.0</td><td>144.10993259332915</td><td>77.67748003429321</td></tr>
<tr><td>FRB190222.J2052+69</td><td>2019-02-22 18:46:01.367</td><td>CHIME/FRB</td><td>20:52</td><td>69:50</td><td>104.9</td><td>15.9</td><td>460.6&plusmn0.1</td><td>2.97</td><td>0.0</td><td>313.0466791352272</td><td>69.85418471743013</td></tr>
<tr><td>FRB190523</td><td>2019-05-23 06:05:55.815</td><td>DSA-10</td><td>13:48:15.6</td><td>+72:28:11</td><td>117.03</td><td>44.0</td><td>760.8&plusmn0.6</td><td>0.42</td><td>11.5</td><td>207.075575740478</td><td>72.47075196238896</td></tr>
</table>
```python
obs.frbcat[0:20]
```
<i>Table length=20</i>
<table id="table140302596695040" class="table-striped table-bordered table-condensed">
<thead><tr><th>frb_name</th><th>utc</th><th>telescope</th><th>rop_raj</th><th>rop_decj</th><th>rop_gl</th><th>rop_gb</th><th>rmp_dm</th><th>rmp_width</th><th>rmp_snr</th><th>RA</th><th>DEC</th></tr></thead>
<thead><tr><th>str18</th><th>str23</th><th>str9</th><th>str12</th><th>str12</th><th>float64</th><th>float64</th><th>str19</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th></tr></thead>
<tr><td>FRB010125</td><td>2001-01-25 00:29:15.790</td><td>parkes</td><td>19:06:53</td><td>-40:37:14</td><td>356.641</td><td>-20.0206</td><td>790&plusmn3</td><td>9.4</td><td>17.0</td><td>286.72072305094264</td><td>-40.62064517309942</td></tr>
<tr><td>FRB010312</td><td>2001-03-12 11:06:47.980</td><td>parkes</td><td>05:26:54.9</td><td>-64:56:19.2</td><td>274.72</td><td>-33.3</td><td>1187&plusmn14</td><td>24.3</td><td>11.0</td><td>81.71890591818621</td><td>-64.93631423273322</td></tr>
<tr><td>FRB010621</td><td>2001-06-21 13:02:11.299</td><td>parkes</td><td>18:52:05</td><td>-08:29:35</td><td>25.434</td><td>-4.00381</td><td>745&plusmn10</td><td>7.0</td><td>16.3</td><td>283.0208291465951</td><td>-8.493049586958135</td></tr>
<tr><td>FRB010724</td><td>2001-07-24 19:50:01.690</td><td>parkes</td><td>01:18:06</td><td>-75:12:18</td><td>300.653</td><td>-41.8051</td><td>375</td><td>5.0</td><td>23.0</td><td>19.525065904947255</td><td>-75.20496347123685</td></tr>
<tr><td>FRB090625</td><td>2009-06-25 21:53:51.379</td><td>parkes</td><td>03:07:47</td><td>-29:55:36</td><td>226.444</td><td>-60.0303</td><td>899.55&plusmn0.01</td><td>1.92</td><td>30.0</td><td>46.94584178165078</td><td>-29.926709866601552</td></tr>
<tr><td>FRB110214</td><td>2011-02-14 07:14:10.353</td><td>parkes</td><td>01:21:17</td><td>-49:47:11</td><td>290.7</td><td>-66.6</td><td>168.9&plusmn0.5</td><td>1.9</td><td>13.0</td><td>20.351647596049716</td><td>-49.81046938710202</td></tr>
<tr><td>FRB110220</td><td>2011-02-20 01:55:48.096</td><td>parkes</td><td>22:34:38</td><td>-12:23:45</td><td>50.829</td><td>-54.7663</td><td>944.38&plusmn0.05</td><td>5.6</td><td>49.0</td><td>338.65833967919855</td><td>-12.395829957098739</td></tr>
<tr><td>FRB110523</td><td>2011-05-23 15:06:19.700</td><td>GBT</td><td>21:45:12</td><td>-00:09:37</td><td>56.12</td><td>-37.82</td><td>623.3&plusmn0.06</td><td>1.73</td><td>42.0</td><td>326.2965238534899</td><td>-0.15926998703343367</td></tr>
<tr><td>FRB110626</td><td>2011-06-26 21:33:17.477</td><td>parkes</td><td>21:03:43</td><td>-44:44:19</td><td>355.862</td><td>-41.7522</td><td>723&plusmn0.3</td><td>1.4</td><td>11.0</td><td>315.9291924469609</td><td>-44.73847978976432</td></tr>
<tr><td>FRB110703</td><td>2011-07-03 18:59:40.607</td><td>parkes</td><td>23:30:51</td><td>-02:52:24</td><td>80.9978</td><td>-59.0191</td><td>1103.6&plusmn0.7</td><td>4.3</td><td>16.0</td><td>352.71250345649395</td><td>-2.8733513425079926</td></tr>
<tr><td>FRB120127</td><td>2012-01-27 08:11:21.725</td><td>parkes</td><td>23:15:06</td><td>-18:25:38</td><td>49.2871</td><td>-66.2037</td><td>553.3&plusmn0.3</td><td>1.1</td><td>11.0</td><td>348.7750050379304</td><td>-18.42723215173697</td></tr>
<tr><td>FRB121002</td><td>2012-10-02 13:09:18.436</td><td>parkes</td><td>18:14:47</td><td>-85:11:53</td><td>308.22</td><td>-26.2647</td><td>1629.18&plusmn0.02</td><td>5.44</td><td>16.0</td><td>273.6967687919654</td><td>-85.19777502573858</td></tr>
<tr><td>FRB121102</td><td>2012-11-02 06:35:53.244</td><td>arecibo</td><td>05:32:09</td><td>33:05:13</td><td>174.95</td><td>-0.225138</td><td>557&plusmn2</td><td>3.0</td><td>14.0</td><td>83.03733147397499</td><td>33.08715440393463</td></tr>
<tr><td>FRB130626</td><td>2013-06-26 14:55:59.771</td><td>parkes</td><td>16:27:06</td><td>-07:27:48</td><td>7.45003</td><td>27.4203</td><td>952.4&plusmn0.1</td><td>1.98</td><td>21.0</td><td>246.77495842935215</td><td>-7.4633132463924605</td></tr>
<tr><td>FRB130628</td><td>2013-06-28 03:58:00.178</td><td>parkes</td><td>09:03:02</td><td>03:26:16</td><td>225.955</td><td>30.6556</td><td>469.88&plusmn0.01</td><td>0.64</td><td>29.0</td><td>135.7581186508183</td><td>3.438101042303216</td></tr>
<tr><td>FRB130729</td><td>2013-07-29 09:01:51.190</td><td>parkes</td><td>13:41:21</td><td>-05:59:43</td><td>324.788</td><td>54.7446</td><td>861&plusmn2</td><td>15.61</td><td>14.0</td><td>205.33766462069678</td><td>-5.995193975709084</td></tr>
<tr><td>FRB131104</td><td>2013-11-04 18:04:11.200</td><td>parkes</td><td>06:44:10</td><td>-51:16:40</td><td>260.55</td><td>-21.9253</td><td>779&plusmn1</td><td>2.08</td><td>30.0</td><td>101.0403428601284</td><td>-51.271538619174756</td></tr>
<tr><td>FRB140514</td><td>2014-05-14 17:14:11.060</td><td>parkes</td><td>22:34:06</td><td>-12:18:46</td><td>50.8413</td><td>-54.612</td><td>562.7&plusmn0.6</td><td>2.8</td><td>16.0</td><td>338.5250223506901</td><td>-12.312768823204856</td></tr>
<tr><td>FRB141113</td><td>2014-11-13 07:42:55.220</td><td>arecibo</td><td>06:13:00.1</td><td>18:47:11.2</td><td>191.9</td><td>0.36</td><td>400.3</td><td>2.0</td><td>8.4</td><td>93.24840124523706</td><td>18.799947101323646</td></tr>
<tr><td>FRB150215</td><td>2015-02-15 20:41:41.714</td><td>parkes</td><td>18:17:27</td><td>-04:54:15</td><td>24.6628</td><td>5.28092</td><td>1105.6&plusmn0.8</td><td>2.88</td><td>19.0</td><td>274.36250785471003</td><td>-4.9041513275113635</td></tr>
</table>
### Simple histogram
```python
times = Time(obs.frbcat['utc'])
```
```python
times.jyear
```
array([2001.06644852, 2001.19360179, 2001.47034411, 2001.56146861,
2009.48230637, 2011.12060646, 2011.13642825, 2011.38981354,
2011.48363619, 2011.50250908, 2012.07074941, 2012.75304074,
2012.83716611, 2013.48424975, 2013.48847441, 2013.57392549,
2013.843266 , 2014.36610043, 2014.86604099, 2015.12487964,
2015.29277723, 2015.43799336, 2015.59820886, 2015.79341737,
2015.92816541, 2015.9950106 , 2016.00233603, 2016.20773559,
2016.27339277, 2016.43439238, 2016.71903893, 2017.01940287,
2017.29080389, 2017.3230713 , 2017.42893652, 2017.51338043,
2017.5278772 , 2017.65415678, 2017.68116766, 2017.72477671,
2017.7540638 , 2017.75671848, 2017.79893275, 2017.80133086,
2017.87576917, 2017.93937636, 2017.94962103, 2017.95824622,
2018.02550495, 2018.05069716, 2018.07403537, 2018.0744799 ,
2018.0799595 , 2018.0827916 , 2018.1176992 , 2018.18375837,
2018.18939066, 2018.20044398, 2018.22559088, 2018.29173039,
2018.32694615, 2018.36937684, 2018.395998 , 2018.40296603,
2018.5322841 , 2018.56331194, 2018.56683412, 2018.57420394,
2018.57230269, 2018.57536207, 2018.58142681, 2018.59573552,
2018.6070593 , 2018.60765212, 2018.61188217, 2018.61770819,
2018.61765199, 2018.62443786, 2018.70753542, 2018.73013767,
2018.78898954, 2018.79242646, 2018.79391246, 2018.82731238,
2018.86442174, 2018.88350645, 2018.90719388, 2018.98993999,
2019.04188068, 2019.10704303, 2019.14382467, 2019.38878609])
```python
bins = np.arange(2001, 2020,1)
bins
```
array([2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019])
```python
plt.clf()
ax = plt.gca()
ax.hist(times.jyear, bins=bins)
#
plt.show()
```

## Load an old one
```python
reload(ffcat)
obs = ffcat.FRBCat(frbcat_file='frbcat_2017-04-06.csv')
obs
```
Using /home/xavier/Projects/FRB/frb/data/FRBs/frbcat_2017-04-06.csv for the FRB catalog
<FRBCat: FRB Catalog with 21 sources
>
```python
obs.frbcat[0:5]
```
<i>Table masked=True length=5</i>
<table id="table140302595723784" class="table-striped table-bordered table-condensed">
<thead><tr><th>Name</th><th>Telescope</th><th>Type</th><th>UTC</th><th>Beam</th><th>Receiver</th><th>Backend</th><th>RAJ</th><th>DECJ</th><th>Pointing Error</th><th>FWHM</th><th>Sampling Time</th><th>Bandwidth</th><th>Centre Frequnecy</th><th>Bits per Sample</th><th>Gain</th><th>System Temperature</th><th>NE2001 DM Limit</th><th>DM</th><th>DM error</th><th>SNR</th><th>Width</th><th>Width error lower</th><th>Width error upper</th><th>Flux</th><th>Flux error lower</th><th>Flux error upper</th><th>DM Index</th><th>DM Index Error</th><th>Scattering Index</th><th>Scattering Index Error</th><th>Scattering Time</th><th>Scattering Time Error</th><th>Linear Polarization Fraction</th><th>Linear Polarization Fraction Error</th><th>Circular Polarization Fraction</th><th>Circular Polarization Fraction Error</th><th>Photometric Redshift</th><th>Photometric Redshift Error</th><th>Spectroscopic Redshift</th><th>Spectroscopic Redshift Error</th><th>Reference</th><th>RA</th><th>DEC</th></tr></thead>
<thead><tr><th>str9</th><th>str7</th><th>str5</th><th>str19</th><th>int64</th><th>str6</th><th>str18</th><th>str8</th><th>str9</th><th>int64</th><th>float64</th><th>float64</th><th>float64</th><th>int64</th><th>int64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>int64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>str97</th><th>float64</th><th>float64</th></tr></thead>
<tr><td>FRB010125</td><td>parkes</td><td>radio</td><td>2001-01-25 00:29:14</td><td>5</td><td>MB20</td><td>AFB</td><td>19:06:53</td><td>-40:37:14</td><td>11</td><td>15.0</td><td>0.125</td><td>288.0</td><td>--</td><td>1</td><td>0.69</td><td>28.0</td><td>110.0</td><td>790.0</td><td>3.0</td><td>17.0</td><td>9.4</td><td>0.2</td><td>0.2</td><td>0.3</td><td>--</td><td>--</td><td>2.0</td><td>0.01</td><td>-4.2</td><td>1.2</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>Burke-Spolaor S., Bannister K. W., 2014, ApJ, 792, 19</td><td>286.7208333333333</td><td>-40.620555555555555</td></tr>
<tr><td>FRB010125</td><td>parkes</td><td>radio</td><td>2001-01-25 00:29:14</td><td>5</td><td>MB20</td><td>AFB</td><td>19:06:53</td><td>-40:37:14</td><td>11</td><td>15.0</td><td>0.125</td><td>288.0</td><td>--</td><td>1</td><td>0.69</td><td>28.0</td><td>110.0</td><td>790.3</td><td>3.0</td><td>25.0</td><td>10.6</td><td>2.5</td><td>2.8</td><td>0.54</td><td>0.07</td><td>0.11</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>Burke-Spolaor S., Bannister K. W., 2014, ApJ, 792, 19</td><td>286.7208333333333</td><td>-40.620555555555555</td></tr>
<tr><td>FRB010621</td><td>parkes</td><td>radio</td><td>2001-06-21 13:02:09</td><td>10</td><td>MB20</td><td>AFB</td><td>18:52:05</td><td>-08:29:35</td><td>11</td><td>15.0</td><td>0.25</td><td>288.0</td><td>--</td><td>1</td><td>0.581</td><td>28.0</td><td>523.0</td><td>748.0</td><td>3.0</td><td>18.0</td><td>8.0</td><td>2.25</td><td>4.0</td><td>0.53</td><td>0.09</td><td>0.26</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>Keane E. F., Kramer M., Lyne A. G., Stappers B. W., McLaughlin M. A., 2011, MNRAS, 415, 3065</td><td>283.0208333333333</td><td>-8.493055555555555</td></tr>
<tr><td>FRB010621</td><td>parkes</td><td>radio</td><td>2001-06-21 13:02:09</td><td>10</td><td>MB20</td><td>AFB</td><td>18:52:05</td><td>-08:29:35</td><td>11</td><td>15.0</td><td>0.25</td><td>288.0</td><td>--</td><td>1</td><td>0.581</td><td>28.0</td><td>523.0</td><td>745.0</td><td>10.0</td><td>--</td><td>7.0</td><td>--</td><td>--</td><td>0.41</td><td>--</td><td>--</td><td>2.02</td><td>0.01</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>Keane E. F., Kramer M., Lyne A. G., Stappers B. W., McLaughlin M. A., 2011, MNRAS, 415, 3065</td><td>283.0208333333333</td><td>-8.493055555555555</td></tr>
<tr><td>FRB010724</td><td>parkes</td><td>radio</td><td>2001-07-24 19:50:00</td><td>6</td><td>MB20</td><td>AFB</td><td>01:18:06</td><td>-75:12:18</td><td>11</td><td>15.0</td><td>1.0</td><td>288.0</td><td>--</td><td>1</td><td>0.69</td><td>28.0</td><td>44.58</td><td>375.0</td><td>3.0</td><td>100.0</td><td>20.0</td><td>0.0</td><td>0.0</td><td>1.574</td><td>0.0</td><td>0.0</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>--</td><td>Lorimer D. R., Bailes M., McLaughlin M. A., Narkevic, D. J., Crawford F., 2007, Science, 318, 777</td><td>19.525</td><td>-75.205</td></tr>
</table>
----
# For Sky
```python
obs = ffcat.FRBCat()
```
Using /home/xavier/Projects/FRB/frb/data/FRBs/frbcat_2019-12-07.csv for the FRB catalog
```python
obs.frbcat.sort('utc')
```
```python
obs.frbcat.write('sky_tbl.fits')
```
```python
```
|
FRBsREPO_NAMEFRBPATH_START.@FRB_extracted@FRB-main@docs@nb@FRB_Cat.ipynb@.PATH_END.py
|
{
"filename": "E12_not_second.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/parso/py3/tests/normalizer_issue_files/E12_not_second.py",
"type": "Python"
}
|
def qualify_by_address(
self, cr, uid, ids, context=None,
params_to_check=frozenset(QUALIF_BY_ADDRESS_PARAM)):
""" This gets called by the web server """
def qualify_by_address(self, cr, uid, ids, context=None,
params_to_check=frozenset(QUALIF_BY_ADDRESS_PARAM)):
""" This gets called by the web server """
_ipv4_re = re.compile('^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.'
'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.'
'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.'
'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')
fct("""
AAA """ + status_2_string)
if context:
msg = """\
action: GET-CONFIG
payload:
ip_address: "%(ip)s"
username: "%(username)s"
""" % context
if context:
msg = """\
action: \
GET-CONFIG
""" % context
if context:
#: E122+2:0
msg = """\
action: """\
"""GET-CONFIG
""" % context
def unicode2html(s):
"""Convert the characters &<>'" in string s to HTML-safe sequences.
Convert newline to <br> too."""
#: E127+1:28
return unicode((s or '').replace('&', '&')
.replace('\n', '<br>\n'))
parser.add_option('--count', action='store_true',
help="print total number of errors and warnings "
"to standard error and set exit code to 1 if "
"total is not null")
parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
help="exclude files or directories which match these "
"comma separated patterns (default: %s)" %
DEFAULT_EXCLUDE)
add_option('--count',
#: E135+1
help="print total number of errors "
"to standard error total is not null")
add_option('--count',
#: E135+2:11
help="print total number of errors "
"to standard error "
"total is not null")
help = ("print total number of errors " +
"to standard error")
help = "print total number of errors " \
"to standard error"
help = u"print total number of errors " \
u"to standard error"
help = b"print total number of errors " \
b"to standard error"
#: E122+1:5
help = br"print total number of errors " \
br"to standard error"
d = dict('foo', help="exclude files or directories which match these "
#: E135:9
"comma separated patterns (default: %s)" % DEFAULT_EXCLUDE)
d = dict('foo', help=u"exclude files or directories which match these "
u"comma separated patterns (default: %s)"
% DEFAULT_EXCLUDE)
#: E135+1:9 E135+2:9
d = dict('foo', help=b"exclude files or directories which match these "
b"comma separated patterns (default: %s)"
% DEFAULT_EXCLUDE)
d = dict('foo', help=br"exclude files or directories which match these "
br"comma separated patterns (default: %s)" %
DEFAULT_EXCLUDE)
d = dict('foo',
help="exclude files or directories which match these "
"comma separated patterns (default: %s)" %
DEFAULT_EXCLUDE)
d = dict('foo',
help="exclude files or directories which match these "
"comma separated patterns (default: %s, %s)" %
(DEFAULT_EXCLUDE, DEFAULT_IGNORE)
)
d = dict('foo',
help="exclude files or directories which match these "
"comma separated patterns (default: %s, %s)" %
# who knows what might happen here?
(DEFAULT_EXCLUDE, DEFAULT_IGNORE)
)
# parens used to allow the indenting.
troublefree_hash = {
"hash": "value",
"long": ("the quick brown fox jumps over the lazy dog before doing a "
"somersault"),
"long key that tends to happen more when you're indented": (
"stringwithalongtoken you don't want to break"
),
}
# another accepted form
troublefree_hash = {
"hash": "value",
"long": "the quick brown fox jumps over the lazy dog before doing "
"a somersault",
("long key that tends to happen more "
"when you're indented"): "stringwithalongtoken you don't want to break",
}
# confusing but accepted... don't do that
troublesome_hash = {
"hash": "value",
"long": "the quick brown fox jumps over the lazy dog before doing a "
#: E135:4
"somersault",
"longer":
"the quick brown fox jumps over the lazy dog before doing a "
"somersaulty",
"long key that tends to happen more "
"when you're indented": "stringwithalongtoken you don't want to break",
}
d = dict('foo',
help="exclude files or directories which match these "
"comma separated patterns (default: %s)" %
DEFAULT_EXCLUDE
)
d = dict('foo',
help="exclude files or directories which match these "
"comma separated patterns (default: %s)" % DEFAULT_EXCLUDE,
foobar="this clearly should work, because it is at "
"the right indent level",
)
rv.update(dict.fromkeys(
('qualif_nr', 'reasonComment_en', 'reasonComment_fr',
'reasonComment_de', 'reasonComment_it'),
'?'), "foo",
context={'alpha': 4, 'beta': 53242234, 'gamma': 17})
def f():
try:
if not Debug:
hello('''
If you would like to see debugging output,
try: %s -d5
''' % sys.argv[0])
# The try statement above was not finished.
#: E901
d = { # comment
1: 2
}
# issue 138 (we won't allow this in parso)
#: E126+2:9
[
12, # this is a multi-line inline
# comment
]
# issue 151
#: E122+1:3
if a > b and \
c > d:
moo_like_a_cow()
my_list = [
1, 2, 3,
4, 5, 6,
]
my_list = [1, 2, 3,
4, 5, 6,
]
result = some_function_that_takes_arguments(
'a', 'b', 'c',
'd', 'e', 'f',
)
result = some_function_that_takes_arguments('a', 'b', 'c',
'd', 'e', 'f',
)
# issue 203
dica = {
('abc'
'def'): (
'abc'),
}
(abcdef[0]
[1]) = (
'abc')
('abc'
'def') == (
'abc')
# issue 214
bar(
1).zap(
2)
bar(
1).zap(
2)
if True:
def example_issue254():
return [node.copy(
(
replacement
# First, look at all the node's current children.
for child in node.children
# Replace them.
for replacement in replace(child)
),
dict(name=token.undefined)
)]
def valid_example():
return [node.copy(properties=dict(
(key, val if val is not None else token.undefined)
for key, val in node.items()
))]
foo([
'bug'
])
# issue 144, finally!
some_hash = {
"long key that tends to happen more when you're indented":
"stringwithalongtoken you don't want to break",
}
{
1:
999999 if True
else 0,
}
abc = dedent(
'''
mkdir -p ./{build}/
mv ./build/ ./{build}/%(revision)s/
'''.format(
build='build',
# more stuff
)
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@parso@py3@tests@normalizer_issue_files@E12_not_second.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "qubvel/segmentation_models.pytorch",
"repo_path": "segmentation_models.pytorch_extracted/segmentation_models.pytorch-main/segmentation_models_pytorch/base/__init__.py",
"type": "Python"
}
|
from .model import SegmentationModel
from .modules import Conv2dReLU, Attention
from .heads import SegmentationHead, ClassificationHead
# Public API of the base package: the abstract model plus the reusable
# building blocks re-exported above.
__all__ = [
    "SegmentationModel",
    "Conv2dReLU",
    "Attention",
    "SegmentationHead",
    "ClassificationHead",
]
|
qubvelREPO_NAMEsegmentation_models.pytorchPATH_START.@segmentation_models.pytorch_extracted@segmentation_models.pytorch-main@segmentation_models_pytorch@base@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/integrations/prefect-ray/README.md",
"type": "Markdown"
}
|
# prefect-ray
<p align="center">
<!--- Insert a cover image here -->
<!--- <br> -->
<a href="https://pypi.python.org/pypi/prefect-ray/" alt="PyPI version">
<img alt="PyPI" src="https://img.shields.io/pypi/v/prefect-ray?color=26272B&labelColor=090422"></a>
<a href="https://pepy.tech/badge/prefect-ray/" alt="Downloads">
<img src="https://img.shields.io/pypi/dm/prefect-ray?color=26272B&labelColor=090422" /></a>
</p>
See the docs at [https://docs.prefect.io/integrations/prefect-ray](https://docs.prefect.io/integrations/prefect-ray) for more information.
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@integrations@prefect-ray@README.md@.PATH_END.py
|
{
"filename": "cp424.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/encodings/cp424.py",
"type": "Python"
}
|
""" Python Character Mapping Codec cp424 generated from 'MAPPINGS/VENDORS/MISC/CP424.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: delegate to the C-level charmap codec using the
    # module-level CP424 translation tables defined below.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is context-free, so each chunk is encoded
    # independently; only the encoded bytes ([0]) are returned.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte decoding needs no buffering between chunks; only the
    # decoded text ([0]) is returned.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
# Stream wrapper: inherits encode() from Codec, stream plumbing from codecs.
class StreamWriter(Codec,codecs.StreamWriter):
    pass
# Stream wrapper: inherits decode() from Codec, stream plumbing from codecs.
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    # Registration hook looked up by the encodings package: bundle all the
    # codec entry points into one CodecInfo record for 'cp424'.
    return codecs.CodecInfo(
        name='cp424',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> SELECT
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> REQUIRED NEW LINE
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> GRAPHIC ESCAPE
'\x8d' # 0x09 -> SUPERSCRIPT
'\x8e' # 0x0A -> REPEAT
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> RESTORE/ENABLE PRESENTATION
'\x85' # 0x15 -> NEW LINE
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> PROGRAM OPERATOR COMMUNICATION
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> UNIT BACK SPACE
'\x8f' # 0x1B -> CUSTOMER USE ONE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> DIGIT SELECT
'\x81' # 0x21 -> START OF SIGNIFICANCE
'\x82' # 0x22 -> FIELD SEPARATOR
'\x83' # 0x23 -> WORD UNDERSCORE
'\x84' # 0x24 -> BYPASS OR INHIBIT PRESENTATION
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> SET ATTRIBUTE
'\x89' # 0x29 -> START FIELD EXTENDED
'\x8a' # 0x2A -> SET MODE OR SWITCH
'\x8b' # 0x2B -> CONTROL SEQUENCE PREFIX
'\x8c' # 0x2C -> MODIFY FIELD ATTRIBUTE
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> <reserved>
'\x91' # 0x31 -> <reserved>
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> INDEX RETURN
'\x94' # 0x34 -> PRESENTATION POSITION
'\x95' # 0x35 -> TRANSPARENT
'\x96' # 0x36 -> NUMERIC BACKSPACE
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> SUBSCRIPT
'\x99' # 0x39 -> INDENT TABULATION
'\x9a' # 0x3A -> REVERSE FORM FEED
'\x9b' # 0x3B -> CUSTOMER USE THREE
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> <reserved>
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\u05d0' # 0x41 -> HEBREW LETTER ALEF
'\u05d1' # 0x42 -> HEBREW LETTER BET
'\u05d2' # 0x43 -> HEBREW LETTER GIMEL
'\u05d3' # 0x44 -> HEBREW LETTER DALET
'\u05d4' # 0x45 -> HEBREW LETTER HE
'\u05d5' # 0x46 -> HEBREW LETTER VAV
'\u05d6' # 0x47 -> HEBREW LETTER ZAYIN
'\u05d7' # 0x48 -> HEBREW LETTER HET
'\u05d8' # 0x49 -> HEBREW LETTER TET
'\xa2' # 0x4A -> CENT SIGN
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'|' # 0x4F -> VERTICAL LINE
'&' # 0x50 -> AMPERSAND
'\u05d9' # 0x51 -> HEBREW LETTER YOD
'\u05da' # 0x52 -> HEBREW LETTER FINAL KAF
'\u05db' # 0x53 -> HEBREW LETTER KAF
'\u05dc' # 0x54 -> HEBREW LETTER LAMED
'\u05dd' # 0x55 -> HEBREW LETTER FINAL MEM
'\u05de' # 0x56 -> HEBREW LETTER MEM
'\u05df' # 0x57 -> HEBREW LETTER FINAL NUN
'\u05e0' # 0x58 -> HEBREW LETTER NUN
'\u05e1' # 0x59 -> HEBREW LETTER SAMEKH
'!' # 0x5A -> EXCLAMATION MARK
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'\xac' # 0x5F -> NOT SIGN
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\u05e2' # 0x62 -> HEBREW LETTER AYIN
'\u05e3' # 0x63 -> HEBREW LETTER FINAL PE
'\u05e4' # 0x64 -> HEBREW LETTER PE
'\u05e5' # 0x65 -> HEBREW LETTER FINAL TSADI
'\u05e6' # 0x66 -> HEBREW LETTER TSADI
'\u05e7' # 0x67 -> HEBREW LETTER QOF
'\u05e8' # 0x68 -> HEBREW LETTER RESH
'\u05e9' # 0x69 -> HEBREW LETTER SHIN
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\ufffe' # 0x70 -> UNDEFINED
'\u05ea' # 0x71 -> HEBREW LETTER TAV
'\ufffe' # 0x72 -> UNDEFINED
'\ufffe' # 0x73 -> UNDEFINED
'\xa0' # 0x74 -> NO-BREAK SPACE
'\ufffe' # 0x75 -> UNDEFINED
'\ufffe' # 0x76 -> UNDEFINED
'\ufffe' # 0x77 -> UNDEFINED
'\u2017' # 0x78 -> DOUBLE LOW LINE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\ufffe' # 0x80 -> UNDEFINED
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\ufffe' # 0x9A -> UNDEFINED
'\ufffe' # 0x9B -> UNDEFINED
'\ufffe' # 0x9C -> UNDEFINED
'\xb8' # 0x9D -> CEDILLA
'\ufffe' # 0x9E -> UNDEFINED
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\ufffe' # 0xAA -> UNDEFINED
'\ufffe' # 0xAB -> UNDEFINED
'\ufffe' # 0xAC -> UNDEFINED
'\ufffe' # 0xAD -> UNDEFINED
'\ufffe' # 0xAE -> UNDEFINED
'\xae' # 0xAF -> REGISTERED SIGN
'^' # 0xB0 -> CIRCUMFLEX ACCENT
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'[' # 0xBA -> LEFT SQUARE BRACKET
']' # 0xBB -> RIGHT SQUARE BRACKET
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\ufffe' # 0xCB -> UNDEFINED
'\ufffe' # 0xCC -> UNDEFINED
'\ufffe' # 0xCD -> UNDEFINED
'\ufffe' # 0xCE -> UNDEFINED
'\ufffe' # 0xCF -> UNDEFINED
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\ufffe' # 0xDB -> UNDEFINED
'\ufffe' # 0xDC -> UNDEFINED
'\ufffe' # 0xDD -> UNDEFINED
'\ufffe' # 0xDE -> UNDEFINED
'\ufffe' # 0xDF -> UNDEFINED
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\ufffe' # 0xEB -> UNDEFINED
'\ufffe' # 0xEC -> UNDEFINED
'\ufffe' # 0xED -> UNDEFINED
'\ufffe' # 0xEE -> UNDEFINED
'\ufffe' # 0xEF -> UNDEFINED
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\ufffe' # 0xFB -> UNDEFINED
'\ufffe' # 0xFC -> UNDEFINED
'\ufffe' # 0xFD -> UNDEFINED
'\ufffe' # 0xFE -> UNDEFINED
'\x9f' # 0xFF -> EIGHT ONES
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@encodings@cp424.py@.PATH_END.py
|
{
"filename": "plugin_upload.py",
"repo_name": "sbraden/circle-craters",
"repo_path": "circle-craters_extracted/circle-craters-master/plugin_upload.py",
"type": "Python"
}
|
#!/usr/bin/env python
# coding=utf-8
"""This script uploads a plugin package on the server.
Authors: A. Pasotti, V. Picavet
git sha : $TemplateVCSFormat
"""
import sys
import getpass
import xmlrpclib
from optparse import OptionParser
# Configuration
PROTOCOL = 'http'
SERVER = 'plugins.qgis.org'  # default upload host (overridable via -s)
PORT = '80'                  # default port (overridable via -p)
ENDPOINT = '/plugins/RPC2/'  # XML-RPC endpoint path on the server
VERBOSE = False              # passed through to xmlrpclib.ServerProxy
def main(parameters, arguments):
    """Upload the plugin zip given on the command line via XML-RPC.

    Builds an endpoint URL with the credentials embedded, then calls the
    remote ``plugin.upload`` method with the zip file's raw bytes.

    :param parameters: Parsed command line options (username, password,
        server, port).
    :param arguments: Positional command line arguments; arguments[0] is
        the path to the plugin zip file.
    """
    # Credentials are embedded in the URL: proto://user:password@host:port/path
    address = "%s://%s:%s@%s:%s%s" % (
        PROTOCOL,
        parameters.username,
        parameters.password,
        parameters.server,
        parameters.port,
        ENDPOINT)
    # Never echo the raw password; mask it before printing.
    print "Connecting to: %s" % hide_password(address)
    server = xmlrpclib.ServerProxy(address, verbose=VERBOSE)
    try:
        plugin_id, version_id = server.plugin.upload(
            xmlrpclib.Binary(open(arguments[0]).read()))
        print "Plugin ID: %s" % plugin_id
        print "Version ID: %s" % version_id
    except xmlrpclib.ProtocolError, err:  # HTTP-level failure (Python 2 syntax)
        print "A protocol error occurred"
        print "URL: %s" % hide_password(err.url, 0)
        print "HTTP/HTTPS headers: %s" % err.headers
        print "Error code: %d" % err.errcode
        print "Error message: %s" % err.errmsg
    except xmlrpclib.Fault, err:  # server-side application error
        print "A fault occurred"
        print "Fault code: %d" % err.faultCode
        print "Fault string: %s" % err.faultString
def hide_password(url, start=6):
    """Return *url* with the password portion replaced by '*' characters.

    :param url: URL to upload the plugin to (``proto://user:pass@host...``).
    :type url: str
    :param start: Index from which to search for the ``user:pass`` colon
        (the default skips the ``proto://`` scheme separator).
    :type start: int
    """
    pwd_begin = url.find(':', start) + 1
    pwd_end = url.find('@')
    mask = '*' * (pwd_end - pwd_begin)
    return url[:pwd_begin] + mask + url[pwd_end:]
if __name__ == "__main__":
parser = OptionParser(usage="%prog [options] plugin.zip")
parser.add_option(
"-w", "--password", dest="password",
help="Password for plugin site", metavar="******")
parser.add_option(
"-u", "--username", dest="username",
help="Username of plugin site", metavar="user")
parser.add_option(
"-p", "--port", dest="port",
help="Server port to connect to", metavar="80")
parser.add_option(
"-s", "--server", dest="server",
help="Specify server name", metavar="plugins.qgis.org")
options, args = parser.parse_args()
if len(args) != 1:
print "Please specify zip file.\n"
parser.print_help()
sys.exit(1)
if not options.server:
options.server = SERVER
if not options.port:
options.port = PORT
if not options.username:
# interactive mode
username = getpass.getuser()
print "Please enter user name [%s] :" % username,
res = raw_input()
if res != "":
options.username = res
else:
options.username = username
if not options.password:
# interactive mode
options.password = getpass.getpass()
main(options, args)
|
sbradenREPO_NAMEcircle-cratersPATH_START.@circle-craters_extracted@circle-craters-master@plugin_upload.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/hoverlabel/font/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``scattergeo.hoverlabel.font.family``."""

    def __init__(
        self, plotly_name="family", parent_name="scattergeo.hoverlabel.font", **kwargs
    ):
        # Fill in defaults only where the caller did not supply a value.
        for option, default in (
            ("array_ok", True),
            ("edit_type", "none"),
            ("no_blank", True),
            ("strict", True),
        ):
            kwargs.setdefault(option, default)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@hoverlabel@font@_family.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/restricted/googletest/googletest/README.md",
"type": "Markdown"
}
|
### Generic Build Instructions
#### Setup
To build GoogleTest and your tests that use it, you need to tell your build
system where to find its headers and source files. The exact way to do it
depends on which build system you use, and is usually straightforward.
### Build with CMake
GoogleTest comes with a CMake build script
([CMakeLists.txt](https://github.com/google/googletest/blob/main/CMakeLists.txt))
that can be used on a wide range of platforms ("C" stands for cross-platform).
If you don't have CMake installed already, you can download it for free from
<https://cmake.org/>.
CMake works by generating native makefiles or build projects that can be used in
the compiler environment of your choice. You can either build GoogleTest as a
standalone project or it can be incorporated into an existing CMake build for
another project.
#### Standalone CMake Project
When building GoogleTest as a standalone project, the typical workflow starts
with
```
git clone https://github.com/google/googletest.git -b v1.15.0
cd googletest # Main directory of the cloned repository.
mkdir build # Create a directory to hold the build output.
cd build
cmake .. # Generate native build scripts for GoogleTest.
```
The above command also includes GoogleMock by default. And so, if you want to
build only GoogleTest, you should replace the last command with
```
cmake .. -DBUILD_GMOCK=OFF
```
If you are on a \*nix system, you should now see a Makefile in the current
directory. Just type `make` to build GoogleTest. And then you can simply install
GoogleTest if you are a system administrator.
```
make
sudo make install # Install in /usr/local/ by default
```
If you use Windows and have Visual Studio installed, a `gtest.sln` file and
several `.vcproj` files will be created. You can then build them using Visual
Studio.
On Mac OS X with Xcode installed, a `.xcodeproj` file will be generated.
#### Incorporating Into An Existing CMake Project
If you want to use GoogleTest in a project which already uses CMake, the easiest
way is to get installed libraries and headers.
* Import GoogleTest by using `find_package` (or `pkg_check_modules`). For
example, if `find_package(GTest CONFIG REQUIRED)` succeeds, you can use the
libraries as `GTest::gtest`, `GTest::gmock`.
And a more robust and flexible approach is to build GoogleTest as part of that
project directly. This is done by making the GoogleTest source code available to
the main build and adding it using CMake's `add_subdirectory()` command. This
has the significant advantage that the same compiler and linker settings are
used between GoogleTest and the rest of your project, so issues associated with
using incompatible libraries (e.g. debug/release), etc. are avoided. This is
particularly useful on Windows. Making GoogleTest's source code available to the
main build can be done a few different ways:
* Download the GoogleTest source code manually and place it at a known
location. This is the least flexible approach and can make it more difficult
to use with continuous integration systems, etc.
* Embed the GoogleTest source code as a direct copy in the main project's
source tree. This is often the simplest approach, but is also the hardest to
keep up to date. Some organizations may not permit this method.
* Add GoogleTest as a git submodule or equivalent. This may not always be
possible or appropriate. Git submodules, for example, have their own set of
advantages and drawbacks.
* Use CMake to download GoogleTest as part of the build's configure step. This
approach doesn't have the limitations of the other methods.
The last of the above methods is implemented with a small piece of CMake code
that downloads and pulls the GoogleTest code into the main build.
Just add to your `CMakeLists.txt`:
```cmake
include(FetchContent)
FetchContent_Declare(
googletest
# Specify the commit you depend on and update it regularly.
URL https://github.com/google/googletest/archive/5376968f6948923e2411081fd9372e71a59d8e77.zip
)
# For Windows: Prevent overriding the parent project's compiler/linker settings
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
FetchContent_MakeAvailable(googletest)
# Now simply link against gtest or gtest_main as needed. Eg
add_executable(example example.cpp)
target_link_libraries(example gtest_main)
add_test(NAME example_test COMMAND example)
```
Note that this approach requires CMake 3.14 or later due to its use of the
`FetchContent_MakeAvailable()` command.
##### Visual Studio Dynamic vs Static Runtimes
By default, new Visual Studio projects link the C runtimes dynamically but
GoogleTest links them statically. This will generate an error that looks
something like the following: gtest.lib(gtest-all.obj) : error LNK2038: mismatch
detected for 'RuntimeLibrary': value 'MTd_StaticDebug' doesn't match value
'MDd_DynamicDebug' in main.obj
GoogleTest already has a CMake option for this: `gtest_force_shared_crt`
Enabling this option will make gtest link the runtimes dynamically too, and
match the project in which it is included.
#### C++ Standard Version
An environment that supports C++14 is required in order to successfully build
GoogleTest. One way to ensure this is to specify the standard in the top-level
project, for example by using the `set(CMAKE_CXX_STANDARD 14)` command along
with `set(CMAKE_CXX_STANDARD_REQUIRED ON)`. If this is not feasible, for example
in a C project using GoogleTest for validation, then it can be specified by
adding it to the options for cmake via the`-DCMAKE_CXX_FLAGS` option.
### Tweaking GoogleTest
GoogleTest can be used in diverse environments. The default configuration may
not work (or may not work well) out of the box in some environments. However,
you can easily tweak GoogleTest by defining control macros on the compiler
command line. Generally, these macros are named like `GTEST_XYZ` and you define
them to either 1 or 0 to enable or disable a certain feature.
We list the most frequently used macros below. For a complete list, see file
[include/gtest/internal/gtest-port.h](https://github.com/google/googletest/blob/main/googletest/include/gtest/internal/gtest-port.h).
### Multi-threaded Tests
GoogleTest is thread-safe where the pthread library is available. After
`#include <gtest/gtest.h>`, you can check the
`GTEST_IS_THREADSAFE` macro to see whether this is the case (yes if the macro is
`#defined` to 1, no if it's undefined.).
If GoogleTest doesn't correctly detect whether pthread is available in your
environment, you can force it with
```
-DGTEST_HAS_PTHREAD=1
```
or
```
-DGTEST_HAS_PTHREAD=0
```
When GoogleTest uses pthread, you may need to add flags to your compiler and/or
linker to select the pthread library, or you'll get link errors. If you use the
CMake script, this is taken care of for you. If you use your own build script,
you'll need to read your compiler and linker's manual to figure out what flags
to add.
### As a Shared Library (DLL)
GoogleTest is compact, so most users can build and link it as a static library
for the simplicity. You can choose to use GoogleTest as a shared library (known
as a DLL on Windows) if you prefer.
To compile *gtest* as a shared library, add
```
-DGTEST_CREATE_SHARED_LIBRARY=1
```
to the compiler flags. You'll also need to tell the linker to produce a shared
library instead - consult your linker's manual for how to do it.
To compile your *tests* that use the gtest shared library, add
```
-DGTEST_LINKED_AS_SHARED_LIBRARY=1
```
to the compiler flags.
Note: while the above steps aren't technically necessary today when using some
compilers (e.g. GCC), they may become necessary in the future, if we decide to
improve the speed of loading the library (see
<https://gcc.gnu.org/wiki/Visibility> for details). Therefore you are
recommended to always add the above flags when using GoogleTest as a shared
library. Otherwise a future release of GoogleTest may break your build script.
### Avoiding Macro Name Clashes
In C++, macros don't obey namespaces. Therefore two libraries that both define a
macro of the same name will clash if you `#include` both definitions. In case a
GoogleTest macro clashes with another library, you can force GoogleTest to
rename its macro to avoid the conflict.
Specifically, if both GoogleTest and some other code define macro FOO, you can
add
```
-DGTEST_DONT_DEFINE_FOO=1
```
to the compiler flags to tell GoogleTest to change the macro's name from `FOO`
to `GTEST_FOO`. Currently `FOO` can be `ASSERT_EQ`, `ASSERT_FALSE`, `ASSERT_GE`,
`ASSERT_GT`, `ASSERT_LE`, `ASSERT_LT`, `ASSERT_NE`, `ASSERT_TRUE`,
`EXPECT_FALSE`, `EXPECT_TRUE`, `FAIL`, `SUCCEED`, `TEST`, or `TEST_F`. For
example, with `-DGTEST_DONT_DEFINE_TEST=1`, you'll need to write
```
GTEST_TEST(SomeTest, DoesThis) { ... }
```
instead of
```
TEST(SomeTest, DoesThis) { ... }
```
in order to define a test.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@restricted@googletest@googletest@README.md@.PATH_END.py
|
{
"filename": "Step4_matching_img_ifs_part2-checkpoint.ipynb",
"repo_name": "aabdurrouf/piXedfit",
"repo_path": "piXedfit_extracted/piXedfit-main/examples/FUVtoNIR_CALIFA/.ipynb_checkpoints/Step4_matching_img_ifs_part2-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
# 4. Spatial and spectral matching between imaging data and IFS data cube using `piXedfit_spectrophotometric` -- Part II
<font size="4">In this step, we correct for the wavelength-dependant mismatch between the IFS spectra and the photometric SEDs on pixel level. This mismatch can be caused by at least two factors: systematics in the data processing (PSF matching, spatial resampling, reprojection, etc.) of the broadband imaging and IFS data and the uncertainty in the flux calibration of the IFS and imaging data. The correction can be performed using `piXedfit.piXedfit_spectrophotometric.match_specphoto` function. Basically, this function will perform simple SED fitting to the photometric SED of individual pixels to find a best-fit model spectrum for each pixel. A wavelength-dependent ratio (between the best-fit model spectrum and the IFS spectrum) is then fit with a third-order Legendre polynomial function to get smooth correction factor, which is then applied to correct the IFS spectra.</font>
```python
import numpy as np
from astropy.io import fits
import os, sys
import matplotlib.pyplot as plt
global PIXEDFIT_HOME
PIXEDFIT_HOME = os.environ['PIXEDFIT_HOME']
sys.path.insert(0, PIXEDFIT_HOME)
%matplotlib inline
```
```python
from piXedfit.piXedfit_spectrophotometric import match_imgifs_spectral
```
<font size="4"> The following calculation is rather heavy and time-consuming, depending on the number of pixels (that have spec+photo SEDs) and the number of cores used (nproc) in the calculation. It's recommended to run this calculation separately (from this jupyter notebook) on a multicores computer (or cluster). The computational time can be shorten by increasing the number of cores. A script for executing this function separately (on the terminal) is given in the directory: `script_match_imgifs_spectral.py`. </font>
### Below is a copy of the script
specphoto_file = "specphoto_fluxmap_ngc309.fits"
name_saved_randmod = "ngc309_models.fits.gz" # model SEDs generated before
name_out_fits = "corr_%s" % specphoto_file
match_imgifs_spectral(specphoto_file, nproc=20, name_out_fits=name_out_fits)
## 4.3. Check the result: spectrophotometric data cube
```python
cube = fits.open("corr_specphoto_fluxmap_ngc309.fits")
cube.info()
header = cube[0].header
print (header)
```
Filename: corr_specphoto_fluxmap_ngc309.fits
No. Name Ver Type Cards Dimensions Format
0 PHOTO_FLUX 1 PrimaryHDU 34 (131, 131, 12) float64
1 PHOTO_FLUXERR 1 ImageHDU 9 (131, 131, 12) float64
2 WAVE 1 ImageHDU 7 (1901,) float64
3 SPEC_FLUX 1 ImageHDU 9 (131, 131, 1901) float64
4 SPEC_FLUXERR 1 ImageHDU 9 (131, 131, 1901) float64
5 PHOTO_REGION 1 ImageHDU 8 (131, 131) float64
6 SPEC_REGION 1 ImageHDU 8 (131, 131) float64
7 MOD_WAVE 1 ImageHDU 7 (1553,) float64
8 MOD_FLUX 1 ImageHDU 9 (131, 131, 1553) float64
9 CORR_FACTOR 1 ImageHDU 9 (131, 131, 1901) float64
SIMPLE = T / conforms to FITS standard BITPIX = -64 / array data type NAXIS = 3 / number of array dimensions NAXIS1 = 131 NAXIS2 = 131 NAXIS3 = 12 EXTEND = T NFILTERS= 12 Z = 0.0188977 RA = 14.177751925 DEC = -9.913864294 GALEBV = 0.03370103765151496 UNIT = 1E-17 BUNIT = 'erg/s/cm^2/A' STRUCTPH= '(band,y,x)' STRUCTSP= '(wavelength,y,x)' FSAMP = 'galex_fuv' PIXSIZE = 1.500000000000001 FPSFMTCH= 'wise_w2 ' PSFFWHM = 6.37 SPECPHOT= 1 FIL0 = 'galex_fuv' FIL1 = 'galex_nuv' FIL2 = 'sdss_u ' FIL3 = 'sdss_g ' FIL4 = 'sdss_r ' FIL5 = 'sdss_i ' FIL6 = 'sdss_z ' FIL7 = '2mass_j ' FIL8 = '2mass_h ' FIL9 = '2mass_k ' FIL10 = 'wise_w1 ' FIL11 = 'wise_w2 ' EXTNAME = 'PHOTO_FLUX' / extension name END
<font size="4"> Get the 3D data cubes </font>
```python
photo_region = cube['PHOTO_REGION'].data
spec_region = cube['SPEC_REGION'].data
wave = cube['wave'].data
photo_flux = cube['PHOTO_FLUX'].data
photo_flux_err = cube['PHOTO_FLUXERR'].data
spec_flux = cube['SPEC_FLUX'].data
spec_flux_err = cube['SPEC_FLUXERR'].data
# get model: best-fit to photometric SED
mod_wave = cube['MOD_WAVE'].data
mod_flux = cube['MOD_FLUX'].data
corr_factor = cube['corr_factor'].data
# get unit
unit = header['unit']
cube.close()
```
<font size="4"> See coverages of Images and IFS
```python
%matplotlib inline
from astropy.visualization import make_lupton_rgb
g = photo_flux[3]*10
r = photo_flux[4]*10
i = photo_flux[5]*10
rgb_default = make_lupton_rgb(i, r, g)
### plot photo and spec+photo regio
fig1 = plt.figure(figsize=(5,5))
f1 = plt.subplot()
plt.xlabel('[pixel]', fontsize=15)
plt.ylabel('[pixel]', fontsize=15)
plt.imshow(rgb_default, origin='lower', alpha=1.0)
plt.imshow(spec_region, origin='lower', cmap='Greys', alpha=0.2)
```
<matplotlib.image.AxesImage at 0x7f7f2611d5c0>

<font size="4"> **Check SEDs of some pixels** </font>
```python
# get photometric and spectroscopic SEDs of pixels:
#transpose (band,y,x) => (y,x,band):
pix_photo_SED = np.transpose(photo_flux, axes=(1, 2, 0))*unit ## erg/s/cm^2/A
pix_photo_SED_err = np.transpose(photo_flux_err, axes=(1, 2, 0))*unit
#transpose (wavelength,y,x) => (y,x,wavelength):
pix_spec_SED = np.transpose(spec_flux, axes=(1, 2, 0))*unit
pix_spec_SED_err = np.transpose(spec_flux_err, axes=(1, 2, 0))*unit
pix_mod_spec_SED = np.transpose(mod_flux, axes=(1, 2, 0))*unit
pix_corr_factor = np.transpose(corr_factor, axes=(1, 2, 0))
```
```python
# get filters
nbands = header['nfilters']
filters = []
for bb in range(0,nbands):
str_temp = 'fil%d' % bb
filters.append(header[str_temp])
# get central wavelength of filters
from piXedfit.utils.filtering import cwave_filters
photo_wave = cwave_filters(filters)
```
<font size="4"> Plot SEDs of some pixels in the central region. The black spectra are the best-fit model to photometric SED that was used as reference in correcting for the photometry vs. spectroscopy mismatch. </font>
```python
from matplotlib.ticker import ScalarFormatter
# Optional: cut the spectra around the edge to exclude those commonly uncertain fluxes
nwaves = len(wave)
for yy in range(58,63):
for xx in range(58,63):
if spec_region[yy][xx]==1:
fig1 = plt.figure(figsize=(14,7))
f1 = plt.subplot()
plt.title("pixel: (x=%d, y=%d)" % (xx,yy), fontsize=20)
f1.set_yscale('log')
f1.set_xscale('log')
plt.setp(f1.get_yticklabels(), fontsize=14)
plt.setp(f1.get_xticklabels(), fontsize=14)
plt.xlabel(r'Wavelength $[\AA]$', fontsize=21)
plt.ylabel(r'$F_{\lambda}$ [$10^{-17}$erg $s^{-1}cm^{-2}\AA^{-1}$]', fontsize=21)
xticks = [2000,4000,6000,10000,20000,30000,50000]
plt.xticks(xticks)
#plt.xlim(3000,10000)
#plt.ylim(1.0e-19,8e-16)
for axis in [f1.xaxis]:
axis.set_major_formatter(ScalarFormatter())
# Optional: cut the spectra around the edge to exclude
# those commonly uncertain fluxes
plt.plot(wave[20:nwaves-20], pix_spec_SED[yy][xx][20:nwaves-20], lw=1.0, color='red')
plt.errorbar(photo_wave, pix_photo_SED[yy][xx], yerr=pix_photo_SED_err[yy][xx],
markersize=10,color='blue', fmt='o')
#plt.plot(mod_wave, pix_mod_spec_SED[yy][xx], lw=1.0, color='black')
#plt.plot(wave, pix_corr_factor[yy][xx]*unit, lw=2, color='gray')
```

























```python
```
|
aabdurroufREPO_NAMEpiXedfitPATH_START.@piXedfit_extracted@piXedfit-main@examples@FUVtoNIR_CALIFA@.ipynb_checkpoints@Step4_matching_img_ifs_part2-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/splom/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``name`` property of ``splom`` traces."""

    def __init__(self, plotly_name="name", parent_name="splom", **kwargs):
        # Apply defaults only where the caller has not supplied a value.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("role", "info")
        super(NameValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@splom@_name.py@.PATH_END.py
|
{
"filename": "_width.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermap/line/_width.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``scattermap.line.width`` (non-negative)."""

    def __init__(self, plotly_name="width", parent_name="scattermap.line", **kwargs):
        defaults = {"edit_type": "calc", "min": 0}
        for key, value in defaults.items():
            kwargs.setdefault(key, value)
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermap@line@_width.py@.PATH_END.py
|
{
"filename": "_spectra.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/hazma/experimental/pseudo_scalar_mediator/_spectra.py",
"type": "Python"
}
|
import numpy as np
from hazma import spectra
from hazma.parameters import muon_mass as mmu
from hazma.parameters import electron_mass as me
from ._proto import PseudoScalarMediatorBase
from ._fsr import dnde_xx_to_p_to_ffg
from ._msqrd import msqrd_xx_to_p_to_000, msqrd_xx_to_p_to_pm0
# Stuff needed to compute fsr from x xbar -> P -> pip pim pi0
# from ..gamma_ray_decay import gamma_ray_fsr
# from .pseudo_scalar_mediator_mat_elem_sqrd_rambo import msqrd_xx_to_p_to_pm0g
# TODO: pp spectrum. Gonna need Logan to do this since it
# requires cython...
def dnde_pp(model: PseudoScalarMediatorBase, egams, Q, mode="total"):
    """Placeholder for the photon spectrum from DM annihilation into p pbar.

    Not implemented yet (returns None); per the TODO above, the final
    implementation is expected to require Cython.
    """
    # eng_p = Q / 2.
    pass
def dnde_ee(_: PseudoScalarMediatorBase, egams, cme):
    """Photon spectrum from DM annihilation into electron-positron pairs.

    Only the final-state-radiation contribution is included.
    """
    fsr_spectrum = dnde_xx_to_p_to_ffg(egams, cme, me)
    return fsr_spectrum
def dnde_mumu(_: PseudoScalarMediatorBase, egams, cme):
    """Photon spectrum from DM annihilation into muon pairs.

    Sum of the final-state radiation and the decay spectra of the two
    muons, each muon carrying half the center-of-mass energy.
    """
    radiative = dnde_xx_to_p_to_ffg(egams, cme, mmu)
    muon_decay = spectra.dnde_photon_muon(egams, cme / 2.0)
    return radiative + 2.0 * muon_decay
def dnde_pi0pipi(model: PseudoScalarMediatorBase, photon_energies, cme):
    """Photon spectrum from DM annihilation into a neutral pion and two
    charged pions.

    Notes
    -----
    RAMBO is used to "convolve" the pions' spectra with the squared
    matrix element over the pi0 pi pi phase space.
    """
    def _msqrd_with_model(momenta):
        # Bind the model so the matrix element matches the expected signature.
        return msqrd_xx_to_p_to_pm0(momenta, model)

    return spectra.dnde_photon(
        photon_energies=photon_energies,
        cme=cme,
        final_states=["pi0", "pi", "pi"],
        msqrd=_msqrd_with_model,
        msqrd_signature="momenta",
    )
def dnde_pi0pi0pi0(model: PseudoScalarMediatorBase, photon_energies, cme):
    """Photon spectrum from DM annihilation into three neutral pions.

    Notes
    -----
    RAMBO is used to "convolve" the pions' spectra with the squared
    matrix element over the three-body phase space.
    """
    def _msqrd_with_model(momenta):
        # Bind the model so the matrix element matches the expected signature.
        return msqrd_xx_to_p_to_000(momenta, model)

    return spectra.dnde_photon(
        photon_energies=photon_energies,
        cme=cme,
        final_states=["pi0", "pi0", "pi0"],
        msqrd=_msqrd_with_model,
        msqrd_signature="momenta",
    )
def spectrum_funcs(model):
    """Return a dict mapping each final state to its spectrum function.

    Each returned function takes ``photon_energies`` (array of gamma-ray
    energies at which to evaluate the spectrum) and ``cme`` (center-of-mass
    energy of the process), with ``model`` already bound.
    """
    def bind(fn):
        # Capture ``fn`` in a closure so each entry evaluates its own channel.
        def dnde(photon_energies, cme):
            return fn(model, photon_energies, cme)
        return dnde

    channels = {
        "mu mu": dnde_mumu,
        "e e": dnde_ee,
        "pi0 pi pi": dnde_pi0pipi,
        "pi0 pi0 pi0": dnde_pi0pi0pi0,
        "p p": dnde_pp,
    }
    return {state: bind(fn) for state, fn in channels.items()}
def gamma_ray_lines(model, cme):
    """Return the monochromatic gamma-ray line from DM annihilation.

    The two-photon line sits at half the center-of-mass energy; its
    branching fraction is taken from the ``"g g"`` channel.
    """
    # NOTE(review): ``annihilation_branching_fractions`` is neither defined
    # nor imported in this module -- calling this as-is raises NameError.
    # Confirm where it should be imported from.
    bf = annihilation_branching_fractions(model, cme)["g g"]
    return {"g g": {"energy": cme / 2.0, "bf": bf}}
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@hazma@experimental@pseudo_scalar_mediator@_spectra.py@.PATH_END.py
|
{
"filename": "jwstnirspec.py",
"repo_name": "jruffio/breads",
"repo_path": "breads_extracted/breads-main/breads/instruments/jwstnirspec.py",
"type": "Python"
}
|
from matplotlib.pyplot import axis
import matplotlib.pyplot as plt
from breads.instruments.instrument import Instrument
import breads.utils as utils
from warnings import warn
import astropy.io.fits as pyfits
import numpy as np
import ctypes
from astropy.coordinates import SkyCoord, EarthLocation
import astropy.units as u
from astropy.time import Time
from copy import copy
from breads.utils import broaden
from breads.calibration import SkyCalibration
import multiprocessing as mp
from itertools import repeat
import pandas as pd
import astropy
#NIRSPEC Wavelengths
def get_wavelen_values(header, wavelen_axis=3):
    """Get array of wavelength values in microns, via WCS.

    Works on JWST NIRSpec cubes but should be general across other
    instruments too.  Returns wavelengths in microns.
    """
    cube_wcs = astropy.wcs.WCS(header)
    n_chan = header[f'NAXIS{wavelen_axis}']
    pix_coords = np.zeros((n_chan, 3))
    pix_coords[:, 2] = np.arange(n_chan)
    # 1e6 scales the WCS spectral coordinate to microns (presumably the
    # native unit is meters -- confirm against the cube headers).
    return cube_wcs.wcs_pix2world(pix_coords, 0)[:, 2] * 1e6
class JWSTNirspec(Instrument):
    """breads interface to a JWST/NIRSpec IFU spectral cube.

    Loads the flux, noise and bad-pixel HDUs of a NIRSpec cube, builds a
    wavelength cube, and provides helpers for bad-pixel cleaning,
    continuum estimation, spatial cropping and spectral broadening.
    """

    def __init__(self, filename=None):
        super().__init__('jwstnirspec')
        if filename is None:
            warning_text = "No data file provided. " + \
                "Please manually add data or use JWSTNirspec.read_data_file()"
            warn(warning_text)
        else:
            self.read_data_file(filename)

    def read_data_file(self, filename):
        """
        Read a NIRSpec spectral cube (flux, noise and bad-pixel HDUs),
        build the wavelength cube and bad-pixel mask, crop detector-specific
        spectral edges, then validate the object.
        """
        with pyfits.open(filename) as hdulist:
            priheader = hdulist[0].header
            cube = hdulist[1].data
            noisecube = hdulist[2].data
            # Bug fix: np.float was removed in NumPy >= 1.20; the builtin
            # float is the documented drop-in replacement.
            badpixcube = hdulist[3].data.astype(float)

            # Mask convention: NaN = bad pixel, 1 = good pixel.
            badpixcube[np.where(badpixcube != 0)] = np.nan
            badpixcube[np.where(np.isfinite(badpixcube))] = 1
            # NOTE(review): this flags pixels with |flux| >= 1e-6 as bad,
            # which assumes calibrated fluxes are tiny -- confirm units.
            badpixcube[np.where(np.abs(cube) >= 1e-6)] = np.nan

            # Wavelength axis of the cube, tiled to the cube's spatial shape.
            from spectral_cube import SpectralCube
            tmpcube = SpectralCube.read(filename, hdu=1)
            wvs = np.array(tmpcube.spectral_axis)

            nz, ny, nx = cube.shape
            self.wavelengths = np.tile(wvs[:, None, None], (1, ny, nx))
            self.data = cube
            self.noise = noisecube
            self.bad_pixels = badpixcube
            self.bary_RV = 0  # barycentric RV correction (none applied yet)
            self.R = 2700     # nominal spectral resolving power
            self.priheader = priheader

            # Detector-dependent cropping of the spectral edges.
            detector = priheader["DETECTOR"].strip()
            print(detector)
            if detector == 'NRS1':
                crop_min, crop_max = 10, 140
            elif detector == 'NRS2':
                crop_min, crop_max = 160, 150
            else:
                # Bug fix: previously an unknown detector crashed later with
                # an undefined-name error; raise a meaningful error instead.
                raise ValueError("Unknown DETECTOR: {0}".format(detector))
            print(crop_min, crop_max)
            self.bad_pixels[0:crop_min, :, :] = np.nan
            self.bad_pixels[nz - crop_max::, :, :] = np.nan
        self.valid_data_check()

    def trim_data(self, trim):
        """Mask ``trim`` spectral channels at each end of the cube."""
        if trim <= 0:
            return
        nz = self.data.shape[0]
        self.bad_pixels[:trim] = np.nan
        self.bad_pixels[nz - trim:] = np.nan

    def _compute_continuum(self, num_threads, wid_mov):
        """Estimate a per-spaxel continuum with a moving-median filter.

        Fills ``self.continuum`` (same shape as ``self.data``), running
        ``set_continnuum`` over every spaxel in a worker pool.
        """
        nz, ny, nx = self.data.shape
        spaxels = [self.data[:, i, j] for i in range(ny) for j in range(nx)]
        # Fix: the pool was previously never closed (process leak); the
        # context manager terminates the workers when done.
        with mp.Pool(processes=num_threads) as my_pool:
            output = my_pool.map(set_continnuum, zip(spaxels, repeat(wid_mov)))
        self.continuum = np.zeros((nz, ny, nx))
        for i in range(ny):
            for j in range(nx):
                self.continuum[:, i, j] = output[i * nx + j]

    def remove_bad_pixels(self, chunks=20, mypool=None, med_spec=None, nan_mask_boxsize=3, w=5, \
                          num_threads=16, wid_mov=None, threshold=3):
        """Identify and mask bad pixels, caching a continuum estimate.

        Delegates detection to ``utils.findbadpix`` and replaces ``data``
        and ``bad_pixels`` with the cleaned versions.  On first call it
        also fills ``self.continuum``.

        Returns the residual product from ``utils.findbadpix``.
        """
        new_badpixcube, new_cube, res = \
            utils.findbadpix(self.data, self.noise, self.bad_pixels, chunks, mypool,
                             med_spec, nan_mask_boxsize, threshold=threshold)
        self.bad_pixels = new_badpixcube
        self.data = new_cube
        # Fix: the original used a bare ``except`` to detect a missing
        # ``continuum`` attribute, which also swallowed unrelated errors.
        if not hasattr(self, "continuum"):
            if wid_mov is None:
                wid_mov = 10
            self._compute_continuum(num_threads, wid_mov)
        return res

    def crop_image(self, x_range, y_range):
        """Crop every cube spatially to ``x_range`` x ``y_range``."""
        self.data = self.data[:, x_range[0]:x_range[1], y_range[0]:y_range[1]]
        self.wavelengths = self.wavelengths[:, x_range[0]:x_range[1], y_range[0]:y_range[1]]
        self.noise = self.noise[:, x_range[0]:x_range[1], y_range[0]:y_range[1]]
        # Bug fix: previously a slice of the (already cropped) data cube was
        # assigned to ``bad_pixels``, destroying the bad-pixel mask and
        # double-cropping; crop the mask itself instead.
        self.bad_pixels = self.bad_pixels[:, x_range[0]:x_range[1], y_range[0]:y_range[1]]

    def broaden(self, wvs, spectrum, loc=None, mppool=None):
        """
        Broaden a spectrum to the resolution of this data object using the
        resolution attribute (self.R).  The LSF is assumed to be a 1D
        Gaussian.

        Args:
            wvs: Wavelength sampling of the spectrum to be broadened.
            spectrum: 1D spectrum to be broadened.
            loc: To be ignored.  Could be used in the future to specify an
                (x, y) position if field-dependent resolution is available.
            mppool: Multiprocessing pool to parallelize the code.  If None
                (default), no parallelization is applied.

        Return:
            Broadened spectrum.
        """
        return broaden(wvs, spectrum, self.R, mppool=mppool)

    def set_noise(self, method="sqrt_cont", num_threads=16, wid_mov=None):
        """Replace the noise cube with a continuum-based estimate.

        ``method="sqrt_cont"`` uses sqrt(|continuum|) (Poisson-like noise);
        ``method="cont"`` uses the continuum itself.
        """
        if wid_mov is None:
            wid_mov = self.data.shape[0] // 10
        self._compute_continuum(num_threads, wid_mov)
        if method == "sqrt_cont":
            self.noise = np.sqrt(np.abs(self.continuum))
        if method == "cont":
            self.noise = self.continuum
def set_continnuum(args):
    """Moving-median continuum estimate of a 1D spectrum.

    (The misspelled name is kept for backward compatibility with callers.)

    Parameters
    ----------
    args : tuple
        ``(data, window)`` where ``data`` is the 1D spectrum and ``window``
        is the moving-median width in samples.

    Returns
    -------
    np.ndarray
        Continuum estimate with the same length as ``data``.
    """
    data, window = args
    # Mirror the spectrum onto itself so the rolling window has support
    # near the right edge; interpolate over NaNs first.
    # Fix: fillna(method=...) is deprecated in pandas >= 2.1; bfill()/ffill()
    # are the drop-in replacements.
    mirrored = (
        pd.DataFrame(np.concatenate([data, data[::-1]], axis=0))
        .interpolate(method="linear")
        .bfill()
        .ffill()
    )
    smoothed = (
        pd.DataFrame(np.array(mirrored))
        .rolling(window=window, center=True)
        .median()
        .interpolate(method="linear")
        .bfill()
        .ffill()
    )
    return np.array(smoothed)[0:np.size(data), 0]
|
jruffioREPO_NAMEbreadsPATH_START.@breads_extracted@breads-main@breads@instruments@jwstnirspec.py@.PATH_END.py
|
{
"filename": "derived_field.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/doc/source/cookbook/derived_field.py",
"type": "Python"
}
|
import yt
# Load the dataset.
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
# You can create a derived field by manipulating any existing derived fields
# in any way you choose. In this case, let's just make a simple one:
# thermal_energy_density = 3/2 nkT
# First create a function which yields your new derived field
def thermal_energy_dens(field, data):
    """Thermal energy density, (3/2) n k T, for an ideal monatomic gas."""
    number_density = data["gas", "number_density"]
    kT = data["gas", "kT"]
    return 1.5 * number_density * kT
# Then add it to your dataset and define the units
ds.add_field(
    ("gas", "thermal_energy_density"),
    units="erg/cm**3",
    function=thermal_energy_dens,
    sampling_type="cell",
)
# It will now show up in your derived_field_list
for i in sorted(ds.derived_field_list):
    print(i)
# Let's use it to make a projection
# NOTE(review): ``ad`` is unused below — presumably left from an earlier
# version of the example; confirm before removing.
ad = ds.all_data()
yt.ProjectionPlot(
    ds,
    "x",
    ("gas", "thermal_energy_density"),
    weight_field=("gas", "density"),
    width=(200, "kpc"),
).save()
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@doc@source@cookbook@derived_field.py@.PATH_END.py
|
{
"filename": "_legend.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/_legend.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the ``legend`` subplot reference of ``scatter3d`` traces."""

    def __init__(self, plotly_name="legend", parent_name="scatter3d", **kwargs):
        # Allow callers to override the generated defaults via kwargs.
        defaults = dict(
            dflt=kwargs.pop("dflt", "legend"),
            edit_type=kwargs.pop("edit_type", "style"),
        )
        super(LegendValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@_legend.py@.PATH_END.py
|
{
"filename": "_utils.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/cosmology/_utils.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
__all__: list[str] = [] # nothing is publicly scoped
import functools
import operator
from collections.abc import Callable
from dataclasses import Field
from numbers import Number
from typing import TYPE_CHECKING, Any, TypeVar
import numpy as np
from astropy.units import Quantity
from . import units as cu
from ._signature_deprecations import _depr_kws_wrap
if TYPE_CHECKING:
from astropy.cosmology import Parameter
_F = TypeVar("_F", bound=Callable[..., Any])
def vectorize_redshift_method(func=None, nin=1):
    """Vectorize a method of redshift(s).

    Parameters
    ----------
    func : callable or None
        method to wrap. If `None` returns a :func:`functools.partial`
        with ``nin`` loaded.
    nin : int
        Number of positional redshift arguments.

    Returns
    -------
    wrapper : callable
        :func:`functools.wraps` of ``func`` where the first ``nin``
        arguments are converted from |Quantity| to :class:`numpy.ndarray`.
    """
    # Support pie-syntax with arguments: ``@vectorize_redshift_method(nin=2)``.
    if func is None:
        return functools.partial(vectorize_redshift_method, nin=nin)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        """Wrapper converting arguments to numpy-compatible inputs.

        :func:`functools.wraps` of ``func`` where the first ``nin`` arguments
        are converted from |Quantity| to `numpy.ndarray` or scalar.
        """
        # Strip units from the leading redshift arguments.
        # TODO! quantity-aware vectorization can simplify this.
        zs = []
        for z in args[:nin]:
            zs.append(z.to_value(cu.redshift) if isinstance(z, Quantity) else z)
        # All-scalar input takes the plain (unvectorized, faster) path.
        if all(isinstance(z, (Number, np.generic)) for z in zs):
            return func(self, *zs, *args[nin:], **kwargs)
        # Otherwise dispatch to the pre-built vectorized version.
        return wrapper.__vectorized__(self, *zs, *args[nin:], **kwargs)

    # TODO! use frompyfunc when can solve return type errors
    wrapper.__vectorized__ = np.vectorize(func)
    return wrapper
def aszarr(z):
    """Redshift as a `~numbers.Number` or |ndarray| / |Quantity| / |Column|.

    Allows for any ndarray ducktype by checking for attribute "shape".
    """
    if isinstance(z, (Number, np.generic)):
        # Plain scalars pass straight through.
        return z
    if hasattr(z, "shape"):
        # Anything with a ``shape`` is treated as an ndarray ducktype.
        if getattr(z, "__module__", "").startswith("pandas"):
            # See https://github.com/astropy/astropy/issues/15576. Pandas does
            # not play well with others and will ignore unit-ful calculations,
            # so strip down to the underlying values first.
            z = z.values
        if hasattr(z, "unit"):  # Quantity / unit-ful Column
            return (z << cu.redshift).value  # for speed only use enabled equivs
        return z
    # Not one of the preferred types (Number / array ducktype): coerce.
    return Quantity(z, cu.redshift).value
def all_cls_vars(obj: object | type, /) -> dict[str, Any]:
"""Return all variables in the whole class hierarchy."""
cls = obj if isinstance(obj, type) else obj.__class__
return functools.reduce(operator.__or__, map(vars, cls.mro()[::-1]))
def all_parameters(obj: object, /) -> dict[str, Field | Parameter]:
    """Get all fields of a dataclass, including those not-yet finalized.

    Parameters
    ----------
    obj : object | type
        A dataclass.

    Returns
    -------
    dict[str, Field | Parameter]
        All fields of the dataclass, including those not yet finalized in the
        class, if it's still under construction, e.g. in ``__init_subclass__``.
    """
    # Imported here to avoid a circular import at module load time.
    from astropy.cosmology.parameter import Parameter

    out: dict[str, Field | Parameter] = {}
    for name, value in all_cls_vars(obj).items():
        if isinstance(value, Parameter):
            out[name] = value
        elif isinstance(value, Field) and isinstance(value.default, Parameter):
            # A not-yet-finalized field: its Parameter lives in ``default``.
            out[name] = value.default
    return out
def deprecated_keywords(*kws, since):
    """Deprecate calling one or more arguments as keywords.

    Parameters
    ----------
    *kws: str
        Names of the arguments that will become positional-only.
    since : str or number or sequence of str or number
        The release at which the old argument became deprecated.
    """
    def decorator(func):
        # Defer to the shared wrapping helper with the captured settings.
        return _depr_kws(func, kws=kws, since=since)

    return decorator
def _depr_kws(func: _F, /, kws: tuple[str, ...], since: str) -> _F:
    # Build the deprecation wrapper, then make it masquerade as ``func``
    # (update_wrapper returns the wrapper it updated).
    return functools.update_wrapper(_depr_kws_wrap(func, kws, since), func)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@cosmology@_utils.py@.PATH_END.py
|
{
"filename": "make_transmission.py",
"repo_name": "igmhub/LyaCoLoRe",
"repo_path": "LyaCoLoRe_extracted/LyaCoLoRe-master/scripts/make_transmission.py",
"type": "Python"
}
|
#!/usr/bin/env python
import multiprocessing
import numpy as np
import os
import sys
import time
from astropy.io import fits
from multiprocessing import Pool
from scipy.interpolate import interp1d
from lyacolore import parse, simulation_data, utils
################################################################################
#Script to make a transmission file for a given pixel, given a
# TODO: Get rid of the need to specify file numbers?
# TODO: Set up option to specify exactly which meatls we want
# TODO: Tidy up measuring of SIGMA_G and subsequent DLA method.
# TODO: Exchange lambda_min for z_min for cells.
################################################################################
"""
Set up the file locations and filename structures.
Also define option preferences.
"""
args = parse.get_args(sys.argv)

################################################################################

#Define global variables.
master_file = args.out_dir+'/master.fits'
if args.pixels is None:
    # Default to every pixel of the chosen HEALPix tessellation.
    args.pixels = list(range(12*args.nside**2))

# TODO: print to confirm the arguments. e.g. "DLAs will be added"

if np.log2(args.nside)-int(np.log2(args.nside)) != 0:
    raise ValueError('nside must be a power of 2!')
else:
    N_pix = 12*args.nside**2

#colore skewers filename (except number that will be added later)
colore_base_filename = args.in_dir+'/out_srcs_s1_'

#Calculate the minimum value of z that we are interested in.
#i.e. the z value for which lambda_min corresponds to the lya wavelength.
z_min = args.lambda_min/utils.lya_rest - 1
small = 10**-10

#Get the simulation parameters from the parameter file.
simulation_parameters = utils.get_simulation_parameters(args.in_dir,args.param_file)

#If we have density input skewers and want to add DLAs, then raise an error:
#this functionality is not yet implemented.
if (args.skewer_type=='density') & args.add_DLAs:
    raise ValueError('Adding DLAs from density input skewers is not possible yet!')

if (args.metals_selection is not None) and (args.metals_list is not None):
    raise ValueError('Both a selection of metals and a list of metals have been provided: choose one!')
################################################################################
"""
Construct the MOCKID_lookup from the master file.
"""
# TODO: potential issue with differnt values of nside being used in make_master.py
master = fits.open(master_file)
master_data = master[1].data
master.close()
#Make a MOCKID lookup.
master_data_pixel_set = set(master_data['PIXNUM'])
pixels_set = set(args.pixels)
pixel_list = list(sorted(master_data_pixel_set.intersection(pixels_set)))
MOCKID_lookup = {}
for pixel in pixel_list:
#pixel_indices = [i for i in range(len(master_data['PIXNUM'])) if master_data['PIXNUM'][i]==pixel]
pixel_indices = (master_data['PIXNUM']==pixel)
pixel_MOCKIDs = master_data['MOCKID'][pixel_indices]
pixel_file_number_list = list(sorted(set(master_data['FILENUM'][pixel_indices])))
for file_number in pixel_file_number_list:
pixel_file_indices = ((master_data['FILENUM'][pixel_indices])==file_number)
#MOCKID_list = [master_data['MOCKID'][i] for i in range(len(master_data['PIXNUM'])) if master_data['PIXNUM'][i]==pixel and master_data['FILENUM'][i]==file_number]
pixel_file_MOCKIDs = pixel_MOCKIDs[pixel_file_indices]
MOCKID_lookup = {**MOCKID_lookup,**{(file_number,pixel):list(pixel_file_MOCKIDs)}}
################################################################################
"""
Define the multiprocessing tracking functions
"""
#Define a progress-tracking function.
def log_result(retval):
results.append(retval)
N_complete = len(results)
N_tasks = len(tasks)
utils.progress_bar(N_complete,N_tasks,start_time)
#Define an error-tracking function.
def log_error(retval):
print('Error:',retval)
################################################################################
print('Working on per-HEALPix pixel initial skewer files...')
start_time = time.time()

#Define the pixelisation process.
def pixelise_colore_output(pixel,colore_base_filename,z_min,out_dir,N_side):
    """Gather the CoLoRe skewers for one HEALPix pixel and save them.

    For Gaussian input skewers, also measure sigma_G and return
    ``(N, SIGMA_G)`` so a global value can be combined afterwards;
    otherwise returns None.
    """
    #Define the output directory the pixel, according to the new file structure.
    location = utils.get_dir_name(out_dir,pixel)

    #Make file into an object
    pixel_object = simulation_data.make_pixel_object(pixel,colore_base_filename,args.file_format,args.skewer_type,shared_MOCKID_lookup,IVAR_cutoff=args.rest_frame_weights_cut)
    # TODO: These could be made beforehand and passed to the function? Or is there already enough being passed?

    #Make some useful headers
    header = fits.Header()
    header['HPXNSIDE'] = N_side
    header['HPXPIXEL'] = pixel
    header['HPXNEST'] = True
    header['LYA'] = utils.lya_rest

    ## Save the pixelised colore file.
    filename = utils.get_file_name(location,'{}-colore'.format(args.skewer_type),N_side,pixel)
    pixel_object.save_as_colore(args.skewer_type,filename,header,overwrite=args.overwrite,compress=args.compress)

    if args.skewer_type == 'gaussian':
        pixel_object.compute_SIGMA_G(type='single_value',lr_max=args.rest_frame_weights_cut)
        header['SIGMA_G'] = pixel_object.SIGMA_G
        # N is the number of contributing cells, used to weight SIGMA_G later.
        N = np.sum(pixel_object.IVAR_rows.astype('int'))
        return (N,pixel_object.SIGMA_G)
    else:
        return
#Set up the multiprocessing pool parameters and make a list of tasks.
# The manager dict makes MOCKID_lookup readable by all worker processes
# without each one pickling its own full copy.
manager = multiprocessing.Manager()
shared_MOCKID_lookup = manager.dict(MOCKID_lookup)
tasks = [(pixel,colore_base_filename,z_min,args.out_dir,args.nside) for pixel in pixel_list]

#Run the multiprocessing pool
if __name__ == '__main__':
    pool = Pool(processes = args.nproc)
    results = []
    start_time = time.time()

    for task in tasks:
        pool.apply_async(pixelise_colore_output,task,callback=log_result,error_callback=log_error)

    pool.close()
    pool.join()
################################################################################
"""
To correctly calculate the physical fields when using Gaussian input skewers, we
must measure sigma from all skewers.
Here, we combine the results from each pixel, to compute an overall value.
"""
if args.skewer_type == 'gaussian':
N_values = np.array([r[0] for r in results])
sg_values = np.array([r[1] for r in results])
SIGMA_G_global = np.sqrt(np.sum((sg_values**2)*N_values)/np.sum(N_values))
print('Gaussian skewers have mean sigma {:2.4f}.'.format(SIGMA_G_global))
print('\nModifying header showing sigma_G in Gaussian CoLoRe files...')
def modify_header(pixel):
location = utils.get_dir_name(args.out_dir,pixel)
filename = utils.get_file_name(location,'gaussian-colore',args.nside,pixel,compressed=args.compress)
h = fits.open(filename)
for HDU in h[1:]:
HDU.header['SIGMA_G'] = SIGMA_G_global
h.writeto(filename,overwrite=True)
h.close()
return
#Run the multiprocessing pool
if __name__ == '__main__':
pool = Pool(processes = args.nproc)
results = []
start_time = time.time()
for pixel in pixel_list:
pool.apply_async(modify_header,(pixel,),callback=log_result,error_callback=log_error)
pool.close()
pool.join()
################################################################################
"""
We may now do the main work of LyaCoLoRe. This includes:
- add extra small scale power
- convert from the gaussian field to the lognormal field, then tau
- add metals
- add RSDs
- add DLAs
- convert from tau to flux
- save the transmission files
We also save picca format delta files for running correlation function tests.
Deltas are normalised using the global mean in 'make_summaries'
"""
print('Working on per-HEALPix pixel final skewer files...')
start_time = time.time()

def produce_final_skewers(base_out_dir,pixel,N_side,lambda_min,tuning_file):
    """Produce the final skewer products for one HEALPix pixel.

    Starting from the pixelised CoLoRe file, this adds small-scale power,
    converts Gaussian -> density -> tau -> flux, optionally adds Lyb/metals,
    DLAs and RSDs, and writes transmission plus picca-format delta files.
    Returns the expanded cosmology table (or the pixel number if it ends up
    empty after trimming).
    """
    t = time.time()
    # Define a random seed for use in this pixel.
    seed = int(pixel * 10**5 + args.seed)

    #We work from the gaussian colore files made in 'pixelise gaussian skewers'.
    location = utils.get_dir_name(base_out_dir,pixel)
    gaussian_filename = utils.get_file_name(location,'{}-colore'.format(args.skewer_type),N_side,pixel,compressed=args.compress)

    # Make a pixel object from it.
    file_number = None
    pixel_object = simulation_data.SimulationData.get_skewers_object(gaussian_filename,file_number,args.file_format,args.skewer_type,IVAR_cutoff=args.rest_frame_weights_cut)
    if args.skewer_type == 'gaussian':
        pixel_object.SIGMA_G = SIGMA_G_global

    # Make a transformation object and add it to the pixel object.
    pixel_object.add_transformation_from_file(tuning_file)

    #Scale the velocities.
    pixel_object.scale_velocities(use_transformation=True)
    #print('{:3.2f} checkpoint object'.format(time.time()-t)); t = time.time()

    #Add Lyb and metal absorbers if needed.
    if args.add_Lyb:
        pixel_object.setup_Lyb_absorber()
    if args.add_metals:
        pixel_object.setup_metal_absorbers(selection=args.metals_selection,metals_list=args.metals_list)

    #Make some useful headers
    header = fits.Header()
    header['HPXNSIDE'] = N_side
    header['HPXPIXEL'] = pixel
    header['HPXNEST'] = True
    header['LYA'] = utils.lya_rest
    if args.skewer_type == 'gaussian':
        header['SIGMA_G'] = pixel_object.SIGMA_G

    #Save CoLoRe format files.
    if args.transmission_only == False:
        if args.skewer_type == 'gaussian':
            pixel_object.compute_physical_skewers()
        filename = utils.get_file_name(location,'density-colore',N_side,pixel)
        pixel_object.save_as_colore('density',filename,header,overwrite=args.overwrite,compress=args.compress)

    #Trim the skewers (remove low lambda cells). Exit if no QSOs are left.
    #We don't cut too tightly on the low lambda to allow for RSDs.
    lambda_buffer = 100. #A
    pixel_object.trim_skewers(lambda_min-lambda_buffer,args.min_cat_z,extra_cells=1)
    if pixel_object.N_qso == 0:
        print('\nwarning: no objects left in pixel {} after trimming.'.format(pixel))
        return pixel

    #Save picca format files without adding small scale power.
    if args.transmission_only == False:
        if args.skewer_type == 'gaussian':
            filename = utils.get_file_name(location,'picca-gaussian-colorecell',N_side,pixel)
            pixel_object.save_as_picca_delta('gaussian',filename,header,overwrite=args.overwrite,add_QSO_RSDs=args.add_QSO_RSDs,compress=args.compress)
        filename = utils.get_file_name(location,'picca-density-colorecell',N_side,pixel)
        pixel_object.save_as_picca_delta('density',filename,header,overwrite=args.overwrite,add_QSO_RSDs=args.add_QSO_RSDs,compress=args.compress)
    #print('{:3.2f} checkpoint colore files'.format(time.time()-t)); t = time.time()

    #Add a table with DLAs in to the pixel object.
    # TODO: in future, we want DLAs all the way down to z=0.
    #That means we need to store skewers all the way down to z=0.
    #May need to adjust how many nodes are used when running.
    if args.add_DLAs:
        pixel_object.add_DLA_table(seed,dla_bias=args.DLA_bias,evol=args.DLA_bias_evol,method=args.DLA_bias_method)
    #print('{:3.2f} checkpoint DLAs'.format(time.time()-t)); t = time.time()

    #Add small scale power to the gaussian skewers:
    if args.add_small_scale_fluctuations:
        generator = np.random.RandomState(seed)
        pixel_object.add_small_scale_fluctuations(args.cell_size,generator,white_noise=False,lambda_min=lambda_min,IVAR_cutoff=args.rest_frame_weights_cut,use_transformation=True)
        if args.skewer_type == 'gaussian':
            #Remove the 'SIGMA_G' header as SIGMA_G now varies with z, so can't be stored in a header.
            del header['SIGMA_G']
    #print('{:3.2f} checkpoint SSF'.format(time.time()-t)); t = time.time()

    #Recompute physical skewers, and then the tau skewers.
    if args.skewer_type == 'gaussian':
        pixel_object.compute_physical_skewers()
    pixel_object.compute_all_tau_skewers()

    if args.transmission_only == False:
        if args.skewer_type == 'gaussian':
            #Picca Gaussian, small cells
            filename = utils.get_file_name(location,'picca-gaussian',N_side,pixel)
            pixel_object.save_as_picca_delta('gaussian',filename,header,overwrite=args.overwrite,add_QSO_RSDs=args.add_QSO_RSDs,compress=args.compress)
        #Picca density
        filename = utils.get_file_name(location,'picca-density',N_side,pixel)
        pixel_object.save_as_picca_delta('density',filename,header,overwrite=args.overwrite,add_QSO_RSDs=args.add_QSO_RSDs,compress=args.compress)
        #Picca tau
        filename = utils.get_file_name(location,'picca-tau-noRSD-notnorm',N_side,pixel)
        pixel_object.save_as_picca_delta('tau',filename,header,notnorm=True,overwrite=args.overwrite,add_QSO_RSDs=args.add_QSO_RSDs,compress=args.compress,all_absorbers=args.picca_all_absorbers)
        #Picca flux
        filename = utils.get_file_name(location,'picca-flux-noRSD-notnorm',N_side,pixel)
        pixel_object.save_as_picca_delta('flux',filename,header,notnorm=True,overwrite=args.overwrite,add_QSO_RSDs=args.add_QSO_RSDs,compress=args.compress,all_absorbers=args.picca_all_absorbers)
        """
        ## Disable this for the moment.
        #Save the no RSD statistics file for this pixel.
        filename = utils.get_file_name(location,'statistics-noRSD',N_side,pixel)
        statistics = pixel_object.save_statistics(filename,overwrite=args.overwrite,compress=args.compress,all_absorbers=args.picca_all_absorbers)
        """
    #print('{:3.2f} checkpoint noRSD files'.format(time.time()-t)); t = time.time()

    #Add RSDs from the velocity skewers provided by CoLoRe.
    if args.add_RSDs == True:
        pixel_object.add_all_RSDs(thermal=args.include_thermal_effects)
    #print('{:3.2f} checkpoint RSDs'.format(time.time()-t)); t = time.time()

    #Trim the skewers (remove low lambda cells). Exit if no QSOs are left.
    #We now cut hard at lambda min as RSDs have been implemented.
    pixel_object.trim_skewers(lambda_min,args.min_cat_z,extra_cells=1)
    if pixel_object.N_qso == 0:
        print('\nwarning: no objects left in pixel {} after trimming.'.format(pixel))
        return pixel

    #Make a variable containing the new cosmology data.
    new_cosmology = pixel_object.return_cosmology()

    #Save the transmission file.
    filename = utils.get_file_name(location,'transmission',N_side,pixel)
    pixel_object.save_as_transmission(filename,header,overwrite=args.overwrite,wave_min=args.transmission_lambda_min,wave_max=args.transmission_lambda_max,wave_step=args.transmission_delta_lambda,fmt=args.transmission_format,add_QSO_RSDs=args.add_QSO_RSDs,compress=args.compress)

    if args.transmission_only == False and args.add_RSDs == True:
        #Picca tau
        filename = utils.get_file_name(location,'picca-tau-notnorm',N_side,pixel)
        pixel_object.save_as_picca_delta('tau',filename,header,notnorm=True,overwrite=args.overwrite,add_QSO_RSDs=args.add_QSO_RSDs,compress=args.compress,all_absorbers=args.picca_all_absorbers)
        #Picca flux
        filename = utils.get_file_name(location,'picca-flux-notnorm',N_side,pixel)
        pixel_object.save_as_picca_delta('flux',filename,header,notnorm=True,overwrite=args.overwrite,add_QSO_RSDs=args.add_QSO_RSDs,compress=args.compress,all_absorbers=args.picca_all_absorbers)
        """
        ## Disable this for the moment.
        #Save the final statistics file for this pixel.
        filename = utils.get_file_name(location,'statistics',N_side,pixel)
        statistics = pixel_object.save_statistics(filename,overwrite=args.overwrite,compress=args.compress,all_absorbers=args.picca_all_absorbers)
        """
    else:
        #If transmission_only is not False, remove the gaussian-colore file.
        os.remove(gaussian_filename)
    #print('{:3.2f} checkpoint RSD files'.format(time.time()-t)); t = time.time()

    return new_cosmology
#define the tasks
tasks = [(args.out_dir,pixel,args.nside,args.lambda_min,args.tuning_file) for pixel in pixel_list]

#Run the multiprocessing pool
if __name__ == '__main__':
    pool = Pool(processes = args.nproc)
    results = []
    start_time = time.time()

    for task in tasks:
        pool.apply_async(produce_final_skewers,task,callback=log_result,error_callback=log_error)

    pool.close()
    pool.join()
################################################################################
"""
PROBABLY COULD MOVE THIS TO make_summaries
Having added small scale power, we must add a new HDU to the master file's cosmology.
"""
print('Updating master file\'s cosmology...')
#First check that the new cosmologies are all the same.
# TODO: some kind of system to check consistency here?
new_cosmology = results[0]
#Reorganise the data.
master = fits.open(master_file)
try:
test = master[3].data
master.close()
except IndexError:
master_catalog = master[1].data
master_colore_cosmology = master[2].data
master_new_cosmology = new_cosmology
#Make an appropriate header.
header = fits.Header()
header['NSIDE'] = args.nside
#Make the data into tables.
hdu_ID = fits.BinTableHDU.from_columns(master_catalog,header=header,name='CATALOG')
hdu_cosmology_colore = fits.BinTableHDU.from_columns(master_colore_cosmology,header=header,name='COSMO_COL')
hdu_cosmology_expanded = fits.BinTableHDU.from_columns(master_new_cosmology,header=header,name='COSMO_EXP')
#Make a primary HDU.
prihdr = fits.Header()
prihdu = fits.PrimaryHDU(header=prihdr)
#Make the .fits file.
hdulist = fits.HDUList([prihdu,hdu_ID,hdu_cosmology_colore,hdu_cosmology_expanded])
hdulist.writeto(master_file,overwrite=True)
hdulist.close()
print('Process complete!\n')
################################################################################
"""
Celebrate!
"""
|
igmhubREPO_NAMELyaCoLoRePATH_START.@LyaCoLoRe_extracted@LyaCoLoRe-master@scripts@make_transmission.py@.PATH_END.py
|
{
"filename": "download-data.ipynb",
"repo_name": "HITS-AIN/PINK",
"repo_path": "PINK_extracted/PINK-master/jupyter/download-data.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import sys
!{sys.executable} -m pip install --user googledrivedownloader requests
%reset -f
```
Requirement already satisfied: googledrivedownloader in /usr/local/lib/python3.6/dist-packages (0.4)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (2.22.0)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests) (1.25.8)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests) (3.0.4)
Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests) (2.8)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests) (2019.11.28)
Download data files (690 MB) ... will take some time!
```python
from google_drive_downloader import GoogleDriveDownloader as gdd
gdd.download_file_from_google_drive(file_id='1Owv1c8qsc6RWQ9UgZuZ9qsRcXwQE2Viw',
dest_path='./data/AIN-ADASS.zip',
unzip=True,
showsize=True,
overwrite=True)
print('Done')
```
Downloading 1Owv1c8qsc6RWQ9UgZuZ9qsRcXwQE2Viw into ./data/AIN-ADASS.zip...
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-2-ef889c6b8975> in <module>
5 unzip=True,
6 showsize=True,
----> 7 overwrite=True)
8
9 print('Done')
/usr/local/lib/python3.6/dist-packages/google_drive_downloader/google_drive_downloader.py in download_file_from_google_drive(file_id, dest_path, overwrite, unzip, showsize)
65
66 current_download_size = [0]
---> 67 GoogleDriveDownloader._save_response_content(response, dest_path, showsize, current_download_size)
68 print('Done.')
69
/usr/local/lib/python3.6/dist-packages/google_drive_downloader/google_drive_downloader.py in _save_response_content(response, destination, showsize, current_size)
87 @staticmethod
88 def _save_response_content(response, destination, showsize, current_size):
---> 89 with open(destination, 'wb') as f:
90 for chunk in response.iter_content(GoogleDriveDownloader.CHUNK_SIZE):
91 if chunk: # filter out keep-alive new chunks
OSError: [Errno 30] Read-only file system: './data/AIN-ADASS.zip'
```python
```
|
HITS-AINREPO_NAMEPINKPATH_START.@PINK_extracted@PINK-master@jupyter@download-data.ipynb@.PATH_END.py
|
{
"filename": "_tickfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/heatmapgl/colorbar/_tickfont.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``tickfont`` compound property of
    ``heatmapgl.colorbar`` (auto-generated plotly validator)."""

    def __init__(
        self, plotly_name="tickfont", parent_name="heatmapgl.colorbar", **kwargs
    ):
        super(TickfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Tickfont"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size

            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@heatmapgl@colorbar@_tickfont.py@.PATH_END.py
|
{
"filename": "scan_util.py",
"repo_name": "brinckmann/montepython_public",
"repo_path": "montepython_public_extracted/montepython_public-master/montepython/likelihoods/bao_BOSS_DR11_Lya_cross/scan_util.py",
"type": "Python"
}
|
# scan_util.py
#
# This is a module containing subfunctions to read the chi2 tables from BOSS and
# eBOSS Lya auto and Lya-QSO cross correlation results
#
# class:chi2_interpolators
# - __init__
# - get_Dchi2_from_distances
#
import numpy as np
from scipy.interpolate import RectBivariateSpline
#####################################################################
#Class to read alpha_t by alpha_p chi2 scans e.g. from BOSS and interpolate.
class chi2_interpolators():
    """Read (alpha_p, alpha_t) chi2 scans (e.g. from BOSS) and interpolate them."""

    def __init__(self, scan_locations, transverse_fid, parallel_fid):
        """
        Arguments:
        scan_locations: dictionary of filepaths to the different scans, with
            keys as scan types.
        transverse_fid: fiducial value of transverse separation used to
            calculate alpha_t.
        parallel_fid: fiducial value of parallel separation used to calculate
            alpha_p.
        """
        # Column layout of each scan file: alpha_parallel, alpha_transverse, chi2.
        AP_COL, AT_COL, CHI2_COL = 0, 1, 2

        interpolators = {}
        for corr_type, path in scan_locations.items():
            table = np.loadtxt(path)

            # The unique, sorted alpha values define the rectangular grid axes.
            ap_axis = np.array(sorted(set(table[:, AP_COL])))
            at_axis = np.array(sorted(set(table[:, AT_COL])))

            # Grid is (N_at, N_ap): rows follow alpha_t, columns alpha_p.
            chi2_grid = np.zeros((at_axis.shape[0], ap_axis.shape[0]))
            for col, ap_val in enumerate(ap_axis):
                # Rows of the scan at this alpha_p value, ordered by alpha_t so
                # that they line up with at_axis.
                chunk = table[table[:, AP_COL] == ap_val, :]
                chunk = chunk[chunk[:, AT_COL].argsort()]
                chi2_grid[:, col] = chunk[:, CHI2_COL]

            # Bilinear interpolator (kx=ky=1): x is alpha_t, y is alpha_p.
            interpolators[corr_type] = RectBivariateSpline(
                at_axis, ap_axis, chi2_grid, kx=1, ky=1
            )

        self.interpolators = interpolators
        self.transverse_fid = transverse_fid
        self.parallel_fid = parallel_fid

    def get_Dchi2_from_distances(self, transverse, parallel, corr_type='cf'):
        """
        Arguments:
        transverse: value of transverse separation to evaluate chi2 for.
        parallel: value of parallel separation to evaluate chi2 for.
        corr_type: which scan to interpolate.

        Returns:
        Dchi2: value of delta chi2
        """
        # Convert the distance measures to alphas relative to the fiducial.
        at = transverse / self.transverse_fid
        ap = parallel / self.parallel_fid
        return self.interpolators[corr_type](at, ap)
|
brinckmannREPO_NAMEmontepython_publicPATH_START.@montepython_public_extracted@montepython_public-master@montepython@likelihoods@bao_BOSS_DR11_Lya_cross@scan_util.py@.PATH_END.py
|
{
"filename": "_tickwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermapbox/marker/colorbar/_tickwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``scattermapbox.marker.colorbar.tickwidth`` (number >= 0)."""

    def __init__(
        self,
        plotly_name="tickwidth",
        parent_name="scattermapbox.marker.colorbar",
        **kwargs,
    ):
        # Pull generated defaults out of kwargs so callers may override them.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        super(TickwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermapbox@marker@colorbar@_tickwidth.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/splom/legendgrouptitle/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``font`` compound property of
    ``splom.legendgrouptitle`` (auto-generated plotly validator)."""

    def __init__(
        self, plotly_name="font", parent_name="splom.legendgrouptitle", **kwargs
    ):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size

            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@splom@legendgrouptitle@_font.py@.PATH_END.py
|
{
"filename": "sleep.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/data/experimental/ops/sleep.py",
"type": "Python"
}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for manually injecting delays into `tf.data` pipelines."""
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
class _SleepDataset(dataset_ops.UnaryUnchangedStructureDataset):
  """A `Dataset` that sleeps before producing each upstream element."""

  def __init__(self, input_dataset, sleep_microseconds):
    # Keep references alive for the lifetime of the dataset op.
    self._input_dataset = input_dataset
    self._sleep_microseconds = sleep_microseconds
    # Build the underlying SleepDataset op around the input's variant tensor;
    # element structure is unchanged (UnaryUnchangedStructureDataset).
    variant_tensor = gen_experimental_dataset_ops.sleep_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._sleep_microseconds,
        **self._flat_structure)
    super(_SleepDataset, self).__init__(input_dataset, variant_tensor)
def sleep(sleep_microseconds):
  """Sleeps for `sleep_microseconds` before producing each input element.

  Args:
    sleep_microseconds: The number of microseconds to sleep before producing an
      input element.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  # Defer construction: `Dataset.apply` supplies the dataset later.
  return lambda dataset: _SleepDataset(dataset, sleep_microseconds)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@data@experimental@ops@sleep.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/splom/marker/colorbar/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Eagerly import the submodules on old Pythons (module-level __getattr__ is
# unavailable before 3.7) and under type checking, so static analyzers see the
# real symbols; otherwise install lazy loaders to keep import time low.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._tickfont import Tickfont
    from ._tickformatstop import Tickformatstop
    from ._title import Title
    from . import title
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns (__all__, __getattr__, __dir__) implementing
    # PEP 562 lazy imports for the listed submodules and classes.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".title"],
        ["._tickfont.Tickfont", "._tickformatstop.Tickformatstop", "._title.Title"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@splom@marker@colorbar@__init__.py@.PATH_END.py
|
{
"filename": "test_imports.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/unit_tests/chat_models/test_imports.py",
"type": "Python"
}
|
from langchain_community.chat_models import __all__, _module_lookup

# Canonical list of chat-model classes that langchain_community.chat_models is
# expected to export. Compared as a set below, so the order is cosmetic only.
# NOTE: "ChatMLflowAIGateway" previously appeared twice; the duplicate entry
# has been removed.
EXPECTED_ALL = [
    "AzureChatOpenAI",
    "BedrockChat",
    "ChatAnthropic",
    "ChatAnyscale",
    "ChatBaichuan",
    "ChatClovaX",
    "ChatCohere",
    "ChatCoze",
    "ChatDatabricks",
    "ChatDeepInfra",
    "ChatEverlyAI",
    "ChatEdenAI",
    "ChatFireworks",
    "ChatFriendli",
    "ChatGooglePalm",
    "ChatHuggingFace",
    "ChatHunyuan",
    "ChatJavelinAIGateway",
    "ChatKinetica",
    "ChatKonko",
    "ChatLiteLLM",
    "ChatLiteLLMRouter",
    "ChatLlamaCpp",
    "ChatMLflowAIGateway",
    "ChatMaritalk",
    "ChatMlflow",
    "ChatMLX",
    "ChatNebula",
    "ChatOCIGenAI",
    "ChatOCIModelDeployment",
    "ChatOCIModelDeploymentVLLM",
    "ChatOCIModelDeploymentTGI",
    "ChatOllama",
    "ChatOpenAI",
    "ChatOutlines",
    "ChatPerplexity",
    "ChatPremAI",
    "ChatSambaNovaCloud",
    "ChatSambaStudio",
    "ChatSparkLLM",
    "ChatTongyi",
    "ChatVertexAI",
    "ChatYandexGPT",
    "ChatYuan2",
    "ChatReka",
    "ChatZhipuAI",
    "ErnieBotChat",
    "FakeListChatModel",
    "GPTRouter",
    "GigaChat",
    "HumanInputChatModel",
    "JinaChat",
    "LlamaEdgeChatService",
    "MiniMaxChat",
    "MoonshotChat",
    "PaiEasChatEndpoint",
    "PromptLayerChatOpenAI",
    "SolarChat",
    "QianfanChatEndpoint",
    "VolcEngineMaasChat",
    "ChatOctoAI",
    "ChatSnowflakeCortex",
    "ChatYi",
]


def test_all_imports() -> None:
    """Check the public exports match EXPECTED_ALL and the lazy-import map."""
    assert set(__all__) == set(EXPECTED_ALL)
    assert set(__all__) == set(_module_lookup.keys())
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@unit_tests@chat_models@test_imports.py@.PATH_END.py
|
{
"filename": "TOA_characterise.py",
"repo_name": "mattpitkin/tempo2",
"repo_path": "tempo2_extracted/tempo2-master/python/toasim/bin/TOA_characterise.py",
"type": "Python"
}
|
#!/usr/bin/python
import sys
import matplotlib.pylab as plt
from math import sqrt,pi,degrees
import random
import ephem
def printhelp():
    """Print usage information to stdout and exit with status 0."""
    usage_lines = [
        "TOA_characterise [options] tempo2.tim",
        "",
        "Characterises the given TOAs (tempo2 format tim file).",
        "Can simulate future observations based on the existing data",
        "",
        "--simulate [ndays] simulate ndays future TOAs.",
        "--shuffle shuffle observations inside observing blocks.",
        "--seed [seed] set the random seed for simulating",
        "--select \"[flags]\" only passes through given '-f' flags",
        "--psr [psrname] give the pulsar name for HA calculations",
        "--plot show some plots of the loaded TOAs",
        "",
        "e.g. typical usage:",
        "TOA_characterise.py 0437-4715.tim --simulate 365 --shuffle --select '20CM_DFB3 20CM_APSR' --psr J0437-4715",
        "",
    ]
    print("\n".join(usage_lines))
    sys.exit(0)
def mjd2djd(mjd):
    """Convert a Modified Julian Date to a Dublin Julian Date (used by ephem).

    MJD -> JD adds the 2400000.5 offset; DJD is measured from JD 2415020
    (1899 December 31.5), the epoch pyephem uses internally.
    """
    return (mjd + 2400000.5) - 2415020
def getHA(target, tel, mjd):
    """Return the hour angle (in hours) of `target` from observer `tel` at `mjd`."""
    tel.date = mjd2djd(mjd)
    # HA = LST - RA, then wrap into (-pi, pi].
    hour_angle = float(tel.sidereal_time()) - float(target.ra)
    while hour_angle > pi:
        hour_angle -= 2 * pi
    while hour_angle < -pi:
        hour_angle += 2 * pi
    # Convert radians to hours (pi rad == 12 h).
    return hour_angle / pi * 12
class TOA:
    """A single time of arrival plus its metadata and simulation bookkeeping."""

    def __init__(self, t, e, f, F, o, xtraf):
        self.t = t          # arrival time (MJD)
        self.e = e          # TOA uncertainty
        self.f = f          # observing frequency
        self.F = F          # '-f' flag value (backend/receiver label)
        self.o = o.lower()  # observatory code, normalised to lower case
        self.siml = []      # TOAs judged simultaneous with this one
        self.ha = 0         # hour angle; filled in when a pulsar is given
        self.xtraf = xtraf  # extra (flag, value) pairs from the tim file

    def printxtraf(self):
        """Render the extra flags as a single ' -flag value ...' string."""
        return "".join(" %s %s" % (key, val) for key, val in self.xtraf)
# Known observing sites, keyed (lower case) by tempo2 observatory code/name.
observatories = dict()

# Parkes 64 m telescope (tempo2 site code '7').
parkes = ephem.Observer()
parkes.long = '148.263333'
parkes.lat = '-32.999962'
parkes.elevation = 414.8
# Horizon limit (degrees) below which the source is treated as unobservable.
# NOTE(review): 30.25 deg (Parkes) and 72 deg (Arecibo) look like
# zenith-angle-derived limits -- confirm against telescope specifications.
parkes.horizon = '30.25'

# Green Bank Telescope.
gbt = ephem.Observer()
gbt.long = '-79.839495'
gbt.lat = '38.432783'
gbt.elevation = 826.9
gbt.horizon = '10.0'

# Arecibo.
ao = ephem.Observer()
ao.long = '-66.752311'
ao.lat = '18.343853'
ao.elevation = 504.2
ao.horizon = '72.0'

observatories['7'] = parkes
observatories['parkes'] = parkes
observatories['arecibo'] = ao
observatories['gbt'] = gbt
# ---- Command-line handling -------------------------------------------------
if len(sys.argv) < 2:
    printhelp()

# Defaults: no pulsar target, no simulation, no shuffling, no plotting.
target = None
psrn = ""
simulate = 0
shuffle = 0
plot = 0
file = open(sys.argv[1])
select = list()
random.seed()
good_has = list()  # hour angles of all real TOAs (used to vet simulated ones)

# Walk the remaining arguments; options with a value advance i an extra step.
i = 2
while i < len(sys.argv):
    if sys.argv[i] == "--select":
        i += 1
        select.extend(sys.argv[i].split())
    if sys.argv[i] == "--shuffle":
        shuffle = 1
    if sys.argv[i] == "--simulate":
        i += 1
        simulate = int(sys.argv[i])
    # NOTE(review): --plot consumes a value here ("sepn", "errs", "sessep")
    # but the help text presents it as a bare flag -- confirm intended usage.
    if sys.argv[i] == "--plot":
        i += 1
        plot = sys.argv[i]
    if sys.argv[i] == "--seed":
        i += 1
        random.seed(int(sys.argv[i]))
    if sys.argv[i] == '--psr':
        i += 1
        psrn = sys.argv[i]
    if sys.argv[i] == '--help' or sys.argv[i] == '-h':
        printhelp()
    i += 1

# Derive J2000 coordinates from a JHHMM+DDMM-style pulsar name so hour
# angles can be computed.
if len(psrn) > 6:
    s = psrn.strip(" J")
    ra = s[0:2] + ":" + s[2:4]
    dec = s[4:7] + ":" + s[7:9]
    target = ephem.Equatorial(ra, dec, epoch='2000')
    print(ra, target.ra, float(target.ra))
    print(dec, target.dec, float(target.dec))

# Accumulators for the TOA-reading pass below.
flags = dict()       # '-f' flag -> list of TOAs
total = 0
sessions = list()    # list of observing sessions (lists of TOAs)
cur_ses = list()
sessions.append(cur_ses)
sess_seps = list()   # separations between consecutive session starts
prev_toa = None
# NOTE(review): total is bumped to 1 before any TOA is read -- looks like a
# 1-based counter (or an off-by-one); confirm against downstream use.
total += 1
alltoas = list()
# ---- Read and characterise the TOAs from the input .tim file ---------------
for line in file:
    # Skip blank lines: they would otherwise crash the elems[0] check below.
    if not line.strip():
        continue
    # Lines starting with "C" are comments in tempo2 tim files.
    if line[0] == "C":
        continue
    elems = line.strip().split()
    if elems[0] == "FORMAT":
        continue
    # Scan the optional trailing flags: "-f <flag>" selects the backend
    # label; any other "-x v" pair is kept verbatim in xtraf.
    i = 5
    flag = "??"
    xtraf = list()
    while i < len(elems):
        if elems[i] == "-f":
            flag = elems[i + 1]
            i += 1
        if elems[i][0] == '-':
            xtraf.append((elems[i], elems[i + 1]))
            i += 1
        i += 1
    if len(select) > 0 and not flag in select:
        continue
    if not flag in flags:
        flags[flag] = list()
    if len(elems) < 4:
        # Malformed TOA line: report it and skip it. (Previously the code
        # fell through and crashed on the field accesses below.)
        print("ERROR: ", line, file=sys.stderr)
        continue
    # WARNING: This truncates the TOA! No use for timing
    # but ok for simulations
    approx_time = float(elems[2])
    error = float(elems[3])
    freq = float(elems[1])
    obs = elems[4]
    toa = TOA(approx_time, error, freq, flag, obs, xtraf)
    if target != None:
        toa.ha = getHA(target, observatories[toa.o], approx_time)
        good_has.append(toa.ha)
    # TOAs within ~7 minutes (0.005 d) of each other are "simultaneous".
    if prev_toa != None and abs(approx_time - prev_toa.t) < 0.005:
        # simlt
        toa.siml.append(prev_toa)
        prev_toa.siml.append(toa)
    # A gap of more than 5 days starts a new observing session.
    if prev_toa != None and abs(approx_time - prev_toa.t) > 5:
        if approx_time - prev_toa.t < 0:
            # TOAs should be time-ordered; report out-of-order entries.
            print("ERR", total)
            print(approx_time, error, freq, flag, obs, xtraf)
        sess_seps.append(approx_time - cur_ses[0].t)
        cur_ses = list()
        sessions.append(cur_ses)
    prev_toa = toa
    flags[flag].append(toa)
    cur_ses.append(toa)
    total += 1
    alltoas.append(toa)
file.close()
print("There are %d TOAs in %d sessions"%(total,len(sessions)))
keys = list(flags.keys())
fig = plt.figure()
# Histogram either consecutive-TOA separations or TOA errors, one panel per
# '-f' flag (flags with fewer than 5 TOAs are skipped).
if plot == "sepn" or plot == "errs":
    nx = int(sqrt(len(keys)))
    ny = int(len(keys)/nx + 0.99)  # ceil so every flag gets a panel
    p = 0
    for flag in keys:
        if len(flags[flag]) < 5:
            continue
        print(flag, len(flags[flag]))
        toas = flags[flag]
        sepns = list()
        errs = list()
        freq = list()
        prev_t = -1
        for toa in toas:
            f = toa.f
            e = toa.e
            t = toa.t
            # First TOA only seeds prev_t; no separation recorded for it.
            if prev_t == -1:
                prev_t = t
                continue
            t_sepn = t - prev_t
            errs.append(e)
            freq.append(f)
            sepns.append(t_sepn)
            prev_t = t
        p += 1
        if p == 1:
            ax = fig.add_subplot(nx, ny, p)
            ax1 = ax
        else:
            ax = fig.add_subplot(nx, ny, p)  # , sharex=ax1)
        if plot == "sepn":
            print(len(sepns))
            ax.hist(sepns, 50)
        if plot == "errs":
            ax.hist(errs, 50)
        ax.set_title(flag)
print(plot)
# Single-panel histogram of inter-session separations.
if plot == "sessep":
    ax = fig.add_subplot(111)
    ax.hist(sess_seps, 20)
if (plot != 0):
    plt.show()
print(simulate)
# Always define simtoas so the output stage below can iterate it even when no
# simulation was requested (previously a NameError when --simulate was absent).
simtoas = list()
if simulate > 0:
    # Start simulating from the epoch of the last observed session.
    start = sessions[-1][0].t
    print(start)
    last = start
    while last - start < simulate:
        # Pick a past session and a past inter-session separation at random
        # and replay that session starting at last + separation.
        r = random.randint(0, len(sess_seps) - 1)
        next = sessions[r]
        next_st = last + sess_seps[r]
        range = next[-1].t - next[0].t
        # Reset per-session shuffle bookkeeping on the template TOAs.
        for toa in next:
            toa.siml_shuf = None
        for toa in next:
            shuf = 0
            if toa.siml_shuf != None:
                # A simultaneous TOA already chose a shuffle offset; reuse it.
                shuf = toa.siml_shuf
            else:
                if (shuffle):
                    # Move the TOA to a random time within the session.
                    shuf = random.uniform(next[0].t, next[-1].t)
                    shuf -= toa.t
                if target != None:
                    # Adjust until the simulated hour angle matches one that
                    # was actually observed (within 0.2 h).
                    goodHA = 0
                    count = 0
                    while not goodHA:
                        count += 1
                        t = toa.t - next[0].t + next_st + shuf
                        ha = getHA(target, observatories[toa.o], t)
                        if shuffle and count < 5:
                            reroll = 1
                            random.shuffle(good_has)
                            for test in good_has:
                                v = abs(test - ha)
                                if v > 12:
                                    v = 24 - v
                                if v < 0.2:
                                    goodHA = 1
                                    reroll = 0
                                    break
                                if v < 2:
                                    # Close: nudge the offset toward the
                                    # observed HA instead of rerolling.
                                    v = test - ha
                                    if v > 12:
                                        v = 24 - v
                                    if v < -12:
                                        v = -24 - v
                                    shuf += v / 24.0
                                    reroll = 0
                                    break
                            if reroll:
                                shuf = random.uniform(next[0].t, next[-1].t)
                                shuf -= toa.t
                        else:
                            # No shuffling (or too many tries): step forward in
                            # 6-minute increments until the original HA matches.
                            goodHA = (abs(ha - toa.ha) < 0.2)
                            shuf += 0.1 / 24.0
            # Share the chosen offset with simultaneous TOAs.
            for tsim in toa.siml:
                tsim.siml_shuf = shuf
            t = toa.t - next[0].t + next_st + shuf
            newtoa = TOA(t, toa.e, toa.f, toa.F, toa.o, toa.xtraf)
            if target != None:
                newtoa.ha = getHA(target, observatories[toa.o], t)
            simtoas.append(newtoa)
        last = next_st
        # Crude progress indicator: backspace over the previous count.
        print("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", end=' ')
        print("%d" % (last - start), end=' ')
    print("Done")
# ---- Write observed and simulated TOAs in tempo2 format to sim.tim ---------
i = 0
file = open("sim.tim", "w")
file.write("FORMAT 1\n")
# Real (observed) TOAs, tagged "-sim R".
for toa in alltoas:
    file.write(" REAL_%d %f %f %f %s -f %s -sim R %s\n" % (i, toa.f, toa.t, toa.e, toa.o, toa.F, toa.printxtraf()))
    i += 1
i = 0
# Simulated TOAs, tagged "-sim F".
for toa in simtoas:
    file.write(" SIML_%d %f %f %f %s -f %s -sim F %s\n" % (i, toa.f, toa.t, toa.e, toa.o, toa.F, toa.printxtraf()))
    i += 1
file.close()
|
mattpitkinREPO_NAMEtempo2PATH_START.@tempo2_extracted@tempo2-master@python@toasim@bin@TOA_characterise.py@.PATH_END.py
|
{
"filename": "values.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/distribute/coordinator/values.py",
"type": "Python"
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Important value classes relevant to `ClusterCoordinator`.
This is currently under development and the API is subject to change.
"""
import threading
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops.options import ExternalStatePolicy
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute.coordinator import remote_value
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as tf_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import type_spec as type_spec_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(yuefengz): create an implementation for resource RemoteValue which needs
# to remember the closure object while a normal RemoteValue doesn't.
class RemoteValueImpl(remote_value.RemoteValue):
  """Implementation of `RemoteValue`."""

  def __init__(self, closure, type_spec):  # pylint: disable=super-init-not-called
    """Initializes a `RemoteValueImpl`.

    Args:
      closure: The closure from which the `RemoteValue` is created.
      type_spec: The type spec for this `RemoteValue` which is used to trace
        functions that take this `RemoteValue` as input.
    """
    self._closure = closure
    self._type_spec = type_spec
    # Remote tensors produced by the closure; None until published.
    self._values = None
    # Guards the one-time copy of the remote tensors to the coordinator.
    self._has_fetched_to_local = False
    self._has_fetched_to_local_lock = threading.Lock()
    # Local copies of the remote tensors, filled on the first `get()`.
    self._fetched_tensors = None
    self._error = None
    # Set once the value leaves NOT_READY; readers block on this event.
    self._status_available_event = threading.Event()
    self._status = remote_value.RemoteValueStatus.NOT_READY

  def _set_aborted(self, error):
    # Record an aborted execution (e.g. worker failure) and its cause.
    self._status = remote_value.RemoteValueStatus.ABORTED
    self._values = None
    self._error = error
    # Wake up any waiting thread and clear the event.
    self._status_available_event.set()

  def _rebuild_on(self, worker):
    # Readers block again until the re-execution publishes a new status.
    self._status_available_event.clear()
    # TODO(yuefengz): we may need to rebuild its inputs as well.
    self._closure.execute_on(worker)

  def _set_values(self, tensors):
    # Publish successful results and release waiters.
    self._status = remote_value.RemoteValueStatus.READY
    self._values = tensors
    self._error = None
    self._status_available_event.set()

  def _set_error(self, error):
    # Publish a failure and release waiters.
    self._status = remote_value.RemoteValueStatus.READY
    self._values = None
    self._error = error
    self._status_available_event.set()

  def _get_values(self):
    # Blocks until a status has been published.
    self._status_available_event.wait()
    return self._values

  def _get_error(self):
    # Blocks until a status has been published.
    self._status_available_event.wait()
    return self._error

  def _wait_and_maybe_error(self):
    """Blocks until a status is available; raises if aborted or errored."""
    self._status_available_event.wait()
    if self._status is remote_value.RemoteValueStatus.ABORTED:
      raise errors.CancelledError(
          None, None,
          "The corresponding function is aborted. Please reschedule the "
          "function.")
    if self._error is not None:
      raise self._error

  def fetch(self):
    # TODO(rchao): Discuss the possibility of letting users perform `numpy`
    # themselves at API graduation.
    # Convert every fetched tensor that supports it to a numpy value.
    return nest.map_structure(
        lambda x: x.numpy() if hasattr(x, "numpy") else x, self.get())

  def _copy_to_local(self):
    """Copies the remote tensors to the coordinator; returns the copies."""

    def copy_tensor(composite_tensor_obj):
      """Copy a remote tensor to local (coordinator)."""
      if isinstance(composite_tensor_obj, input_lib.DistributedIterator):
        # A DistributedIterator cannot be copied to local; users should not
        # access that anyway.
        return composite_tensor_obj

      with ops.device("/job:%s" % context.get_server_def().job_name):
        # Copying to local (the coordinator) with `tf.device`.
        return array_ops.identity(composite_tensor_obj)

    fetched_result = None
    if self._values is not None:
      # When `self._values` is `None`, it indicates the associated function
      # does not have a return value.
      fetched_result = nest.map_structure(copy_tensor, self._values)

    return fetched_result

  def get(self):
    self._wait_and_maybe_error()

    # Copy at most once; later calls return the cached local tensors.
    with self._has_fetched_to_local_lock:
      if not self._has_fetched_to_local:
        self._fetched_tensors = self._copy_to_local()
        self._has_fetched_to_local = True
    return self._fetched_tensors
class RemoteVariable(RemoteValueImpl):
  """A RemoteValue that represents a mutable per-worker variable."""

  def get(self):
    """Retrieve value with no caching to ensure we get the up-to-date value."""
    # Unlike RemoteValueImpl.get, do not cache the local copy: the remote
    # variable may have been mutated since the last read.
    self._wait_and_maybe_error()
    return self._copy_to_local()
@tf_export("distribute.experimental.coordinator.PerWorkerValues",
           "distribute.coordinator.PerWorkerValue", v1=[])
class PerWorkerValues(composite_tensor.CompositeTensor):
  """A container that holds a list of values, one value per worker.

  `tf.distribute.experimental.coordinator.PerWorkerValues` contains a collection
  of values, where each of the values is located on its corresponding worker,
  and upon being used as one of the `args` or `kwargs` of
  `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule()`, the
  value specific to a worker will be passed into the function being executed at
  that corresponding worker.

  Currently, the only supported path to create an object of
  `tf.distribute.experimental.coordinator.PerWorkerValues` is through calling
  `iter` on a `ClusterCoordinator.create_per_worker_dataset`-returned
  distributed dataset instance. The mechanism to create a custom
  `tf.distribute.experimental.coordinator.PerWorkerValues` is not yet supported.
  """

  def __init__(self, values):
    # Only RemoteValues may be held; reject anything else up front.
    for v in values:
      if not isinstance(v, remote_value.RemoteValue):
        raise AssertionError(
            "`PerWorkerValues` should only take `RemoteValue`s.")
    self._values = tuple(values)

  @property
  def _type_spec(self):
    # All per-worker values share one spec, so the first one's is used.
    return PerWorkerValuesTypeSpec(
        self._values[0]._type_spec,  # pylint: disable=protected-access
        type(self))
class PerWorkerValuesTypeSpec(type_spec_lib.TypeSpec):
  """TypeSpec for PerWorkerValues.

  It only support tracing a function using a PerWorkerValues.
  """

  def __init__(self, value_spec, descendant_type):
    assert value_spec
    self._value_spec = value_spec
    # The concrete PerWorkerValues subclass this spec describes.
    self._descendant_type = descendant_type

  def _serialize(self):
    return (self._value_spec,)

  @property
  def value_type(self):
    return self._descendant_type

  def most_specific_common_supertype(self, others):
    raise NotImplementedError(
        "most_specific_common_supertype is not implemented")

  @property
  def _component_specs(self):
    return self._value_spec

  def _to_components(self, value):
    # NOTE(review): returns the stored spec rather than components of `value`;
    # this spec only supports tracing -- confirm before reusing elsewhere.
    return self._value_spec

  def _from_components(self, value):
    return value
class PerWorkerDatasetFromDatasetFunction(object):
  """Represents worker-distributed datasets created from dataset function."""

  def __init__(self, dataset_fn, coordinator):
    """Makes an iterable from datasets created by the given function.

    Args:
      dataset_fn: A function that returns a `Dataset`.
      coordinator: a `ClusterCoordinator` object, used to create dataset
        resources.
    """

    def disallow_variable_creation(next_creator, **kwargs):
      raise ValueError("Creating variables in `dataset_fn` is not allowed.")

    # Normalise dataset_fn to a ConcreteFunction, tracing it (with variable
    # creation forbidden) when necessary.
    if isinstance(dataset_fn, def_function.Function):
      with variable_scope.variable_creator_scope(disallow_variable_creation):
        dataset_fn = dataset_fn.get_concrete_function()
    elif not isinstance(dataset_fn, tf_function.ConcreteFunction):
      with variable_scope.variable_creator_scope(disallow_variable_creation):
        dataset_fn = def_function.function(dataset_fn).get_concrete_function()
    self._dataset_fn = dataset_fn
    self._coordinator = coordinator
    self._element_spec = None

  def build(self):
    """Trigger dataset creation on workers without creating an iterator.

    Returns:
      A PerWorkerValues object containing a tuple of RemoteValues, themselves
      containing the built Dataset for each worker
    """

    def _create_per_worker_dataset():
      dataset = self._dataset_fn()
      return dataset

    # pylint: disable=protected-access
    per_worker_dataset = self._coordinator._create_per_worker_resources(
        _create_per_worker_dataset)
    # hack type_spec of RemoteValues
    dataset_fn_output_type_spec = self._dataset_fn.structured_outputs._type_spec
    for dataset_remote_value in per_worker_dataset._values:
      dataset_remote_value._type_spec = dataset_fn_output_type_spec
    return per_worker_dataset

  def __iter__(self):
    # We would like users to create iterators outside `tf.function`s so that we
    # can track them.
    if (not context.executing_eagerly() or
        ops.get_default_graph().building_function):
      raise RuntimeError(
          "__iter__() is not supported inside of tf.function or in graph mode.")

    def _create_per_worker_iterator():
      dataset = self._dataset_fn()
      return iter(dataset)

    # If PerWorkerDatasetFromDatasetFunction.__iter__ is called multiple
    # times, for the same object it should only create and register resource
    # once. Using object id to distinguish different iterator resources.
    per_worker_iterator = self._coordinator._create_per_worker_resources(
        _create_per_worker_iterator)

    # Setting type_spec of each RemoteValue so that functions taking these
    # RemoteValues as inputs can be traced.
    for iterator_remote_value in per_worker_iterator._values:
      iterator_remote_value._type_spec = (
          input_lib.get_iterator_spec_from_dataset(
              self._coordinator.strategy, self._dataset_fn.structured_outputs))
    return PerWorkerDistributedIterator(per_worker_iterator._values)

  @property
  def element_spec(self):
    """The type specification of an element of this dataset.

    This property is subject to change without notice.
    """
    if not isinstance(self._dataset_fn, tf_function.ConcreteFunction):
      raise NotImplementedError(
          "`element_spec` is not supported when the `dataset_fn` is not "
          "a `ConcreteFunction`.")
    return self._dataset_fn.structured_outputs.element_spec
def serialize_dataset_to_graph(dataset):
  """Serialize `dataset` to a `GraphDef` suitable for shipping to workers."""
  # Apply any debug options first so they are baked into the serialized graph.
  prepared = dataset._apply_debug_options()  # pylint: disable=protected-access
  return gen_dataset_ops.dataset_to_graph_v2(
      prepared._variant_tensor,  # pylint: disable=protected-access
      external_state_policy=ExternalStatePolicy.WARN.value,
      strip_device_assignment=True)
class _RemoteDataset(dataset_ops.DatasetSource):
  """Creates a dataset given a graph def."""

  def __init__(self, graph_def, element_spec):
    # The element spec is not recoverable from the graph, so it is carried
    # alongside and restored here.
    self._elem_spec = element_spec
    variant_tensor = ged_ops.dataset_from_graph(graph_def)
    super(_RemoteDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    return self._elem_spec
def deserialize_dataset_from_graph(graph_def, element_spec):
  """Rebuild a dataset from `graph_def`, restoring `element_spec`."""
  rebuilt = _RemoteDataset(graph_def, element_spec)
  return rebuilt
class PerWorkerDatasetFromDataset(PerWorkerDatasetFromDatasetFunction):
  """Represents worker-distributed datasets created from a dataset."""

  def __init__(self, dataset, coordinator):
    """Makes an iterable from datasets created by the given dataset.

    It creates a dataset_fn which deserializes a dataset from a graph under the
    hood.

    Args:
      dataset: A tf.data.Dataset, a DistributedDataset or a
        DistributedDatasetsFromFunction
      coordinator: a `ClusterCoordinator` object, used to create dataset
        resources.
    """
    if isinstance(dataset, input_lib.DistributedDataset):
      original_dataset = dataset._original_dataset
      serialized = serialize_dataset_to_graph(original_dataset)

      def dataset_fn():
        # Deserialize on the worker and rebuild the distributed dataset
        # around the recovered dataset.
        deserialized = deserialize_dataset_from_graph(
            serialized, original_dataset.element_spec)
        dataset.build(dataset_to_replace=deserialized)
        return dataset
    elif isinstance(dataset, input_lib.DistributedDatasetsFromFunction):
      def dataset_fn():
        dataset.build()
        return dataset
    elif isinstance(dataset, dataset_ops.Dataset):
      serialized = serialize_dataset_to_graph(dataset)

      def dataset_fn():
        return deserialize_dataset_from_graph(serialized, dataset.element_spec)
    else:
      raise ValueError("Unexpected dataset type!")

    super(PerWorkerDatasetFromDataset, self).__init__(dataset_fn, coordinator)
def get_per_worker_dataset(dataset_or_dataset_fn, coordinator):
  """Returns a per-worker dataset from a dataset or a dataset function."""
  # A callable is treated as a dataset factory; anything else is assumed to
  # be a dataset instance to be serialized and shipped to workers.
  wrapper_cls = (
      PerWorkerDatasetFromDatasetFunction
      if callable(dataset_or_dataset_fn) else PerWorkerDatasetFromDataset)
  return wrapper_cls(dataset_or_dataset_fn, coordinator)
class PerWorkerDistributedIterator(PerWorkerValues):
  """Distributed iterator for `ClusterCoordinator`."""

  def __next__(self):
    return self.get_next()

  def get_next(self, name=None):
    """Returns the next input from the iterator for all replicas."""
    # Direct iteration is unimplemented; consume via schedule()d functions.
    raise NotImplementedError("Iterating over an `AsyncDistributedIterator` "
                              "is not supported right now.")
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@distribute@coordinator@values.py@.PATH_END.py
|
{
"filename": "cli__cross-validation__purpose__div.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage/cli__cross-validation__purpose__div.md",
"type": "Markdown"
}
|
Training can be launched in cross-validation mode. In this case, only the training dataset is required. This dataset is split, and the resulting folds are used as the learning and evaluation datasets. If the input dataset contains the {{ cd-file__col-type__GroupId }} column, all objects from one group are added to the same fold.
Each cross-validation run from the command-line interface launches one of the N trainings performed in N-fold cross-validation.
Use one of the following methods to get aggregated N-fold cross-validation results:
- Run the training in cross-validation mode from the command-line interface N times with different validation folds and aggregate results by hand.
- Use the [cv](../../../concepts/python-reference_cv.md) function of the [Python package](../../../concepts/python-quickstart.md) instead of the command-line version. It returns aggregated results out-of-the-box.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage@cli__cross-validation__purpose__div.md@.PATH_END.py
|
{
"filename": "input.py",
"repo_name": "jcolinhill/pyilc",
"repo_path": "pyilc_extracted/pyilc-main/pyilc/input.py",
"type": "Python"
}
|
from __future__ import print_function
import numpy as np
import yaml
import os
import healpy as hp
"""
module to read in relevant input specified by user
"""
##########################
# wavelet types implemented thus far
# WV_TYPES = ['GaussianNeedlets','TopHatHarmonic']
WV_TYPES = ['GaussianNeedlets','TopHatHarmonic','CosineNeedlets','TaperedTopHats'] # Fiona added CosineNeedlets
##########################

##########################
# bandpass types -- either delta functions or actual bandpasses
BP_TYPES = ['DeltaBandpasses','ActualBandpasses']
##########################

##########################
# beam types -- either symmetric gaussians or 1D ell-dependent profiles
BEAM_TYPES = ['Gaussians','1DBeams']
##########################

##########################
# component types implemented thus far (SEDs that can be preserved/deprojected)
COMP_TYPES = ['CMB','kSZ','tSZ','rSZ','mu','CIB', 'CIB_dbeta','CIB_dT','radio','radio_dbeta','radio2']
##########################

##########################
# SED parameter types implemented thus far (that can be varied)
PARAM_TYPES = ['kT_e_keV','beta_CIB','Tdust_CIB']
##########################

##########################
# prior types on SED parameters
PRIOR_TYPES = ['Delta','Gaussian','TopHat']
##########################

##########################
### DEFAULT INPUT FILE ###
# modify this if you want to use your own
# or you can specify it when constructing ILCInfo
default_path = '../input/'
default_input = 'pyilc_input.yml'
##########################
def read_dict_from_yaml(yaml_file):
    """Open a YAML file and return its parsed contents.

    Parameters
    ----------
    yaml_file : str
        Path to the YAML configuration file. Must not be None.

    Returns
    -------
    dict
        The parsed configuration (whatever `yaml.safe_load` produces).
    """
    # Idiom fix: identity comparison with None (`is not`), not `!=`.
    assert yaml_file is not None
    with open(yaml_file) as f:
        config = yaml.safe_load(f)
    return config
##########################
##########################
"""
class that contains map info (and associated data), ILC specifications, etc., and handles input
"""
class ILCInfo(object):
def __init__(self, input_file=None):
self.input_file = input_file
if (self.input_file is None):
# default case
#fpath = os.path.dirname(__file__)
self.input_file = default_path+default_input
else:
pass
p = read_dict_from_yaml(self.input_file)
# output file directory
self.output_dir = p['output_dir']
assert type(self.output_dir) is str, "TypeError: output_dir"
# prefix for output file names
self.output_prefix = p['output_prefix']
# suffix for output file names (only ILC weights and ILC maps)
self.output_suffix = ''
if 'output_suffix' in p.keys():
self.output_suffix = p['output_suffix']
assert type(self.output_suffix) is str, "TypeError: output_suffix"
# if you are applying previously computed and saved weights to a new set of maps,
# you need to tell the code what the output suffix was for those weights.
# this also changes the output_suffix for the weights if they are being computed and saved
# default is output_suffix
self.output_suffix_weights = self.output_suffix
if 'output_suffix_weights' in p.keys():
self.output_suffix_weights = p['output_suffix_weights']
assert type(self.output_suffix_weights) is str, "TypeError: output_suffix_weights"
# flag whether to save maps of the ILC weights (if 'yes' then they will be saved; otherwise not)
# Fiona suggestion: change this to a bool which is True if p['save_weights'].lower() is {'yes','y','true','t'}
# as for self.save_scale_ILC_maps below
self.save_weights = p['save_weights']
assert type(self.save_weights) is str, "TypeError: save_weights"
#flag whether to save the ILC map at each scale - if this is not in input file, will default to False
# Note this saves the input to synthesize() and so the needlet filters are *NOT* applied.
# these maps cannot be naively added to form the final map.
self.save_scale_ILC_maps = False
if 'save_scale_ILC_maps' in p.keys():
if p['save_scale_ILC_maps'].lower() in ['yes','true','y','t']:
self.save_scale_ILC_maps = True
# maximum multipole for this analysis
self.ELLMAX = p['ELLMAX']
assert type(self.ELLMAX) is int and self.ELLMAX > 0, "ELLMAX"
# type of wavelets to use -- see WV_TYPES above
self.wavelet_type = p['wavelet_type']
assert type(self.wavelet_type) is str, "TypeError: wavelet_type"
assert self.wavelet_type in WV_TYPES, "unsupported wavelet type"
## number of wavelet filter scales used
#Remove this an donly read it if the wavelt_type is not TopHatarmonic (below)
#self.N_scales = p['N_scales']
#assert type(self.N_scales) is int and self.N_scales > 0, "N_scales"
# tolerance for the checks for the responses: preserved component should be within resp_tol of 1,
# deprojected components should be within resp_tol of 0
# default is 1.0e-3
self.resp_tol = 1.0e-3
if 'resp_tol' in p.keys():
self.resp_tol = p['resp_tol']
# width of high ell taper for filters, set to 0 if no taper desired. Default is 200
self.taper_width = 200
if 'taper_width' in p.keys():
self.taper_width = p['taper_width']
assert self.ELLMAX - self.taper_width > 10., "desired taper is too broad for given ELLMAX"
if not self.wavelet_type == 'TopHatHarmonic':
# Number of scales for the NILC
self.N_scales = p['N_scales']
assert type(self.N_scales) is int and self.N_scales > 0, "N_scales"
# parameters for each wavelet type
if self.wavelet_type == 'GaussianNeedlets':
# FWHM values defining the gaussian needlets
self.GN_FWHM_arcmin = np.asarray(p['GN_FWHM_arcmin'])
assert len(self.GN_FWHM_arcmin) == self.N_scales - 1, "GN_FWHM_arcmin"
assert all(FWHM_val > 0. for FWHM_val in self.GN_FWHM_arcmin), "GN_FWHM_arcmin"
assert 'ellboundaries' not in p.keys()
assert 'ellpeaks' not in p.keys()
elif self.wavelet_type == 'CosineNeedlets':
# ellpeak values defining the cosine needlets
self.ellpeaks = np.asarray(p['ellpeaks'])
self.ellmin = np.asarray(p['ellmin'])
assert len(self.ellpeaks) == self.N_scales - 1, "ellpeaks"
assert all(ellpeak> 0. for ellpeak in self.ellpeaks), "ellpeaks"
assert self.ellmin>=0, 'ellmin'
assert 'GN_FWHM_arcmin' not in p.keys()
assert 'ellboundaries' not in p.keys()
elif self.wavelet_type == 'TaperedTopHats':
# the ellboundaries between different tophat ell bins and the taperwidths between them
self.ellboundaries = np.asarray(p['ellboundaries'])
self.taperwidths= np.asarray(p['taperwidths'])
assert len(self.ellboundaries)==len(self.taperwidths),"Ellboundaries!= taperwidths"
assert len(self.ellboundaries) == self.N_scales - 1, "ellboundaries"
assert all(ellpeak> 0. for ellpeak in self.ellboundaries), "ellboundaries"
assert 'GN_FWHM_arcmin' not in p.keys()
assert 'ellpeaks' not in p.keys()
elif self.wavelet_type == 'TopHatHarmonic':
# TODO: add functionality for the user to specity arbitrary ell-bins directly
# the bin sizes for a linearly-ell-binnedHILC
self.Delta_ell_HILC = p['BinSize']
self.ellbins = np.arange(0,self.ELLMAX+1,self.Delta_ell_HILC)
self.N_scales = len(self.ellbins)-1
assert type(self.N_scales) is int and self.N_scales > 0, "N_scales"
# Option to save the harmonic covmat; by default it is False
self.save_harmonic_covmat = False
if 'save_harmonic_covmat' in p.keys():
if p['save_harmonic_covmat'].lower() in ['true','yes','y']:
self.save_harmonic_covmat = True
self.save_alms = False
if 'save_alms' in p.keys():
if p['save_alms'].lower() in ['true','yes','y']:
self.save_alms = True
# TODO: implement these
#elif self.wavelet_type == 'ScaleDiscretizedWavelets':
# parameters defining these wavelets
# TODO: add relevant assertions
#self.B_param = p['B_param']
#self.J_min = p['J_min']
# flag to perform cross-ILC
self.cross_ILC = False
if 'cross_ILC' in p.keys():
if p['cross_ILC'].lower() in ['true','yes','y']:
self.cross_ILC = True
# number of frequency maps used
self.N_freqs = p['N_freqs']
assert type(self.N_freqs) is int and self.N_freqs > 0, "N_freqs"
# if you want to use some pre-computed covmat and drop certain frequency channels, put this in here
# as a list of the indices of the frequency channels you want to drop. Defaults to an empty list.
self.drop_channels = []
if 'drop_channels' in p.keys():
self.drop_channels = p['drop_channels']
assert type(self.drop_channels) is list
for x in self.drop_channels:
assert type(x) is int
assert x < self.N_freqs
# wavelet_beam_criterion, set to 1e-3 by default. This removes frequencies from the NILC
# whose beams are a certain fraction smaller than the appropriate needlet filter within the range
# of ells appropriate for the filter.
if 'wavelet_beam_criterion' in p.keys():
assert 'override_N_freqs_to_use' not in p.keys()
self.wavelet_beam_criterion = p['wavelet_beam_criterion']
elif 'override_N_freqs_to_use' not in p.keys():
self.wavelet_beam_criterion = 1.e-3
# override_N_freqs_to_use OVERRIDES information from wavlet_beam_criterion
# and allows you to explicitly specify how many frequencies to use at each wavelet scale
# this should be a list of ints, of length N_scales, where each entry in the list specifies
# how many frequency channels one should use at the scale corresponding to that entry.
# if the entry is less than N_freqs, the lowest resolution maps will be dropped from the NILC
# such that there are the appropriate number of frequency channels used in each scale
self.override_N_freqs_to_use = False
if 'override_N_freqs_to_use' in p.keys():
assert 'wavelet_beam_criterion' not in p.keys()
self.override_N_freqs_to_use = True
self.N_freqs_to_use = p['override_N_freqs_to_use']
if self.N_freqs_to_use == 'all':
self.N_freqs_to_use = [self.N_freqs for x in self.N_scales]
else:
assert type(self.N_freqs_to_use) is list
assert len(self.N_freqs_to_use) == self.N_scales
for x in self.N_freqs_to_use:
print(x)
assert type(x) is int
assert x>0
assert x<=self.N_freqs
# optionally input the param_dict_file. The default is '../input/fg_SEDs_default_params.yml'
self.param_dict_file = '../input/fg_SEDs_default_params.yml'
if 'param_dict_file' in p.keys():
self.param_dict_file = p['param_dict_file']
# delta-function bandpasses/passbands or actual bandpasses/passbands
# Fiona - I don't know how to read in the None object in a yml file so i have been reading strings of Nones.. not ideal
self.bandpass_type = p['bandpass_type']
assert self.bandpass_type in BP_TYPES, "unsupported bandpass type"
if self.bandpass_type == 'DeltaBandpasses':
# delta function bandpasses: frequency values in GHz
self.freqs_delta_ghz = p['freqs_delta_ghz']
assert len(self.freqs_delta_ghz) == self.N_freqs, "freqs_delta_ghz"
for xind, x in enumerate(self.freqs_delta_ghz):
if x in ["none","None"]:
self.freqs_delta_ghz[xind] = None
elif self.bandpass_type == 'ActualBandpasses':
# actual bandpasses: list of bandpass file names, each containing two columns: [freq [GHz]] [transmission [arbitrary norm.]]
self.freq_bp_files = p['freq_bp_files']
for xind,x in enumerate(self.freq_bp_files):
if x.lower()=="none":
self.freq_bp_files[xind] = None
if x.lower()=='delta':
self.freq_bp_files[xind] = 'delta'
assert len(self.freq_bp_files) == self.N_freqs, "freq_bp_files"
if 'delta' in self.freq_bp_files:
self.freqs_delta_ghz = p['freqs_delta_ghz']
assert len(self.freqs_delta_ghz) == self.N_freqs, "freqs_delta_ghz"
for xind, x in enumerate(self.freqs_delta_ghz):
if x in ["none","None"]:
self.freqs_delta_ghz[xind] = None
# do the wavelet maps already exist as saved files? we can tell the code to skip the check for this, if
# we know this alredy. Deafults to False (it will automatically check if they exist).
# this just allows you to skip the check if you know they exist.
self.wavelet_maps_exist = False
if 'wavelet_maps_exist' in p.keys():
if p['wavelet_maps_exist'].lower() in ['true','yes','y']:
self.wavelet_maps_exist = True
# do the weights already exist as saved files? we can tell the code to skip the check for this, if
# we know this already. Defaults to False (it will automatically check if they exist).
# this just allows you to skip the check if you know they exist.
self.weights_exist = False
if 'weights_exist' in p.keys():
if p['weights_exist'].lower() in ['true','yes','y']:
self.weights_exist = True
# frequency map file names
self.freq_map_files = p['freq_map_files']
assert len(self.freq_map_files) == self.N_freqs, "freq_map_files"
# some preprocessing maps: Is there one map you want to subtract from all the inputs (eg the kinematic dipole)?
# put the filename in a a string here
self.map_to_subtract = None
if 'map_to_subtract' in p.keys():
self.map_to_subtract = p['map_to_subtract']
# Is there a different map you want to subtract from all of the inputs? If so, put them in as a list here
# this should be a list with N_freq entries. Each entry can either be one map filename or a list of map filenames,
# if you want to subtract several maps from a given channel
self.maps_to_subtract = None
if 'maps_to_subtract' in p.keys():
self.maps_to_subtract = p['maps_to_subtract']
assert len(self.maps_to_subtract) == self.N_freqs
for xind,x in enumerate(self.maps_to_subtract):
if type(x) is str:
if x.lower() == 'none':
self.maps_to_subtract[xind] = None
elif type(x) is list:
for a in x:
assert type(a) is str
# Sometimes we may want to remove the means of the domains we are calculating the covmats on
# by default we don't do this, but if you want to do this put this here
# this should be a list with N_scales entries, eg if you want to subtract the means on the realspace domains
# on some scales and not others.
# THIS REMOVES SIGNAL DON'T DO THIS UNLESS YOU KNOW WHAT IT IS DOING
self.subtract_means_before_sums = [False] * self.N_scales
if 'subtract_means_before_sums' in p.keys():
self.subtract_means_before_sums = p['subtract_means_before_sums']
assert type(self.subtract_means_before_sums) is list
assert len(self.subtract_means_before_sums) == self.N_scales
# flags to subtract the mean/monopole of each frequency
# subtract_mean should be a list of N_freq bools of whether you subtract the mean at each frequency
# if you input one bool it will be broadcast to N_Freq bools.
self.subtract_mean = False
self.subtract_monopole = [False] * self.N_freqs
if 'subtract_mean' in p.keys():
if type(p['subtract_mean'] ) is str:
sub_mean = self.N_freqs*[p['subtract_mean']]
print("made submean",sub_mean)
else:
sub_mean = p['subtract_mean']
print("made sub_mean",sub_mean)
assert len(sub_mean)== self.N_freqs
for x in range(self.N_freqs):
if 'monpoole' in sub_mean[x].lower():
self.subtract_monopole[x] = True
# S1 and S2 maps for the cross-ILC
if self.cross_ILC:
self.freq_map_files_s1 = p['freq_map_files_s1']
assert len(self.freq_map_files_s1) == self.N_freqs, "freq_map_files_s1"
self.freq_map_files_s2 = p['freq_map_files_s2']
assert len(self.freq_map_files_s2) == self.N_freqs, "freq_map_files_s2"
# Flag to apply weights to other maps than those used in the ILC weight calculation
if 'maps_to_apply_weights' in p.keys():
self.freq_map_files_for_weights = p['maps_to_apply_weights']
assert len(self.freq_map_files_for_weights) == self.N_freqs, "freq_map_files_for_weights"
self.apply_weights_to_other_maps = True
else:
self.apply_weights_to_other_maps = False
# beams: symmetric gaussians or 1D ell-dependent profiles
self.beam_type = p['beam_type']
assert self.beam_type in BEAM_TYPES, "unsupported beam type"
if self.beam_type == 'Gaussians':
# symmetric gaussian beams: FWHM values in arcmin
self.beam_FWHM_arcmin = np.asarray(p['beam_FWHM_arcmin'])
assert len(self.beam_FWHM_arcmin) == self.N_freqs, "beam_FWHM_arcmin"
assert all(FWHM_val > 0. for FWHM_val in self.beam_FWHM_arcmin), "beam_FWHM_arcmin"
# FWHM assumed to be in strictly decreasing order
if ( any( i < j for i, j in zip(self.beam_FWHM_arcmin, self.beam_FWHM_arcmin[1:]))):
raise AssertionError
elif self.beam_type == '1DBeams':
# symmetric 1D beams with arbitrary profiles: list of beam file names, each containing two columns: [ell] [b_ell (norm. to 1 at ell=0)]
self.beam_files = p['beam_files']
assert len(self.beam_files) == self.N_freqs, "beam_files"
print("Note: frequency maps are assumed to be in strictly decreasing beam size ordering!")
# resolution at which to perform the ILC (if unspecified, deafults to resolution of the highest-resolution input map)
self.perform_ILC_at_beam = None
if 'perform_ILC_at_beam' in p.keys():
#perform_ILC_at_beam should be in arcmin.
self.perform_ILC_at_beam = p['perform_ILC_at_beam']
# N_side value of highest-resolution input map (and presumed output map N_side)
# be conservative and assume N_side must be a power of 2 (stricly speaking, only necessary for nest-ordering)
# https://healpy.readthedocs.io/en/latest/generated/healpy.pixelfunc.isnsideok.html
self.N_side = p['N_side']
assert hp.pixelfunc.isnsideok(self.N_side, nest=True), "invalid N_side"
self.N_pix = 12*self.N_side**2
self.mean_by_smoothing = True
self.mean_by_upgrading = False # placeholder - remove this functionality in wavelets.py
# Do we only want to perform NILC on part of the sky? if so, include the mask
# todo: think about apodization etc.......
self.mask_before_covariance_computation = None
if 'mask_before_covariance_computation' in p.keys():
self.mask_before_covariance_computation = hp.fitsfunc.read_map(p['mask_before_covariance_computation'][0],field=p['mask_before_covariance_computation'][1])
assert hp.get_nside(self.mask_before_covariance_computation) >= self.N_side
if hp.get_nside(self.mask_before_covariance_computation) >self.N_side:
print("fsky before is",np.sum(self.mask_before_covariance_computation)/self.mask_before_covariance_computation.shape[0],flush=True)
self.mask_before_covariance_computation = hp.ud_grade(self.mask_before_covariance_computation,self.N_side)
self.mask_before_covariance_computation[self.mask_before_covariance_computation<1]=0
print("fsky after is",np.sum(self.mask_before_covariance_computation)/self.mask_before_covariance_computation.shape[0],flush=True)
# Do we only want to perform the waveletizing on part of the sky? If so , include this mask
self.mask_before_wavelet_computation = None
if 'mask_before_wavelet_computation' in p.keys():
self.mask_before_wavelet_computation= hp.fitsfunc.read_map(p['mask_before_wavelet_computation'][0],field=p['mask_before_wavelet_computation'][1])
assert hp.get_nside(self.mask_before_wavelet_computation) >= self.N_side
if hp.get_nside(self.mask_before_wavelet_computation) >self.N_side:
print("fsky before is",np.sum(self.mask_before_wavelet_computation)/self.mask_before_wavelet_computation.shape[0],flush=True)
self.mask_before_wavelet_computation= hp.ud_grade(self.mask_before_wavelet_computation,self.N_side)
self.mask_before_wavelet_computation[self.mask_before_wavelet_computation<1]=0
print("fsky after is",np.sum(self.mask_before_wavelet_computation)/self.mask_before_wavelet_computation.shape[0],flush=True)
if len(self.mask_before_wavelet_computation[self.mask_before_wavelet_computation==0]>0):
assert self.mask_before_covariance_computation[self.mask_before_wavelet_computation==0].all()==0
# ILC: component to preserve
self.ILC_preserved_comp = p['ILC_preserved_comp']
assert self.ILC_preserved_comp in COMP_TYPES, "unsupported component type in ILC_preserved_comp"
# real-space filters:
if not self.wavelet_type == 'TopHatHarmonic':
assert ('ILC_bias_tol' in p.keys() or 'FWHM_pix' in p.keys())
# ILC: bias tolerance
if 'ILC_bias_tol' in p.keys():
assert 'FWHM_pix' not in p.keys()
self.ILC_bias_tol = p['ILC_bias_tol']
assert self.ILC_bias_tol > 0. and self.ILC_bias_tol < 1., "invalid ILC bias tolerance"
# if you want to allow ILC biases that are too large for the number of modes available:
self.override_ILCbiastol = False
if 'override_ILCbiastol_threshold' in p.keys():
assert type(p['override_ILCbiastol_threshold']) is str
if p['override_ILCbiastol_threshold'].lower() in ['true','yes']:
self.override_ILCbiastol = True
#manually set realspace areas (in radians)
if 'FWHM_pix' in p.keys():
assert 'ILC_bias_tol' not in p.keys()
self.FWHM_pix = p['FWHM_pix']
assert type(self.FWHM_pix) is list
assert len(self.FWHM_pix) == self.N_scales
for x in self.FWHM_pix:
sigma_pix_temp = x / np.sqrt(8.*np.log(2.))
assert sigma_pix_temp < np.pi
else:
self.FWHM_pix = None
# ILC: component(s) to deproject (if any)
self.N_deproj = p['N_deproj']
assert (type(self.N_deproj) is int) or (type(self.N_deproj) is list)
# if an integer is input, deproject this at all scales
if type(self.N_deproj) is int:
assert type(self.N_deproj) is int and self.N_deproj >= 0, "N_deproj"
if (self.N_deproj > 0):
self.ILC_deproj_comps = p['ILC_deproj_comps']
assert len(self.ILC_deproj_comps) == self.N_deproj, "ILC_deproj_comps"
assert all(comp in COMP_TYPES for comp in self.ILC_deproj_comps), "unsupported component type in ILC_deproj_comps"
assert((self.N_deproj + 1) <= self.N_freqs), "not enough frequency channels to deproject this many components"
# If a list is input, assign each element the corresponding scale
if type(self.N_deproj) is list:
assert len(self.N_deproj) == self.N_scales
ind = 0
self.ILC_deproj_comps=[]
for N_deproj in self.N_deproj:
assert type(N_deproj) is int and N_deproj >= 0, "N_deproj"
if (N_deproj > 0):
self.ILC_deproj_comps.append(p['ILC_deproj_comps'][ind])
assert len(self.ILC_deproj_comps[ind]) == N_deproj, "ILC_deproj_comps"
assert all(comp in COMP_TYPES for comp in self.ILC_deproj_comps[ind]), "unsupported component type in ILC_deproj_comps"
assert((N_deproj + 1) <= self.N_freqs), "not enough frequency channels to deproject this many components"
else:
self.ILC_deproj_comps.append([])
ind = ind+1
# Flag for printing the time taken for several of the linear algebra
# computations for the weights. Default is False
self.print_timing = False
if 'print_timing' in p.keys():
assert type(p['print_timing']) is str
if p['print_timing'].lower() in ['true','yes','t','y']:
self.print_timing = True
# Flag for using the numba functions in the linear algebra calculation of the weights,
# these parallelize the computation in a pixelwise manner and can provide significant
# speed up. Default is True, but it can be set to False in the input file
self.use_numba = True
if 'use_numba' in p.keys():
assert type (p['use_numba']) is str
if p['use_numba'].lower() in ['false','no','f','n']:
self.use_numba = False
# I have started saving this as hdf5 files but the old fits format functionality still exists.
assert 'save_as' in p.keys(), "You need to specify whether to save as fits files or hdf5 files. hdf5 files are recommended, but fits files is available for back-compatibility."
assert p['save_as'] in ['fits','hdf5']
if p['save_as'] == 'fits':
self.save_as_fits = True
self.save_as_hdf5 = False
elif p['save_as'] == 'hdf5':
self.save_as_hdf5 = True
self.save_as_fits = False
#filenames for the covmaps, invcovmaps, and wavelet coeff and weight files
self.covmaps_hdf5_filename = self.output_dir + self.output_prefix + '_covmaps'+'_crossILC'*self.cross_ILC+'.hdf5'
self.wavelet_coeff_hdf5_filename = self.output_dir + self.output_prefix + '_waveletmaps.hdf5'
self.weight_filename_hdf5 = self.output_dir + self.output_prefix + '_weightmaps_component_'+self.ILC_preserved_comp+'_crossILC'*self.cross_ILC+self.output_suffix_weights+'.fits'
if type(self.N_deproj )is int:
if self.N_deproj>0:
self.weight_filename_hdf5 = self.output_dir+self.output_prefix+'_weightmaps_component_'+self.ILC_preserved_comp+'_deproject_'+'_'.join(self.ILC_deproj_comps)+'_crossILC'*self.cross_ILC+self.output_suffix_weights+'.fits'
else:
if self.N_deproj[0]>0:
self.weight_filename_hdf5 = self.output_dir+self.output_prefix+'_weightmaps_component_'+self.ILC_preserved_comp+'_deproject_'+'_'.join(self.ILC_deproj_comps[0])+'_crossILC'*self.cross_ILC+self.output_suffix_weights+'.fits'
####################
### TODO: this block of code with SED parameters, etc is currently not used anywhere
### instead, we currently just get the SED parameter info from fg_SEDs_default_params.yml
### if we wanted to do something fancy like sample over SED parameters, we would want to make use of this code
# ILC: SED parameters
if 'N_SED_params' in p.keys():
self.N_SED_params = p['N_SED_params']
else:
self.N_SED_params = 0
assert type(self.N_SED_params) is int and self.N_SED_params >= 0, "N_SED_params"
if (self.N_SED_params > 0):
#TODO: implement checks that only SED parameters are called here for components that are being explicitly deprojected
#TODO: more generally, implement some way of associating the parameters with the components
self.SED_params = p['SED_params']
assert len(self.SED_params) == self.N_SED_params, "SED_params"
assert all(param in PARAM_TYPES for param in self.SED_params), "unsupported parameter type in SED_params"
# get fiducial values (which are also taken to be centers of priors)
self.SED_params_vals = np.asarray(p['SED_params_vals'])
assert len(self.SED_params_vals) == self.N_SED_params, "SED_params_vals"
# get prior ranges (Delta = don't vary)
self.SED_params_priors = p['SED_params_priors']
assert len(self.SED_params_priors) == self.N_SED_params, "SED_params_priors"
assert all(prior in PRIOR_TYPES for prior in self.SED_params_priors), "unsupported prior type in SED_params_priors"
# Delta -> parameter has no meaning
# Gaussian -> parameter is std dev
# TopHat -> parameter is width
self.SED_params_priors_params = np.asarray(p['SED_params_priors_params'])
assert len(self.SED_params_priors_params) == self.N_SED_params, "SED_params_priors_params"
####################
####################
# TODO: cross-correlation not yet implemented (not hard to do)
# file names of maps with which to cross-correlate
if 'N_maps_xcorr' in p.keys():
self.N_maps_xcorr = p['N_maps_xcorr']
else:
self.N_maps_xcorr = 0
assert type(self.N_maps_xcorr) is int and self.N_maps_xcorr >= 0, "N_maps_xcorr"
if (self.N_maps_xcorr > 0):
self.maps_xcorr_files = p['maps_xcorr_files']
assert len(self.maps_xcorr_files) == self.N_maps_xcorr, "maps_xcorr_files"
# file names of masks to use in each cross-correlation
# masks should be pre-apodized
self.masks_xcorr_files = p['masks_xcorr_files']
if self.masks_xcorr_files is not None: #None = no mask to be applied
assert len(self.masks_xcorr_files) == self.N_maps_xcorr, "masks_xcorr_files"
####################
# method for reading in maps
def read_maps(self):
if not self.wavelet_maps_exist:
self.maps = np.zeros((self.N_freqs,self.N_pix), dtype=np.float64)
for i in range(self.N_freqs):
# TODO: allow specification of nested or ring ordering (although will already work here if fits keyword ORDERING is present)
temp_map = hp.fitsfunc.read_map(self.freq_map_files[i], )
assert len(temp_map) <= self.N_pix, "input map at higher resolution than specified N_side"
if (len(temp_map) == self.N_pix):
self.maps[i] = np.copy(temp_map)
elif (len(temp_map) < self.N_pix):
# TODO: should probably upgrade in harmonic space to get pixel window correct
self.maps[i] = np.copy( hp.pixelfunc.ud_grade(temp_map, nside_out=self.N_side, order_out='RING', dtype=np.float64) )
del(temp_map)
# if cross-ILC read in the S1 and S2 maps
if self.cross_ILC:
self.maps_s1 = np.zeros((self.N_freqs,self.N_pix), dtype=np.float64)
self.maps_s2 = np.zeros((self.N_freqs,self.N_pix), dtype=np.float64)
for i in range(self.N_freqs):
# TODO: allow specification of nested or ring ordering (although will already work here if fits keyword ORDERING is present)
temp_map_s1 = hp.fitsfunc.read_map(self.freq_map_files_s1[i], )
assert len(temp_map_s1) <= self.N_pix, "input map at higher resolution than specified N_side"
temp_map_s2 = hp.fitsfunc.read_map(self.freq_map_files_s2[i], )
assert len(temp_map_s2) <= self.N_pix, "input map at higher resolution than specified N_side"
if (len(temp_map_s1) == self.N_pix):
self.maps_s1[i] = np.copy(temp_map_s1)
elif (len(temp_map_s1) < self.N_pix):
# TODO: should probably upgrade in harmonic space to get pixel window correct
self.maps_s1[i] = np.copy( hp.pixelfunc.ud_grade(temp_map_s1, nside_out=self.N_side, order_out='RING', dtype=np.float64) )
if (len(temp_map_s2) == self.N_pix):
self.maps_s2[i] = np.copy(temp_map_s2)
elif (len(temp_map_s2) < self.N_pix):
# TODO: should probably upgrade in harmonic space to get pixel window correct
self.maps_s2[i] = np.copy( hp.pixelfunc.ud_grade(temp_map_s2, nside_out=self.N_side, order_out='RING', dtype=np.float64) )
del(temp_map_s1)
del(temp_map_s2)
# if you want to subtract something from the maps, do it here
if self.map_to_subtract is not None:
map_to_subtract = hp.fitsfunc.read_map(self.map_to_subtract)
assert hp.get_nside(map_to_subtract) >= self.N_side
if hp.get_nside(map_to_subtract) > self.N_side:
map_to_subtract = hp.ud_grade(map_to_subtract,self.N_side)
self.maps = self.maps - map_to_subtract[None,:]
if self.cross_ILC:
self.maps_s1 = self.maps_s1 - map_to_subtract[None,:]
self.maps_s2 = self.maps_s2 - map_to_subtract[None,:]
# if you want to subtract something from the maps only in specific frequency channels, do it here
if self.maps_to_subtract is not None:
for freqind in range(self.N_freqs):
if self.maps_to_subtract[freqind] is not None:
if type(self.maps_to_subtract[freqind]) is str:
map_to_subtract = hp.fitsfunc.read_map(self.maps_to_subtract[freqind])
else:
maps_to_subtract = [hp.fitsfunc.read_map(x) for x in self.maps_to_subtract[freqind]]
for xind,mapp in enumerate(maps_to_subtract):
if hp.get_nside(mapp) > self.N_side:
mapp_dg = hp.ud_grade(mapp,self.N_side)
maps_to_subtract[xind] = mapp_dg
maps_to_subtract = np.array(maps_to_subtract)
map_to_subtract = np.sum(maps_to_subtract,axis=0)
print("shape is",map_to_subtract.shape)
else:
map_to_subtract = 0*self.maps[freqind]
assert hp.get_nside(map_to_subtract) >= self.N_side
if hp.get_nside(map_to_subtract) > self.N_side:
map_to_subtract = hp.ud_grade(map_to_subtract,self.N_side)
self.maps[freqind] = self.maps[freqind] - map_to_subtract
if self.subtract_monopole[freqind]:
print("subtracting monopole",freqind,flush=True)
self.maps[freqind] -= np.mean(self.maps[freqind])
if self.cross_ILC:
self.maps_s1[freqind] = self.maps_s1[freqind] - map_to_subtract
self.maps_s2[freqind] = self.maps_s2[freqind] - map_to_subtract
# if we need to apply weights to alternative maps, read them in
if self.apply_weights_to_other_maps:
print("reading in maps for weights",flush=True)
self.maps_for_weights = np.zeros((self.N_freqs,self.N_pix), dtype=np.float64)
for i in range(self.N_freqs):
# TODO: allow specification of nested or ring ordering (although will already work here if fits keyword ORDERING is present)
temp_map = hp.fitsfunc.read_map(self.freq_map_files_for_weights[i])
assert len(temp_map) <= self.N_pix, "input map at higher resolution than specified N_side"
if (len(temp_map) == self.N_pix):
self.maps_for_weights[i] = np.copy(temp_map)
elif (len(temp_map) < self.N_pix):
# TODO: should probably upgrade in harmonic space to get pixel window correct
self.maps_for_weights[i] = np.copy( hp.pixelfunc.ud_grade(temp_map, nside_out=self.N_side, order_out='RING', dtype=np.float64) )
del(temp_map)
# also read in maps with which to cross-correlate, if specified
if self.N_maps_xcorr != 0:
# maps
self.maps_xcorr = np.zeros((self.N_maps_xcorr,self.N_pix), dtype=np.float64)
for i in range(self.N_maps_xcorr):
temp_map = hp.fitsfunc.read_map(self.maps_xcorr_files[i], field=0)
if not self.allow_dgrading:
assert len(temp_map) <= self.N_pix, "input map for cross-correlation at higher resolution than specified N_side"
if (len(temp_map) == self.N_pix):
self.maps_xcorr[i] = np.copy(temp_map)
elif (len(temp_map) < self.N_pix):
# TODO: should probably upgrade in harmonic space to get pixel window correct
self.maps_xcorr[i] = np.copy( hp.pixelfunc.ud_grade(temp_map, nside_out=self.N_side, order_out='RING', dtype=np.float64) )
# masks
if self.masks_xcorr_files is not None: #None = no mask to be applied
self.masks_xcorr = np.zeros((self.N_maps_xcorr,self.N_pix), dtype=np.float64)
for i in range(self.N_maps_xcorr):
temp_map = hp.fitsfunc.read_map(self.masks_xcorr_files[i], field=0)
if not self.allow_dgrading:
assert len(temp_map) <= self.N_pix, "input mask for cross-correlation at higher resolution than specified N_side"
if (len(temp_map) == self.N_pix):
self.masks_xcorr[i] = np.copy(temp_map)
elif (len(temp_map) < self.N_pix):
self.masks_xcorr[i] = np.copy( hp.pixelfunc.ud_grade(temp_map, nside_out=self.N_side, order_out='RING', dtype=np.float64) )
else: #no mask
self.masks_xcorr = np.ones((self.N_maps_xcorr,self.N_pix), dtype=np.float64)
# method for reading in bandpasses
# self.bandpasses is a list of length self.N_freqs where each entry is an N x 2 array where N can be different for each frequency channel
def read_bandpasses(self):
    """Load the passband for each frequency channel into self.bandpasses.

    Only does anything when bandpass_type is 'ActualBandpasses'.  Each entry
    of self.bandpasses is one of:
      - an array loaded from the channel's bandpass file
        (columns: freq [GHz], transmission [arbitrary norm.]),
      - the string 'Delta' for a channel marked as a delta-function passband,
      - None for a channel with no bandpass file.
    """
    if self.bandpass_type != 'ActualBandpasses':
        return
    self.bandpasses = []
    for chan in range(self.N_freqs):
        bp_file = self.freq_bp_files[chan]
        if bp_file is None:
            self.bandpasses.append(None)
        elif bp_file.lower() == 'delta':
            self.bandpasses.append('Delta')
        else:
            self.bandpasses.append(np.loadtxt(bp_file, unpack=True, usecols=(0,1)))
# method for reading in beams
# self.beams is a list of length self.N_freqs where each entry is an (ELLMAX+1) x 2 array
def read_beams(self):
    """Construct per-frequency beam transfer functions and the common beam.

    Populates:
      - self.beams      : for 'Gaussians', an (N_freqs, ELLMAX+1, 2) array of
                          [ell, b_ell] pairs computed from beam_FWHM_arcmin;
                          for '1DBeams', a list of (ELLMAX+1) x 2 arrays loaded
                          from beam_files
      - self.common_beam: (ELLMAX+1) x 2 array of the beam at which the ILC is
                          performed (a gaussian of perform_ILC_at_beam arcmin if
                          given, otherwise the beam of the last channel, assumed
                          to be the highest-resolution one)
    """
    if self.beam_type == 'Gaussians':
        self.beams = np.zeros((self.N_freqs,self.ELLMAX+1,2), dtype=np.float64)
        for i in range(self.N_freqs):
            # column 0 = ell, column 1 = gaussian b_ell; FWHM is converted arcmin -> radians
            self.beams[i] = np.transpose(np.array([np.arange(self.ELLMAX+1), hp.sphtfunc.gauss_beam(self.beam_FWHM_arcmin[i]*(np.pi/180.0/60.0), lmax=self.ELLMAX)]))
        # if self.perform_ILC_at_beam is specified, convolve all maps to the common_beam
        if self.perform_ILC_at_beam is not None:
            self.common_beam =np.transpose(np.array([np.arange(self.ELLMAX+1), hp.sphtfunc.gauss_beam(self.perform_ILC_at_beam*(np.pi/180.0/60.0), lmax=self.ELLMAX)]))
        else:
            self.common_beam = self.beams[-1] # if perform_ILC_at_beam is unspecified, convolve to the beam of the highest-resolution map
    elif self.beam_type == '1DBeams':
        self.beams = [] #initialize empty list
        for i in range(self.N_freqs):
            #(self.beams).append(np.loadtxt(self.beam_files[i], unpack=True, usecols=(0,1)))
            (self.beams).append(np.loadtxt(self.beam_files[i], usecols=(0,1)))
            # check that beam profiles start at ell=0 and extend to self.ELLMAX or beyond
            assert (self.beams)[i][0][0] == 0, "beam profiles must start at ell=0"
            assert (self.beams)[i][-1][0] >= self.ELLMAX, "beam profiles must extend to ELLMAX or higher"
            # truncate profiles extending beyond ELLMAX to exactly ELLMAX+1 rows
            # NOTE(review): truncation by row index assumes one row per integer ell
            # starting at ell=0 — confirm the beam files follow that layout
            if ((self.beams)[i][-1][0] > self.ELLMAX):
                (self.beams)[i] = (self.beams)[i][0:self.ELLMAX+1]
            assert (len((self.beams)[i]) == self.ELLMAX+1), "beam profiles must contain all integer ells up to ELLMAX"
        if self.perform_ILC_at_beam is not None:
            self.common_beam =np.transpose(np.array([np.arange(self.ELLMAX+1), hp.sphtfunc.gauss_beam(self.perform_ILC_at_beam*(np.pi/180.0/60.0), lmax=self.ELLMAX)]))
        else:
            self.common_beam = self.beams[-1] # if perform_ILC_at_beam is unspecified, convolve to the beam of the highest-resolution map
# method for turning maps to alms
def maps2alms(self):
    """Compute (or load cached) harmonic coefficients of the frequency maps.

    Fills self.alms with one alm array per channel, truncated at ELLMAX.  If a
    previously saved alm file exists for a channel it is read instead of
    recomputed; newly computed alms are written to disk when save_alms is set.
    When cross-ILC is enabled, also transforms the S1/S2 split maps (which are
    never cached).
    """
    self.alms = []
    for freq_index, freq_map in enumerate(self.maps):
        alm_file = self.output_dir + self.output_prefix + '_alm_freq' + str(freq_index) + '.fits'
        if os.path.isfile(alm_file):
            # cached alms exist for this channel: read them instead of recomputing
            self.alms.append(hp.fitsfunc.read_alm(alm_file))
        else:
            self.alms.append(hp.map2alm(freq_map, lmax=self.ELLMAX))
            if self.save_alms:
                hp.fitsfunc.write_alm(alm_file, self.alms[freq_index])
    if self.cross_ILC:
        self.alms_s1 = [hp.map2alm(split_map, lmax=self.ELLMAX) for split_map in self.maps_s1]
        self.alms_s2 = [hp.map2alm(split_map, lmax=self.ELLMAX) for split_map in self.maps_s2]
def maps_to_apply_weights2alms(self):
    """Compute harmonic coefficients of the alternative maps the weights will be applied to.

    Fills self.alms_to_apply_weights with one alm array per channel of
    self.maps_for_weights, truncated at ELLMAX.  Unlike maps2alms, no disk
    caching is performed here.
    """
    # comprehension replaces a manual append loop whose enumerate index was unused
    self.alms_to_apply_weights = [hp.map2alm(freq_map, lmax=self.ELLMAX)
                                  for freq_map in self.maps_for_weights]
def alms2cls(self):
    """Compute auto-/cross-power spectra of the channels, deconvolved to the common beam.

    Fills self.cls, an (N_freqs, N_freqs, ELLMAX+1) array symmetric in its
    first two indices.  Each C_ell^{ab} is multiplied by
    (common_beam / beam_a) * (common_beam / beam_b) so all spectra refer to
    maps convolved to the common beam.  When cross-ILC is enabled, also fills
    self.cls_s1s2 with the split-1 x split-2 cross-spectra, which are NOT
    symmetric and therefore require the full (a, b) loop.
    """
    self.cls = np.zeros((len(self.alms),len(self.alms),self.ELLMAX+1))
    new_beam = self.common_beam
    for a in range(len(self.maps)):
        inp_beam_a = (self.beams)[a]
        # per-ell factor re-convolving channel a from its native beam to the common beam
        beam_fac_a = new_beam[:,1]/inp_beam_a[:,1]
        for b in range(a,len(self.maps)):
            inp_beam_b = (self.beams)[b]
            beam_fac_b = new_beam[:,1]/inp_beam_b[:,1]
            # C_ell^{ab} = C_ell^{ba}: fill both entries from one alm2cl call
            self.cls[a,b]=self.cls[b,a] = hp.alm2cl(self.alms[a],self.alms[b],lmax=self.ELLMAX) * beam_fac_b * beam_fac_a
    if self.cross_ILC:
        self.cls_s1s2= np.zeros((len(self.alms),len(self.alms),self.ELLMAX+1))
        for a in range(len(self.maps)):
            inp_beam_a = (self.beams)[a]
            beam_fac_a = new_beam[:,1]/inp_beam_a[:,1]
            # full grid: s1[a] x s2[b] differs from s1[b] x s2[a], so no symmetric shortcut
            for b in range(len(self.maps)):
                inp_beam_b = (self.beams)[b]
                beam_fac_b = new_beam[:,1]/inp_beam_b[:,1]
                self.cls_s1s2[a,b]=hp.alm2cl(self.alms_s1[a],self.alms_s2[b],lmax=self.ELLMAX) * beam_fac_b * beam_fac_a
|
jcolinhillREPO_NAMEpyilcPATH_START.@pyilc_extracted@pyilc-main@pyilc@input.py@.PATH_END.py
|
{
"filename": "athena.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/document_loaders/athena.py",
"type": "Python"
}
|
from __future__ import annotations
import io
import json
import time
from typing import Any, Dict, Iterator, List, Optional, Tuple
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class AthenaLoader(BaseLoader):
    """Load documents from `AWS Athena`.

    Each document represents one row of the result.
    - By default, all columns are written into the `page_content` of the
      document and none into the `metadata` of the document.
    - If `metadata_columns` are provided then these columns are written
      into the `metadata` of the document while the rest of the columns
      are written into the `page_content` of the document.

    To authenticate, the AWS client uses this method to automatically load
    credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Amazon Textract service.
    """

    def __init__(
        self,
        query: str,
        database: str,
        s3_output_uri: str,
        profile_name: Optional[str] = None,
        metadata_columns: Optional[List[str]] = None,
    ):
        """Initialize Athena document loader.

        Args:
            query: The query to run in Athena.
            database: Athena database.
            s3_output_uri: Athena output path.
            profile_name: Optional. AWS credential profile, if profiles are
                being used.
            metadata_columns: Optional. Columns written to Document `metadata`.
        """
        self.query = query
        self.database = database
        self.s3_output_uri = s3_output_uri
        self.metadata_columns = metadata_columns if metadata_columns is not None else []
        try:
            import boto3
        except ImportError:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        try:
            session = (
                boto3.Session(profile_name=profile_name)
                if profile_name is not None
                else boto3.Session()
            )
        except Exception as e:
            raise ValueError(
                "Could not load credentials to authenticate with AWS client. "
                "Please check that credentials in the specified "
                "profile name are valid."
            ) from e
        self.athena_client = session.client("athena")
        self.s3_client = session.client("s3")

    def _execute_query(self) -> List[Dict[str, Any]]:
        """Run the query, wait for completion, and return the rows as dicts.

        NOTE: polls Athena once per second with no timeout; a query that never
        reaches a terminal state will block indefinitely.
        """
        response = self.athena_client.start_query_execution(
            QueryString=self.query,
            QueryExecutionContext={"Database": self.database},
            ResultConfiguration={"OutputLocation": self.s3_output_uri},
        )
        query_execution_id = response["QueryExecutionId"]
        while True:
            response = self.athena_client.get_query_execution(
                QueryExecutionId=query_execution_id
            )
            state = response["QueryExecution"]["Status"]["State"]
            if state == "SUCCEEDED":
                break
            elif state == "FAILED":
                resp_status = response["QueryExecution"]["Status"]
                state_change_reason = resp_status["StateChangeReason"]
                err = f"Query Failed: {state_change_reason}"
                raise Exception(err)
            elif state == "CANCELLED":
                raise Exception("Query was cancelled by the user.")
            time.sleep(1)
        result_set = self._get_result_set(query_execution_id)
        # round-trip through JSON so every row becomes a plain dict of scalars
        return json.loads(result_set.to_json(orient="records"))

    def _remove_suffix(self, input_string: str, suffix: str) -> str:
        """Return *input_string* with a trailing *suffix* removed, if present."""
        if suffix and input_string.endswith(suffix):
            return input_string[: -len(suffix)]
        return input_string

    def _remove_prefix(self, input_string: str, suffix: str) -> str:
        """Return *input_string* with a leading *suffix* removed, if present."""
        if suffix and input_string.startswith(suffix):
            return input_string[len(suffix) :]
        return input_string

    def _get_result_set(self, query_execution_id: str) -> Any:
        """Fetch the query's CSV result object from S3 as a pandas DataFrame."""
        try:
            import pandas as pd
        except ImportError:
            raise ImportError(
                "Could not import pandas python package. "
                "Please install it with `pip install pandas`."
            )
        output_uri = self.s3_output_uri
        # "s3://bucket/prefix/" -> ["bucket", "prefix"]; Athena writes the
        # result to <prefix>/<query_execution_id>.csv
        tokens = self._remove_prefix(
            self._remove_suffix(output_uri, "/"), "s3://"
        ).split("/")
        bucket = tokens[0]
        key = "/".join(tokens[1:] + [query_execution_id]) + ".csv"
        obj = self.s3_client.get_object(Bucket=bucket, Key=key)
        df = pd.read_csv(io.BytesIO(obj["Body"].read()), encoding="utf8")
        return df

    def _get_columns(
        self, query_result: List[Dict[str, Any]]
    ) -> Tuple[List[str], List[str]]:
        """Split the result's column names into (content, metadata) lists."""
        if not query_result:
            # Empty result set: the original indexed query_result[0] here and
            # raised IndexError; with no rows there are no columns to classify.
            return [], []
        content_columns = []
        metadata_columns = []
        all_columns = list(query_result[0].keys())
        for key in all_columns:
            if key in self.metadata_columns:
                metadata_columns.append(key)
            else:
                content_columns.append(key)
        return content_columns, metadata_columns

    def lazy_load(self) -> Iterator[Document]:
        """Yield one Document per result row (columns split per the class doc)."""
        query_result = self._execute_query()
        content_columns, metadata_columns = self._get_columns(query_result)
        for row in query_result:
            page_content = "\n".join(
                f"{k}: {v}" for k, v in row.items() if k in content_columns
            )
            # None-valued cells are dropped from metadata
            metadata = {
                k: v for k, v in row.items() if k in metadata_columns and v is not None
            }
            doc = Document(page_content=page_content, metadata=metadata)
            yield doc
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@document_loaders@athena.py@.PATH_END.py
|
{
"filename": "sumLine.py",
"repo_name": "GBTSpectroscopy/gbtpipe",
"repo_path": "gbtpipe_extracted/gbtpipe-master/gbtpipe/contrib/sumLine.py",
"type": "Python"
}
|
# parsel-tongue script tuned for SGR B2 obs of NH3
#HISTORY
#11JAN26 GIL add help
#10OCT06 GIL measure the NH3 1-1 integrated profile only
from AIPS import *
from AIPS import AIPS
from AIPSTask import AIPSTask, AIPSList
from AIPSData import *
from AIPSData import AIPSUVData, AIPSImage
from Wizardry.AIPSData import AIPSUVData as WizAIPSUVData
import sys
import os
import math
# Integrate the selected spectral line with the AIPS MOMNT task, rename the
# result with SUBIM, and write it to a FITS file with FITTP.
# NOTE(review): indentation of this Python 2 script was reconstructed from a
# flattened dump -- confirm block structure against the original file.
# ---- command-line arguments ------------------------------------------------
AIPS.userno=int(sys.argv[1]) # Extract AIPS pipeline number
outName = sys.argv[2] # Extract Integrated Line Name
restFreqMHz = float(sys.argv[3]) # rest Frequency (MHz)
velocityKmS = float(sys.argv[4]) # velocity (km/s)
velWidthKmS = float(sys.argv[5]) # velocity Full Width (km/s)
mydisk=2
defaultName = 'Pipeline'
#Enforce name > 5 characters
# (AIPS image classes are 6 characters; pad short names from defaultName)
if len(outName) < 6:
    outName = defaultName[0:(6-len(outName))] + outName
print 'Outname : ',outName
print 'RestFreq: ',restFreqMHz, ' (MHz)'
print 'Velocity: ',velocityKmS,' (km/s)'
print 'VelWidth: ',velWidthKmS,' (km/s)'
# ---- AIPS tasks and the input image (latest IMLIN image on mydisk) ---------
fittp=AIPSTask('fittp')
momnt=AIPSTask('momnt')
subim=AIPSTask('subim')
image = AIPSImage(AIPSCat()[mydisk][-1].name, 'IMLIN', mydisk, 1)
# now read parameters passed inside the data header
# (axis 0 is frequency: channel count, reference pixel/value, channel width)
nChan = round(image.header.naxis[0])
refChan = image.header.crpix[0]
nuRef = image.header.crval[0]
dNu = image.header.cdelt[0]
print nChan, refChan, nuRef, dNu
#set rest frequency to select out the line
restFreq = restFreqMHz * 1.e6
cLightKmSec = 299792.458 # speed of light in Km/Sec
#These constants are particular to each source
sourceLineWidthHz = .2E6
# Doppler-shift the rest frequency to the source velocity
sourceFreq = restFreq*(1.-(velocityKmS/cLightKmSec))
print 'Source Frequency:',sourceFreq*1.E-6,'+/-', \
    dNu*1.E-6,' MHz'
#now set the default line channels and widths
lineWidthChan = 100
lineChan = round(nChan*.5)
#Compute number of channels from the Velocity width
if dNu != 0:
    lineWidthChan = round(velWidthKmS * sourceFreq/(dNu * cLightKmSec))
    lineWidthChan = round(lineWidthChan/2.) # half-width in channels
    if lineWidthChan < 0:
        lineWidthChan = - lineWidthChan
    lineChan = round(((sourceFreq-nuRef)/dNu) + refChan)
print 'Source Channel :',lineChan,'+/-',lineWidthChan
#now calcuate channels from channel width
bChan = lineChan - lineWidthChan
eChan = lineChan + lineWidthChan
# clip the channel range to [1, nChan]
if bChan < 1:
    bChan = 1
if bChan > nChan:
    bChan = 1
if eChan > nChan:
    eChan = nChan
if eChan < 1:
    eChan = nChan
#Run momnt task
# (zeroth moment over channels [bChan, eChan]; icut/flux chosen to keep
# essentially all pixels)
momnt.indisk=mydisk
momnt.outdisk=mydisk
momnt.inname=AIPSCat()[mydisk][-1].name
momnt.inclass='IMLIN'
momnt.inseq=1
momnt.icut=-10000.
momnt.flux=-.00001
momnt.outclass='0'
momnt.cellsize[1] = 0
momnt.cellsize[2] = 0
momnt.blc[1]=bChan
momnt.blc[2]=0
momnt.blc[3]=0
momnt.trc[1]=eChan
momnt.trc[2]=0
momnt.trc[3]=0
momnt.go()
# prepare to zap after the copy
image = AIPSImage( AIPSCat()[mydisk][-1].name, \
    AIPSCat()[mydisk][-1].klass, mydisk, \
    AIPSCat()[mydisk][-1].seq)
## Use the subim task to rename the output image
subim.indisk=mydisk
subim.inname=AIPSCat()[mydisk][-1].name
subim.inclass=AIPSCat()[mydisk][-1].klass
subim.inseq=AIPSCat()[mydisk][-1].seq
subim.outclass=outName[-6:]
subim.outdi=mydisk
subim.go()
#cleanup
#image.zap()
## and write the last thing now in the catalog to disk
fittp.indisk=mydisk
fittp.inname=AIPSCat()[mydisk][-1].name
fittp.inclass=AIPSCat()[mydisk][-1].klass
fittp.inseq=AIPSCat()[mydisk][-1].seq
outimage = outName+'.fits'
if os.path.exists(outimage):
    os.remove(outimage)
    print 'Removed existing file to make room for new one :',outimage
fittp.dataout='PWD:'+outimage
fittp.go()
|
GBTSpectroscopyREPO_NAMEgbtpipePATH_START.@gbtpipe_extracted@gbtpipe-master@gbtpipe@contrib@sumLine.py@.PATH_END.py
|
{
"filename": "menubars.py",
"repo_name": "spacetelescope/specview",
"repo_path": "specview_extracted/specview-master/specview/ui/qt/menubars.py",
"type": "Python"
}
|
from ...external.qt import QtGui
class MainMainBar(QtGui.QMenuBar):
    """Menu bar for the main window: a File menu (Open/Exit) and a Windows menu."""

    def __init__(self):
        super(MainMainBar, self).__init__()

        # Actions are kept as attributes so the main window can connect them.
        exit_action = QtGui.QAction('&Exit', self)
        exit_action.setShortcut('Ctrl+Q')
        exit_action.setStatusTip('Exit application')
        self.atn_exit = exit_action

        open_action = QtGui.QAction('&Open', self)
        open_action.setShortcut('Ctrl+O')
        open_action.setStatusTip('Open file')
        self.atn_open = open_action

        # File menu lists Open above Exit.
        file_menu = self.addMenu('&File')
        file_menu.addAction(open_action)
        file_menu.addAction(exit_action)

        self.window_menu = self.addMenu('&Windows')
|
spacetelescopeREPO_NAMEspecviewPATH_START.@specview_extracted@specview-master@specview@ui@qt@menubars.py@.PATH_END.py
|
{
"filename": "option_utils.py",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/pycbc/inference/option_utils.py",
"type": "Python"
}
|
# Copyright (C) 2016 Collin Capano, Duncan Brown
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Generals
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module contains standard options used for inference-related programs.
"""
import argparse
from pycbc import waveform
# -----------------------------------------------------------------------------
#
# Utilities for plotting results
#
# -----------------------------------------------------------------------------
class ParseLabelArg(argparse.Action):
    """Argparse action that parses ``PARAM[:LABEL]`` strings.

    Each value is split into a parameter name and an optional label.  The
    parsed namespace gets the bare parameter(s) under the argument's ``dest``
    and a ``PARAM -> LABEL`` dictionary under ``{dest}_labels``.  When no
    ``LABEL`` is given, the parameter name itself is used as the label.

    Works for arguments with ``nargs != 0`` and ``type`` set to ``str``.
    """

    def __init__(self, type=str, nargs=None,
                 **kwargs):  # pylint: disable=redefined-builtin
        # only string-typed, non-empty arguments make sense for this action
        if type != str:
            raise ValueError("the type for this action must be a string")
        if nargs == 0:
            raise ValueError("nargs must not be 0 for this action")
        super(ParseLabelArg, self).__init__(type=type, nargs=nargs,
                                            **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        got_single = isinstance(values, str)
        raw = [values] if got_single else values
        parsed = []
        label_map = {}
        for entry in raw:
            pieces = entry.split(':')
            if len(pieces) == 2:
                name, tag = pieces
            else:
                # no (or ambiguous) label: the parameter doubles as its label
                name, tag = entry, entry
            label_map[name] = tag
            parsed.append(name)
        # store results, unwrapping the list for single-value arguments
        setattr(namespace, self.dest, parsed[0] if got_single else parsed)
        setattr(namespace, '{}_labels'.format(self.dest), label_map)
class ParseParametersArg(ParseLabelArg):
    """Argparse action that will parse parameters and labels from an option.

    Behaves like ``ParseLabelArg``, with one addition: after the standard
    ``PARAM[:LABEL]`` parsing, any ``LABEL`` that names an entry in
    ``pycbc.waveform.parameters`` is replaced by that entry's latex ``label``
    attribute in the ``{dest}_labels`` dictionary.  Unrecognized labels are
    kept verbatim.  For example, ``'mass1+mass2:mtotal'`` yields the latex
    total-mass label, while ``'foo:bar'`` simply maps ``'foo'`` to ``'bar'``.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # let the parent action do the PARAM[:LABEL] bookkeeping first
        super(ParseParametersArg, self).__call__(parser, namespace, values,
                                                 option_string=option_string)
        label_map = getattr(namespace, '{}_labels'.format(self.dest))
        for name in list(label_map):
            try:
                # swap in the latex label for recognized waveform parameters
                label_map[name] = getattr(waveform.parameters,
                                          label_map[name]).label
            except AttributeError:
                # not a known parameter: keep the user-supplied label
                pass
def add_injsamples_map_opt(parser):
    """Add the ``--injection-samples-map`` option to *parser*.

    The option accepts one or more ``INJECTION_PARAM:SAMPLES_PARAM`` strings
    mapping (functions of) injection parameters onto sample-parameter names.
    """
    helpmsg = ('Rename/apply functions to the injection '
               'parameters and name them the same as one of the '
               'parameters in samples. This can be used if the '
               'injection parameters are not the same as the '
               'samples parameters. INJECTION_PARAM may be a '
               'function of the injection parameters; '
               'SAMPLES_PARAM must a name of one of the '
               'parameters in the samples group.')
    parser.add_argument('--injection-samples-map', nargs='+',
                        metavar='INJECTION_PARAM:SAMPLES_PARAM',
                        help=helpmsg)
def add_plot_posterior_option_group(parser):
    """Adds the options needed to configure plots of posterior results.

    Parameters
    ----------
    parser : object
        ArgumentParser instance.

    Returns
    -------
    argparse._ArgumentGroup
        The group of options that was added to ``parser``.
    """
    group = parser.add_argument_group("Options for what plots to create "
                                      "and their formats.")
    # --- 1D marginal plots --------------------------------------------------
    group.add_argument('--plot-marginal', action='store_true', default=False,
                       help="Plot 1D marginalized distributions on the "
                            "diagonal axes.")
    group.add_argument('--marginal-percentiles', nargs='+', default=None,
                       type=float,
                       help="Percentiles to draw lines at on the 1D "
                            "histograms.")
    group.add_argument('--no-marginal-lines', action='store_true',
                       default=False,
                       help="Do not add vertical lines in the 1D marginal "
                            "plots showing the marginal percentiles.")
    group.add_argument('--no-marginal-titles', action='store_true',
                       default=False,
                       help="Do not add titles giving the 1D credible range "
                            "over the 1D marginal plots.")
    # --- 2D plot styles -----------------------------------------------------
    group.add_argument("--plot-scatter", action='store_true', default=False,
                       help="Plot each sample point as a scatter plot.")
    group.add_argument("--plot-density", action="store_true", default=False,
                       help="Plot the posterior density as a color map.")
    group.add_argument("--plot-contours", action="store_true", default=False,
                       help="Draw contours showing the 50th and 90th "
                            "percentile confidence regions.")
    group.add_argument('--contour-percentiles', nargs='+', default=None,
                       type=float,
                       help="Percentiles to draw contours if different "
                            "than 50th and 90th.")
    # --- axis ranges --------------------------------------------------------
    group.add_argument('--mins', nargs='+', metavar='PARAM:VAL', default=[],
                       help="Specify minimum parameter values to plot. This "
                            "should be done by specifying the parameter name "
                            "followed by the value. Parameter names must be "
                            "the same as the PARAM argument in --parameters "
                            "(or, if no parameters are provided, the same as "
                            "the parameter name specified in the variable "
                            "args in the input file. If none provided, "
                            "the smallest parameter value in the posterior "
                            "will be used.")
    group.add_argument('--maxs', nargs='+', metavar='PARAM:VAL', default=[],
                       help="Same as mins, but for the maximum values to "
                            "plot.")
    # --- expected / injected parameter markers ------------------------------
    group.add_argument('--expected-parameters', nargs='+',
                       metavar='PARAM:VAL',
                       default=[],
                       help="Specify expected parameter values to plot. If "
                            "provided, a cross will be plotted in each axis "
                            "that an expected parameter is provided. "
                            "Parameter names must be "
                            "the same as the PARAM argument in --parameters "
                            "(or, if no parameters are provided, the same as "
                            "the parameter name specified in the variable "
                            "args in the input file.")
    group.add_argument('--expected-parameters-color', default='r',
                       help="What to color the expected-parameters cross. "
                            "Default is red.")
    group.add_argument('--plot-injection-parameters', action='store_true',
                       default=False,
                       help="Get the expected parameters from the injection "
                            "in the input file. There must be only a single "
                            "injection in the file to work. Any values "
                            "specified by expected-parameters will override "
                            "the values obtained for the injection.")
    group.add_argument('--pick-injection-by-time', action='store_true',
                       default=False,
                       help="In the case of multiple injections, pick one"
                            " for plotting based on its proximity in time.")
    add_injsamples_map_opt(group)
    return group
def _parse_param_bounds(optvals, optname):
    """Parse ``PARAM:VAL`` strings into a ``{param: float(val)}`` dict.

    Raises a ValueError naming ``--{optname}`` when an entry is malformed.
    """
    bounds = {}
    for x in optvals:
        x = x.split(':')
        if len(x) != 2:
            # message matches the original per-option error text
            raise ValueError("option --{} not specified correctly; "
                             "see help".format(optname))
        bounds[x[0]] = float(x[1])
    return bounds


def plot_ranges_from_cli(opts):
    """Parses the mins and maxs arguments from the `plot_posterior` option
    group.

    Parameters
    ----------
    opts : ArgumentParser
        The parsed arguments from the command line.

    Returns
    -------
    mins : dict
        Dictionary of parameter name -> specified mins. Only parameters that
        were specified in the --mins option will be included; if no parameters
        were provided, will return an empty dictionary.
    maxs : dict
        Dictionary of parameter name -> specified maxs. Only parameters that
        were specified in the --maxs option will be included; if no parameters
        were provided, will return an empty dictionary.
    """
    # the two options share the same PARAM:VAL syntax; parse with one helper
    mins = _parse_param_bounds(opts.mins, 'mins')
    maxs = _parse_param_bounds(opts.maxs, 'maxs')
    return mins, maxs
def expected_parameters_from_cli(opts):
    """Parses the --expected-parameters arguments from the `plot_posterior`
    option group.

    Parameters
    ----------
    opts : ArgumentParser
        The parsed arguments from the command line.

    Returns
    -------
    dict
        Dictionary of parameter name -> expected value. Only parameters that
        were specified in the --expected-parameters option will be included; if
        no parameters were provided, will return an empty dictionary.
    """
    expected = {}
    for x in opts.expected_parameters:
        x = x.split(':')
        if len(x) != 2:
            # fixed typo in the error message: "--expected-paramters"
            raise ValueError("option --expected-parameters not specified "
                             "correctly; see help")
        expected[x[0]] = float(x[1])
    return expected
def add_scatter_option_group(parser):
    """Adds the options needed to configure scatter plots.

    Parameters
    ----------
    parser : object
        ArgumentParser instance.

    Returns
    -------
    argparse._ArgumentGroup
        The group of options that was added to ``parser``.
    """
    group = parser.add_argument_group("Options for configuring the "
                                      "scatter plot.")
    # what quantity colors the points (PARAM[:LABEL] syntax)
    group.add_argument('--z-arg', type=str, default=None,
                       action=ParseParametersArg,
                       help='What to color the scatter points by. Syntax is '
                            'the same as the parameters option.')
    # colorbar limits and colormap
    group.add_argument("--vmin", type=float,
                       help="Minimum value for the colorbar.")
    group.add_argument("--vmax", type=float,
                       help="Maximum value for the colorbar.")
    group.add_argument("--scatter-cmap", type=str, default='plasma',
                       help="Specify the colormap to use for points. Default "
                            "is plasma.")
    return group
def add_density_option_group(parser):
    """Adds the options needed to configure contours and density colour map.

    Parameters
    ----------
    parser : object
        ArgumentParser instance.

    Returns
    -------
    argparse._ArgumentGroup
        The group of options that was added to ``parser``.
    """
    density_group = parser.add_argument_group("Options for configuring the "
                                              "contours and density color map")
    density_group.add_argument(
        "--density-cmap", type=str, default='viridis',
        help="Specify the colormap to use for the density. "
             "Default is viridis.")
    density_group.add_argument(
        "--contour-color", type=str, default=None,
        help="Specify the color to use for the contour lines. Default is "
             "white for density plots and black for scatter plots.")
    density_group.add_argument(
        # fixed help-text typo: "Defaut" -> "Default"
        "--contour-linestyles", type=str, default=None, nargs="+",
        help="Specify the linestyles to use for the contour lines. Default "
             "is solid for all.")
    density_group.add_argument(
        "--no-contour-labels", action="store_true", default=False,
        help="Don't put labels on the contours.")
    density_group.add_argument(
        '--use-kombine-kde', default=False, action="store_true",
        help="Use kombine's clustered KDE for determining 2D marginal "
             "contours and density instead of scipy's gaussian_kde (the "
             "default). This is better at distinguishing bimodal "
             "distributions, but is much slower than the default. For speed, "
             "suggest setting --kde-args 'max_samples:20000' or smaller if "
             "using this. Requires kombine to be installed.")
    density_group.add_argument(
        '--max-kde-samples', type=int, default=None,
        help="Limit the number of samples used for KDE construction to the "
             "given value. This can substantially speed up plot generation "
             "(particularly when plotting multiple parameters). Suggested "
             "values: 5000 to 10000.")
    density_group.add_argument(
        # fixed help-text typo: "argrument" -> "argument"
        '--kde-args', metavar="ARG:VALUE", nargs='+', default=None,
        help="Pass the given argument, value pairs to the KDE function "
             "(either scipy's or kombine's) when setting it up.")
    return density_group
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@pycbc@inference@option_utils.py@.PATH_END.py
|
{
"filename": "utilfuncs.py",
"repo_name": "IvS-KULeuven/IvSPythonRepository",
"repo_path": "IvSPythonRepository_extracted/IvSPythonRepository-master/sigproc/lmfit/utilfuncs.py",
"type": "Python"
}
|
"""Utility mathematical functions and common lineshapes for minimizer
"""
import numpy
import scipy
from scipy.special import gamma
# Placeholder registry; rebound to the full name -> callable map at module end.
CUSTOM_FUNCTIONS = {}
# cached constants shared by the lineshape functions below
log2 = numpy.log(2)
pi = numpy.pi
def gauss(x, amp, cen, wid):
    "gaussian function: wid = half-width at half-max"
    # exponent chosen so the value is amp/2 at |x - cen| == wid
    exponent = -log2 * (x - cen)**2 / wid**2
    return amp * numpy.exp(exponent)
def loren(x, amp, cen, wid):
    "lorenztian function: wid = half-width at half-max"
    # value is amp/2 where |x - cen| == wid
    u = (x - cen) / wid
    return amp / (1 + u**2)
def gauss_area(x, amp, cen, wid):
    "scaled gaussian function: wid = half-width at half-max"
    # normalization gives unit integral over x when amp == 1
    norm = numpy.sqrt(log2/pi)
    return norm * gauss(x, amp, cen, wid) / wid
def loren_area(x, amp, cen, wid):
    "scaled lorenztian function: wid = half-width at half-max"
    # normalization gives unit integral over x when amp == 1
    peak = loren(x, amp, cen, wid)
    return peak / (pi*wid)
def pvoigt(x, amp, cen, wid, frac):
    """pseudo-voigt function:
    (1-frac)*gauss(amp, cen, wid) + frac*loren(amp, cen, wid)"""
    # the mixing fraction is passed as the component amplitudes
    gpart = gauss(x, (1-frac), cen, wid)
    lpart = loren(x, frac, cen, wid)
    return amp * (gpart + lpart)
def pvoigt_area(x, amp, cen, wid, frac):
    """scaled pseudo-voigt function:
    (1-frac)*gauss_area(amp, cen, wid) + frac*loren_are(amp, cen, wid)"""
    # mixes the area-normalized components with weights (1-frac) and frac
    gpart = gauss_area(x, (1-frac), cen, wid)
    lpart = loren_area(x, frac, cen, wid)
    return amp * (gpart + lpart)
def pearson7(x, amp, cen, wid, expon):
    """pearson peak function """
    # force a float exponent; peak value is amp at x == cen
    xp = 1.0 * expon
    scaled = (((x-cen)/wid)**2) * (2**(1/xp) - 1)
    return amp / (1 + scaled)**xp
def pearson7_area(x, amp, cen, wid, expon):
    """scaled pearson peak function (unit integral when amp == 1)

    Fixes the original implementation, which called the bare name ``sqrt``
    (never imported at module level, so every call raised NameError) and
    carried an unreachable second ``return`` referencing undefined ``sigma``.
    """
    xp = 1.0 * expon
    # Pearson-VII area normalization in terms of gamma functions
    scale = gamma(xp) * numpy.sqrt((2**(1/xp) - 1)) / (gamma(xp-0.5))
    return scale * pearson7(x, amp, cen, wid, xp) / (wid*numpy.sqrt(pi))
# Registry of lineshape name -> callable (rebinds the placeholder defined
# near the top of the module); consumed by the minimizer machinery.
CUSTOM_FUNCTIONS = {'gauss': gauss, 'gauss_area': gauss_area,
                    'loren': loren, 'loren_area': loren_area,
                    'pvoigt': pvoigt, 'pvoigt_area': pvoigt_area,
                    'pearson7': pearson7, 'pearson7_area': pearson7_area}
|
IvS-KULeuvenREPO_NAMEIvSPythonRepositoryPATH_START.@IvSPythonRepository_extracted@IvSPythonRepository-master@sigproc@lmfit@utilfuncs.py@.PATH_END.py
|
{
"filename": "plot.py",
"repo_name": "zenitani/OpenMHD",
"repo_path": "OpenMHD_extracted/OpenMHD-master/3D_basic/plot.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
import openmhd3d as openmhd
# dummy index
# (positions of the field components along the last axis of `data`:
#  velocity, pressure, density, magnetic field, psi)
vx=0;vy=1;vz=2;pr=3;ro=4;bx=5;by=6;bz=7;ps=8
# reading the data ...
x,y,z,t,data = openmhd.data_read("data/field-00010.dat")
# reading the data (partial domain: [ix1,ix2] x [jx1,jx2] x [kx1,kx2])
# x,y,z,t,data = openmhd.data_read("data/field-00010.dat",ix1=0,ix2=100,jx1=11)
def plot_volume(ax, array, x, y, z, xcut=0.0, ycut=0.0, zcut=0.0, cmap='jet'):
    """For a given 3d *array* plot three cuts, with help from their subplanes.
    Generalized from https://matplotlib.org/stable/gallery/mplot3d/intersecting_planes.html.

    ax      : 3D matplotlib axes to draw on
    array   : 3D scalar field, shape matching (len(x), len(y), len(z))
    x, y, z : 1D coordinate arrays for the three axes
    *cut    : coordinate at which each orthogonal plane is drawn
    cmap    : name of the matplotlib colormap
    """
    ix0, jx0, kx0 = array.shape
    # Find the first grid index past each cut coordinate, and split that axis
    # into the two halves on either side of the cut (so the three planes can
    # be drawn interleaved quadrant by quadrant).
    # NOTE(review): if a cut lies outside the interior of its axis the index
    # variable is never assigned and a NameError follows -- confirm the cut
    # values are always interior to the domain.
    for i in range(2,ix0-1):
        if( x[i] > xcut ):
            ix1 = i; xrlist = [ slice(None,ix1), slice(ix1,None) ]
            break
    for j in range(2,jx0-1):
        if( y[j] > ycut ):
            jx1 = j; yrlist = [ slice(None,jx1), slice(jx1,None) ]
            break
    for k in range(2,kx0-1):
        if( z[k] > zcut ):
            kx1 = k; zrlist = [ slice(None,kx1), slice(kx1,None) ]
            break
    cmap = plt.get_cmap(cmap)
    # Color scale symmetric about zero.  NOTE(review): for an identically-zero
    # array both limits are 0 and the normalizations below divide by zero.
    mymax = max(array.max(), -array.min()) if( array.max() > 0.0 ) else 0.0
    mymin = min(array.min(), -array.max()) if( array.min() < 0.0 ) else 0.0
    # plane x = x[ix1], drawn as four quadrants
    facecolors = cmap((array[ix1,:,:] - mymin) / (mymax - mymin))
    Z, Y = np.meshgrid( z, y )
    X = x[ix1] * np.ones_like(Y)
    for yr in yrlist:
        for zr in zrlist:
            ax.plot_surface(X[yr,zr], Y[yr,zr], Z[yr,zr], rstride=3, cstride=3, facecolors=facecolors[yr,zr], shade=False)
    # plane y = y[jx1] (semi-transparent so the other planes show through)
    facecolors = cmap((array[:,jx1,:] - mymin) / (mymax - mymin))
    Z, X = np.meshgrid( z, x )
    Y = y[jx1] * np.ones_like(Z)
    for xr in xrlist:
        for zr in zrlist:
            ax.plot_surface(X[xr,zr], Y[xr,zr], Z[xr,zr], rstride=3, cstride=3, facecolors=facecolors[xr,zr], shade=False, alpha=0.5)
    # plane z = z[kx1]
    facecolors = cmap((array[:,:,kx1] - mymin) / (mymax - mymin))
    Y, X = np.meshgrid( y, x )
    Z = z[kx1] * np.ones_like(X)
    for xr in xrlist:
        for yr in yrlist:
            ax.plot_surface(X[xr,yr], Y[xr,yr], Z[xr,yr], rstride=3, cstride=3, facecolors=facecolors[xr,yr], shade=False)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# magnetic energy density B^2/2 from the three field components
tmp = 0.5*( data[:,:,:,bx]**2 + data[:,:,:,by]**2 + data[:,:,:,bz]**2 )
plot_volume(ax, tmp, x, y, z, cmap='jet')
# axis labels and limits spanning the full simulation domain
ax.set_xlabel("X",size=16)
ax.set_ylabel("Y",size=16)
ax.set_zlabel("Z",size=16)
ax.set_xlim( x[0],x[-1] )
ax.set_ylim( y[0],y[-1] )
ax.set_zlim( z[0],z[-1] )
ax.set_aspect('equal')
# plt.title('Magnetic energy (t = %6.1f)' % t, size=20)
# plot
plt.show()
# image file
# plt.savefig('output.png', dpi=144)
# end
|
zenitaniREPO_NAMEOpenMHDPATH_START.@OpenMHD_extracted@OpenMHD-master@3D_basic@plot.py@.PATH_END.py
|
{
"filename": "prepare_pfailsimulations.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/prepare_pfailsimulations.py",
"type": "Python"
}
|
import numpy as np
import scipy
import sys
from scipy import stats
from scipy.stats import norm
from scipy.stats import binom
import fitsio
import glob
import os
import matplotlib.pyplot as plt
import statistics
import argparse
import astropy
from astropy.table import Table
from astropy.table import join
from astropy.table import Column
from astropy.time import Time
from astropy.io import fits
from mpi4py import MPI
rank = MPI.COMM_WORLD.Get_rank()  # MPI rank of this process
import warnings
warnings.filterwarnings('ignore')
# pick up the SSR model utilities from this specific LSS checkout
sys.path.insert(0,'/global/cfs/cdirs/desi/users/akrolew/LSS/py/LSS')
from ssr_tools_new import model_ssr, model_ssr_zfac
import LSS.common_tools as common
# number of fibers handled per task (6th command-line argument)
fibers_per_task = int(sys.argv[6])
# createbins
def create_bins(lower_bound, width, upper_bound):
    """Return a list of (low, low + width) bin-edge tuples.

    Bins start at integer *lower_bound* and are generated for each ``low`` in
    ``range(lower_bound, upper_bound, width)``, matching the original loop.
    Returns an empty list when the range is empty.
    """
    return [(low, low + width)
            for low in range(lower_bound, upper_bound, width)]
def find_bin(value, bins):
    """Return the index of the half-open bin [low, high) containing *value*.

    *bins* is a list of (low, high) tuples (as built by ``create_bins``).
    Returns -1 when *value* falls outside every bin.
    """
    for i, (low, high) in enumerate(bins):
        if low <= value < high:
            return i
    return -1
#function to preprocess data from fitsfile
def ash_code(tp): # Make changes for Y1/daily will run
from desitarget import targetmask
if tp =='LRG':
lb=lower[0]
elif tp =='BGS_BRIGHT':
lb = lower[1]
elif tp =='BGS_FAINT':
lb=lower[1]
elif tp =='ELG_LOPnotqso':
lb = lower[2]
elif tp =='ELG_VLOnotqso':
lb = lower[2]
elif tp =='QSO':
lb = lower[3]
if (survey == 'main') or (survey == 'Y1') or (survey == 'DA02') or (survey == 'DA2'):
if ((specver == 'jura-v1') or (specver == 'kibo-v1')) and (tp[:3] != 'BGS'):
zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_spec_zdone.fits'
elif ((specver == 'jura-v1') or (specver == 'kibo-v1')) and (tp[:3] == 'BGS'):
zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_spec_zdone.fits'
else:
if tp == 'BGS_FAINT':
zf = basedir+'/'+survey+'/LSS/'+specver+('/LSScats/%s/' % ver)+'BGS_ANY'+'_full_noveto.dat.fits'
elif tp == 'ELG_VLOnotqso':
zf = basedir+'/'+survey+'/LSS/'+specver+('/LSScats/%s/' % ver)+'ELG'+'_full_noveto.dat.fits'
else:
zf = basedir+'/'+survey+'/LSS/'+specver+('/LSScats/%s/' % ver)+tp+'_full_noveto.dat.fits'
dz = Table(fitsio.read(zf))
if (specver == 'jura-v1') or (specver == 'kibo-v1'):
if tp[:3] == 'BGS':
targs = Table(fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/BGS_ANYtargetsDR9v1.1.1.fits'))
else:
targs = Table(fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/%stargetsDR9v1.1.1.fits' % tp[:3]))
dz = join(dz, targs, keys='TARGETID',join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_TARG'])
if tp == 'LRG':
wtype = ((dz['DESI_TARGET'] & 2**0) != 0)
elif tp == 'ELG_LOPnotqso':
wtype = ((dz['DESI_TARGET'] & 2**5) != 0)
wtype &= ((dz['DESI_TARGET'] & 2**2) == 0)
elif tp == 'QSO':
wtype = ((dz['DESI_TARGET'] & 2**2) != 0)
elif tp == 'BGS_BRIGHT':
wtype = ((dz['BGS_TARGET'] & 2**1) != 0)
dz = dz[wtype]
if tp == 'ELG_LOPnotqso':
wtype = ((dz['DESI_TARGET'] & 4) == 0) #remove QSO
dz = dz[wtype]
elif tp == 'ELG_VLOnotqso':
wtype = ((dz['DESI_TARGET'] & 4) == 0) #remove QSO
dz = dz[wtype]
wtype = ((dz['DESI_TARGET'] & targetmask.desi_mask['ELG_VLO']) != 0) #keep VLO
dz = dz[wtype]
elif tp == 'BGS_FAINT':
wtype = ((dz['BGS_TARGET'] & targetmask.bgs_mask['BGS_BRIGHT']) == 0) #remove BGS_BRIGHT
dz = dz[wtype]
dz = common.cut_specdat(dz)
from LSS.globals import main
#pars = main(tp,specver)
#print('len of pars.elgzf',len(pars.elgzf))
elif survey == 'SV3':
sys.exit('not written for SV3 yet')
zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_Alltiles.fits'
dz = Table(fitsio.read(zf))
desitarg = 'SV3_DESI_TARGET'
bit = 1 #for selecting LRG
wtype = ((dz[desitarg] & bit) > 0)
print(len(dz[wtype]))
#dz = dz[wtype&wg]
dz = dz[wtype]
wz = dz['ZWARN'] != 999999 #this is what the null column becomes
wz &= dz['ZWARN']*0 == 0 #just in case of nans
wz &= dz['COADD_FIBERSTATUS'] == 0
ff = dz[wz]
zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_Alltiles.fits'
dz = Table(fitsio.read(zf))
desitarg = 'SV3_BGS_TARGET'
wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0)
print(len(dz[wtype]))
#dz = dz[wtype&wg]
dz = dz[wtype]
wz = dz['ZWARN'] != 999999 #this is what the null column becomes
wz &= dz['ZWARN']*0 == 0 #just in case of nans
wz &= dz['COADD_FIBERSTATUS'] == 0
ff2 = dz[wz]
if (tp == 'ELG_LOPnotqso') or (tp == 'ELG_VLOnotqso'):
if (specver == 'jura-v1') or (specver == 'kibo-v1'):
emlin = Table(fitsio.read(basedir+'/'+survey+'/LSS/'+specver+'/emlin_catalog.fits'))
emlin['TILEID'] = emlin['TILEID'].astype('int')
dz = join(dz, emlin, keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_EMLIN'])
if tp == 'QSO':
if (specver == 'jura-v1') or (specver == 'kibo-v1'):
qso = Table(fitsio.read(basedir+'/'+survey+'/QSO/%s/QSO_cat_%s_cumulative_v1.fits' % (specver.split('-')[0],specver.split('-')[0])))
dz = join(dz,qso,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF'])
dz['Z'].name = 'Z_orig' #rename the original redrock redshifts
dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead
z_tot = dz['ZWARN'] != 999999
z_tot &= dz['ZWARN']*0 == 0
z_tot &= ((dz['COADD_FIBERSTATUS'] == 0) | (dz['COADD_FIBERSTATUS'] == 8))
if tp[:3] == 'BGS':
z_tot &= dz['TSNR2_BGS'] > 1000
else:
z_tot &= dz['TSNR2_ELG'] > 80
# TSNR cuts
print('L148',np.unique(dz[z_tot]['COADD_FIBERSTATUS']))
if tp == 'LRG':
z_suc= dz['ZWARN']==0
z_suc &= dz['DELTACHI2']>15
z_suc &= dz['Z']<1.5
if (tp == 'ELG_LOPnotqso') or (tp == 'ELG_VLOnotqso'):
#else:
z_suc = np.log10(dz['OII_FLUX'] * np.sqrt(dz['OII_FLUX_IVAR']))+0.2*np.log10(dz['DELTACHI2'])> 0.9
if tp == 'QSO':
#print(5/0)
#qsozf = pars.qsozf
#if specver == 'guadalupe':
# #qsozf = '/global/cfs/cdirs/desi/users/edmondc/QSO_catalog/guadalupe/QSO_cat_guadalupe_cumulative.fits'
#arz = Table(fitsio.read(qsozf))
#arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','Z_QN'])
#arz['TILEID'] = arz['TILEID'].astype(int)
#dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF'])
#dz['Z'].name = 'Z_RR' #rename the original redrock redshifts
#dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead
if (specver == 'jura-v1') or (specver == 'kibo-v1'):
z_suc = (dz['Z'] != 999999.0) | (dz['Z'].mask == False)
else:
z_suc = dz['Z'] != 999999.0
print('L191',np.unique(dz[z_tot]['COADD_FIBERSTATUS']))
ind = np.where(dz['TARGETID'] == 39627380788040729)[0]
print('dz ind 192',dz[ind])
#print('z_suc ind',z_suc[ind])
#print('z_tot ind',z_tot[ind])
print('zwarn == 0 ind',(dz['ZWARN'] == 0)[ind])
print('deltachi2 > 40',(dz['DELTACHI2'] > 40)[ind])
print('deltachi2',dz['DELTACHI2'][ind])
if tp == 'BGS_BRIGHT':
z_suc = dz['ZWARN']==0
z_suc &= dz['DELTACHI2']>40
ind = np.where(dz['TARGETID'] == 39627380788040729)[0]
print('dz ind 195',dz[ind])
print('z_suc ind',z_suc[ind])
print('z_tot ind',z_tot[ind])
print('zwarn == 0 ind',(dz['ZWARN'] == 0)[ind])
print('deltachi2 > 40',(dz['DELTACHI2'] > 40)[ind])
if tp == 'BGS_FAINT':
z_suc = dz['ZWARN']==0
z_suc &= dz['DELTACHI2']>40
# QSO: 0.8 < z < 3.5
# ELG: 0.8 < z < 1.5
# LRG: 0.4 < z < 1.1
# BGS 0.1 < z < 0.5
if (specver == 'jura-v1') or (specver == 'kibo-v1'):
cat = Table(dz)
#cat['Z_not4clus'] = cat['Z']
selobs = cat['ZWARN']*0 == 0
selobs &= cat['ZWARN'] != 999999
if tp[:3] != 'BGS':
selobs &= cat['TSNR2_ELG'] > 80
else:
selobs &= cat['TSNR2_BGS'] > 1000
if tp[:3] == 'LRG':
band = 'Z'
mintsnr=500/12.15
#maxtsnr =2000/12.15
maxtsnr =1700/12.15
elif tp[:3] == 'QSO':
band = 'R'
mintsnr=450/(8.60/0.255)
maxtsnr=1800/(8.60/0.255)
elif tp[:3] == 'BGS':
band = 'R'
mintsnr=120/(12.15/89.8)
maxtsnr =300/(12.15/89.8)
elif tp[:3] == 'ELG':
band = 'G'
mintsnr = 80
maxtsnr = 200
if tp[:3] != 'ELG':
modelN = model_ssr(cat[selobs],tracer=tp[:3],reg='N',band=band,tsnr_min=mintsnr,tsnr_max=maxtsnr,readpars=True,outdir=basedir+'/'+survey+'/LSS/'+specver+'/LSScats/%s/' % ver,outfn_root=tp,overwrite_pars_ssrmaxflux=False)
modelS = model_ssr(cat[selobs],tracer=tp[:3],reg='S',band=band,tsnr_min=mintsnr,tsnr_max=maxtsnr,readpars=True,outdir=basedir+'/'+survey+'/LSS/'+specver+'/LSScats/%s/' % ver,outfn_root=tp,overwrite_pars_ssrmaxflux=False)
else:
cat.add_column(np.log10(cat['OII_FLUX'] * np.sqrt(cat['OII_FLUX_IVAR']))+0.2*np.log10(cat['DELTACHI2']), name='o2c')
modelN = model_ssr_zfac(cat[selobs],reg='N',outdir=basedir+'/'+survey+'/LSS/'+specver+'/LSScats/test/',outfn_root=tp[:3])
modelS = model_ssr_zfac(cat[selobs],reg='S',outdir=basedir+'/'+survey+'/LSS/'+specver+'/LSScats/test/',outfn_root=tp[:3])
if tp[:3] == 'LRG':
modelN.fluxfittype='piecewise'
modelN.flux_break = 3
modelS.fluxfittype = 'piecewise'
modelS.flux_break = 3
elif tp[:3] != 'ELG':
modelN.fluxfittype='linear'
modelS.fluxfittype='linear'
if tp[:3] != 'ELG':
parsmaxflux = np.loadtxt(basedir+'/'+survey+'/LSS/'+specver+'/LSScats/test/%sNpars_ssrmaxflux.txt' % tp)
modelN.pars_ferf = parsmaxflux
parsmaxflux = np.loadtxt(basedir+'/'+survey+'/LSS/'+specver+'/LSScats/test/%sSpars_ssrmaxflux.txt' % tp)
modelS.pars_ferf = parsmaxflux
wtfN, modN = modelN.add_modpre(cat)
wtfS, modS = modelS.add_modpre(cat)
mod = np.zeros(len(cat))
mod[cat['PHOTSYS'] == 'S'] = modS[cat['PHOTSYS'] == 'S']
mod[cat['PHOTSYS'] == 'N'] = modN[cat['PHOTSYS'] == 'N']
wtf = np.zeros(len(cat))
wtf[cat['PHOTSYS'] == 'S'] = wtfS[cat['PHOTSYS'] == 'S']
wtf[cat['PHOTSYS'] == 'N'] = wtfN[cat['PHOTSYS'] == 'N']
ind = np.where(dz['TARGETID'] == 39627380788040729)[0]
print('dz ind 277',dz[ind])
print('z_suc ind',z_suc[ind])
print('z_tot ind',z_tot[ind])
print('L294',np.unique(dz[z_tot]['COADD_FIBERSTATUS']))
return(dz,z_suc,z_tot,lb,mod)
#fiber cut function
def fiber_cut(cut, tp, dz, z_tot, z_suc):
    """Optionally remove a list of known-bad fibers from the success mask.

    Parameters
    ----------
    cut : int
        0 -> no cut ('full'); 1 -> hand-checked 3-sigma bad list ('check');
        2 -> fibers whose stored binomial failure p-value is below a
        4-sigma threshold ('mfailp'); 3 -> the 'newcut' list, which only
        exists for LRG and BGS_ANY ('newcut', else 'uncut').
    tp : str
        Tracer name; used to locate the per-tracer cut files.
    dz : table / structured array
        Catalog with at least a 'FIBER' column.
    z_tot, z_suc : bool arrays
        Observed / successful-redshift masks. z_suc is updated in place
        for cut != 0.

    Returns
    -------
    (tag, dz, z_tot, z_suc)

    NOTE(review): relies on module-level names survey, specver and norm
    (presumably scipy.stats.norm) for cut==2 — confirm they are in scope.
    """
    if cut == 0:
        tag = 'full'
    elif cut == 1:
        badfiber_list = np.loadtxt("/global/homes/a/akrolew/lrg+bgs_3sig_bad.txt", dtype=int)
        tag = 'check'
    elif cut == 2:
        # Column 6 of the zsuc file holds the per-fiber failure p-value.
        mfailpvalues = np.loadtxt("/global/homes/a/akrolew/foldtest/" + survey + "/" + specver + "/" + tp + "_zsuc.txt")[:, 6]
        tag = 'mfailp'
        # Flag fibers below a 4-sigma one-sided threshold.
        mfail = mfailpvalues < norm.cdf(-4.0)
        badfiber_list = np.loadtxt("/global/homes/a/akrolew/foldtest/" + survey + "/" + specver + "/" + tp + "_zsuc.txt")[:, 0]
        badfiber_list = badfiber_list[mfail]
    elif cut == 3:
        # BUG FIX: the original condition was `tp=='LRG' or 'BGS_ANY'`,
        # which is always truthy (non-empty string literal), so the
        # 'uncut' branch below was unreachable. Compare tp explicitly.
        if tp == 'LRG' or tp == 'BGS_ANY':
            badfiber_list = np.loadtxt("/global/homes/a/akrolew/foldtest/" + survey + "/" + specver + "/plotdata/newcut/" + tp + "_newcut.txt", dtype=int)
            tag = 'newcut'
        else:
            tag = 'uncut'
            badfiber_list = []
    else:
        print("wrong input for cut\n Execution stopped")
        sys.exit()  # was bare exit(); sys.exit is the proper call in scripts
    if cut:
        # Start from an all-True mask and zero out rows on bad fibers.
        badfib_mask = dz == dz
        for f in badfiber_list:
            badfib_mask[dz['FIBER'] == f] = False
        z_suc &= badfib_mask
    # Success rate with the current criterion applied.
    print("zsuccess rate for " + tp, len(dz[z_suc & z_tot]) / len(dz[z_tot]))
    return (tag, dz, z_tot, z_suc)
#writing files
def write_in_file(tp,dz,z_tot,z_suc,cut,tag):
    """Compute per-fiber redshift-success statistics, bin each fiber by
    the log10 of its binomial failure p-value, and (for tag=='full')
    write one summary row per fiber to <tp>_zsuc.txt.

    z_tot is modified in place (fibers with zero successes are dropped),
    and dz gains a 'bin' column. Returns (bins, binno, dz, z_tot, z_suc).

    NOTE(review): relies on module-level names survey, specver, lb, ub,
    create_bins, find_bin and binom (presumably scipy.stats.binom) —
    confirm they are in scope when this is called.
    """
    # Per-fiber counts: n_tot = observations, n_g = good redshifts.
    fibl,n_tot = np.unique(dz[z_tot]['FIBER'],return_counts=True)
    fiblg,n_g = np.unique(dz[z_suc&z_tot]['FIBER'],return_counts=True)
    fib_test = np.isin(fibl,fiblg)
    # Drop fibers with zero successes so fibl and fiblg line up 1:1.
    z_tot &= np.isin(dz['FIBER'],fibl[fib_test]) # looks like z tots change here due to isin(ask ashley)
    fibl,n_tot = np.unique(dz[z_tot]['FIBER'],return_counts=True)# n total is based on updated z_tot due to previous line
    # sumz=sum(dz
    if np.array_equal(fibl,fiblg):
        gfrac = n_g/n_tot
    else:
        sys.exit('need to put something in for mismatch fiber lists')
    fn = '/global/homes/a/akrolew/foldtest/'
    fn=fn+survey+'/'+specver+'/'+tp+'_zsuc.txt'
    sumz=[]
    sumzsq=[]
    # Global mean success rate; used as the binomial probability p below.
    mean=np.sum(n_g)/np.sum(n_tot)
    print('mean',mean)
    #print(1/0)
    if tag != 'full':
        # Re-use p-values already written to the zsuc file (column 6)
        # instead of recomputing them for this tag.
        txtmfailp =np.loadtxt("/global/homes/a/akrolew/foldtest/"+survey+"/"+specver+"/"+tp+"_zsuc.txt")[:,6]# dont recalculate!!
        txtfib = np.loadtxt("/global/homes/a/akrolew/foldtest/"+survey+"/"+specver+"/"+tp+"_zsuc.txt")[:,0]
    binno=[]
    value=[]
    morefailp=[]
    # NOTE(review): lb comes from ash_code and ub is module-level;
    # create_bins is defined elsewhere in this file — confirm signature.
    bins = create_bins(lb,1,ub)
    #tsnr_mean=[]
    #modsucmean=[]
    if tag == 'full':
        fo = open(fn,'w')
    for ii in range(len(fibl)):
        # Mask for good redshifts observed on this fiber.
        m=dz['FIBER']==fibl[ii]
        m&= z_suc
        m&= z_tot
        n = n_tot[ii]
        s = n_g[ii]
        p = mean
        #modsucmean.append(np.mean(dz['mod_success_rate'][m]))
        zvalues= dz['Z'][m&z_tot]
        #if tp == 'ELGnotqso':
        #tsnr_mean.append(np.mean(dz['TSNR2_ELG'][m]))
        #else:
        #exec("tsnr_mean.append( np.mean(dz['TSNR2_"+tp+"'][m]))")
        sumz.append(np.sum(zvalues))
        sumzsq.append(np.sum(zvalues**2))
        if tag != 'full':
            morefailp.append(txtmfailp[txtfib == fibl[ii]])# load it from text files directly!!!
        else:
            # P(successes <= s-1 | n trials, p) under a binomial model.
            morefailp.append(binom.cdf(s-1, n, p))
        value.append(np.log10(morefailp[ii]))
        ####cdf = value = morefailp = 1/2 * (1 + erf(x/sqrt(2)))
        #value2 = np.sqrt(2) * scipy.special.erfinv(2 * morefailp[ii] - 1)
        binno.append(find_bin(value[ii],bins))
        if tag == 'full':
            # One row per fiber: fiber id, success fraction, counts,
            # sum(z), sum(z^2), p-value, bin index.
            fo.write(str(fibl[ii])+' '+str(n_g[ii]/n_tot[ii])+' '+str(n_g[ii])+' '
                +str(n_tot[ii])+' '+str(sumz[ii])+' '+str(sumzsq[ii])+' '
                +str(morefailp[ii])+' '+str(binno[ii])+'\n')
    if tag == 'full':
        fo.close()
    # Attach the per-fiber bin index to every row of the catalog.
    dz['bin']=np.zeros(len(dz))
    for i in range(len(fibl)):
        dzfib= dz['FIBER']==fibl[i]
        dz['bin'][dzfib]= binno[i]
    return (bins, binno, dz, z_tot, z_suc)
#function to create p_values for each fiber
def weighted_pvalues(dz,z_tot,z_suc):
    """For each fiber, Monte-Carlo the distribution of the success
    fraction implied by the per-object model success rates and record
    the empirical p-value of the observed fraction.

    The work is partitioned across tasks: fiber ii is processed only if
    (ii - int(sys.argv[5])) // fibers_per_task == rank, and each task
    writes its rows to its own summary file.

    NOTE(review): relies on module-level names tp, rank,
    fibers_per_task and time — confirm they are defined before calling.
    NOTE(review): var, ss, mod_suc in the return tuple are only bound
    if this task processed at least one fiber; Nlownt/Slownt are never
    filled (the assignments are commented out).
    """
    dz['p_value'] = np.zeros(len(dz))
    #dz['p_suc']= np.zeros(len(dz))
    #dz['mod_suc']=np.zeros(len(dz))
    #dz['p_suc_w']=np.zeros(len(dz))
    # Overall observed success rate (not used below, kept for reference).
    p_suc = len(dz[z_suc&z_tot])/len(dz[z_tot])
    #dz = dz[z_tot]
    #norm = len(dz['WEIGHT_ZFAIL'])/np.sum(1/dz['WEIGHT_ZFAIL'])
    fibl,n_tot = np.unique(dz[z_tot]['FIBER'],return_counts=True)
    fiblg,n_g = np.unique(dz[z_suc&z_tot]['FIBER'],return_counts=True)
    # Process every fiber (earlier versions restricted to low-n lists).
    lownt = fibl
    #lownt = np.loadtxt('/global/homes/a/akrolew/foldtest/Y1/himalayas/concat/fibermissing.txt')
    #lownt = np.loadtxt('/global/cfs/cdirs/desi/users/akrolew/QSOmissing.txt')
    p_valuefib = np.zeros(len(lownt))
    Varr = np.zeros(len(lownt))
    Nsig = np.zeros(len(lownt))
    Nlownt = np.zeros(len(lownt))
    Slownt = np.zeros(len(lownt))
    #ff2['obs_fail_p'] = np.zeros(len(ff2))
    #lownt = fibl[np.where(n_tot <100)]
    #print("number of low n tot:", len(lownt))
    start_time = time.time()
    n = np.zeros(len(lownt))
    s = np.zeros(len(lownt))
    mean_x = np.zeros(len(lownt))
    mean_y = np.zeros(len(lownt))
    mean_mod_suc = np.zeros(len(lownt))
    frac_succ = np.zeros(len(lownt))
    # Per-task output file, offset by the command-line task index.
    f = open('/global/cfs/cdirs/desi/users/akrolew/summary/%s/%i.txt'%(tp,int(sys.argv[5]) + rank),'w')
    # First pass: per-fiber counts, focal-plane position and mean model
    # success rate for every fiber.
    for j in range(len(lownt)):
        fmask=dz['FIBER']==lownt[j]
        n[j] = n_tot[fibl==lownt[j]]
        s[j] = n_g[fiblg==lownt[j]]
        mean_x[j] = np.mean(dz['FIBERASSIGN_X'][fmask])
        mean_y[j] = np.mean(dz['FIBERASSIGN_Y'][fmask])
        mod_suc = dz['mod_success_rate'][fmask & z_tot]
        mean_mod_suc[j] = np.mean(mod_suc)
        #frac_suc = s/n
        frac_succ[j] = s[j]/n[j] #frac_suc
    # Rescale the model so its mean matches the observed mean success
    # rate, then clip to the valid probability range [0, 1].
    scale = np.mean(frac_succ)/np.mean(mean_mod_suc)
    dz['mod_success_rate'] = dz['mod_success_rate']*scale
    dz['mod_success_rate'][dz['mod_success_rate'] > 1] = 1
    dz['mod_success_rate'][dz['mod_success_rate'] < 0] = 0
    mean_mod_suc = mean_mod_suc*scale
    #tot_mod_suc_rate = np.sum(dz['mod_success_rate'][z_tot]) / len(dz['mod_success_rate'][z_tot])
    #print(np.where(lownt == 3986)[0])
    # Second pass: Monte-Carlo p-value for each fiber owned by this task.
    for ii in range(len(lownt)):
        if (ii-int(sys.argv[5]))//fibers_per_task == rank: #change number from 0 to 7 for each batch run
            #if ii == 661:
            fmask=dz['FIBER']==lownt[ii]
            #if fibl[ii] == 1995:
            # print(5/0)
            #fmask&= z_suc
            fmask&= z_tot
            #n = n_tot[fibl==lownt[ii]]
            #s = n_g[fibl==lownt[ii]]
            #print(s)
            #print(len(dz['WEIGHT_ZFAIL'][fmask]))
            #obs = dz['WEIGHT_ZFAIL'][fmask]
            #testobs = 1/obs * p_suc * norm
            #TSNR2_Sum = np.sum(1/dz['WEIGHT_ZFAIL'][fmask])
            mod_suc = dz['mod_success_rate'][fmask]
            #print(5/0)
            #frac_suc = s/n
            #frac_suc_w = np.sum(dz['WEIGHT_ZFAIL'][fmask])/n
            # 3e6 Bernoulli realizations of the fiber's success fraction.
            sim_n= int(3e6)
            np.random.seed(7)
            p_succ = np.zeros(sim_n)
            #p_succ = np.sum(sample, axis=1)/len(mod_suc)
            for i in range(sim_n):
                sample = np.random.binomial(1, mod_suc)
                p_succ[i] = np.sum(sample)/len(sample)
            #p_succ = np.fromfile('/global/homes/a/akrolew/foldtest/Y1/himalayas/fibersims/%i.bin' % lownt[ii])
            #print(p_succ, frac_suc)
            print("--- Total time is %s seconds ---" % (time.time() - start_time))
            #s = np.sqrt(1/n*(np.sum(mod_suc-np.mean(mod_suc))**2))
            ss = np.std(mod_suc)
            # Analytic variance of the success fraction given varying
            # per-object probabilities.
            var = (n[ii]*np.mean(mod_suc)*(1-np.mean(mod_suc))-n[ii]*ss**2)/n[ii]**2 #confirm with alex
            nsig = (frac_succ[ii]-np.mean(mod_suc))/np.sqrt(var)
            # Empirical p-value: fraction of simulations at or below the
            # observed success fraction (1e-10 guards against ties).
            p_value = np.searchsorted(np.sort(p_succ), 1e-10+frac_succ[ii])/sim_n #changed it to frac_suc from 1-frac_suc
            dz['p_value'][fmask] = p_value
            p_valuefib[ii] = p_value
            Varr[ii] = var
            Nsig[ii] = nsig
            #Nlownt[ii] = n
            #Slownt[ii] = s
            #np.savetxt('/global/homes/a/akrolew/foldtest/Y1/himalayas/fibersims/%i.txt'%lownt[ii],p_succ)
            # Cache the raw simulated distribution for later re-use.
            p_succ.tofile('/global/cfs/cdirs/desi/users/akrolew/fibersims/%s/%i.bin'%(tp,lownt[ii]))
            #print("2")
            #print(nsig)
            #print("3")
            #dz['p_suc'][fmask] = frac_suc
            #print("4")
            #print(ii,dz['p_suc'][ii])
            #dz['p_suc_w'][fmask] = frac_suc_w
            #dz['testobs'][ii] = np.mean(testobs)
            #print("loop no:",ii)
            f.write('%i %.5f %.5e %.5f %i %i %.5f %.5f %.2f %.2f\n' % (fibl[ii],p_value, var, nsig, n[ii], s[ii], mean_mod_suc[ii],frac_succ[ii], mean_x[ii], mean_y[ii]))
            print('%i %.5f %.5e %.5f %i %i %.5f %.5f %.2f %.2f\n' % (fibl[ii],p_value, var, nsig, n[ii], s[ii], mean_mod_suc[ii],frac_succ[ii], mean_x[ii], mean_y[ii]))
            f.flush()
    print("--- Total time is %s seconds ---" % (time.time() - start_time))
    f.close()
    return(dz,var,Varr,Nsig,p_valuefib,lownt,ss,n,mod_suc,s,Nlownt,Slownt)
# ---- Script driver: configuration comes from the command line ----
# argv: 1=survey, 2=spectroscopic version, 3=tracer, 4=catalog version.
basedir='/global/cfs/cdirs/desi/survey/catalogs'
survey=sys.argv[1]#str(input("Enter survey name:\n")) #DA02 or main
specver=sys.argv[2]#str(input("Enter specver:\n"))
ver = sys.argv[4]
tracers=[sys.argv[3]]
cut = 0 #int(input("Remove the badfibers from the analysis?\n 0)No 1)check 2)mfailp 3) newcut\n"))
lower = [-22,-14,-13,-6]#use as appropriate for version
ub = 0
import time
start_time = time.time()
for tp in tracers:
    # Build the catalog and the success/total masks for this tracer.
    dz, z_suc, z_tot, lb,mod= ash_code(tp) #Selects catalogs and cuts them down to Z_suc using criteria for tracers.
    tag, dz, z_tot, z_suc = fiber_cut(cut, tp, dz, z_tot, z_suc)# Makes a bad fiber cut and updates z_suc/z_tot
    bins, binno, dz,z_tot, z_suc = write_in_file(tp, dz, z_tot, z_suc, cut, tag)# creates bins and binnos
    # Persist a truncated catalog (fiber position + model success rate)
    # plus the masks for the downstream p-fail simulations.
    t = Table(np.array([dz['FIBER'],dz['FIBERASSIGN_X'],dz['FIBERASSIGN_Y'],mod]).T,names=('FIBER','FIBERASSIGN_X','FIBERASSIGN_Y','mod_success_rate'))
    t.write('pfailsimulations_%s/%s_cat_trunc.fits' % (specver,tp),overwrite=True)
    np.savetxt('pfailsimulations_%s/%s_ztot.txt' % (specver,tp),z_tot)
    if tp == 'QSO':
        # QSO z_suc is a masked array; convert the mask to a 0/1 flag
        # array before writing (1 = unmasked / valid).
        fucker = np.zeros(len(z_suc))
        fucker[z_suc.mask==True] = 0
        fucker[z_suc.mask==False] = 1
        np.savetxt('pfailsimulations_%s/QSO_zsuc.txt' % specver,fucker)
    else:
        np.savetxt('pfailsimulations_%s/%s_zsuc.txt' % (specver,tp),z_suc)
    #weighted_pvalues(dz,z_tot,z_suc)
#print(dz['p_value'])
#print('time',time.time()-start_time)
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@prepare_pfailsimulations.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Harry45/DESEMU",
"repo_path": "DESEMU_extracted/DESEMU-main/jax_cosmo/scipy/__init__.py",
"type": "Python"
}
|
Harry45REPO_NAMEDESEMUPATH_START.@DESEMU_extracted@DESEMU-main@jax_cosmo@scipy@__init__.py@.PATH_END.py
|
|
{
"filename": "_treemapcolorway.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/_treemapcolorway.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TreemapcolorwayValidator(_plotly_utils.basevalidators.ColorlistValidator):
    """Validator for the ``layout.treemapcolorway`` colorlist property."""

    def __init__(self, plotly_name="treemapcolorway", parent_name="layout", **kwargs):
        # Extract the overridable options up front, falling back to the
        # defaults for this property, then hand everything to the base
        # colorlist validator.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        super(TreemapcolorwayValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@_treemapcolorway.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "golmschenk/eesunhong",
"repo_path": "eesunhong_extracted/eesunhong-main/tests/end_to_end_tests/binary_lens/advanced_dseek/__init__.py",
"type": "Python"
}
|
golmschenkREPO_NAMEeesunhongPATH_START.@eesunhong_extracted@eesunhong-main@tests@end_to_end_tests@binary_lens@advanced_dseek@__init__.py@.PATH_END.py
|
|
{
"filename": "in-depth-guide.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/tools/proto_splitter/g3doc/in-depth-guide.md",
"type": "Markdown"
}
|
# Proto Splitter / Merger Library
This doc lists implementation details about the [Proto Splitter/Merger library](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/proto_splitter). New Splitters should take these details into consideration to generate valid chunks and metadata that are compatible with the Merger. If you'd just like to use the new feature when exporting a SavedModel, simply add the following flag to `tf.saved_model.SaveOptions`:
```python
tf.saved_model.save(
...,
options=tf.saved_model.SaveOptions(experimental_image_format=True)
)
```
The Merger has been integrated with `tf.saved_model.load`, so no change needs to be made to SavedModel loading code.
## Chunking Schema
A proto larger than 2GB cannot be serialized. This is a limit of the protobuf implementation that we must work around, which is why we created a proto Splitter/Merger solution. The Splitter takes a proto as input and produces **chunks** and **metadata**. Chunks are parts of a proto that have been split into units of binary data, and can be merged together to form the original proto. Metadata refers to the auxiliary information about where these chunks are extracted from the original proto. This structural information of the proto is contained in the tree-like `ChunkedMessage`. When writing to disk, the metadata takes the form of `ChunkMetadata`, which contains the `ChunkedMessage` as well as information about the chunks' location within the file. When simply splitting the message in memory, only the `ChunkedMessage` is needed. On the Merger side of things, the metadata is used to build the proto back from its disjointed chunks.
`ChunkedMessage` contains an optional `chunk_index`, which references a `chunk` that contains the corresponding message. This message may be further chunked and have one or more of its fields with their own chunks. Therefore, `ChunkedMessage` also contains a list of `ChunkedField`s.
A `ChunkedField` represents a field within a message that has been delegated to its own `chunk`. It contains `field_tag`s that specify where it is located relative to the message `ChunkedField` belongs to. It also contains a `ChunkedMessage`, which allows for a structure that resembles a tree, which is a natural fit for proto metadata.
As an example, consider the following message `A` and its corresponding `ChunkedMessage`:
```proto
message A {
int num = 1;
string str = 2;
B b = 3;
}
message B {
...
}
```
#### Metadata:
```proto
ChunkedMessage {
chunk_index: 0
chunked_fields: [
ChunkedField {
field_tag: [b]
message: ChunkedMessage {
chunk_index: 1
}
}
]
}
```
#### View of memory (deserialized):
```proto
chunks [
0: A {
num: ...
str: ...
}
1: B {
...
}
]
```
Here, `A`'s `ChunkedMessage` has the optional `chunk_index`, so we see in memory that `chunks[0]` does indeed contain the message `A`. Note that the `A` in `chunks[0]` lacks the `b` field, which has been chunked out. We see this reflected in `A`'s `ChunkedMessage`, whose `chunked_field`s contains the `ChunkedField` that corresponds to this `b` field. The `field_tag`s contain the (very short) path to the `b` field, and the `ChunkedMessage` within the `ChunkedField` references the location of the `chunk` in memory. Indeed, we see the `B` message in memory at `chunks[1]`.
## Field Tag Serialization
A `chunked_field`'s location within the proto is specified by its `field_tag`s.
```proto
message ChunkedField {
repeated FieldIndex field_tag = 1;
}
message FieldIndex {
message MapKey {
oneof type {
string s = 1;
bool boolean = 2;
uint32 ui32 = 3;
uint64 ui64 = 4;
int32 i32 = 5;
int64 i64 = 6;
}
}
oneof kind {
uint32 field = 1;
MapKey map_key = 2;
uint64 index = 3;
}
}
```
Consider the following messages `A`, `B`, and `C`:
```proto
message A {
map<string,B> b = 1;
}
message B {
repeated C c = 1;
}
message C {
BigMessage big = 1;
}
message BigMessage {
...
}
```
Say we were given an `A` proto and wanted to chunk out `big`, since it is quite large. To reference `big`, we use the following path: `A.b["example_string"].c[3].big`. In this case, our list of `field_tag`s would look something like: `[ b, "example_string", c, 3, big ]`. The `field_tag`s for a `chunked_field` (`big`) specify its location relative to the given proto.
These tags represent either a `field`, `map_key`, or `index`, depending on what exactly is being referenced. For example, this allows us to differentiate between `G1 = GraphDef.node.1.attr.value.tensor` and `G2 = GraphDef.node[1].attr["value"].tensor`, even though their lists of `field_tag`s appear to be very similar. `G1`'s `node` field is simply a message containing a field `1`, while `G2`'s `node` field is a repeated message, who's `1`st element is being referenced. Similarly, `G1`'s `attr` field is a message containing a field called `attr`, while `G2`'s `attr` is a map, with the `value` key being referenced. Technically, we could use the proto reflection API to tell whether these ambiguous fields are repeated/map fields or not. However, it is better to be explicit, since it avoids bugs and the extra information makes for a better debugging experience.
## Chunk Extraction and Storage
Proto fields relevant to splitting/merging are classified using their type and occurrence:
- Field type: **Scalar** or **Message**
- Field occurrence: **Singular**, **Repeated**, or **Map**
Other proto field qualifiers like `oneof`, `required`, `optional`, and `packed` do not affect splitting and merging, so they are not taken into account in the implementation.
### Singular Fields
Scalar fields are simply serialized as bytes. Numerical types, such as ints, are serialized in numpy-readable binary. Message fields are also serialized as bytes, once they have been chunked down to <2GB.
### Repeated Fields
When repeated fields are split, they are stored in a chunk that has the same type as the parent of that repeated field. The order of the `chunked_field` for repeated fields is the same order in which the chunks should be merged.
For example, consider the message `A` which contains a repeated field `i`:
```proto
message A {
repeated int32 i = 1;
}
A(i=[1, 2, 3, 4, 5])
```
#### Metadata
```proto
ChunkedMessage {
chunked_fields: [
ChunkedField {
field_tag = [],
chunk = 0
},
ChunkedField {
field_tag = [],
chunk = 1
},
]
}
```
#### View of memory (deserialized)
```proto
chunks [
0: A {
i=[1, 2]
}
1: A {
i=[3, 4, 5]
}
]
```
`A`'s `ChunkedMessage` contains two `ChunkedField`s, one for the indices `[1, 2]` and another for the indices `[3, 4, 5]`. The `field_tag`s for both are empty, because the chunks are also of type `A`, and not a field within `A`. During merging, `chunks[0]` must be merged into the in-memory message `A` before `chunks[1]` so that the ordering of the repeated field elements is correct.
### Map Fields
Protobuf maps, like repeated fields, are not a distinct structure within the proto specification. Instead, maps are actually represented by repeated messages with `key` and `value` fields. (This means proto maps aren't really associative containers, but that isn't important here.) Here's an example of a map:
```proto
message A {
map<string, int32> my_map = 1;
}
A(my_map={"abc": 123, "def": 456})
```
#### Underlying proto structure:
```proto
A: {
my_map: {
key: "abc"
value: 123
}
my_map: {
key: "def"
value: 456
}
}
```
Since maps are really just repeated fields under the hood, we can chunk them the same way we chunk repeated fields:
```proto
message A {
map<int32, int32> m = 1;
}
A(m={1:2, 3:4, 5:6})
```
#### Metadata
```proto
ChunkedMessage {
chunked_fields: [
ChunkedField {
field_tag = [],
chunk = 0
},
ChunkedField {
field_tag = [],
chunk = 1
},
]
}
```
#### View of memory (deserialized)
```proto
chunks [
  0: A {
    m={3: 4}
  }
  1: A {
    m={1: 2, 5: 6}
  }
]
```
However, we can also chunk out the values in the map entry directly if we'd like:
```proto
message A {
map<int32, B> m = 1;
}
message B {
  int32 i = 1;
}
A(m={1:B(i=3), 2:B(i=4)})
```
#### Metadata
```proto
ChunkedMessage {
chunked_fields: [
ChunkedField {
field_tag = [m, 1],
chunk = 0
},
ChunkedField {
field_tag = [m, 2],
chunk = 1
},
]
}
```
#### View of memory (deserialized)
```proto
chunks [
0: B {
i=3
}
1: B {
i=4
}
]
```
### Blank Message Compression
In general, we assume the first chunk to be the base message from which all the chunks are extracted (during the split), or the chunk that exists. **However, it's important to note that this isn't required.** If all data is extracted from the user-provided proto into chunks, there is no need for the initial chunk to be the base message. Here's an example with message `A`:
```proto
message A {
B b = 1;
C c = 2;
}
a = A(b=B(...), c=C(...))
```
Message `a` can be split into chunks `[b, c]` in two ways:
*First chunk is the same as the parent type*
```proto
chunked_message {
chunk_index: 0 // Chunk index is set as the parent message type
chunked_fields { // First field is chunked
field_tag { field: 1 }
message { chunk_index: 1 }
}
chunked_fields { // Second field stored in a separate chunk
field_tag { field: 2 }
message { chunk_index: 2 }
}
}
```
#### View of memory (deserialized)
```proto
chunks [
0: A {...}
1: B {...}
2: C {...}
]
```
*First chunk is not the parent type*
```proto
chunked_message {
// Chunk index is not set in the parent message type
chunked_fields { // First field is chunked
field_tag { field: 1 }
message { chunk_index: 0 }
}
chunked_fields { // Second field stored in a separate chunk
field_tag { field: 2 }
message { chunk_index: 1 }
}
}
```
#### View of memory (deserialized)
```proto
chunks [
0: B {...}
1: C {...}
]
```
This second method is viable since Message `A` only contains data from fields `b` and `c`. Once `b` and `c` are chunked, there's no other data from `A` to include, so we don't bother creating a chunk for `A`. The merging implementation should not make an assumption on the type of the first chunk, and in this case must create a new (blank) `A` message to merge the `b` and `c` chunks into.
**tldr: A chunked_message may not have a parent chunk to merge its chunked_fields into**
## Creating a Splitter
Now that we've covered the format used by the Splitters/Merger, we can work on implementing our own Splitter. By now you can understand why each proto requires its own bespoke Splitter, since automatic splitting wouldn't take advantage of the knowledge we have as proto designers of bottlenecks and opportunities for optimization. So, let's walk through the process of creating a Splitter for our message `ModelConfig`:
```proto
enum ActivationFunction {
RELU = 0;
SIGMOID = 1;
TANH = 2;
}
message Layer {
string name = 1;
int32 num_units = 2;
ActivationFunction activation_function = 3;
}
message ModelConfig {
string model_name = 1;
int32 input_shape = 2;
repeated Layer hidden_layers = 3;
int32 output_units = 4;
ActivationFunction output_activation = 5;
map<string, float> hyperparameters = 6;
}
```
To create a `ModelConfig` Splitter, we have to decide what exactly is being split. As the designers of `ModelConfig`, we know that the `hidden_layers` tend to be quite large, so that makes the `Layer`s messages good candidates to split out into their own chunks. For the sake of example, we're also going to split out the `hyperparameters` field.
To create a Splitter, we must subclass the `ComposableSplitter` class and override its `build_chunks` method. If we wanted to store state in a Splitter, we could also override the `__init__` method, but it isn't required. In our example this would be enough to split and chunk out the fields we settled on (`hidden_layers` and `hyperparameters`), but we'll also create a Splitter for the `Layer` message to showcase Splitter composition.
```python
class ModelConfigSplitter(ComposableSplitter):
def build_chunks(self):
for k, v in self._proto.hyperparameters.items():
self.add_chunk(bytes(str(v), "utf-8"), ["hyperparameters", k])
for i, layer in enumerate(self._proto.hidden_layers):
LayerSplitter(
layer,
parent_splitter=self,
fields_in_parent=["hidden_layers", i]
).build_chunks()
class LayerSplitter(ComposableSplitter):
def build_chunks(self):
self.add_chunk(self._proto, [])
ModelConfigSplitter(
proto=ModelConfig(...)
)
```
`build_chunks` generates chunks from `self._proto`, then for each chunk, calls `add_chunk` to add it to `self._chunks` and update `self._chunked_message`. `ModelConfigSplitter` does this once for `hyperparameters`, by simply converting the float value to a string and then to bytes. The Splitter does it again for `hidden_layers`, which get chunked by a dedicated `LayerSplitter` class. `LayerSplitter` doesn't actually do any chunking, but is here to showcase the ability to have a hierarchy of Splitters.
## Merging
There are two ways of merging a chunked proto using the provided Merger:
- `Merger::Read()`, merges directly into a user-provided merged_message from a .cpb file on disk
- `Merger::Merge()`, requires that the chunks and chunked metadata be stored in memory
`Merge()` should be called at runtime with the C++ Splitter, and allows one to skip any unnecessary disk reads/writes. `Read()` is therefore more holistic, handling both file IO and merging, so we'll consider its implementation below. The provided Merger is independent of any Splitter or protobuf, so developers will not have to write their own in the vast majority of cases.
### Riegeli
Since chunked protos use the riegeli file format, we use the riegeli api for file IO. The `riegeli::RecordReader` makes it easy to `Seek()` to a position in the file and `ReadRecord()` at that location.
### Reflection
We also make use of the protobuf reflection api to add and modify fields in `merged_message` using `FieldDescriptor`s.
### ChunkedMetadata
But to understand what should be read and where to read it from, we need the `ChunkedMetadata`. The metadata is always stored in the last chunk of the chunked proto, so we simply read that record to begin the merging process. Within the `ChunkedMetadata`, the sequence of `ChunkInfo` tells us where in the chunked proto to find the chunk we're looking for. And the `ChunkedMessage` contains a tree of metadata that we can use to reconstruct the desired proto.
### Field Processing
Starting at the root `ChunkedMessage`, we first check to see if it references a chunk by specifying a `chunk_index`. If so, we need to merge that chunk into the target proto (let's call it `A`) before processing each of its `chunked_field`s. If there is no `chunk_index`, then `A` only contains fields that have been chunked out. Before merging in the `chunked_field`s, they must be sorted by depth and index. For example, we need to merge in `GraphDef.library` before `GraphDef.library.function[0]`, which needs to be merged in before `GraphDef.library.function[1]`. We must merge in the `library` field first so that the `library.function`s have some place to be merged into, and the `0`th `function` must be merged before the `1`st `function` to maintain the proper ordering. Now we're ready to merge in the `chunked_field`s.
For each `ChunkedField` in a `ChunkedMessage`:
1. Read in the `chunk` specified by the `chunks_info[chunked_field.message.chunk_index]`
2. If the `chunked_field` has no `field_tag`s, then it does not reference a field within the parent message, but rather part of the parent message itself. For example, consider the following message and its corresponding `chunked_message`:
```proto
message A {
...
}
chunked_message = {
chunked_fields { // empty field_tag, belongs to the parent chunked_message
field_tag { }
message { chunk_index: 0}
}
chunked_fields { // also belongs to the parent chunk
field_tag { }
message { chunk_index: 1}
}
}
```
In this case, a message `A` has been split into multiple chunks (here `A1` and `A2`, but hypothetically up to `An`), rather than splitting its fields into their own chunks. Splitting a message into chunks directly or splitting a message's fields into chunks are simply two different approaches that we offer in our api. So, the `chunk` should be merged directly into the parent message (`A`), and we skip the remaining steps to move on to the next `chunked_field`.
3. Navigate the `merged_message` using the `field_tag`s, until reaching the target field. Fields may need to be constructed along the way if they were not kept during the splitting process (see [Blank Message Compression above](#blank_message_compression)).
4. If the field is not a message, it is a primitive data type like bool or int, so we simply convert the `chunk` string to the appropriate type and set the field using reflection. If it is a message, then we recursively process it using its corresponding `ChunkedMessage`.
When the recursive process is complete, the `chunk`s have been successfully merged into the `merged_message`, so it's ready to be used in your program.
## Putting It All Together
Now that we've covered the entire splitting and merging process, let's go over an end-to-end example. We'll use the `ModelConfigSplitter` class we created in the [Creating a Splitter](#creating_a_splitter) section above. To write our proto to disk, we simply call `Splitter.write()`:
```python
my_proto = ModelConfig(...)
export_dir = "..."
my_splitter = ModelConfigSplitter(my_proto)
my_splitter.write(export_dir)
```
And in C++, we can use the Merger to read in our chunked proto:
```c++
ModelConfig my_proto;
string export_dir = "...";
Merger::Read(export_dir, &my_proto);
```
If we'd instead like to split and merge our proto directly in memory, we'd need `ModelConfigSplitter` to be a C++ class, but the process is very similar:
```c++
class ModelConfigSplitter : public ComposableSplitter {
...
};
ModelConfig my_proto{...};
string export_dir = "...";
ModelConfigSplitter my_splitter(my_proto);
// std::pair<std::vector<MessageBytes>*, ::proto_splitter::ChunkedMessage*>
auto[chunks, chunked_message] = my_splitter.Split();
// chunks, chunked_message are processed
ModelConfig my_new_proto;
Merger::Merge(chunks, chunked_message, &my_new_proto);
```
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@tools@proto_splitter@g3doc@in-depth-guide.md@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2d/hoverlabel/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``font`` property of ``histogram2d.hoverlabel``."""

    def __init__(
        self, plotly_name="font", parent_name="histogram2d.hoverlabel", **kwargs
    ):
        # Resolve the caller-overridable settings first, so that anything
        # remaining in **kwargs is forwarded to the base class unchanged.
        data_class_str = kwargs.pop("data_class_str", "Font")
        data_docs = kwargs.pop(
            "data_docs",
            """
            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for `family`.
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            linepositionsrc
                Sets the source reference on Chart Studio Cloud
                for `lineposition`.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            shadowsrc
                Sets the source reference on Chart Studio Cloud
                for `shadow`.
            size
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for `size`.
            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            stylesrc
                Sets the source reference on Chart Studio Cloud
                for `style`.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            textcasesrc
                Sets the source reference on Chart Studio Cloud
                for `textcase`.
            variant
                Sets the variant of the font.
            variantsrc
                Sets the source reference on Chart Studio Cloud
                for `variant`.
            weight
                Sets the weight (or boldness) of the font.
            weightsrc
                Sets the source reference on Chart Studio Cloud
                for `weight`.
            """,
        )
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram2d@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "_width.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergl/line/_width.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Numeric validator for the ``width`` property of ``scattergl.line``."""

    def __init__(self, plotly_name="width", parent_name="scattergl.line", **kwargs):
        # Pull the overridable options out of **kwargs up front; whatever
        # remains is passed through to the NumberValidator base unchanged.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergl@line@_width.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/gemini/tests/__init__.py",
"type": "Python"
}
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@gemini@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "__init__.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/Conf/__init__.py",
"type": "Python"
}
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@Conf@__init__.py@.PATH_END.py
|
|
{
"filename": "__init__.py",
"repo_name": "GeminiDRSoftware/GHOSTDR",
"repo_path": "GHOSTDR_extracted/GHOSTDR-master/ghostdr/ghost/test/__init__.py",
"type": "Python"
}
|
"""
Primitive unit tests for the GHOST instrument.
Only the simplest of the unit tests are provided in this module. A lot of
primitives require various calibrators in order to be run correctly. These
primitives are tested during the full reduction tests
(:any:`ghostdr.ghost.recipes.test`).
"""
|
GeminiDRSoftwareREPO_NAMEGHOSTDRPATH_START.@GHOSTDR_extracted@GHOSTDR-master@ghostdr@ghost@test@__init__.py@.PATH_END.py
|
{
"filename": "lat_dm.py",
"repo_name": "HeRTA/FRBSTATS",
"repo_path": "FRBSTATS_extracted/FRBSTATS-main/figs/lat_dm.py",
"type": "Python"
}
|
from csv import reader
import matplotlib
matplotlib.use('Agg')  # non-interactive backend; must be selected before pyplot import
from matplotlib import pyplot as plt
### Set MPL plot parameters
# Selectable SVG text
plt.rcParams['svg.fonttype'] = 'none'
# Use TeX (requires a working LaTeX install)
plt.rcParams['text.usetex'] = True
# Set figsize
plt.rcParams["figure.figsize"] = (24,20)
plt.rcParams["figure.dpi"] = 300
# Set xtick size
plt.rcParams['xtick.major.size'] = 20
plt.rcParams['xtick.major.width'] = 2
plt.rcParams['xtick.minor.size'] = 10
plt.rcParams['xtick.minor.width'] = 2
# Set ytick size
plt.rcParams['ytick.major.size'] = 20
plt.rcParams['ytick.major.width'] = 2
plt.rcParams['ytick.minor.size'] = 10
plt.rcParams['ytick.minor.width'] = 2
# Hide secondary spines
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False


def _coerce_floats(values, bad_rows):
    """Convert each entry of *values* to float in place.

    Indices of entries that cannot be parsed (missing or non-numeric
    catalogue fields) are added to the *bad_rows* set so the caller can
    drop the same rows from every column.
    """
    for idx, val in enumerate(values):
        try:
            values[idx] = float(val)
        except ValueError:
            bad_rows.add(idx)


### Load data
# dm: dispersion measure (CSV column 9), lat: galactic latitude (column 7)
dm = []
lat = []
# Read FRBSTATS CSV catalogue
with open('../catalogue.csv', 'r') as read_obj:
    csv_reader = reader(read_obj)
    header = next(csv_reader)  # consume the header row
    if header is not None:
        for row in csv_reader:
            dm.append(row[9])
            lat.append(row[7])

### Pre-process data
# Collect indices of rows that fail float conversion in either column,
# then delete them from both lists (reverse order keeps indices valid).
bad_rows = set()
_coerce_floats(dm, bad_rows)
_coerce_floats(lat, bad_rows)
for idx in sorted(bad_rows, reverse=True):
    del dm[idx]
    del lat[idx]

### Initiate plot
# Apply grid
plt.grid(color='grey', linestyle='-', linewidth=0.25, alpha=1)
# Scatter plot
plt.scatter(dm, lat, c='cornflowerblue', s=500, alpha=0.8, edgecolor='royalblue', linewidth=2, zorder=10)
# Set axis labels & figure title
plt.xlabel(r'$\mathrm{Dispersion \ Measure \ }\Bigg[\mathrm{pc \ cm}^{-3}\Bigg]$', fontsize=52)
plt.ylabel(r'$\mathrm{Galactic \ Latitude \ [deg]}$', fontsize=52)
plt.title(r'$\mathrm{FRB \ Gal. \ Latitude-DM \ Distribution}$', fontsize=72, y=1.01)
# Set log-log scaling
#plt.xscale('log')
#plt.yscale('log')
# Set axis limits
plt.gca().set_xlim(left=0)
# Set tick size
plt.xticks(fontsize=42, y=-0.005)
plt.yticks(fontsize=42)
plt.tight_layout()
# Save data to a scalable format
plt.savefig('lat_dm.svg', format='svg')
plt.savefig('lat_dm.pdf')
plt.savefig('lat_dm.png')
|
HeRTAREPO_NAMEFRBSTATSPATH_START.@FRBSTATS_extracted@FRBSTATS-main@figs@lat_dm.py@.PATH_END.py
|
{
"filename": "hydro_2_python.py",
"repo_name": "agnwinds/python",
"repo_path": "python_extracted/python-main/py_progs/hydro_2_python.py",
"type": "Python"
}
|
#!/usr/bin/env python
'''
Parses a hydro input file into an astropy table.
Synopsis:
This is a simple program which will
parse a hydro input file into an astropy
table that python can read in as a wind
Description:
The code attempts to work out if the
input file is an hdf file or an ascii file.
The hdf file is produced by default from
zeus, and is much the easier option
however the code will have a try and work
out how the r and theta files are named
using the ndf filename.
In this initial incarnation, it only deals with
r theta files, future iterations will deal
with different dimensionalities, and coordinate
types as and when it becomes necessary.
There are two subroutines, which deal with
importing the data from either of the input
file formats, and returning an indentical
dictionary that the main routine can use.
Arguments:
filename of the input file - if missed out code
will prompt for it
Returns:
an astropy formatted ascii file, containing
r, theta, the three components of velocity,
the temperature of each cell and the density.
Notes:
History:
15may nsh Coded
'''
import sys
import numpy as np
from pyhdf import SD
from astropy import constants as consts
from astropy.io import ascii
from astropy.table import Table
'''The following routine reads in hdf data from the file fname
It takes a filename as input argument, and returns a dictionary
The upper level of the dictionay contains
Filename - the filename originally supplied
Coord_sys - the coordinate system - NB, it only knows about spol at the moment
Time - the time stamp of the simulation
N_Data - the number of different data types
Dims - the number of dimensions
Data - This is in turn a dictionary that contains the different data
This dictionary contains individual dictionaries, each of which has
data - an array of the data
x1 - the first coordinate (usually theta)
x2 - the second coordinate (usually r)'''
def get_hdf_data(fname):
    """Read a zeus hdf4 (SD) dump and return grid + data as a dictionary.

    See the module-level notes above for the layout of the returned dict.
    """
    hdf = SD.SD(fname)
    info= hdf.datasets()
    #Lets see what is inside
    hdf.info()
    data_sets=[]
    # Datasets whose names start with "Data" hold physical quantities.
    # Their long_name attribute appears to look like "<QUANTITY> AT TIME=<t>";
    # we scan for the "AT" marker to split off the quantity name
    # (TODO confirm this format against a sample dump).
    for name in sorted(info.keys()):
        if name[0:4]=="Data":
            sds=hdf.select(name)
            long_name=sds.attributes()["long_name"]
            for i in range(len(sds.attributes()["long_name"])):
                if long_name[i:i+2]=="AT":
                    junk=i-1   # index of the character just before the last "AT"
            short_name=long_name[:junk]
            data_sets.append([name,long_name,short_name])
    #Get the time from the last long name
    # junk+9 skips past " AT TIME="; "********" marks an invalid timestamp.
    if long_name[junk+9:] != "********":
        time=float(long_name[junk+9:])
    else:
        time=0.0
    #Get the dimensions from the last data set
    dims=len((sds.info()[2]))
    #Get the coordinate system from the last data set
    coord_sys=sds.attributes()["coordsys"]
    if coord_sys=='spherical polar':
        coord_sys='spol'
    else:
        # Only spherical polar grids are supported at present.
        print(("get_hdf_data: I don't understand coordinate system ",coord_sys))
        exit()
    #Now we know which of the datasets contain real data, we can extract all the data
    alldat={}
    alldat["Filename"]=fname
    alldat["Coord_sys"]=coord_sys
    alldat["Time"]=time
    alldat["Data"]={}
    alldat["Data_names"]=np.array(data_sets)[:,2]
    alldat["N_data"]=len(data_sets)
    alldat["Dims"]=dims
    #Loop over all the data sets in the hdf file - name each of the resulting dictionaries with the short name
    for i in range (len(data_sets)):
        print((data_sets[i][2]))
        sds=hdf.select(data_sets[i][0])
        data = sds.get()
        # The dataset's two dimension names; note the swap below so that
        # x1 is the radial axis and x2 the angular axis -- presumably the
        # hdf stores (theta, r) order.  TODO confirm with a sample file.
        c1=info[data_sets[i][0]][0][0]
        c2=info[data_sets[i][0]][0][1]
        sds=hdf.select(c1)
        x2=sds.get()
        sds=hdf.select(c2)
        x1=sds.get()
        alldat[data_sets[i][2]]={}
        alldat[data_sets[i][2]]=data
    # x1/x2 retain the values from the last dataset; every dataset is
    # assumed to share the same grid.
    alldat["r_cent"]=x1
    alldat["theta_cent"]=x2
    #HDF files only give us the centre of the grid, python needs the inner edges as well.
    # Assume a geometric progression of cell widths: infer the common ratio
    # from the first three centres, then walk outward accumulating edges.
    r_edge=[]
    r_ratio=(x1[2]-x1[1])/(x1[1]-x1[0])
    dr=(x1[1]-x1[0])/(0.5*(1.0+r_ratio))
    r_edge.append(x1[0]-0.5*dr)
    print((r_edge[0],r_ratio))
    for i in range(len(x1)-1):
        r_edge.append(r_edge[-1]+dr)
        dr=dr*r_ratio
    theta_edge=[]
    theta_ratio=(x2[2]-x2[1])/(x2[1]-x2[0])
    dtheta=(x2[1]-x2[0])/(0.5*(1.0+theta_ratio))
    theta_min=x2[0]-0.5*dtheta
    if theta_min<0.0:
        theta_min=0.0   # clamp: the first edge cannot sit above the pole
    theta_edge.append(theta_min)
    print((x2[0]))
    print((theta_edge[0],theta_ratio))
    for i in range(len(x2)-1):
        theta_edge.append(theta_edge[-1]+dtheta)
        dtheta=dtheta*theta_ratio
    # If the last cell's implied outer edge would cross pi/2 (the equator),
    # pull the final cell centre back so the edge lands exactly on pi/2.
    if (theta_edge[-1]+(x2[-1]-theta_edge[-1])*2.0)>(np.pi/2.0):
        x2[-1]=(theta_edge[-1]+(np.pi/2.0))/2.0
    alldat["r_edge"]=r_edge
    alldat["theta_edge"]=theta_edge
    return(alldat)
'''The following routine reads in data from the file fname, radial
grid data from r_file an theta grid data from theta_file
It takes three filenames as input arguments, and returns a dictionary
The upper level of the dictionay contains
Filename - the filename originally supplied
Coord_sys - the coordinate system - NB, it only knows about spol at the moment
Time - the time stamp of the simulation
N_Data - the number of different data types
Dims - the number of dimensions
Data - This is in turn a dictionary that contains the different data
This dictionary contains individual dictionaries, each of which has
data - an array of the data
x1 - the first coordinate (usually theta)
x2 - the second coordinate (usually r)'''
def _read_grid_file(path, lo, hi, label):
    """Read one zeus grid file (rows of ``index edge centre``).

    Only rows whose integer index lies in [lo, hi] are kept; reading stops
    after the row with index ``hi``.  Malformed rows are reported and
    skipped (preserving the original best-effort behaviour).  Returns the
    pair (edges, centres) as lists of floats.
    """
    edges = []
    centres = []
    with open(path, "r") as inp:
        for line in inp:
            cols = line.split()
            try:
                idx = int(cols[0])
                if lo <= idx <= hi:
                    centres.append(float(cols[2]))
                    edges.append(float(cols[1]))
                    if idx == hi:
                        break
            except (ValueError, IndexError):
                # Narrowed from a bare except: only parse failures are
                # expected here; anything else should surface.
                print("Something wrong with", label, "data file", path)
    return edges, centres


def get_ndf_data(fname, r_file, theta_file):
    """Parse an ascii (ndf) zeus dump plus its r and theta grid files.

    Parameters
    ----------
    fname : str
        Main data file: one header line of column names followed by rows of
        ``ir itheta density v1 v2 v3 energy``.
    r_file, theta_file : str
        Grid files with rows of ``index edge centre``.

    Returns
    -------
    dict
        Same layout as get_hdf_data(): metadata keys plus one 2-D
        (theta x r) array per physical quantity.
    """
    ir = []
    itheta = []
    data = []
    # 'with' guarantees the file is closed even if a row fails to parse
    # (the original code leaked the handle on error).
    with open(fname, "r") as inp:
        inp.readline()  # discard the header line of column names
        for line in inp:
            cols = line.split()
            ir.append(int(cols[0]))
            itheta.append(int(cols[1]))
            data.append([float(v) for v in cols[2:]])

    # Keep only the grid rows covered by the data file's index ranges.
    theta_edge, theta_cent = _read_grid_file(
        theta_file, np.min(itheta), np.max(itheta), "theta")
    r_edge, r_cent = _read_grid_file(r_file, np.min(ir), np.max(ir), "r")

    data_sets = ["DENSITY", "1-VELOCITY", "2-VELOCITY", "3-VELOCITY", "TOTAL ENERGY"]
    alldat = {
        "Filename": fname,
        "Coord_sys": "spol",  # ascii dumps are assumed spherical polar
        "Time": 0,
        "N_data": 5,
        "Dims": 2,
        "Data_names": data_sets,
        "theta_cent": np.array(theta_cent),
        "theta_edge": np.array(theta_edge),
        "r_cent": np.array(r_cent),
        "r_edge": np.array(r_edge),
    }
    # Reshape each flat data column into a (theta, r) grid.
    for i, name in enumerate(data_sets):
        alldat[name] = np.reshape(np.array(data)[:, i], (len(theta_cent), len(r_cent)))
    return alldat
# Next lines permit one to run the routine from the command line
if __name__ == "__main__":
    import sys
    #Start of the main code
    print ("Welcome to hydro_2_python")
    MU=0.6   # mean molecular weight used in the temperature conversion
    # File to convert: first command-line argument, or prompt the user.
    if len(sys.argv)>1:
        fname=sys.argv[1]
    else:
        fname=input("Enter name of file to convert (main data file): ")
    # Dispatch on the filename prefix: "ndf" -> ascii dump (with two
    # companion grid files named from the last three characters of fname),
    # "hdf" -> hdf4 dump.  Any other prefix leaves `data` undefined and
    # the script fails below.
    if fname[0:3]=="ndf":
        print ("I think this is an ascii file")
        r_file="grid_r_"+fname[-3:]+".dat"
        theta_file="grid_theta_"+fname[-3:]+".dat"
        data=get_ndf_data(fname,r_file,theta_file)
    elif fname[0:3]=="hdf":
        print ("I think this is an hdf file")
        data=get_hdf_data(fname)
    #We should now have three components of velocity, internal energy, and density.
    #We need to compute the temperature in order to supply it to python.
    # T = (2/3) E / (n k_B) with n = rho / (MU m_p), i.e. treating
    # "TOTAL ENERGY" as internal energy density of an ideal monatomic gas
    # -- TODO confirm against the zeus output convention.
    temp=(2.0/3.0)*data["TOTAL ENERGY"]
    data["TEMPERATURE"]=temp/((data["DENSITY"]/(consts.m_p.cgs*MU))*consts.k_B.cgs)
    # Open an output file
    fname=data["Filename"]+".zeu"
    # Preamble
    out=open(fname,'w')
    out.write("# This is a file generated by hydro_to_python\n")
    out.write("# We can put any number of comments in behind # signs\n")
    out.write("# By default, the order of coordinates are \n")
    out.write("# r, theta phi for spherical polars\n")
    out.write("# x,y,z for carteisan\n")
    out.write("# or w, z, phi for cylindrical\n")
    out.write("# Coordinate_system "+data["Coord_sys"]+"\n")
    out.write("# Dimensions "+str(data["Dims"])+"\n")
    ndata=len(data)
    data_names=list(data.keys())
    # Column titles for the output table, in the order python expects.
    titles=[]
    titles=titles+["ir","r_cent","r_edge"]
    titles=titles+["itheta","theta_cent","theta_edge"]
    titles=titles+["v_r","v_theta","v_phi","density","temperature"]
    col0=np.array([])
    col1=np.array([])
    col2=np.array([])
    col3=np.array([])
    col4=np.array([])
    col5=np.array([])
    col6=np.array([])
    col7=np.array([])
    col8=np.array([])
    col9=np.array([])
    col10=np.array([])
    fmt='%013.6e'
    #This next line defines formats for the output variables. This is set in a dictionary
    fmts={ 'ir':'%03i',
        'r_cent':fmt,
        'r_edge':fmt,
        'itheta':'%i',
        'theta_cent':fmt,
        'theta_edge':fmt,
        'iphi':'%03i',
        'phi_cent':fmt,
        'phi_edge':fmt,
        'ix':'%03i',
        'x_cent':fmt,
        'x_edge':fmt,
        'v_r':fmt,
        'v_theta':fmt,
        'v_phi':fmt,
        'density':fmt,
        'temperature':fmt}
    # Flatten the 2-D (theta, r) grids into parallel 1-D columns:
    # one block of r values per theta row.
    for j in range(len(data["theta_cent"])):
        col0=np.append(col0,np.arange(len(data["r_cent"])))
        col1=np.append(col1,data["r_cent"])
        col2=np.append(col2,data["r_edge"])
        col3=np.append(col3,np.ones(len(data["r_cent"]))*j)
        col4=np.append(col4,np.ones(len(data["r_cent"]))*data["theta_cent"][j])
        col5=np.append(col5,np.ones(len(data["r_cent"]))*data["theta_edge"][j])
        col6=np.append(col6,data["1-VELOCITY"][j])
        col7=np.append(col7,data["2-VELOCITY"][j])
        col8=np.append(col8,data["3-VELOCITY"][j])
        col9=np.append(col9,data["DENSITY"][j])
        col10=np.append(col10,data["TEMPERATURE"][j])
    out_dat=Table([col0,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10],names=titles)
    ascii.write(out_dat,out,formats=fmts)
    out.close()
    # Read the file back in as a quick sanity check that it parses.
    data=ascii.read(fname)
|
agnwindsREPO_NAMEpythonPATH_START.@python_extracted@python-main@py_progs@hydro_2_python.py@.PATH_END.py
|
{
"filename": "base_observer.py",
"repo_name": "telegraphic/pygdsm",
"repo_path": "pygdsm_extracted/pygdsm-master/pygdsm/base_observer.py",
"type": "Python"
}
|
import ephem
import healpy as hp
import numpy as np
from astropy.time import Time
from pygdsm.plot_utils import show_plt
from pygdsm.utils import hpix2sky, sky2hpix
from astropy.coordinates import SkyCoord
class BaseObserver(ephem.Observer):
    """ Observer of the Global Sky Model.
    Generates the Observed sky, for a given point on Earth.
    Applies the necessary rotations and coordinate transformations
    so that the observed 'sky' can be returned, instead of the
    full galaxy-centered GSM.
    This class is based on pyephem's Observer(). The GSM bit can be thought of
    as an 'add on' to ephem.Observer, adding the methods generate() and view(),
    which allows all-sky images for a given point on earth to be produced.
    """
    def __init__(self, gsm):
        """ Initialize the Observer object.
        Calls ephem.Observer.__init__ function and adds on gsm
        """
        super(BaseObserver, self).__init__()
        self.observed_sky = None       # cached healpix map from generate()
        self.gsm = gsm()               # instantiate the supplied GSM class
        self._setup()
    def _setup(self):
        # Cache the current frequency (MHz) and observation time so that
        # generate() can skip recomputation when neither has changed.
        self._freq = 100
        self._time = Time(self.date.datetime())
        # Generate mapping from pix <-> angles
        self.gsm.generate(self._freq)
        self._n_pix  = hp.get_map_size(self.gsm.generated_map_data)
        self._n_side = hp.npix2nside(self._n_pix)
        self._theta, self._phi = hp.pix2ang(self._n_side, np.arange(self._n_pix))
        # Lazily-filled caches for the pixel rotation and horizon mask.
        self._pix0 = None
        self._mask = None
        self._observed_ra = None
        self._observed_dec = None
    def generate(self, freq=None, obstime=None):
        """ Generate the observed sky for the observer, based on the GSM.
        Parameters
        ----------
        freq: float
            Frequency of map to generate, in units of MHz (default).
        obstime: astropy.time.Time
            Time of observation to generate
        Returns
        -------
        observed_sky: np.array
            Numpy array representing the healpix image, centered on zenith,
            with below the horizon masked.
        """
        # Check to see if frequency has changed.
        if freq is not None:
            if not np.isclose(freq, self._freq):
                self.gsm.generate(freq)
                self._freq = freq
        sky = self.gsm.generated_map_data
        # Check if time has changed -- astropy allows None == Time() comparison
        if obstime == self._time or obstime is None:
            time_has_changed = False
        else:
            time_has_changed = True
            self._time = Time(obstime)   # This will catch datetimes, but Time() object should be passed
            self.date = obstime.to_datetime()  # keep the ephem.Observer date in sync
        # Rotation is quite slow, only recompute if time or frequency has changed, or it has never been run
        if time_has_changed or self.observed_sky is None:
            # Get RA and DEC of zenith
            ra_zen, dec_zen = self.radec_of(0, np.pi/2)
            sc_zen = SkyCoord(ra_zen, dec_zen, unit=('rad', 'rad'))
            pix_zen = sky2hpix(self._n_side, sc_zen)
            vec_zen = hp.pix2vec(self._n_side, pix_zen)
            # Convert to degrees
            ra_zen *= (180 / np.pi)
            dec_zen *= (180 / np.pi)
            # Generate below-horizon mask using query_disc
            # (pixels within 90 deg of zenith are visible; everything else masked)
            mask = np.ones(shape=self._n_pix, dtype='bool')
            pix_visible = hp.query_disc(self._n_side, vec=vec_zen, radius=np.pi/2)
            mask[pix_visible] = 0
            self._mask = mask
            # Transform from Galactic coordinates to Equatorial
            rot = hp.Rotator(coord=['G', 'C'])
            eq_theta, eq_phi = rot(self._theta, self._phi)
            # Convert from Equatorial colatitude and longitude to normal RA and DEC
            dec = 90.0 - np.abs(eq_theta*(180/np.pi))
            ra = ( (eq_phi + 2*np.pi) % (2*np.pi) )*(180/np.pi)
            # Apply rotation to convert from Galactic to Equatorial and center on zenith
            hrot = hp.Rotator(rot=[ra_zen, dec_zen], coord=['G', 'C'], inv=True)
            g0, g1 = hrot(self._theta, self._phi)
            pix0 = hp.ang2pix(self._n_side, g0, g1)
            self._pix0 = pix0   # cached pixel-index remapping for this time
            # Remap RA/DEC, sky and mask through the same pixel permutation.
            dec_rotated = dec[self._pix0]
            ra_rotated = ra[self._pix0]
            self._observed_ra = ra_rotated
            self._observed_dec = dec_rotated
            sky_rotated = sky[self._pix0]
            mask_rotated = self._mask[self._pix0]
            self.observed_sky = hp.ma(sky_rotated)
            self.observed_sky.mask = mask_rotated
        return self.observed_sky
    def view(self, logged=False, show=False, **kwargs):
        """ View the local sky, in orthographic projection.
        Parameters
        ----------
        logged: bool
            Default False, return the log2 image
        """
        sky = self.observed_sky
        if logged:
            sky = np.log2(sky)
        hp.orthview(sky, half_sky=True, **kwargs)
        if show:
            show_plt()
        return sky
    @property
    def observed_gsm(self):
        """ Return the GSM (Mollweide), with below-horizon area masked. """
        sky = self.observed_sky
        # Get RA and DEC of zenith
        ra_rad, dec_rad = self.radec_of(0, np.pi / 2)
        ra_deg  = ra_rad / np.pi * 180
        dec_deg = dec_rad / np.pi * 180
        # Apply rotation
        # (undo the zenith-centering applied in generate(); presumably the
        # inverse of hrot above -- verify if modifying this logic)
        derotate = hp.Rotator(rot=[ra_deg, dec_deg])
        g0, g1 = derotate(self._theta, self._phi)
        pix0 = hp.ang2pix(self._n_side, g0, g1)
        sky = sky[pix0]
        # Then rotate back from Equatorial into Galactic coordinates.
        coordrotate = hp.Rotator(coord=['C', 'G'], inv=True)
        g0, g1 = coordrotate(self._theta, self._phi)
        pix0 = hp.ang2pix(self._n_side, g0, g1)
        sky = sky[pix0]
        return sky
    def view_observed_gsm(self, logged=False, show=False, **kwargs):
        """ View the GSM (Mollweide), with below-horizon area masked.
        Args:
            logged (bool): Apply log2 to data (default False)
            show (bool): Call plt.show() (default False)
        Returns:
            sky (np.array): Healpix map of observed GSM.
        """
        sky = self.observed_gsm
        if logged:
            sky = np.log2(sky)
        hp.mollview(sky, coord='G', **kwargs)
        if show:
            show_plt()
        return sky
|
telegraphicREPO_NAMEpygdsmPATH_START.@pygdsm_extracted@pygdsm-master@pygdsm@base_observer.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "wcoulton/CompressedFisher",
"repo_path": "CompressedFisher_extracted/CompressedFisher-main/docs/source/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import pathlib
import sys
# Imported so a missing theme fails loudly at config time; the theme itself
# is selected by name in html_theme below.
import sphinx_rtd_theme
# Make the package root (two levels above docs/source) importable so
# autodoc can find the CompressedFisher modules.
sys.path.insert(0, pathlib.Path(__file__).parents[2].resolve().as_posix())
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'CompressedFisher'
copyright = '2022, William Coulton'
author = 'William Coulton'
release = '0.1'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
    'sphinx.ext.duration',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx_rtd_theme'
]
templates_path = ['_templates']
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# Mock the heavy numeric dependencies so autodoc can import the package
# on builders (e.g. Read the Docs) where they are not installed.
autodoc_mock_imports = ["numpy",'scipy']
|
wcoultonREPO_NAMECompressedFisherPATH_START.@CompressedFisher_extracted@CompressedFisher-main@docs@source@conf.py@.PATH_END.py
|
{
"filename": "test_reader.py",
"repo_name": "rmjarvis/TreeCorr",
"repo_path": "TreeCorr_extracted/TreeCorr-main/tests/test_reader.py",
"type": "Python"
}
|
# Copyright (c) 2003-2024 by Mike Jarvis
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
import os
import sys
import numpy as np
import warnings
from unittest import mock
from treecorr.reader import FitsReader, HdfReader, PandasReader, AsciiReader, ParquetReader
from treecorr.writer import FitsWriter, HdfWriter, AsciiWriter
from test_helper import get_from_wiki, assert_raises, timer, CaptureLog
from treecorr.util import make_writer, make_reader
@timer
def test_fits_reader():
    """Exercise FitsReader/FitsWriter end to end: context-manager guards,
    extension validation, column/param/array round-trips, and the
    make_writer/make_reader factory functions."""
    try:
        import fitsio
    except ImportError:
        # Just once emit a real warning, so it shows up when running pytest.
        warnings.warn("Skipping some tests because fitsio is not installed.")
        print('Skip test_fits_reader, since fitsio not installed')
        return
    get_from_wiki('Aardvark.fit')
    r = FitsReader(os.path.join('data','Aardvark.fit'))
    # Check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0,10,2), ext=1)
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', ext=1)
    with assert_raises(RuntimeError):
        r.row_count()
    with assert_raises(RuntimeError):
        r.names(ext=1)
    with assert_raises(RuntimeError):
        r.names()
    with assert_raises(RuntimeError):
        # __contains__ must also refuse to work outside the context
        1 in r
    with r:
        # Extensions may be addressed by name or by number.
        assert_raises(ValueError, r.check_valid_ext, 'invalid')
        assert_raises(ValueError, r.check_valid_ext, 0)
        r.check_valid_ext('AARDWOLF')
        r.check_valid_ext(1)
        # Default ext is 1
        assert r.default_ext == 1
        # Default ext is "in" reader
        assert 1 in r
        s = slice(0, 10, 2)
        for ext in [1, 'AARDWOLF']:
            data = r.read(['RA'], s, ext=ext)
            dec = r.read('DEC', s, ext=ext)
            assert data['RA'].size == 5
            assert dec.size == 5
            assert r.row_count('RA', ext=ext) == 390935
            assert r.row_count('GAMMA1', ext=ext) == 390935
            assert set(r.names(ext=ext)) == set("INDEX RA DEC Z EPSILON GAMMA1 GAMMA2 KAPPA MU".split())
            assert set(r.names(ext=ext)) == set(r.names())
        # Can read without slice or ext to use defaults
        assert r.row_count() == 390935
        g2 = r.read('GAMMA2')
        assert len(g2) == 390935
        d = r.read(['KAPPA', 'MU'])
        assert len(d['KAPPA']) == 390935
        assert len(d['MU']) == 390935
        kappa = d['KAPPA']
        mu = d['MU']
        # check we can also index by integer, not just number
        d = r.read(['DEC'], np.arange(10), ext='AARDWOLF')
        assert d.size==10
    # Again check things not allowed if not in context
    # (leaving the with-block must invalidate the reader)
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0,10,2), ext=1)
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', ext=1)
    with assert_raises(RuntimeError):
        r.row_count()
    with assert_raises(RuntimeError):
        r.names(ext=1)
    with assert_raises(RuntimeError):
        r.names()
    with assert_raises(RuntimeError):
        1 in r
    # Check writer too.
    with FitsWriter(os.path.join('output','test_fits_writer.fits')) as w:
        w.write(['KAPPA', 'MU'], [kappa, mu], params={'test': True}, ext='KM')
    with FitsReader(os.path.join('output','test_fits_writer.fits')) as r:
        params = r.read_params(ext='KM')
        data = r.read_data(ext='KM')
        with assert_raises(OSError):
            params = r.read_params(ext='KK')
    assert params['test'] is True
    assert np.array_equal(data['KAPPA'], kappa)
    assert np.array_equal(data['MU'], mu)
    # Test write_array, read_array
    cov = np.random.normal(5, 1, size=(10,10))
    with FitsWriter(os.path.join('output','test_fits_writer_ar.fits')) as w:
        w.write_array(cov)
        w.write_array(2*cov, ext='double')
    with FitsReader(os.path.join('output','test_fits_writer_ar.fits')) as r:
        cov1 = r.read_array(cov.shape)
        cov2 = r.read_array(cov.shape, ext='double')
    np.testing.assert_array_equal(cov1, cov)
    np.testing.assert_array_equal(cov2, 2*cov)
    # Use make_writer, make_reader
    with make_writer(os.path.join('output','test_fits_writer.fits')) as w:
        w.write(['KAPPA', 'MU'], [kappa, mu], params={'test': True})
    with make_reader(os.path.join('output','test_fits_writer.fits')) as r:
        params = r.read_params()
        data = r.read_data()
    assert params['test'] is True
    assert np.array_equal(data['KAPPA'], kappa)
    assert np.array_equal(data['MU'], mu)
    # No params
    with make_writer(os.path.join('output','test_fits_writer.fits')) as w:
        w.write(['KAPPA', 'MU'], [kappa, mu])
    with make_reader(os.path.join('output','test_fits_writer.fits')) as r:
        params = r.read_params()
        data = r.read_data()
    assert 'test' not in params  # The test key isn't in params
    assert params['naxis1'] == 16  # But there are all the regular fits header items.
    assert params['naxis2'] == 390935
    assert np.array_equal(data['KAPPA'], kappa)
    assert np.array_equal(data['MU'], mu)
    with assert_raises(ValueError):
        make_writer(os.path.join('output','test_fits_writer.fits'), file_type='invalid')
    with assert_raises(ValueError):
        make_reader(os.path.join('output','test_fits_writer.fits'), file_type='invalid')
    # Not allowed to write when not in with context
    w = FitsWriter(os.path.join('output','test_fits_writer.fits'))
    with assert_raises(RuntimeError):
        w.write(['KAPPA', 'MU'], [kappa, mu], params={'test': True}, ext='KM')
    # Simulate a missing fitsio to check the error paths of both classes.
    with mock.patch.dict(sys.modules, {'fitsio':None}):
        with CaptureLog() as cl:
            with assert_raises(ImportError):
                FitsReader(os.path.join('data','Aardvark.fit'), logger=cl.logger)
        assert 'Cannot read' in cl.output
        with CaptureLog() as cl:
            with assert_raises(ImportError):
                FitsWriter(os.path.join('output','test_fits_writer.fits'), logger=cl.logger)
        assert 'Cannot write to' in cl.output
@timer
def test_hdf_reader():
    """Exercise HdfReader/HdfWriter: context-manager guards, the single '/'
    extension, column reads, and param/array round-trips."""
    try:
        import h5py  # noqa: F401
    except ImportError:
        warnings.warn("Skipping some tests because h5py is not installed.")
        print('Skipping HdfReader tests, since h5py not installed.')
        return
    get_from_wiki('Aardvark.hdf5')
    r = HdfReader(os.path.join('data','Aardvark.hdf5'))
    # Check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0,10,2), ext='/')
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', ext='/')
    with assert_raises(RuntimeError):
        r.row_count('DEC')
    with assert_raises(RuntimeError):
        r.names(ext='/')
    with assert_raises(RuntimeError):
        r.names()
    with assert_raises(RuntimeError):
        '/' in r
    with r:
        # '/' is the only extension in this file.
        # TODO: Add an hdf5 example with other valid choices for ext
        assert_raises(ValueError, r.check_valid_ext, 'invalid')
        r.check_valid_ext('/')
        # Default ext is '/'
        assert r.default_ext == '/'
        # Default ext is "in" reader
        assert '/' in r
        s = slice(0, 10, 2)
        data = r.read(['RA'], s)
        dec = r.read('DEC', s)
        assert data['RA'].size == 5
        assert dec.size == 5
        assert r.row_count('RA') == 390935
        assert r.row_count('RA', ext='/') == 390935
        assert r.row_count('GAMMA1') == 390935
        # Unlike the other readers, this needs a column name.
        with assert_raises(TypeError):
            r.row_count()
        assert set(r.names()) == set("INDEX RA DEC Z EPSILON GAMMA1 GAMMA2 KAPPA MU".split())
        assert set(r.names(ext='/')) == set(r.names())
        # Can read without slice or ext to use defaults
        g2 = r.read('GAMMA2')
        assert len(g2) == 390935
        d = r.read(['KAPPA', 'MU'])
        assert len(d['KAPPA']) == 390935
        assert len(d['MU']) == 390935
        kappa = d['KAPPA']
        mu = d['MU']
    # Again check things not allowed if not in context
    # (leaving the with-block must invalidate the reader)
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0,10,2), ext='/')
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', ext='/')
    with assert_raises(RuntimeError):
        r.row_count('DEC')
    with assert_raises(RuntimeError):
        r.names(ext='/')
    with assert_raises(RuntimeError):
        r.names()
    with assert_raises(RuntimeError):
        '/' in r
    # Check writer too.
    with HdfWriter(os.path.join('output','test_hdf_writer.hdf')) as w:
        w.write(['KAPPA', 'MU'], [kappa, mu], params={'test': True}, ext='KM')
    with HdfReader(os.path.join('output','test_hdf_writer.hdf')) as r:
        params = r.read_params(ext='KM')
        data = r.read_data(ext='KM')
        with assert_raises(OSError):
            params = r.read_params(ext='KK')
    assert params['test']
    assert np.array_equal(data['KAPPA'], kappa)
    assert np.array_equal(data['MU'], mu)
    # Test write_array, read_array
    cov = np.random.normal(5, 1, size=(10,10))
    with HdfWriter(os.path.join('output','test_fits_writer_ar.hdf')) as w:
        w.write_array(cov)
        w.write_array(2*cov, ext='double')
    with HdfReader(os.path.join('output','test_fits_writer_ar.hdf')) as r:
        cov1 = r.read_array(cov.shape)
        cov2 = r.read_array(cov.shape, ext='double')
    np.testing.assert_array_equal(cov1, cov)
    np.testing.assert_array_equal(cov2, 2*cov)
    # Not allowed to write when not in with context
    w = HdfWriter(os.path.join('output','test_hdf_writer.hdf'))
    with assert_raises(RuntimeError):
        w.write(['KAPPA', 'MU'], [kappa, mu], params={'test': True}, ext='KM')
@timer
def test_parquet_reader():
    """Exercise ParquetReader on the Aardvark catalog.

    Skipped (with a warning) when pandas or pyarrow is not installed.
    Checks context-manager enforcement, extension handling, sliced reads,
    row counts, and column names.
    """
    try:
        import pandas  # noqa: F401
        import pyarrow  # noqa: F401
    except ImportError:
        warnings.warn("Skipping some tests because pyarrow is not installed.")
        print('Skipping ParquetReader tests, since pandas or pyarrow not installed.')
        return
    get_from_wiki('Aardvark.parquet')
    r = ParquetReader(os.path.join('data','Aardvark.parquet'))
    # Check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0,10,2), ext=None)
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', ext=None)
    with assert_raises(RuntimeError):
        r.row_count('DEC')
    with assert_raises(RuntimeError):
        r.row_count()
    with assert_raises(RuntimeError):
        r.names(ext=None)
    with assert_raises(RuntimeError):
        r.names()
    with r:
        # None is the only extension in this file.
        assert_raises(ValueError, r.check_valid_ext, 'invalid')
        r.check_valid_ext(None)
        # Default ext is None.  (Use `is None`: identity check, per PEP 8.)
        assert r.default_ext is None
        # Default ext is "in" reader
        assert None in r
        s = slice(0, 10, 2)
        data = r.read(['RA'], s)
        dec = r.read('DEC', s)
        assert data['RA'].size == 5
        assert dec.size == 5
        assert r.row_count('RA') == 390935
        assert r.row_count('RA', ext=None) == 390935
        assert r.row_count('GAMMA1') == 390935
        assert r.row_count() == 390935
        print('names = ',set(r.names()))
        print('names = ',set("INDEX RA DEC Z GAMMA1 GAMMA2 KAPPA MU".split()))
        assert set(r.names()) == set("INDEX RA DEC Z GAMMA1 GAMMA2 KAPPA MU".split())
        assert set(r.names(ext=None)) == set(r.names())
    # Again check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0,10,2), ext=None)
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', ext=None)
    with assert_raises(RuntimeError):
        r.row_count('DEC')
    with assert_raises(RuntimeError):
        r.row_count()
    with assert_raises(RuntimeError):
        r.names(ext=None)
    with assert_raises(RuntimeError):
        r.names()
def _test_ascii_reader(r, has_names=True):
# Same tests for AsciiReader and PandasReader
# Check things not allowed if not in context
with assert_raises(RuntimeError):
r.read([1,3,9], None)
with assert_raises(RuntimeError):
r.read([1,3,9])
with assert_raises(RuntimeError):
r.read('ra')
with assert_raises(RuntimeError):
r.row_count(1, ext=None)
with assert_raises(RuntimeError):
r.row_count()
with assert_raises(RuntimeError):
r.names(ext=None)
with assert_raises(RuntimeError):
r.names()
with r:
# None is only value ext.
assert_raises(ValueError, r.check_valid_ext, 'invalid')
assert_raises(ValueError, r.check_valid_ext, '0')
assert_raises(ValueError, r.check_valid_ext, 1)
r.check_valid_ext(None)
assert r.default_ext is None
# Default ext is "in" reader
assert None in r
# cols are: ra, dec, x, y, k, g1, g2, w, z, r, wpos, flag
s = slice(0, 10, 2)
data = r.read([1,3,9], s)
dec = r.read(2, s)
assert sorted(data.keys()) == [1,3,9]
assert data[1].size == 5
assert data[3].size == 5
assert data[9].size == 5
assert dec.size == 5
# Check a few random values
assert data[1][0] == 0.34044927 # ra, row 1
assert data[3][4] == 0.01816738 # x, row 9
assert data[9][3] == 0.79008204 # z, row 7
assert r.row_count(1, ext=None) == 20
assert r.row_count() == 20
assert r.ncols == 12
for i in range(12):
assert str(i+1) in r.names()
all_data = r.read(range(1,r.ncols+1))
assert len(all_data) == 12
assert len(all_data[1]) == 20
assert r.row_count() == 20
# Check reading specific rows
s2 = np.array([0,6,8])
data2 = r.read([1,3,9], s2)
dec2 = r.read(2, s2)
assert sorted(data2.keys()) == [1,3,9]
assert data2[1].size == 3
assert data2[3].size == 3
assert data2[9].size == 3
assert dec2.size == 3
# Check the same values in this selection
assert data2[1][0] == 0.34044927 # ra, row 1
assert data2[3][2] == 0.01816738 # x, row 9
assert data2[9][1] == 0.79008204 # z, row 7
if not has_names:
return
# Repeat with column names
data = r.read(['ra','x','z'], s)
dec = r.read('dec', s)
assert sorted(data.keys()) == ['ra','x','z']
assert data['ra'].size == 5
assert data['x'].size == 5
assert data['z'].size == 5
assert dec.size == 5
# Check the same random values
assert data['ra'][0] == 0.34044927
assert data['x'][4] == 0.01816738
assert data['z'][3] == 0.79008204
assert r.row_count('ra', ext=None) == 20
assert r.row_count() == 20
assert r.ncols == 12
names = ['ra', 'dec', 'x', 'y', 'k', 'g1', 'g2', 'w', 'z', 'r', 'wpos', 'flag']
for name in names:
assert name in r.names()
all_data = r.read(names)
assert len(all_data) == 12
assert len(all_data['ra']) == 20
assert r.row_count() == 20
g1 = all_data['g1']
g2 = all_data['g2']
# Check reading specific rows
data2 = r.read(['ra','x','z'], s2)
dec2 = r.read('dec', s2)
assert sorted(data2.keys()) == ['ra','x','z']
assert data2['ra'].size == 3
assert data2['x'].size == 3
assert data2['z'].size == 3
assert dec2.size == 3
assert data2['ra'][0] == 0.34044927
assert data2['x'][2] == 0.01816738
assert data2['z'][1] == 0.79008204
# Again check things not allowed if not in context
with assert_raises(RuntimeError):
r.read([1,3,9], None)
with assert_raises(RuntimeError):
r.read([1,3,9])
with assert_raises(RuntimeError):
r.read('ra')
r.nrows = None
with assert_raises(RuntimeError):
r.row_count(1, ext=None)
with assert_raises(RuntimeError):
r.row_count()
with assert_raises(RuntimeError):
r.names(ext=None)
with assert_raises(RuntimeError):
r.names()
# Check writer too.
with AsciiWriter(os.path.join('output','test_ascii_writer.dat'), precision=16) as w:
w.write(['g1', 'g2'], [g1, g2], params={'test': True}, ext='g1g2')
with AsciiReader(os.path.join('output','test_ascii_writer.dat')) as r:
params = r.read_params(ext='g1g2')
data = r.read_data(ext='g1g2')
assert params['test']
assert np.array_equal(data['g1'], g1)
assert np.array_equal(data['g2'], g2)
with AsciiReader(os.path.join('output','test_ascii_writer.dat')) as r:
with assert_raises(OSError):
params = r.read_params(ext='gg')
# Test write_array, read_array
cov = np.random.normal(5, 1, size=(10,10))
with AsciiWriter(os.path.join('output','test_fits_writer_ar.dat'), precision=16) as w:
w.write_array(cov)
w.write_array(2*cov, ext='double')
with AsciiReader(os.path.join('output','test_fits_writer_ar.dat')) as r:
cov1 = r.read_array(cov.shape)
cov2 = r.read_array(cov.shape, ext='double')
np.testing.assert_array_equal(cov1, cov)
np.testing.assert_array_equal(cov2, 2*cov)
# Test no ext name
with AsciiWriter(os.path.join('output','test_ascii_writer.dat'), precision=16) as w:
w.write(['g1', 'g2'], [g1, g2], params={'test': True})
with AsciiReader(os.path.join('output','test_ascii_writer.dat')) as r:
params = r.read_params()
data = r.read_data()
assert params['test']
assert np.array_equal(data['g1'], g1)
assert np.array_equal(data['g2'], g2)
# And no params
with AsciiWriter(os.path.join('output','test_ascii_writer.dat'), precision=16) as w:
w.write(['g1', 'g2'], [g1, g2], ext='g1g2')
with AsciiReader(os.path.join('output','test_ascii_writer.dat')) as r:
params = r.read_params(ext='g1g2')
data = r.read_data(ext='g1g2')
assert params == {}
assert np.array_equal(data['g1'], g1)
assert np.array_equal(data['g2'], g2)
# Neither
with AsciiWriter(os.path.join('output','test_ascii_writer.dat'), precision=16) as w:
w.write(['g1', 'g2'], [g1, g2])
with AsciiReader(os.path.join('output','test_ascii_writer.dat')) as r:
params = r.read_params()
data = r.read_data()
assert params == {}
assert np.array_equal(data['g1'], g1)
assert np.array_equal(data['g2'], g2)
# Not allowed to write when not in with context
w = AsciiWriter(os.path.join('output','test_ascii_writer.dat'), precision=16)
with assert_raises(RuntimeError):
w.write(['g1', 'g2'], [g1, g2], params={'test': True}, ext='g1g2')
@timer
def test_ascii_reader():
    """Run the shared ASCII-reader checks on three equivalent data files."""
    # The three files hold identical data and differ only in their comment
    # lines; test2.dat carries no column names.
    for fname, has_names in (('test1.dat', True),
                             ('test2.dat', False),
                             ('test3.dat', True)):
        _test_ascii_reader(AsciiReader(os.path.join('data', fname)), has_names)
@timer
def test_pandas_reader():
    """Run the shared ASCII-reader checks with the pandas-backed reader."""
    try:
        import pandas  # noqa: F401
    except ImportError:
        warnings.warn("Skipping some tests because pandas is not installed.")
        print('Skipping PandasReader tests, since pandas not installed.')
        return
    # Same three equivalent files as test_ascii_reader; test2.dat lacks names.
    for fname, has_names in (('test1.dat', True),
                             ('test2.dat', False),
                             ('test3.dat', True)):
        _test_ascii_reader(PandasReader(os.path.join('data', fname)), has_names)
if __name__ == '__main__':
    # Run every reader test, in the same order, when executed as a script.
    for test_fn in (test_fits_reader, test_hdf_reader, test_parquet_reader,
                    test_ascii_reader, test_pandas_reader):
        test_fn()
|
rmjarvisREPO_NAMETreeCorrPATH_START.@TreeCorr_extracted@TreeCorr-main@tests@test_reader.py@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "halomod/halomod",
"repo_path": "halomod_extracted/halomod-main/tests/conftest.py",
"type": "Python"
}
|
from pathlib import Path
import pytest
@pytest.fixture(scope="session")
def datadir() -> Path:
    """Return the directory holding the test data files."""
    here = Path(__file__).parent
    return here / "data"
|
halomodREPO_NAMEhalomodPATH_START.@halomod_extracted@halomod-main@tests@conftest.py@.PATH_END.py
|
{
"filename": "wavfile.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/io/wavfile.py",
"type": "Python"
}
|
"""
Module to read / write wav files using NumPy arrays
Functions
---------
`read`: Return the sample rate (in samples/sec) and data from a WAV file.
`write`: Write a NumPy array as a WAV file.
"""
import io
import sys
import numpy as np
import struct
import warnings
from enum import IntEnum
__all__ = [
'WavFileWarning',
'read',
'write'
]
class WavFileWarning(UserWarning):
    # Warning category used by read() for recoverable problems in a WAV file
    # (e.g. premature EOF after the data chunk, or unrecognized chunk IDs).
    pass
class WAVE_FORMAT(IntEnum):
"""
WAVE form wFormatTag IDs
Complete list is in mmreg.h in Windows 10 SDK. ALAC and OPUS are the
newest additions, in v10.0.14393 2016-07
"""
UNKNOWN = 0x0000
PCM = 0x0001
ADPCM = 0x0002
IEEE_FLOAT = 0x0003
VSELP = 0x0004
IBM_CVSD = 0x0005
ALAW = 0x0006
MULAW = 0x0007
DTS = 0x0008
DRM = 0x0009
WMAVOICE9 = 0x000A
WMAVOICE10 = 0x000B
OKI_ADPCM = 0x0010
DVI_ADPCM = 0x0011
IMA_ADPCM = 0x0011 # Duplicate
MEDIASPACE_ADPCM = 0x0012
SIERRA_ADPCM = 0x0013
G723_ADPCM = 0x0014
DIGISTD = 0x0015
DIGIFIX = 0x0016
DIALOGIC_OKI_ADPCM = 0x0017
MEDIAVISION_ADPCM = 0x0018
CU_CODEC = 0x0019
HP_DYN_VOICE = 0x001A
YAMAHA_ADPCM = 0x0020
SONARC = 0x0021
DSPGROUP_TRUESPEECH = 0x0022
ECHOSC1 = 0x0023
AUDIOFILE_AF36 = 0x0024
APTX = 0x0025
AUDIOFILE_AF10 = 0x0026
PROSODY_1612 = 0x0027
LRC = 0x0028
DOLBY_AC2 = 0x0030
GSM610 = 0x0031
MSNAUDIO = 0x0032
ANTEX_ADPCME = 0x0033
CONTROL_RES_VQLPC = 0x0034
DIGIREAL = 0x0035
DIGIADPCM = 0x0036
CONTROL_RES_CR10 = 0x0037
NMS_VBXADPCM = 0x0038
CS_IMAADPCM = 0x0039
ECHOSC3 = 0x003A
ROCKWELL_ADPCM = 0x003B
ROCKWELL_DIGITALK = 0x003C
XEBEC = 0x003D
G721_ADPCM = 0x0040
G728_CELP = 0x0041
MSG723 = 0x0042
INTEL_G723_1 = 0x0043
INTEL_G729 = 0x0044
SHARP_G726 = 0x0045
MPEG = 0x0050
RT24 = 0x0052
PAC = 0x0053
MPEGLAYER3 = 0x0055
LUCENT_G723 = 0x0059
CIRRUS = 0x0060
ESPCM = 0x0061
VOXWARE = 0x0062
CANOPUS_ATRAC = 0x0063
G726_ADPCM = 0x0064
G722_ADPCM = 0x0065
DSAT = 0x0066
DSAT_DISPLAY = 0x0067
VOXWARE_BYTE_ALIGNED = 0x0069
VOXWARE_AC8 = 0x0070
VOXWARE_AC10 = 0x0071
VOXWARE_AC16 = 0x0072
VOXWARE_AC20 = 0x0073
VOXWARE_RT24 = 0x0074
VOXWARE_RT29 = 0x0075
VOXWARE_RT29HW = 0x0076
VOXWARE_VR12 = 0x0077
VOXWARE_VR18 = 0x0078
VOXWARE_TQ40 = 0x0079
VOXWARE_SC3 = 0x007A
VOXWARE_SC3_1 = 0x007B
SOFTSOUND = 0x0080
VOXWARE_TQ60 = 0x0081
MSRT24 = 0x0082
G729A = 0x0083
MVI_MVI2 = 0x0084
DF_G726 = 0x0085
DF_GSM610 = 0x0086
ISIAUDIO = 0x0088
ONLIVE = 0x0089
MULTITUDE_FT_SX20 = 0x008A
INFOCOM_ITS_G721_ADPCM = 0x008B
CONVEDIA_G729 = 0x008C
CONGRUENCY = 0x008D
SBC24 = 0x0091
DOLBY_AC3_SPDIF = 0x0092
MEDIASONIC_G723 = 0x0093
PROSODY_8KBPS = 0x0094
ZYXEL_ADPCM = 0x0097
PHILIPS_LPCBB = 0x0098
PACKED = 0x0099
MALDEN_PHONYTALK = 0x00A0
RACAL_RECORDER_GSM = 0x00A1
RACAL_RECORDER_G720_A = 0x00A2
RACAL_RECORDER_G723_1 = 0x00A3
RACAL_RECORDER_TETRA_ACELP = 0x00A4
NEC_AAC = 0x00B0
RAW_AAC1 = 0x00FF
RHETOREX_ADPCM = 0x0100
IRAT = 0x0101
VIVO_G723 = 0x0111
VIVO_SIREN = 0x0112
PHILIPS_CELP = 0x0120
PHILIPS_GRUNDIG = 0x0121
DIGITAL_G723 = 0x0123
SANYO_LD_ADPCM = 0x0125
SIPROLAB_ACEPLNET = 0x0130
SIPROLAB_ACELP4800 = 0x0131
SIPROLAB_ACELP8V3 = 0x0132
SIPROLAB_G729 = 0x0133
SIPROLAB_G729A = 0x0134
SIPROLAB_KELVIN = 0x0135
VOICEAGE_AMR = 0x0136
G726ADPCM = 0x0140
DICTAPHONE_CELP68 = 0x0141
DICTAPHONE_CELP54 = 0x0142
QUALCOMM_PUREVOICE = 0x0150
QUALCOMM_HALFRATE = 0x0151
TUBGSM = 0x0155
MSAUDIO1 = 0x0160
WMAUDIO2 = 0x0161
WMAUDIO3 = 0x0162
WMAUDIO_LOSSLESS = 0x0163
WMASPDIF = 0x0164
UNISYS_NAP_ADPCM = 0x0170
UNISYS_NAP_ULAW = 0x0171
UNISYS_NAP_ALAW = 0x0172
UNISYS_NAP_16K = 0x0173
SYCOM_ACM_SYC008 = 0x0174
SYCOM_ACM_SYC701_G726L = 0x0175
SYCOM_ACM_SYC701_CELP54 = 0x0176
SYCOM_ACM_SYC701_CELP68 = 0x0177
KNOWLEDGE_ADVENTURE_ADPCM = 0x0178
FRAUNHOFER_IIS_MPEG2_AAC = 0x0180
DTS_DS = 0x0190
CREATIVE_ADPCM = 0x0200
CREATIVE_FASTSPEECH8 = 0x0202
CREATIVE_FASTSPEECH10 = 0x0203
UHER_ADPCM = 0x0210
ULEAD_DV_AUDIO = 0x0215
ULEAD_DV_AUDIO_1 = 0x0216
QUARTERDECK = 0x0220
ILINK_VC = 0x0230
RAW_SPORT = 0x0240
ESST_AC3 = 0x0241
GENERIC_PASSTHRU = 0x0249
IPI_HSX = 0x0250
IPI_RPELP = 0x0251
CS2 = 0x0260
SONY_SCX = 0x0270
SONY_SCY = 0x0271
SONY_ATRAC3 = 0x0272
SONY_SPC = 0x0273
TELUM_AUDIO = 0x0280
TELUM_IA_AUDIO = 0x0281
NORCOM_VOICE_SYSTEMS_ADPCM = 0x0285
FM_TOWNS_SND = 0x0300
MICRONAS = 0x0350
MICRONAS_CELP833 = 0x0351
BTV_DIGITAL = 0x0400
INTEL_MUSIC_CODER = 0x0401
INDEO_AUDIO = 0x0402
QDESIGN_MUSIC = 0x0450
ON2_VP7_AUDIO = 0x0500
ON2_VP6_AUDIO = 0x0501
VME_VMPCM = 0x0680
TPC = 0x0681
LIGHTWAVE_LOSSLESS = 0x08AE
OLIGSM = 0x1000
OLIADPCM = 0x1001
OLICELP = 0x1002
OLISBC = 0x1003
OLIOPR = 0x1004
LH_CODEC = 0x1100
LH_CODEC_CELP = 0x1101
LH_CODEC_SBC8 = 0x1102
LH_CODEC_SBC12 = 0x1103
LH_CODEC_SBC16 = 0x1104
NORRIS = 0x1400
ISIAUDIO_2 = 0x1401
SOUNDSPACE_MUSICOMPRESS = 0x1500
MPEG_ADTS_AAC = 0x1600
MPEG_RAW_AAC = 0x1601
MPEG_LOAS = 0x1602
NOKIA_MPEG_ADTS_AAC = 0x1608
NOKIA_MPEG_RAW_AAC = 0x1609
VODAFONE_MPEG_ADTS_AAC = 0x160A
VODAFONE_MPEG_RAW_AAC = 0x160B
MPEG_HEAAC = 0x1610
VOXWARE_RT24_SPEECH = 0x181C
SONICFOUNDRY_LOSSLESS = 0x1971
INNINGS_TELECOM_ADPCM = 0x1979
LUCENT_SX8300P = 0x1C07
LUCENT_SX5363S = 0x1C0C
CUSEEME = 0x1F03
NTCSOFT_ALF2CM_ACM = 0x1FC4
DVM = 0x2000
DTS2 = 0x2001
MAKEAVIS = 0x3313
DIVIO_MPEG4_AAC = 0x4143
NOKIA_ADAPTIVE_MULTIRATE = 0x4201
DIVIO_G726 = 0x4243
LEAD_SPEECH = 0x434C
LEAD_VORBIS = 0x564C
WAVPACK_AUDIO = 0x5756
OGG_VORBIS_MODE_1 = 0x674F
OGG_VORBIS_MODE_2 = 0x6750
OGG_VORBIS_MODE_3 = 0x6751
OGG_VORBIS_MODE_1_PLUS = 0x676F
OGG_VORBIS_MODE_2_PLUS = 0x6770
OGG_VORBIS_MODE_3_PLUS = 0x6771
ALAC = 0x6C61
_3COM_NBX = 0x7000 # Can't have leading digit
OPUS = 0x704F
FAAD_AAC = 0x706D
AMR_NB = 0x7361
AMR_WB = 0x7362
AMR_WP = 0x7363
GSM_AMR_CBR = 0x7A21
GSM_AMR_VBR_SID = 0x7A22
COMVERSE_INFOSYS_G723_1 = 0xA100
COMVERSE_INFOSYS_AVQSBC = 0xA101
COMVERSE_INFOSYS_SBC = 0xA102
SYMBOL_G729_A = 0xA103
VOICEAGE_AMR_WB = 0xA104
INGENIENT_G726 = 0xA105
MPEG4_AAC = 0xA106
ENCORE_G726 = 0xA107
ZOLL_ASAO = 0xA108
SPEEX_VOICE = 0xA109
VIANIX_MASC = 0xA10A
WM9_SPECTRUM_ANALYZER = 0xA10B
WMF_SPECTRUM_ANAYZER = 0xA10C
GSM_610 = 0xA10D
GSM_620 = 0xA10E
GSM_660 = 0xA10F
GSM_690 = 0xA110
GSM_ADAPTIVE_MULTIRATE_WB = 0xA111
POLYCOM_G722 = 0xA112
POLYCOM_G728 = 0xA113
POLYCOM_G729_A = 0xA114
POLYCOM_SIREN = 0xA115
GLOBAL_IP_ILBC = 0xA116
RADIOTIME_TIME_SHIFT_RADIO = 0xA117
NICE_ACA = 0xA118
NICE_ADPCM = 0xA119
VOCORD_G721 = 0xA11A
VOCORD_G726 = 0xA11B
VOCORD_G722_1 = 0xA11C
VOCORD_G728 = 0xA11D
VOCORD_G729 = 0xA11E
VOCORD_G729_A = 0xA11F
VOCORD_G723_1 = 0xA120
VOCORD_LBC = 0xA121
NICE_G728 = 0xA122
FRACE_TELECOM_G729 = 0xA123
CODIAN = 0xA124
FLAC = 0xF1AC
EXTENSIBLE = 0xFFFE
DEVELOPMENT = 0xFFFF
KNOWN_WAVE_FORMATS = {WAVE_FORMAT.PCM, WAVE_FORMAT.IEEE_FLOAT}
def _raise_bad_format(format_tag):
    """Raise ValueError identifying an unsupported wave format tag."""
    # Prefer the symbolic enum name; fall back to a hex literal when the
    # tag is not in the WAVE_FORMAT registry at all.
    try:
        format_name = WAVE_FORMAT(format_tag).name
    except ValueError:
        format_name = f'{format_tag:#06x}'
    supported = ', '.join(fmt.name for fmt in KNOWN_WAVE_FORMATS)
    raise ValueError(
        f"Unknown wave file format: {format_name}. Supported formats: " + supported)
def _read_fmt_chunk(fid, is_big_endian):
    """
    Returns
    -------
    size : int
        size of format subchunk in bytes (minus 8 for "fmt " and itself)
    format_tag : int
        PCM, float, or compressed format
    channels : int
        number of channels
    fs : int
        sampling frequency in samples per second
    bytes_per_second : int
        overall byte rate for the file
    block_align : int
        bytes per sample, including all channels
    bit_depth : int
        bits per sample
    Notes
    -----
    Assumes file pointer is immediately after the 'fmt ' id
    """
    # The '>'/'<' prefix selects byte order for every struct.unpack below.
    if is_big_endian:
        fmt = '>'
    else:
        fmt = '<'
    size = struct.unpack(fmt+'I', fid.read(4))[0]
    # A compliant fmt chunk payload is at least 16 bytes.
    if size < 16:
        raise ValueError("Binary structure of wave file is not compliant")
    res = struct.unpack(fmt+'HHIIHH', fid.read(16))
    bytes_read = 16
    format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
    # WAVE_FORMAT_EXTENSIBLE stores the real format tag inside the SubFormat
    # GUID of an extension block; only read it if the declared size leaves
    # room for the 2-byte cbSize field.
    if format_tag == WAVE_FORMAT.EXTENSIBLE and size >= (16+2):
        ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
        bytes_read += 2
        if ext_chunk_size >= 22:
            extensible_chunk_data = fid.read(22)
            bytes_read += 22
            # SubFormat GUID follows wValidBitsPerSample (2 bytes) and
            # dwChannelMask (4 bytes) in the extension block.
            raw_guid = extensible_chunk_data[2+4:2+4+16]
            # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
            # MS GUID byte order: first three groups are native byte order,
            # rest is Big Endian
            if is_big_endian:
                tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
            else:
                tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
            if raw_guid.endswith(tail):
                # First 4 GUID bytes hold the actual format tag.
                format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
        else:
            raise ValueError("Binary structure of wave file is not compliant")
    if format_tag not in KNOWN_WAVE_FORMATS:
        _raise_bad_format(format_tag)
    # move file pointer to next chunk (skip any extra fmt bytes not parsed)
    if size > bytes_read:
        fid.read(size - bytes_read)
    # fmt should always be 16, 18 or 40, but handle it just in case
    _handle_pad_byte(fid, size)
    # For PCM the redundant byte-rate field must be self-consistent.
    if format_tag == WAVE_FORMAT.PCM:
        if bytes_per_second != fs * block_align:
            raise ValueError("WAV header is invalid: nAvgBytesPerSec must"
                             " equal product of nSamplesPerSec and"
                             " nBlockAlign, but file has nSamplesPerSec ="
                             f" {fs}, nBlockAlign = {block_align}, and"
                             f" nAvgBytesPerSec = {bytes_per_second}")
    return (size, format_tag, channels, fs, bytes_per_second, block_align,
            bit_depth)
def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, is_rf64,
                     block_align, mmap=False):
    """
    Notes
    -----
    Assumes file pointer is immediately after the 'data' id
    It's possible to not use all available bits in a container, or to store
    samples in a container bigger than necessary, so bytes_per_sample uses
    the actual reported container size (nBlockAlign / nChannels). Real-world
    examples:
    Adobe Audition's "24-bit packed int (type 1, 20-bit)"
        nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20
    http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav
    is:
        nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12
    http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf
    gives an example of:
        nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20
    """
    # Byte-order prefix for struct formats and numpy dtype strings.
    if is_big_endian:
        fmt = '>'
    else:
        fmt = '<'
    # Size of the data subchunk in bytes
    if not is_rf64:
        size = struct.unpack(fmt+'I', fid.read(4))[0]
    else:
        pos = fid.tell()
        # chunk size is stored in global file header for RF64
        fid.seek(28)
        size = struct.unpack('<Q', fid.read(8))[0]
        fid.seek(pos)
        # skip data chunk size as it is 0xFFFFFFF
        fid.read(4)
    # Number of bytes per sample (sample container size)
    bytes_per_sample = block_align // channels
    n_samples = size // bytes_per_sample
    if format_tag == WAVE_FORMAT.PCM:
        if 1 <= bit_depth <= 8:
            dtype = 'u1'  # WAV of 8-bit integer or less are unsigned
        elif bytes_per_sample in {3, 5, 6, 7}:
            # No compatible dtype.  Load as raw bytes for reshaping later.
            dtype = 'V1'
        elif bit_depth <= 64:
            # Remaining bit depths can map directly to signed numpy dtypes
            dtype = f'{fmt}i{bytes_per_sample}'
        else:
            raise ValueError("Unsupported bit depth: the WAV file "
                             f"has {bit_depth}-bit integer data.")
    elif format_tag == WAVE_FORMAT.IEEE_FLOAT:
        if bit_depth in {32, 64}:
            dtype = f'{fmt}f{bytes_per_sample}'
        else:
            raise ValueError("Unsupported bit depth: the WAV file "
                             f"has {bit_depth}-bit floating-point data.")
    else:
        _raise_bad_format(format_tag)
    start = fid.tell()
    if not mmap:
        try:
            # For the raw-byte ('V1') path, read every byte of the chunk;
            # otherwise read whole samples.
            count = size if dtype == 'V1' else n_samples
            data = np.fromfile(fid, dtype=dtype, count=count)
        except io.UnsupportedOperation:  # not a C-like file
            fid.seek(start, 0)  # just in case it seeked, though it shouldn't
            data = np.frombuffer(fid.read(size), dtype=dtype)
        if dtype == 'V1':
            # Rearrange raw bytes into smallest compatible numpy dtype
            dt = f'{fmt}i4' if bytes_per_sample == 3 else f'{fmt}i8'
            a = np.zeros((len(data) // bytes_per_sample, np.dtype(dt).itemsize),
                         dtype='V1')
            # Left-justify the packed bytes within the wider container so the
            # sample's MSB lands in the container's MSB (zero-fill the rest).
            if is_big_endian:
                a[:, :bytes_per_sample] = data.reshape((-1, bytes_per_sample))
            else:
                a[:, -bytes_per_sample:] = data.reshape((-1, bytes_per_sample))
            data = a.view(dt).reshape(a.shape[:-1])
    else:
        # Memory-mapping only works when samples fill a whole numpy itemsize.
        if bytes_per_sample in {1, 2, 4, 8}:
            start = fid.tell()
            # mode='c' is copy-on-write: in-memory writes don't touch the file.
            data = np.memmap(fid, dtype=dtype, mode='c', offset=start,
                             shape=(n_samples,))
            fid.seek(start + size)
        else:
            raise ValueError("mmap=True not compatible with "
                             f"{bytes_per_sample}-byte container size.")
    _handle_pad_byte(fid, size)
    # Interleaved multi-channel data becomes one row per frame.
    if channels > 1:
        data = data.reshape(-1, channels)
    return data
def _skip_unknown_chunk(fid, is_big_endian):
    """Advance *fid* past an unrecognized chunk, including any pad byte."""
    fmt = '>I' if is_big_endian else '<I'
    size_field = fid.read(4)
    # An empty read means we hit EOF: unpack() would raise on it, and there
    # is nothing left to skip in that case anyway.  (A size of 0 needs no
    # seek either, so the truthiness test covers both.)
    if size_field:
        chunk_size = struct.unpack(fmt, size_field)[0]
        fid.seek(chunk_size, 1)
        _handle_pad_byte(fid, chunk_size)
def _read_riff_chunk(fid):
str1 = fid.read(4) # File signature
if str1 == b'RIFF':
is_rf64 = False
is_big_endian = False
fmt = '<I'
elif str1 == b'RIFX':
is_rf64 = False
is_big_endian = True
fmt = '>I'
elif str1 == b'RF64':
is_rf64 = True
is_big_endian = False
fmt = '<Q'
else:
# There are also .wav files with "FFIR" or "XFIR" signatures?
raise ValueError(f"File format {repr(str1)} not understood. Only "
"'RIFF', 'RIFX', and 'RF64' supported.")
# Size of entire file
if not is_rf64:
file_size = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
else:
# Skip 0xFFFFFFFF (-1) bytes
fid.read(4)
str2 = fid.read(4)
str3 = fid.read(4)
if str3 != b'ds64':
raise ValueError("Invalid RF64 file: ds64 chunk not found.")
ds64_size = struct.unpack("<I", fid.read(4))[0]
file_size = struct.unpack(fmt, fid.read(8))[0] + 8
# Ignore additional attributes of ds64 chunk like sample count, tables, etc.
# and just skip to the next chunk
fid.seek(ds64_size - 8, 1)
if str2 != b'WAVE':
raise ValueError(f"Not a WAV file. RIFF form type is {repr(str2)}.")
return file_size, is_big_endian, is_rf64
def _handle_pad_byte(fid, size):
# "If the chunk size is an odd number of bytes, a pad byte with value zero
# is written after ckData." So we need to seek past this after each chunk.
if size % 2:
fid.seek(1, 1)
def read(filename, mmap=False):
"""
Open a WAV file.
Return the sample rate (in samples/sec) and data from an LPCM WAV file.
Parameters
----------
filename : string or open file handle
Input WAV file.
mmap : bool, optional
Whether to read data as memory-mapped (default: False). Not compatible
with some bit depths; see Notes. Only to be used on real files.
.. versionadded:: 0.12.0
Returns
-------
rate : int
Sample rate of WAV file.
data : numpy array
Data read from WAV file. Data-type is determined from the file;
see Notes. Data is 1-D for 1-channel WAV, or 2-D of shape
(Nsamples, Nchannels) otherwise. If a file-like input without a
C-like file descriptor (e.g., :class:`python:io.BytesIO`) is
passed, this will not be writeable.
Notes
-----
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit integer PCM -2147483648 +2147483647 int32
24-bit integer PCM -2147483648 +2147483392 int32
16-bit integer PCM -32768 +32767 int16
8-bit integer PCM 0 255 uint8
===================== =========== =========== =============
WAV files can specify arbitrary bit depth, and this function supports
reading any integer PCM depth from 1 to 64 bits. Data is returned in the
smallest compatible numpy int type, in left-justified format. 8-bit and
lower is unsigned, while 9-bit and higher is signed.
For example, 24-bit data will be stored as int32, with the MSB of the
24-bit data stored at the MSB of the int32, and typically the least
significant byte is 0x00. (However, if a file actually contains data past
its specified bit depth, those bits will be read and output, too. [2]_)
This bit justification and sign matches WAV's native internal format, which
allows memory mapping of WAV files that use 1, 2, 4, or 8 bytes per sample
(so 24-bit files cannot be memory-mapped, but 32-bit can).
IEEE float PCM in 32- or 64-bit format is supported, with or without mmap.
Values exceeding [-1, +1] are not clipped.
Non-linear PCM (mu-law, A-law) is not supported.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
.. [2] Adobe Systems Incorporated, "Adobe Audition 3 User Guide", section
"Audio file formats: 24-bit Packed Int (type 1, 20-bit)", 2007
Examples
--------
>>> from os.path import dirname, join as pjoin
>>> from scipy.io import wavfile
>>> import scipy.io
Get the filename for an example .wav file from the tests/data directory.
>>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data')
>>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')
Load the .wav file contents.
>>> samplerate, data = wavfile.read(wav_fname)
>>> print(f"number of channels = {data.shape[1]}")
number of channels = 2
>>> length = data.shape[0] / samplerate
>>> print(f"length = {length}s")
length = 0.01s
Plot the waveform.
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> time = np.linspace(0., length, data.shape[0])
>>> plt.plot(time, data[:, 0], label="Left channel")
>>> plt.plot(time, data[:, 1], label="Right channel")
>>> plt.legend()
>>> plt.xlabel("Time [s]")
>>> plt.ylabel("Amplitude")
>>> plt.show()
"""
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
try:
file_size, is_big_endian, is_rf64 = _read_riff_chunk(fid)
fmt_chunk_received = False
data_chunk_received = False
while fid.tell() < file_size:
# read the next chunk
chunk_id = fid.read(4)
if not chunk_id:
if data_chunk_received:
# End of file but data successfully read
warnings.warn(
f"Reached EOF prematurely; finished at {fid.tell():d} bytes, "
f"expected {file_size:d} bytes from header.",
WavFileWarning, stacklevel=2)
break
else:
raise ValueError("Unexpected end of file.")
elif len(chunk_id) < 4:
msg = f"Incomplete chunk ID: {repr(chunk_id)}"
# If we have the data, ignore the broken chunk
if fmt_chunk_received and data_chunk_received:
warnings.warn(msg + ", ignoring it.", WavFileWarning,
stacklevel=2)
else:
raise ValueError(msg)
if chunk_id == b'fmt ':
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
format_tag, channels, fs = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
block_align = fmt_chunk[5]
elif chunk_id == b'fact':
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id == b'data':
data_chunk_received = True
if not fmt_chunk_received:
raise ValueError("No fmt chunk before data")
data = _read_data_chunk(fid, format_tag, channels, bit_depth,
is_big_endian, is_rf64, block_align, mmap)
elif chunk_id == b'LIST':
# Someday this could be handled properly but for now skip it
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id in {b'JUNK', b'Fake'}:
# Skip alignment chunks without warning
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn("Chunk (non-data) not understood, skipping it.",
WavFileWarning, stacklevel=2)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
return fs, data
def write(filename, rate, data):
"""
Write a NumPy array as a WAV file.
Parameters
----------
filename : string or open file handle
Output wav file.
rate : int
The sample rate (in samples/sec).
data : ndarray
A 1-D or 2-D NumPy array of either integer or float data-type.
Notes
-----
* Writes a simple uncompressed WAV file.
* To write multiple-channels, use a 2-D array of shape
(Nsamples, Nchannels).
* The bits-per-sample and PCM/float will be determined by the data-type.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit PCM -2147483648 +2147483647 int32
16-bit PCM -32768 +32767 int16
8-bit PCM 0 255 uint8
===================== =========== =========== =============
Note that 8-bit PCM is unsigned.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
Examples
--------
Create a 100Hz sine wave, sampled at 44100Hz.
Write to 16-bit PCM, Mono.
>>> from scipy.io.wavfile import write
>>> import numpy as np
>>> samplerate = 44100; fs = 100
>>> t = np.linspace(0., 1., samplerate)
>>> amplitude = np.iinfo(np.int16).max
>>> data = amplitude * np.sin(2. * np.pi * fs * t)
>>> write("example.wav", samplerate, data.astype(np.int16))
"""
if hasattr(filename, 'write'):
fid = filename
else:
fid = open(filename, 'wb')
fs = rate
try:
dkind = data.dtype.kind
allowed_dtypes = ['float32', 'float64',
'uint8', 'int16', 'int32', 'int64']
if data.dtype.name not in allowed_dtypes:
raise ValueError(f"Unsupported data type '{data.dtype}'")
header_data = b''
header_data += b'RIFF'
header_data += b'\x00\x00\x00\x00'
header_data += b'WAVE'
# fmt chunk
header_data += b'fmt '
if dkind == 'f':
format_tag = WAVE_FORMAT.IEEE_FLOAT
else:
format_tag = WAVE_FORMAT.PCM
if data.ndim == 1:
channels = 1
else:
channels = data.shape[1]
bit_depth = data.dtype.itemsize * 8
bytes_per_second = fs*(bit_depth // 8)*channels
block_align = channels * (bit_depth // 8)
fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
bytes_per_second, block_align, bit_depth)
if not (dkind == 'i' or dkind == 'u'):
# add cbSize field for non-PCM files
fmt_chunk_data += b'\x00\x00'
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
# check data size (needs to be immediately before the data chunk)
# if too large for standard RIFF, use RF64 instead
resulting_file_size = len(header_data) + 4 + 4 + data.nbytes
is_rf64 = (resulting_file_size - 8) > 0xFFFFFFFF
if is_rf64:
header_data = b''
header_data += b'RF64'
header_data += b'\xFF\xFF\xFF\xFF'
header_data += b'WAVE'
header_data += b'ds64'
# size of ds64 chunk
header_data += struct.pack('<I', 28)
# will be filled later with real file size
header_data += struct.pack('<Q', 0)
header_data += struct.pack('<Q', data.nbytes)
header_data += struct.pack('<Q', data.shape[0])
# ignore 'table' field for now
header_data += struct.pack('<I', 0)
header_data += b'fmt '
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
# fact chunk (non-PCM files)
if not (dkind == 'i' or dkind == 'u'):
header_data += b'fact'
header_data += struct.pack('<II', 4, data.shape[0])
fid.write(header_data)
# data chunk
fid.write(b'data')
# write data chunk size, unless its too big in which case 0xFFFFFFFF is written
fid.write(struct.pack('<I', min(data.nbytes, 4294967295)))
if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
sys.byteorder == 'big'):
data = data.byteswap()
_array_tofile(fid, data)
# Determine file size and place it in correct
# position at start of the file or the data chunk.
size = fid.tell()
if not is_rf64:
fid.seek(4)
fid.write(struct.pack('<I', size-8))
else:
fid.seek(20)
fid.write(struct.pack('<Q', size-8))
finally:
if not hasattr(filename, 'write'):
fid.close()
else:
fid.seek(0)
def _array_tofile(fid, data):
# ravel gives a c-contiguous buffer
fid.write(data.ravel().view('b').data)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@io@wavfile.py@.PATH_END.py
|
{
"filename": "parameter.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/modeling/parameter.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Model parameter classes."""
import collections.abc
import copy
import html
import itertools
import logging
import numpy as np
from astropy import units as u
from astropy.table import Table
from gammapy.utils.interpolation import interpolation_scale
__all__ = ["Parameter", "Parameters", "PriorParameter", "PriorParameters"]
log = logging.getLogger(__name__)
def _get_parameters_str(parameters):
str_ = ""
for par in parameters:
if par.name == "amplitude":
value_format, error_format = "{:10.2e}", "{:7.1e}"
else:
value_format, error_format = "{:10.3f}", "{:7.2f}"
line = "\t{:21} {:8}: " + value_format + "\t {} {:<12s}\n"
if par._link_label_io is not None:
name = par._link_label_io
else:
name = par.name
if par.frozen:
frozen, error = "(frozen)", "\t\t"
else:
frozen = ""
try:
error = "+/- " + error_format.format(par.error)
except AttributeError:
error = ""
str_ += line.format(name, frozen, par.value, error, par.unit)
return str_.expandtabs(tabsize=2)
class Parameter:
    """A model parameter.

    Note that the parameter value has been split into
    a factor and scale like this::

        value = factor x scale

    Users should interact with the ``value``, ``quantity``
    or ``min`` and ``max`` properties and consider the fact
    that there is a ``factor`` and ``scale`` an implementation detail.

    That was introduced for numerical stability in parameter and error
    estimation methods, only in the Gammapy optimiser interface do we
    interact with the ``factor``, ``factor_min`` and ``factor_max`` properties,
    i.e. the optimiser "sees" the well-scaled problem.

    Parameters
    ----------
    name : str
        Name.
    value : float or `~astropy.units.Quantity`
        Value.
    scale : float, optional
        Scale (sometimes used in fitting).
    unit : `~astropy.units.Unit` or str, optional
        Unit.
    min : float, optional
        Minimum (sometimes used in fitting).
    max : float, optional
        Maximum (sometimes used in fitting).
    frozen : bool, optional
        Frozen (used in fitting).
    error : float
        Parameter error.
    scan_min : float
        Minimum value for the parameter scan. Overwrites scan_n_sigma.
    scan_max : float
        Maximum value for the parameter scan. Overwrites scan_n_sigma.
    scan_n_values: int
        Number of values to be used for the parameter scan.
    scan_n_sigma : int
        Number of sigmas to scan.
    scan_values: `numpy.array`
        Scan values. Overwrites all the scan keywords before.
    scale_method : {'scale10', 'factor1', None}
        Method used to set ``factor`` and ``scale``.
    interp : {"lin", "sqrt", "log"}
        Parameter scaling to use for the scan.
    prior : `~gammapy.modeling.models.Prior`
        Prior set on the parameter.
    """

    def __init__(
        self,
        name,
        value,
        unit="",
        scale=1,
        min=np.nan,
        max=np.nan,
        frozen=False,
        error=0,
        scan_min=None,
        scan_max=None,
        scan_n_values=11,
        scan_n_sigma=2,
        scan_values=None,
        scale_method="scale10",
        interp="lin",
        prior=None,
    ):
        if not isinstance(name, str):
            raise TypeError(f"Name must be string, got '{type(name)}' instead")

        self._name = name
        self._link_label_io = None
        self.scale = scale
        self.min = min
        self.max = max
        self.frozen = frozen
        self._error = error
        self._type = None

        # TODO: move this to a setter method that can be called from `__set__` also!
        # Having it here is bad: behaviour not clear if Quantity and `unit` is passed.
        if isinstance(value, u.Quantity) or isinstance(value, str):
            val = u.Quantity(value)
            self.value = val.value
            self.unit = val.unit
        else:
            self.value = float(value)
            self.unit = unit

        self.scan_min = scan_min
        self.scan_max = scan_max
        self.scan_values = scan_values
        self.scan_n_values = scan_n_values
        self.scan_n_sigma = scan_n_sigma
        self.interp = interp
        self.scale_method = scale_method
        self.prior = prior

    def __get__(self, instance, owner):
        # Descriptor protocol: model classes declare parameters as class
        # attributes; the per-instance `Parameter` object is stored in
        # ``instance.__dict__`` under the same name.
        if instance is None:
            return self

        par = instance.__dict__[self.name]
        par._type = getattr(instance, "type", None)
        return par

    def __set__(self, instance, value):
        # Only whole `Parameter` objects may be assigned; plain values must go
        # through e.g. ``model.par_name.value = ...``.
        if isinstance(value, Parameter):
            instance.__dict__[self.name] = value
        else:
            par = instance.__dict__[self.name]
            raise TypeError(f"Cannot assign {value!r} to parameter {par!r}")

    def __set_name__(self, owner, name):
        # Guard against a class attribute name differing from the name the
        # Parameter was constructed with.
        if not self._name == name:
            raise ValueError(f"Expected parameter name '{name}', got {self._name}")

    @property
    def prior(self):
        """Prior applied to the parameter as a `~gammapy.modeling.models.Prior`."""
        return self._prior

    @prior.setter
    def prior(self, value):
        if value is not None:
            from .models import Prior

            if isinstance(value, dict):
                from .models import Model

                self._prior = Model.from_dict({"prior": value})
            elif isinstance(value, Prior):
                self._prior = value
            else:
                raise TypeError(f"Invalid type: {value!r}")
        else:
            self._prior = value

    def prior_stat_sum(self):
        """Evaluate the prior for the current value; None if no prior is set."""
        if self.prior is not None:
            return self.prior(self)

    @property
    def type(self):
        """Parameter type, set by the owning model via the descriptor protocol."""
        return self._type

    @property
    def error(self):
        """Parameter error as a float (in units of ``unit``)."""
        return self._error

    @error.setter
    def error(self, value):
        self._error = float(u.Quantity(value, unit=self.unit).value)

    @property
    def name(self):
        """Name as a string."""
        return self._name

    @property
    def factor(self):
        """Factor as a float."""
        return self._factor

    @factor.setter
    def factor(self, val):
        self._factor = float(val)

    @property
    def scale(self):
        """Scale as a float."""
        return self._scale

    @scale.setter
    def scale(self, val):
        self._scale = float(val)

    @property
    def unit(self):
        """Unit as a `~astropy.units.Unit` object."""
        return self._unit

    @unit.setter
    def unit(self, val):
        self._unit = u.Unit(val)

    @property
    def min(self):
        """Minimum as a float."""
        return self._min

    @min.setter
    def min(self, val):
        """`~astropy.table.Table` has masked values for NaN. Replacing with NaN."""
        if isinstance(val, np.ma.core.MaskedConstant):
            self._min = np.nan
        else:
            self._min = float(val)

    @property
    def factor_min(self):
        """Factor minimum as a float.

        This ``factor_min = min / scale`` is for the optimizer interface.
        """
        return self.min / self.scale

    @property
    def max(self):
        """Maximum as a float."""
        return self._max

    @max.setter
    def max(self, val):
        """`~astropy.table.Table` has masked values for NaN. Replacing with NaN."""
        if isinstance(val, np.ma.core.MaskedConstant):
            self._max = np.nan
        else:
            self._max = float(val)

    @property
    def factor_max(self):
        """Factor maximum as a float.

        This ``factor_max = max / scale`` is for the optimizer interface.
        """
        return self.max / self.scale

    @property
    def scale_method(self):
        """Method used to set ``factor`` and ``scale``."""
        return self._scale_method

    @scale_method.setter
    def scale_method(self, val):
        if val not in ["scale10", "factor1"] and val is not None:
            raise ValueError(f"Invalid method: {val}")
        self._scale_method = val

    @property
    def frozen(self):
        """Frozen (used in fitting) (bool)."""
        return self._frozen

    @frozen.setter
    def frozen(self, val):
        # Accept the string forms produced by (de)serialisation. Compare
        # explicitly: ``bool("False")`` is True, so the previous
        # ``val = bool(val)`` silently froze parameters stored as "False".
        if val in ["True", "False"]:
            val = val == "True"
        if not isinstance(val, bool) and not isinstance(val, np.bool_):
            raise TypeError(f"Invalid type: {val}, {type(val)}")
        self._frozen = val

    @property
    def value(self):
        """Value = factor x scale (float)."""
        return self._factor * self._scale

    @value.setter
    def value(self, val):
        self.factor = float(val) / self._scale

    @property
    def quantity(self):
        """Value times unit as a `~astropy.units.Quantity`."""
        return self.value * self.unit

    @quantity.setter
    def quantity(self, val):
        val = u.Quantity(val)

        if not val.unit.is_equivalent(self.unit):
            raise u.UnitConversionError(
                f"Unit must be equivalent to {self.unit} for parameter {self.name}"
            )

        self.value = val.value
        self.unit = val.unit

    # TODO: possibly allow to set this independently
    @property
    def conf_min(self):
        """Confidence minimum value as a `float`.

        Return parameter minimum if defined, otherwise return the scan_min.
        """
        if not np.isnan(self.min):
            return self.min
        else:
            return self.scan_min

    # TODO: possibly allow to set this independently
    @property
    def conf_max(self):
        """Confidence maximum value as a `float`.

        Return parameter maximum if defined, otherwise return the scan_max.
        """
        if not np.isnan(self.max):
            return self.max
        else:
            return self.scan_max

    @property
    def scan_min(self):
        """Stat scan minimum.

        Defaults to ``value - error * scan_n_sigma`` when not set explicitly.
        """
        if self._scan_min is None:
            return self.value - self.error * self.scan_n_sigma

        return self._scan_min

    @property
    def scan_max(self):
        """Stat scan maximum.

        Defaults to ``value + error * scan_n_sigma`` when not set explicitly.
        """
        if self._scan_max is None:
            return self.value + self.error * self.scan_n_sigma

        return self._scan_max

    @scan_min.setter
    def scan_min(self, value):
        """Stat scan minimum setter."""
        self._scan_min = value

    @scan_max.setter
    def scan_max(self, value):
        """Stat scan maximum setter."""
        self._scan_max = value

    @property
    def scan_n_sigma(self):
        """Stat scan n sigma."""
        return self._scan_n_sigma

    @scan_n_sigma.setter
    def scan_n_sigma(self, n_sigma):
        """Stat scan n sigma."""
        self._scan_n_sigma = int(n_sigma)

    @property
    def scan_values(self):
        """Stat scan values as a `~numpy.ndarray`.

        When not set explicitly, values are generated on the interpolation
        scale (``interp``) between ``scan_min`` and ``scan_max``.
        """
        if self._scan_values is None:
            scale = interpolation_scale(self.interp)
            parmin, parmax = scale([self.scan_min, self.scan_max])
            values = np.linspace(parmin, parmax, self.scan_n_values)
            return scale.inverse(values)

        return self._scan_values

    @scan_values.setter
    def scan_values(self, values):
        """Set scan values."""
        self._scan_values = values

    def check_limits(self):
        """Emit a warning or error if value is outside the minimum/maximum range."""
        if not self.frozen:
            if (~np.isnan(self.min) and (self.value <= self.min)) or (
                ~np.isnan(self.max) and (self.value >= self.max)
            ):
                log.warning(
                    f"Value {self.value} is outside bounds [{self.min}, {self.max}]"
                    f" for parameter '{self.name}'"
                )

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(name={self.name!r}, value={self.value!r}, "
            f"factor={self.factor!r}, scale={self.scale!r}, unit={self.unit!r}, "
            f"min={self.min!r}, max={self.max!r}, frozen={self.frozen!r}, prior={self.prior!r}, id={hex(id(self))})"
        )

    def _repr_html_(self):
        try:
            return self.to_html()
        except AttributeError:
            return f"<pre>{html.escape(str(self))}</pre>"

    def copy(self):
        """Deep copy."""
        return copy.deepcopy(self)

    def update_from_dict(self, data):
        """Update parameters from a dictionary.

        Keys absent from ``data`` are left unchanged; in particular
        `to_dict` omits "prior" when no prior is set, so round-tripping
        must tolerate partial dictionaries.
        """
        keys = ["value", "unit", "min", "max", "frozen", "prior"]
        for k in keys:
            if k not in data:
                continue
            if k == "prior" and data[k] == "":
                data[k] = None
            setattr(self, k, data[k])

    def to_dict(self):
        """Convert to dictionary."""
        output = {
            "name": self.name,
            "value": self.value,
            "unit": self.unit.to_string("fits"),
            "error": self.error,
            "min": self.min,
            "max": self.max,
            "frozen": self.frozen,
            "interp": self.interp,
            "scale_method": self.scale_method,
        }

        if self._link_label_io is not None:
            output["link"] = self._link_label_io

        if self.prior is not None:
            output["prior"] = self.prior.to_dict()["prior"]
        return output

    def autoscale(self):
        """Autoscale the parameters.

        Set ``factor`` and ``scale`` according to ``scale_method`` attribute.

        Available ``scale_method``.

        * ``scale10`` sets ``scale`` to power of 10,
          so that abs(factor) is in the range 1 to 10
        * ``factor1`` sets ``factor, scale = 1, value``

        In both cases the sign of value is stored in ``factor``,
        i.e. the ``scale`` is always positive.
        If ``scale_method`` is None the scaling is ignored.
        """
        if self.scale_method == "scale10":
            value = self.value
            if value != 0:
                exponent = np.floor(np.log10(np.abs(value)))
                scale = np.power(10.0, exponent)
                self.factor = value / scale
                self.scale = scale

        elif self.scale_method == "factor1":
            self.factor, self.scale = 1, self.value
class Parameters(collections.abc.Sequence):
    """Parameters container.

    - List of `Parameter` objects.
    - Covariance matrix.

    Parameters
    ----------
    parameters : list of `Parameter`
        List of parameters.
    """

    def __init__(self, parameters=None):
        if parameters is None:
            parameters = []
        else:
            # Copy into a plain list so the container owns its sequence.
            parameters = list(parameters)

        self._parameters = parameters

    def _repr_html_(self):
        # `to_html` may not be defined on this class; fall back to plain text.
        try:
            return self.to_html()
        except AttributeError:
            return f"<pre>{html.escape(str(self))}</pre>"

    def check_limits(self):
        """Check parameter limits and emit a warning."""
        for par in self:
            par.check_limits()

    @property
    def prior(self):
        # One entry per parameter; entries are None for parameters without a prior.
        return [par.prior for par in self]

    def prior_stat_sum(self):
        # Sum of the prior statistic over all parameters that have a prior set.
        parameters_stat_sum = 0
        for par in self:
            if par.prior is not None:
                parameters_stat_sum += par.prior_stat_sum()
        return parameters_stat_sum

    @property
    def types(self):
        """Parameter types."""
        return [par.type for par in self]

    @property
    def min(self):
        """Parameter minima as a `numpy.ndarray`."""
        return np.array([_.min for _ in self._parameters], dtype=np.float64)

    @min.setter
    def min(self, min_array):
        """Parameter minima as a `numpy.ndarray`."""
        if not len(self) == len(min_array):
            raise ValueError("Minima must have same length as parameter list")

        for min_, par in zip(min_array, self):
            par.min = min_

    @property
    def max(self):
        """Parameter maxima as a `numpy.ndarray`."""
        return np.array([_.max for _ in self._parameters], dtype=np.float64)

    @max.setter
    def max(self, max_array):
        """Parameter maxima as a `numpy.ndarray`."""
        if not len(self) == len(max_array):
            raise ValueError("Maxima must have same length as parameter list")

        for max_, par in zip(max_array, self):
            par.max = max_

    @property
    def value(self):
        """Parameter values as a `numpy.ndarray`."""
        return np.array([_.value for _ in self._parameters], dtype=np.float64)

    @value.setter
    def value(self, values):
        """Parameter values as a `numpy.ndarray`."""
        if not len(self) == len(values):
            raise ValueError("Values must have same length as parameter list")

        for value, par in zip(values, self):
            par.value = value

    @classmethod
    def from_stack(cls, parameters_list):
        """Create `Parameters` by stacking a list of other `Parameters` objects.

        Parameters
        ----------
        parameters_list : list of `Parameters`
            List of `Parameters` objects.
        """
        pars = itertools.chain(*parameters_list)
        return cls(pars)

    def copy(self):
        """Deep copy."""
        return copy.deepcopy(self)

    @property
    def free_parameters(self):
        """List of free parameters."""
        return self.__class__([par for par in self._parameters if not par.frozen])

    @property
    def unique_parameters(self):
        """Unique parameters as a `Parameters` object."""
        # dict.fromkeys de-duplicates by object identity/equality while
        # preserving the original insertion order.
        return self.__class__(dict.fromkeys(self._parameters))

    @property
    def names(self):
        """List of parameter names."""
        return [par.name for par in self._parameters]

    def index(self, val):
        """Get position index for a given parameter.

        The input can be a parameter object, parameter name (str)
        or if a parameter index (int) is passed in, it is simply returned.
        """
        if isinstance(val, int):
            return val
        elif isinstance(val, Parameter):
            return self._parameters.index(val)
        elif isinstance(val, str):
            for idx, par in enumerate(self._parameters):
                if val == par.name:
                    return idx
            raise IndexError(f"No parameter: {val!r}")
        else:
            raise TypeError(f"Invalid type: {type(val)!r}")

    def __getitem__(self, key):
        """Access parameter by name, index or boolean mask."""
        if isinstance(key, np.ndarray) and key.dtype == bool:
            # Boolean mask selection returns a new container.
            return self.__class__(list(np.array(self._parameters)[key]))
        else:
            idx = self.index(key)
            return self._parameters[idx]

    def __len__(self):
        return len(self._parameters)

    def __add__(self, other):
        if isinstance(other, Parameters):
            return Parameters.from_stack([self, other])
        else:
            raise TypeError(f"Invalid type: {other!r}")

    def to_dict(self):
        # Serialise each parameter via Parameter.to_dict.
        data = []

        for par in self._parameters:
            data.append(par.to_dict())

        return data

    @staticmethod
    def _create_default_table():
        # Empty table with the canonical column names/dtypes used by to_table.
        name_to_type = {
            "type": "str",
            "name": "str",
            "value": "float",
            "unit": "str",
            "error": "float",
            "min": "float",
            "max": "float",
            "frozen": "bool",
            "link": "str",
            "prior": "str",
        }
        return Table(names=name_to_type.keys(), dtype=name_to_type.values())

    def to_table(self):
        """Convert parameter attributes to `~astropy.table.Table`."""
        table = self._create_default_table()

        for p in self._parameters:
            # Keep only the keys that exist as table columns; represent a
            # prior by its type string.
            d = {k: v for k, v in p.to_dict().items() if k in table.colnames}
            if "prior" in d:
                d["prior"] = d["prior"]["type"]
            table.add_row(d)

        table["value"].format = ".4e"
        for name in ["error", "min", "max"]:
            table[name].format = ".3e"

        return table

    def __eq__(self, other):
        # NOTE: equality here is *identity*-based per element ("p is p_new"),
        # i.e. two containers are equal only if they hold the same objects.
        all_equal = np.all([p is p_new for p, p_new in zip(self, other)])
        return all_equal and len(self) == len(other)

    @classmethod
    def from_dict(cls, data):
        parameters = []

        for par in data:
            # "link" and the legacy "is_norm" key are metadata, not
            # Parameter constructor arguments.
            link_label = par.pop("link", None)
            par.pop("is_norm", None)
            parameter = Parameter(**par)
            parameter._link_label_io = link_label
            parameters.append(parameter)

        return cls(parameters=parameters)

    def set_parameter_factors(self, factors):
        """Set factor of all parameters.

        Used in the optimizer interface.
        """
        idx = 0
        for parameter in self._parameters:
            if not parameter.frozen:
                parameter.factor = factors[idx]
                idx += 1

    def autoscale(self):
        """Autoscale all parameters.

        See :func:`~gammapy.modeling.Parameter.autoscale`.
        """
        for par in self._parameters:
            par.autoscale()

    def select(
        self,
        name=None,
        type=None,
        frozen=None,
    ):
        """Create a mask of models, true if all conditions are verified.

        Parameters
        ----------
        name : str or list, optional
            Name of the parameter. Default is None.
        type : {None, "spatial", "spectral", "temporal"}
            Type of models. Default is None.
        frozen : bool, optional
            Select frozen parameters if True, exclude them if False. Default is None.

        Returns
        -------
        parameters : `Parameters`
            Selected parameters.
        """
        selection = np.ones(len(self), dtype=bool)

        if name and not isinstance(name, list):
            name = [name]

        for idx, par in enumerate(self):
            if name:
                selection[idx] &= np.any([_ == par.name for _ in name])

            if type:
                selection[idx] &= type == par.type

            if frozen is not None:
                if frozen:
                    selection[idx] &= par.frozen
                else:
                    # NOTE(review): "~" is bitwise; this relies on par.frozen
                    # being a bool/np.bool_ so the &= reduces correctly — confirm.
                    selection[idx] &= ~par.frozen

        return self[selection]

    def freeze_all(self):
        """Freeze all parameters."""
        for par in self._parameters:
            par.frozen = True

    def unfreeze_all(self):
        """Unfreeze all parameters (even those frozen by default)."""
        for par in self._parameters:
            par.frozen = False

    def restore_status(self, restore_values=True):
        """Context manager to restore status.

        A copy of the values is made on enter,
        and those values are restored on exit.

        Parameters
        ----------
        restore_values : bool, optional
            Restore values if True, otherwise restore only frozen status. Default is True.

        Examples
        --------
        >>> from gammapy.modeling.models import PowerLawSpectralModel
        >>> pwl = PowerLawSpectralModel(index=2)
        >>> with pwl.parameters.restore_status():
        ...     pwl.parameters["index"].value = 3
        >>> print(pwl.parameters["index"].value) # doctest: +SKIP
        """
        return restore_parameters_status(self, restore_values)
class restore_parameters_status:
    """Context manager that snapshots parameter values and frozen flags at
    construction time and restores them on exit.

    Parameters
    ----------
    parameters : sequence
        Objects with ``value`` and ``frozen`` attributes.
    restore_values : bool, optional
        If False, only the frozen status is restored on exit.
    """

    def __init__(self, parameters, restore_values=True):
        self.restore_values = restore_values
        self._parameters = parameters
        # Snapshot is taken here (not in __enter__), matching the original
        # semantics of Parameters.restore_status().
        self.values = [par.value for par in parameters]
        self.frozen = [par.frozen for par in parameters]

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, exc_traceback):
        for saved_value, par, was_frozen in zip(
            self.values, self._parameters, self.frozen
        ):
            if self.restore_values:
                par.value = saved_value
            par.frozen = was_frozen
class PriorParameter(Parameter):
    """Parameter of a `~gammapy.modeling.models.Prior`.

    Same factor/scale decomposition as `Parameter`, but without the
    frozen/scan machinery.

    Parameters
    ----------
    name : str
        Name.
    value : float or `~astropy.units.Quantity`
        Value.
    unit : `~astropy.units.Unit` or str, optional
        Unit.
    scale : float, optional
        Scale (sometimes used in fitting).
    min, max : float, optional
        Bounds (sometimes used in fitting).
    error : float
        Parameter error.
    """

    def __init__(
        self,
        name,
        value,
        unit="",
        scale=1,
        min=np.nan,
        max=np.nan,
        error=0,
    ):
        if not isinstance(name, str):
            raise TypeError(f"Name must be string, got '{type(name)}' instead")

        self._name = name
        self.scale = scale
        self.min = min
        self.max = max
        self._error = error

        if isinstance(value, u.Quantity) or isinstance(value, str):
            val = u.Quantity(value)
            self.value = val.value
            self.unit = val.unit
        else:
            # Set `value` (not `factor`) for consistency with Parameter:
            # with the previous ``self.factor = value`` a non-default `scale`
            # silently multiplied the stored value (value = factor * scale).
            self.value = float(value)
            self.unit = unit
        self._type = "prior"

    def to_dict(self):
        """Convert to dictionary."""
        output = {
            "name": self.name,
            "value": self.value,
            "unit": self.unit.to_string("fits"),
            "error": self.error,
            "min": self.min,
            "max": self.max,
        }
        return output

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(name={self.name!r}, value={self.value!r}, "
            f"factor={self.factor!r}, scale={self.scale!r}, unit={self.unit!r}, "
            f"min={self.min!r}, max={self.max!r})"
        )
class PriorParameters(Parameters):
    """Container for `PriorParameter` objects."""

    def __init__(self, parameters=None):
        # Parameters.__init__ already normalises None / copies into a list.
        super().__init__(parameters=parameters)

    def to_table(self):
        """Convert parameter attributes to `~astropy.table.Table`."""
        rows = [
            {**dict(type=par.type), **par.to_dict()} for par in self._parameters
        ]
        table = Table(rows)

        table["value"].format = ".4e"
        for column in ["error", "min", "max"]:
            table[column].format = ".3e"

        return table

    @classmethod
    def from_dict(cls, data):
        """Build the container from a list of parameter dictionaries."""
        return cls(parameters=[PriorParameter(**par) for par in data])
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@modeling@parameter.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "mavrix93/LightCurvesClassifier",
"repo_path": "LightCurvesClassifier_extracted/LightCurvesClassifier-master/lcc_web/web/interface/static/css/__init__.py",
"type": "Python"
}
|
mavrix93REPO_NAMELightCurvesClassifierPATH_START.@LightCurvesClassifier_extracted@LightCurvesClassifier-master@lcc_web@web@interface@static@css@__init__.py@.PATH_END.py
|
|
{
"filename": "README.md",
"repo_name": "teuben/QAC",
"repo_path": "QAC_extracted/QAC-master/README.md",
"type": "Markdown"
}
|
# Quick Array Combinations (QAC)
QAC provides a set of functions that mostly call CASA tools and tasks
to help you combining data from a single dish and interferometer.
QAC hides some of the complexity of writing CASA scripts and
provide a simpler interface to array combination tools and tasks in
CASA.
An alternative abbreviation to QAC: Quick Access to CASA.
This project was conceived alongside the TP2VIS project, where it was
used to provide an easier way to call CASA, and perform regression
tests. We still keep these within QAC as they are not distributed with
[TP2VIS](https://github.com/tp2vis/distribute). In an earlier version
these functions were called QTP. We also used QAC for an ngVLA design
study, and was matured during the DC2019 project to deal with the
new style CASA6/python3.
See the
[INSTALL](INSTALL.md)
file for ways how to install and use these functions in your
[CASA](https://casa.nrao.edu/casa_obtaining.shtml)
shell.
For documentation on the available QAC routines, see [docs/qac.md](docs/qac.md).
## Example
Below a plot in which the top left panel is a selected channel from an
ALMA total power (TP) observation of the CO emissions of a small
region in the SMC. Overlayed on this greyscale are the pointing
centers of the 12-m Array. For one pointing the true extend of the 12
m field of view is given as well with the larger green circle. The
top right panel is the reconstructed TP map from the
pseudo-visibilities generated from a virtual interferometer emulating
the short spacings. The pure interferometric map that combines the 7m
and 12 m data is shown in the lower left panel, and combining the TP
visibilities with those of the 7m+12m arrays are then shown in the
lower right panel, now recovering the large scale flux, as well as the
finer scale structure.

### Benchmarks
A better-supported showcase of QAC functionality is currently in the **test/bench.py**, **bench0.py** and **sky1.py** routines [March 2018], as these were used in the
[SD2018](https://github.com/teuben/sd2018) workshop. Please note that the software in that repo is no longer maintained; updated versions can be found
within QAC.
## References
* CASA reference manual and cookbook : http://casa.nrao.edu/docs/cookbook/
* Measurement Set: https://casa.nrao.edu/casadocs/latest/reference-material/measurement-set
* MS V2 document: [MS v2 memo](https://casa.nrao.edu/casadocs/latest/reference-material/229-1.ps/@@download/file/229.ps)
* CASA simulations: https://casa.nrao.edu/casadocs/latest/simulation
* Simulations (in 4.4) https://casaguides.nrao.edu/index.php/Simulating_Observations_in_CASA_4.4
* See also our [workflow4](workflow4.md)
* CASA single dish imaging: https://casa.nrao.edu/casadocs/latest/single-dish-imaging
* Mangum et el. 2007: [OTF imaging technique](https://www.aanda.org/articles/aa/pdf/2007/41/aa7811-07.pdf)
* CASA feather: https://casa.nrao.edu/casadocs/latest/image-combination/feather
* CASA data weights and combination: https://casaguides.nrao.edu/index.php/DataWeightsAndCombination
* Nordic Tools SD2VIS: https://www.oso.nordic-alma.se/software-tools.php
* Kauffman's *Adding Zero-Spacing* workflow: https://sites.google.com/site/jenskauffmann/research-notes/adding-zero-spa
* Radio Imaging Combination Analysis (RICA) : https://gitlab.com/mileslucas/rica
* Papers of (historic) interest:
* [Ekers and Rots 1979](https://ui.adsabs.harvard.edu/abs/1979ASSL...76...61E)
* [Vogel et al. 1984](https://ui.adsabs.harvard.edu/abs/1984ApJ...283..655V)
* [Braun and Walterbos 1985](https://ui.adsabs.harvard.edu/abs/1985A%26A...143..307B)
* [Jorsater and van Moorsel 1995](https://ui.adsabs.harvard.edu/abs/1995AJ....110.2037J)
* [Kurono, Morita, Kamazaki 2009](https://ui.adsabs.harvard.edu/abs/2009PASJ...61..873K)
* [Koda et al. 2011](https://ui.adsabs.harvard.edu/abs/2011ApJS..193...19K)
* [Koda et al. 2019](https://ui.adsabs.harvard.edu/abs/2019PASP..131e4505K)
|
teubenREPO_NAMEQACPATH_START.@QAC_extracted@QAC-master@README.md@.PATH_END.py
|
{
"filename": "womcmwselector.py",
"repo_name": "msiebert1/UCSC_spectral_pipeline",
"repo_path": "UCSC_spectral_pipeline_extracted/UCSC_spectral_pipeline-master/spectral_reduction/tmath/wombat/womcmwselector.py",
"type": "Python"
}
|
def womcmwselector(hop):
    """Dispatch to womcom to combine two spectra using weights."""
    from tmath.wombat.womcom import womcom

    # womcom(hop, nspec, weighted): combine 2 spectra, weighting enabled.
    return womcom(hop, 2, True)
|
msiebert1REPO_NAMEUCSC_spectral_pipelinePATH_START.@UCSC_spectral_pipeline_extracted@UCSC_spectral_pipeline-master@spectral_reduction@tmath@wombat@womcmwselector.py@.PATH_END.py
|
{
"filename": "_highlight.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/contours/y/_highlight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HighlightValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean ``surface.contours.y.highlight`` property."""

    def __init__(
        self, plotly_name="highlight", parent_name="surface.contours.y", **kwargs
    ):
        # Pop the default so an explicit caller-supplied edit_type wins.
        edit_type = kwargs.pop("edit_type", "calc")
        super(HighlightValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@contours@y@_highlight.py@.PATH_END.py
|
{
"filename": "pinecone.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/vectorstores/pinecone.ipynb",
"type": "Jupyter Notebook"
}
|
# Pinecone
>[Pinecone](https://docs.pinecone.io/docs/overview) is a vector database with broad functionality.
This notebook shows how to use functionality related to the `Pinecone` vector database.
## Setup
To use the `PineconeVectorStore` you first need to install the partner package, as well as the other packages used throughout this notebook.
```python
%pip install -qU langchain-pinecone pinecone-notebooks
```
Migration note: if you are migrating from the `langchain_community.vectorstores` implementation of Pinecone, you may need to remove your `pinecone-client` v2 dependency before installing `langchain-pinecone`, which relies on `pinecone-client` v3.
### Credentials
Create a new Pinecone account, or sign into your existing one, and create an API key to use in this notebook.
```python
import getpass
import os
import time
from pinecone import Pinecone, ServerlessSpec
if not os.getenv("PINECONE_API_KEY"):
os.environ["PINECONE_API_KEY"] = getpass.getpass("Enter your Pinecone API key: ")
pinecone_api_key = os.environ.get("PINECONE_API_KEY")
pc = Pinecone(api_key=pinecone_api_key)
```
If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:
```python
# os.environ["LANGSMITH_API_KEY"] = getpass.getpass("Enter your LangSmith API key: ")
# os.environ["LANGSMITH_TRACING"] = "true"
```
## Initialization
Before initializing our vector store, let's connect to a Pinecone index. If one named `index_name` doesn't exist, it will be created.
```python
import time
index_name = "langchain-test-index" # change if desired
existing_indexes = [index_info["name"] for index_info in pc.list_indexes()]
if index_name not in existing_indexes:
pc.create_index(
name=index_name,
dimension=3072,
metric="cosine",
spec=ServerlessSpec(cloud="aws", region="us-east-1"),
)
while not pc.describe_index(index_name).status["ready"]:
time.sleep(1)
index = pc.Index(index_name)
```
Now that our Pinecone index is set up, we can initialize our vector store.
import EmbeddingTabs from "@theme/EmbeddingTabs";
<EmbeddingTabs/>
```python
# | output: false
# | echo: false
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings(model="text-embedding-3-large")
```
```python
from langchain_pinecone import PineconeVectorStore
vector_store = PineconeVectorStore(index=index, embedding=embeddings)
```
## Manage vector store
Once you have created your vector store, we can interact with it by adding and deleting different items.
### Add items to vector store
We can add items to our vector store by using the `add_documents` function.
```python
from uuid import uuid4
from langchain_core.documents import Document
document_1 = Document(
page_content="I had chocalate chip pancakes and scrambled eggs for breakfast this morning.",
metadata={"source": "tweet"},
)
document_2 = Document(
page_content="The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees.",
metadata={"source": "news"},
)
document_3 = Document(
page_content="Building an exciting new project with LangChain - come check it out!",
metadata={"source": "tweet"},
)
document_4 = Document(
page_content="Robbers broke into the city bank and stole $1 million in cash.",
metadata={"source": "news"},
)
document_5 = Document(
page_content="Wow! That was an amazing movie. I can't wait to see it again.",
metadata={"source": "tweet"},
)
document_6 = Document(
page_content="Is the new iPhone worth the price? Read this review to find out.",
metadata={"source": "website"},
)
document_7 = Document(
page_content="The top 10 soccer players in the world right now.",
metadata={"source": "website"},
)
document_8 = Document(
page_content="LangGraph is the best framework for building stateful, agentic applications!",
metadata={"source": "tweet"},
)
document_9 = Document(
page_content="The stock market is down 500 points today due to fears of a recession.",
metadata={"source": "news"},
)
document_10 = Document(
page_content="I have a bad feeling I am going to get deleted :(",
metadata={"source": "tweet"},
)
documents = [
document_1,
document_2,
document_3,
document_4,
document_5,
document_6,
document_7,
document_8,
document_9,
document_10,
]
uuids = [str(uuid4()) for _ in range(len(documents))]
vector_store.add_documents(documents=documents, ids=uuids)
```
['167b8681-5974-467f-adcb-6e987a18df01',
'd16010fd-41f8-4d49-9c22-c66d5555a3fe',
'ffcacfb3-2bc2-44c3-a039-c2256a905c0e',
'cf3bfc9f-5dc7-4f5e-bb41-edb957394126',
'e99b07eb-fdff-4cb9-baa8-619fd8efeed3',
'68c93033-a24f-40bd-8492-92fa26b631a4',
'b27a4ecb-b505-4c5d-89ff-526e3d103558',
'4868a9e6-e6fb-4079-b400-4a1dfbf0d4c4',
'921c0e9c-0550-4eb5-9a6c-ed44410788b2',
'c446fc23-64e8-47e7-8c19-ecf985e9411e']
### Delete items from vector store
```python
vector_store.delete(ids=[uuids[-1]])
```
## Query vector store
Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent.
### Query directly
Performing a simple similarity search can be done as follows:
```python
results = vector_store.similarity_search(
"LangChain provides abstractions to make working with LLMs easy",
k=2,
filter={"source": "tweet"},
)
for res in results:
print(f"* {res.page_content} [{res.metadata}]")
```
* Building an exciting new project with LangChain - come check it out! [{'source': 'tweet'}]
* LangGraph is the best framework for building stateful, agentic applications! [{'source': 'tweet'}]
#### Similarity search with score
You can also search with score:
```python
results = vector_store.similarity_search_with_score(
"Will it be hot tomorrow?", k=1, filter={"source": "news"}
)
for res, score in results:
print(f"* [SIM={score:3f}] {res.page_content} [{res.metadata}]")
```
* [SIM=0.553187] The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees. [{'source': 'news'}]
#### Other search methods
There are more search methods (such as MMR) not listed in this notebook, to find all of them be sure to read the [API reference](https://python.langchain.com/api_reference/pinecone/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html).
### Query by turning into retriever
You can also transform the vector store into a retriever for easier usage in your chains.
```python
retriever = vector_store.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"k": 1, "score_threshold": 0.5},
)
retriever.invoke("Stealing from the bank is a crime", filter={"source": "news"})
```
[Document(metadata={'source': 'news'}, page_content='Robbers broke into the city bank and stole $1 million in cash.')]
## Usage for retrieval-augmented generation
For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:
- [Tutorials](/docs/tutorials/)
- [How-to: Question and answer with RAG](https://python.langchain.com/docs/how_to/#qa-with-rag)
- [Retrieval conceptual docs](https://python.langchain.com/docs/concepts/retrieval)
## API reference
For detailed documentation of all __ModuleName__VectorStore features and configurations head to the API reference: https://python.langchain.com/api_reference/pinecone/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@vectorstores@pinecone.ipynb@.PATH_END.py
|
{
"filename": "_zmid.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/heatmap/_zmid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ZmidValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the heatmap trace's ``zmid`` property (a number)."""

    def __init__(self, plotly_name="zmid", parent_name="heatmap", **kwargs):
        # edit_type/implied_edits may be overridden by the code generator
        # through **kwargs; pop them so they are not passed twice.
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            implied_edits=kwargs.pop("implied_edits", {}),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@heatmap@_zmid.py@.PATH_END.py
|
{
"filename": "test_data_classifier.py",
"repo_name": "soar-telescope/goodman_pipeline",
"repo_path": "goodman_pipeline_extracted/goodman_pipeline-main/goodman_pipeline/images/tests/test_data_classifier.py",
"type": "Python"
}
|
from __future__ import absolute_import
import numpy as np
import os
import shutil
from astropy.io import fits
from ccdproc import CCDData
from unittest import TestCase, skip
from ..data_classifier import DataClassifier
class DataClassifierTests(TestCase):
    """Unit tests for :class:`DataClassifier` using synthetic Goodman FITS files."""

    def setUp(self):
        """Create a directory of fake spectroscopic data and a fresh classifier."""
        self.raw_path = os.path.join(
            os.path.dirname(__file__),
            '../../data/test_data/classify-data')
        if not os.path.isdir(self.raw_path):
            os.mkdir(self.raw_path)
        self.create_fake_spectroscopic_data()
        self.data_classifier = DataClassifier()

    def create_fake_spectroscopic_data(self):
        """Write 30 tiny FITS files (15 OBJECT + 15 COMP) into ``raw_path``.

        All files share one instrument configuration (Red camera,
        Spectroscopy WAVMODE); only OBSTYPE, OBJECT, OBSRA and OBSDEC vary
        between files.
        """
        if os.path.isdir(self.raw_path):
            card_values = [
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:23:24.285', 'obsdec': '-39:12:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:26:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:27:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:28:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
                {'obstype': 'OBJECT', 'object': 'NGC2070',
                 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:22:34.285', 'obsdec': '-39:23:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:26:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:27:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:28:34.285', 'obsdec': '-39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'},
                {'obstype': 'COMP', 'object': 'HgArNe',
                 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'}]
            # enumerate is clearer than indexing card_values via range(len(...))
            for index, cards in enumerate(card_values):
                ccd = CCDData(data=np.ones((3, 3)),
                              meta=fits.Header(),
                              unit='adu')
                ccd.header.set('DATE', value='2019-03-22', comment='nc')
                ccd.header.set('SLIT', value='1.0" long slit', comment='nc')
                ccd.header.set('DATE-OBS', value='2019-03-22T09:59:33.654',
                               comment='nc')
                ccd.header.set('OBSTYPE', value=cards['obstype'],
                               comment='nc')
                ccd.header.set('OBJECT', value=cards['object'],
                               comment='nc')
                ccd.header.set('EXPTIME', value='10', comment='nc')
                ccd.header.set('OBSRA', value=cards['obsra'],
                               comment='nc')
                ccd.header.set('OBSDEC', value=cards['obsdec'],
                               comment='nc')
                ccd.header.set('GRATING', value='SYZY_400', comment='nc')
                ccd.header.set('CAM_TARG', value='16.1', comment='nc')
                ccd.header.set('GRT_TARG', value='7.5', comment='nc')
                ccd.header.set('FILTER', value='<NO FILTER>', comment='nc')
                ccd.header.set('FILTER2', value='GG455', comment='nc')
                ccd.header.set('GAIN', value='1.48', comment='nc')
                ccd.header.set('RDNOISE', value='3.89', comment='nc')
                ccd.header.set('INSTCONF', value='Red', comment='nc')
                ccd.header.set('WAVMODE', value='Spectroscopy', comment='nc')
                ccd.write(os.path.join(self.raw_path,
                                       'test_file_{:03d}.fits'.format(index)))

    def tearDown(self):
        """Remove the synthetic data directory."""
        if os.path.isdir(self.raw_path):
            shutil.rmtree(self.raw_path)

    def test___repr__undefined(self):
        # __repr__ before classification should fail (no attributes set yet).
        with self.assertRaises(TypeError):
            self.data_classifier.__repr__()

    def test___repr__(self):
        self.data_classifier(raw_path=self.raw_path)
        result = self.data_classifier.__repr__()
        self.assertIn('Raw Path: {:s}'.format(self.raw_path), result)
        self.assertIn('Instrument: Red Camera', result)
        self.assertIn('Observing Technique: Spectroscopy', result)

    def test_data_classifier_expected_usage(self):
        assert isinstance(self.data_classifier, DataClassifier)
        self.data_classifier(raw_path=self.raw_path)
        self.assertEqual('Red', self.data_classifier.instrument)
        self.assertEqual('Spectroscopy', self.data_classifier.technique)

    def test_data_classifier_all_imaging(self):
        # Rewrite every file as Imaging; classification must follow.
        for _file in os.listdir(self.raw_path):
            raw_path_full = os.path.join(self.raw_path, _file)
            recovered_ccd = CCDData.read(raw_path_full, unit='adu')
            # recovered_ccd.header['INSTCONF'] = 'Blue'
            recovered_ccd.header['WAVMODE'] = 'Imaging'
            recovered_ccd.write(raw_path_full, overwrite=True)
        self.data_classifier(raw_path=self.raw_path)
        self.assertEqual('Imaging', self.data_classifier.technique)

    def test_data_classifier_mixed_technique(self):
        # One Imaging file among Spectroscopy files: Spectroscopy wins.
        sample_file = os.listdir(self.raw_path)[0]
        raw_path_full = os.path.join(self.raw_path, sample_file)
        recovered_ccd = CCDData.read(raw_path_full, unit='adu')
        # recovered_ccd.header['INSTCONF'] = 'Blue'
        recovered_ccd.header['WAVMODE'] = 'Imaging'
        recovered_ccd.write(raw_path_full, overwrite=True)
        self.data_classifier(raw_path=self.raw_path)
        self.assertEqual('Spectroscopy', self.data_classifier.technique)

    def test_data_classifier_mixed_instconf(self):
        # Mixed camera configurations are fatal: classifier exits.
        sample_file = os.listdir(self.raw_path)[0]
        raw_path_full = os.path.join(self.raw_path, sample_file)
        recovered_ccd = CCDData.read(raw_path_full, unit='adu')
        recovered_ccd.header['INSTCONF'] = 'Blue'
        # recovered_ccd.header['WAVMODE'] = 'Imaging'
        recovered_ccd.write(raw_path_full, overwrite=True)
        with self.assertRaises(SystemExit):
            self.data_classifier(raw_path=self.raw_path)
|
soar-telescopeREPO_NAMEgoodman_pipelinePATH_START.@goodman_pipeline_extracted@goodman_pipeline-main@goodman_pipeline@images@tests@test_data_classifier.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "cy-xu/cosmic-conn",
"repo_path": "cosmic-conn_extracted/cosmic-conn-main/cosmic_conn/dl_framework/__init__.py",
"type": "Python"
}
|
cy-xuREPO_NAMEcosmic-connPATH_START.@cosmic-conn_extracted@cosmic-conn-main@cosmic_conn@dl_framework@__init__.py@.PATH_END.py
|
|
{
"filename": "5_posterior_SHMF.ipynb",
"repo_name": "smsharma/mining-for-substructure-lens",
"repo_path": "mining-for-substructure-lens_extracted/mining-for-substructure-lens-master/notebooks/5_posterior_SHMF.ipynb",
"type": "Jupyter Notebook"
}
|
# Bayesian inference
```python
import sys, os
import logging
import numpy as np
from scipy.stats import uniform, norm
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerPatch
%matplotlib inline
sys.path.append('../')
import paper_settings
from simulation.units import *
from simulation.prior import get_grid
from inference.bayes import Posterior
```
```python
paper_settings.setup()
```
## Setup
```python
n = 100
n_events_for_expectation = 5000
```
```python
filename = "calibrated_alices_full_sgd1e2_grid"
```
## Data
```python
llr = np.load("../data/results/llr_{}.npy".format(filename))[:,:n_events_for_expectation]
```
```python
resolution = 25
f_sub_1d = np.linspace(0.001, 0.200, resolution)
beta_1d = np.linspace(-2.5, -1.5, resolution)
theta0, theta1 = np.meshgrid(f_sub_1d, beta_1d)
theta_grid = np.vstack((theta0.flatten(), theta1.flatten())).T
bin_size = f_sub_1d[1] - f_sub_1d[0]
alpha_edges = np.linspace(f_sub_1d[0] - bin_size/2, f_sub_1d[-1] + bin_size/2, resolution + 1)
bin_size = beta_1d[1] - beta_1d[0]
beta_edges = np.linspace(beta_1d[0] - bin_size/2, beta_1d[-1] + bin_size/2, resolution + 1)
```
## Calculate posterior on f_sub, beta
```python
def narrow_prior(thetas):
    """Prior density over (f_sub, beta) rows of `thetas`.

    f_sub ~ Uniform(0.001, 0.2) and beta ~ Normal(-1.9, 0.1),
    assumed independent, so the joint density is the product.
    """
    f_sub_density = uniform(0.001, 0.199).pdf(thetas[:, 0]).flatten()
    beta_density = norm(-1.9, 0.1).pdf(thetas[:, 1]).flatten()
    return f_sub_density * beta_density
```python
bayes = Posterior(llr, theta_grid)
posterior_narrow = bayes.posterior_based_on_expected_llr(n, narrow_prior)
```
## Drawing from posterior
```python
def draw_fsub_beta(posterior):
    """Draw one (f_sub, beta) sample from a discretized 2D posterior.

    `posterior` is a flat array of (unnormalized) probabilities, one per
    grid point of the global `theta_grid`.  NOTE(review): the in-place
    `/=` normalization mutates the caller's array — confirm this is
    intentional.
    """
    posterior /= np.sum(posterior)
    # Draw one grid bin; 625 = 25 x 25 grid points — assumes the
    # resolution-25 grid built above, TODO confirm if the grid changes.
    i_bin = np.random.choice(list(range(625)), p=posterior)
    # Jitter uniformly within the chosen bin so samples are continuous.
    f_sub = theta_grid[i_bin][0] + (f_sub_1d[1] - f_sub_1d[0]) * (np.random.random() - 0.5)
    beta = theta_grid[i_bin][1] + (beta_edges[1] - beta_edges[0]) * (np.random.random() - 0.5)
    return f_sub, beta
## SHMF
```python
def M_200_sigma_v(sigma_v, scatter=False):
    """Convert a central velocity dispersion to a halo virial mass M_200.

    Power-law calibration from https://arxiv.org/pdf/1804.04492.pdf.
    With ``scatter=True`` a lognormal scatter of 0.13 dex is applied to
    log10(M_200) before exponentiating.
    """
    intercept, slope = 0.09, 3.48
    mean_log10_M_200 = intercept + slope * np.log10(sigma_v / (100 * Kmps))
    if scatter:
        # 0.13 dex lognormal scatter around the mean relation
        log10_M_200 = np.random.normal(mean_log10_M_200, 0.13)
    else:
        log10_M_200 = mean_log10_M_200
    return (10 ** log10_M_200) * 1e12 * M_s
```python
def shmf_by_mhost(fsub, beta, msubs):
    """Evaluate the subhalo mass function dn/dm at masses `msubs`.

    A host halo mass is drawn stochastically each call (sigma_v ~
    N(225, 50) km/s mapped through M_200_sigma_v), so repeated calls with
    identical arguments give different curves.  `fsub` is the substructure
    mass fraction and `beta` the SHMF slope; `M_s`, `M_MW` and `Kmps` are
    unit constants imported from simulation.units.
    """
    sigma_v = np.random.normal(225, 50)
    mhost = M_200_sigma_v(sigma_v * Kmps)
    # Calibration mass range: 1e6 M_sun up to 1% of the host mass.
    m_min_calib=1e6 * M_s
    m_max_calib=0.01 * mhost
    M_0=M_MW
    m_0 = 1.e9 * M_s
    # Normalization chosen so the integral over the calibration range
    # yields the requested mass fraction fsub (valid for beta != -2).
    alpha = fsub * ((2 + beta) * M_0 * m_0 ** beta) / (m_max_calib ** (beta + 2) - m_min_calib ** (beta + 2))
    # Zero above the calibration cutoff; power law (msubs/m_0)**beta below.
    return np.where(msubs > m_max_calib, 0., alpha * (mhost / M_0) * (msubs / m_0)**beta / mhost)
## Find mean and credible intervals
```python
def mean_and_ci(samples, levels=(0.68, 0.95)):
    """Return the per-column mean and central credible intervals of `samples`.

    Parameters
    ----------
    samples : ndarray, shape (n_samples, n_points)
        One posterior draw per row.
    levels : iterable of float
        Credible-interval levels, e.g. 0.68 for the central 68% interval.
        (Tuple default: a mutable list default would be shared across calls.)

    Returns
    -------
    mean : ndarray, shape (n_points,)
        Column-wise mean of the draws.
    cis : ndarray, shape (2, n_levels, n_points)
        ``cis[0]`` holds the lower bounds, ``cis[1]`` the upper bounds,
        one row per entry of `levels`.
    """
    mean = np.mean(samples, axis=0)
    # Central interval: cut (1 - level)/2 probability from each tail.
    lower = [np.quantile(samples, (1. - level) / 2, axis=0) for level in levels]
    upper = [np.quantile(samples, 1. - (1. - level) / 2, axis=0) for level in levels]
    return mean, np.array([lower, upper])
## Generate SHMF data
```python
n_realizations = 20000
n_realizations_show = 0
fsub_true, beta_true = 0.05, -1.9
msubs = np.geomspace(1.e7, 1e12, 200) * M_s
```
```python
shmf_posteriors = []
shmf_trues = []
for _ in range(n_realizations):
fsub_, beta_ = draw_fsub_beta(posterior_narrow)
shmf_posteriors.append(shmf_by_mhost(fsub_, beta_, msubs))
shmf_trues.append(shmf_by_mhost(fsub_true, beta_true, msubs))
shmf_posteriors = np.asarray(shmf_posteriors)
shmf_trues = np.asarray(shmf_trues)
```
```python
shmf_true, _ = mean_and_ci(shmf_trues, levels=[])
shmf_posterior, shmf_posterior_cis = mean_and_ci(shmf_posteriors, levels=[0.68, 0.95])
```
## Plot
```python
x = 1./M_s * msubs
y_mult = msubs**2
```
```python
fig = paper_settings.figure()
ax = plt.gca()
for i, shmf_ in enumerate(shmf_posteriors[:n_realizations_show]):
plt.plot(x, msubs**2 * shmf_,c="0.3", lw=0.25)
fillpost2 = plt.fill_between(x, y_mult * shmf_posterior_cis[0, 1], msubs**2 * shmf_posterior_cis[1, 1], facecolor=paper_settings.COLOR_FIX, edgecolor=paper_settings.COLOR_FIX)
fillpost1 = plt.fill_between(x, y_mult * shmf_posterior_cis[0, 0], msubs**2 * shmf_posterior_cis[1, 0], facecolor=paper_settings.COLOR_ALIGN, edgecolor=paper_settings.COLOR_ALIGN)
lpost, = plt.plot(x, y_mult * shmf_posterior, alpha=1., c=paper_settings.COLOR_FULL, lw=1.0, label="Posterior")
ltrue, = plt.plot(x, y_mult * shmf_true, alpha=1., c="black", lw=1.0, ls=":", label="True SHMF")
ax.add_patch(matplotlib.patches.Rectangle(xy=(1.93e7,1.147e-2), width=6.4e7, height=0.2e-2, facecolor=paper_settings.COLOR_FIX, edgecolor=paper_settings.COLOR_FIX))
ax.add_patch(matplotlib.patches.Rectangle(xy=(1.93e7,1.195e-2), width=6.4e7, height=0.09e-2, facecolor=paper_settings.COLOR_ALIGN, edgecolor=paper_settings.COLOR_ALIGN))
ax.legend(
[ltrue, lpost],
["True SHMF", "Posterior"],
loc=2,
frameon=False
)
plt.xlabel(r"$m_{200}$ [$\mathrm{M}_{\odot}$]")
ax.xaxis.set_label_coords(0.5, -0.11)
plt.ylabel(r"$\frac {m_{200}^2}{M_{200}} \frac{\mathrm{d}n}{\mathrm{d}m_{200}}$")
ax.yaxis.set_label_coords(-0.115, 0.5)
ax.yaxis.set_tick_params(pad=1)
plt.xscale("log")
plt.xlim(1.e7, 1.e12)
plt.yscale("log")
plt.ylim(0.001,0.02)
#plt.ylim(0., None)
plt.savefig("../figures/shmf.pdf", dpi=300)
```

```python
```
|
smsharmaREPO_NAMEmining-for-substructure-lensPATH_START.@mining-for-substructure-lens_extracted@mining-for-substructure-lens-master@notebooks@5_posterior_SHMF.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/signal/windows/__init__.py",
"type": "Python"
}
|
"""
==============================================
Window functions (:mod:`scipy.signal.windows`)
==============================================
The suite of window functions for filtering and spectral estimation.
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
dpss -- Discrete prolate spheroidal sequences
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_cosine -- Generalized Cosine window
general_gaussian -- Generalized Gaussian window
general_hamming -- Generalized Hamming window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
"""
from .windows import *
# Explicit public API; keep in sync with the names exported by `.windows`.
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
           'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
           'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'general_cosine',
           'general_hamming', 'chebwin', 'slepian', 'cosine', 'hann',
           'exponential', 'tukey', 'get_window', 'dpss']
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@signal@windows@__init__.py@.PATH_END.py
|
{
"filename": "2_developers.md",
"repo_name": "mattpitkin/tempo2",
"repo_path": "tempo2_extracted/tempo2-master/documentation/2_developers.md",
"type": "Markdown"
}
|
Core Developers {#devs}
===============
Tempo2 development team
=======================
Tempo2 was originally written by George Hobbs and Russell Edwards.
Core package maintainers
------------------------
+ George Hobbs [GH] \anchor GH george.hobbs@csiro.au
- Core tempo2 development.
- Gravitational wave codes.
- Binary models.
+ Michael Keith [MJK] \anchor MJK mkeith@pulsarastronomy.net
- C++ code maintainence.
- Linear algebra and least-squares algorithms.
- Build system maintainence.
- Unit testing.
Active contributors
-------------------
+ Joris Verbiest
+ Lindley Lentati
+ Ryan Shannon
+ Paul Demorest
+ Lucas Guillemot
+ Stefan Oslowski
+ Willem van Straten
+ Rutger van Haasteren
+ Anne Archibald
Past Contributors
---------------
+ Russell Edwards
+ Aiden Hotan
+ Ankur Chaudhary
+ Ingrid Stairs
|
mattpitkinREPO_NAMEtempo2PATH_START.@tempo2_extracted@tempo2-master@documentation@2_developers.md@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.