metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "ordered_set.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/beniget/beniget/ordered_set.py",
"type": "Python"
}
|
"""
Copied from https://github.com/bustawin/ordered-set-37
"""
# Unlicense
# This is free and unencumbered software released into the public domain.
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# For more information, please refer to <http://unlicense.org/>
import sys
from collections import OrderedDict
import itertools
from typing import TYPE_CHECKING, MutableSet
if TYPE_CHECKING:
# trying to avoid polluting the global namespace with typing names.
from typing import TypeVar, Iterator, Iterable, Optional
T = TypeVar("T")
class ordered_set(MutableSet['T']):
    """
    A set that preserves insertion order by internally using a dict.

    Elements are stored as keys of an OrderedDict (all mapping to None), so
    membership tests are O(1) while iteration follows insertion order.
    Re-adding an existing element does not move it.
    """

    __slots__ = ('values',)

    def __init__(self, elements: 'Optional[Iterable[T]]' = None):
        # fromkeys keeps the first occurrence of duplicates, giving set
        # semantics with a stable order.
        self.values = OrderedDict.fromkeys(elements if elements is not None else ())

    def add(self, x: 'T') -> None:
        """Add *x*; if already present, its position is unchanged."""
        self.values[x] = None

    def update(self, values: 'Iterable[T]') -> None:
        """Add every element of *values*, preserving first-seen order."""
        self.values.update((k, None) for k in values)

    def clear(self) -> None:
        """Remove all elements."""
        self.values.clear()

    def discard(self, x: 'T') -> None:
        """Remove *x* if present; silently do nothing otherwise."""
        self.values.pop(x, None)

    def __getitem__(self, index: int) -> 'T':
        """Return the element at position *index*.

        Negative indices are supported (counted from the end, like a list).
        Raises IndexError when *index* is out of range.
        """
        n = len(self.values)
        # translate a negative index the way list indexing does
        lookup = index + n if index < 0 else index
        if not 0 <= lookup < n:
            raise IndexError(f"index {index} out of range") from None
        # islice avoids materializing the keys just to pick one
        return next(itertools.islice(self.values, lookup, lookup + 1))

    def __contains__(self, x: object) -> bool:
        return x in self.values

    def __add__(self, other: 'ordered_set[T]') -> 'ordered_set[T]':
        """Return a new ordered_set: self's elements followed by other's new ones."""
        return ordered_set(itertools.chain(self, other))

    def __len__(self) -> int:
        return len(self.values)

    def __iter__(self) -> 'Iterator[T]':
        return iter(self.values)

    def __str__(self) -> str:
        return f"{{{', '.join(str(i) for i in self)}}}"

    def __repr__(self) -> str:
        return f"<ordered_set {self}>"
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@beniget@beniget@ordered_set.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "mavrix93/LightCurvesClassifier",
"repo_path": "LightCurvesClassifier_extracted/LightCurvesClassifier-master/test/stars_processing/__init__.py",
"type": "Python"
}
|
mavrix93REPO_NAMELightCurvesClassifierPATH_START.@LightCurvesClassifier_extracted@LightCurvesClassifier-master@test@stars_processing@__init__.py@.PATH_END.py
|
|
{
"filename": "quirks.py",
"repo_name": "MazinLab/MKIDGen3",
"repo_path": "MKIDGen3_extracted/MKIDGen3-main/mkidgen3/quirks.py",
"type": "Python"
}
|
import pynq
class Quirk:
    """Base class unifying workarounds for hardware/pynq quirks across versions.

    Conventions for subclasses: signal that a quirk is present via
    ``.quirkname`` properties, provide the corresponding fix with
    ``.do_quirkname(cls, ...)`` classmethods, and record that a fix ran via
    ``.done_quirkname`` class properties.  Groups of quirks tied to an event
    are managed with ``.pre_eventname()`` / ``.post_eventname()`` classmethods.
    """
class MTS(Quirk):
    """Unclear why this is required but it has been in every bitstream I've used, report upstream"""
    # When True, the multi-tile-synchronization step is performed twice.
    # Root cause unknown (see docstring); treat as a workaround flag.
    double_sync = True
class Overlay(Quirk):
    # Quirk probes and fixes that operate on a loaded pynq.Overlay; the
    # overlay object is patched in place.
    def __init__(self, overlay: pynq.Overlay):
        # Keep a handle to the overlay; all probes/fixes below inspect it.
        self._ol = overlay

    @property
    def interrupt_mangled(self):
        """Work around an inconsistency in name mangling between the HWH parser and the pynq
        interrupt logic

        Report upstream to Xilinx
        """
        # Quirk is present when an interrupt controller appears in ip_dict
        # under its hierarchical ("/"-separated) name.
        for n in self._ol.ip_dict.keys():
            if "axi_intc" in n and "/" in n:
                return True
        return False

    def do_interrupt_mangled(self):
        # Alias each hierarchical controller name to the already-registered
        # "_"-joined entry so both spellings resolve to the same controller.
        parser_intcs = self._ol.device.parser.interrupt_controllers
        top_intcs = self._ol.interrupt_controllers
        for n in self._ol.ip_dict.keys():
            if "axi_intc" in n and "/" in n:
                parser_intcs[n] = parser_intcs[n.replace("/", "_")]
                top_intcs[n] = top_intcs[n.replace("/", "_")]

    @property
    def threepart_ddc(self):
        """The three part DDC uses an AXI BRAM controller which is picked up as a memory
        instead of an IP by PYNQ3, this instantiates the 3 part ddc driver so that to the
        user it still looks like an IP
        """
        for n in self._ol.mem_dict.keys():
            if "reschan" in n and "axi_bram_ctrl" in n:
                return True
        return False

    def do_threepart_ddc(self):
        import mkidgen3
        # For each BRAM-backed DDC found in mem_dict, attach a ThreepartDDC
        # driver to the owning hierarchy as "ddccontrol_<num>" so it presents
        # like a regular IP to the user.
        for n in self._ol.mem_dict.keys():
            if "reschan" in n and "axi_bram_ctrl" in n:
                from mkidgen3.drivers.ddc import ThreepartDDC
                num = int(n.split("_")[-1])  # instance number from trailing "_<n>"
                # pynq exposes the memory under the name with "/" removed
                memname = "".join(n.split("/"))
                mmio = getattr(self._ol, memname).mmio
                # walk down the hierarchy to the block that owns this DDC
                hier = self._ol
                for h in n.split("/")[:-1]:
                    hier = getattr(hier, h)
                setattr(hier, "ddccontrol_{:d}".format(num), ThreepartDDC(mmio))

    def post_configure(self):
        # Apply all post-configuration fixes that this overlay needs.
        # dac_table/dactable: naming drift between overlay builds.
        if not hasattr(self._ol, 'dac_table') and hasattr(self._ol, 'dactable'):
            self._ol.dac_table = self._ol.dactable
        if self.interrupt_mangled:
            self.do_interrupt_mangled()
        if self.threepart_ddc:
            self.do_threepart_ddc()
|
MazinLabREPO_NAMEMKIDGen3PATH_START.@MKIDGen3_extracted@MKIDGen3-main@mkidgen3@quirks.py@.PATH_END.py
|
{
"filename": "trunchen.py",
"repo_name": "BEAST-Fitting/beast",
"repo_path": "beast_extracted/beast-master/beast/observationmodel/noisemodel/trunchen.py",
"type": "Python"
}
|
"""
Trunchen version of noisemodel
Goal is to compute the full n-band covariance matrix for each model
"""
import numpy as np
from scipy.spatial import cKDTree
from tqdm import tqdm
from beast.observationmodel.noisemodel.noisemodel import NoiseModel
from beast.observationmodel.vega import Vega
__all__ = ["MultiFilterASTs"]
class MultiFilterASTs(NoiseModel):
    """Implement a noise model where the ASTs are provided as a single table

    Attributes
    ----------
    astfile : str
        file containing the ASTs
    filters : sequence(str)
        sequence of filter names
    """

    def __init__(self, astfile, filters, *args, **kwargs):
        """
        Parameters
        ----------
        astfile : str
            file containing the ASTs
        filters : sequence(str)
            sequence of filter names
        """
        NoiseModel.__init__(self, astfile, *args, **kwargs)
        self.setFilters(filters)

        # results of process_asts(); filled in lazily
        # needs updating
        self._input_fluxes = None
        self._biases = None
        self._completenesses = None
        self._cov_matrices = None
        self._corr_matrices = None

    def setFilters(self, filters):
        """Set the filters and update the vega reference for the conversions

        Parameters
        ----------
        filters : sequence
            list of filters using the internally normalized namings
        """
        self.filters = filters

        # ASTs inputs are in vega mag whereas models are in flux units
        # for optimization purpose: pre-compute the vega fluxes once
        with Vega() as v:
            _, vega_flux, _ = v.getFlux(filters)

        self.vega_flux = vega_flux

    def _calc_ast_cov(self, indxs, filters, return_all=False):
        """
        The NxN-dimensional covariance matrix and N-dimensional bias vector are
        calculated from M independent ASTs computed for N bands

        Parameters
        ----------
        indxs : index array giving the ASTs associated with a single
                model SED
        filters : base filter names in the AST file

        Keywords
        --------
        return_all : True/False

        Returns
        -------
        False when fewer than 6 ASTs were recovered; otherwise

        if return_all = False
           (cov_mat, bias, compls)
        else
           (cov_mat, bias, stddevs, corr_mat, diffs, ifluxes, compls)

        cov_mat : NxN dim numpy array
            covariance matrix in flux units
        bias : N dim numpy vector
            vector of the biases in each filter
        stddevs : N dim numpy vector
            vector of standard deviations in each filter
        corr_mat : NxN dim numpy array
            correlation matrix
        diffs : KxN dim numpy vector
            raw flux differences for N filters and K AST instances
        ifluxes : N dim numpy vector
            input fluxes of the AST in each filter
        compl : float
            AST completeness for this model
        """
        # set the asts for this star using the input index array
        asts = self.data[indxs]

        # now check that the source was recovered in at least 1 band
        # this replicates how the observed catalog is created
        n_asts = len(asts)
        gtindxs = np.full((n_asts), 1)
        for k in range(n_asts):
            cgood = 0
            for cfilter in filters:
                # mags >= 90 flag a non-recovery in that band
                if asts[cfilter + "_VEGA"][k] < 90:
                    cgood = cgood + 1
            gtindxs[k] = cgood

        (indxs,) = np.where(gtindxs > 0)
        n_indxs = len(indxs)
        # too few recovered ASTs to estimate an NxN covariance reliably
        if n_indxs <= 5:
            return False

        # completeness = fraction of ASTs recovered in at least one band
        compl = float(n_indxs) / float(n_asts)

        # setup the variables for output
        n_filters = len(filters)
        ifluxes = np.zeros((n_filters), dtype=np.float32)
        diffs = np.zeros((n_filters, n_indxs), dtype=np.float32)
        biases = np.zeros((n_filters), dtype=np.float32)
        cov_matrix = np.full((n_filters, n_filters), 0.0, dtype=np.float32)
        for ck, cfilter in enumerate(filters):
            # input fluxes are the same for every AST of this model, so
            # reading the first recovered one suffices (mag -> vega flux)
            ifluxes[ck] = (
                np.power(10.0, -0.4 * asts[cfilter + "_IN"][indxs[0]])
                * self.vega_flux[ck]
            )
            # compute the difference vector between the input and output fluxes
            # note that the input fluxes are in magnitudes and the
            # output fluxes in normalized vega fluxes
            diffs[ck, :] = (
                asts[cfilter + "_RATE"][indxs] * self.vega_flux[ck] - ifluxes[ck]
            )
            # compute the bias and standard deviations around said bias
            biases[ck] = np.mean(diffs[ck, :])

        # compute the covariance matrix (upper triangle, then mirror)
        for ck in range(n_filters):
            for dk in range(ck, n_filters):
                for ci in range(n_indxs):
                    cov_matrix[ck, dk] += (diffs[ck, ci] - biases[ck]) * (
                        diffs[dk, ci] - biases[dk]
                    )
                # fill in the symmetric terms
                cov_matrix[dk, ck] = cov_matrix[ck, dk]
        # sample covariance normalization
        cov_matrix /= n_indxs - 1
        stddevs = np.sqrt(np.diagonal(cov_matrix))

        # compute the correlation matrix
        corr_matrix = np.array(cov_matrix)
        for ck in range(n_filters):
            for dk in range(ck, n_filters):
                if stddevs[ck] * stddevs[dk] > 0:
                    corr_matrix[ck, dk] /= stddevs[ck] * stddevs[dk]
                else:
                    # a zero-variance band gives an undefined correlation
                    corr_matrix[ck, dk] = 0.0
                # fill in the symmetric terms
                corr_matrix[dk, ck] = corr_matrix[ck, dk]

        if return_all:
            return (cov_matrix, biases, stddevs, corr_matrix, diffs, ifluxes, compl)
        else:
            return (cov_matrix, biases, compl)

    def _calc_all_ast_cov(self, filters, progress=True):
        """
        The covariance matrices and biases are calculated for all the
        independent models in the AST file

        Parameters
        ----------
        filters : filter names for the AST data

        Keywords
        --------
        progress : bool, optional
            if set, display a progress bar

        Returns
        -------
        (cov_mats, biases, completenesses, corr_mats, ifluxes, ast_minmax)

        cov_mats : KxNxN dim numpy array
            K AST covariance matrices in flux units
        bias : KxN dim numpy vector
            K vectors of the biases in each filter
        completenesses : K dim numpy vector
            completeness versus model
        corr_mats : KxNxN dim numpy array
            K AST correlation matrices
        ifluxes : KxN dim numpy vector
            K vectors of the input fluxes in each filter
        ast_minmax : 2xN dim numpy array
            min (row 0) and max (row 1) input flux seen in each filter
        """
        # find the stars by using unique values of the magnitude values
        # in filtername
        filtername = filters[-1] + "_IN"
        uvals, ucounts = np.unique(self.data[filtername], return_counts=True)
        n_models = len(uvals)

        # setup the output
        n_filters = len(filters)
        all_covs = np.zeros((n_models, n_filters, n_filters), dtype=np.float64)
        all_corrs = np.zeros((n_models, n_filters, n_filters), dtype=np.float32)
        all_biases = np.zeros((n_models, n_filters), dtype=np.float64)
        all_ifluxes = np.zeros((n_models, n_filters), dtype=np.float32)
        all_compls = np.zeros((n_models), dtype=np.float32)
        ast_minmax = np.zeros((2, n_filters), dtype=np.float64)
        # extreme sentinels so the min/max updates below always take hold
        ast_minmax[0, :] = 1e99
        ast_minmax[1, :] = 1e-99

        # loop over the unique set of models and
        # calculate the covariance matrix using the ASTs for this model
        good_asts = np.full((n_models), True)
        if progress is True:
            it = tqdm(list(range(n_models)), desc="Calculating AST covariance matrices")
        else:
            it = list(range(n_models))
        for i in it:
            # find all the ASTs for this model
            (indxs,) = np.where(self.data[filtername] == uvals[i])
            n_asts = len(indxs)
            # NOTE(review): models with n_asts <= 5 keep good_asts True and
            # all-zero matrices — confirm this is intended upstream
            if n_asts > 5:
                results = self._calc_ast_cov(indxs, filters, return_all=True)
                # _calc_ast_cov returns False when too few ASTs were recovered
                if results:
                    all_covs[i, :, :] = results[0]
                    all_biases[i, :] = results[1]
                    all_corrs[i, :, :] = results[3]
                    all_ifluxes[i, :] = results[5]
                    all_compls[i] = results[6]
                    for k in range(n_filters):
                        ast_minmax[0, k] = min(ast_minmax[0, k], all_ifluxes[i, k])
                        ast_minmax[1, k] = max(ast_minmax[1, k], all_ifluxes[i, k])
                else:
                    good_asts[i] = False

        # keep only the models with good ASTs
        (indxs,) = np.where(good_asts)

        return (
            all_covs[indxs, :, :],
            all_biases[indxs, :],
            all_compls[indxs],
            all_corrs[indxs, :, :],
            all_ifluxes[indxs, :],
            ast_minmax,
        )

    def process_asts(self, filters):
        """
        Process all the AST results creating average biases and
        covariance matrices for each model SED.
        Also, prep for the interpolation by setting up the kd-tree

        Parameters
        ----------
        filters : filter names for the AST data

        Returns
        -------
        N/A.
        """
        results = self._calc_all_ast_cov(filters)
        self._cov_matrices = results[0]
        self._biases = results[1]
        self._completenesses = results[2]
        self._corr_matrices = results[3]
        self._input_fluxes = results[4]
        self._minmax_asts = results[5]

        # kd-tree in log10(flux) space for the nearest-neighbor
        # interpolation done in __call__
        print("building kd-tree...")
        self._kdtree = cKDTree(np.log10(self._input_fluxes))
        print("...done")

    def __call__(self, sedgrid, generic_absflux_a_matrix=None, progress=True):
        """
        Interpolate the results of the ASTs on the model grid

        Parameters
        ----------
        sedgrid : beast.core.grid type
            model grid to interpolate AST results on
        generic_absflux_a_matrix : NxN numpy array, optional
            model-independent absolute-flux-calibration covariance "A" matrix
        progress : bool, optional
            if set, display a progress bar

        Returns
        -------
        (biases, sigmas, compls, q_norm, icov_diag, icov_offdiag,
         cov_diag, cov_offdiag)
        """
        flux = sedgrid.seds

        # decide which absolute-flux covariance contribution to apply
        if generic_absflux_a_matrix is not None:
            model_absflux_cov = False
            print("using model independent absflux cov matrix")
        elif (sedgrid.cov_diag is not None) and (sedgrid.cov_offdiag is not None):
            model_absflux_cov = True
            absflux_cov_diag = sedgrid.cov_diag
            absflux_cov_offdiag = sedgrid.cov_offdiag
            print("using model dependent absflux cov matrix")
        else:
            model_absflux_cov = False

        n_models, n_filters = flux.shape
        # number of unique off-diagonal terms of a symmetric NxN matrix;
        # integer division: this is used as an array dimension below
        n_offdiag = ((n_filters ** 2) - n_filters) // 2

        if n_filters != len(self.filters):
            raise AttributeError(
                "the grid of models does not seem to "
                + "be defined with the same number of filters"
            )

        biases = np.zeros((n_models, n_filters), dtype=np.float64)
        sigmas = np.zeros((n_models, n_filters), dtype=np.float64)
        cov_diag = np.zeros((n_models, n_filters), dtype=np.float64)
        cov_offdiag = np.zeros((n_models, n_offdiag), dtype=np.float64)
        icov_diag = np.zeros((n_models, n_filters), dtype=np.float64)
        icov_offdiag = np.zeros((n_models, n_offdiag), dtype=np.float64)
        q_norm = np.zeros((n_models), dtype=np.float64)
        compls = np.zeros((n_models), dtype=float)

        if progress is True:
            it = tqdm(list(range(n_models)), desc="Evaluating model")
        else:
            it = list(range(n_models))
        for i in it:
            # AST results are in vega fluxes
            cur_flux = flux[i, :]

            # find the 10 nearest neighbors to the model SED
            result = self._kdtree.query(np.log10(cur_flux), 10)
            dist = result[0]
            indxs = result[1]

            # check if the distance is very small, set to a reasonable value
            # (avoids near-infinite weights for exact matches)
            (tindxs,) = np.where(dist < 0.01)
            if len(tindxs) > 0:
                dist[tindxs] = 0.01

            # compute the interpolated covariance matrix
            # use the distances to generate weights for the sum
            dist_weights = 1.0 / dist
            dist_weights /= np.sum(dist_weights)
            cur_cov_matrix = np.average(
                self._cov_matrices[indxs, :, :], axis=0, weights=dist_weights
            )

            # add in the absflux covariance matrix
            # unpack off diagonal terms the same way they were packed
            if model_absflux_cov:
                m = 0
                cur_cov_matrix[n_filters - 1, n_filters - 1] += absflux_cov_diag[
                    i, n_filters - 1
                ]
                for k in range(n_filters - 1):
                    cur_cov_matrix[k, k] += absflux_cov_diag[i, k]
                    for ll in range(k + 1, n_filters):
                        cur_cov_matrix[k, ll] += absflux_cov_offdiag[i, m]
                        cur_cov_matrix[ll, k] += absflux_cov_offdiag[i, m]
                        m += 1
            elif generic_absflux_a_matrix is not None:
                # A-matrix scales with the model fluxes: C_kl = A_kl f_k f_l
                for k in range(n_filters):
                    for ll in range(n_filters):
                        cur_cov_matrix[k, ll] += (
                            generic_absflux_a_matrix[k, ll] * cur_flux[k] * cur_flux[ll]
                        )

            # compute the interpolated biases
            biases[i, :] = np.average(
                self._biases[indxs, :], axis=0, weights=dist_weights
            )

            # compute the interpolated completeness
            compls[i] = np.average(self._completenesses[indxs], weights=dist_weights)

            # save the straight uncertainties
            sigmas[i, :] = np.sqrt(np.diagonal(cur_cov_matrix))

            # invert covariance matrix
            inv_cur_cov_matrix = np.linalg.inv(cur_cov_matrix)

            # save the diagonal and packed version of non-diagonal terms
            m = 0
            icov_diag[i, n_filters - 1] = inv_cur_cov_matrix[
                n_filters - 1, n_filters - 1
            ]
            cov_diag[i, n_filters - 1] = cur_cov_matrix[n_filters - 1, n_filters - 1]
            for k in range(n_filters - 1):
                icov_diag[i, k] = inv_cur_cov_matrix[k, k]
                cov_diag[i, k] = cur_cov_matrix[k, k]
                for ll in range(k + 1, n_filters):
                    icov_offdiag[i, m] = inv_cur_cov_matrix[k, ll]
                    cov_offdiag[i, m] = cur_cov_matrix[k, ll]
                    m += 1

            # save the log of the determinant for normalization
            # the ln(det) is calculated and saved as this is what will
            # be used in the actual calculation
            #   norm = 1.0/sqrt(Q)
            det = np.linalg.slogdet(cur_cov_matrix)
            if det[0] <= 0:
                print("something bad happened")
                print("determinant of covariance matrix is zero or negative")
                print(det)
            q_norm[i] = -0.5 * det[1]

        return (
            biases,
            sigmas,
            compls,
            q_norm,
            icov_diag,
            icov_offdiag,
            cov_diag,
            cov_offdiag,
        )
|
BEAST-FittingREPO_NAMEbeastPATH_START.@beast_extracted@beast-master@beast@observationmodel@noisemodel@trunchen.py@.PATH_END.py
|
{
"filename": "Tutorial-Start_to_Finish-ScalarWave.ipynb",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/Tutorial-Start_to_Finish-ScalarWave.ipynb",
"type": "Jupyter Notebook"
}
|
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# Start-to-Finish Example: Numerical Solution of the Scalar Wave Equation, in Cartesian Coordinates
## Author: Zach Etienne
### Formatting improvements courtesy Brandon Clark
## This module solves the scalar wave equation in Cartesian coordinates, using the [Method of Lines](Tutorial-Method_of_Lines-C_Code_Generation.ipynb), and validates the solution against the exact one, confirming its expected convergence behavior.
**Notebook Status:** <font color='green'><b>Validated</b></font>
**Validation Notes:** This module has been validated to converge at the expected order to the exact solution (see [plot](#convergence) at bottom).
### NRPy+ Source Code for this module:
* [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py) [\[**tutorial**\]](Tutorial-ScalarWave.ipynb) generates the right-hand side for the Scalar Wave Equation in Cartesian coordinates.
* [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) [\[**tutorial**\]](Tutorial-ScalarWave.ipynb) generates C code for plane wave or spherical Gaussian initial data for the scalar wave equation.
## Introduction:
As outlined in the [previous NRPy+ tutorial notebook](Tutorial-ScalarWave.ipynb), we first use NRPy+ to generate initial data for the scalar wave equation, and then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4).
The entire algorithm is outlined as follows, with links to the relevant NRPy+ tutorial notebooks listed at each step.
1. Allocate memory for gridfunctions, including temporary storage for the Method of Lines time integration.
* [**NRPy+ tutorial notebook on Method of Lines algorithm**](Tutorial-Method_of_Lines-C_Code_Generation.ipynb)
1. Set gridfunction values to initial data.
* [**NRPy+ tutorial notebook section on plane-wave solution to scalar wave equation**](Tutorial-ScalarWave.ipynb#planewave)
1. Next, integrate the initial data forward in time using the Method of Lines coupled to a Runge-Kutta explicit timestepping algorithm.
1. At the start of each iteration in time, output the difference between the numerical and exact solution.
* [**NRPy+ tutorial notebook section on plane-wave solution to scalar wave equation**](Tutorial-ScalarWave.ipynb#planewave)
1. At each RK time substep, do the following.
1. Evaluate scalar wave RHS expressions.
* [**NRPy+ tutorial notebook section on right-hand sides of scalar wave equation, in 3 spatial dimensions**](Tutorial-ScalarWave.ipynb#rhss3d)
1. Apply boundary conditions [*a la* the SENR/NRPy+ paper](https://arxiv.org/abs/1712.07658).
1. Repeat above steps at two numerical resolutions to confirm convergence to zero.
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows.
1. [Step 1](#setup): Set up core functions and parameters for solving scalar wave equation
1. [Step 1.a](#applybcs) `apply_bcs()`: outer boundary condition driver function
1. [Step 1.b](#mol) Generate Method of Lines timestepping code
1. [Step 1.c](#freeparams) Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`
1. [Step 2](#mainc): `ScalarWave_Playground.c`: The Main C Code
1. [Step 3](#convergence): Code validation: Verify that relative error in numerical solution converges to zero at the expected order
1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='setup'></a>
# Step 1: Set up core functions and parameters for solving scalar wave equation \[Back to [top](#toc)\]
$$\label{setup}$$
Let's pick up where we left off in the [previous module](Tutorial-ScalarWave.ipynb), interfacing with the [ScalarWave/InitialData](../edit/ScalarWave/InitialData.py) and [ScalarWave/ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) NRPy+ modules to generate
* monochromatic (single-wavelength) plane wave scalar wave initial data, and
* the scalar wave equation RHSs at **4th** finite difference order in **3 spatial dimensions**.
```python
# Step P1: Import needed NRPy+ core modules:
from outputC import lhrh, add_to_Cfunction_dict  # NRPy+: Core C code output module
import finite_difference as fin  # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par   # NRPy+: Parameter interface
import grid as gri               # NRPy+: Functions having to do with numerical grids
import cmdline_helper as cmd     # NRPy+: Multi-platform Python command-line interface
import shutil, os                # Standard Python modules for multiplatform OS-level functions

# Step P2: Create C code output directory:
Ccodesrootdir = os.path.join("ScalarWave_Ccodes")
# First remove C code output directory if it exists
shutil.rmtree(Ccodesrootdir, ignore_errors=True)
# Then create a fresh directory
cmd.mkdir(Ccodesrootdir)

# Step P3: Create executable output directory:
outdir = os.path.join(Ccodesrootdir, "output")
cmd.mkdir(outdir)

# Step P4: Set domain_size, the physical extent of numerical grid;
#          in Cartesian coordinates xmin=ymin=zmin=-domain_size,
#          and xmax=ymax=zmax=+domain_size
domain_size = 10.0

# Step P5: Set timestepping algorithm (we adopt the Method of Lines)
RK_method = "RK4"

# Step P6: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", 4)

# Step P7: Enable/disable SIMD. If enabled, code should run ~2x faster on most CPUs.
enable_SIMD = True

# Step 1: Import the ScalarWave.InitialData module.
#         This command only declares ScalarWave initial data
#         parameters and the InitialData() function.
import ScalarWave.InitialData as swid

# Step 2: Import ScalarWave_RHSs module.
#         This command only declares ScalarWave RHS parameters
#         and the ScalarWave_RHSs function (called later)
import ScalarWave.ScalarWave_RHSs as swrhs

# Step 3: The spatial dimension parameter defaults to 3;
#         no need to set!
# par.set_parval_from_str("grid::DIM", 3)

# Step 4: Call the InitialData() function to set up initial data.
#         Options include:
#    "PlaneWave": monochromatic (single frequency/wavelength) plane wave
#    "SphericalGaussian": spherically symmetric Gaussian, with default stdev=3
swid.InitialData(WaveType="PlaneWave")

# Step 5: Generate SymPy symbolic expressions for
#         uu_rhs and vv_rhs; the ScalarWave RHSs.
#         This function also declares the uu and vv
#         gridfunctions, which need to be declared
#         to output even the initial data to C file.
swrhs.ScalarWave_RHSs()

# Step 6: Enable "FD functions". In other words, all finite-difference stencils
#         will be output as inlined static functions. This is essential for
#         compiling highly complex FD kernels with certain versions of GCC;
#         GCC 10-ish will choke on BSSN FD kernels at high FD order, sometimes
#         taking *hours* to compile. Unaffected GCC versions compile these kernels
#         in seconds. FD functions do not slow the code performance, but do add
#         another header file to the C source tree.
enable_FD_functions = True
par.set_parval_from_str("finite_difference::enable_FD_functions", enable_FD_functions)

# Step 7: If enable_SIMD, then copy SIMD/SIMD_intrinsics.h to $Ccodesrootdir/SIMD/SIMD_intrinsics.h
cmd.mkdir(os.path.join(Ccodesrootdir, "SIMD"))
if enable_SIMD:
    shutil.copy(os.path.join("SIMD", "SIMD_intrinsics.h"), os.path.join(Ccodesrootdir, "SIMD"))
```
```python
def add_to_Cfunction_dict_exact_solution_single_point():
    # Register the C function that evaluates the exact solution (uu, vv) at a
    # single gridpoint; params.time==0 yields the initial data.
    includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"]
    desc = "Exact solution at a single point. params.time==0 corresponds to the initial data."
    c_type = "void"
    name = "exact_solution_single_point"
    params = """const paramstruct *restrict params,
const REAL xx0, const REAL xx1, const REAL xx2,
REAL *uu_exact, REAL *vv_exact"""
    # Generate the C kernel from the SymPy expressions for uu and vv.
    body = fin.FD_outputC("returnstring", [lhrh(lhs="*uu_exact", rhs=swid.uu_ID),
                                           lhrh(lhs="*vv_exact", rhs=swid.vv_ID)],
                          params="includebraces=False,preindent=1,outCverbose=False")
    add_to_Cfunction_dict(
        includes=includes,
        desc=desc,
        c_type=c_type, name=name, params=params,
        body=body,
        rel_path_to_Cparams=os.path.join("."))
```
```python
def add_to_Cfunction_dict_exact_solution_all_points():
    # Register the C function that fills the uu and vv gridfunctions with the
    # exact solution at every gridpoint (the loop is generated via loopopts).
    includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"]
    desc = "Exact solution at all points. params.time==0 corresponds to the initial data."
    c_type = "void"
    name = "exact_solution_all_points"
    params = "const paramstruct *restrict params,REAL *restrict xx[3], REAL *restrict in_gfs"
    body = """exact_solution_single_point(params, xx0, xx1, xx2,
&in_gfs[IDX4S(UUGF,i0,i1,i2)], &in_gfs[IDX4S(VVGF,i0,i1,i2)]);"""
    add_to_Cfunction_dict(
        includes=includes,
        desc=desc,
        c_type=c_type, name=name, params=params,
        body=body,
        # "AllPoints,Read_xxs": loop over every gridpoint, reading xx[] coords
        rel_path_to_Cparams=os.path.join("."), loopopts = "AllPoints,Read_xxs")
```
```python
def add_to_Cfunction_dict_rhs_eval():
    # Register the C kernel that evaluates the scalar wave RHSs (uu_rhs,
    # vv_rhs) on grid-interior points, optionally SIMD-vectorized.
    desc="Evaluate the scalar wave RHSs"
    includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"]
    if enable_FD_functions:
        # FD stencils emitted as inlined static functions live in this header.
        includes += ["finite_difference_functions.h"]
    if enable_SIMD:
        includes += ["SIMD/SIMD_intrinsics.h"]
    c_type = "void"
    name = "rhs_eval"
    params = "const paramstruct *restrict params, const REAL *restrict in_gfs, REAL *restrict rhs_gfs"
    body = fin.FD_outputC("returnstring", [lhrh(lhs=gri.gfaccess("rhs_gfs","uu"), rhs=swrhs.uu_rhs),
                                           lhrh(lhs=gri.gfaccess("rhs_gfs","vv"), rhs=swrhs.vv_rhs)],
                          params="enable_SIMD="+str(enable_SIMD))
    # FD derivatives need neighbor data, so only interior points are looped.
    loopopts = "InteriorPoints"
    if enable_SIMD:
        loopopts += ",enable_SIMD"
    add_to_Cfunction_dict(
        includes=includes,
        desc=desc,
        c_type=c_type, name=name, params=params,
        body=body,
        rel_path_to_Cparams=os.path.join("."), loopopts = loopopts)
```
```python
def add_to_Cfunction_dict_diagnostic_output_2D_xy_plane():
    # Register a diagnostic C function writing (x, y, numerical uu, exact uu)
    # for the 2D slice of gridpoints closest to the z=0 plane.
    includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"]
    desc="As a diagnostic, output to file 2D data closest to the xy plane (z approx 0 plane)."
    c_type = "void"
    name = "diagnostic_output_2D_xy_plane"
    params = """const paramstruct *restrict params, REAL *xx[3],
const REAL *numerical_gridfunction_data,REAL *gridfunction_to_store_exact"""
    body = r""" char filename[100];
sprintf(filename, "output/out2D__resolution_%dx%dx%d__iter_%d.txt", Nxx0,Nxx1,Nxx2,params->n);
FILE *out2D = fopen(filename, "w");
// Output on z=midpoint plane (points closest to z=0):
const int i2 = (int)((Nxx2+ 2*NGHOSTS)*0.5);
const REAL xx2 = xx[2][i2];
for(int i0=0;i0<Nxx0+2*NGHOSTS;i0++) {
for(int i1=0;i1<Nxx1+2*NGHOSTS;i1++) {
// Zoom in; do not output data near outer boundary.
if(i0> (Nxx0+2*NGHOSTS)*.25 && i0< (Nxx0+2*NGHOSTS)*.75 &&
i1> (Nxx1+2*NGHOSTS)*.25 && i1< (Nxx1+2*NGHOSTS)*.75) {
const REAL xx0 = xx[0][i0];
const REAL xx1 = xx[1][i1];
REAL uu_exact,vv_exact; exact_solution_single_point(params,xx0,xx1,xx2, &uu_exact,&vv_exact);
fprintf(out2D,"%e %e %e %e\n", xx0, xx1,
numerical_gridfunction_data[IDX4S(0,i0,i1, (int)((Nxx2+ 2*NGHOSTS)*0.5))], uu_exact);
}
}
}
fclose(out2D);
"""
    add_to_Cfunction_dict(
        includes=includes,
        desc=desc,
        c_type=c_type, name=name, params=params,
        body=body,
        rel_path_to_Cparams=os.path.join("."))
```
<a id='applybcs'></a>
## Step 1.a: `apply_bcs()`: outer boundary condition driver function \[Back to [top](#toc)\]
$$\label{applybcs}$$
When solving the wave equation on a 3D Cartesian numerical grid cube (or, if you like, rectangular prism), at each step in time, we first evaluate the right-hand sides (RHSs) of the $\partial_t u$ and $\partial_t v$ equations.
These RHSs generally contain spatial derivatives, which we evaluate using finite-difference differentiation ([**tutorial**](Tutorial-Finite_Difference_Derivatives.ipynb)). Each finite-difference derivative depends on neighboring points on the left and right, so the RHSs can only be evaluated in the grid interior. For example, a standard fourth-order centered finite difference derivative depends on two points to the left and right of the point at which the derivative is being evaluated. In order for the same interior to be filled at the next time step, we need to fill in the data at the boundaries; i.e., we need to apply boundary conditions.
Here we quadratically extrapolate data to the outer boundary using the `FACE_UPDATE()` C macro defined below. The C code function `apply_bcs()` below updates all 6 faces of the cube. To ensure that all gridpoints on the outer boundary (also known as "ghost cells") are filled, the following algorithm is implemented, starting at the innermost ghost cells (i.e., the ghost cells closest to the grid interior):
1. The lower $x$ face is updated on only the interior points of the face.
1. The upper $x$ face is updated on only the interior points of the face.
1. The lower $y$ face is updated on the interior points of that face, plus the lower and upper $x$ boundary points.
1. The upper $y$ face is updated on the interior points of that face, plus the lower and upper $x$ boundary points.
1. The lower $z$ face is updated on the interior points of that face, plus the lower and upper $x$ boundary points, plus the lower and upper $y$ boundary points.
1. The upper $z$ face is updated on the interior points of that face, plus the lower and upper $x$ boundary points, plus the lower and upper $y$ boundary points.
1. The above is repeated on the next outer ghost cell until all outer boundary points are filled.
```python
def add_to_Cfunction_dict_apply_bcs():
    # Register the outer-boundary-condition driver: quadratic extrapolation
    # applied face-by-face, innermost ghost layer first (see notebook text).
    desc="""Apply (quadratic extrapolation) spatial boundary conditions to the scalar wave gridfunctions.
BCs are applied to all six boundary faces of the cube, filling in the innermost
ghost zone first, and moving outward."""
    includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"]
    c_type = "void"
    name = "apply_bcs"
    params = "const paramstruct *restrict params,REAL *restrict gfs"
    prefunc = r"""
// Declare boundary condition FACE_UPDATE macro,
// which updates a single face of the 3D grid cube
// using quadratic polynomial extrapolation.
const int MAXFACE = -1;
const int NUL = +0;
const int MINFACE = +1;
#define FACE_UPDATE(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
gfs[IDX4S(which_gf,i0,i1,i2)] = \
+3.0*gfs[IDX4S(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-3.0*gfs[IDX4S(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
+1.0*gfs[IDX4S(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
}
"""
    body = r"""
#pragma omp parallel for
for(int which_gf=0;which_gf<NUM_EVOL_GFS;which_gf++) {
#include "set_Cparameters.h"
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS0-NGHOSTS, Nxx_plus_2NGHOSTS1-NGHOSTS, Nxx_plus_2NGHOSTS2-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
FACE_UPDATE(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
FACE_UPDATE(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
FACE_UPDATE(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
FACE_UPDATE(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
}
}
"""
    add_to_Cfunction_dict(
        includes=includes,
        desc=desc,
        c_type=c_type, name=name, params=params,
        prefunc=prefunc, body=body,
        rel_path_to_Cparams=os.path.join("."))
```
<a id='mol'></a>
## Step 1.b: Generate Method of Lines timestepping code \[Back to [top](#toc)\]
$$\label{mol}$$
The Method of Lines algorithm is described in detail in the [**NRPy+ tutorial notebook on Method of Lines algorithm**](Tutorial-Method_of_Lines-C_Code_Generation.ipynb).
```python
# Step 1.b: Generate Runge-Kutta-based (RK-based) timestepping code.
# As described above the Table of Contents, this is a 2-step process:
# 1.b.A: Evaluate RHSs (RHS_string)
# 1.b.B: Apply boundary conditions (post_RHS_string, pt 1)
import MoLtimestepping.MoL_simple as MoL
# from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
# RK_order = Butcher_dict[RK_method][1]
# cmd.mkdir(os.path.join(Ccodesrootdir,"MoLtimestepping/"))
# Register the MoL driver C functions: at each RK substage the RHSs are
# evaluated via rhs_eval(), then apply_bcs() refills the ghost zones of the
# updated gridfunctions.
MoL.register_C_functions_and_NRPy_basic_defines(RK_method,
    RHS_string = "rhs_eval(params, RK_INPUT_GFS, RK_OUTPUT_GFS);",
    post_RHS_string = "apply_bcs(params, RK_OUTPUT_GFS);", enable_SIMD=enable_SIMD)
```
<a id='freeparams'></a>
## Step 1.c: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](#toc)\]
$$\label{freeparams}$$
Here we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above.
```python
domain_size_str=str(domain_size)
# Step 3.d: Set free_parameters.h
# This header is #include'd by main(): it sets the initial-data parameters,
# reads the resolution Nx from the command line, and defines the cubic grid
# extents [-domain_size, +domain_size]^3 plus the derived grid spacings.
with open(os.path.join(Ccodesrootdir,"free_parameters.h"),"w") as file:
    file.write(r"""
// Set free-parameter values.
// Set free-parameter values for the initial data.
params.time = 0.0; params.wavespeed = 1.0;
//params.kk0 = 1.0; params.kk1 = 1.0; params.kk2 = 1.0;
const REAL domain_size = """+str(domain_size)+r""";
// Override parameter defaults with values based on command line arguments and NGHOSTS.
const int Nx0x1x2 = atoi(argv[1]);
params.Nxx0 = Nx0x1x2;
params.Nxx1 = Nx0x1x2;
params.Nxx2 = Nx0x1x2;
params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS;
params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS;
params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS;
// Step 0d: Set up space and time coordinates
// Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]:
const REAL xxmin[3] = {-"""+domain_size_str+""",-"""+domain_size_str+""",-"""+domain_size_str+""" };
const REAL xxmax[3] = {+"""+domain_size_str+""",+"""+domain_size_str+""",+"""+domain_size_str+r""" };
params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0);
params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1);
params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2);
params.invdx0 = 1.0 / params.dxx0;
params.invdx1 = 1.0 / params.dxx1;
params.invdx2 = 1.0 / params.dxx2;
""")
```
<a id='mainc'></a>
# Step 2: `ScalarWave_Playground`: The Main C Code \[Back to [top](#toc)\]
$$\label{mainc}$$
Next we will write the C code infrastructure necessary to make use of the above NRPy+-generated codes. Again, we'll be using RK4 time integration via the Method of Lines.
```python
def add_to_Cfunction_dict_main__ScalarWave_Playground():
    """Register the C main() routine for the ScalarWave playground.

    main() reads the grid resolution Nx from the command line, builds the
    cell-centered Cartesian grid, sets the initial data to the exact solution
    at t=0, then evolves it with the MoL timestepper while periodically
    printing diagnostics: the relative error vs. the exact solution at the
    grid center (0D, every NSKIP_0D_OUTPUT steps) and on the xy-plane
    (2D, every NSKIP_2D_OUTPUT steps).

    Fixes relative to the original cell:
    * The 0D-output condition tested the local constant `n` (copied once from
      params.n by set_Cparameters-nopointer.h and never updated inside the
      time loop); it now tests `params.n`, consistent with the 2D-output
      condition.
    * Restored `&params` where HTML-entity mangling had produced garbled text.
    """
    includes = ["NRPy_basic_defines.h", "NRPy_function_prototypes.h"]
    # Emitted above main(): diagnostic-output cadences (every Nth iteration).
    prefunc = """
// Frequency of output.
const int NSKIP_0D_OUTPUT = 1;
const int NSKIP_2D_OUTPUT = 10;
"""
    desc = """
// main() function:
// Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates
// Step 1: Set up scalar wave initial data
// Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm,
//         applying quadratic extrapolation outer boundary conditions.
// Step 3: Output relative error between numerical and exact solution.
// Step 4: Free all allocated memory
"""
    c_type = "int"
    name = "main"
    params = "int argc, const char *argv[]"
    body = r"""
paramstruct params;
set_Cparameters_to_default(&params);
// Step 0a: Read command-line input, error out if nonconformant
if(argc != 2 || atoi(argv[1]) < NGHOSTS) {
  printf("Error: Expected one command-line argument: ./ScalarWave_Playground [Nx(=Ny=Nz)],\n");
  printf("where Nx is the number of grid points in the x,y, and z directions.\n");
  printf("Nx MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS);
  exit(1);
}
if(atoi(argv[1])%2 != 0) {
  printf("Error: Algorithm for setting up cell-centered grids here requires Nx, Ny, and Nz to be a multiple of 2 .\n");
  exit(1);
}
// Step 0b: Set free parameters, overwriting Cparameters defaults
//          by hand or with command-line input, as desired.
#include "free_parameters.h"
// ... and then set up the numerical grid structure in time:
const REAL CFL_FACTOR = 0.5; // Set the CFL Factor
#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )
params.dt = CFL_FACTOR * MIN(params.dxx0,MIN(params.dxx1,params.dxx2)); // CFL condition
// Step 0c: Now that params struct has been properly set up, create
//          list of const's containing each parameter. E.g.,
//          const REAL dxx0 = params.dxx0;
#include "set_Cparameters-nopointer.h"
// Step 0d: Declare struct for gridfunctions and allocate memory for gridfunctions
MoL_gridfunctions_struct gridfuncs;
MoL_malloc_y_n_gfs(&params, &gridfuncs);
MoL_malloc_non_y_n_gfs(&params, &gridfuncs);
// Step 0e: Set t_final, and number of timesteps based on t_final
params.t_final = xxmax[0]*0.8; /* Final time is set so that at t=t_final,
                                  data at the origin have not been corrupted
                                  by the approximate outer boundary condition */
int Nt = (int)(params.t_final / params.dt + 0.5); // The number of points in time.
                                                  // Add 0.5 to account for C rounding down integers.
// Step 0f: Set up cell-centered Cartesian coordinate grids
REAL *xx[3];
xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0);
xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1);
xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2);
for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*params.dxx0;
for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*params.dxx1;
for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*params.dxx2;
// Step 1: Set up initial data to be exact solution at time=0:
params.time = 0.0; exact_solution_all_points(&params, xx, gridfuncs.y_n_gfs);
while(params.time < params.t_final) { // Main loop to progress forward in time.
  // Step 1a: Set current time to correct value & compute exact solution
  params.time = ((REAL)params.n)*params.dt;
  /* Step 2: Validation: Output relative error between numerical and exact solution, */
  // Step 2b: Output to 2D grid (region of x-y plane near origin)
  //          every NSKIP_2D_OUTPUT iterations.
  if((params.n)%NSKIP_2D_OUTPUT ==0) {
    // Step 2a: Output data on gridpoints closest to xy-plane, ignoring points "too close" to outer boundary
    diagnostic_output_2D_xy_plane(&params, xx, gridfuncs.y_n_gfs, gridfuncs.diagnostic_output_gfs);
  }
  if((params.n)%NSKIP_0D_OUTPUT ==0) {
    // Step 2c: Output relative error between exact & numerical at center of grid.
    const int i0mid=Nxx_plus_2NGHOSTS0/2;
    const int i1mid=Nxx_plus_2NGHOSTS1/2;
    const int i2mid=Nxx_plus_2NGHOSTS2/2;
    REAL uu_exact,vv_exact; exact_solution_single_point(&params, xx[0][i0mid],xx[1][i1mid],xx[2][i2mid],
                                                        &uu_exact,&vv_exact);
    const REAL numerical = gridfuncs.y_n_gfs[IDX4S(UUGF,i0mid,i1mid,i2mid)];
    const REAL relative_error = fabs((uu_exact-numerical)/uu_exact) + 1e-16; // Add 1e-16 in case relerror
                                                                            // is exactly zero.
    printf("%e %e || %e %e %e: %e %e\n",params.time, log10(relative_error),
           xx[0][i0mid],xx[1][i1mid],xx[2][i2mid], numerical,uu_exact);
  }
  // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm,
  //         applying quadratic extrapolation outer boundary conditions.
  // Step 3.b: Step forward one timestep (t -> t+dt) in time using
  //           chosen RK-like MoL timestepping algorithm
  MoL_step_forward_in_time(&params, xx, &gridfuncs);
} // End main loop to progress forward in time.
// Step 4: Free all allocated memory
MoL_free_memory_y_n_gfs(&params, &gridfuncs);
MoL_free_memory_non_y_n_gfs(&params, &gridfuncs);
for(int i=0;i<3;i++) free(xx[i]);
return 0;
"""
    add_to_Cfunction_dict(
        includes=includes, prefunc=prefunc,
        desc=desc,
        c_type=c_type, name=name, params=params,
        body=body,
        rel_path_to_Cparams=os.path.join("."), enableCparameters=False)
```
```python
def register_C_code_functions_scalarwave():
    """Register every scalar-wave C function in NRPy+'s C-function dictionary.

    Each call below adds one C function (defined earlier in this notebook)
    to the global dictionary from which NRPy_function_prototypes.h and the
    .c source files are later generated.
    """
    add_to_Cfunction_dict_exact_solution_single_point()
    add_to_Cfunction_dict_exact_solution_all_points()
    add_to_Cfunction_dict_rhs_eval()
    add_to_Cfunction_dict_diagnostic_output_2D_xy_plane()
    add_to_Cfunction_dict_apply_bcs()
    add_to_Cfunction_dict_main__ScalarWave_Playground()
import outputC as outC

# Register all C functions and core #defines, then generate the master
# header files. Ordering matters: header construction must come last.
outC.outputC_register_C_functions_and_NRPy_basic_defines()  # #define M_PI, etc.
# Declare paramstruct, register set_Cparameters_to_default(),
# and output declare_Cparameters_struct.h and set_Cparameters[].h:
outC.NRPy_param_funcs_register_C_functions_and_NRPy_basic_defines(os.path.join(Ccodesrootdir))
gri.register_C_functions_and_NRPy_basic_defines(enable_griddata_struct=False)  # #define IDX3S(), etc.
fin.register_C_functions_and_NRPy_basic_defines(NGHOSTS_account_for_onezone_upwind=False)  # #define NGHOSTS, etc.
# all functions needed for scalar wave:
register_C_code_functions_scalarwave()
# Output functions for computing all finite-difference stencils.
# Must be called after defining all functions depending on FD stencils.
if enable_FD_functions:
    fin.output_finite_difference_functions_h(path=Ccodesrootdir)
# Call this last: Set up NRPy_basic_defines.h and NRPy_function_prototypes.h.
outC.construct_NRPy_basic_defines_h(Ccodesrootdir, enable_SIMD=enable_SIMD)
outC.construct_NRPy_function_prototypes_h(Ccodesrootdir)
```
```python
import cmdline_helper as cmd

# Compile the generated C code ("fast" optimizations), then run the
# executable at three resolutions for the convergence study below.
cmd.new_C_compile(Ccodesrootdir, "ScalarWave_Playground",
                  uses_free_parameters_h=True, compiler_opt_option="fast") # fastdebug or debug also supported
os.chdir(Ccodesrootdir)
for res in ["48", "64", "96"]:
    # Each run writes its 0D diagnostics to output/out<res>.txt
    cmd.Execute("ScalarWave_Playground", res, os.path.join("output", "out"+res+".txt"))
os.chdir("..")
# import sys
# sys.exit(1)
```
(EXEC): Executing `make -j18`...
(BENCH): Finished executing in 0.80 seconds.
Finished compilation.
(EXEC): Executing `taskset -c 1,3,5,7,9,11,13,15 ./ScalarWave_Playground 48`...
(BENCH): Finished executing in 0.20 seconds.
(EXEC): Executing `taskset -c 1,3,5,7,9,11,13,15 ./ScalarWave_Playground 64`...
(BENCH): Finished executing in 0.60 seconds.
(EXEC): Executing `taskset -c 1,3,5,7,9,11,13,15 ./ScalarWave_Playground 96`...
(BENCH): Finished executing in 1.61 seconds.
<a id='convergence'></a>
# Step 3: Code Validation: Verify that relative error in numerical solution converges to zero at the expected order \[Back to [top](#toc)\]
$$\label{convergence}$$
```python
%matplotlib inline
import matplotlib.pyplot as plt
import mpmath as mp
import csv
def file_reader(filename):
    """Read a space-delimited data file and return its first two columns.

    Parameters
    ----------
    filename : str or path-like
        Path to a whitespace-delimited text file (e.g. the out*.txt
        diagnostics written by ScalarWave_Playground).

    Returns
    -------
    tuple[list[float], list[float]]
        (column 0, column 1) converted to floats. Returns ([], []) for an
        empty file instead of raising IndexError.
    """
    with open(filename) as file:
        reader = csv.reader(file, delimiter=" ")
        # zip(*rows) transposes the row-oriented CSV into column tuples.
        data = list(zip(*reader))
    if not data:  # empty file: no columns to convert
        return [], []
    # csv yields strings; convert the first two columns to floats for math.
    data0 = [float(entry) for entry in data[0]]
    data1 = [float(entry) for entry in data[1]]
    return data0, data1
# Read the (time, log10 relative error) series at each resolution.
first_col48,second_col48 = file_reader(os.path.join(outdir,"out48.txt"))
first_col64,second_col64 = file_reader(os.path.join(outdir,"out64.txt"))
first_col96,second_col96 = file_reader(os.path.join(outdir,"out96.txt"))
# Rescale the higher-resolution errors by (N/48)^4: if the scheme converges
# at 4th order, all three curves should then lie on top of one another.
for i in range(len(second_col64)):
    # data64 = data48*(64/48)**4
    # -> log10(data64) = log10(data48) + 4*log(64/48)
    second_col64[i] += 4*mp.log10(64./48.)
for i in range(len(second_col96)):
    # data96 = data48*(96/48)**4
    # -> log10(data96) = log10(data48) + 4*log(96/48)
    second_col96[i] += 4*mp.log10(96./48.)
# https://matplotlib.org/gallery/text_labels_and_annotations/legend.html#sphx-glr-gallery-text-labels-and-annotations-legend-py
fig, ax = plt.subplots()
plt.title("Plot Demonstrating 4th-order Convergence")
plt.xlabel("time")
plt.ylabel(r"$\log_{10}$(Relative error)")
ax.plot(first_col48, second_col48, 'k--', label='Nx = 48')
ax.plot(first_col64, second_col64, 'k-', label='Nx = 64, mult by (64/48)^4')
ax.plot(first_col96, second_col96, 'k.', label='Nx = 96, mult by (96/48)^4')
legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')
legend.get_frame().set_facecolor('C1')
plt.show()
```

<a id='latex_pdf_output'></a>
# Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-Start_to_Finish-ScalarWave.pdf](Tutorial-Start_to_Finish-ScalarWave.pdf). (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```python
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
# Convert this notebook to LaTeX and compile it into a clickable PDF.
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-ScalarWave")
```
Created Tutorial-Start_to_Finish-ScalarWave.tex, and compiled LaTeX file to
PDF file Tutorial-Start_to_Finish-ScalarWave.pdf
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@Tutorial-Start_to_Finish-ScalarWave.ipynb@.PATH_END.py
|
{
"filename": "tutorial_pipe.md",
"repo_name": "rbuehler/vasca",
"repo_path": "vasca_extracted/vasca-main/docs/tutorials/tutorial_pipe.md",
"type": "Markdown"
}
|
---
jupytext:
hide_notebook_metadata: true
text_representation:
extension: .md
format_name: myst
format_version: 0.13
jupytext_version: 1.16.4
kernelspec:
display_name: vasca-github
language: python
name: vasca-github
---
```{code-cell}
:tags: [remove-cell]
# ruff: noqa: T201
```
```{code-cell}
:tags: [remove-input]
import pandas as pd
from IPython.display import HTML, display
from itables import init_notebook_mode, show

# Notebook-only setup: render all DataFrames as interactive itables widgets.
init_notebook_mode(all_interactive=True)
# Modify table CSS with colors that work in both light and dark themes
class_specific_css = """
.dataTable th {
font-weight: normal;
background-color: #075;
color: #fff;
}
.dataTable td {
border-color: #f0f;
background-color: #333;
color: #fff;
}
.dt-container {
font-size: small;
}
"""
# Inject the style block into the rendered notebook page.
display(HTML(f"<style>{class_specific_css}</style>"))
```
# Pipeline
This is a tutorial showcasing VASCA's pipeline flow on a simple example. We will go
through all the steps equivalent to what is done in [](#vasca_pipe.run_from_file).
This is the same function that is called when starting the pipeline from the CLI using ``vasca-pipe``.
The goal is to create a VASCA [](#Region) from multiple [](#GALEXField) for which we
download the raw data online from [MAST](https://astroquery.readthedocs.io/en/latest/mast/mast.html).
We apply quality cuts and do source clustering followed by variability analysis.
For this tutorial we are interested in the near-ultraviolet (NUV) observations
by GALEX. We are going to look at neighboring/overlapping fields all of which
contain the location of famous Tidal Disruption Event [_PS1-10jh_](https://en.wikipedia.org/wiki/Pan-STARRS#Selected_discoveries)
discovered by Pan-STARRS and observed by GALEX in 2010.
:::{figure-md} galex-fields-ps1-10jh
<img src="../images/GALEX_fields_ps1-10jh.jpg" alt="galex_fields_ps1-10jh" class="bg-primary mb-1" width="400px">
GALEX sky map with field footprints of observations around the location of PS1-10jh (
purple crosshair). Screenshot from [MAST Portal](https://mast.stsci.edu/portal/Mashup/Clients/Mast/Portal.html)
:::
+++
## General Configuration
The standard pipeline processing starts by reading a yaml file. To keep this tutorial
simple, we are going to introduce parts of the configuration step by step at the point
where they are required in the pipeline flow.
```{note}
An important premise of the configuration is that each parameter needs to be
configured explicitly. This means even default values are specified all the time. This
is a design decision purposefully made in order to ensure transparent and complete
configurations. As a result, all possible parameters are always included when looking
at configuration file.
```
Let's begin with the ``general`` section. Here, basic information and functionality is
configured. The ``name`` of the pipeline run specifies also the name of directory in
which all results will be stored. The location of output directory is at ``out_dir_base``
relative to the root directory of the package.
VASCA uses the powerful logging system provided by [loguru](https://loguru.readthedocs.io/en/stable/index.html).
The configuration specifies the [``log_level``](https://loguru.readthedocs.io/en/stable/api/logger.html#loguru._logger.Logger.level),
which we are going to set to debugging mode here. By default VASCA is going to save
all logging messages in a file stored in the output directory. ``log_file`` specifies
the name of that file, while ``default`` tells the pipeline to use a default name.
Parallel processing of the field-level analysis can be enabled when setting the number
of CPU threads ``nr_cpus > 1``.
VASCA can include field-averaged reference data, if such data is available in addition
to the visit-level data from the instrument's mission pipeline. To save memory/storage
and computation time it is configurable whether to include reference sources in the
final [](#Region)-file (``save_ref_srcs``) and whether to repeat already processed
fields that are included in the region (``run_fields``).
```{code-cell}
# Dictionary holding the configuration
# Dictionary holding the configuration
config = {}
# General section of the configuration:
# name ......... pipeline-run name; also the output subdirectory name
# out_dir_base . output location, relative to the package root directory
# log_level .... loguru log level; DEBUG is the most verbose
# log_file ..... "default" derives the log-file name from the run name
# nr_cpus ...... >1 enables parallel field-level processing
# save_ref_srcs  keep field-averaged reference sources in the Region file
# run_fields ... re-run fields that were already processed
config["general"] = {
    "name": "simple_pipe",
    "out_dir_base": "docs/tutorial_resources/vasca_pipeline",
    "log_level": "DEBUG",
    "log_file": "default",
    "nr_cpus": 3,
    "save_ref_srcs": True,
    "run_fields": True,
}
```
:::{tip}
In case the output location is on a remote server and multiple users should be
able to edit the data, i.e., reprocess data using an updated configruation, then
one needs to manage user access priviliges. For convenience this can be done
using [``umask``](https://en.wikipedia.org/wiki/Umask):
```Python
import os
os.umask("0o003", 0)
```
This will grand user and group full permissions. The setting is only persistant
throughout the Python session.
:::
+++
The pipeline begins with some prerequisites including enabling logging and creating
the output directory
```{code-cell}
import sys
from loguru import logger
from pathlib import Path
from importlib.resources import files

# Setup output directory (base dir plus one subdirectory per pipeline run)
out_dir_base = Path(files("vasca").parent / config["general"]["out_dir_base"])
out_dir_base.mkdir(parents=True, exist_ok=True)
pipe_dir = out_dir_base / config["general"]["name"]
pipe_dir.mkdir(parents=True, exist_ok=True)
# Path to log file
log_file = (
    pipe_dir / f'{config["general"]["name"]}.log'
)  # Pipeline name is used by default
# Logger configuration, both to stdout and .log file
log_cfg = {
    "handlers": [
        {
            "sink": sys.stdout,
            "format": "<green>{time:YYYY-MM-DD HH:mm:ss.SSSS}</green> "
            "<cyan>{name}</cyan>:<cyan>{line}</cyan> |"
            "<level>{level}:</level> {message}",
            "level": config["general"]["log_level"],
            "colorize": True,
            "backtrace": True,  # show full stack trace on errors
            "diagnose": True,  # annotate trace variables (debugging aid)
        },
    ],
}
logger.configure(**log_cfg)  # Set config
logger.add(log_file)  # Add file logger
logger.enable("vasca")  # Enable logging
# Some initial logging messages
logger.info(f"Runing '{config['general']['name']}'")
# NOTE(review): the message below says "Config. file" but logs the log-file
# path -- confirm whether the configuration path was intended here.
logger.debug(f"Config. file: {log_file}")
```
## Resources
Next is the section about resource handling. This specifies the method used to load
(``load_method``) field data and which data products should be included (tables,
tables plus visit-level images, or only metadata ``load_products``). Additionally
the ``coadd_exists`` flag tells the pipeline whether it can expect co-added (field-
averaged) data. Finally, ``field_kwargs`` allows to pass parameters directly to
the ``init`` function of a field class.
Here we are going to initialize fields from local raw data, if present. Else the
required data is downloaded from MAST. All data products will be included including
co-add data. Using the [](#ResourceManager) we can tell the field class where to
locate the data.
```{code-cell}
from vasca.resource_manager import ResourceManager

# Get the data locations using ResourceManager
rm = ResourceManager()
data_path = rm.get_path("docs_resources", "vasca")
visits_data_path = rm.get_path("gal_visits_list", "vasca")
# Resource section of the configuration:
# load_method ... "MAST_LOCAL": use local raw data if present, else download
# load_products . "ALL": tables plus visit-level images
# coadd_exists .. expect co-added (field-averaged) data to be available
# field_kwargs .. passed directly to the field class init
config["resources"] = {
    "load_method": "MAST_LOCAL",
    "load_products": "ALL",
    "coadd_exists": True,
    "field_kwargs": {
        "data_path": data_path,
        "visits_data_path": visits_data_path,
    },
}
```
## Observations
The observations section of the configuration is responsible for configuring
which combination of instrument (``observatory``) and filter (``obs_filter``) to
load data for. Also it specifies the exact list of fields to load (``obs_field_ids``).
In a later step we will also add here the selection parameters (``selection``) used
for the quality cuts on the data and the field-level clustering settings
(``cluster_det``).
```{code-cell}
# One entry per instrument/filter combination; here: GALEX in the NUV band.
config["observations"] = [
    {
        "observatory": "GALEX",
        "obs_filter": "NUV",
        # GALEX field IDs (ParentImgRunID) to be processed
        "obs_field_ids": [
            3880803393752530944,  # MISGCSN2_10493_0117
            2529969795944677376,  # ELAISN1_09
            2597664528044916736,  # PS_ELAISN1_MOS15
        ],
        # Filled in later in this tutorial:
        # "cluster_det": {},
        # "selection": {},
    },
    # More instruments/filters...
]
```
Find below the visit metadata about the fields under investigation.
```{code-cell}
:tags: [hide-input]
from astropy.table import Table

# Load the GALEX visits metadata table and decode byte-string columns to str.
df_gal_visits = (
    Table.read(visits_data_path)
    .to_pandas()
    .apply(lambda x: x.str.decode("utf-8") if x.dtype == "O" else x)
)
# Keep only visits belonging to the configured fields.
query = f"ParentImgRunID in {list(config['observations'][0]['obs_field_ids'])}"
df_select = df_gal_visits.query(query)
# Interactive table display (itables)
show(
    df_select,
    classes="display nowrap compact",
    scrollY="300px",
    scrollCollapse=True,
    paging=False,
    columnDefs=[{"className": "dt-body-left", "targets": "_all"}],
)
```
In the next step we will initialize a VASCA [](#Region) with all fields sequentially.
[](#load_from_config) is a convenience function that acts as an interface between the
region object and field-specific loading functions. It downloads the data from
MAST, detects if the data is already present on disk, and loads the cached
files. To save compute time later, a VASCA-field file is written to the download
location so that one can use this file instead of creating a new field from raw data.
This will be used during the field-level [processing](#field-level-analysis).
```{code-cell}
:tags: [hide-output]
from vasca.region import Region

# Initialize the region with all configured fields; raw data is downloaded
# from MAST unless it is already cached locally.
rg = Region.load_from_config(config)
```
This populates the region object with all specified fields, the relevant metadata is
stored in {term}`tt_fields`.
```{code-cell}
# Print a summary of the region's tables and loaded fields.
rg.info()
# rg.tt_fields
```
```{code-cell}
:tags: [remove-input]
# Display the region's field metadata table (tt_fields) interactively,
# decoding byte-string columns to str first.
df_tt_fields = rg.tt_fields.to_pandas().apply(
    lambda x: x.str.decode("utf-8") if x.dtype == "O" else x
)
show(
    df_tt_fields,
    classes="display nowrap compact",
    scrollY="300px",
    scrollCollapse=True,
    paging=False,
    columnDefs=[{"className": "dt-body-left", "targets": "_all"}],
)
```
## Field-level analysis
The field-level analysis incorporates, first, the data reduction and parameter mapping
from raw data to VASCA field objects, second, the data quality selection and finally
source clustering on the remaining high-quality detections.
The first step is implicitly taken care of by the [](#GALEXField) class, where the raw
data is loaded and only the column parameters are kept that are specified in the [](#tables_dict)
module. A shortcut is provided through the [](#Region.get_field) method which is an
interface to the ``load`` method of
any field class.
The configuration for the next two step requires the ``selection`` and ``cluster_det``
entries under the observations section.
### Data selection
```{note}
A crucial part of VASCA's flexibility to adapt to raw data of virtually any instrument
comes from the fact that the parameter list used for data quality selection is not
fixed and is allowed to vary for different instruments and filters. The only
requirement is an existent entry in the [](#tables_dict) module for any parameter and
a corresponding field class that includes these parameters in the {term}`tt_detections`
table.
```
The API for the data selection is provided by the [](#TableCollection.select_rows)
method. Each entry under selection maps to this interface. The ``table`` parameters
specifies which table to select on. Any selection operation modifies the ``sel``
column of a given table. It contains boolean values so ``0`` means _unselected_ and
``1`` means _selected_.
By specifying the ``presel_type`` parameter, one controls the logic by which an
existing selection is combined with a new one. The ``sel_type`` parameter specifies
the logic by which the selection on a set of multiple column parameters is combined.
Parameters ``range`` and ``bitmask`` provide the column parameter and artifact
bitflag values that are used to make the selection. Using ``set_range`` one can choose
to clip values of a certain column to minimum and maximum values.
In combination with ``sel_type = "is_in"`` and ``var`` parameters, it is possible to
select the rows of a given column ``var`` in the target table if a value is also present
in the same column of a reference table (``ref_table``).
```{code-cell}
import numpy as np

# Updating the observations for GALEX-NUV observations.
# Each "selection" entry maps to one TableCollection.select_rows() call; see
# the prose above for the meaning of presel_type/sel_type/range/bitmask.
config["observations"][0].update(
    {
        "selection": {
            # Quality cuts on visit-level detections
            "det_quality": {
                "table": "tt_detections",
                "presel_type": "and",
                "sel_type": "and",
                # Allowed value range per column (see tables_dict for units)
                "range": {
                    "s2n": [3.0, np.inf],
                    "r_fov": [0.0, 0.5],
                    "ellip_world": [0.0, 0.5],
                    "size_world": [0.0, 6.0],
                    "class_star": [0.15, 1.0],
                    "chkobj_type": [-0.5, 0.5],
                    "flux_app_ratio": [0.3, 1.05],
                },
                # Exclude detections carrying these artifact bit flags
                "bitmask": {
                    "artifacts": [2, 4, 8, 128, 256],
                },
                # Clip positional errors into [0.5, 5]
                "set_range": {"pos_err": [0.5, 5]},
            },
            # Quality cuts on field-averaged detections (stricter s2n)
            "coadd_det_quality": {
                "table": "tt_detections",
                "presel_type": "and",
                "sel_type": "and",
                "range": {
                    "s2n": [5.0, np.inf],
                    "r_fov": [0.0, 0.5],
                    "ellip_world": [0.0, 0.5],
                    "size_world": [0.0, 6.0],
                    "class_star": [0.15, 1.0],
                    "chkobj_type": [-0.5, 0.5],
                    "flux_app_ratio": [0.3, 1.05],
                },
                "bitmask": {
                    "artifacts": [2, 4, 8, 128, 256],
                },
            },
            # Selection on only those detections which are part of clusters
            "det_association": {
                "table": "tt_detections",
                "presel_type": "and",
                "sel_type": "is_in",
                "ref_table": "tt_sources",
                "var": "fd_src_id",
            },
        },
    }
)
```
### Clustering
Also the field-level clustering configuration showcases VASCA's modular
approach. In the ``cluster_det`` section one specifies the clustering algorithm
which, in principle, can be different for each instrument and filter. Although,
at the moment only [mean-shift clustering](https://en.wikipedia.org/wiki/Mean_shift)
is supported by VASCA.
Again, the responsible API is provided by [](#TableCollection.cluster_meanshift).
This method wraps a method provided by the [scikit-learn package](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.MeanShift.html). The end result is that each field obtains
a new {term}`tt_visits` table that lists all identified sources as defined
by their clustered detections. Sources have at the minimum one and as many as ``n_vis``
detections.
Mean-shift is well suited for this use case due to several reasons. Most importantly
it is that the algorithm doesn't require the total number of clusters as a parameter.
In fact it is determining that number which would be otherwise very difficult to
predict from the visit-level detections before having done the clustering.
Another reason is its relatively simple algorithm where only one parameter is
required. It is called the ``bandwidth`` which means, translated to the astronomy use
case, the radial size of a typical source on the sky. It should be roughly chosen
to match the instrument's PSF, which, for GALEX, is about 5 arcseconds. We set
it slightly smaller to limit false associations also considering that the
source center is usually much better constrained than the PSF might suggest.
```{code-cell}
# Updating the observations for GALEX-NUV observations continued...
# The mean-shift parameters are forwarded to sklearn.cluster.MeanShift;
# per the discussion above, "bandwidth" is the typical source radius on the
# sky, chosen slightly below the ~5 arcsec GALEX PSF to limit false
# associations.
config["observations"][0].update(
    {
        "cluster_det": {
            "meanshift": {
                "bandwidth": 4,
                "seeds": None,
                "bin_seeding": False,
                "min_bin_freq": 1,
                "cluster_all": True,
                "n_jobs": None,
                "max_iter": 300,
                # Cluster the visit-level detections table
                "table_name": "tt_detections",
            },
        },
    },
)
```
### Pipeline flow
According to the configuration above, we can finally run the analysis. VASCA
implements parallel processing ([](inv:#*.Pool.starmap)) for this part of the pipeline
by applying the [](#run_field) method in parallel for each field.
+++
First, the parameters for [](#run_field) are collected.
```{code-cell}
import vasca.utils as vutils

# Collect parameters from config: one [obs_nr, field_id, rg, config] entry
# per field, consumed by Pool.starmap in the next cell.
fd_pars: list[list] = []  # each item: [obs_nr, field_id, Region, config]
vobs: list[dict] = config["observations"]
obs_nr: int
field_nr: str
# Loop over observation list index
for obs_nr, _ in enumerate(vobs):
    # Loop over fields
    for field_nr in vobs[obs_nr]["obs_field_ids"]:
        # Construct VASCA field ID (prepending instrument/filter identifier)
        iprefix: str = vutils.dd_obs_id_add[
            vobs[obs_nr]["observatory"] + vobs[obs_nr]["obs_filter"]
        ]
        field_id: str = f"{iprefix}{field_nr}"
        # Collect this field's argument list for the parallel run
        fd_pars.append([obs_nr, field_id, rg, config])
```
Second, all fields are processed in parallel.
```{code-cell}
:tags: [hide-output]
from multiprocessing.pool import Pool

import vasca.vasca_pipe as vpipe

# Run each field in a separate process in parallel
nr_cpus = config["general"]["nr_cpus"]
logger.info(f"Analyzing {len(fd_pars)} fields on {nr_cpus} parallel threads.")
with Pool(processes=nr_cpus) as pool:
    # starmap blocks until every field has been processed
    pool_return = pool.starmap(vpipe.run_field_docs, fd_pars)
# The context manager terminates the pool on exit; join() then waits for the
# worker processes to finish shutting down.
pool.join()
logger.info("Done analyzing individual fields.")
```
Finally, the pool results are unpacked and the region object is updated with
processed field information.
A memory-saving procedure is used where first all fields are brought to the
scope of the region object by filling the ``Region.field`` dictionary from
which the field-level data is taken and stacked in respective region-owned
tables using the [](#Region.add_table_from_fields) method. After this step
all fields are discarded from the scope, to be deleted from the garbage collector.
```{hint}
In VASCA a [](#Region) object keeps track of its fields in the ``Region.tt_fields``
table. At any time one can load field data of a specific field using [](#Region.get_field).
```
```{code-cell}
:tags: [hide-output]
from astropy.table import unique
# Loop over processed fields
for field in pool_return:
# Populate region field dictionary with field data
rg.fields[field.field_id] = field
logger.info(f"Added field {field.field_id} from pool to region")
# Add field tables to region
# Visits metadata
rg.add_table_from_fields("tt_visits")
rg.tt_visits = unique(rg.tt_visits, keys="vis_id")
# Clustered sources
rg.add_table_from_fields("tt_sources")
# Visit-level detections
rg.add_table_from_fields("tt_detections", only_selected=False)
# Field-averaged detections
rg.add_table_from_fields("tt_coadd_detections")
# Discard fields. All that was needed has been transferred to region tables
del rg.fields
```
## Region-level analysis
In the final stage of the pipeline, all region-level analysis steps are performed.
This stage encompasses three key tasks: first, managing sources located in
overlapping sky regions; second, evaluating statistics for use in variability
detection; and finally, preparing the pipeline results for writing to disk and
generating the VASCA variable source catalog.
+++
### Overlapping fields
VASCA merges field-level sources in overlapping sky regions in a second clustering
step, where the same mean-shift algorithm is used but with a dedicated configuration.
In case field-averaged (co-added) data exists, the field-level detections are merged
in the same way, again, with a separate configuration.
```{code-cell}
config["cluster_src"] = {
"meanshift": {
"bandwidth": 4,
"seeds": None,
"bin_seeding": False,
"min_bin_freq": 1,
"cluster_all": True,
"n_jobs": 1,
"max_iter": 300,
"table_name": "tt_sources",
}
}
config["cluster_coadd_dets"] = {
"meanshift": {
"bandwidth": 4,
"seeds": None,
"bin_seeding": False,
"min_bin_freq": 1,
"cluster_all": True,
"n_jobs": 1,
"max_iter": 300,
"table_name": "tt_coadd_detections",
}
}
```
```{code-cell}
:tags: [hide-output]
# Cluster field sources and co-adds in parallel
ll_cluster = [
[
config["cluster_src"]["meanshift"],
rg.tt_sources,
rg.tt_detections,
False,
],
[
config["cluster_coadd_dets"]["meanshift"],
rg.tt_coadd_detections,
False,
True,
],
]
with Pool(processes=2) as pool:
pool_return = pool.starmap(vpipe.run_cluster_fields, ll_cluster)
pool.join()
# Copy parallelized results into region
for pool_rg in pool_return:
if "tt_sources" in pool_rg._table_names:
rg.tt_sources = pool_rg.tt_sources
rg.tt_detections = pool_rg.tt_detections
else:
rg.add_table(pool_rg.tt_coadd_sources, "region:tt_coadd_sources")
rg.tt_coadd_detections = pool_rg.tt_coadd_detections
```
### Source statistics
The primary statistic used by VASCA to detect variability is the probability of
obtaining a flux with the observed fluctuations under the assumption that the null
hypothesis is true, that is, constant flux (``flux_cpval``). The default selection is
such that the chance for the observed variability being purely random can be ruled out
at [5-sigma significance](https://en.wikipedia.org/wiki/Normal_distribution#Standard_deviation_and_coverage).
Additionally, the normalized excess variance (``flux_nxv``) and absolute flux limits
are used to limit contamination due to potential hidden systematic flux variations.
Similarly, a selection on the variation of the spatial coordinates (``pos_cpval``, ``pos_xv``)
is used to reduce the contamination due to false associations of visit-level detections
in the clustering step.
To ensure statistical correctness, only sources with more than three detections are
considered (``n_det>3``).
```{note}
The source selection is configured independently for each observational filter. This
allows adapting to potential systematics in a very flexible way.
```
For a more concrete example on these calculations in VASCA, have a
look at the tutorial on [Variability Statistics](tutorial_vat_stat.md).
```{code-cell}
config["selection_src"] = {
"src_variability_nuv": {
"table": "tt_sources",
"presel_type": "or",
"sel_type": "and",
"obs_filter": "NUV",
"range": {
"nr_det": [3, np.inf],
"pos_cpval": [0.0001, np.inf],
"pos_xv": [-np.inf, 2],
"flux_cpval": [-0.5, 0.000000573303],
"flux_nxv": [0.001, np.inf],
"flux": [0.144543, 575.43],
},
},
"src_coadd_diff_nuv": {
"table": "tt_sources",
"presel_type": "or",
"sel_type": "and",
"obs_filter": "NUV",
"range": {
"nr_det": [2, np.inf],
"pos_cpval": [0.0001, np.inf],
"pos_xv": [-np.inf, 2],
"coadd_ffactor": [2.0, np.inf],
"coadd_fdiff_s2n": [7, np.inf],
},
},
}
```
An additional selection may be possible if co-added data is available. In this case
the association between VASCA sources and the field-averaged input data can be made.
This serves as cross-check since most static sources should be recovered in the
clustering step and match the field average.
%%s
config["assoc_src_coadd"] = {
"dist_max": 1, # Associate nearest source below this distance in arc_sec OR
"dist_s2n_max": 3, # Associate nearest source with this distance in units of "squared summed position error"
}
```{code-cell}
:tags: [hide-output]
import astropy.units as uu
# Calculate source statistics
rg.set_src_stats(src_id_name="rg_src_id")
rg.set_src_stats(src_id_name="coadd_src_id")
# Match sources to coadd sources
rg.cross_match(
dist_max=config["assoc_src_coadd"]["dist_max"] * uu.arcsec,
dist_s2n_max=config["assoc_src_coadd"]["dist_s2n_max"],
)
# Select variable sources, deselect all sources before and
# make sure all tables containing the region source ID
# are synchronized to this selection
rg.tt_sources["sel"] = False
rg.select_from_config(
config["selection_src"]
) # Loops over TableCollection.select_rows()
rg.synch_src_sel(remove_unselected=False)
# Set source ID mapping table
rg.set_src_id_info()
```
```{code-cell}
:tags: [hide-output]
# View all table attributes that have been added to the region object
rg.info()
```
```{code-cell}
:tags: [hide-input]
:title: '# %%'
# View all sources that passed the selection
df_sources = (
vutils.select_obs_filter(rg.tt_sources, obs_filter_id=1)
.to_pandas()
.apply(lambda x: x.str.decode("utf-8") if x.dtype == "O" else x)
)
df_select = df_sources.query("sel")
show(
df_select,
classes="display nowrap compact",
scrollY="300px",
scrollCollapse=True,
paging=False,
columnDefs=[{"className": "dt-body-left", "targets": "_all"}],
)
```
### Pipeline Output
With a few simple export functions the full region file, the variable source catalog
and its pipeline configuration are saved to the pipeline output directory.
This concludes the tutorial. Readers are invited to look into the post-processing
notebooks as listed [here](index.md).
```{code-cell}
:tags: [hide-output]
import yaml
# Write region file
rg.write_to_fits(file_name=pipe_dir / f'region_{config["general"]["name"]}.fits')
# Export variable source catalog (only selected sources are included)
rc = rg.get_region_catalog()
rc.write_to_fits(file_name=pipe_dir / f'region_{config["general"]["name"]}_cat.fits')
# Write used config file
yaml_out_name = pipe_dir / f'cfg_ran_{config["general"]["name"]}.yaml'
with open(yaml_out_name, "w") as yaml_file:
yaml.dump(config, yaml_file)
logger.info("Done running VASCA pipeline.")
```
```{code-cell}
# To be continued ...
```
|
rbuehlerREPO_NAMEvascaPATH_START.@vasca_extracted@vasca-main@docs@tutorials@tutorial_pipe.md@.PATH_END.py
|
{
"filename": "acspyTestAcsCORBA.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/acspycommon/test/acspyTestAcsCORBA.py",
"type": "Python"
}
|
#!/usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# @(#) $Id: acspyTestAcsCORBA.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $
###############################################################################
'''
Tests CORBA access.
'''
from Acspy.Util import ACSCorba
###############################################################################
if __name__ == '__main__':
    # Smoke test: exercise every public accessor of the ACSCorba helper
    # module and print the resulting CORBA references.
    # Each print takes a single argument, so the parenthesized
    # (function-call) form produces identical output under Python 2
    # (prints the parenthesized expression) and Python 3 (print function),
    # replacing the Python-2-only 'print x' statement syntax.
    print('Manager corbaloc: %s' % ACSCorba.getManagerCorbaloc())
    print('ORB: %s' % ACSCorba.getORB())
    print('POA ROOT: %s' % ACSCorba.getPOARoot())
    print('POA Manager: %s' % ACSCorba.getPOAManager())
    print('Manager: %s' % ACSCorba.getManager())
    print('Client: %s' % ACSCorba.getClient())
    print('Log: %s' % ACSCorba.log())
    print('LogFactory: %s' % ACSCorba.logFactory())
    print('NotifyEventChannelFactory: %s' % ACSCorba.notifyEventChannelFactory())
    print('ArchivingChannel: %s' % ACSCorba.archivingChannel())
    print('LoggingChannel: %s' % ACSCorba.loggingChannel())
    print('InterfaceRepository: %s' % ACSCorba.interfaceRepository())
    print('CDB: %s' % ACSCorba.cdb())
    print('ACSLogSvc: %s' % ACSCorba.acsLogSvc())
    print('NameService: %s' % ACSCorba.nameService())
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@acspycommon@test@acspyTestAcsCORBA.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "cmillion/gPhoton",
"repo_path": "gPhoton_extracted/gPhoton-master/README.md",
"type": "Markdown"
}
|
**gPhoton** is a project to calibrate, archive, and make available every photon event observed by the GALEX mission in an effort to dramatically improve the flexibility and utility of the data with specific emphasis on enabling short time domain photometric studies.
To get started, please read the [User Guide](https://github.com/cmillion/gPhoton/blob/master/docs/UserGuide.md). You can also refer to the [Full API](http://gphoton.readthedocs.io/en/master/) available on ReadTheDocs.
Note that Python 2.x is no longer supported by gPhoton, and many things will break under Python 2. **Use Python 3 or above.**
If you use gPhoton software in your research, please cite at least one of the following:
* _Million, et al. "gPhoton: The GALEX Photon Data Archive." The Astrophysical Journal 833.2 (2016): 292._
* _Million, et al., 2016, gPhoton, Astrophysics Source Code Library, record ascl:1603.004_

[](http://gphoton.readthedocs.io/en/master/?badge=master)
<a href="http://ascl.net/1603.004"><img src="https://img.shields.io/badge/ascl-1603.004-blue.svg?colorB=262255" alt="ascl:1603.004" /></a>
|
cmillionREPO_NAMEgPhotonPATH_START.@gPhoton_extracted@gPhoton-master@README.md@.PATH_END.py
|
{
"filename": "piecewise.py",
"repo_name": "nanograv/PINT",
"repo_path": "PINT_extracted/PINT-master/src/pint/models/piecewise.py",
"type": "Python"
}
|
"""Pulsar timing piecewise spin-down solution."""
import astropy.units as u
import numpy as np
from pint.models.parameter import prefixParameter
from pint.models.timing_model import PhaseComponent
from pint.utils import split_prefixed_name, taylor_horner
from pint.exceptions import MissingParameter
class PiecewiseSpindown(PhaseComponent):
    """Pulsar spin-down piecewise solution.

    The rotation phase is modeled by independent Taylor expansions
    (phase offset, frequency and its first two derivatives about a
    per-piece epoch), each valid between a per-piece start and stop epoch.

    Parameters supported:
    .. paramtable::
        :class: pint.models.piecewise.PiecewiseSpindown
    """
    # Register the component so the model builder can discover it.
    register = True
    category = "piecewise"
    # ------------------------------------------------------------------
    # Description factories: each builds the human-readable description
    # for the prefixed parameter of solution piece ``x``.
    # NOTE(review): the epoch/phase factories return 1-tuples (trailing
    # comma) while the frequency ones return plain strings -- this looks
    # like an accidental inconsistency; confirm what prefixParameter's
    # ``description_template`` actually expects.
    # ------------------------------------------------------------------
    @classmethod
    def _description_solution_epochstart(cls, x):
        return ("Start epoch of solution piece %d" % x,)
    @classmethod
    def _description_solution_epochstop(cls, x):
        return ("Stop epoch of solution piece %d" % x,)
    @classmethod
    def _description_solution_epoch(cls, x):
        return ("Epoch of solution piece %d" % x,)
    @classmethod
    def _description_solution_startphase(cls, x):
        return ("Starting phase of solution piece %d" % x,)
    @classmethod
    def _description_solution_frequency(cls, x):
        return "Frequency of solution piece %d" % x
    @classmethod
    def _description_solution_frequencyderivative(cls, x):
        return "Frequency-derivative of solution piece %d " % x
    @classmethod
    def _description_solution_secondfrequencyderivative(cls, x):
        return "Second frequency-derivative of solution piece %d " % x
    def __init__(self):
        """Create the component with the template (index 1) parameters."""
        super().__init__()
        # Validity window of each piece: TOAs in [PWSTART_n, PWSTOP_n),
        # given as TDB MJDs.
        self.add_param(
            prefixParameter(
                name="PWSTART_1",
                units="MJD",
                description_template=self._description_solution_epochstart,
                parameter_type="MJD",
                time_scale="tdb",
                tcb2tdb_scale_factor=u.Quantity(1),
            )
        )
        self.add_param(
            prefixParameter(
                name="PWSTOP_1",
                units="MJD",
                description_template=self._description_solution_epochstop,
                parameter_type="MJD",
                time_scale="tdb",
                tcb2tdb_scale_factor=u.Quantity(1),
            )
        )
        # Reference epoch of the Taylor expansion for each piece.
        self.add_param(
            prefixParameter(
                name="PWEP_1",
                units="MJD",
                description_template=self._description_solution_epoch,
                parameter_type="MJD",
                time_scale="tdb",
                tcb2tdb_scale_factor=u.Quantity(1),
            )
        )
        # Taylor coefficients: phase offset, spin frequency and its first
        # two time derivatives at the piece epoch.
        self.add_param(
            prefixParameter(
                name="PWPH_1",
                units="",
                value=0.0,
                description_template=self._description_solution_startphase,
                type_match="float",
                uncertainty=1,
                tcb2tdb_scale_factor=u.Quantity(1),
            )
        )
        self.add_param(
            prefixParameter(
                name="PWF0_1",
                units="Hz",
                value=0.0,
                description_template=self._description_solution_frequency,
                type_match="float",
                tcb2tdb_scale_factor=u.Quantity(1),
            )
        )
        self.add_param(
            prefixParameter(
                name="PWF1_1",
                units="Hz/s",
                value=0.0,
                description_template=self._description_solution_frequencyderivative,
                tcb2tdb_scale_factor=u.Quantity(1),
            )
        )
        self.add_param(
            prefixParameter(
                name="PWF2_1",
                units="Hz/s^2",
                value=0.0,
                description_template=self._description_solution_secondfrequencyderivative,
                tcb2tdb_scale_factor=u.Quantity(1),
            )
        )
        self.phase_funcs_component += [self.piecewise_phase]
        # self.phase_derivs_wrt_delay += [self.d_piecewise_phase_d_delay]
    def setup(self):
        """Collect the piece indices in use and register phase derivatives."""
        super().setup()
        # Prefixes of all per-piece parameters owned by this component.
        self.pwsol_prop = [
            "PWEP_",
            "PWSTART_",
            "PWSTOP_",
            "PWPH_",
            "PWF0_",
            "PWF1_",
            "PWF2_",
        ]
        # One entry per (prefix, parameter) match -- duplicates are
        # removed by the ``set()`` calls at the points of use.
        self.pwsol_indices = [
            getattr(self, y).index
            for x in self.pwsol_prop
            for y in self.params
            if x in y
        ]
        # for idx in set(self.pwsol_indices):
        #     for param in self.pwsol_prop:
        #         if not hasattr(self, param + "%d" % idx):
        #             param0 = getattr(self, param + "1")
        #             self.add_param(param0.new_param(idx))
        #             getattr(self, param + "%d" % idx).value = 0.0
        #         self.register_deriv_funcs(
        #             getattr(self, "d_phase_d_" + param[0:-1]), param + "%d" % idx
        #         )
        # Derivatives exist only for the phase/frequency terms, not for
        # the epoch and window-boundary parameters.
        for idx in set(self.pwsol_indices):
            for param in self.pwsol_prop:
                if param.startswith("PWF") or param.startswith("PWPH"):
                    self.register_deriv_funcs(self.d_phase_d_F, f"{param}{idx}")
    def validate(self):
        """Validate parameters input.

        Every piece must define its epoch (PWEP) and both validity
        boundaries (PWSTART/PWSTOP); otherwise raise MissingParameter.
        """
        super().validate()
        for idx in set(self.pwsol_indices):
            if not hasattr(self, "PWEP_%d" % idx):
                msg = "Piecewise solution Epoch is needed for Piece %d." % idx
                raise MissingParameter("PiecewiseSpindown", "PWEP_%d" % idx, msg)
            if not hasattr(self, "PWSTART_%d" % idx):
                msg = "Piecewise solution starting epoch is needed for Piece %d." % idx
                raise MissingParameter("PiecewiseSpindown", "PWSTART_%d" % idx, msg)
            if not hasattr(self, "PWSTOP_%d" % idx):
                msg = "Piecewise solution end epoch is needed for Piece %d." % idx
                raise MissingParameter("PiecewiseSpindown", "PWSTOP_%d" % idx, msg)
    def print_par(self, format="pint"):
        """Return the par-file representation of all piecewise parameters."""
        result = ""
        for idx in set(self.pwsol_indices):
            for param in self.pwsol_prop:
                par = getattr(self, param + "%d" % idx)
                result += par.as_parfile_line(format=format)
        return result
    def get_dt_and_affected(self, toas, delay, glepnm):
        """Return ``(dt, affected)`` for the piece owning epoch ``glepnm``.

        ``affected`` is a boolean mask selecting the TOAs inside
        [PWSTART, PWSTOP) of that piece; ``dt`` is their time since the
        piece epoch with ``delay`` (seconds) subtracted.
        """
        tbl = toas.table
        glep = getattr(self, glepnm)
        idx = glep.index
        start = getattr(self, "PWSTART_%d" % idx).value
        stop = getattr(self, "PWSTOP_%d" % idx).value
        affected = (tbl["tdbld"] >= start) & (tbl["tdbld"] < stop)
        phsepoch_ld = glep.quantity.tdb.mjd_long
        dt = (tbl["tdbld"][affected] - phsepoch_ld) * u.day - delay[affected]
        return dt, affected
    def piecewise_phase(self, toas, delay):
        """Piecewise-solution phase function.

        delay is the time delay from the TOA to time of pulse emission
        at the pulsar, in seconds.

        returns an array of phases in long double
        """
        phs = u.Quantity(np.zeros(toas.ntoas, dtype=np.longdouble))
        # One PWEP_n parameter exists per solution piece; each piece
        # contributes only over its own validity window.
        glepnames = [x for x in self.params if x.startswith("PWEP_")]
        for glepnm in glepnames:
            glep = getattr(self, glepnm)
            idx = glep.index
            # dPH = getattr(self, "PWPH_%d" % idx).quantity
            # dF0 = getattr(self, "PWF0_%d" % idx).quantity
            # dF1 = getattr(self, "PWF1_%d" % idx).quantity
            # dF2 = getattr(self, "PWF2_%d" % idx).quantity
            dt, affected = self.get_dt_and_affected(toas, delay, glepnm)
            # fterms = [dPH, dF0, dF1, dF2]
            # Taylor terms [phase, F0, F1, F2] evaluated at dt via Horner.
            fterms = self.get_spin_terms(idx)
            phs[affected] += taylor_horner(dt.to(u.second), fterms)
        return phs.to(u.dimensionless_unscaled)
    # def d_piecewise_phase_d_delay(self, toas, param, delay):
    #     par = getattr(self, param)
    #     unit = par.units
    #     tbl = toas.table
    #     ders = u.Quantity(np.zeros(toas.ntoas, dtype=np.longdouble) * (1 / u.second))
    #     glepnames = [x for x in self.params if x.startswith("PWEP_")]
    #     for glepnm in glepnames:
    #         glep = getattr(self, glepnm)
    #         idx = glep.index
    #         dF0 = getattr(self, "PWF0_%d" % idx).quantity
    #         dF1 = getattr(self, "PWF1_%d" % idx).quantity
    #         dF2 = getattr(self, "PWF2_%d" % idx).quantity
    #         dt, affected = self.get_dt_and_affected(toas, delay, glepnm)
    #         fterms = [0.0 * u.Unit("")] + [dF0, dF1, dF2]
    #         d_pphs_d_delay = taylor_horner_deriv(dt.to(u.second), fterms)
    #         ders[affected] = -d_pphs_d_delay.to(1 / u.second)
    #
    #     return ders.to(1 / unit)
    def get_spin_terms(self, order):
        """Return [PWPH, PWF0, PWF1, PWF2] quantities of piece ``order``."""
        return [getattr(self, f"PWPH_{order}").quantity] + [
            getattr(self, f"PWF{ii}_{order}").quantity for ii in range(3)
        ]
    def d_phase_d_F(self, toas, param, delay):
        """Calculate the derivative of the phase w.r.t. one spin term
        (PWPH_n or PWF0/1/2_n)."""
        par = getattr(self, param)
        unit = par.units
        pn, idxf, idxv = split_prefixed_name(param)
        # Position of this term in the Taylor series: PWPH -> 0,
        # PWF0/1/2 -> 1/2/3 (param[:4] isolates e.g. "PWF1", whose parsed
        # numeric suffix is the derivative order).
        order = split_prefixed_name(param[:4])[2] + 1 if param.startswith("PWF") else 0
        # order = idxv + 1
        # idxv is the piece index parsed from the full parameter name.
        fterms = self.get_spin_terms(idxv)
        # make the chosen fterms 1 others 0
        fterms = [ft * np.longdouble(0.0) / unit for ft in fterms]
        fterms[order] += np.longdouble(1.0)
        glepnm = f"PWEP_{idxf}"
        res = u.Quantity(np.zeros(toas.ntoas, dtype=np.longdouble)) * (1 / unit)
        dt, affected = self.get_dt_and_affected(toas, delay, glepnm)
        d_pphs_d_f = taylor_horner(dt.to(u.second), fterms)
        res[affected] = d_pphs_d_f.to(1 / unit)
        return res
|
nanogravREPO_NAMEPINTPATH_START.@PINT_extracted@PINT-master@src@pint@models@piecewise.py@.PATH_END.py
|
{
"filename": "_textsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/image/_textsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``image.textsrc`` property.

    Thin subclass of ``SrcValidator`` that only wires up the property
    name, the parent trace name, and the default edit type.
    """

    def __init__(self, plotly_name="textsrc", parent_name="image", **kwargs):
        # Pop ``edit_type`` first so an explicit caller value wins over
        # the "none" default.
        edit_type = kwargs.pop("edit_type", "none")
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@image@_textsrc.py@.PATH_END.py
|
{
"filename": "axes.py",
"repo_name": "marblestation/posidonius",
"repo_path": "posidonius_extracted/posidonius-master/posidonius/particles/axes.py",
"type": "Python"
}
|
import numpy as np
class Axes(object):
    """Three-component (x, y, z) container for scalar or array quantities.

    Each component is stored either as a plain ``float`` (scalar case) or
    as a ``numpy.ndarray`` (vector case); in the vector case all three
    arrays must have the same length.
    """

    def __init__(self, x, y, z):
        """Store the components, validating array lengths.

        Raises
        ------
        Exception
            If ``x`` is an array and the lengths of ``x``, ``y`` and
            ``z`` do not all match.
        """
        # isinstance (instead of the previous exact ``type(x) is`` check)
        # also accepts ndarray subclasses, which behave identically here.
        if isinstance(x, np.ndarray):
            if len(x) != len(y) or len(x) != len(z):
                raise Exception("Arrays length do not match!")
            self._data = {u'x': x, u'y': y, u'z': z}
        else:
            self._data = {u'x': float(x), u'y': float(y), u'z': float(z)}

    def get(self):
        """Return a shallow copy of the component mapping."""
        return self._data.copy()

    def x(self):
        """Return the x component."""
        return self._data['x']

    def y(self):
        """Return the y component."""
        return self._data['y']

    def z(self):
        """Return the z component."""
        return self._data['z']

    def _set_component(self, key, value, others):
        """Store one component; shared validation for set_x/set_y/set_z."""
        if isinstance(value, np.ndarray):
            # An array component must match the length of the other two.
            for other in others:
                if len(value) != len(self._data[other]):
                    raise Exception("Arrays length do not match!")
            self._data[key] = value
        else:
            self._data[key] = float(value)

    def set_x(self, x):
        """Replace the x component (scalar or array)."""
        self._set_component(u'x', x, (u'y', u'z'))

    def set_y(self, y):
        """Replace the y component (scalar or array)."""
        self._set_component(u'y', y, (u'x', u'z'))

    def set_z(self, z):
        """Replace the z component (scalar or array)."""
        self._set_component(u'z', z, (u'x', u'y'))
|
marblestationREPO_NAMEposidoniusPATH_START.@posidonius_extracted@posidonius-master@posidonius@particles@axes.py@.PATH_END.py
|
{
"filename": "plot_triple_with_wind.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/examples/textbook/plot_triple_with_wind.py",
"type": "Python"
}
|
import os
import numpy
from amuse.lab import *
from prepare_figure import single_frame
from distinct_colours import get_distinct
from matplotlib import pyplot
def read_triple_data(filename):
    """Parse a triple-evolution log file into normalized orbital elements.

    Lines containing the word "Triple" are parsed; whitespace-separated
    token 3 holds the time, tokens 10/12 the inner semi-major axis and
    eccentricity, and tokens 16/18 the outer ones.  The record with
    t <= 0 defines the normalization constants; records with t >= 4 are
    collected and divided by those constants.

    Parameters
    ----------
    filename : str
        Path to the data file.

    Returns
    -------
    tuple of lists
        (t, ain, ein, aout, eout): times plus normalized inner/outer
        semi-major axes and eccentricities.
    """
    t = []
    ain = []
    aout = []
    ein = []
    eout = []
    # Normalization constants, taken from the t <= 0 record.  All four
    # are initialized so a file without such a record fails with a clear
    # ZeroDivisionError instead of a NameError (e0in/e0out were
    # previously left unset).
    a0in = a0out = 0
    e0in = e0out = 0
    # Context manager guarantees the handle is closed (the original
    # iterated over a bare open() and leaked the file object).
    with open(filename) as infile:
        for line in infile:
            if "Triple" not in line:
                continue
            l = line.split()
            ti = float(l[3])
            if ti <= 0:
                a0in = float(l[10])
                a0out = float(l[16])
                e0in = float(l[12])
                e0out = float(l[18])
            if ti >= 4:
                t.append(ti)
                ain.append(float(l[10])/a0in)
                ein.append(float(l[12])/e0in)
                aout.append(float(l[16])/a0out)
                eout.append(float(l[18])/e0out)
    return t, ain, ein, aout, eout
try:
    # Locate the textbook data directory relative to the AMUSE install.
    amusedir = os.environ['AMUSE_DIR']
    dir = amusedir+'/examples/textbook/'
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # only KeyError is expected here.
    print('Environment variable AMUSE_DIR not set')
    dir = './'
filename = dir+'evolve_triple_with_wind.data'
t, ain, ein, aout, eout = read_triple_data(filename)
# Plot normalized eccentricity versus normalized semi-major axis for the
# inner and outer orbits of the triple.
x_label = "$a/a_{0}$"
y_label = "$e/e_{0}$"
fig = single_frame(x_label, y_label, logx=False, logy=False,
                   xsize=10, ysize=8)
color = get_distinct(2)  # two visually distinct colors
pyplot.plot(ain, ein, c=color[0], label= 'inner')
pyplot.plot(aout, eout, c=color[1], label= 'outer')
pyplot.legend(loc='best', ncol=1, shadow=False, fontsize=20)
# Save the figure before showing it interactively.
save_file = 'evolve_triple_with_wind.png'
pyplot.savefig(save_file)
print('\nSaved figure in file', save_file,'\n')
pyplot.show()
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@examples@textbook@plot_triple_with_wind.py@.PATH_END.py
|
{
"filename": "methods.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/timeseries/periodograms/bls/methods.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["bls_fast", "bls_slow"]
from functools import partial
import numpy as np
from ._impl import bls_impl
def bls_slow(t, y, ivar, period, duration, oversample, use_likelihood):
    """Compute the periodogram using a brute force reference method.

    Parameters
    ----------
    t : array-like
        Sequence of observation times.
    y : array-like
        Sequence of observations associated with times t.
    ivar : array-like
        The inverse variance of ``y``.
    period : array-like
        The trial periods where the periodogram should be computed.
    duration : array-like
        The durations that should be tested.
    oversample :
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.

    Returns
    -------
    power : array-like
        The periodogram evaluated at the periods in ``period``.
    depth : array-like
        The estimated depth of the maximum power model at each period.
    depth_err : array-like
        The 1-sigma uncertainty on ``depth``.
    duration : array-like
        The maximum power duration at each period.
    transit_time : array-like
        The maximum power phase of the transit in units of time. This
        indicates the mid-transit time and it will always be in the range
        (0, period).
    depth_snr : array-like
        The signal-to-noise with which the depth is measured at maximum power.
    log_likelihood : array-like
        The log likelihood of the maximum power model.
    """
    # Evaluate the single-period worker once per trial period and stack
    # the per-period result tuples into arrays.
    f = partial(_bls_slow_one, t, y, ivar, duration, oversample, use_likelihood)
    return _apply(f, period)
def bls_fast(t, y, ivar, period, duration, oversample, use_likelihood):
    """Compute the periodogram using an optimized Cython implementation.

    Parameters
    ----------
    t : array-like
        Sequence of observation times.
    y : array-like
        Sequence of observations associated with times t.
    ivar : array-like
        The inverse variance of ``y``.
    period : array-like
        The trial periods where the periodogram should be computed.
    duration : array-like
        The durations that should be tested.
    oversample :
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.

    Returns
    -------
    power : array-like
        The periodogram evaluated at the periods in ``period``.
    depth : array-like
        The estimated depth of the maximum power model at each period.
    depth_err : array-like
        The 1-sigma uncertainty on ``depth``.
    duration : array-like
        The maximum power duration at each period.
    transit_time : array-like
        The maximum power phase of the transit in units of time. This
        indicates the mid-transit time and it will always be in the range
        (0, period).
    depth_snr : array-like
        The signal-to-noise with which the depth is measured at maximum power.
    log_likelihood : array-like
        The log likelihood of the maximum power model.
    """
    # Thin wrapper: all the work happens in the compiled ``bls_impl``.
    return bls_impl(t, y, ivar, period, duration, oversample, use_likelihood)
def _bls_slow_one(t, y, ivar, duration, oversample, use_likelihood, period):
    """A private function to compute the brute force periodogram result.

    Scans a grid of transit phases (step ``duration / oversample``) and
    durations for a single trial ``period`` and returns the tuple
    (power, depth, depth_err, duration, transit_time, depth_snr,
    log_likelihood) of the best-scoring model.
    """
    best = (-np.inf, None)
    hp = 0.5 * period
    min_t = np.min(t)
    for dur in duration:
        # Compute the phase grid (this is set by the duration and oversample).
        d_phase = dur / oversample
        phase = np.arange(0, period + d_phase, d_phase)
        for t0 in phase:
            # Figure out which data points are in and out of transit.
            m_in = np.abs((t - min_t - t0 + hp) % period - hp) < 0.5 * dur
            m_out = ~m_in
            # Compute the estimates of the in and out-of-transit flux.
            ivar_in = np.sum(ivar[m_in])
            ivar_out = np.sum(ivar[m_out])
            y_in = np.sum(y[m_in] * ivar[m_in]) / ivar_in
            y_out = np.sum(y[m_out] * ivar[m_out]) / ivar_out
            # Use this to compute the best fit depth and uncertainty.
            depth = y_out - y_in
            depth_err = np.sqrt(1.0 / ivar_in + 1.0 / ivar_out)
            snr = depth / depth_err
            # Log likelihood of this model *relative to* a constant-flux
            # model at y_out: the two models only differ on the in-transit
            # points, so both sums intentionally run over m_in.
            loglike = -0.5 * np.sum((y_in - y[m_in]) ** 2 * ivar[m_in])
            loglike += 0.5 * np.sum((y_out - y[m_in]) ** 2 * ivar[m_in])
            # Choose which objective should be used for the optimization.
            if use_likelihood:
                objective = loglike
            else:
                objective = snr
            # If this model is better than any before, keep it.
            if depth > 0 and objective > best[0]:
                best = (
                    objective,
                    (
                        objective,
                        depth,
                        depth_err,
                        dur,
                        (t0 + min_t) % period,
                        snr,
                        loglike,
                    ),
                )
    return best[1]
def _apply(f, period):
return tuple(map(np.array, zip(*map(f, period))))
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@timeseries@periodograms@bls@methods.py@.PATH_END.py
|
{
"filename": "helpers.py",
"repo_name": "BEAST-Fitting/beast",
"repo_path": "beast_extracted/beast-master/beast/tools/helpers.py",
"type": "Python"
}
|
"""
This is a first collection of tools making the design easier
"""
import sys
from functools import wraps
import itertools
# Python 2 compatibility: rebind ``range`` to the lazy ``xrange``.
# On Python 3 ``xrange`` does not exist (NameError) and ``range`` is
# already lazy, so nothing needs to change there.
try:
    range = xrange
except NameError:
    pass
# Public API of this module.
# NOTE(review): ``pretty_size_print`` is defined below but not exported
# here -- confirm whether that omission is intentional.
__all__ = [
    "generator",
    "chunks",
    "isNestedInstance",
    "type_checker",
    "nbytes",
]
def generator(func):
    """Explicitly mark *func* as a generator (documentation only).

    The wrapper forwards all arguments unchanged and preserves the
    wrapped function's metadata via ``functools.wraps``.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
@generator
def chunks(ll, n):
    """Yield successive ``n``-sized chunks taken from ``ll``.

    Parameters
    ----------
    ll: iterable
        object to iterate over
    n: int
        number of elements per slice

    Yields
    ------
    chunk: tuple
        up to ``n`` consecutive values from ``ll`` (the final chunk may
        be shorter)
    """
    iterator = iter(ll)
    while True:
        piece = tuple(itertools.islice(iterator, n))
        if not piece:
            # Source exhausted: end the generator.
            return
        yield piece
def isNestedInstance(obj, cl):
    """Test for sub-classes types
    I could not find a universal test

    Parameters
    ----------
    obj: object instance
        object to test
    cl: Class
        top level class to test

    Returns
    -------
    r: bool
        True if obj is indeed an instance or subclass instance of cl
    """
    # Bug fix: the special attribute is spelled ``__subclasses__``; the
    # old ``__subclasses`` spelling never matched, so the subclass
    # expansion below was dead code and ``tree`` always stayed at [cl].
    # (For normal classes the result is unchanged, since ``issubclass``
    # already accepts subclasses of ``cl``.)
    tree = [cl]
    if hasattr(cl, "__subclasses__"):
        for k in cl.__subclasses__():
            if hasattr(k, "__subclasses__"):
                tree += k.__subclasses__()
    return issubclass(obj.__class__, tuple(tree))
def type_checker(name, obj, tp):
    """Check a given type and raise a type error if not correct

    Parameters
    ----------
    name: str
        name of the variable to show in the exception text
    obj: object
        object to check
    tp: type
        expected type of obj

    Raises
    ------
    :exc:TypeError:
        raises a TypeError if object is not of the correct type of a subclass of it
    """
    # Guard clause: nothing to do when the type already matches.
    if isNestedInstance(obj, tp):
        return
    txt = 'Expected "{0:s}" of type {1:s}, got {2:s} instead.'
    raise TypeError(txt.format(name, str(tp.__name__), str(type(obj).__name__)))
def pretty_size_print(num_bytes):
    """
    Output number of bytes in a human readable format

    Parameters
    ----------
    num_bytes: int
        number of bytes to convert

    Returns
    -------
    output: str
        string representation of the size with appropriate unit scale
    """
    if num_bytes is None:
        return
    KiB = 1024
    # Unit ladder from largest to smallest; each scale is 1024x the next.
    scales = [
        ("YB", KiB ** 8),
        ("ZB", KiB ** 7),
        ("EB", KiB ** 6),
        ("PB", KiB ** 5),
        ("TB", KiB ** 4),
        ("GB", KiB ** 3),
        ("MB", KiB ** 2),
        ("KB", KiB),
    ]
    for unit, scale in scales:
        if num_bytes > scale:
            return "%.3g %s" % (num_bytes / scale, unit)
    return "%.3g Bytes" % (num_bytes)
def nbytes(obj, pprint=False):
    """Return the number of bytes held by ``obj``'s attributes.

    Attributes exposing an ``nbytes`` attribute (e.g. numpy arrays)
    report that value; everything else falls back to ``sys.getsizeof``.

    Parameters
    ----------
    obj: object
        object to find the size of
    pprint: bool, optional (default=False)
        if set, returns the result after calling pretty_size_print

    Returns
    -------
    num_bytes: int or str
        total number of bytes or human readable corresponding string
    """
    total = 0
    for value in list(obj.__dict__.values()):
        if hasattr(value, "nbytes"):
            total += value.nbytes
        else:
            total += sys.getsizeof(value)
    if pprint:
        return pretty_size_print(total)
    return total
|
BEAST-FittingREPO_NAMEbeastPATH_START.@beast_extracted@beast-master@beast@tools@helpers.py@.PATH_END.py
|
{
"filename": "UPGRADES.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/restricted/abseil-cpp/UPGRADES.md",
"type": "Markdown"
}
|
# C++ Upgrade Tools
Abseil may occasionally release API-breaking changes. As noted in our
[Compatibility Guidelines][compatibility-guide], we will aim to provide a tool
to do the work of effecting such API-breaking changes, when absolutely
necessary.
These tools will be listed on the [C++ Upgrade Tools][upgrade-tools] guide on
https://abseil.io.
For more information, the [C++ Automated Upgrade Guide][api-upgrades-guide]
outlines this process.
[compatibility-guide]: https://abseil.io/about/compatibility
[api-upgrades-guide]: https://abseil.io/docs/cpp/tools/api-upgrades
[upgrade-tools]: https://abseil.io/docs/cpp/tools/upgrades/
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@restricted@abseil-cpp@UPGRADES.md@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "yulongzh/SPEMR",
"repo_path": "SPEMR_extracted/SPEMR-main/README.md",
"type": "Markdown"
}
|
# SPEMR
The source code and dataset of the SPEMR model.
|
yulongzhREPO_NAMESPEMRPATH_START.@SPEMR_extracted@SPEMR-main@README.md@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "SatoshiHamano/WARP",
"repo_path": "WARP_extracted/WARP-main/README.md",
"type": "Markdown"
}
|
# WARP
WINERED Automatic Reduction Pipeline
## What is WARP?
The pipeline software to reduce the astronomical spectroscopic data obtained with NIR high-resolution echelle spectrograph, WINERED. WARP is written with Python.
## How to install?
Using git:
`git clone https://github.com/SatoshiHamano/WARP`
WARP can also be installed by downloading zip from GitHub page. Just expand the zip to use it.
## Necessary environment
```
Python 3 (ver 3.6 or later)
Python libraries — numpy, matplotlib, PIL, astropy
PyRAF
```
## How to use?
See WARP_Manual_v?.?.pdf for details.
|
SatoshiHamanoREPO_NAMEWARPPATH_START.@WARP_extracted@WARP-main@README.md@.PATH_END.py
|
{
"filename": "random.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/backend/torch/random.py",
"type": "Python"
}
|
import torch
import torch._dynamo as dynamo
import torch.nn.functional as tnn
from keras.src.backend.config import floatx
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import get_device
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.random.seed_generator import SeedGenerator
from keras.src.random.seed_generator import draw_seed
from keras.src.random.seed_generator import make_default_seed
# torch.Generator not supported with dynamo
# see: https://github.com/pytorch/pytorch/issues/88576
@dynamo.disable()
def torch_seed_generator(seed):
first_seed, second_seed = draw_seed(seed)
device = get_device()
if device == "meta":
# Generator is not supported by the meta device.
return None
generator = torch.Generator(device=get_device())
generator.manual_seed(int(first_seed + second_seed))
return generator
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
dtype = to_torch_dtype(dtype)
# Do not use generator during symbolic execution.
if get_device() == "meta":
return torch.normal(
mean, stddev, size=shape, dtype=dtype, device=get_device()
)
generator = torch_seed_generator(seed)
return torch.normal(
mean,
stddev,
size=shape,
generator=generator,
dtype=dtype,
device=get_device(),
)
def categorical(logits, num_samples, dtype="int32", seed=None):
logits = convert_to_tensor(logits)
dtype = to_torch_dtype(dtype)
probs = torch.softmax(logits, dim=-1)
# Do not use generator during symbolic execution.
if get_device() == "meta":
return torch.multinomial(
probs,
num_samples,
replacement=True,
).type(dtype)
generator = torch_seed_generator(seed)
return torch.multinomial(
probs,
num_samples,
replacement=True,
generator=generator,
).type(dtype)
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
dtype = to_torch_dtype(dtype)
requested_shape = shape
if len(requested_shape) == 0:
shape = (1,)
# Do not use generator during symbolic execution.
if get_device() == "meta":
rand_tensor = torch.rand(size=shape, dtype=dtype, device=get_device())
else:
generator = torch_seed_generator(seed)
rand_tensor = torch.rand(
size=shape, generator=generator, dtype=dtype, device=get_device()
)
output = (maxval - minval) * rand_tensor + minval
if len(requested_shape) == 0:
return output[0]
return output
def randint(shape, minval, maxval, dtype="int32", seed=None):
dtype = to_torch_dtype(dtype)
# Do not use generator during symbolic execution.
if get_device() == "meta":
return torch.randint(
low=minval,
high=maxval,
size=shape,
dtype=dtype,
device=get_device(),
)
generator = torch_seed_generator(seed)
return torch.randint(
low=minval,
high=maxval,
size=shape,
generator=generator,
dtype=dtype,
device=get_device(),
)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = to_torch_dtype(dtype)
# Take a larger standard normal dist, discard values outside 2 * stddev
# Offset by mean and stddev
x = normal(tuple(shape) + (4,), mean=0, stddev=1, dtype=dtype, seed=seed)
valid = (x > -2) & (x < 2)
indexes = valid.max(-1, keepdim=True)[1]
trunc_x = torch.empty(shape, dtype=dtype, device=get_device())
trunc_x.data.copy_(x.gather(-1, indexes).squeeze(-1))
trunc_x.data.mul_(stddev).add_(mean)
return trunc_x
def _get_concrete_noise_shape(inputs, noise_shape):
if noise_shape is None:
return inputs.shape
concrete_inputs_shape = inputs.shape
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def dropout(inputs, rate, noise_shape=None, seed=None):
if (
seed is not None
and not (isinstance(seed, SeedGenerator) and seed._initial_seed is None)
or noise_shape is not None
):
keep_prob = 1.0 - rate
noise_shape = _get_concrete_noise_shape(inputs, noise_shape)
keep_prob_matrix = torch.full(
noise_shape, keep_prob, device=get_device()
)
generator = torch_seed_generator(seed)
# Do not use generator during symbolic execution.
if get_device() == "meta":
mask = torch.bernoulli(keep_prob_matrix)
else:
mask = torch.bernoulli(keep_prob_matrix, generator=generator)
mask = mask.bool()
mask = torch.broadcast_to(mask, inputs.shape)
return torch.where(
mask,
inputs / keep_prob,
torch.zeros_like(inputs, dtype=inputs.dtype),
)
# Fast path, unseeded (since torch doesn't support seeding dropout!!!!)
# Using the above implementation is possible, but much slower.
return torch.nn.functional.dropout(
inputs, p=rate, training=True, inplace=False
)
def shuffle(x, axis=0, seed=None):
# Ref: https://github.com/pytorch/pytorch/issues/71409
x = convert_to_tensor(x)
# Get permutation indices
# Do not use generator during symbolic execution.
if get_device() == "meta":
row_perm = torch.rand(x.shape[: axis + 1], device=get_device()).argsort(
axis
)
else:
generator = torch_seed_generator(seed)
row_perm = torch.rand(
x.shape[: axis + 1], generator=generator, device=get_device()
).argsort(axis)
for _ in range(x.ndim - axis - 1):
row_perm.unsqueeze_(-1)
# Reformat this for the gather operation
row_perm = row_perm.repeat(
*[1 for _ in range(axis + 1)], *(x.shape[axis + 1 :])
)
return x.gather(axis, row_perm)
def gamma(shape, alpha, dtype=None, seed=None):
dtype = dtype or floatx()
dtype = to_torch_dtype(dtype)
alpha = torch.broadcast_to(convert_to_tensor(alpha), shape)
beta = torch.ones(shape, device=get_device())
prev_rng_state = torch.random.get_rng_state()
# Do not draw seed during symbolic execution
if not get_device() == "meta":
first_seed, second_seed = draw_seed(seed)
torch.manual_seed(first_seed + second_seed)
gamma_distribution = torch.distributions.gamma.Gamma(alpha, beta)
sample = gamma_distribution.sample().type(dtype)
torch.random.set_rng_state(prev_rng_state)
return sample
def binomial(shape, counts, probabilities, dtype=None, seed=None):
dtype = dtype or floatx()
dtype = to_torch_dtype(dtype)
counts = torch.broadcast_to(convert_to_tensor(counts), shape)
probabilities = torch.broadcast_to(convert_to_tensor(probabilities), shape)
prev_rng_state = torch.random.get_rng_state()
# Do not draw seed during symbolic execution
if not get_device() == "meta":
first_seed, second_seed = draw_seed(seed)
torch.manual_seed(first_seed + second_seed)
binomial_distribution = torch.distributions.binomial.Binomial(
total_count=counts, probs=probabilities
)
sample = binomial_distribution.sample().type(dtype)
torch.random.set_rng_state(prev_rng_state)
return sample
def beta(shape, alpha, beta, dtype=None, seed=None):
dtype = dtype or floatx()
dtype = to_torch_dtype(dtype)
alpha = torch.broadcast_to(convert_to_tensor(alpha), shape)
beta = torch.broadcast_to(convert_to_tensor(beta), shape)
prev_rng_state = torch.random.get_rng_state()
# Do not draw seed during symbolic execution
if not get_device() == "meta":
first_seed, second_seed = draw_seed(seed)
torch.manual_seed(first_seed + second_seed)
beta_distribution = torch.distributions.beta.Beta(
concentration1=alpha, concentration0=beta
)
sample = beta_distribution.sample().type(dtype)
torch.random.set_rng_state(prev_rng_state)
return sample
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@backend@torch@random.py@.PATH_END.py
|
{
"filename": "_thicknessmode.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/choroplethmapbox/colorbar/_thicknessmode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ThicknessmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="thicknessmode",
parent_name="choroplethmapbox.colorbar",
**kwargs
):
super(ThicknessmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["fraction", "pixels"]),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@choroplethmapbox@colorbar@_thicknessmode.py@.PATH_END.py
|
{
"filename": "_uid.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2d/_uid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="uid", parent_name="histogram2d", **kwargs):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2d@_uid.py@.PATH_END.py
|
{
"filename": "grb_transient_metric.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/rubin_sim/maf/maf_contrib/grb_transient_metric.py",
"type": "Python"
}
|
__all__ = ("GRBTransientMetric",)
import numpy as np
import rubin_sim.maf.metrics as metrics
# Gamma-ray burst afterglow metric
# ebellm@caltech.edu
class GRBTransientMetric(metrics.BaseMetric):
"""Evaluate the likelihood of detecting a GRB optical counterpart.
Detections for an on-axis GRB afterglows decaying as
F(t) = F(1min)((t-t0)/1min)^-alpha. No jet break, for now.
Derived from TransientMetric, but calculated with reduce functions to
enable-band specific counts.
Burst parameters taken from 2011PASP..123.1034J.
Simplifications:
* no color variation or evolution encoded yet.
* no jet breaks.
* not treating off-axis events.
Parameters
----------
alpha : `float`,
temporal decay index
Default = 1.0
apparent_mag_1min_mean : `float`,
mean magnitude at 1 minute after burst
Default = 15.35
apparent_mag_1min_sigma : `float`,
std of magnitudes at 1 minute after burst
Default = 1.59
trans_duration : `float`, optional
How long the transient lasts (days). Default 10.
survey_duration : `float`, optional
Length of survey (years).
Default 10.
survey_start : `float`, optional
MJD for the survey start date.
Default None (uses the time of the first observation).
detect_m5_plus : `float`, optional
An observation will be used if the light curve magnitude is brighter
than m5+detect_m5_plus.
Default 0.
n_per_filter : `int`, optional
Number of separate detections of the light curve above the
detect_m5_plus theshold (in a single filter) for the light curve
to be counted.
Default 1.
n_filters : `int`, optional
Number of filters that need to be observed n_per_filter times,
with differences min_delta_mag,
for an object to be counted as detected.
Default 1.
min_delta_mag : `float`, optional
magnitude difference between detections in the same filter required
for second+ detection to be counted.
For example, if min_delta_mag = 0.1 mag and two consecutive observations
differ only by 0.05 mag, those two detections will only count as one.
(Better would be a SNR-based discrimination of lightcurve change.)
Default 0.
n_phase_check : `int`, optional
Sets the number of phases that should be checked.
One can imagine pathological cadences where many objects pass the
detection criteria, but would not if the observations were offset
by a phase-shift.
Default 1.
"""
def __init__(
self,
alpha=1,
apparent_mag_1min_mean=15.35,
apparent_mag_1min_sigma=1.59,
metric_name="GRBTransientMetric",
mjd_col="expMJD",
m5_col="fiveSigmaDepth",
filter_col="filter",
trans_duration=10.0,
survey_duration=10.0,
survey_start=None,
detect_m5_plus=0.0,
n_per_filter=1,
n_filters=1,
min_delta_mag=0.0,
n_phase_check=1,
**kwargs,
):
self.mjd_col = mjd_col
self.m5_col = m5_col
self.filter_col = filter_col
super(GRBTransientMetric, self).__init__(
col=[self.mjd_col, self.m5_col, self.filter_col],
units="Fraction Detected",
metric_name=metric_name,
**kwargs,
)
self.alpha = alpha
self.apparent_mag_1min_mean = apparent_mag_1min_mean
self.apparent_mag_1min_sigma = apparent_mag_1min_sigma
self.trans_duration = trans_duration
self.survey_duration = survey_duration
self.survey_start = survey_start
self.detect_m5_plus = detect_m5_plus
self.n_per_filter = n_per_filter
self.n_filters = n_filters
self.min_delta_mag = min_delta_mag
self.n_phase_check = n_phase_check
self.peak_time = 0.0
self.reduce_order = {
"Bandu": 0,
"Bandg": 1,
"Bandr": 2,
"Bandi": 3,
"Bandz": 4,
"Bandy": 5,
"Band1FiltAvg": 6,
"BandanyNfilters": 7,
}
def light_curve(self, time, filters):
"""
given the times and filters of an observation, return the magnitudes.
"""
lc_mags = np.zeros(time.size, dtype=float)
decline = np.where(time > self.peak_time)
apparent_mag_1min = np.random.randn() * self.apparent_mag_1min_sigma + self.apparent_mag_1min_mean
lc_mags[decline] += apparent_mag_1min + self.alpha * 2.5 * np.log10(
(time[decline] - self.peak_time) * 24.0 * 60.0
)
# for key in self.peaks.keys():
# fMatch = np.where(filters == key)
# lc_mags[fMatch] += self.peaks[key]
return lc_mags
def run(self, data_slice, slice_point=None):
"""
Calculate the detectability of a transient with the
specified lightcurve.
"""
# Total number of transients that could go off back-to-back
n_trans_max = np.floor(self.survey_duration / (self.trans_duration / 365.25))
tshifts = np.arange(self.n_phase_check) * self.trans_duration / float(self.n_phase_check)
n_trans_max = 0
for tshift in tshifts:
# Compute the total number of back-to-back transients
# are possible to detect
# given the survey duration and the transient duration.
n_trans_max += np.floor(self.survey_duration / (self.trans_duration / 365.25))
if tshift != 0:
n_trans_max -= 1
if self.survey_start is None:
survey_start = data_slice[self.mjd_col].min()
time = (data_slice[self.mjd_col] - survey_start + tshift) % self.trans_duration
# Which lightcurve does each point belong to
lc_number = np.floor((data_slice[self.mjd_col] - survey_start) / self.trans_duration)
lc_mags = self.light_curve(time, data_slice[self.filter_col])
# How many criteria needs to be passed
detect_thresh = 0
# Flag points that are above the SNR limit
detected = np.zeros(data_slice.size, dtype=int)
detected[np.where(lc_mags < data_slice[self.m5_col] + self.detect_m5_plus)] += 1
bandcounter = {
"u": 0,
"g": 0,
"r": 0,
"i": 0,
"z": 0,
"y": 0,
"any": 0,
} # define zeroed out counter
# make sure things are sorted by time
ord = np.argsort(data_slice[self.mjd_col])
data_slice = data_slice[ord]
detected = detected[ord]
lc_number = lc_number[ord]
lc_mags = lc_mags[ord]
ulc_number = np.unique(lc_number)
left = np.searchsorted(lc_number, ulc_number)
right = np.searchsorted(lc_number, ulc_number, side="right")
detect_thresh += self.n_filters
# iterate over the lightcurves
for le, ri in zip(left, right):
wdet = np.where(detected[le:ri] > 0)
ufilters = np.unique(data_slice[self.filter_col][le:ri][wdet])
nfilts_lci = 0
for filt_name in ufilters:
wdetfilt = np.where((data_slice[self.filter_col][le:ri] == filt_name) & detected[le:ri])
lc_points = lc_mags[le:ri][wdetfilt]
dlc = np.abs(np.diff(lc_points))
# number of detections in band, requiring that for
# nPerFilter > 1 that points have more than minDeltaMag
# change
nbanddet = np.sum(dlc > self.min_delta_mag) + 1
if nbanddet >= self.n_per_filter:
bandcounter[filt_name] += 1
nfilts_lci += 1
if nfilts_lci >= self.n_filters:
bandcounter["any"] += 1
bandfraction = {}
for band in bandcounter.keys():
bandfraction[band] = float(bandcounter[band]) / n_trans_max
return bandfraction
def reduce_band1_filt_avg(self, bandfraction):
"Average fraction detected in single filter"
return np.mean(list(bandfraction.values()))
def reduce_bandany_nfilters(self, bandfraction):
"Fraction of events detected in Nfilters or more"
return bandfraction["any"]
def reduce_bandu(self, bandfraction):
return bandfraction["u"]
def reduce_bandg(self, bandfraction):
return bandfraction["g"]
def reduce_bandr(self, bandfraction):
return bandfraction["r"]
def reduce_bandi(self, bandfraction):
return bandfraction["i"]
def reduce_bandz(self, bandfraction):
return bandfraction["z"]
def reduce_bandy(self, bandfraction):
return bandfraction["y"]
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@rubin_sim@maf@maf_contrib@grb_transient_metric.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "jpcoles/mosaic",
"repo_path": "mosaic_extracted/mosaic-master/README.md",
"type": "Markdown"
}
|
# mosaic
Multipole Operators in Symbols, Automatically Improved and Condensed
|
jpcolesREPO_NAMEmosaicPATH_START.@mosaic_extracted@mosaic-master@README.md@.PATH_END.py
|
{
"filename": "overpressure2.py",
"repo_name": "wmpg/Supracenter",
"repo_path": "Supracenter_extracted/Supracenter-master/supra/Geminus/overpressure2.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import scipy
from supra.Utils.Classes import Constants, Position
from supra.Utils.Formatting import *
c = Constants()
# function [dp,dpws,dpratio,tau,tauws,Z,td,talt,Ro] = overpressureihmod_Ro(meteor,stn,Ro,v,theta,dphi,atmos,sw);
def overpressureihmod_Ro(meteor, stn, Ro, v, theta, dphi, atmos, sw, wind=True, dopplershift=False):
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %
# % Theoretical Acoustic Overpressure Prediction using Line Source Theory
# % (ReVelle D.O., 1976: On Meteor-Generated Infrasound, JGR, 81, pp.1217-1229.
# %
# % Usage:
# % [dp,dpws,dpratio,tau,tauws,Z,td,talt,Ro,dm] = overpressureihmod(meteor,stn,mass,rhom,v,theta,dphi,atmos,sw);
# %
# % Given: meteor - [latitude,longitude,altitude] of meteor source [DD.ddd,DD.ddd,km]
# % stn - [latitude,longitude,elevation] of observing station [DD.ddd,DD.ddd,km]
# % mass - meteoroid mass in kilograms (kg)
# % rhom - meteoroid bulk density in kilograms per metre cubed (kg/m^3)
# % v - meteoroid velocity in kilometres per second (km/s)
# % theta - entry angle of meteoroid measured from horizontal (degrees)
# % dphi - angular deviation from the meteoroid trajectory (degrees)
# % atmos - atmospheric model (nx3) - MUST encompass altitudes for direct propagation
# % - altitude (m)
# % - pressure (hPa/mbars)
# % - temperature (oC)
# % sw - switches on/off = 1/0 3 element vector [a,b,c]
# % a - vary period to find transition (1) const. period (0)
# % b - display figures (on/off)
# % c - quick integration (on/off)
# %
# % Returns: dp - theoretical acoustic/infrasonic overpressure (Pascals)
# % dpws - theoretical acoustic/infrasonic overpressure for completely
# % weak shock propagation
# % dpratio - acoustic/infrasonic overpressure ratio (dp/p) (dimensionless)
# % td - transition distance (in units of Ro)
# % talt - transition altitude (kilometres)
# % tau - signal period (seconds)
# % tauws - signal period for completely weak shock propagation (seconds)
# % Z - Altitude interval (km)
# % Ro - Blast radius in metres
# % dm - diameter of meteoroid (metres)
# %
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
try:
meteor = validate(meteor, "Meteor Source Position")
stn = validate(stn, "Station Reciever Position")
Ro = validate(Ro, "Blast Radius")
v = validate(v, "Meteor Trajectory Velocity")
theta = validate(theta, "Entry Angle of Meteoroid")
dphi = validate(dphi, "Angular Deviation from the Meteoroid Trajectory")
atmos = validate(atmos, "Atmospheric Data")
sw = validate(sw, "Switch Data")
except TypeError as e:
print(printMessage("Error"), " Geminus variable input error! {:}".format(e))
raise TypeError(e)
alt = atmos[:, 0]/1000
if wind:
Wm = atmos[:, 2] #% meridional wind velocity
Wz = atmos[:, 3] #% zonal wind velocity
else:
Wm = np.zeros(len(alt))
Wz = np.zeros(len(alt))
pres = atmos[:, 4]*100
temp = atmos[:, 1]+273.15
Cs0 = np.sqrt(c.GAMMA*c.R*temp/c.M_0)
#% SWITCH SELECTIONS for wind/no wind and doppler/no doppler shift
#disp('Include winds = 1, no winds = 2, include Doppler = 3, no Doppler = 4');
#disp('Absorption as per ReVelle (1976) = 5');
#disp('Absorption as per Sutherland and Bass (2004) = 6');
#%in = input('Please make switch selections [1/2, 3/4, 5/6]: ');
Kw = 1
Kd = 3
Ka = 5
wCs, Ceff, s, wtype = windC(meteor, stn, atmos, Cs0)
rho = c.GAMMA*pres/wCs**2
Csa = Cs0[0]
Cswind = wCs[0]
Ceffs = Ceff[0]
Wms = Wm[0]
Wzs = Wz[0]
Ps = pres[0]
rhos = rho[0]
temps = temp[0]
M = v/(Csa/1000)
Ro = Ro/1000
tau0 = 2.81*Ro/(Csa/1000)
f0 = 1/tau0
theta = np.radians(theta)
dphi = np.radians(dphi)
epsilon = np.arctan(np.tan(theta)/(1 - 2*dphi/np.pi))
Range = (Position(meteor[0], meteor[1], meteor[2]) - Position(stn[0], stn[1], stn[2])).mag()/1000
dH = meteor[2] - stn[2]
sR = np.sqrt(Range**2 + dH**2)
inc = np.arctan(dH/Range)
Zs = meteor[2]
xt = sR/Ro
Z10 = Zs - 10*Ro*np.sin(inc)
if Z10 > alt[0]:
Z10 = alt[0]
N = 500
dZ = (Z10 - stn[2])/N
Z = np.linspace(Z10, stn[2], N)
f = scipy.interpolate.interp1d(alt, pres)
Pz = f(Z)
f = scipy.interpolate.interp1d(alt, Cs0)
Csz = f(Z)
f = scipy.interpolate.interp1d(alt, rho)
rhoz = f(Z)
f = scipy.interpolate.interp1d(alt, temp)
tempz = f(Z)
f = scipy.interpolate.interp1d(alt, wCs)
CsW = f(Z)
CmW = meanC(meteor[2], Cswind, Z, CsW)
# # % Find mean sound speed between source and Z
# Cmz = meanC(meteor[2], Csa, Z, Csz)
x = (Zs - Z)/(Ro*np.sin(inc))
Csd = Cs0;
Dtau = doppler(meteor, stn, tau0, Csd, atmos, Z, s, alt)
Dtau = np.flipud(Dtau)
tau = []
for jj in range(len(Dtau)):
if dopplershift:
tau.append(0.562*Dtau[jj]*x[jj]**(0.25))
else:
tau.append(0.562*tau0*x[jj]**(0.25))
# tau = 0.562*tau0*x**(1/4)
if sw[0] == 0:
tau = tau0
fm = 1/np.array(tau)
Dtogo = xt - x
# % Calculate Linear Absorption
# DL = lineardamping(Z, Csz, rhoz, tempz, fm, epsilon, sw[2])
# # % Calculate Overpressure ratio via eqn 23 (eqn 93b thesis)
# dpp = c.GAMMA/(2*(c.GAMMA + 1))*(3/8)**(-3/5)/((1 + (8/3)**(8/5)*x**2 )**(3/8) - 1)
# dpp = dpp*(rhoz/rhos)**(1/2)*Csz/Cmz #% Non uniform path correction (eqn 86)
# dpp = dpp*(rhos/rhoz)*Csa**2/Csz**2; #% Source Altitude correction for inhomogenious atmosphere
# dpp = dpp*DL*x**(1/4)
DL = lineardamping(Z, CsW, rhoz, tempz, fm, epsilon, sw[2])
dpp = c.GAMMA/(2*(c.GAMMA + 1))*(3/8)**(-3/5)/((1 + (8/3)**(8/5)*x**2)**(3/8) - 1)
dpp = dpp*(rhoz/rhos)**(1/2)*CsW/CmW
#% Source Altitude correction for inhomogenious atmosphere
dpp = dpp*(rhos/rhoz)*Cswind**2/CsW**2
dpp = dpp*DL*x**(1/4)# % Linear damping & decay
#% Linear damping & decay
# % Calculate Distortion distance
Dprime = Csz*tau/(34.3*dpp)
Dprime = Dprime/1000
Dprime = Dprime/Ro
Trans = (Dprime > Dtogo).astype(int)
try:
M, it = 1, np.nanargmax(Trans)
#Transition here
except ValueError:
# Tested from Matlab code
M, it = 0, 0
if (M == 1):
td = x[it]
talt = Z[it]
else:
td = xt
talt = stn[2]
Csd = Cs0
Dtau = doppler(meteor, stn, tau0, Csd, atmos, Z, s, alt)
Dtau = np.flipud(Dtau)
tau = []
for jj in range(len(Dtau)):
if dopplershift:
tau.append(0.562*Dtau[jj]*x[jj]**(0.25))
else:
tau.append(0.562*tau0*x[jj]**(0.25))
if (sw[0] == 0):
tau = [tau0]*len(x)
tauws = tau.copy()
tau[it-1:] = [tau[it-1]]*len(tau[it-1:])
fm = 1/np.array(tau)
fmws = 1/np.array(tauws)
dpp = c.GAMMA/(2*(c.GAMMA + 1))*(3/8)**(-3/5)/((1 + (8/3)**(8/5)*x**2)**(3/8) - 1)
dpp = dpp*(rhoz/rhos)**(1/2)*CsW/CmW
dpp = dpp*(rhos/rhoz)*Cswind**2/CsW**2
dppws = dpp
Dws = weakshockdamping(Z, CsW, rhoz, tempz, fm, epsilon, Ps, sw[2])
dpp = dpp*Dws
dpp[it:] = dpp[it-1]
Dws = weakshockdamping(Z, CsW, rhoz, tempz, fmws, epsilon, Ps, sw[2])
dppws = dppws*Dws
DL = lineardamping(Z[it:], CsW[it:], rhoz[it:], tempz[it:], fm[it:], epsilon, sw[2])
pzt = (rhos/rhoz[it-1])*Cswind**2/CsW[it-1]**2
pzg = (rhos/rhoz[it:])*Cswind**2/CsW[it:]**2
dppl = dpp[it:]*(pzg/pzt)
dppl = dpp[it:]*DL*(td/x[it:])**(1/2)
dpratio = dpp
for ii in range(len(dppl)):
dpratio[it+ii-1] = dppl[ii]
dp = dpratio*Pz
dpws = dppws*Pz
Ro = Ro*1000
# if True:
if sw[1] == 1:
plt.plot(tau[0:it], Z[0:it], 'r-', label="Weak Shock Period Change")
plt.plot(tau[it-1:], Z[it-1:], 'b-', label="Stable Period")
plt.plot(tauws[it-1:], Z[it-1:], 'm-', label="Weak Shock: No Transition")
plt.scatter([tau[it-1]], [Z[it-1]])
plt.xlabel("Signal Period [s]")
plt.ylabel("Geopotential Height [km]")
plt.legend()
plt.show()
# txtfile = fopen('Weak Shock Output - TEST only.txt','a');
print('FINAL OUTPUT FOR THE WEAK SHOCK')
print('=========================================================')
print('Period (weak shock): {:3.4f} s'.format(tauws[-1]))
print(' Frequency (weak shock): {:3.3f} Hz'.format(1/tauws[-1]))
print('Period (linear): {:3.4f} s'.format(tau[-1]))
print(' Frequency (linear): {:3.3f} Hz'.format(1/tau[-1]))
print('Slant range: {:5.2f} km'.format(sR))
print('Arrival (inclination): {:3.4f} deg'.format(np.degrees(inc)))
print('Transition height: {:3.3f} km'.format(talt))
print('Overpressure (weak shock): {:3.4f} Pa'.format(dpws[-1]))
print('Overpressure (linear): {:3.4f} Pa'.format(dp[-1]))
return [tau, tauws, Z, sR, np.degrees(inc), talt, dpws, dp, it]
# # Temporary float return
# return [tau[-1], tauws[-1], dpws[-1], dp[-1], it]
#======================================================================================================
def meanC(Zs,Cs,Z,Cz):
Cm = []
for i in range(len(Z)):
del_z = (Zs - Z[i-1])
avgC = -intfunc([Zs, Z[i]], [Cs, Cz[i]])
Cm.append(avgC/del_z)
Cm = np.array(Cm)
return Cm
# %======================================================================================================
def shearviscosity(T):
mu0 = 1.846e-5
T0 = 300
Ts = 110.4
Ta = 245.4
Tb = 27.6
mu = mu0*(T0 + Ts)/(np.array(T) + Ts)*(np.array(T)/T0)**(3/2)
return mu
# %======================================================================================================
def thermcond(T):
kap0 = 2.624e-2
T0 = 300
Ta = 245.4
Tb = 27.6
T = np.array(T)
T_b_T = []
for i in T:
T_b_T.append(np.exp(-Tb/i))
T_b_T = np.array(T_b_T)
kap = kap0*(T/T0)**(3/2)*(T0 + Ta*T_b_T)/(T + Ta*T_b_T)
return kap
# %======================================================================================================
def bulkviscosity(T):
nu = 2/3*shearviscosity(T);
return nu
# %======================================================================================================
def lineardamping(Z, Cs, rho, T, fm, epsilon, Method):
gamma = 1.4
Cp = 1008.56
mu = shearviscosity(T)
kappa = thermcond(T)
nu = bulkviscosity(T)
Cs = np.array(Cs)
delta = 4*(4/3*mu + nu + kappa*(gamma-1)/Cp)
lamda = Cs/np.array(fm)
alpha = np.pi**2*delta/(2*rho*Cs*lamda**2)
integ = []
for i in range(len(Z)):
integ.append(-intfunc(Z[:i], alpha[:i]/np.cos(epsilon)))
DL = np.exp(-np.array(integ))
return DL
# %======================================================================================================
def weakshockdamping(Z, Cs, rho, T, fm, epsilon, Ps, Method):
gamma = 1.4
Cp = 1008.56
mu = shearviscosity(T)
kappa = thermcond(T)
nu = bulkviscosity(T)
delta = 4*( 4/3*mu + nu + kappa*(gamma-1)/Cp)
BA = 3/2*delta*fm/(gamma+1)
l = Cs/fm
Be = 3*delta/( 2*rho*Cs*l**2)
integ = []
for i in range(len(Z)):
integ.append(-intfunc(Z[0:i], Be[0:i]/np.cos(epsilon)))
dpz = 0.0575*Ps
DWS = BA*np.exp(-np.array(integ))/(dpz*(1 - np.exp(-np.array(integ))) + BA)
return DWS
# %======================================================================================
def intfunc(X, Y):
# tested and works with silber version
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# % Usage: A = intfunc(X,Y)
# %
# % Integrate an arbitrary function Y(X) defined by
# % measured points X & Y assuming piecewise linearity
# %
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
dm = np.array(Y[1:]) - np.array(Y[0:-1])
dx = np.array(X[1:]) - np.array(X[0:-1])
dx2 = np.array(X[1:])**2 - np.array(X[0:-1])**2
slope = []
for i in range(len(dm)):
slope.append(dm[i]/dx[i])
slope = np.array(slope)
inpt = Y[0:-1] - slope*X[0:-1]
segment = slope*dx2/2 + inpt*dx
A = np.sum(segment)
return A
def windC(source, stn, atmos, Cs):
sP = Position(*source)
stP = Position(*stn)
waveP = stP - sP
x, y, z = waveP.x, waveP.y, waveP.z
h = np.array(atmos[:, 0])
wy = np.array(atmos[:, 2])
wx = np.array(atmos[:, 3])
Ceff = []
wCs = []
for i in range(len(h)):
Cx, Cy, Cz = (Cs[i] + wx[i])*x, (Cs[i] + wy[i])*y, Cs[i]*z
Ceff.append([Cx, Cy, Cz])
wCs.append(np.sqrt(Cx*Cx + Cy*Cy + Cz*Cz))
wCs = np.array(wCs)
Ceff = np.array(wCs)
return wCs, wx, wy, Ceff
def doppler(source, stn, tau0, Cs, atmos, Z, s, alt):
# %==========================================================================
# % This function is used to calculate the Doppler shift
# %
# % Input variables:
# % source: location of the source [lat, long]
# % stn: location of the station [lat, long]
# % tau0: fundamental period
# % Cs: ambient speed of sound
# % atmos: atmospheric profile (txt file)
# % inc: inclination angle at observing station
# % Z: altitude intervals (line 108)
# % wtype: type of the wind data file (component form = 0 or
# % magnitude/direction = 1)
# %
# % Last modified on: 16-Oct-2013 (E. A. Silber)
# %==========================================================================
# % wind components
# % s = 0/1 switch [a,b,c], where
# % a = homogeneous (0) or inhomogeneous (1) atmosphere
# % b = meridional, or y-direction winds on (1) or off (0)
# % c = zonal, or x-direction winds on (1) or off (0)
# %
# % Z and tau0 are top-down, therefore they need to be flipped in order to
# % match all other variables.
wCs, wx, wy, Ceff = windC(source, stn, atmos, Cs)
n = len(Z)
h = alt
Z = np.flipud(np.array(Z))
h = np.flipud(np.array(h))
Cs = np.flipud(np.array(Cs))
Ceff = np.flipud(np.array(Ceff))
wCs = np.flipud(np.array(wCs))
wx = np.flipud(np.array(wx))
wy = np.flipud(np.array(wy))
f = scipy.interpolate.interp1d(h, wx)
iwx = f(Z)
f = scipy.interpolate.interp1d(h, wy)
iwy = f(Z)
wz_speed = 0.01;
iwz = np.array([wz_speed]*500)
f = scipy.interpolate.interp1d(h, Cs)
iCs = f(Z)
wind_unit = []
wind_speed = []
for ii in range(n):
step = np.sqrt(iwx[ii]**2 + iwy[ii]**2 + iwz[ii]**2)
if step == 0:
# % disp('Wind speed is zero in all directions (x,y,z).');
# % disp('Doppler shift in frequency is not applicable.');
# % disp('No change in frequency or period was applied.');
# % disp('==================================================');
return tau0
wind_unit.append([iwx[ii]/step, iwy[ii]/step, iwz[ii]/step])
wind_speed.append([iwx[ii], iwy[ii], iwz[ii]])
wind_unit = np.flipud(wind_unit)
sP = Position(*source)
stP = Position(*stn)
waveP = stP - sP
x, y, z = waveP.x, waveP.y, waveP.z*1000
mag = np.sqrt(x**2 + y**2 + z**2)
x, y, z = x/mag, y/mag, z/mag
f0 = 1/tau0
angf0 = 2*np.pi*f0
# %
# %==========================================================================
# % Perform Doppler shift calculations following Morse & Ingard (1968):
# % OMEGA = angf - kw, where
angf = angf0
kn = []
kn.append(angf/iCs[0])
k = []
k.append(kn[0]*np.array([x, y, z]))
kw = []
kw.append(np.dot(np.array(k)[0, :], np.array(wind_speed)[0, :]))
OMEGA = []
OMEGA.append(angf0 - kw[0])
F = []
F.append(OMEGA[0]/(2*np.pi))
tau = []
tau.append(tau0)
dOMEGA = []
for j in range(n):
kn.append(angf/iCs[j])
a = kn[j]*np.array([x, y, z])
k.append(a)
kw.append(np.dot(a, np.array(wind_speed)[j, :]))
dOMEGA.append(kw[j])
for i in range(1, n):
OMEGA.append(angf0 - (1/j)*(np.sum(dOMEGA[:i])))
F.append(OMEGA[i]/(2*np.pi))
tau.append(1/F[i])
return tau
if __name__ == "__main__":
    # Smoke-test / timing harness for `overpressureihmod_Ro` with a fixed
    # real-event dataset.
    # NOTE(review): units are not stated here — `source` and `stat3` look
    # like [latitude, longitude, altitude] with altitude in km; confirm
    # against the Position class used by the solver.
    source = [51.9183000000000, -2.43020000000000, 30.0400000000000]
    stat3 = [51.2112, -0.3298, 0.080]
    Ro = 7.3       # blast-radius-style parameter passed through to the model
    v = 13         # presumably velocity — TODO confirm units (km/s?)
    theta = 41.69  # trajectory angle in degrees (per usual convention; verify)
    dphi3 = 24.4958
    sw = [1, 0, 0]
    # Atmospheric profile table: each row is
    # [height, col2, col3, col4, col5] — column semantics are not visible
    # here; presumably height plus sound speed / wind components. Verify
    # against the consuming function's unpacking.
    data3 = np.array([[30040, 19.2830354000000, 12.0092002400000, 1.61793559900000, 27.5531885400000],
                      [29811.1552100000, 19.2830354000000, 12.0092002400000, 1.61793559900000, 28.3203220600000],
                      [29345.3559100000, 18.9468873800000, 11.1503036000000, 0.873037995000000, 29.9483897000000],
                      [28879.5566100000, 18.6262410900000, 10.2095693000000, -0.0203514030000000, 31.6700510600000],
                      [28413.7573100000, 18.3262035300000, 9.21982058000000, -0.967960426000000, 33.4906866200000],
                      [27947.9580000000, 18.0518817000000, 8.21388069600000, -1.87551690200000, 35.4159861700000],
                      [27482.1587000000, 17.8083826200000, 7.22457289300000, -2.64874866000000, 37.4519665900000],
                      [27016.3594000000, 17.6008132700000, 6.28472042100000, -3.19338353100000, 39.6049906500000],
                      [26550.5601000000, 17.4342806700000, 5.42714652700000, -3.41514934200000, 41.8817869300000],
                      [26084.7608000000, 17.3138918200000, 4.68467445900000, -3.21977392400000, 44.2894707800000],
                      [25618.9615000000, 17.2442143500000, 4.08849683000000, -2.52282450700000, 46.8355666300000],
                      [25153.1622000000, 17.2207883200000, 3.64251399600000, -1.40455221100000, 49.5280314400000],
                      [24687.3629000000, 17.2316798900000, 3.32803113100000, -0.081549479000000, 52.3752796200000],
                      [24221.5636000000, 17.2647300500000, 3.12567270900000, 1.22548382500000, 55.3862092900000],
                      [23755.7643000000, 17.3077798100000, 3.01606320100000, 2.29584783400000, 58.5702301200000],
                      [23289.9650000000, 17.3487023300000, 2.97981198500000, 2.90943988400000, 61.9372926900000],
                      [22824.1657000000, 17.3809240800000, 2.99492175400000, 2.94928968100000, 65.4979196500000],
                      [22358.3664000000, 17.4096920500000, 3.03384678300000, 2.51794798300000, 69.2632385400000],
                      [21892.5671000000, 17.4417678600000, 3.06833037700000, 1.74609462700000, 73.2450166200000],
                      [21426.7678000000, 17.4839131500000, 3.07011584200000, 0.764409448000000, 77.4556976000000],
                      [20960.9685000000, 17.5428895400000, 3.01094648800000, -0.296427716000000, 81.9084405700000],
                      [20495.1692000000, 17.6254586500000, 2.86256561800000, -1.30573703000000, 86.6171611000000],
                      [20029.3699000000, 17.7383880900000, 2.59937613600000, -2.13445816200000, 91.5965747200000],
                      [19563.5706000000, 17.8886045300000, 2.26666285000000, -2.69669289200000, 96.8622429300000],
                      [19097.7713000000, 18.0832068700000, 1.98644819600000, -2.95327082700000, 102.430621800000],
                      [18631.9720000000, 18.3293025800000, 1.88458766000000, -2.86735563300000, 108.319113500000],
                      [18166.1727000000, 18.6339991800000, 2.08693672100000, -2.40211098200000, 114.546120500000],
                      [17700.3734000000, 18.9981690500000, 2.65538233000000, -1.55701180700000, 121.131103300000],
                      [17234.5741000000, 19.3981237300000, 3.39983053200000, -0.474568141000000, 128.094641000000],
                      [16768.7748000000, 19.8041279600000, 4.06815067800000, 0.667495312000000, 135.458495800000],
                      [16302.9755000000, 20.1864464700000, 4.40821211900000, 1.69145384600000, 143.245681000000],
                      [15837.1762000000, 20.5154564500000, 4.16909953200000, 2.41982200300000, 151.480532900000],
                      [15371.3769000000, 20.7814372300000, 3.31498619800000, 2.71745592000000, 160.188786800000],
                      [14905.5776000000, 21.0172786000000, 2.27055029600000, 2.53986513900000, 169.397657400000],
                      [14439.7783000000, 21.2611940300000, 1.51878759200000, 1.85443707900000, 179.135923900000],
                      [13973.9790000000, 21.5152815300000, 1.31521930100000, 0.666478214000000, 189.434020100000],
                      [13508.1797000000, 21.7014032700000, 1.40999834600000, -0.934462214000000, 200.324129300000],
                      [13042.3804000000, 21.7135357700000, 1.51361931800000, -2.72511417500000, 211.840284800000],
                      [12576.5811000000, 21.2844879800000, 1.60441615000000, -3.32758735900000, 224.018476600000],
                      [12110.7818000000, 20.1280569500000, 1.80671965900000, -1.02816098400000, 236.896763500000],
                      [11644.9825000000, 18.8857245900000, 2.22991100100000, 1.90497940600000, 250.515392400000],
                      [11179.1832000000, 18.6974016900000, 2.82487362100000, 1.25106017700000, 264.916923700000],
                      [10713.3839000000, 19.6487717600000, 2.59380956400000, -1.45927153400000, 280.146364600000],
                      [10247.5846000000, 21.2946382200000, 1.48804729700000, -2.93359371300000, 296.251309700000],
                      [9781.78530200000, 23.0860407700000, 1.52501285400000, -1.43283009000000, 313.282089600000],
                      [9315.98600200000, 24.9832396700000, 2.49512839000000, 1.56148398400000, 331.291928300000],
                      [8850.18670200000, 27.1508261300000, 3.22498799700000, 3.38554864600000, 350.337109600000],
                      [8384.38740100000, 29.6192824800000, 3.06454747800000, 3.15487528100000, 370.477152900000],
                      [7918.58810100000, 32.2113985100000, 2.17559126100000, 2.73472409900000, 391.774999200000],
                      [7452.78880100000, 34.7896644100000, 0.998437394000000, 3.19420349500000, 414.297207700000],
                      [6986.98950100000, 37.3711217300000, 0.535159967000000, 2.82788006700000, 438.114164200000],
                      [6521.19020100000, 39.9097360800000, 1.38029961300000, 0.744103870000000, 463.300300700000],
                      [6055.39090100000, 42.1443019200000, 2.87548114400000, -0.788679054000000, 489.934327900000],
                      [5589.59160100000, 44.1403914000000, 4.27619715900000, -0.782552838000000, 518.099481700000],
                      [5123.79230100000, 46.4009971200000, 5.06369378700000, -0.668129721000000, 547.883782900000],
                      [4657.99300100000, 48.6647982500000, 5.43402403400000, -0.779143340000000, 579.380312400000],
                      [4192.19370100000, 50.5624311700000, 5.61620082300000, -0.860071698000000, 612.687502100000],
                      [3726.39440100000, 52.4474632000000, 5.31358619700000, -0.964503001000000, 647.909442500000],
                      [3260.59510100000, 54.2162936500000, 6.51736006400000, -1.05726338200000, 685.156208200000],
                      [2794.79580000000, 55.9922729000000, 9.21998439200000, -1.11589134500000, 724.544201400000],
                      [2328.99650000000, 58.0104813700000, 10.6576076600000, -1.16115673900000, 766.196516200000],
                      [1863.19720000000, 59.6992087800000, 10.7914352100000, -1.18021050200000, 810.243323100000],
                      [1397.39790000000, 60.8453442000000, 10.9822039700000, -1.30434323700000, 856.822275700000],
                      [931.598600200000, 61.7477812900000, 12.5594856100000, -1.44036513500000, 906.078940900000],
                      [465.799300100000, 62.1792358000000, 13.6274798500000, -1.48388871000000, 958.167254100000],
                      [80, 60.7438690000000, 10.9109203900000, -1.78912816400000, 1003.56934200000]])
    # Wall-clock timing of a single base (wind=False) solve.
    import time
    t1 = time.time()
    results = overpressureihmod_Ro(source, stat3, Ro, v, theta, dphi3, data3, sw, wind=False)
    t2 = time.time()
    print("Overpressure (base): {:.4f} s".format(t2 - t1))
|
wmpgREPO_NAMESupracenterPATH_START.@Supracenter_extracted@Supracenter-master@supra@Geminus@overpressure2.py@.PATH_END.py
|
{
"filename": "arrays.py",
"repo_name": "MikeSWang/Harmonia",
"repo_path": "Harmonia_extracted/Harmonia-master/harmonia/algorithms/arrays.py",
"type": "Python"
}
|
"""
Structured arrays (:mod:`~harmonia.algorithms.arrays`)
===========================================================================
Provide structured arrays for cosmological data.
.. autosummary::
DataArray
SphericalArray
CartesianArray
|
"""
from collections.abc import Sequence
from itertools import product
import numpy as np
from .discretisation import DiscreteSpectrum
try:
import cPickle as pickle
except ModuleNotFoundError:
import pickle
class IndexingError(IndexError):
    """Raised by ``__getitem__``-style accessors when the supplied key
    uses an unsupported slicing or indexing form.
    """
class DataArray:
    """Abstract data array with save and load methods.

    Subclasses must implement `__getstate__`/`__setstate__`; that state
    drives both the pickle path and the NumPy ``.npz`` path below.
    """

    def __getstate__(self):
        # Subclass responsibility: return a serialisable state dict.
        raise NotImplementedError

    def __setstate__(self, state):
        # Subclass responsibility: restore the instance from `state`.
        raise NotImplementedError

    def save(self, output_file, file_extension):
        """Save the structured array.

        Parameters
        ----------
        output_file : *str or* :class:`pathlib.Path`
            Output file path.
        file_extension : {'.pkl', '.npz'}
            Output file extension.

        Raises
        ------
        IOError
            If `file_extension` is neither '.pkl' nor '.npz'.
        """
        if file_extension == '.pkl':
            with open(output_file, 'wb') as output_data:
                # protocol=-1 selects the highest available pickle protocol.
                pickle.dump(self, output_data, protocol=-1)
        elif file_extension == '.npz':
            # Each state entry becomes a named array in the .npz archive.
            np.savez(output_file, **self.__getstate__())
        else:
            raise IOError(
                "Unwritable output file. "
                "The file extension must be either .npz or .pkl."
            )

    @classmethod
    def load(cls, input_file):
        """Load the structured array from a .npz or .pkl file.

        Parameters
        ----------
        input_file : *str or* :class:`pathlib.Path`
            Input file path.

        Raises
        ------
        IOError
            If the file extension is neither '.npz' nor '.pkl'.
        """
        # `pathlib.Path` exposes `.suffix`; plain strings fall back to the
        # text after the final dot (no leading dot, hence the `endswith`
        # comparisons below which accept both forms).
        try:
            extension = input_file.suffix
        except AttributeError:
            extension = input_file.rpartition(".")[-1]
        if extension.endswith('npz'):
            state_data = np.load(input_file, allow_pickle=True)
            state = {}
            for attr in state_data.files:
                try:
                    # 0-d object arrays wrap the original Python object;
                    # `.item()` unwraps it. Non-0-d arrays raise ValueError
                    # and are stored as-is.
                    state.update({attr: state_data[attr].item()})
                except ValueError:
                    state.update({attr: state_data[attr]})
            # Bypass __init__ and rebuild directly from state.
            self = object.__new__(cls)
            self.__setstate__(state)
        elif extension.endswith('pkl'):
            with open(input_file, 'rb') as input_data:
                self = pickle.load(input_data)
        else:
            raise IOError(
                "Unreadable input file. "
                "The file extension must be either .npz or .pkl."
            )
        return self
class SphericalArray(DataArray):
    r"""Structured array for spherically decomposed cosmological data.

    Array is initialised with a discrete spectrum of Fourier modes
    and consists of three fields: the 'index' field of
    :math:`(\ell, m_\ell, n_\ell)` triplets, the 'wavenumber' field
    of discrete :math:`k_{\ell n}`, and the 'coefficient' field of
    spherically decomposed data.

    Parameters
    ----------
    disc : :class:`~harmonia.algorithms.discretisation.DiscreteSpectrum`
        Discrete spectrum associated with the structured array.

    Attributes
    ----------
    array : :class:`numpy.ndarray`
        Structured NumPy array.
    size : int
        Total number of elements in the array. This should equal the sum
        of ``disc.mode_counts``.
    attrs : dict
        Attributes of the structured array inherited from `disc`.

    See Also
    --------
    :class:`~harmonia.algorithms.discretisation.DiscreteSpectrum`

    """

    # Class-wide ``numpy.dtype`` for the structured array and any
    # collapsed array.  The private '_position' field records the natural
    # ordering so the array can be sorted deterministically (the 'index'
    # field is multi-dimensional and cannot be a sort key).
    _dtype = np.dtype({
        'names': ['index', 'wavenumber', 'coefficient', '_position'],
        'formats': ['(3,)i4', 'f8', 'c16', 'i8'],
    })
    _dtype_collapsed = np.dtype({
        'names': ['index', 'wavenumber', 'coefficient', '_position'],
        'formats': ['(2,)i4', 'f8', 'c16', 'i8'],
    })

    def __init__(self, disc):
        self.disc = disc
        self.size = sum(disc.mode_counts)
        self.attrs = {
            attr: getattr(disc, attr)
            for attr in ['degrees', 'depths', 'wavenumbers', 'mode_counts']
        }
        self.array, self._directory = self._initialise_array()

    def __str__(self):
        return f"{self.__class__.__name__}({str(self.disc)})"

    def __getitem__(self, key):
        """Get the 'coefficient' field value(s).

        The access key can be an integer, a slice expression, a tuple of
        index triplet or a string, e.g. ``[-1]``, ``[:]``, ``[(0, 0, 1)]``
        or ``'degree_0'``.

        Parameters
        ----------
        key : int, slice, tuple(int, int, int) or str
            'coefficient' field access key.

        Returns
        -------
        complex
            'coefficient' field data entry.

        """
        position = self._find_position(key)
        return self.array['coefficient'][position]

    def __setitem__(self, key, value):
        """Set the 'coefficient' field value(s).

        Parameters
        ----------
        key : int, tuple of int or slice
            'coefficient' field access key.
        value : complex
            'coefficient' field data entry.

        See Also
        --------
        :meth:`.SphericalArray.__getitem__`

        """
        position = self._find_position(key)
        self.array['coefficient'][position] = value

    def __getstate__(self):
        # Work on a copy: updating ``self.__dict__`` in place (as was done
        # previously) replaced the live `disc` attribute with its state
        # dict, corrupting the instance whenever it was saved or pickled.
        state = dict(self.__dict__)
        state.update({'disc': self.disc.__getstate__()})
        return state

    def __setstate__(self, state):
        for attr, value in state.items():
            if attr == 'disc':
                # `disc` is stored as its own state dict; rebuild it.
                self.disc = DiscreteSpectrum._from_state(value)
            else:
                setattr(self, attr, value)
        # NOTE: For backward compatibility; may be removed in the future.
        if '_position' not in self.array.dtype.names:
            self.array = np.lib.recfunctions.append_fields(
                self.array, '_position', list(range(self.size))
            )

    @classmethod
    def _from_state(cls, state):  # internal classmethod
        # Bypass __init__ and restore directly from a state dict.
        self = object.__new__(cls)
        self.__setstate__(state)
        return self

    def vectorise(self, pivot, collapse=None):
        r"""Return a data vector from the 'coefficient' field.

        Vectorisation is performed by *pivoting* in either of the
        following orders of precedence---

        * 'natural': ordered by :math:`(\ell, m, n)`;
        * 'spectral': ordered by :math:`(k_{\ell n}, m)`.

        Subarrays of equivalent :math:`(\ell, n)` may be further collapsed
        over spherical order :math:`m` by simple averaging or averaging in
        quadrature.

        Parameters
        ----------
        pivot : {'natural', 'spectral'}
            Pivot order for vectorisation.
        collapse : {None, 'mean', 'quadrature'}, optional
            If not `None` (default), subarrays are collapsed over
            equivalent spherical order :math:`m` by averaging ('mean') or
            averaging in quadrature ('quadrature'; the historical
            misspelling 'qaudrature' is still accepted).

        Returns
        -------
        vectorised_data : :class:`numpy.ndarray`
            Vectorised coefficient data.

        Raises
        ------
        ValueError
            If `pivot` or `collapse` is not a recognised option.

        """
        if collapse is None:
            array = self.array
        else:
            # Initialise the collapsed array of (degree, depth) doublets.
            doublet_list = self._gen_index_list(
                self.disc.degrees, self.disc.depths, reduction=True
            )
            array = np.empty(len(doublet_list), dtype=self._dtype_collapsed)
            array['index'] = doublet_list
            # Initialise '_position' (previously left as np.empty garbage,
            # making the 'natural' pivot sort of a collapsed array
            # nondeterministic).  `doublet_list` is generated in natural
            # order, so consecutive positions reproduce it.
            array['_position'] = list(range(len(doublet_list)))
            for pos, ind in enumerate(array['index']):
                # Extract subarray matching the degree and depth of the
                # index doublet being considered.
                selector = np.all(
                    self.array['index'][:, [0, 2]] == ind, axis=-1
                )
                subarray = self.array['coefficient'][selector]
                # Collapse the extracted subarray over order m.
                if collapse.lower() == 'mean':
                    collapsed_subarray = np.mean(subarray)
                elif collapse.lower() in ('quadrature', 'qaudrature'):
                    # 'qaudrature' retained for backward compatibility.
                    collapsed_subarray = np.mean(np.abs(subarray) ** 2)
                else:
                    raise ValueError(f"Unknown `collapse` option: {collapse}.")
                array['coefficient'][pos] = collapsed_subarray
                array['wavenumber'][pos] = self.disc.wavenumbers[tuple(ind)]
        # Sort array by the pivot order. It is unsafe to sort by the
        # 'index' field which is multi-dimensional. The private field
        # '_position' was initialised for this purpose.
        if pivot == 'natural':
            sort_order = ['_position', 'wavenumber']
        elif pivot == 'spectral':
            sort_order = ['wavenumber', '_position']
        else:
            raise ValueError(f"Unknown `pivot` option: {pivot}.")
        vectorised_data = np.sort(array, order=sort_order)['coefficient']
        return vectorised_data

    def _initialise_array(self):
        # Initialise the structured array in the natural order.
        array = np.empty(self.size, dtype=self._dtype)
        # Generate the list of index triplets and the directory mapping
        # each triplet to its array position.
        triplet_list = self._gen_index_list(
            self.disc.degrees, self.disc.depths
        )
        triplet_directory = self._gen_index_directory(triplet_list)
        # Initialise index triplets and corresponding wavenumbers.
        array['index'] = triplet_list
        array['wavenumber'] = [
            self.disc.wavenumbers[ell, n] for ell, _, n in triplet_list
        ]
        array['_position'] = list(range(self.size))
        return array, triplet_directory

    def _find_position(self, key):
        # If accessed by integer, reinterpret negative values as reverse
        # indexing.
        if isinstance(key, int):
            position = key if key >= 0 else key % self.size
            # ``>=``: index == size is already out of bounds (the previous
            # ``>`` off-by-one let it fall through to a raw numpy error).
            if position >= self.size:
                raise IndexingError(
                    f"Index {position} out of bound for key: {key}."
                )
            return position
        # Access by slice.
        if isinstance(key, slice):
            return key
        # If accessed by a sequence of (degree, order, depth); any list
        # is turned into a tuple.
        if isinstance(key, Sequence) and not isinstance(key, str):
            position = self._directory[tuple(key)]
            return position
        # Access by string of the form 'degree_<ell>'; the triplet list is
        # grouped by degree, so the block is a contiguous slice.
        if isinstance(key, str):
            degree = int(key.split('_')[-1])
            degree_idx = self.disc.degrees.index(degree)
            start = sum(self.disc.mode_counts[:degree_idx])
            stop = sum(self.disc.mode_counts[:(degree_idx + 1)])
            position = slice(start, stop)
            return position
        raise TypeError(f"Invalid type for key: {key}.")

    @staticmethod
    def _gen_index_list(degrees, depths, reduction=False):
        # With `reduction`, orders m are dropped and (ell, n) doublets are
        # returned; otherwise full (ell, m, n) triplets.  Depths are
        # 1-based (n + 1).
        if reduction:
            index_list = [
                (ell, n + 1)
                for ell, nmax in zip(degrees, depths)
                for n in range(nmax)
            ]
        else:
            index_list = [
                (ell, m, n + 1)
                for ell, nmax in zip(degrees, depths)
                for m in range(- ell, ell + 1)
                for n in range(nmax)
            ]
        return index_list

    @staticmethod
    def _gen_index_directory(index_list):
        # The directory is a mapping from an index tuple to an array position.
        index_directory = {
            tuple(tup): pos for pos, tup in enumerate(index_list)
        }
        return index_directory
class CartesianArray(DataArray):
    r"""Structured array for Cartesian decomposition of cosmological data.

    Array is initialised with three fields: the 'order' field of the
    Legendre multipoles, the 'wavenumber' field of :math:`k`-bin centres
    and the 'power' field for power spectrum multipole measurements.

    Parameters
    ----------
    orders : list of int
        Orders of the power spectrum multipole.
    wavenumbers : float, array_like
        Wavenumbers of the multipole data.
    mode_counts : list of int or None, optional
        Mode counts in wavenumber bins (default is `None`).
    shot_noise : float or None, optional
        Shot noise level (default is `None`).

    Attributes
    ----------
    array : :class:`numpy.ndarray`
        Structured NumPy array.
    size : int
        Total number of elements in the array. This should equal the
        product of the numbers of wavenumbers and multipoles.
    attrs : dict
        Initialisation parameters as attributes.

    """

    # Class-wide ``numpy.dtype`` for the structured array.
    _dtype = np.dtype({
        'names': ['order', 'wavenumber', 'power'],
        'formats': ['i4', 'f8', 'f8'],
    })

    def __init__(self, orders, wavenumbers, mode_counts=None, shot_noise=None):
        self.size = np.size(orders) * np.size(wavenumbers)
        orders = np.sort(orders).tolist()
        # Compute the sorting permutation *before* sorting `wavenumbers`
        # so that `mode_counts` is reordered consistently with its bins.
        # (Previously the permutation was taken from the already-sorted
        # wavenumbers — the identity — so `mode_counts` was never sorted.)
        wavenumbers = np.asarray(wavenumbers)
        sort_order = np.argsort(wavenumbers)
        wavenumbers = wavenumbers[sort_order]
        if mode_counts is not None:
            mode_counts = np.array(mode_counts)[sort_order]
        self.attrs = dict(
            orders=orders,
            wavenumbers=wavenumbers,
            mode_counts=mode_counts,
            shot_noise=shot_noise,
        )
        self.array, self._directory = self._initialise_array()

    def __getitem__(self, key):
        """Access the 'power' field.

        The access key can be an integer positional index, a slice, a tuple
        of (order, wavenumber) or a string, e.g. ``[-1]``, ``[:]``,
        ``[(0, 0.04)]`` or ``'power_0'``.

        Parameters
        ----------
        key : int, slice, tuple(int, float) or str
            'power' field access key.

        Returns
        -------
        float
            'power' field data entry.

        """
        position = self._find_position(key)
        return self.array['power'][position]

    def __setitem__(self, key, value):
        """Set the 'power' field value(s).

        Parameters
        ----------
        key : int, tuple(int, float) or slice
            'power' field access key.
        value : float
            'power' field data entry.

        See Also
        --------
        :meth:`.CartesianArray.__getitem__`

        """
        position = self._find_position(key)
        self.array['power'][position] = value

    def __getstate__(self):
        # Return a shallow copy so callers cannot mutate this instance's
        # attribute dictionary through the returned state.
        return dict(self.__dict__)

    def __setstate__(self, state):
        for attr, value in state.items():
            setattr(self, attr, value)

    def vectorise(self, pivot):
        r"""Return a data vector from the 'power' field.

        Vectorisation is performed by *pivoting* in either of the
        following orders of precedence---

        * 'order': ordered by multipole order :math:`\ell`;
        * 'wavenumber': ordered by wavenumber :math:`k`.

        Parameters
        ----------
        pivot : {'order', 'wavenumber'}
            Pivot order for vectorisation.

        Returns
        -------
        vectorised_data : :class:`numpy.ndarray`
            Vectorised power spectrum data.

        Raises
        ------
        ValueError
            If `pivot` is not a recognised option.

        """
        if pivot == 'order':
            sort_order = ['order', 'wavenumber']
        elif pivot == 'wavenumber':
            sort_order = ['wavenumber', 'order']
        else:
            raise ValueError(f"Unknown `pivot` option: {pivot}.")
        return np.sort(self.array, order=sort_order)['power']

    def _initialise_array(self):
        # Initialise the structured array ordered by multipole order:
        # each order is repeated over all wavenumbers, and the wavenumber
        # grid is tiled once per order.
        array = np.empty(self.size, dtype=self._dtype)
        array['order'] = np.repeat(
            self.attrs['orders'], len(self.attrs['wavenumbers']))
        array['wavenumber'] = np.tile(
            self.attrs['wavenumbers'], len(self.attrs['orders'])
        )
        # Generate the dictionary mapping (order, wavenumber) tuples to
        # array positions.
        directory = self._gen_directory(
            self.attrs['orders'], self.attrs['wavenumbers']
        )
        return array, directory

    def _find_position(self, key):
        # If accessed by integer, reinterpret negative values as reverse
        # indexing.
        if isinstance(key, int):
            position = key if key >= 0 else key % self.size
            # ``>=``: index == size is already out of bounds (the previous
            # ``>`` off-by-one let it fall through to a raw numpy error).
            if position >= self.size:
                raise IndexingError(
                    f"Index {position} out of bound for key: {key}."
                )
            return position
        # Access by slice.
        if isinstance(key, slice):
            return key
        # Access by sequence of (order, wavenumber).
        if isinstance(key, Sequence) and not isinstance(key, str):
            position = self._directory[tuple(key)]
            return position
        # Access by string of the form 'power_<ell>'; each order occupies
        # a contiguous block of `len(wavenumbers)` entries.
        if isinstance(key, str):
            order = int(key.split('_')[-1])
            order_idx = self.attrs['orders'].index(order)
            length = len(self.attrs['wavenumbers'])
            position = slice(order_idx * length, (order_idx + 1) * length)
            return position
        raise TypeError(f"Invalid type for key: {key}.")

    @staticmethod
    def _gen_directory(orders, wavenumbers):
        # `product` iterates wavenumbers fastest, matching the repeat/tile
        # layout used in `_initialise_array`.
        directory = {
            tuple(key): pos
            for pos, key in enumerate(product(orders, wavenumbers))
        }
        return directory
|
MikeSWangREPO_NAMEHarmoniaPATH_START.@Harmonia_extracted@Harmonia-master@harmonia@algorithms@arrays.py@.PATH_END.py
|
{
"filename": "plot.py",
"repo_name": "sbi-dev/sbi",
"repo_path": "sbi_extracted/sbi-main/sbi/analysis/plot.py",
"type": "Python"
}
|
# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
# under the Apache License Version 2.0, see <https://www.apache.org/licenses/>
import collections
import collections.abc
import copy
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
from warnings import warn

import matplotlib as mpl
import numpy as np
import six
import torch
from matplotlib import cm
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from matplotlib.colors import Normalize
from matplotlib.figure import Figure, FigureBase
from matplotlib.patches import Rectangle
from scipy.stats import binom, gaussian_kde, iqr
from torch import Tensor

from sbi.analysis.conditional_density import eval_conditional_density
from sbi.utils.analysis_utils import pp_vals
# On Python 3 the container ABCs live in ``collections.abc``; this alias
# keeps a fallback for very old interpreters where they were attributes of
# ``collections`` itself.
try:
    collectionsAbc = collections.abc  # type: ignore
except AttributeError:
    collectionsAbc = collections
def hex2rgb(hex: str) -> List[int]:
    """Convert a '#RRGGBB' colour string into an [R, G, B] integer list.

    Note: the parameter shadows the builtin ``hex``; kept for interface
    compatibility. Base 16 is passed to ``int`` for the conversion.
    """
    return [int(hex[start : start + 2], 16) for start in (1, 3, 5)]
def rgb2hex(RGB: List[int]) -> str:
    """Convert an [R, G, B] list into a '#rrggbb' colour string.

    Components are coerced to int so hex formatting makes sense; single
    hex digits are zero-padded to two characters.
    """
    return "#" + "".join(format(int(channel), "02x") for channel in RGB)
def to_list_string(
    x: Optional[Union[str, List[Optional[str]]]], len: int
) -> List[Optional[str]]:
    """If x is not a list, make it a list of strings of length `len`.

    A value that is already a list is returned unchanged (its length is
    not checked).  Note: `len` shadows the builtin; kept for interface
    compatibility.
    """
    if isinstance(x, list):
        return x
    return [x] * len
def to_list_kwargs(
    x: Optional[Union[Dict, List[Optional[Dict]]]], len: int
) -> List[Optional[Dict]]:
    """If x is not a list, make it a list of dicts of length `len`.

    A value that is already a list is returned unchanged (its length is
    not checked).  Note: `len` shadows the builtin; kept for interface
    compatibility.
    """
    if isinstance(x, list):
        return x
    return [x] * len
def _update(d: Dict, u: Optional[Dict]) -> Dict:
"""update dictionary with user input, see: https://stackoverflow.com/a/3233356"""
if u is not None:
for k, v in six.iteritems(u):
dv = d.get(k, {})
if not isinstance(dv, collectionsAbc.Mapping): # type: ignore
d[k] = v
elif isinstance(v, collectionsAbc.Mapping): # type: ignore
d[k] = _update(dv, v)
else:
d[k] = v
return d
# Plotting functions
def plt_hist_1d(
    ax: Axes,
    samples: np.ndarray,
    limits: torch.Tensor,
    diag_kwargs: Dict,
) -> None:
    """Plot a 1D histogram of `samples` on `ax`.

    Bin selection: explicit ``diag_kwargs["mpl_kwargs"]["bins"]`` wins;
    otherwise the Freedman-Diaconis heuristic derives bin edges from the
    IQR when requested via ``diag_kwargs["bin_heuristic"]``. An integer
    bin count is resolved to explicit edges spanning `limits`.
    """
    hist_kwargs = copy.deepcopy(diag_kwargs["mpl_kwargs"])
    if hist_kwargs.get("bins") is None:
        if diag_kwargs["bin_heuristic"] == "Freedman-Diaconis":
            # Freedman-Diaconis bin width: 2 * IQR * n^(-1/3).
            binsize = 2 * iqr(samples) * len(samples) ** (-1 / 3)
            hist_kwargs["bins"] = np.arange(limits[0], limits[1] + binsize, binsize)
        else:
            # TODO: add more bin heuristics
            pass
    # `.get` avoids the previous KeyError when "bins" was absent and no
    # supported heuristic filled it in (ax.hist handles bins=None itself).
    if isinstance(hist_kwargs.get("bins"), int):
        hist_kwargs["bins"] = np.linspace(limits[0], limits[1], hist_kwargs["bins"])
    ax.hist(samples, **hist_kwargs)
def plt_kde_1d(
    ax: Axes,
    samples: np.ndarray,
    limits: torch.Tensor,
    diag_kwargs: Dict,
) -> None:
    """Fit a 1D Gaussian KDE to `samples` and draw it as a line on `ax`."""
    # Evaluate the fitted density on an even grid spanning the limits.
    grid = np.linspace(limits[0], limits[1], diag_kwargs["bins"])
    kde = gaussian_kde(samples, bw_method=diag_kwargs["bw_method"])
    ax.plot(grid, kde(grid), **diag_kwargs["mpl_kwargs"])
def plt_scatter_1d(
    ax: Axes,
    samples: np.ndarray,
    limits: torch.Tensor,
    diag_kwargs: Dict,
) -> None:
    """Mark each sample with a vertical line on `ax`. `limits` is unused."""
    line_kwargs = diag_kwargs["mpl_kwargs"]
    for value in samples:
        ax.axvline(value, **line_kwargs)
def plt_hist_2d(
    ax: Axes,
    samples_col: np.ndarray,
    samples_row: np.ndarray,
    limits_col: torch.Tensor,
    limits_row: torch.Tensor,
    offdiag_kwargs: Dict,
):
    """Plot 2D histogram.

    Bin counts can be supplied via ``offdiag_kwargs["np_hist_kwargs"]["bins"]``;
    otherwise the Freedman-Diaconis heuristic derives a per-axis count.
    (The docstring previously followed the first statement, making it a
    dead string rather than a docstring.)
    """
    hist_kwargs = copy.deepcopy(offdiag_kwargs)
    if (
        "bins" not in hist_kwargs["np_hist_kwargs"]
        or hist_kwargs["np_hist_kwargs"]["bins"] is None
    ):
        if hist_kwargs["bin_heuristic"] == "Freedman-Diaconis":
            # The Freedman-Diaconis heuristic applied to each direction:
            # bin width = 2 * IQR * n^(-1/3), converted to a bin count.
            binsize_col = 2 * iqr(samples_col) * len(samples_col) ** (-1 / 3)
            n_bins_col = int((limits_col[1] - limits_col[0]) / binsize_col)
            binsize_row = 2 * iqr(samples_row) * len(samples_row) ** (-1 / 3)
            n_bins_row = int((limits_row[1] - limits_row[0]) / binsize_row)
            hist_kwargs["np_hist_kwargs"]["bins"] = [n_bins_col, n_bins_row]
        else:
            # TODO: add more bin heuristics
            pass
    hist, xedges, yedges = np.histogram2d(
        samples_col,
        samples_row,
        range=[
            [limits_col[0], limits_col[1]],
            [limits_row[0], limits_row[1]],
        ],
        **hist_kwargs["np_hist_kwargs"],
    )
    # Transpose: histogram2d returns (x, y) indexing while imshow expects
    # row-major (y, x); extent maps pixels back to data coordinates.
    ax.imshow(
        hist.T,
        extent=(
            xedges[0],
            xedges[-1],
            yedges[0],
            yedges[-1],
        ),
        **hist_kwargs["mpl_kwargs"],
    )
def plt_kde_2d(
    ax: Axes,
    samples_col: np.ndarray,
    samples_row: np.ndarray,
    limits_col: torch.Tensor,
    limits_row: torch.Tensor,
    offdiag_kwargs: Dict,
) -> None:
    """Run a 2D KDE on the samples and render the density image on `ax`."""
    X, Y, Z = get_kde(samples_col, samples_row, limits_col, limits_row, offdiag_kwargs)
    # The image extent spans the requested plotting limits.
    extent = (
        limits_col[0].item(),
        limits_col[1].item(),
        limits_row[0].item(),
        limits_row[1].item(),
    )
    ax.imshow(Z, extent=extent, **offdiag_kwargs["mpl_kwargs"])
def plt_contour_2d(
    ax: Axes,
    samples_col: np.ndarray,
    samples_row: np.ndarray,
    limits_col: torch.Tensor,
    limits_row: torch.Tensor,
    offdiag_kwargs: Dict,
) -> None:
    """Draw 2D contours of a kernel density estimate of the samples."""
    X, Y, Z = get_kde(samples_col, samples_row, limits_col, limits_row, offdiag_kwargs)
    extent = (
        limits_col[0],
        limits_col[1],
        limits_row[0],
        limits_row[1],
    )
    ax.contour(
        X,
        Y,
        Z,
        extent=extent,
        levels=offdiag_kwargs["levels"],
        **offdiag_kwargs["mpl_kwargs"],
    )
def plt_scatter_2d(
    ax: Axes,
    samples_col: np.ndarray,
    samples_row: np.ndarray,
    limits_col: torch.Tensor,
    limits_row: torch.Tensor,
    offdiag_kwargs: Dict,
) -> None:
    """Scatter-plot the sample pairs on `ax`. The limits are not used."""
    ax.scatter(samples_col, samples_row, **offdiag_kwargs["mpl_kwargs"])
def plt_plot_2d(
    ax: Axes,
    samples_col: np.ndarray,
    samples_row: np.ndarray,
    limits_col: torch.Tensor,
    limits_row: torch.Tensor,
    offdiag_kwargs: Dict,
) -> None:
    """Draw the samples as a connected 2D trajectory. Limits are not used."""
    ax.plot(samples_col, samples_row, **offdiag_kwargs["mpl_kwargs"])
def get_kde(
samples_col: np.ndarray,
samples_row: np.ndarray,
limits_col: torch.Tensor,
limits_row: torch.Tensor,
offdiag_kwargs: dict,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""2D Kernel Density Estimation."""
density = gaussian_kde(
np.array([samples_col, samples_row]),
bw_method=offdiag_kwargs["bw_method"],
)
X, Y = np.meshgrid(
np.linspace(
limits_col[0],
limits_col[1],
offdiag_kwargs["bins"],
),
np.linspace(
limits_row[0],
limits_row[1],
offdiag_kwargs["bins"],
),
)
positions = np.vstack([X.ravel(), Y.ravel()])
Z = np.reshape(density(positions).T, X.shape)
if "percentile" in offdiag_kwargs and "levels" in offdiag_kwargs:
Z = probs2contours(Z, offdiag_kwargs["levels"])
else:
Z = (Z - Z.min()) / (Z.max() - Z.min())
return X, Y, Z
def get_diag_funcs(
    diag_list: List[Optional[str]],
) -> List[
    Union[
        Callable[
            [
                Axes,
                np.ndarray,
                torch.Tensor,
                Dict,
            ],
            None,
        ],
        None,
    ]
]:
    """make a list of the functions for the diagonal plots."""
    # Dispatch table; unknown names map to None, as before.
    dispatch = {
        "hist": plt_hist_1d,
        "kde": plt_kde_1d,
        "scatter": plt_scatter_1d,
    }
    return [dispatch.get(name) for name in diag_list]
def get_offdiag_funcs(
    off_diag_list: List[Optional[str]],
) -> List[
    Union[
        Callable[
            [
                Axes,
                np.ndarray,
                torch.Tensor,
                Dict,
            ],
            None,
        ],
        None,
    ]
]:
    """make a list of the functions for the off-diagonal plots."""
    # Dispatch table with the accepted aliases; unknowns map to None.
    dispatch = {
        "hist": plt_hist_2d,
        "hist2d": plt_hist_2d,
        "kde": plt_kde_2d,
        "kde2d": plt_kde_2d,
        "contour": plt_contour_2d,
        "contourf": plt_contour_2d,
        "scatter": plt_scatter_2d,
        "plot": plt_plot_2d,
    }
    return [dispatch.get(name) for name in off_diag_list]
def _format_subplot(
    ax: Axes,
    current: str,
    limits: Union[List[List[float]], torch.Tensor],
    ticks: Optional[Union[List, torch.Tensor]],
    labels_dim: List[str],
    fig_kwargs: Dict,
    row: int,
    col: int,
    dim: int,
    flat: bool,
    excl_lower: bool,
) -> None:
    """
    Format subplot according to fig_kwargs and other arguments

    Args:
        ax: matplotlib axis
        current: str, 'diag','upper' or 'lower'
        limits: list of lists, limits for each dimension
        ticks: list of lists, ticks for each dimension
        labels_dim: list of strings, labels for each dimension
        fig_kwargs: dict, figure kwargs
        row: int, row index
        col: int, column index
        dim: int, number of dimensions
        flat: bool, whether the plot is flat (1 row)
        excl_lower: bool, whether lower triangle is empty
    """
    # Background color
    if (
        current in fig_kwargs["fig_bg_colors"]
        and fig_kwargs["fig_bg_colors"][current] is not None
    ):
        ax.set_facecolor(fig_kwargs["fig_bg_colors"][current])
    # Limits (a Tensor is converted to a nested list first)
    if isinstance(limits, Tensor):
        assert limits.dim() == 2, "Limits should be a 2D tensor."
        limits = limits.tolist()
    if current == "diag":
        # Diagonal x-limits are widened by a small epsilon so edge
        # bars/lines are not clipped.
        eps = fig_kwargs["x_lim_add_eps"]
        ax.set_xlim((limits[col][0] - eps, limits[col][1] + eps))
    else:
        ax.set_xlim((limits[col][0], limits[col][1]))
    if current != "diag":
        ax.set_ylim((limits[row][0], limits[row][1]))
    # Ticks (only the two endpoints of each dimension are shown)
    if ticks is not None:
        ax.set_xticks((ticks[col][0], ticks[col][1]))  # pyright: ignore[reportCallIssue]
        if current != "diag":
            ax.set_yticks((ticks[row][0], ticks[row][1]))  # pyright: ignore[reportCallIssue]
    # make square
    if fig_kwargs["square_subplots"]:
        ax.set_box_aspect(1)
    # Despine: hide right/top spines, push bottom spine outward
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_position(("outward", fig_kwargs["despine"]["offset"]))
    # Formatting axes: x-labels are shown only on the bottom row (or on a
    # diagonal panel when it is the last visible one).
    if current == "diag":  # diagonals
        if excl_lower or col == dim - 1 or flat:
            _format_axis(
                ax,
                xhide=False,
                xlabel=labels_dim[col],
                yhide=True,
                tickformatter=fig_kwargs["tickformatter"],
            )
        else:
            _format_axis(ax, xhide=True, yhide=True)
    else:  # off-diagonals
        if row == dim - 1:
            _format_axis(
                ax,
                xhide=False,
                xlabel=labels_dim[col],
                yhide=True,
                tickformatter=fig_kwargs["tickformatter"],
            )
        else:
            _format_axis(ax, xhide=True, yhide=True)
    if fig_kwargs["tick_labels"] is not None:
        ax.set_xticklabels((  # pyright: ignore[reportCallIssue]
            str(fig_kwargs["tick_labels"][col][0]),
            str(fig_kwargs["tick_labels"][col][1]),
        ))
def _format_axis(
    ax: Axes,
    xhide: bool = True,
    yhide: bool = True,
    xlabel: str = "",
    ylabel: str = "",
    tickformatter=None,
) -> Axes:
    """Format axis spines and ticks.

    All four spines are hidden first; the x/y label, ticks and spine are
    then re-enabled for whichever axis is not hidden.
    """
    for side in ["right", "top", "left", "bottom"]:
        ax.spines[side].set_visible(False)
    if xhide:
        ax.set_xlabel("")
        ax.xaxis.set_ticks_position("none")
        ax.xaxis.set_tick_params(labelbottom=False)
    else:
        ax.set_xlabel(xlabel)
        ax.xaxis.set_ticks_position("bottom")
        ax.xaxis.set_tick_params(labelbottom=True)
        if tickformatter is not None:
            ax.xaxis.set_major_formatter(tickformatter)
        ax.spines["bottom"].set_visible(True)  # pyright: ignore[reportCallIssue]
    if yhide:
        ax.set_ylabel("")
        ax.yaxis.set_ticks_position("none")
        ax.yaxis.set_tick_params(labelleft=False)
    else:
        ax.set_ylabel(ylabel)
        ax.yaxis.set_ticks_position("left")
        ax.yaxis.set_tick_params(labelleft=True)
        if tickformatter is not None:
            ax.yaxis.set_major_formatter(tickformatter)
        ax.spines["left"].set_visible(True)
    return ax
def probs2contours(
probs: np.ndarray,
levels: Union[List, torch.Tensor, np.ndarray],
) -> np.ndarray:
"""Takes an array of probabilities and produces an array of contours at specified
percentile levels.
Args:
probs: Probability array. doesn't have to sum to 1, but it is assumed it
contains all the mass
levels: Percentile levels, have to be in [0.0, 1.0]. Specifies contour levels
that include a given proportion of samples, i.e., 0.1 specifies where the
top 10% of the density is.
Returns:
contours: Array of same shape as probs with percentile labels. Values in output
array denote labels which percentile bin the probability mass belongs to.
Example: for levels = [0.1, 0.5], output array will take on values [1.0, 0.5, 0.1],
where elements labeled "0.1" correspond to the top 10% of the density, "0.5"
corresponds to between top 50% to 10%, etc.
"""
# make sure all contour levels are in [0.0, 1.0]
levels = np.asarray(levels)
assert np.all(levels <= 1.0) and np.all(levels >= 0.0)
# flatten probability array
shape = probs.shape
probs = probs.flatten()
# sort probabilities in descending order
idx_sort = probs.argsort()[::-1]
idx_unsort = idx_sort.argsort()
probs = probs[idx_sort]
# cumulative probabilities
cum_probs = probs.cumsum()
cum_probs /= cum_probs[-1]
# create contours at levels
contours = np.ones_like(cum_probs)
levels = np.sort(levels)[::-1]
for level in levels:
contours[cum_probs <= level] = level
# make sure contours have the order and the shape of the original
# probability array
contours = np.reshape(contours[idx_unsort], shape)
return contours
def ensure_numpy(t: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
    """
    Returns np.ndarray if torch.Tensor was provided.

    Used because samples_nd() can only handle np.ndarray.

    Args:
        t: Input to convert; tensors, arrays, or anything ``np.array`` accepts.

    Returns:
        The input as an ``np.ndarray``.
    """
    if isinstance(t, torch.Tensor):
        # detach() + cpu() so that tensors requiring grad or living on an
        # accelerator convert cleanly; plain CPU tensors are unaffected.
        return t.detach().cpu().numpy()
    elif not isinstance(t, np.ndarray):
        return np.array(t)
    return t
def handle_nan_infs(samples: List[np.ndarray]) -> List[np.ndarray]:
    """Drop rows containing NaNs or Infs from each sample set.

    Emits a warning when either is found. Infs are first rewritten to NaN
    (in place) so a single row filter removes both kinds of bad values.
    """
    for idx, sample in enumerate(samples):
        if np.isnan(sample).any():
            logging.warning("NaNs found in samples, omitting datapoints.")
        if np.isinf(sample).any():
            logging.warning("Infs found in samples, omitting datapoints.")
            # cast inf to nan, so they are omitted in the next step
            np.nan_to_num(
                sample, copy=False, nan=np.nan, posinf=np.nan, neginf=np.nan
            )
        # keep only rows that are entirely finite
        keep = ~np.isnan(sample).any(axis=1)
        samples[idx] = sample[keep]
    return samples
def convert_to_list_of_numpy(
    arr: Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor],
) -> List[np.ndarray]:
    """Normalize the input to a list of ``np.ndarray``.

    A bare array/tensor is wrapped in a one-element list; list entries are
    converted individually.
    """
    if isinstance(arr, list):
        return [ensure_numpy(entry) for entry in arr]
    # single array / tensor: convert and wrap
    return [ensure_numpy(arr)]
def infer_limits(
    samples: List[np.ndarray],
    dim: int,
    points: Optional[List[np.ndarray]] = None,
    eps: float = 0.1,
) -> List[List[float]]:
    """Derive per-dimension plot limits from samples (and optional points).

    Args:
        samples: List of sample sets, each of shape (n, dim).
        dim: Number of dimensions to compute limits for.
        points: Optional list of extra point sets to include in the range.
        eps: Relative margin added on each side of the data range.

    Returns:
        One ``[low, high]`` pair per dimension.
    """
    limits = []
    for d in range(dim):
        # data range across all sample sets for this dimension
        lo = min([np.min(s[:, d]) for s in samples])
        hi = max([np.max(s[:, d]) for s in samples])
        # widen the range so any extra points are inside it too
        if points is not None:
            lo = min(lo, min(np.min(p[:, d]) for p in points))
            hi = max(hi, max(np.max(p[:, d]) for p in points))
        # pad both sides by a fraction of the range
        margin = eps * (hi - lo)
        limits.append([lo - margin, hi + margin])
    return limits
def prepare_for_plot(
    samples: Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor],
    limits: Optional[Union[List, torch.Tensor, np.ndarray]] = None,
    points: Optional[
        Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor]
    ] = None,
) -> Tuple[List[np.ndarray], int, torch.Tensor]:
    """
    Normalize samples/points to lists of numpy arrays, drop NaN/Inf rows,
    and return the samples, their dimensionality, and limits as a tensor.
    """
    samples = convert_to_list_of_numpy(samples)
    points = convert_to_list_of_numpy(points) if points is not None else None
    samples = handle_nan_infs(samples)

    dim = samples[0].shape[1]

    if limits is None or limits == []:
        # no user limits: derive them from the data
        limits = infer_limits(samples, dim, points)
    elif len(limits) == 1:
        # a single limit pair is broadcast to every dimension
        limits = [limits[0]] * dim

    return samples, dim, torch.as_tensor(limits)
def prepare_for_conditional_plot(condition, opts):
    """
    Normalize limits for conditional plots.

    Returns the dimensionality of the condition, the limits as a tensor, and
    per-dimension margins kept just inside the domain boundaries.
    """
    dim = condition.shape[-1]

    # broadcast a single limit pair to every dimension
    user_limits = opts["limits"]
    limits = torch.as_tensor(
        [user_limits[0] for _ in range(dim)] if len(user_limits) == 1 else user_limits
    )

    # Margin so the posterior is never evaluated **exactly** at the boundary.
    eps_margins = (limits[:, 1] - limits[:, 0]) / 1e5

    return dim, limits, eps_margins
def get_conditional_diag_func(opts, limits, eps_margins, resolution):
    """
    Build the diag_func that draws the 1D conditional marginal for the
    parameter indexed by ``row``.
    """

    def diag_func(row, **kwargs):
        # evaluate the 1D conditional density along dimension `row`
        probs = (
            eval_conditional_density(
                opts["density"],
                opts["condition"],
                limits,
                row,
                row,
                resolution=resolution,
                eps_margins1=eps_margins[row],
                eps_margins2=eps_margins[row],
            )
            .to("cpu")
            .numpy()
        )
        grid = np.linspace(limits[row, 0], limits[row, 1], resolution)
        plt.plot(grid, probs, c=opts["samples_colors"][0])

    return diag_func
def pairplot(
    samples: Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor],
    points: Optional[
        Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor]
    ] = None,
    limits: Optional[Union[List, torch.Tensor]] = None,
    subset: Optional[List[int]] = None,
    upper: Optional[Union[List[Optional[str]], str]] = "hist",
    lower: Optional[Union[List[Optional[str]], str]] = None,
    diag: Optional[Union[List[Optional[str]], str]] = "hist",
    figsize: Tuple = (10, 10),
    labels: Optional[List[str]] = None,
    ticks: Optional[Union[List, torch.Tensor]] = None,
    offdiag: Optional[Union[List[Optional[str]], str]] = None,
    diag_kwargs: Optional[Union[List[Optional[Dict]], Dict]] = None,
    upper_kwargs: Optional[Union[List[Optional[Dict]], Dict]] = None,
    lower_kwargs: Optional[Union[List[Optional[Dict]], Dict]] = None,
    fig_kwargs: Optional[Dict] = None,
    fig: Optional[FigureBase] = None,
    axes: Optional[Axes] = None,
    **kwargs: Optional[Any],
) -> Tuple[FigureBase, Axes]:
    """
    Plot samples in a 2D grid showing marginals and pairwise marginals.

    Each of the diagonal plots can be interpreted as a 1D-marginal of the distribution
    that the samples were drawn from. Each upper-diagonal plot can be interpreted as a
    2D-marginal of the distribution.

    Args:
        samples: Samples used to build the histogram.
        points: List of additional points to scatter.
        limits: Array containing the plot xlim for each parameter dimension. If None,
            just use the min and max of the passed samples
        subset: List containing the dimensions to plot. E.g. subset=[1,3] will plot
            plot only the 1st and 3rd dimension but will discard the 0th and 2nd (and,
            if they exist, the 4th, 5th and so on).
        upper: Plotting style for upper diagonal, {hist, scatter, contour, kde,
            None}.
        lower: Plotting style for upper diagonal, {hist, scatter, contour, kde,
            None}.
        diag: Plotting style for diagonal, {hist, scatter, kde}.
        figsize: Size of the entire figure.
        labels: List of strings specifying the names of the parameters.
        ticks: Position of the ticks.
        offdiag: deprecated, use upper instead.
        diag_kwargs: Additional arguments to adjust the diagonal plot,
            see the source code in `_get_default_diag_kwarg()`
        upper_kwargs: Additional arguments to adjust the upper diagonal plot,
            see the source code in `_get_default_offdiag_kwarg()`
        lower_kwargs: Additional arguments to adjust the lower diagonal plot,
            see the source code in `_get_default_offdiag_kwarg()`
        fig_kwargs: Additional arguments to adjust the overall figure,
            see the source code in `_get_default_fig_kwargs()`
        fig: matplotlib figure to plot on.
        axes: matplotlib axes corresponding to fig.
        **kwargs: Additional arguments to adjust the plot (deprecated).

    Returns: figure and axis of posterior distribution plot
    """
    # Backwards compatibility: bare **kwargs dispatch to the deprecated API.
    if len(kwargs) > 0:
        warn(
            f"you passed deprecated arguments **kwargs: {[key for key in kwargs]}, use "
            "fig_kwargs instead. We continue calling the deprecated pairplot function",
            DeprecationWarning,
            stacklevel=2,
        )
        fig, axes = pairplot_dep(
            samples,
            points,
            limits,
            subset,
            offdiag,
            diag,
            figsize,
            labels,
            ticks,
            upper,
            fig,
            axes,
            **kwargs,
        )
        return fig, axes

    samples, dim, limits = prepare_for_plot(samples, limits, points)

    # figure kwargs: defaults overlaid with user-provided values
    fig_kwargs_filled = _update(_get_default_fig_kwargs(), fig_kwargs)

    # checks.
    if fig_kwargs_filled["legend"]:
        assert len(fig_kwargs_filled["samples_labels"]) >= len(
            samples
        ), "Provide at least as many labels as samples."
    if offdiag is not None:
        warn("offdiag is deprecated, use upper or lower instead.", stacklevel=2)
        upper = offdiag

    def _prepare_part(style, style_kwargs, get_funcs, get_default_kwargs):
        """Resolve plotting funcs and per-sample kwargs for one grid part.

        The diag/upper/lower sections previously repeated this logic verbatim;
        factoring it out keeps the three sections in sync.
        """
        style_list = to_list_string(style, len(samples))
        style_kwargs_list = to_list_kwargs(style_kwargs, len(samples))
        funcs = get_funcs(style_list)
        kwargs_filled = []
        for i, (style_i, kwargs_i) in enumerate(zip(style_list, style_kwargs_list)):
            # update the defaults dictionary with user provided values
            kwargs_filled.append(_update(get_default_kwargs(style_i, i), kwargs_i))
        return funcs, kwargs_filled

    # Prepare diag / upper / lower parts
    diag_func, diag_kwargs_filled = _prepare_part(
        diag, diag_kwargs, get_diag_funcs, _get_default_diag_kwargs
    )
    upper_func, upper_kwargs_filled = _prepare_part(
        upper, upper_kwargs, get_offdiag_funcs, _get_default_offdiag_kwargs
    )
    lower_func, lower_kwargs_filled = _prepare_part(
        lower, lower_kwargs, get_offdiag_funcs, _get_default_offdiag_kwargs
    )

    return _arrange_grid(
        diag_func,
        upper_func,
        lower_func,
        diag_kwargs_filled,
        upper_kwargs_filled,
        lower_kwargs_filled,
        samples,
        points,
        limits,
        subset,
        figsize,
        labels,
        ticks,
        fig,
        axes,
        fig_kwargs_filled,
    )
def marginal_plot(
    samples: Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor],
    points: Optional[
        Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor]
    ] = None,
    limits: Optional[Union[List, torch.Tensor]] = None,
    subset: Optional[List[int]] = None,
    diag: Optional[Union[List[Optional[str]], str]] = "hist",
    figsize: Optional[Tuple] = (10, 2),
    labels: Optional[List[str]] = None,
    ticks: Optional[Union[List, torch.Tensor]] = None,
    diag_kwargs: Optional[Union[List[Optional[Dict]], Dict]] = None,
    fig_kwargs: Optional[Dict] = None,
    fig: Optional[FigureBase] = None,
    axes: Optional[Axes] = None,
    **kwargs: Optional[Any],
) -> Tuple[FigureBase, Axes]:
    """
    Plot samples in a row showing 1D marginals of selected dimensions.

    Each subplot is a 1D-marginal of the distribution the samples were
    drawn from.

    Args:
        samples: Samples used to build the histogram.
        points: List of additional points to scatter.
        limits: Array containing the plot xlim for each parameter dimension. If None,
            just use the min and max of the passed samples
        subset: List containing the dimensions to plot. E.g. subset=[1,3] will plot
            plot only the 1st and 3rd dimension but will discard the 0th and 2nd (and,
            if they exist, the 4th, 5th and so on).
        diag: Plotting style for 1D marginals, {hist, kde cond, None}.
        figsize: Size of the entire figure.
        labels: List of strings specifying the names of the parameters.
        ticks: Position of the ticks.
        diag_kwargs: Additional arguments to adjust the diagonal plot,
            see the source code in `_get_default_diag_kwarg()`
        fig_kwargs: Additional arguments to adjust the overall figure,
            see the source code in `_get_default_fig_kwargs()`
        fig: matplotlib figure to plot on.
        axes: matplotlib axes corresponding to fig.
        **kwargs: Additional arguments to adjust the plot (deprecated)

    Returns: figure and axis of posterior distribution plot
    """
    # backwards compatibility: bare **kwargs dispatch to the deprecated API
    if len(kwargs) > 0:
        warn(
            "**kwargs are deprecated, use fig_kwargs instead. "
            "calling the to be deprecated marginal_plot function",
            DeprecationWarning,
            stacklevel=2,
        )
        return marginal_plot_dep(
            samples,
            points,
            limits,
            subset,
            diag,
            figsize,
            labels,
            ticks,
            fig,
            axes,
            **kwargs,
        )

    samples, dim, limits = prepare_for_plot(samples, limits)

    # resolve per-sample plotting functions and kwargs for the 1D marginals
    diag_styles = to_list_string(diag, len(samples))
    diag_user_kwargs = to_list_kwargs(diag_kwargs, len(samples))
    diag_func = get_diag_funcs(diag_styles)
    diag_kwargs_filled = [
        _update(_get_default_diag_kwargs(style_i, i), kwargs_i)
        for i, (style_i, kwargs_i) in enumerate(zip(diag_styles, diag_user_kwargs))
    ]

    # figure-level kwargs: defaults overridden by user values
    fig_kwargs_filled = _update(_get_default_fig_kwargs(), fig_kwargs)

    # a row of marginals is the grid with empty upper/lower parts
    return _arrange_grid(
        diag_func,
        [None],
        [None],
        diag_kwargs_filled,
        [None],
        [None],
        samples,
        points,
        limits,
        subset,
        figsize,
        labels,
        ticks,
        fig,
        axes,
        fig_kwargs_filled,
    )
def _get_default_offdiag_kwargs(offdiag: Optional[str], i: int = 0) -> Dict:
"""Get default offdiag kwargs."""
if offdiag == "kde" or offdiag == "kde2d":
offdiag_kwargs = {
"bw_method": "scott",
"bins": 50,
"mpl_kwargs": {"cmap": "viridis", "origin": "lower", "aspect": "auto"},
}
elif offdiag == "hist" or offdiag == "hist2d":
offdiag_kwargs = {
"bin_heuristic": None, # "Freedman-Diaconis",
"mpl_kwargs": {"cmap": "viridis", "origin": "lower", "aspect": "auto"},
"np_hist_kwargs": {"bins": 50, "density": False},
}
elif offdiag == "scatter":
offdiag_kwargs = {
"mpl_kwargs": {
"color": plt.rcParams["axes.prop_cycle"].by_key()["color"][i * 2], # pyright: ignore[reportOptionalMemberAccess]
"edgecolor": "white",
"alpha": 0.5,
"rasterized": False,
}
}
elif offdiag == "contour" or offdiag == "contourf":
offdiag_kwargs = {
"bw_method": "scott",
"bins": 50,
"levels": [0.68, 0.95, 0.99],
"percentile": True,
"mpl_kwargs": {
"colors": plt.rcParams["axes.prop_cycle"].by_key()["color"][i * 2], # pyright: ignore[reportOptionalMemberAccess]
},
}
elif offdiag == "plot":
offdiag_kwargs = {
"mpl_kwargs": {
"color": plt.rcParams["axes.prop_cycle"].by_key()["color"][i * 2], # pyright: ignore[reportOptionalMemberAccess]
"aspect": "auto",
}
}
else:
offdiag_kwargs = {}
return offdiag_kwargs
def _get_default_diag_kwargs(diag: Optional[str], i: int = 0) -> Dict:
"""Get default diag kwargs."""
if diag == "kde":
diag_kwargs = {
"bw_method": "scott",
"bins": 50,
"mpl_kwargs": {
"color": plt.rcParams["axes.prop_cycle"].by_key()["color"][i * 2] # pyright: ignore[reportOptionalMemberAccess]
},
}
elif diag == "hist":
diag_kwargs = {
"bin_heuristic": "Freedman-Diaconis",
"mpl_kwargs": {
"color": plt.rcParams["axes.prop_cycle"].by_key()["color"][i * 2], # pyright: ignore[reportOptionalMemberAccess]
"density": False,
"histtype": "step",
},
}
elif diag == "scatter":
diag_kwargs = {
"mpl_kwargs": {
"color": plt.rcParams["axes.prop_cycle"].by_key()["color"][i * 2] # pyright: ignore[reportOptionalMemberAccess]
}
}
else:
diag_kwargs = {}
return diag_kwargs
def _get_default_fig_kwargs() -> Dict:
    """Build the default figure-level options for pairplot/marginal_plot."""
    prop_colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]  # pyright: ignore[reportOptionalMemberAccess]
    return {
        "legend": None,
        "legend_kwargs": {},
        # labels
        "points_labels": [f"points_{idx}" for idx in range(10)],  # for points
        "samples_labels": [f"samples_{idx}" for idx in range(10)],  # for samples
        # colors: take even colors for samples, odd colors for points
        "samples_colors": prop_colors[0::2],
        "points_colors": prop_colors[1::2],
        # ticks
        "tickformatter": mpl.ticker.FormatStrFormatter("%g"),  # type: ignore
        "tick_labels": None,
        # formatting points (scale, markers)
        "points_diag": {},
        "points_offdiag": {"marker": ".", "markersize": 10},
        # other options
        "fig_bg_colors": {"offdiag": None, "diag": None, "lower": None},
        "fig_subplots_adjust": {"top": 0.9},
        "subplots": {},
        "despine": {"offset": 5},
        "title": None,
        "title_format": {"fontsize": 16},
        "x_lim_add_eps": 1e-5,
        "square_subplots": True,
    }
def conditional_marginal_plot(
    density: Any,
    condition: torch.Tensor,
    limits: Union[List, torch.Tensor],
    points: Optional[
        Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor]
    ] = None,
    subset: Optional[List[int]] = None,
    resolution: int = 50,
    figsize: Tuple = (10, 10),
    labels: Optional[List[str]] = None,
    ticks: Optional[Union[List, torch.Tensor]] = None,
    fig=None,
    axes=None,
    **kwargs,
):
    r"""
    Plot conditional distribution given all other parameters.

    The conditionals can be interpreted as slices through the `density` at a location
    given by `condition`.

    For example:
    Say we have a 3D density with parameters $\theta_0$, $\theta_1$, $\theta_2$ and
    a condition $c$ passed by the user in the `condition` argument.
    For the plot of $\theta_0$ on the diagonal, this will plot the conditional
    $p(\theta_0 | \theta_1=c[1], \theta_2=c[2])$. All other diagonals and are built in
    the corresponding way.

    Args:
        density: Probability density with a `log_prob()` method.
        condition: Condition that all but the one/two regarded parameters are fixed to.
            The condition should be of shape (1, dim_theta), i.e. it could e.g. be
            a sample from the posterior distribution.
        limits: Limits in between which each parameter will be evaluated.
        points: Additional points to scatter.
        subset: List containing the dimensions to plot. E.g. subset=[1,3] will plot
            plot only the 1st and 3rd dimension but will discard the 0th and 2nd (and,
            if they exist, the 4th, 5th and so on)
        resolution: Resolution of the grid at which we evaluate the `pdf`.
        figsize: Size of the entire figure.
        labels: List of strings specifying the names of the parameters.
        ticks: Position of the ticks.
        points_colors: Colors of the `points`.
        fig: matplotlib figure to plot on.
        axes: matplotlib axes corresponding to fig.
        **kwargs: Additional arguments to adjust the plot, e.g., `samples_colors`,
            `points_colors` and many more, see the source code in `_get_default_opts()`
            in `sbi.analysis.plot` for details.

    Returns: figure and axis of posterior distribution plot
    """
    # Setting these is required because _marginal will check if opts['diag'] is
    # `None`. This would break if opts has no key 'diag'.
    diag = "cond"
    opts = _get_default_opts()
    # update the defaults dictionary by the current values of the variables (passed by
    # the user)
    # NOTE: `_update(opts, locals())` harvests the *local variable names* of this
    # frame (density, condition, limits, diag, ...). Renaming any local above
    # would silently change the resulting options — do not refactor names here.
    opts = _update(opts, locals())
    opts = _update(opts, kwargs)
    # normalize limits and compute boundary margins for density evaluation
    dim, limits, eps_margins = prepare_for_conditional_plot(condition, opts)
    diag_func = get_conditional_diag_func(opts, limits, eps_margins, resolution)
    # offdiag_func is None: only 1D conditional marginals are drawn
    return _arrange_plots(
        diag_func, None, dim, limits, points, opts, fig=fig, axes=axes
    )
def conditional_pairplot(
    density: Any,
    condition: torch.Tensor,
    limits: Union[List, torch.Tensor],
    points: Optional[
        Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor]
    ] = None,
    subset: Optional[List[int]] = None,
    resolution: int = 50,
    figsize: Tuple = (10, 10),
    labels: Optional[List[str]] = None,
    ticks: Optional[Union[List, torch.Tensor]] = None,
    fig=None,
    axes=None,
    **kwargs,
):
    r"""
    Plot conditional distribution given all other parameters.

    The conditionals can be interpreted as slices through the `density` at a location
    given by `condition`.

    For example:
    Say we have a 3D density with parameters $\theta_0$, $\theta_1$, $\theta_2$ and
    a condition $c$ passed by the user in the `condition` argument.
    For the plot of $\theta_0$ on the diagonal, this will plot the conditional
    $p(\theta_0 | \theta_1=c[1], \theta_2=c[2])$. For the upper
    diagonal of $\theta_1$ and $\theta_2$, it will plot
    $p(\theta_1, \theta_2 | \theta_0=c[0])$. All other diagonals and upper-diagonals
    are built in the corresponding way.

    Args:
        density: Probability density with a `log_prob()` method.
        condition: Condition that all but the one/two regarded parameters are fixed to.
            The condition should be of shape (1, dim_theta), i.e. it could e.g. be
            a sample from the posterior distribution.
        limits: Limits in between which each parameter will be evaluated.
        points: Additional points to scatter.
        subset: List containing the dimensions to plot. E.g. subset=[1,3] will plot
            plot only the 1st and 3rd dimension but will discard the 0th and 2nd (and,
            if they exist, the 4th, 5th and so on)
        resolution: Resolution of the grid at which we evaluate the `pdf`.
        figsize: Size of the entire figure.
        labels: List of strings specifying the names of the parameters.
        ticks: Position of the ticks.
        points_colors: Colors of the `points`.
        fig: matplotlib figure to plot on.
        axes: matplotlib axes corresponding to fig.
        **kwargs: Additional arguments to adjust the plot, e.g., `samples_colors`,
            `points_colors` and many more, see the source code in `_get_default_opts()`
            in `sbi.analysis.plot` for details.

    Returns: figure and axis of posterior distribution plot
    """
    # evaluate on the density's device when it exposes one, else on CPU
    device = density._device if hasattr(density, "_device") else "cpu"
    # Setting these is required because _pairplot_scaffold will check if opts['diag'] is
    # `None`. This would break if opts has no key 'diag'. Same for 'upper'.
    diag = "cond"
    offdiag = "cond"
    opts = _get_default_opts()
    # update the defaults dictionary by the current values of the variables (passed by
    # the user)
    # NOTE: `_update(opts, locals())` harvests the *local variable names* of this
    # frame (density, condition, diag, offdiag, ...). Renaming any local above
    # would silently change the resulting options — do not refactor names here.
    opts = _update(opts, locals())
    opts = _update(opts, kwargs)
    opts["lower"] = None
    # normalize limits and compute boundary margins for density evaluation
    dim, limits, eps_margins = prepare_for_conditional_plot(condition, opts)
    diag_func = get_conditional_diag_func(opts, limits, eps_margins, resolution)
    def offdiag_func(row, col, **kwargs):
        # 2D conditional density over dimensions (row, col); result moved to CPU
        p_image = (
            eval_conditional_density(
                opts["density"],
                opts["condition"].to(device),
                limits.to(device),
                row,
                col,
                resolution=resolution,
                eps_margins1=eps_margins[row],
                eps_margins2=eps_margins[col],
            )
            .to("cpu")
            .numpy()
        )
        # transpose so the `col` dimension runs along the x-axis of the image
        plt.imshow(
            p_image.T,
            origin="lower",
            extent=(
                limits[col, 0].item(),
                limits[col, 1].item(),
                limits[row, 0].item(),
                limits[row, 1].item(),
            ),
            aspect="auto",
        )
    return _arrange_plots(
        diag_func, offdiag_func, dim, limits, points, opts, fig=fig, axes=axes
    )
def _arrange_grid(
    diag_funcs: List[Optional[Callable]],
    upper_funcs: List[Optional[Callable]],
    lower_funcs: List[Optional[Callable]],
    diag_kwargs: List[Optional[Dict]],
    upper_kwargs: List[Optional[Dict]],
    lower_kwargs: List[Optional[Dict]],
    samples: List[np.ndarray],
    points: Optional[
        Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor]
    ],
    limits: torch.Tensor,
    subset: Optional[List[int]],
    figsize: Optional[Tuple],
    labels: Optional[List[str]],
    ticks: Optional[Union[List, torch.Tensor]],
    fig: Optional[FigureBase],
    axes: Optional[Axes],
    fig_kwargs: Dict,
) -> Tuple[FigureBase, Axes]:
    """
    Arranges the plots for any function that plots parameters either in a row of 1D
    marginals or a pairplot setting.

    Args:
        diag_funcs: List of plotting function that will be executed for the diagonal
            elements of the plot (or the columns of a row of 1D marginals).
        upper_funcs: List of plotting function that will be executed for the
            upper-diagonal elements of the plot. None if we are in a 1D setting.
        lower_funcs: List of plotting function that will be executed for the
            lower-diagonal elements of the plot. None if we are in a 1D setting.
        diag_kwargs: Additional arguments to adjust the diagonal plot,
            see the source code in `_get_default_diag_kwarg()`
        upper_kwargs: Additional arguments to adjust the upper diagonal plot,
            see the source code in `_get_default_offdiag_kwarg()`
        lower_kwargs: Additional arguments to adjust the lower diagonal plot,
            see the source code in `_get_default_offdiag_kwarg()`
        samples: List of samples given to the plotting functions
        points: List of additional points to scatter.
        limits: Limits for each dimension / axis.
        subset: List containing the dimensions to plot. E.g. subset=[1,3] will plot
            plot only the 1st and 3rd dimension
        figsize: Size of the entire figure.
        labels: List of strings specifying the names of the parameters.
        ticks: Position of the ticks.
        fig: matplotlib figure to plot on.
        axes: matplotlib axes corresponding to fig.
        fig_kwargs: Additional arguments to adjust the overall figure,
            see the source code in `_get_default_fig_kwargs()`

    Returns:
        Fig: matplotlib figure
        Axes: matplotlib axes
    """
    dim = samples[0].shape[1]
    # Prepare points: normalize every point set to a 2D numpy array.
    if points is None:
        points = []
    if not isinstance(points, list):
        points = ensure_numpy(points)  # type: ignore
        points = [points]
    # single pass suffices; ensure_numpy is a no-op on arrays (previously this
    # ran np.atleast_2d twice, which was redundant)
    points = [np.atleast_2d(ensure_numpy(p)) for p in points]
    # TODO: add asserts checking compatibility of dimensions
    # Prepare labels
    if labels == [] or labels is None:
        labels = ["dim {}".format(i + 1) for i in range(dim)]
    # Prepare ticks: a single ticks entry is broadcast to all dimensions
    if ticks is not None:
        if len(ticks) == 1:
            ticks = [ticks[0] for _ in range(dim)]
        elif ticks == []:
            ticks = None
    # Figure out if we subset the plot
    if subset is None:
        rows = cols = dim
        subset = [i for i in range(dim)]
    else:
        if isinstance(subset, int):
            subset = [subset]
        elif isinstance(subset, list):
            pass
        else:
            raise NotImplementedError
        rows = cols = len(subset)
    # check which subplots are empty
    excl_lower = all(v is None for v in lower_funcs)
    excl_upper = all(v is None for v in upper_funcs)
    excl_diag = all(v is None for v in diag_funcs)
    # "flat" = a single row of 1D marginals (no off-diagonal content)
    flat = excl_lower and excl_upper
    one_dim = dim == 1
    # select the subset of rows and cols to be plotted
    if flat:
        rows = 1
        # `row` is a placeholder in flat mode; cell content must index by `col`
        subset_rows = [1]
    else:
        subset_rows = subset
    subset_cols = subset
    # Create fig and axes if they were not passed.
    if fig is None or axes is None:
        fig, axes = plt.subplots(rows, cols, figsize=figsize, **fig_kwargs["subplots"])  # pyright: ignore reportAssignmenttype
    else:
        assert axes.shape == (  # pyright: ignore reportAttributeAccessIssue
            rows,
            cols,
        ), f"Passed axes must match subplot shape: {rows, cols}."
    # Style figure
    fig.subplots_adjust(**fig_kwargs["fig_subplots_adjust"])
    fig.suptitle(fig_kwargs["title"], **fig_kwargs["title_format"])
    # Main Loop through all subplots, style and create the figures
    for row_idx, row in enumerate(subset_rows):
        for col_idx, col in enumerate(subset_cols):
            # classify the current cell
            if flat or row == col:
                current = "diag"
            elif row < col:
                current = "upper"
            else:
                current = "lower"
            # pick the matplotlib Axes for this cell
            if one_dim:
                ax = axes  # pyright: ignore reportIndexIssue
            elif flat:
                ax = axes[col_idx]  # pyright: ignore reportIndexIssue
            else:
                ax = axes[row_idx, col_idx]  # pyright: ignore reportIndexIssue
            # Diagonals
            _format_subplot(
                ax,  # pyright: ignore reportArgumentType
                current,
                limits,
                ticks,
                labels,
                fig_kwargs,
                row,
                col,
                dim,
                flat,
                excl_lower,
            )
            if current == "diag":
                if excl_diag:
                    ax.axis("off")  # pyright: ignore reportOptionalMemberAccess
                else:
                    for sample_ind, sample in enumerate(samples):
                        diag_f = diag_funcs[sample_ind]
                        if callable(diag_f):  # is callable:
                            # FIX: index by `col`, not `row`. On the true
                            # diagonal row == col so this is equivalent, but in
                            # flat mode `row` is the constant placeholder 1 and
                            # indexing by it drew dimension 1 in every column.
                            # The points overlay below already uses `col`.
                            diag_f(
                                ax, sample[:, col], limits[col], diag_kwargs[sample_ind]
                            )
                    if len(points) > 0:
                        extent = ax.get_ylim()  # pyright: ignore reportOptionalMemberAccess
                        for n, v in enumerate(points):
                            ax.plot(  # pyright: ignore reportOptionalMemberAccess
                                [v[:, col], v[:, col]],
                                extent,
                                color=fig_kwargs["points_colors"][n],
                                **fig_kwargs["points_diag"],
                                label=fig_kwargs["points_labels"][n],
                            )
                        if fig_kwargs["legend"] and col == 0:
                            ax.legend(**fig_kwargs["legend_kwargs"])  # pyright: ignore reportOptionalMemberAccess
            # Off-diagonals
            # upper
            elif current == "upper":
                if excl_upper:
                    ax.axis("off")  # pyright: ignore reportOptionalMemberAccess
                else:
                    for sample_ind, sample in enumerate(samples):
                        upper_f = upper_funcs[sample_ind]
                        if callable(upper_f):
                            # x-axis is dimension `col`, y-axis dimension `row`
                            upper_f(
                                ax,
                                sample[:, col],
                                sample[:, row],
                                limits[col],
                                limits[row],
                                upper_kwargs[sample_ind],
                            )
                    if len(points) > 0:
                        for n, v in enumerate(points):
                            ax.plot(  # pyright: ignore reportOptionalMemberAccess
                                v[:, col],
                                v[:, row],
                                color=fig_kwargs["points_colors"][n],
                                **fig_kwargs["points_offdiag"],
                            )
            # lower
            elif current == "lower":
                if excl_lower:
                    ax.axis("off")  # pyright: ignore reportOptionalMemberAccess
                else:
                    for sample_ind, sample in enumerate(samples):
                        lower_f = lower_funcs[sample_ind]
                        if callable(lower_f):
                            # NOTE(review): samples/limits are passed as
                            # (row, col) here while the points overlay below
                            # uses `col` for x — looks inconsistent with the
                            # upper branch; confirm the intended orientation
                            # before changing.
                            lower_f(
                                ax,
                                sample[:, row],
                                sample[:, col],
                                limits[row],
                                limits[col],
                                lower_kwargs[sample_ind],
                            )
                    if len(points) > 0:
                        for n, v in enumerate(points):
                            ax.plot(  # pyright: ignore reportOptionalMemberAccess
                                v[:, col],
                                v[:, row],
                                color=fig_kwargs["points_colors"][n],
                                **fig_kwargs["points_offdiag"],
                            )
    # Add dots if we subset: signal that dimensions were omitted
    if len(subset) < dim:
        if flat:
            ax = axes[len(subset) - 1]  # pyright: ignore[reportIndexIssue, reportOptionalSubscript]
            x0, x1 = ax.get_xlim()
            y0, y1 = ax.get_ylim()
            text_kwargs = {"fontsize": plt.rcParams["font.size"] * 2.0}  # pyright: ignore[reportOptionalOperand]
            ax.text(x1 + (x1 - x0) / 8.0, (y0 + y1) / 2.0, "...", **text_kwargs)
        else:
            for row in range(len(subset)):
                ax = axes[row, len(subset) - 1]  # pyright: ignore[reportIndexIssue, reportOptionalSubscript]
                x0, x1 = ax.get_xlim()
                y0, y1 = ax.get_ylim()
                text_kwargs = {"fontsize": plt.rcParams["font.size"] * 2.0}  # pyright: ignore[reportOptionalOperand]
                ax.text(x1 + (x1 - x0) / 8.0, (y0 + y1) / 2.0, "...", **text_kwargs)
                if row == len(subset) - 1:
                    ax.text(
                        x1 + (x1 - x0) / 12.0,
                        y0 - (y1 - y0) / 1.5,
                        "...",
                        rotation=-45,
                        **text_kwargs,
                    )
    return fig, axes  # pyright: ignore[reportReturnType]
def sbc_rank_plot(
    ranks: Union[Tensor, np.ndarray, List[Tensor], List[np.ndarray]],
    num_posterior_samples: int,
    num_bins: Optional[int] = None,
    plot_type: str = "cdf",
    parameter_labels: Optional[List[str]] = None,
    ranks_labels: Optional[List[str]] = None,
    colors: Optional[List[str]] = None,
    fig: Optional[Figure] = None,
    ax: Optional[Axes] = None,
    figsize: Optional[tuple] = None,
    **kwargs,
) -> Tuple[Figure, Axes]:
    """Plot simulation-based calibration ranks as empirical CDFs or histograms.

    Thin public wrapper around ``_sbc_rank_plot``; additional options can be
    passed via ``kwargs`` (see ``_sbc_rank_plot``).

    Args:
        ranks: Tensor of ranks to be plotted shape (num_sbc_runs, num_parameters), or
            list of Tensors when comparing several sets of ranks, e.g., set of ranks
            obtained from different methods.
        num_bins: number of bins used for binning the ranks, default is
            num_sbc_runs / 20.
        plot_type: type of SBC plot, histograms ("hist") or empirical cdfs ("cdf").
        parameter_labels: list of labels for each parameter dimension.
        ranks_labels: list of labels for each set of ranks.
        colors: list of colors for each parameter dimension, or each set of ranks.

    Returns:
        fig, ax: figure and axis objects.
    """
    # forward everything by keyword so the mapping to _sbc_rank_plot is explicit
    return _sbc_rank_plot(
        ranks=ranks,
        num_posterior_samples=num_posterior_samples,
        num_bins=num_bins,
        plot_type=plot_type,
        parameter_labels=parameter_labels,
        ranks_labels=ranks_labels,
        colors=colors,
        fig=fig,
        ax=ax,
        figsize=figsize,
        **kwargs,
    )
def _sbc_rank_plot(
    ranks: Union[Tensor, np.ndarray, List[Tensor], List[np.ndarray]],
    num_posterior_samples: int,
    num_bins: Optional[int] = None,
    plot_type: str = "cdf",
    parameter_labels: Optional[List[str]] = None,
    ranks_labels: Optional[List[str]] = None,
    colors: Optional[List[str]] = None,
    num_repeats: int = 50,
    line_alpha: float = 0.8,
    show_uniform_region: bool = True,
    uniform_region_alpha: float = 0.3,
    xlim_offset_factor: float = 0.1,
    num_cols: int = 4,
    params_in_subplots: bool = False,
    show_ylabel: bool = False,
    sharey: bool = False,
    fig: Optional[FigureBase] = None,
    legend_kwargs: Optional[Dict] = None,
    ax=None,  # no type hint to avoid hassle with pyright. Should be `array(Axes).`
    figsize: Optional[tuple] = None,
) -> Tuple[Figure, Axes]:
    """Plot simulation-based calibration ranks as empirical CDFs or histograms.

    Args:
        ranks: Tensor of ranks to be plotted shape (num_sbc_runs, num_parameters),
            or list of Tensors when comparing several sets of ranks, e.g., sets
            of ranks obtained from different methods.
        num_posterior_samples: number of posterior samples used for ranking.
        num_bins: number of bins used for binning the ranks, default is
            num_sbc_runs / 20.
        plot_type: type of SBC plot, histograms ("hist") or empirical cdfs ("cdf").
        parameter_labels: list of labels for each parameter dimension.
        ranks_labels: list of labels for each set of ranks.
        colors: list of colors for each parameter dimension, or each set of ranks.
        num_repeats: number of repeats for each empirical CDF step (resolution).
        line_alpha: alpha for cdf lines or histograms.
        show_uniform_region: whether to plot the region showing the cdfs expected
            under uniformity.
        uniform_region_alpha: alpha for region showing the cdfs expected under
            uniformity.
        xlim_offset_factor: factor for empty space left and right of the histogram.
        num_cols: number of subplot columns, e.g., when plotting ranks of many
            parameters.
        params_in_subplots: whether to show each parameter in a separate subplot,
            or all in one.
        show_ylabel: whether to show ylabels and ticks.
        sharey: whether to share the y-labels, ticks, and limits across subplots.
        fig: figure object to plot in.
        legend_kwargs: kwargs forwarded to `plt.legend`.
        ax: axis object, must contain as many subplots as parameters or len(ranks).
        figsize: dimensions of figure object, default (8, 5) or (len(ranks) * 4, 5).

    Returns:
        fig, ax: figure and axis objects.
    """
    # Normalize input to a list of numpy arrays. Work on a shallow copy so the
    # Tensor -> ndarray conversion below does not mutate the caller's list
    # (the previous version converted entries of the passed-in list in place).
    if isinstance(ranks, (Tensor, np.ndarray)):
        ranks_list = [ranks]
    else:
        assert isinstance(ranks, List)
        ranks_list = list(ranks)
    for idx, rank in enumerate(ranks_list):
        assert isinstance(rank, (Tensor, np.ndarray))
        if isinstance(rank, Tensor):
            ranks_list[idx] = rank.numpy()
    plot_types = ["hist", "cdf"]
    # BUGFIX: this message was missing the f-prefix and printed the
    # placeholders "{plot_type}" literally.
    assert plot_type in plot_types, (
        f"plot type {plot_type} not implemented, use one in {plot_types}."
    )
    if legend_kwargs is None:
        legend_kwargs = dict(loc="best", handlelength=0.8)
    num_sbc_runs, num_parameters = ranks_list[0].shape
    num_ranks = len(ranks_list)
    # For multiple methods, and for the hist plots plot each param in a separate
    # subplot.
    if num_ranks > 1 or plot_type == "hist":
        params_in_subplots = True
    for ranki in ranks_list:
        assert (
            ranki.shape == ranks_list[0].shape
        ), "all ranks in list must have the same shape."
    num_rows = int(np.ceil(num_parameters / num_cols))
    if figsize is None:
        figsize = (num_parameters * 4, num_rows * 5) if params_in_subplots else (8, 5)
    if parameter_labels is None:
        parameter_labels = [f"dim {i + 1}" for i in range(num_parameters)]
    if ranks_labels is None:
        ranks_labels = [f"rank set {i + 1}" for i in range(num_ranks)]
    if num_bins is None:
        # Recommendation from Talts et al.
        num_bins = num_sbc_runs // 20
    # Plot one row subplot for each parameter, different "methods" on top of
    # each other.
    if params_in_subplots:
        if fig is None or ax is None:
            fig, ax = plt.subplots(
                num_rows,
                min(num_parameters, num_cols),
                figsize=figsize,
                sharey=sharey,
            )
            ax = np.atleast_1d(ax)  # type: ignore
        else:
            assert (
                ax.size >= num_parameters
            ), "There must be at least as many subplots as parameters."
            num_rows = ax.shape[0] if ax.ndim > 1 else 1
        assert ax is not None
        col_idx, row_idx = 0, 0
        for ii, ranki in enumerate(ranks_list):
            for jj in range(num_parameters):
                col_idx = jj if num_rows == 1 else jj % num_cols
                row_idx = jj // num_cols
                plt.sca(ax[col_idx] if num_rows == 1 else ax[row_idx, col_idx])
                if plot_type == "cdf":
                    _plot_ranks_as_cdf(
                        ranki[:, jj],  # type: ignore
                        num_bins,
                        num_repeats,
                        ranks_label=ranks_labels[ii],
                        color=f"C{ii}" if colors is None else colors[ii],
                        xlabel=f"posterior ranks {parameter_labels[jj]}",
                        # Show legend and ylabel only in first subplot.
                        show_ylabel=jj == 0,
                        alpha=line_alpha,
                    )
                    # Draw the uniformity band only once per subplot.
                    if ii == 0 and show_uniform_region:
                        _plot_cdf_region_expected_under_uniformity(
                            num_sbc_runs,
                            num_bins,
                            num_repeats,
                            alpha=uniform_region_alpha,
                        )
                elif plot_type == "hist":
                    _plot_ranks_as_hist(
                        ranki[:, jj],  # type: ignore
                        num_bins,
                        num_posterior_samples,
                        ranks_label=ranks_labels[ii],
                        color="firebrick" if colors is None else colors[ii],
                        xlabel=f"posterior rank {parameter_labels[jj]}",
                        # Show legend and ylabel only in first subplot.
                        show_ylabel=show_ylabel,
                        alpha=line_alpha,
                        xlim_offset_factor=xlim_offset_factor,
                    )
                    # Plot expected uniform band.
                    _plot_hist_region_expected_under_uniformity(
                        num_sbc_runs,
                        num_bins,
                        num_posterior_samples,
                        alpha=uniform_region_alpha,
                    )
                    # show legend only in first subplot.
                    if jj == 0 and ranks_labels[ii] is not None:
                        plt.legend(**legend_kwargs)
                else:
                    raise ValueError(
                        f"plot_type {plot_type} not defined, use one in {plot_types}"
                    )
        # Remove empty subplots.
        col_idx += 1
        while num_rows > 1 and col_idx < num_cols:
            ax[row_idx, col_idx].axis("off")
            col_idx += 1
    # When there is only one set of ranks show all params in a single subplot.
    else:
        if fig is None or ax is None:
            fig, ax = plt.subplots(1, 1, figsize=figsize)
        plt.sca(ax)
        ranki = ranks_list[0]
        for jj in range(num_parameters):
            _plot_ranks_as_cdf(
                ranki[:, jj],  # type: ignore
                num_bins,
                num_repeats,
                ranks_label=parameter_labels[jj],
                color=f"C{jj}" if colors is None else colors[jj],
                xlabel="posterior rank",
                # Plot ylabel and legend at last.
                show_ylabel=jj == (num_parameters - 1),
                alpha=line_alpha,
            )
        if show_uniform_region:
            _plot_cdf_region_expected_under_uniformity(
                num_sbc_runs,
                num_bins,
                num_repeats,
                alpha=uniform_region_alpha,
            )
        # show legend on the last subplot.
        plt.legend(**legend_kwargs)
    return fig, ax  # pyright: ignore[reportReturnType]
def _plot_ranks_as_hist(
    ranks: np.ndarray,
    num_bins: int,
    num_posterior_samples: int,
    ranks_label: Optional[str] = None,
    xlabel: Optional[str] = None,
    color: str = "firebrick",
    alpha: float = 0.8,
    show_ylabel: bool = False,
    num_ticks: int = 3,
    xlim_offset_factor: float = 0.1,
) -> None:
    """Draw a histogram of SBC ranks on the current matplotlib axis.

    Args:
        ranks: SBC ranks in shape (num_sbc_runs, ).
        num_bins: number of histogram bins, recommendation is num_sbc_runs / 20.
        num_posterior_samples: number of posterior samples used for ranking;
            defines the x-range of the plot.
        ranks_label: label for the ranks, e.g., when comparing ranks of
            different methods.
        xlabel: label for the current parameter; defaults to "posterior rank".
        color: histogram color, default from Talts et al.
        alpha: histogram transparency.
        show_ylabel: whether to show the y-label "counts".
        num_ticks: number of ticks on the x-axis.
        xlim_offset_factor: factor for empty space left and right of the
            histogram.
    """
    # Empty margin on both sides of the histogram, in rank units.
    pad = int(num_posterior_samples * xlim_offset_factor)
    plt.hist(ranks, bins=num_bins, label=ranks_label, color=color, alpha=alpha)
    if show_ylabel:
        plt.ylabel("counts")
    else:
        # Hide the y axis entirely when no label is requested.
        plt.yticks([])
    plt.xlim(-pad, num_posterior_samples + pad)
    plt.xticks(np.linspace(0, num_posterior_samples, num_ticks))
    plt.xlabel(xlabel if xlabel is not None else "posterior rank")
def _plot_ranks_as_cdf(
    ranks: np.ndarray,
    num_bins: int,
    num_repeats: int,
    ranks_label: Optional[str] = None,
    xlabel: Optional[str] = None,
    color: Optional[str] = None,
    alpha: float = 0.8,
    show_ylabel: bool = True,
    num_ticks: int = 3,
) -> None:
    """Draw the empirical CDF of SBC ranks on the current matplotlib axis.

    Args:
        ranks: SBC ranks in shape (num_sbc_runs, ).
        num_bins: number of histogram bins, recommendation is num_sbc_runs / 20.
        num_repeats: number of repeats of each CDF step, i.e., resolution of
            the eCDF.
        ranks_label: label for the ranks, e.g., when comparing ranks of
            different methods.
        xlabel: label for the current parameter; defaults to "posterior rank".
        color: line color for the cdf.
        alpha: line transparency.
        show_ylabel: whether to show the y-label "empirical CDF".
        num_ticks: number of ticks on the x-axis.
    """
    # Bin the ranks, then accumulate counts to obtain the empirical CDF.
    counts, *_ = np.histogram(ranks, bins=num_bins, density=False)
    ecdf = counts.cumsum()
    # Repeat each CDF value `num_repeats` times to render visible stair steps.
    xs = np.linspace(0, num_bins, num_repeats * num_bins)
    ys = np.repeat(ecdf / ecdf.max(), num_repeats)
    plt.plot(xs, ys, label=ranks_label, color=color, alpha=alpha)
    if show_ylabel:
        plt.yticks(np.linspace(0, 1, 3))
        plt.ylabel("empirical CDF")
    else:
        # Keep the tick marks but drop the tick labels.
        plt.yticks(np.linspace(0, 1, 3), [])
    plt.ylim(0, 1)
    plt.xlim(0, num_bins)
    plt.xticks(np.linspace(0, num_bins, num_ticks))
    plt.xlabel(xlabel if xlabel is not None else "posterior rank")
def _plot_cdf_region_expected_under_uniformity(
    num_sbc_runs: int,
    num_bins: int,
    num_repeats: int,
    alpha: float = 0.2,
    color: str = "gray",
) -> None:
    """Shade the band of empirical CDFs expected under uniformity on the current axis."""
    # Median bin count under a uniform rank distribution, one entry per bin.
    expected = binom(num_sbc_runs, p=1 / num_bins).ppf(0.5) * np.ones(num_bins)
    expected_cdf = expected.cumsum() / expected.sum()
    # Nudge the final value (exactly 1.0) below 1 by epsilon so that the
    # binomial ppf below yields finite confidence bounds.
    expected_cdf[-1] -= 1e-9
    # 99% pointwise confidence band for the cumulative counts.
    lower = np.array([binom(num_sbc_runs, p=p).ppf(0.005) for p in expected_cdf])
    upper = np.array([binom(num_sbc_runs, p=p).ppf(0.995) for p in expected_cdf])
    # Shade the band, repeating values to match the stair-step resolution of
    # the plotted eCDFs.
    plt.fill_between(
        x=np.linspace(0, num_bins, num_repeats * num_bins),
        y1=np.repeat(lower / lower.max(), num_repeats),
        y2=np.repeat(upper / upper.max(), num_repeats),
        color=color,
        alpha=alpha,
        label="expected under uniformity",
    )
def _plot_hist_region_expected_under_uniformity(
    num_sbc_runs: int,
    num_bins: int,
    num_posterior_samples: int,
    alpha: float = 0.2,
    color: str = "gray",
) -> None:
    """Shade the band of histogram counts expected under uniformity on the current axis."""
    # Per-bin count under uniformity follows Binomial(num_sbc_runs, 1/(bins+1));
    # take the central 99% interval as the expected band.
    null_dist = binom(num_sbc_runs, p=1 / (num_bins + 1))
    lower = null_dist.ppf(0.005)
    upper = null_dist.ppf(0.995)
    plt.fill_between(
        x=np.linspace(0, num_posterior_samples, num_bins),
        y1=np.full(num_bins, lower),
        y2=np.full(num_bins, upper),
        color=color,
        alpha=alpha,
        label="expected under uniformity",
    )
# Diagnostics for hypothesis tests
def pp_plot(
    scores: Union[List[np.ndarray], Dict[Any, np.ndarray]],
    scores_null: Union[List[np.ndarray], Dict[Any, np.ndarray]],
    true_scores_null: np.ndarray,
    conf_alpha: float,
    n_alphas: int = 100,
    labels: Optional[List[str]] = None,
    colors: Optional[List[str]] = None,
    ax: Optional[Axes] = None,
    **kwargs: Any,
) -> Axes:
    """Probability - Probability (P-P) plot for hypothesis tests
    to assess the validity of one (or several) estimator(s).
    See [here](https://en.wikipedia.org/wiki/P%E2%80%93P_plot) for more details.
    Args:
        scores: test scores estimated on observed data and evaluated on the test set,
            of shape (n_eval,). One array per estimator.
        scores_null: test scores estimated under the null hypothesis and evaluated on
            the test set, of shape (n_eval,). One array per null trial.
        true_scores_null: theoretical true scores under the null hypothesis,
            of shape (n_eval,).
        conf_alpha: significance level of the hypothesis test.
        n_alphas: number of cdf-values to compute the P-P plot, defaults to 100.
        labels: labels for the estimators, defaults to None.
        colors: colors for the estimators, defaults to None.
        ax: axis to plot on, defaults to None (uses the current axis).
        kwargs: additional arguments for matplotlib plotting.
    Returns:
        ax: axes with the P-P plot.
    """
    if ax is None:
        ax = plt.gca()
    ax_: Axes = cast(Axes, ax)  # cast to fix pyright error
    # Grid of cdf levels at which the P-P curve is evaluated.
    alphas = np.linspace(0, 1, n_alphas)
    # pp_vals for the true null hypothesis
    pp_vals_true = pp_vals(true_scores_null, alphas)
    ax_.plot(alphas, pp_vals_true, "--", color="black", label="True Null (H0)")
    # pp_vals for the estimated null hypothesis over the multiple trials
    # NOTE(review): `scores_null[t]` with t in range(len(...)) works for lists;
    # for dicts it assumes integer keys 0..n-1 — confirm with callers.
    pp_vals_null = []
    for t in range(len(scores_null)):
        pp_vals_null.append(pp_vals(scores_null[t], alphas))
    pp_vals_null = np.array(pp_vals_null)
    # confidence region: pointwise quantiles across the null trials
    quantiles = np.quantile(pp_vals_null, [conf_alpha / 2, 1 - conf_alpha / 2], axis=0)
    ax_.fill_between(
        alphas,
        quantiles[0],
        quantiles[1],
        color="grey",
        alpha=0.2,
        label=f"{(1 - conf_alpha) * 100}% confidence region",
    )
    # pp_vals for the observed data
    # NOTE(review): iterating `scores` here yields dict KEYS when a dict is
    # passed; presumably callers pass lists of arrays — verify.
    for i, p_ in enumerate(scores):
        pp_vals_o = pp_vals(p_, alphas)
        if labels is not None:
            kwargs["label"] = labels[i]
        if colors is not None:
            kwargs["color"] = colors[i]
        ax_.plot(alphas, pp_vals_o, **kwargs)
    return ax_
def marginal_plot_with_probs_intensity(
    probs_per_marginal: dict,
    marginal_dim: int,
    n_bins: int = 20,
    vmin: float = 0.0,
    vmax: float = 1.0,
    cmap_name: str = "Spectral_r",
    show_colorbar: bool = True,
    label: Optional[str] = None,
    ax: Optional[Axes] = None,
) -> Axes:
    """Plot 1d or 2d marginal histogram of samples of the density estimator
    with probabilities as color intensity.

    Args:
        probs_per_marginal: dict with predicted class probabilities under key
            "probs" and samples under key "s" (1d) or "s_1"/"s_2" (2d), as
            obtained from `sbi.utils.analysis_utils.get_probs_per_marginal`.
        marginal_dim: dimension of the marginal histogram to plot (1 or 2).
        n_bins: number of bins for the histogram, defaults to 20.
        vmin: minimum value for the color intensity, defaults to 0.
        vmax: maximum value for the color intensity, defaults to 1.
        cmap_name: colormap for the color intensity, defaults to "Spectral_r".
        show_colorbar: whether to show the colorbar, defaults to True.
        label: label for the colorbar, defaults to None.
        ax (matplotlib.axes.Axes): axes to plot on, defaults to None.

    Returns:
        ax (matplotlib.axes.Axes): axes with the plot.
    """
    assert marginal_dim in [1, 2], "Only 1d or 2d marginals are supported."
    if ax is None:
        ax = plt.gca()
    ax_: Axes = cast(Axes, ax)  # cast to fix pyright error
    if label is None:
        label = "probability"
    # get colormap
    # NOTE(review): `cm.get_cmap` is deprecated since matplotlib 3.7; consider
    # `matplotlib.colormaps[cmap_name]` when bumping the minimum version.
    cmap = cm.get_cmap(cmap_name)
    # case of 1d marginal
    if marginal_dim == 1:
        # extract bins and patches
        _, bins, patches = ax_.hist(
            probs_per_marginal["s"], n_bins, density=True, color="green"
        )
        # create bins: all samples between bin edges are assigned to the same bin
        probs_per_marginal["bins"] = np.searchsorted(bins, probs_per_marginal["s"]) - 1
        probs_per_marginal["bins"][probs_per_marginal["bins"] < 0] = 0
        # get mean prob for each bin (same as pandas groupby method)
        array_probs = np.concatenate(
            [probs_per_marginal["bins"][:, None], probs_per_marginal["probs"][:, None]],
            axis=1,
        )
        array_probs = array_probs[array_probs[:, 0].argsort()]
        weights = np.split(
            array_probs[:, 1], np.unique(array_probs[:, 0], return_index=True)[1][1:]
        )
        weights = np.array([np.mean(w) for w in weights])
        # remove empty bins (renamed from `id`, which shadowed the builtin)
        empty_bins = list(set(range(n_bins)) - set(probs_per_marginal["bins"]))
        patches = np.delete(np.array(patches), empty_bins)
        bins = np.delete(bins, empty_bins)
        # normalize color intensity
        norm = Normalize(vmin=vmin, vmax=vmax)
        # set color intensity
        for w, p in zip(weights, patches):
            p.set_facecolor(cmap(w))
        if show_colorbar:
            plt.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax_, label=label)
    if marginal_dim == 2:
        # extract bin edges
        _, x, y = np.histogram2d(
            probs_per_marginal["s_1"], probs_per_marginal["s_2"], bins=n_bins
        )
        # create bins: all samples between bin edges are assigned to the same bin
        probs_per_marginal["bins_x"] = np.searchsorted(x, probs_per_marginal["s_1"]) - 1
        probs_per_marginal["bins_y"] = np.searchsorted(y, probs_per_marginal["s_2"]) - 1
        probs_per_marginal["bins_x"][probs_per_marginal["bins_x"] < 0] = 0
        probs_per_marginal["bins_y"][probs_per_marginal["bins_y"] < 0] = 0
        # extract unique bin pairs
        group_idx = np.concatenate(
            [
                probs_per_marginal["bins_x"][:, None],
                probs_per_marginal["bins_y"][:, None],
            ],
            axis=1,
        )
        unique_bins = np.unique(group_idx, return_counts=True, axis=0)[0]
        # get mean prob for each bin (same as pandas groupby method)
        mean_probs = np.zeros((len(unique_bins),))
        for i in range(len(unique_bins)):
            idx = np.where((group_idx == unique_bins[i]).all(axis=1))
            mean_probs[i] = np.mean(probs_per_marginal["probs"][idx])
        # create weight matrix with nan values for non-existing bins
        weights = np.zeros((n_bins, n_bins))
        weights[:] = np.nan
        weights[unique_bins[:, 0], unique_bins[:, 1]] = mean_probs
        # set color intensity
        norm = Normalize(vmin=vmin, vmax=vmax)
        for i in range(len(x) - 1):
            for j in range(len(y) - 1):
                facecolor = cmap(norm(weights.T[j, i]))
                # If no sample in bin, set color to white. BUGFIX: the previous
                # check `weights.T[j, i] == np.nan` is always False because NaN
                # never compares equal; use np.isnan so the branch can fire.
                if np.isnan(weights.T[j, i]):
                    facecolor = "white"
                rect = Rectangle(
                    (x[i], y[j]),
                    x[i + 1] - x[i],
                    y[j + 1] - y[j],
                    facecolor=facecolor,
                    edgecolor="none",
                )
                ax_.add_patch(rect)
        if show_colorbar:
            plt.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax_, label=label)
    return ax_
# Customized plotting functions for LC2ST
def pp_plot_lc2st(
    probs: Union[List[np.ndarray], Dict[Any, np.ndarray]],
    probs_null: Union[List[np.ndarray], Dict[Any, np.ndarray]],
    conf_alpha: float,
    **kwargs: Any,
) -> Axes:
    """Probability - Probability (P-P) plot for LC2ST.

    Args:
        probs: predicted probability on observed data and evaluated on the test
            set, of shape (n_eval,). One array per estimator.
        probs_null: predicted probability under the null hypothesis and
            evaluated on the test set, of shape (n_eval,). One array per null
            trial.
        conf_alpha: significance level of the hypothesis test.
        kwargs: additional arguments for `pp_plot`.

    Returns:
        ax: axes with the P-P plot.
    """
    # Under the null the classifier cannot discriminate, so the predicted
    # probability is 0.5 everywhere (chance level).
    chance_level = np.full(len(probs), 0.5)
    return pp_plot(
        scores=probs,
        scores_null=probs_null,
        true_scores_null=chance_level,
        conf_alpha=conf_alpha,
        **kwargs,
    )
def plot_tarp(
    ecp: Tensor, alpha: Tensor, title: Optional[str] = None
) -> Tuple[Figure, Axes]:
    """Plot the TARP expected coverage probability against the credibility level.

    The closer the TARP curve lies to the diagonal, the better calibrated the
    posterior is.

    Args:
        ecp: array of expected coverage probabilities.
        alpha: array of credibility levels.
        title: optional title for the plot; defaults to an empty string.

    Returns:
        fig: the figure object.
        ax: the axes object.
    """
    fig = plt.figure(figsize=(6, 6))
    ax: Axes = plt.gca()
    # TARP curve vs. the ideal (perfectly calibrated) diagonal.
    ax.plot(alpha, ecp, color="blue", label="TARP")
    ax.plot(alpha, alpha, color="black", linestyle="--", label="ideal")
    # Batch the axis cosmetics in a single call.
    ax.set(
        xlabel=r"Credibility Level $\alpha$",
        ylabel=r"Expected Coverage Probability",
        xlim=(0.0, 1.0),
        ylim=(0.0, 1.0),
        title=title or "",
    )
    ax.legend()
    return fig, ax  # type: ignore
# TO BE DEPRECATED
# ----------------
def pairplot_dep(
    samples: Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor],
    points: Optional[
        Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor]
    ] = None,
    limits: Optional[Union[List, torch.Tensor]] = None,
    subset: Optional[List[int]] = None,
    offdiag: Optional[Union[List[Optional[str]], str]] = "hist",
    diag: Optional[Union[List[Optional[str]], str]] = "hist",
    figsize: Optional[Tuple] = (10, 10),
    labels: Optional[List[str]] = None,
    ticks: Optional[Union[List, torch.Tensor]] = None,
    upper: Optional[Union[List[Optional[str]], str]] = None,
    fig: Optional[FigureBase] = None,
    axes: Optional[Axes] = None,
    **kwargs: Optional[Any],
) -> Tuple[FigureBase, Axes]:
    """
    Plot samples in a 2D grid showing marginals and pairwise marginals.
    Each of the diagonal plots can be interpreted as a 1D-marginal of the distribution
    that the samples were drawn from. Each upper-diagonal plot can be interpreted as a
    2D-marginal of the distribution.
    Args:
        samples: Samples used to build the histogram.
        points: List of additional points to scatter.
        limits: Array containing the plot xlim for each parameter dimension. If None,
            just use the min and max of the passed samples
        subset: List containing the dimensions to plot. E.g. subset=[1,3] will
            plot only the 1st and 3rd dimension but will discard the 0th and 2nd (and,
            if they exist, the 4th, 5th and so on).
        offdiag: Plotting style for upper diagonal, {hist, scatter, contour, cond,
            None}.
        upper: deprecated, use offdiag instead.
        diag: Plotting style for diagonal, {hist, cond, None}.
        figsize: Size of the entire figure.
        labels: List of strings specifying the names of the parameters.
        ticks: Position of the ticks.
        fig: matplotlib figure to plot on.
        axes: matplotlib axes corresponding to fig.
        **kwargs: Additional arguments to adjust the plot, e.g., `samples_colors`,
            `points_colors` and many more, see the source code in `_get_default_opts()`
            in `sbi.analysis.plot` for details.
    Returns: figure and axis of posterior distribution plot
    """
    opts = _get_default_opts()
    # update the defaults dictionary by the current values of the variables (passed by
    # the user)
    # NOTE: `_update(opts, locals())` keys the opts dict on the local variable
    # NAMES above — renaming any parameter or local would silently change which
    # options get set.
    opts = _update(opts, locals())
    opts = _update(opts, kwargs)
    samples, dim, limits = prepare_for_plot(samples, limits)
    # checks.
    if opts["legend"]:
        assert len(opts["samples_labels"]) >= len(
            samples
        ), "Provide at least as many labels as samples."
    # Honor the deprecated `upper` alias by overriding `offdiag`.
    if opts["upper"] is not None:
        opts["offdiag"] = opts["upper"]
    # Prepare diag/upper/lower: broadcast a single style string to one entry
    # per sample set.
    if not isinstance(opts["diag"], list):
        opts["diag"] = [opts["diag"] for _ in range(len(samples))]
    if not isinstance(opts["offdiag"], list):
        opts["offdiag"] = [opts["offdiag"] for _ in range(len(samples))]
    # if type(opts['lower']) is not list:
    #     opts['lower'] = [opts['lower'] for _ in range(len(samples))]
    opts["lower"] = None
    diag_func = get_diag_func(samples, limits, opts, **kwargs)
    def offdiag_func(row, col, limits, **kwargs):
        # Render the (row, col) pairwise marginal for every sample set using
        # the per-set style in opts["offdiag"].
        if len(samples) > 0:
            for n, v in enumerate(samples):
                if opts["offdiag"][n] == "hist" or opts["offdiag"][n] == "hist2d":
                    hist, xedges, yedges = np.histogram2d(
                        v[:, col],
                        v[:, row],
                        range=[
                            [limits[col][0], limits[col][1]],
                            [limits[row][0], limits[row][1]],
                        ],
                        **opts["hist_offdiag"],
                    )
                    plt.imshow(
                        hist.T,
                        origin="lower",
                        extent=(
                            xedges[0],
                            xedges[-1],
                            yedges[0],
                            yedges[-1],
                        ),
                        aspect="auto",
                    )
                elif opts["offdiag"][n] in [
                    "kde",
                    "kde2d",
                    "contour",
                    "contourf",
                ]:
                    # KDE-based styles share the density evaluation on a grid.
                    density = gaussian_kde(
                        v[:, [col, row]].T,
                        bw_method=opts["kde_offdiag"]["bw_method"],
                    )
                    X, Y = np.meshgrid(
                        np.linspace(
                            limits[col][0],
                            limits[col][1],
                            opts["kde_offdiag"]["bins"],
                        ),
                        np.linspace(
                            limits[row][0],
                            limits[row][1],
                            opts["kde_offdiag"]["bins"],
                        ),
                    )
                    positions = np.vstack([X.ravel(), Y.ravel()])
                    Z = np.reshape(density(positions).T, X.shape)
                    if opts["offdiag"][n] == "kde" or opts["offdiag"][n] == "kde2d":
                        plt.imshow(
                            Z,
                            extent=(
                                limits[col][0],
                                limits[col][1],
                                limits[row][0],
                                limits[row][1],
                            ),
                            origin="lower",
                            aspect="auto",
                        )
                    elif opts["offdiag"][n] == "contour":
                        if opts["contour_offdiag"]["percentile"]:
                            Z = probs2contours(Z, opts["contour_offdiag"]["levels"])
                        else:
                            # Min-max normalize so levels are comparable.
                            Z = (Z - Z.min()) / (Z.max() - Z.min())
                        plt.contour(
                            X,
                            Y,
                            Z,
                            origin="lower",
                            extent=[
                                limits[col][0],
                                limits[col][1],
                                limits[row][0],
                                limits[row][1],
                            ],
                            colors=opts["samples_colors"][n],
                            levels=opts["contour_offdiag"]["levels"],
                        )
                    else:
                        # "contourf" falls through unhandled here.
                        pass
                elif opts["offdiag"][n] == "scatter":
                    plt.scatter(
                        v[:, col],
                        v[:, row],
                        color=opts["samples_colors"][n],
                        **opts["scatter_offdiag"],
                    )
                elif opts["offdiag"][n] == "plot":
                    plt.plot(
                        v[:, col],
                        v[:, row],
                        color=opts["samples_colors"][n],
                        **opts["plot_offdiag"],
                    )
                else:
                    pass
    return _arrange_plots(
        diag_func, offdiag_func, dim, limits, points, opts, fig=fig, axes=axes
    )
def marginal_plot_dep(
    samples: Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor],
    points: Optional[
        Union[List[np.ndarray], List[torch.Tensor], np.ndarray, torch.Tensor]
    ] = None,
    limits: Optional[Union[List, torch.Tensor]] = None,
    subset: Optional[List[int]] = None,
    diag: Optional[Union[List[Optional[str]], str]] = "hist",
    figsize: Optional[Tuple] = (10, 10),
    labels: Optional[List[str]] = None,
    ticks: Optional[Union[List, torch.Tensor]] = None,
    fig: Optional[FigureBase] = None,
    axes: Optional[Axes] = None,
    **kwargs: Optional[Any],
) -> Tuple[FigureBase, Axes]:
    """
    Plot samples in a row showing 1D marginals of selected dimensions.
    Each of the plots can be interpreted as a 1D-marginal of the distribution
    that the samples were drawn from.
    Args:
        samples: Samples used to build the histogram.
        points: List of additional points to scatter.
        limits: Array containing the plot xlim for each parameter dimension. If None,
            just use the min and max of the passed samples
        subset: List containing the dimensions to plot. E.g. subset=[1,3] will
            plot only the 1st and 3rd dimension but will discard the 0th and 2nd (and,
            if they exist, the 4th, 5th and so on).
        diag: Plotting style for 1D marginals, {hist, kde cond, None}.
        figsize: Size of the entire figure.
        labels: List of strings specifying the names of the parameters.
        ticks: Position of the ticks.
        points_colors: Colors of the `points`.
        fig: matplotlib figure to plot on.
        axes: matplotlib axes corresponding to fig.
        **kwargs: Additional arguments to adjust the plot, e.g., `samples_colors`,
            `points_colors` and many more, see the source code in `_get_default_opts()`
            in `sbi.analysis.plot` for details.
    Returns: figure and axis of posterior distribution plot
    """
    opts = _get_default_opts()
    # update the defaults dictionary by the current values of the variables (passed by
    # the user)
    # NOTE: `_update(opts, locals())` keys the opts dict on the local variable
    # NAMES above — renaming any parameter or local would silently change which
    # options get set.
    opts = _update(opts, locals())
    opts = _update(opts, kwargs)
    samples, dim, limits = prepare_for_plot(samples, limits)
    # Prepare diag/upper/lower: broadcast a single style string to one entry
    # per sample set.
    if not isinstance(opts["diag"], list):
        opts["diag"] = [opts["diag"] for _ in range(len(samples))]
    diag_func = get_diag_func(samples, limits, opts, **kwargs)
    # offdiag_func=None signals a flat, 1-row arrangement.
    return _arrange_plots(
        diag_func, None, dim, limits, points, opts, fig=fig, axes=axes
    )
def get_diag_func(samples, limits, opts, **kwargs):
    """
    Return a closure that draws the 1D marginal plot for the parameter
    indexed by `row`, for every sample set, using the styles in opts["diag"].
    """
    warn(
        "get_diag_func will be deprecated, use get_diag_funcs instead",
        PendingDeprecationWarning,
        stacklevel=2,
    )
    def diag_func(row, **kwargs):
        # Nothing to draw without samples.
        if len(samples) == 0:
            return
        for n, v in enumerate(samples):
            style = opts["diag"][n]
            if style == "hist":
                plt.hist(
                    v[:, row],
                    color=opts["samples_colors"][n],
                    label=opts["samples_labels"][n],
                    **opts["hist_diag"],
                )
            elif style == "kde":
                # Evaluate a Gaussian KDE on a grid spanning the axis limits.
                density = gaussian_kde(
                    v[:, row], bw_method=opts["kde_diag"]["bw_method"]
                )
                grid = np.linspace(
                    limits[row, 0], limits[row, 1], opts["kde_diag"]["bins"]
                )
                plt.plot(grid, density(grid), color=opts["samples_colors"][n])
            elif "offdiag" in opts and opts["offdiag"][n] == "scatter":
                # Scatter off-diagonals get a vertical line per sample on the diagonal.
                for single_sample in v:
                    plt.axvline(
                        single_sample[row],
                        color=opts["samples_colors"][n],
                        **opts["scatter_diag"],
                    )
    return diag_func
def _arrange_plots(
    diag_func, offdiag_func, dim, limits, points, opts, fig=None, axes=None
):
    """
    Arranges the plots for any function that plots parameters either in a row of 1D
    marginals or a pairplot setting.
    Args:
        diag_func: Plotting function that will be executed for the diagonal elements of
            the plot (or the columns of a row of 1D marginals). It will be passed the
            current `row` (i.e. which parameter that is to be plotted) and the `limits`
            for all dimensions.
        offdiag_func: Plotting function that will be executed for the upper-diagonal
            elements of the plot. It will be passed the current `row` and `col` (i.e.
            which parameters are to be plotted and the `limits` for all dimensions. None
            if we are in a 1D setting.
        dim: The dimensionality of the density.
        limits: Limits for each parameter.
        points: Additional points to be scatter-plotted.
        opts: Dictionary built by the functions that call `_arrange_plots`. Must
            contain at least `labels`, `subset`, `figsize`, `subplots`,
            `fig_subplots_adjust`, `title`, `title_format`, ..
        fig: matplotlib figure to plot on.
        axes: matplotlib axes corresponding to fig.
    Returns: figure and axis
    """
    warn(
        "_arrange_plots will be deprecated, use _arrange_grid instead",
        PendingDeprecationWarning,
        stacklevel=2,
    )
    # Prepare points: normalize to a list of 2D numpy arrays.
    if points is None:
        points = []
    if not isinstance(points, list):
        points = ensure_numpy(points)  # type: ignore
        points = [points]
    points = [np.atleast_2d(p) for p in points]
    points = [np.atleast_2d(ensure_numpy(p)) for p in points]
    # TODO: add asserts checking compatibility of dimensions
    # Prepare labels: default to "dim i" when none are given.
    if opts["labels"] == [] or opts["labels"] is None:
        labels_dim = ["dim {}".format(i + 1) for i in range(dim)]
    else:
        labels_dim = opts["labels"]
    # Prepare ticks: broadcast a single tick spec to all dimensions.
    if opts["ticks"] == [] or opts["ticks"] is None:
        ticks = None
    else:
        if len(opts["ticks"]) == 1:
            ticks = [opts["ticks"][0] for _ in range(dim)]
        else:
            ticks = opts["ticks"]
    # Figure out if we subset the plot
    subset = opts["subset"]
    if subset is None:
        rows = cols = dim
        subset = [i for i in range(dim)]
    else:
        if isinstance(subset, int):
            subset = [subset]
        elif isinstance(subset, list):
            pass
        else:
            raise NotImplementedError
        rows = cols = len(subset)
    # A missing offdiag_func means a flat, single-row layout of 1D marginals.
    flat = offdiag_func is None
    if flat:
        rows = 1
        opts["lower"] = None
    # Create fig and axes if they were not passed.
    if fig is None or axes is None:
        fig, axes = plt.subplots(
            rows, cols, figsize=opts["figsize"], **opts["subplots"]
        )
    else:
        assert axes.shape == (
            rows,
            cols,
        ), f"Passed axes must match subplot shape: {rows, cols}."
    # Cast to ndarray in case of 1D subplots.
    axes = np.array(axes).reshape(rows, cols)
    # Style figure
    fig.subplots_adjust(**opts["fig_subplots_adjust"])
    fig.suptitle(opts["title"], **opts["title_format"])
    # Style axes: walk every (row, col) pair in the subset grid.
    row_idx = -1
    for row in range(dim):
        if row not in subset:
            continue
        if not flat:
            row_idx += 1
        col_idx = -1
        for col in range(dim):
            if col not in subset:
                continue
            else:
                col_idx += 1
            # Classify the cell: diagonal, upper triangle, or lower triangle.
            if flat or row == col:
                current = "diag"
            elif row < col:
                current = "offdiag"
            else:
                current = "lower"
            ax = axes[row_idx, col_idx]
            plt.sca(ax)
            # Background color
            if (
                current in opts["fig_bg_colors"]
                and opts["fig_bg_colors"][current] is not None
            ):
                ax.set_facecolor(opts["fig_bg_colors"][current])
            # Axes: a None style for this cell type disables the cell.
            if opts[current] is None:
                ax.axis("off")
                continue
            # Limits
            ax.set_xlim((limits[col][0], limits[col][1]))
            if current != "diag":
                ax.set_ylim((limits[row][0], limits[row][1]))
            # Ticks
            if ticks is not None:
                ax.set_xticks((ticks[col][0], ticks[col][1]))
                if current != "diag":
                    ax.set_yticks((ticks[row][0], ticks[row][1]))
            # Despine
            ax.spines["right"].set_visible(False)
            ax.spines["top"].set_visible(False)
            ax.spines["bottom"].set_position(("outward", opts["despine"]["offset"]))
            # Formatting axes
            if current == "diag":  # diagonal cells
                if opts["lower"] is None or col == dim - 1 or flat:
                    _format_axis(
                        ax,
                        xhide=False,
                        xlabel=labels_dim[col],
                        yhide=True,
                        tickformatter=opts["tickformatter"],
                    )
                else:
                    _format_axis(ax, xhide=True, yhide=True)
            else:  # off-diagonal cells
                if row == dim - 1:
                    _format_axis(
                        ax,
                        xhide=False,
                        xlabel=labels_dim[col],
                        yhide=True,
                        tickformatter=opts["tickformatter"],
                    )
                else:
                    _format_axis(ax, xhide=True, yhide=True)
            if opts["tick_labels"] is not None:
                ax.set_xticklabels((
                    str(opts["tick_labels"][col][0]),
                    str(opts["tick_labels"][col][1]),
                ))
            # Diagonals
            if current == "diag":
                diag_func(row=col, limits=limits)
                if len(points) > 0:
                    # Mark each additional point as a vertical line spanning the axis.
                    extent = ax.get_ylim()
                    for n, v in enumerate(points):
                        plt.plot(
                            [v[:, col], v[:, col]],
                            extent,
                            color=opts["points_colors"][n],
                            **opts["points_diag"],
                            label=opts["points_labels"][n],
                        )
                    if opts["legend"] and col == 0:
                        plt.legend(**opts["legend_kwargs"])
            # Off-diagonals
            else:
                offdiag_func(
                    row=row,
                    col=col,
                    limits=limits,
                )
                if len(points) > 0:
                    for n, v in enumerate(points):
                        plt.plot(
                            v[:, col],
                            v[:, row],
                            color=opts["points_colors"][n],
                            **opts["points_offdiag"],
                        )
    # When only a subset of dimensions is shown, hint at the omitted ones
    # with "..." markers beyond the last column (and last row in grid mode).
    if len(subset) < dim:
        if flat:
            ax = axes[0, len(subset) - 1]
            x0, x1 = ax.get_xlim()
            y0, y1 = ax.get_ylim()
            text_kwargs = {"fontsize": plt.rcParams["font.size"] * 2.0}  # pyright: ignore[reportOptionalOperand]
            ax.text(x1 + (x1 - x0) / 8.0, (y0 + y1) / 2.0, "...", **text_kwargs)
        else:
            for row in range(len(subset)):
                ax = axes[row, len(subset) - 1]
                x0, x1 = ax.get_xlim()
                y0, y1 = ax.get_ylim()
                text_kwargs = {"fontsize": plt.rcParams["font.size"] * 2.0}  # pyright: ignore[reportOptionalOperand]
                ax.text(x1 + (x1 - x0) / 8.0, (y0 + y1) / 2.0, "...", **text_kwargs)
                if row == len(subset) - 1:
                    ax.text(
                        x1 + (x1 - x0) / 12.0,
                        y0 - (y1 - y0) / 1.5,
                        "...",
                        rotation=-45,
                        **text_kwargs,
                    )
    return fig, axes
def _get_default_opts() -> dict:
    """Return the legacy all-in-one dictionary of default plotting options.

    Deprecated in favour of the split ``*_kwargs`` helpers; kept for
    backward compatibility.  The returned dict bundles figure, diagonal
    and off-diagonal styling defaults used by the pairplot routines.

    Returns
    -------
    dict
        Mutable dict of default options; callers may override entries.
    """
    # NOTE(review): the two concatenated string fragments below join without a
    # separator ("...fig_kwargs,get_default...") and the helper names are
    # inconsistently underscored -- runtime text, so not touched in a
    # doc-only pass; confirm the intended helper names.
    warn(
        "_get_default_opts will be deprecated, use _get_default_fig_kwargs,"
        "get_default_diag_kwargs, get_default_offdiag_kwargs instead",
        PendingDeprecationWarning,
        stacklevel=2,
    )
    return {
        # title and legend
        "title": None,
        "legend": False,
        "legend_kwargs": {},
        # labels
        "points_labels": [f"points_{idx}" for idx in range(10)],  # for points
        "samples_labels": [f"samples_{idx}" for idx in range(10)],  # for samples
        # colors: take even colors for samples, odd colors for points
        "samples_colors": plt.rcParams["axes.prop_cycle"].by_key()["color"][0::2],  # pyright: ignore[reportOptionalMemberAccess]
        "points_colors": plt.rcParams["axes.prop_cycle"].by_key()["color"][1::2],  # pyright: ignore[reportOptionalMemberAccess]
        # ticks
        "ticks": [],
        "tickformatter": mpl.ticker.FormatStrFormatter("%g"),  # type: ignore
        "tick_labels": None,
        # options for hist
        "hist_diag": {
            "alpha": 1.0,
            "bins": 50,
            "density": False,
            "histtype": "step",
        },
        "hist_offdiag": {
            # 'edgecolor': 'none',
            # 'linewidth': 0.0,
            "bins": 50,
        },
        # options for kde
        "kde_diag": {"bw_method": "scott", "bins": 50, "color": "black"},
        "kde_offdiag": {"bw_method": "scott", "bins": 50},
        # options for contour
        "contour_offdiag": {"levels": [0.68], "percentile": True},
        # options for scatter
        "scatter_offdiag": {
            "alpha": 0.5,
            "edgecolor": "none",
            "rasterized": False,
        },
        "scatter_diag": {},
        # options for plot
        "plot_offdiag": {},
        # formatting points (scale, markers)
        "points_diag": {},
        "points_offdiag": {
            "marker": ".",
            "markersize": 10,
        },
        # other options
        "fig_bg_colors": {"offdiag": None, "diag": None, "lower": None},
        "fig_subplots_adjust": {
            "top": 0.9,
        },
        "subplots": {},
        "despine": {
            "offset": 5,
        },
        "title_format": {"fontsize": 16},
    }
|
sbi-devREPO_NAMEsbiPATH_START.@sbi_extracted@sbi-main@sbi@analysis@plot.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/scattercarpet/marker/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Styling of the border line drawn around scattercarpet marker points."""

    # NOTE: this class is auto-generated from the plotly schema; the property
    # getters/setters simply proxy dict-style lookups on the base type.
    # class properties
    # --------------------
    _parent_path_str = "scattercarpet.marker"
    _path_str = "scattercarpet.marker.line"
    # Whitelist of property names accepted by this hierarchy node.
    _valid_props = {
        "autocolorscale",
        "cauto",
        "cmax",
        "cmid",
        "cmin",
        "color",
        "coloraxis",
        "colorscale",
        "colorsrc",
        "reversescale",
        "width",
        "widthsrc",
    }
    # autocolorscale
    # --------------
    @property
    def autocolorscale(self):
        """
        Determines whether the colorscale is a default palette
        (`autocolorscale: true`) or the palette determined by
        `marker.line.colorscale`. Has an effect only if in
        `marker.line.color`is set to a numerical array. In case
        `colorscale` is unspecified or `autocolorscale` is true, the
        default palette will be chosen according to whether numbers in
        the `color` array are all positive, all negative or mixed.
        The 'autocolorscale' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["autocolorscale"]
    @autocolorscale.setter
    def autocolorscale(self, val):
        self["autocolorscale"] = val
    # cauto
    # -----
    @property
    def cauto(self):
        """
        Determines whether or not the color domain is computed with
        respect to the input data (here in `marker.line.color`) or the
        bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
        effect only if in `marker.line.color`is set to a numerical
        array. Defaults to `false` when `marker.line.cmin` and
        `marker.line.cmax` are set by the user.
        The 'cauto' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["cauto"]
    @cauto.setter
    def cauto(self, val):
        self["cauto"] = val
    # cmax
    # ----
    @property
    def cmax(self):
        """
        Sets the upper bound of the color domain. Has an effect only if
        in `marker.line.color`is set to a numerical array. Value should
        have the same units as in `marker.line.color` and if set,
        `marker.line.cmin` must be set as well.
        The 'cmax' property is a number and may be specified as:
          - An int or float
        Returns
        -------
        int|float
        """
        return self["cmax"]
    @cmax.setter
    def cmax(self, val):
        self["cmax"] = val
    # cmid
    # ----
    @property
    def cmid(self):
        """
        Sets the mid-point of the color domain by scaling
        `marker.line.cmin` and/or `marker.line.cmax` to be equidistant
        to this point. Has an effect only if in `marker.line.color`is
        set to a numerical array. Value should have the same units as
        in `marker.line.color`. Has no effect when `marker.line.cauto`
        is `false`.
        The 'cmid' property is a number and may be specified as:
          - An int or float
        Returns
        -------
        int|float
        """
        return self["cmid"]
    @cmid.setter
    def cmid(self, val):
        self["cmid"] = val
    # cmin
    # ----
    @property
    def cmin(self):
        """
        Sets the lower bound of the color domain. Has an effect only if
        in `marker.line.color`is set to a numerical array. Value should
        have the same units as in `marker.line.color` and if set,
        `marker.line.cmax` must be set as well.
        The 'cmin' property is a number and may be specified as:
          - An int or float
        Returns
        -------
        int|float
        """
        return self["cmin"]
    @cmin.setter
    def cmin(self, val):
        self["cmin"] = val
    # color
    # -----
    @property
    def color(self):
        """
        Sets themarker.linecolor. It accepts either a specific color or
        an array of numbers that are mapped to the colorscale relative
        to the max and min values of the array or relative to
        `marker.line.cmin` and `marker.line.cmax` if set.
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A number that will be interpreted as a color
            according to scattercarpet.marker.line.colorscale
          - A list or array of any of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # coloraxis
    # ---------
    @property
    def coloraxis(self):
        """
        Sets a reference to a shared color axis. References to these
        shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
        etc. Settings for these shared color axes are set in the
        layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
        Note that multiple color scales can be linked to the same color
        axis.
        The 'coloraxis' property is an identifier of a particular
        subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
        optionally followed by an integer >= 1
        (e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
        Returns
        -------
        str
        """
        return self["coloraxis"]
    @coloraxis.setter
    def coloraxis(self, val):
        self["coloraxis"] = val
    # colorscale
    # ----------
    @property
    def colorscale(self):
        """
        Sets the colorscale. Has an effect only if in
        `marker.line.color`is set to a numerical array. The colorscale
        must be an array containing arrays mapping a normalized value
        to an rgb, rgba, hex, hsl, hsv, or named color string. At
        minimum, a mapping for the lowest (0) and highest (1) values
        are required. For example, `[[0, 'rgb(0,0,255)'], [1,
        'rgb(255,0,0)']]`. To control the bounds of the colorscale in
        color space, use`marker.line.cmin` and `marker.line.cmax`.
        Alternatively, `colorscale` may be a palette name string of the
        following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
        ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
        ridis,Cividis.
        The 'colorscale' property is a colorscale and may be
        specified as:
          - A list of colors that will be spaced evenly to create the colorscale.
            Many predefined colorscale lists are included in the sequential, diverging,
            and cyclical modules in the plotly.colors package.
          - A list of 2-element lists where the first element is the
            normalized color level value (starting at 0 and ending at 1),
            and the second item is a valid color string.
            (e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
          - One of the following named colorscales:
                ['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
                 'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
                 'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
                 'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
                 'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
                 'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
                 'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
                 'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
                 'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
                 'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
                 'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
                 'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
                 'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
                 'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
                 'ylorrd'].
            Appending '_r' to a named colorscale reverses it.
        Returns
        -------
        str
        """
        return self["colorscale"]
    @colorscale.setter
    def colorscale(self, val):
        self["colorscale"] = val
    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  color .
        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["colorsrc"]
    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val
    # reversescale
    # ------------
    @property
    def reversescale(self):
        """
        Reverses the color mapping if true. Has an effect only if in
        `marker.line.color`is set to a numerical array. If true,
        `marker.line.cmin` will correspond to the last color in the
        array and `marker.line.cmax` will correspond to the first
        color.
        The 'reversescale' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["reversescale"]
    @reversescale.setter
    def reversescale(self, val):
        self["reversescale"] = val
    # width
    # -----
    @property
    def width(self):
        """
        Sets the width (in px) of the lines bounding the marker points.
        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["width"]
    @width.setter
    def width(self, val):
        self["width"] = val
    # widthsrc
    # --------
    @property
    def widthsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  width .
        The 'widthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["widthsrc"]
    @widthsrc.setter
    def widthsrc(self, val):
        self["widthsrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `marker.line.colorscale`. Has an effect only if in
            `marker.line.color`is set to a numerical array. In case
            `colorscale` is unspecified or `autocolorscale` is
            true, the default palette will be chosen according to
            whether numbers in the `color` array are all positive,
            all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in
            `marker.line.color`) or the bounds set in
            `marker.line.cmin` and `marker.line.cmax`  Has an
            effect only if in `marker.line.color`is set to a
            numerical array. Defaults to `false` when
            `marker.line.cmin` and `marker.line.cmax` are set by
            the user.
        cmax
            Sets the upper bound of the color domain. Has an effect
            only if in `marker.line.color`is set to a numerical
            array. Value should have the same units as in
            `marker.line.color` and if set, `marker.line.cmin` must
            be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `marker.line.cmin` and/or `marker.line.cmax` to be
            equidistant to this point. Has an effect only if in
            `marker.line.color`is set to a numerical array. Value
            should have the same units as in `marker.line.color`.
            Has no effect when `marker.line.cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Has an effect
            only if in `marker.line.color`is set to a numerical
            array. Value should have the same units as in
            `marker.line.color` and if set, `marker.line.cmax` must
            be set as well.
        color
            Sets themarker.linecolor. It accepts either a specific
            color or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `marker.line.cmin` and
            `marker.line.cmax` if set.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorscale
            Sets the colorscale. Has an effect only if in
            `marker.line.color`is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use`marker.line.cmin` and `marker.line.cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Greys,YlGnBu,Greens,YlOrR
            d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
            ot,Blackbody,Earth,Electric,Viridis,Cividis.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        reversescale
            Reverses the color mapping if true. Has an effect only
            if in `marker.line.color`is set to a numerical array.
            If true, `marker.line.cmin` will correspond to the last
            color in the array and `marker.line.cmax` will
            correspond to the first color.
        width
            Sets the width (in px) of the lines bounding the marker
            points.
        widthsrc
            Sets the source reference on Chart Studio Cloud for
            width .
        """
    def __init__(
        self,
        arg=None,
        autocolorscale=None,
        cauto=None,
        cmax=None,
        cmid=None,
        cmin=None,
        color=None,
        coloraxis=None,
        colorscale=None,
        colorsrc=None,
        reversescale=None,
        width=None,
        widthsrc=None,
        **kwargs
    ):
        """
        Construct a new Line object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scattercarpet.marker.Line`
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `marker.line.colorscale`. Has an effect only if in
            `marker.line.color`is set to a numerical array. In case
            `colorscale` is unspecified or `autocolorscale` is
            true, the default palette will be chosen according to
            whether numbers in the `color` array are all positive,
            all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in
            `marker.line.color`) or the bounds set in
            `marker.line.cmin` and `marker.line.cmax`  Has an
            effect only if in `marker.line.color`is set to a
            numerical array. Defaults to `false` when
            `marker.line.cmin` and `marker.line.cmax` are set by
            the user.
        cmax
            Sets the upper bound of the color domain. Has an effect
            only if in `marker.line.color`is set to a numerical
            array. Value should have the same units as in
            `marker.line.color` and if set, `marker.line.cmin` must
            be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `marker.line.cmin` and/or `marker.line.cmax` to be
            equidistant to this point. Has an effect only if in
            `marker.line.color`is set to a numerical array. Value
            should have the same units as in `marker.line.color`.
            Has no effect when `marker.line.cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Has an effect
            only if in `marker.line.color`is set to a numerical
            array. Value should have the same units as in
            `marker.line.color` and if set, `marker.line.cmax` must
            be set as well.
        color
            Sets themarker.linecolor. It accepts either a specific
            color or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `marker.line.cmin` and
            `marker.line.cmax` if set.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorscale
            Sets the colorscale. Has an effect only if in
            `marker.line.color`is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use`marker.line.cmin` and `marker.line.cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Greys,YlGnBu,Greens,YlOrR
            d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
            ot,Blackbody,Earth,Electric,Viridis,Cividis.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        reversescale
            Reverses the color mapping if true. Has an effect only
            if in `marker.line.color`is set to a numerical array.
            If true, `marker.line.cmin` will correspond to the last
            color in the array and `marker.line.cmax` will
            correspond to the first color.
        width
            Sets the width (in px) of the lines bounding the marker
            points.
        widthsrc
            Sets the source reference on Chart Studio Cloud for
            width .
        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")
        # Internal re-parenting path: adopt the given parent and skip
        # all argument validation below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping properties below does not mutate the
            # caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattercarpet.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.marker.Line`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # For each property: an explicit keyword argument wins over the
        # corresponding entry in ``arg``.
        _v = arg.pop("autocolorscale", None)
        _v = autocolorscale if autocolorscale is not None else _v
        if _v is not None:
            self["autocolorscale"] = _v
        _v = arg.pop("cauto", None)
        _v = cauto if cauto is not None else _v
        if _v is not None:
            self["cauto"] = _v
        _v = arg.pop("cmax", None)
        _v = cmax if cmax is not None else _v
        if _v is not None:
            self["cmax"] = _v
        _v = arg.pop("cmid", None)
        _v = cmid if cmid is not None else _v
        if _v is not None:
            self["cmid"] = _v
        _v = arg.pop("cmin", None)
        _v = cmin if cmin is not None else _v
        if _v is not None:
            self["cmin"] = _v
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("coloraxis", None)
        _v = coloraxis if coloraxis is not None else _v
        if _v is not None:
            self["coloraxis"] = _v
        _v = arg.pop("colorscale", None)
        _v = colorscale if colorscale is not None else _v
        if _v is not None:
            self["colorscale"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("reversescale", None)
        _v = reversescale if reversescale is not None else _v
        if _v is not None:
            self["reversescale"] = _v
        _v = arg.pop("width", None)
        _v = width if width is not None else _v
        if _v is not None:
            self["width"] = _v
        _v = arg.pop("widthsrc", None)
        _v = widthsrc if widthsrc is not None else _v
        if _v is not None:
            self["widthsrc"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@scattercarpet@marker@_line.py@.PATH_END.py
|
{
"filename": "_griddash.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/polar/radialaxis/_griddash.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class GriddashValidator(_plotly_utils.basevalidators.DashValidator):
    """Validates the ``layout.polar.radialaxis.griddash`` dash-style setting."""

    def __init__(
        self, plotly_name="griddash", parent_name="layout.polar.radialaxis", **kwargs
    ):
        # Fill in schema defaults; any caller-supplied override wins.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault(
            "values", ["solid", "dot", "dash", "longdash", "dashdot", "longdashdot"]
        )
        super(GriddashValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@polar@radialaxis@_griddash.py@.PATH_END.py
|
{
"filename": "_usecolormap.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/surface/contours/y/_usecolormap.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class UsecolormapValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validates the boolean ``surface.contours.y.usecolormap`` property."""

    def __init__(
        self, plotly_name="usecolormap", parent_name="surface.contours.y", **kwargs
    ):
        # Defaults dictated by the plotly schema; explicit kwargs win.
        for key, value in {"edit_type": "calc", "role": "info"}.items():
            kwargs.setdefault(key, value)
        super(UsecolormapValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@surface@contours@y@_usecolormap.py@.PATH_END.py
|
{
"filename": "histogram_splitter.py",
"repo_name": "minzastro/unidam",
"repo_path": "unidam_extracted/unidam-master/unidam/core/histogram_splitter.py",
"type": "Python"
}
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from future import standard_library
import numpy as np
from scipy.signal import argrelextrema
standard_library.install_aliases()
def histogram_splitter(hist, bins, min_order=2, max_order=3, dip_depth=0.75,
                       use_spikes=True, spike_threshold=3,
                       spike_location=1.75, spike_spread=0.3):
    """
    Split a histogram into segments at significant local minima.

    Local minima (maxima) are bins lower (higher) than every other bin
    within a window of ``min_order`` (``max_order``) bins on each side.
    Window sizes differ because minima must be located precisely while
    maxima should not proliferate in noisy data.  If several local
    minima lie between two local maxima, only the lowest one is used.
    A split is emitted at a minimum only if its value is below
    ``dip_depth`` times the smaller of the two enclosing maxima.

    Optionally, isolated single-bin spikes -- bins exceeding both direct
    neighbours by a factor of ``spike_threshold`` and located within
    ``spike_spread`` of ``spike_location`` on the ``bins`` axis -- are
    promoted to additional maxima, with their direct neighbours added as
    minima.  (Such spikes can happen e.g. at masses around 1.75 Msol;
    this is a feature of the input, not a bug.)

    Parameters
    ----------
    hist : numpy.ndarray
        Histogram bin heights (float array recommended).
    bins : numpy.ndarray
        Bin edges, ``len(hist) + 1`` values.
    min_order, max_order : int
        Half-window sizes for minimum / maximum detection.
    dip_depth : float
        Required depth of a splitting minimum relative to the smaller
        enclosing maximum.
    use_spikes : bool
        Whether to apply the single-bin spike treatment.
    spike_threshold, spike_location, spike_spread : float
        Spike detection ratio and the accepted position window.

    Yields
    ------
    float
        Bin-centre position of each splitting minimum.
    """
    # Locate minima and maxima.
    mins = argrelextrema(hist, np.less_equal, order=min_order)[0]
    # Pad right with zeros + wrap to get maxima on first/last bin correctly.
    maxs = argrelextrema(np.append(hist, np.zeros(max_order)), np.greater,
                         order=max_order, mode='wrap')[0]
    if use_spikes:
        with np.errstate(all='ignore'):
            # A spike towers over both of its immediate neighbours.
            spikes = np.where(np.logical_and(
                hist[1:-1] / hist[:-2] > spike_threshold,
                hist[1:-1] / hist[2:] > spike_threshold))[0]
        if len(spikes) > 0:
            # Shift: the ratios above were computed on hist[1:-1].
            spikes = spikes + 1
            # Remove peaks already detected as regular maxima.
            spikes = np.setdiff1d(spikes, maxs)
        if len(spikes) > 0:
            # BUGFIX: the previous code re-assigned ``spikes`` to
            # ``np.where(...)[0]`` -- positional indices *into the spikes
            # array*, not into ``hist`` -- and then shifted them by one a
            # second time, appending meaningless bin indices to ``maxs``.
            # Keep the actual histogram indices of the spikes whose bin
            # position lies close to ``spike_location`` instead.
            spikes = spikes[np.abs(bins[spikes] - spike_location) <
                            spike_spread]
        if len(spikes) > 0:
            maxs = np.append(maxs, spikes)
            maxs = np.sort(np.unique(maxs))
            # The bins adjacent to a spike act as its separating minima.
            mins = np.append(mins, spikes - 1)
            mins = np.append(mins, spikes + 1)
            mins = np.sort(np.unique(mins))
    # Bin centres.
    pos = 0.5 * (bins[1:] + bins[:-1])
    maximums = np.vstack((hist[maxs], pos[maxs]))
    minimums = np.vstack((hist[mins], pos[mins]))
    # Select only maxima bigger than 0.01 of the main maximum
    # ...removed, to get rid of "bad" fits.
    # maximums = maximums[:, maximums[0] > 0.01*hist.max()]
    for imax in range(maximums.shape[1] - 1):
        # Minima located strictly between this maximum and the next one.
        between = minimums[:,
                           (minimums[1] > maximums[1, imax]) *
                           (minimums[1] < maximums[1, imax + 1])]
        # Keep only minima that dip low enough relative to both maxima.
        between = between[:, between[0] < dip_depth *
                          np.min(maximums[0, imax:imax + 2])]
        if len(between[0]) == 0:
            # No qualifying minimum between the maxima -- merge them.
            continue
        # Split by the lowest minimum.
        yield between[1, np.argmin(between[0])]
|
minzastroREPO_NAMEunidamPATH_START.@unidam_extracted@unidam-master@unidam@core@histogram_splitter.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sblunt/orbitize",
"repo_path": "orbitize_extracted/orbitize-main/orbitize/__init__.py",
"type": "Python"
}
|
import os

# Package version string.
__version__ = "3.1.0"

# Keep track of the example data directory shipped with the package.
orbitize_dir = os.path.dirname(__file__)
DATADIR = os.path.join(orbitize_dir, "example_data/")

# Detect a valid CUDA environment.  pycuda.autoinit can raise errors other
# than ImportError (e.g. when no CUDA device is present), so this is a
# broad best-effort probe -- but a bare ``except:`` would also swallow
# SystemExit/KeyboardInterrupt, hence ``except Exception``.
try:
    import pycuda.driver as cuda
    import pycuda.autoinit
    from pycuda.compiler import SourceModule

    cuda_ext = True
except Exception:
    cuda_ext = False

# Detect the optional compiled Kepler solver extension.
try:
    from . import _kepler

    cext = True
except ImportError:
    cext = False
|
sbluntREPO_NAMEorbitizePATH_START.@orbitize_extracted@orbitize-main@orbitize@__init__.py@.PATH_END.py
|
{
"filename": "intrinsics.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pythran/pythran/analyses/intrinsics.py",
"type": "Python"
}
|
""" Intrinsics gathers all intrinsics referenced in a module. """
from pythran.passmanager import ModuleAnalysis
import pythran.intrinsic as intrinsic
from pythran.utils import attr_to_path
class Intrinsics(ModuleAnalysis):
    """Collect every pythran intrinsic referenced anywhere in a module."""

    def __init__(self):
        # The analysis result: the set of intrinsic objects encountered.
        self.result = set()
        super(Intrinsics, self).__init__()

    def visit_Attribute(self, node):
        # Resolve the dotted attribute to the object it denotes; record it
        # when it is a pythran intrinsic, then keep walking the subtree.
        target, _ = attr_to_path(node)
        if isinstance(target, intrinsic.Intrinsic):
            self.result.add(target)
        self.generic_visit(node)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pythran@pythran@analyses@intrinsics.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contour/colorbar/tickfont/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Auto-generated validator package init for contour.colorbar.tickfont.
# On Python < 3.7 (no module-level __getattr__, PEP 562) -- or whenever a
# static type checker is looking -- import every validator eagerly.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._weight import WeightValidator
    from ._variant import VariantValidator
    from ._textcase import TextcaseValidator
    from ._style import StyleValidator
    from ._size import SizeValidator
    from ._shadow import ShadowValidator
    from ._lineposition import LinepositionValidator
    from ._family import FamilyValidator
    from ._color import ColorValidator
else:
    # Otherwise defer the submodule imports until first attribute access to
    # keep plotly's import time low.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._weight.WeightValidator",
            "._variant.VariantValidator",
            "._textcase.TextcaseValidator",
            "._style.StyleValidator",
            "._size.SizeValidator",
            "._shadow.ShadowValidator",
            "._lineposition.LinepositionValidator",
            "._family.FamilyValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contour@colorbar@tickfont@__init__.py@.PATH_END.py
|
{
"filename": "_cmax.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/marker/line/_cmax.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validates ``scatter3d.marker.line.cmax`` (upper color-domain bound)."""

    def __init__(
        self, plotly_name="cmax", parent_name="scatter3d.marker.line", **kwargs
    ):
        edit_type = kwargs.pop("edit_type", "calc")
        # Setting cmax manually implies automatic color scaling is off.
        implied = kwargs.pop("implied_edits", {"cauto": False})
        super(CmaxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@marker@line@_cmax.py@.PATH_END.py
|
{
"filename": "_fixedrange.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/yaxis/_fixedrange.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FixedrangeValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validates the boolean ``layout.yaxis.fixedrange`` property."""

    def __init__(self, plotly_name="fixedrange", parent_name="layout.yaxis", **kwargs):
        # Schema default; caller-supplied value takes precedence.
        kwargs.setdefault("edit_type", "calc")
        super(FixedrangeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@yaxis@_fixedrange.py@.PATH_END.py
|
{
"filename": "simrephtml.py",
"repo_name": "wokast/PyCactus",
"repo_path": "PyCactus_extracted/PyCactus-master/SimRep/simrep/simrephtml.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import filter
from builtins import object
import os
import shutil
from .htmlfactory import *
class RepNav(object):
    """Builds the navigation sidebar for a multi-page simulation report."""

    def __init__(self, name, secs):
        self.name = name
        self.secs = secs

    def filename(self, sec, sub=None):
        """Return the html file name for a section (and optional subsection)."""
        parts = [self.name, sec.name]
        if sub is not None:
            parts.append(sub.name)
        elif sec.hassub:
            # A section with subsections links to its first subsection page.
            parts.append(sec.subs[0].name)
        return '_'.join(parts) + '.html'

    def rendersub(self, sec, sub, asub):
        """Render one subsection entry; the active one is shown without a link."""
        if sub.name == asub.name:
            return tag(sub.name, 'li', Class='current')
        return tag(link(sub.name, self.filename(sec, sub)), 'li')

    def rendersec(self, sec, asec, asub):
        """Render one section entry, expanding subsections of the active section."""
        if asec.name == sec.name:
            if asub is None:
                return tag(sec.name, 'li', Class='current')
            head = tag(sec.name, 'li', Class='open')
            subitems = tag([self.rendersub(sec, ss, asub) for ss in sec.subs],
                           'ul', id='NaviSub')
            return [head, subitems]
        return tag(link(sec.name, self.filename(sec)), 'li')

    def render(self, asec, asub=None):
        """Render the whole navigation list, highlighting the active page."""
        entries = [self.rendersec(s, asec, asub) for s in self.secs]
        return tag(entries, 'ul', id='NaviMain')
#
#
class RepSubs(object):
    """Plain container for one rendered report subsection."""

    def __init__(self, title, name, cont):
        # Display title, file-name-safe identifier, rendered content.
        self.title, self.name, self.cont = title, name, cont
#
#
class RepSec(object):
    """Container for one rendered report section and its subsections."""

    def __init__(self, title, name, cont, subs):
        self.title, self.name = title, name
        self.cont, self.subs = cont, subs
        # Convenience flag: True when the section owns subsections.
        self.hassub = bool(self.subs)
#
#
class RepSim(object):
    """Top-level report: renders one HTML page per section/subsection."""

    def __init__(self, title, name, secs):
        self.title = title
        self.name = name
        self.secs = secs
        self.nav = RepNav(name, secs)

    def render(self, path):
        """Write the whole report (pages plus static assets) into *path*."""
        self.copyfiles(path)
        for sec in self.secs:
            if sec.hassub:
                for sub in sec.subs:
                    body = [heading(sec.title, 1), sub.cont]
                    self.savehtml(path, self.nav.filename(sec, sub),
                                  self.nav.render(sec, sub), body)
            else:
                self.savehtml(path, self.nav.filename(sec),
                              self.nav.render(sec), sec.cont)

    def copyfiles(self, path):
        """Copy the static style/logo assets next to the generated pages."""
        srcpath = os.path.join(os.path.split(__file__)[0], 'data')
        if os.path.samefile(path, srcpath):
            # Rendering straight into the asset directory: nothing to copy.
            return
        for asset in ['style.css', 'logo.png']:
            shutil.copyfile(os.path.join(srcpath, asset),
                            os.path.join(path, asset))

    def savehtml(self, path, name, nav, cont):
        """Assemble one page layout and write it to ``path/name``."""
        content = div(cont, id='Content')
        sidebar = div([img('logo.png', 'logo'), nav], id='LeftBar')
        layout = div([sidebar, content], id='Layout')
        page = str(htmldoc(layout, cssfile="style.css"))
        with open(os.path.join(path, name), 'w') as fh:
            fh.write(page)
#
#
#
class DocParse(object):
    """Translate a parsed report entity tree into HTML snippets.

    Every entity carries a ``what`` attribute naming its type; ``parse``
    dispatches it to the method of the same name.  Figures and movies are
    converted / thumbnailed on the fly via the external ``convert`` and
    ``ffmpeg`` tools.
    """

    def __init__(self, doc, path):
        # Entity types allowed inline (level0) and at block level (level1).
        self.level0 = ['text', 'emph', 'warn', 'nobreak', 'floatnum', 'intnum', 'newline']
        self.level1 = ['par', 'table', 'figure', 'movie', 'glist', 'listing'] \
            + self.level0
        # Running counters for numbered captions.
        self.tabnum = 1
        self.fignum = 1
        self.movnum = 1
        self.path = os.path.abspath(path)
        self.rep = self.parse(doc, ['simreport'])

    def parse(self, ent, allow):
        """Dispatch one entity (or a list of them), checking it is allowed."""
        if isinstance(ent, list):
            return [self.parse(e, allow) for e in ent]
        if ent.what not in allow:
            raise ValueError('Found unexpected entity ' + ent.what)
        f = getattr(self, ent.what)
        return f(ent)

    def simreport(self, ent):
        """Root entity: build the full report object."""
        sections = self.parse(ent.sections, ['section'])
        return RepSim(ent.title, ent.name, sections)

    def section(self, ent):
        """One top-level section with heading and subsections."""
        cont = self.parse(ent.cont, self.level1)
        cont.insert(0, heading(ent.title, 1))
        subs = self.parse(ent.subs, ['subsection'])
        return RepSec(ent.title, ent.name, cont, subs)

    def subsection(self, ent):
        """One subsection with its own second-level heading."""
        cont = self.parse(ent.cont, self.level1)
        cont.insert(0, heading(ent.title, 2))
        return RepSubs(ent.title, ent.name, cont)

    def text(self, ent):
        return ent.value

    def emph(self, ent):
        return emph(ent.value)

    def warn(self, ent):
        return strong(ent.value)

    def nobreak(self, ent):
        return nobreak(ent.value)

    def newline(self, ent):
        return newline()

    def floatnum(self, ent):
        # Render with the entity's requested number of significant digits.
        return "%.*g" % (ent.digits, ent.value)

    def intnum(self, ent):
        return ("%d" % ent.value)

    def par(self, ent):
        return par(self.parse(ent.cont, self.level0))

    def listing(self, ent):
        """Preformatted listing with alarming keywords highlighted in bold."""
        import re
        txt = escape(ent.cont)
        mark = lambda m: '<strong>' + m.group() + '</strong>'
        for w in ent.alarming:
            # Case-insensitive keyword highlighting.
            pat = re.compile(w, re.IGNORECASE)
            txt = pat.sub(mark, txt)
        return pre(rawhtml(txt), Class='listing', width=80)

    def glist(self, ent):
        """Ordered or unordered list, depending on ent.order."""
        c = [self.parse(e, self.level0) for e in ent.cont]
        l = olist if ent.order else ulist
        return l(c)

    def table(self, ent):
        """Table; captioned tables get a running "Table N:" number."""
        cnt = [[self.parse(e, self.level0) for e in r] for r in ent.cont]
        if ent.cap is None:
            return table(cnt, Class='standard')
        tabnum = span(("Table %d:" % self.tabnum), Class='capnum')
        self.tabnum += 1
        cap = [tabnum, self.parse(ent.cap, self.level0)]
        tbl = table(cnt, cap=cap, Class='captab')
        return div(tbl, Class='captab')

    def autoconvert(self, imgname):
        """Find an embeddable raster/vector image for *imgname*, converting
        a PDF/EPS original to PNG with ImageMagick's ``convert`` if needed.

        Returns ``(embeddable_filename, alt)`` where ``alt`` is the
        ``(fmt, filename)`` tuple of the vector original, or None when no
        PDF/EPS version exists.
        """
        def get_img_variants(formats):
            cand = [(fmt, imgname + '.' + fmt) for fmt in formats]
            return [(fmt, fn) for fmt, fn in cand if os.path.isfile(fn)]

        femb = get_img_variants(['svg', 'png', 'jpeg', 'jpg', 'gif'])
        fext = get_img_variants(['pdf', 'eps'])
        if not femb:
            if not fext:
                raise IOError("Image " + imgname + " not found")
            newimg = imgname + '.png'
            os.system('convert -density 72 %s %s' % (fext[0][1], newimg))
            if not os.path.isfile(newimg):
                raise RuntimeError("Image conversion failed")
            # BUG FIX: keep the (format, filename) tuple structure.
            # Previously this was ['png', newimg], so femb[0][1] below
            # returned the character 'n' instead of the converted file name.
            femb = [('png', newimg)]
        altfmt = fext[0] if fext else None
        return femb[0][1], altfmt

    def figure(self, ent):
        """Figure; captioned figures get a number and a link to the vector
        original (if one exists)."""
        path = ent.path
        if not os.path.isabs(path):
            path = os.path.join(self.path, path)
        imgsrc = os.path.abspath(path)
        imgfile, altf = self.autoconvert(imgsrc)
        imgurl = os.path.relpath(imgfile, self.path)
        cnt = img(imgurl, os.path.basename(imgurl))
        if ent.cap is None:
            return cnt
        fignum = span(("Figure %d:" % self.fignum), Class='fignum')
        self.fignum += 1
        cap = [fignum, self.parse(ent.cap, self.level0)]
        if altf is not None:
            alturl = os.path.relpath(altf[1], self.path)
            cap.append(link(altf[0], alturl))
        ftab = table([[cnt]], cap=cap, Class='capfig')
        return div(ftab, Class='capfig')

    def moviethumb(self, movname):
        """Extract the first movie frame as a PNG thumbnail via ffmpeg."""
        prevname = os.path.splitext(movname)[0] + '_thumb.png'
        os.system('ffmpeg -i %s -y -vframes 1 -sameq %s' % (movname, prevname))
        if not os.path.isfile(prevname):
            raise RuntimeError("Thumbnail creation failed")
        return prevname

    def movie(self, ent):
        """Movie link rendered as a clickable thumbnail, optionally captioned."""
        path = ent.path
        if not os.path.isabs(path):
            path = os.path.join(self.path, path)
        mov = os.path.abspath(path)
        fmts = ['mpeg', 'wmv', 'mov', 'mp4']
        movs = list(filter(os.path.isfile, [mov + '.' + e for e in fmts]))
        if not movs:
            raise RuntimeError("Movie %s not found." % mov)
        mov = movs[0]
        prev = self.moviethumb(mov)
        movurl = os.path.relpath(mov, self.path)
        prevurl = os.path.relpath(prev, self.path)
        thumb = img(prevurl, os.path.basename(prevurl))
        movlnk = link(thumb, movurl)
        if ent.cap is None:
            return movlnk
        movnum = span(("Movie %d:" % self.movnum), Class='fignum')
        self.movnum += 1
        cap = [movnum, self.parse(ent.cap, self.level0)]
        ftab = table([[movlnk]], cap=cap, Class='capfig')
        return div(ftab, Class='capfig')
#
#
def render(doc, path):
    """Parse *doc* and write the resulting HTML report into *path*."""
    DocParse(doc, path).rep.render(path)
#
|
wokastREPO_NAMEPyCactusPATH_START.@PyCactus_extracted@PyCactus-master@SimRep@simrep@simrephtml.py@.PATH_END.py
|
{
"filename": "preds_analysis_uce.py",
"repo_name": "devinamhn/RadioGalaxies-BNNs",
"repo_path": "RadioGalaxies-BNNs_extracted/RadioGalaxies-BNNs-main/radiogalaxies_bnns/eval/uncertainty/preds_analysis_uce.py",
"type": "Python"
}
|
import torch
import hamiltorch
import torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
import numpy as np
import torchvision.transforms as transforms
from torchvision import datasets
from torchvision.transforms import InterpolationMode
from pytorch_lightning.demos.mnist_datamodule import MNIST
from torch.utils.data import DataLoader, random_split
from datamodules import MNISTDataModule, MiraBestDataModule, testloader_mb_uncert
from torch.utils.data.sampler import SubsetRandomSampler
import pytorch_lightning as pl
import utils
from PIL import Image
from models import MLP, LeNet
from matplotlib import pyplot as plt
import mirabest
from uncertainty import entropy_MI, overlapping, GMM_logits, calibration
import csv
# Evaluate uncertainty calibration (UCE) of an HMC-trained LeNet on the
# MiraBest FRI/FRII test set, using predictive entropy and mutual
# information as the uncertainty measures.
hamiltorch.set_random_seed(123)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# LeNet classifier: 1 input channel, 2 classes (FRI vs FRII).
model = LeNet(1, 2) #MLP(150, 200, 10)

# Directory holding the thinned HMC chain; calibration plots go here too.
path = './results/temp/thin1000/' #'./results/checkpt/' #'./results/galahad/hamilt/testing/15000steps/'
params_hmc = torch.load(path+'thin_chain'+str(0), map_location = torch.device(device))
print(len(params_hmc))

# One prior precision value per model parameter tensor (all identical).
tau_list = []
tau = 100. # 10.#./100. # 1/50
for w in model.parameters():
    tau_list.append(tau)
tau_list = torch.tensor(tau_list).to(device)

# MiraBest test data; `test_data` below gives per-sample access.
config_dict, config = utils.parse_config('config_mb.txt')
datamodule = MiraBestDataModule(config_dict, hmc=True)
test_loader, test_data1, data_type, test_data = testloader_mb_uncert(config_dict['output']['test_data'], config_dict['data']['datadir'])

# NOTE(review): this loop only keeps the last batch's (x_test, y_test) --
# presumably the loader yields one batch containing the full test set;
# confirm against the datamodule configuration.
for i, (x_test, y_test) in enumerate(test_loader):
    x_test, y_test = x_test.to(device), y_test.to(device)
print(len(y_test))

# Posterior-predictive logits for all HMC samples on the whole test batch.
pred_list, log_prob_list = hamiltorch.predict_model(model, x = x_test, y = y_test, samples=params_hmc, model_loss= 'multi_class_linear_output', tau_out=1., tau_list=tau_list)
_, pred = torch.max(pred_list, 2)

# multi_class_log_softmax_output
# multi_class_linear_output
# acc = []
# acc = torch.zeros( int(len(params_hmc))-1)
# nll = torch.zeros( int(len(params_hmc))-1)
# ensemble_proba = F.softmax(pred_list[0], dim=-1)
# print(pred_list.shape)
# for s in range(1,len(params_hmc)):
#     _, pred = torch.max(pred_list[:s].mean(0), -1)
#     acc[s-1] = (pred.float() == y_test.flatten()).sum().float()/y_test.shape[0]
#     # print(pred_list[s][103])
#     # print('FRI', F.softmax(pred_list[s][103], dim=-1)[0], y_test[103])
#     # print('FRII', F.softmax(pred_list[s][103], dim=-1)[1])
#     # softmax_list.append((F.softmax(pred_list[s], dim = -1)).detach().numpy())
#     ensemble_proba += F.softmax(pred_list[s], dim=-1) #ensemble prob is being added - save all the softmax values
#     nll[s-1] = F.nll_loss(torch.log(ensemble_proba.cpu()/(s+1)), y_test[:].long().cpu().flatten(), reduction='mean')
# print("test accuracy", torch.mean(acc), torch.std(acc))

# Per-sample accumulators for error rate and uncertainty measures.
error_all = []
entropy_all = []
mi_all = []
aleat_all =[]
fr1 = 0
fr2 = 0
indices = np.arange(0, len(test_data), 1)
print(indices)

#for each sample in the test set:
for index in indices:
    # x_test = x_test[index] #torch.unsqueeze(x_test[index])
    # y_test = y_test[index] #torch.unsqueeze(x_test[index])
    x_test = torch.unsqueeze(torch.tensor(test_data[index][0]),0)
    y_test = torch.unsqueeze(torch.tensor(test_data[index][1]),0)
    # x_test[0]
    # print(x_test, y_test)
    # target = y_test[index].detach().numpy()
    target = y_test.detach().numpy().flatten()[0]
    # Count class occurrences (0 = FRI, 1 = FRII) to split the UCE later;
    # assumes the test set is ordered by class -- see fr1_end/fr2_start below.
    if(target == 0):
        fr1+= 1
    elif(target==1):
        fr2+= 1
    # Posterior-predictive logits for this single sample across all HMC draws.
    pred_list, log_prob_list = hamiltorch.predict_model(model, x = x_test, y = y_test, samples=params_hmc, model_loss='multi_class_linear_output', tau_out=1., tau_list=tau_list)
    softmax_ = F.softmax(pred_list, dim=-1)
    pred = softmax_.mean(dim=1).argmax(dim=-1).numpy().flatten()
    pred1 = pred_list.mean(dim=1).argmax(dim=-1)
    y_test_all = np.tile(y_test.detach().numpy().flatten()[0], len(params_hmc))
    # print(pred)
    # print(pred1)
    # print(y_test_all)
    # print(pred != y_test_all)
    # Fraction of HMC draws whose prediction disagrees with the label.
    errors = np.mean((pred != y_test_all).astype('uint8'))
    # print(errors)
    softmax = np.array(softmax_).reshape((len(params_hmc), 2))
    logits = pred #np.array(pred)
    mean_logits = np.mean(logits,axis=0)
    var_logits = np.std(logits,axis=0)
    # print(softmax_list)
    # print(softmax[:,0])
    # Predictive entropy and mutual information over the HMC samples.
    entropy, mutual_info, entropy_singlepass = entropy_MI(softmax, samples_iter= len(params_hmc))
    # print(entropy)
    # print(mutual_info)
    error_all.append(errors)
    # Convert from nats to bits (divide by ln 2).
    entropy_all.append(entropy/np.log(2))
    mi_all.append(mutual_info/np.log(2))
    # print(error_all)
    # print(entropy_all)

# Index ranges of the two classes in the (class-ordered) test set.
fr1_start = 0 #{0}
fr1_end = fr1 #{49, 68}
fr2_start = fr1 #49, 68
fr2_end = len(indices) #len(val_indices) #{104, 145}
print(fr1_start, fr1_end, fr2_start, fr2_end)
#print(error_all)
#entropy, mutual_info, entropy_singlepass = entropy_MI(softmax, samples_iter)

# UCE vs predictive entropy: overall, per class, and class-balanced (cUCE).
n_bins = 8
uce = calibration(path, np.array(error_all), np.array(entropy_all), n_bins, x_label = 'predictive entropy')
print("Predictive Entropy")
print("uce = ", np.round(uce, 2))
uce_0 = calibration(path, np.array(error_all[fr1_start:fr1_end]), np.array(entropy_all[fr1_start:fr1_end]), n_bins, x_label = 'predictive entropy')
print("UCE FRI= ", np.round(uce_0, 2))
uce_1 = calibration(path, np.array(error_all[fr2_start:fr2_end]), np.array(entropy_all[fr2_start:fr2_end]), n_bins, x_label = 'predictive entropy')
print("UCE FRII = ", np.round(uce_1, 2))
cUCE = (uce_0 + uce_1)/2
print("cUCE=", np.round(cUCE, 2))

#max_mi = np.amax(np.array(mi_all))
#print("max mi",max_mi)
#mi_all = mi_all/max_mi
#print(mi_all)
#print("max mi",np.amax(mi_all))

# Same calibration analysis using mutual information as the uncertainty.
uce = calibration(path, np.array(error_all), np.array(mi_all), n_bins, x_label = 'mutual information')
print("Mutual Information")
print("uce = ", np.round(uce, 2))
uce_0 = calibration(path, np.array(error_all[fr1_start:fr1_end]), np.array(mi_all[fr1_start:fr1_end]), n_bins, x_label = 'mutual information')
print("UCE FRI= ", np.round(uce_0, 2))
uce_1 = calibration(path, np.array(error_all[fr2_start:fr2_end]), np.array(mi_all[fr2_start:fr2_end]), n_bins, x_label = 'mutual information')
print("UCE FRII = ", np.round(uce_1, 2))
cUCE = (uce_0 + uce_1)/2
print("cUCE=", np.round(cUCE,2))

print("mean and std of error")
print(error_all)
print(np.mean(error_all)*100)
print(np.std(error_all))

# uce = calibration(path, np.array(error_all), np.array(aleat_all), n_bins, x_label = 'average entropy')
# print("Average Entropy")
# print("uce = ", np.round(uce, 2))
# uce_0 = calibration(path, np.array(error_all[fr1_start:fr1_end]), np.array(aleat_all[fr1_start:fr1_end]), n_bins, x_label = 'average entropy')
# print("UCE FRI= ",np.round(uce_0, 2))
# uce_1 = calibration(path, np.array(error_all[fr2_start:fr2_end]), np.array(aleat_all[fr2_start:fr2_end]), n_bins, x_label = 'average entropy')
# print("UCE FRII = ", np.round(uce_1, 2))
# cUCE = (uce_0 + uce_1)/2
# print("cUCE=", np.round(cUCE, 2))
|
devinamhnREPO_NAMERadioGalaxies-BNNsPATH_START.@RadioGalaxies-BNNs_extracted@RadioGalaxies-BNNs-main@radiogalaxies_bnns@eval@uncertainty@preds_analysis_uce.py@.PATH_END.py
|
{
"filename": "C_halo_special_target_list.ipynb",
"repo_name": "ChrisBoettner/plato",
"repo_path": "plato_extracted/plato-main/notebooks/C_halo_special_target_list.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord
from plato.stars import filter_p1_targets, filter_valid_targets
from plato.utils import get_abspath
```
A new version of galpy (1.10.0) is available, please upgrade using pip/conda/... to get the latest features and bug fixes!
```python
# Load the LOPS2/LOPN1 PLATO field target lists and tag each row with its field.
LOPS2 = pd.read_csv(get_abspath() + f"data/processed/LOPS2_targets.csv")
LOPN1 = pd.read_csv(get_abspath() + f"data/processed/LOPN1_targets.csv")
LOPN1["Field"] = "LOPN1"
LOPS2["Field"] = "LOPS2"

fields = pd.concat([LOPS2, LOPN1])
fields = filter_valid_targets(fields)
# Keep only the columns used downstream.
fields = fields[
    [
        "gaiaID_DR3",
        "GLON",
        "GLAT",
        "gaiaV",
        "Population",
        "Field",
        "n_cameras",
        "[Fe/H]",
        "[Fe/H]_apogee",
    ]
]
fields = fields.rename(
    columns={
        "gaiaV": "Magnitude_V",
        "GLON": "l",
        "GLAT": "b",
    }
)
# Restrict to the P1 prime sample.
p1_sample = filter_p1_targets(fields)
```
## FILTER SPECIAL TARGETS
```python
# Special targets: halo-population stars within the P1 sample, with
# columns renamed for the publication tables.
special_targets = (
    p1_sample[p1_sample["Population"] == "Halo"]
    .drop(columns=["Population", "n_cameras"])
    .rename(
        columns={
            "gaiaID_DR3": "Gaia DR3 ID",
            "Magnitude_V": "V Magnitude",
        }
    )
)

print("Number of special targets: ", len(special_targets))
print("Median [Fe/H]", special_targets["[Fe/H]"].median())
```
Number of special targets: 47
Median [Fe/H] -1.274
```python
# create a SkyCoord object with galactic coordinates
galactic_coord = SkyCoord(
    l=special_targets["l"].to_numpy() * u.deg,
    b=special_targets["b"].to_numpy() * u.deg,
    frame="galactic",
)

# convert galactic coordinates to equatorial coordinates (ICRS)
equatorial_coord = galactic_coord.transform_to("icrs")

# Sexagesimal strings for the published tables (RA in hours, Dec in degrees).
special_targets["RA"] = equatorial_coord.ra.to_string(
    u.hourangle, sep="hms", pad=True, precision=2
)
special_targets["Dec"] = equatorial_coord.dec.to_string(
    u.deg, sep="dms", pad=True, precision=2
)

# Round and stringify the remaining numeric columns for LaTeX output.
special_targets["l"] = special_targets["l"].round(5).astype(str)
special_targets["b"] = special_targets["b"].round(5).astype(str)
special_targets["V Magnitude"] = special_targets["V Magnitude"].round(2).astype(str)
```
```python
# Select the publication columns, then split the table by PLATO field.
special_targets_table = special_targets[
    ["Gaia DR3 ID", "RA", "Dec", "l", "b", "V Magnitude", "Field"]
]
LOPS2_table = special_targets_table[special_targets["Field"] == "LOPS2"].drop(
    columns=["Field"]
)
LOPN1_table = special_targets_table[special_targets["Field"] == "LOPN1"].drop(
    columns=["Field"]
)
```
```python
# LaTeX source for the LOPS2-field target table.
print(LOPS2_table.to_latex(index=False))
```
\begin{tabular}{rlllll}
\toprule
Gaia DR3 ID & RA & Dec & l & b & V Magnitude \\
\midrule
2892879693284897536 & 06h16m37.01s & -32d27m15.29s & 239.62354 & -20.99928 & 10.89 \\
2911140863435161856 & 05h59m33.51s & -26d16m35.09s & 232.0118 & -22.39392 & 10.91 \\
4676601464106231040 & 04h23m43.36s & -62d17m05.00s & 273.55676 & -40.71957 & 10.52 \\
4768015406298936960 & 05h47m36.18s & -54d09m57.56s & 261.95891 & -30.85913 & 10.57 \\
4772221523607359232 & 05h25m26.69s & -51d58m42.48s & 259.15855 & -34.06364 & 10.98 \\
4797516475799429888 & 05h18m19.97s & -48d52m16.70s & 255.33827 & -35.08485 & 10.63 \\
4818949324679566720 & 05h13m48.21s & -40d54m03.53s & 245.53908 & -35.2076 & 10.73 \\
4864851495597048448 & 04h33m26.34s & -38d18m08.85s & 241.28118 & -42.70393 & 10.96 \\
4874147282294634496 & 04h46m26.38s & -33d09m18.77s & 234.89973 & -39.4875 & 10.88 \\
4874355845906645120 & 04h44m49.12s & -32d52m40.74s & 234.47078 & -39.77595 & 9.82 \\
5297612567846523648 & 08h55m16.79s & -63d11m48.85s & 279.68487 & -11.56833 & 10.59 \\
5303348140199025920 & 08h53m18.62s & -59d02m02.45s & 276.24095 & -9.13719 & 9.18 \\
5489531880096156416 & 07h53m21.37s & -52d39m13.01s & 266.12717 & -12.61531 & 9.46 \\
5490827169212941056 & 07h16m15.58s & -54d16m38.91s & 265.24364 & -18.30189 & 10.37 \\
5494465006512656000 & 06h01m47.64s & -59d51m51.34s & 268.69547 & -29.3658 & 10.18 \\
5498528840145966464 & 06h55m28.03s & -52d47m36.15s & 262.66243 & -20.71451 & 9.96 \\
5510893810476230144 & 07h34m18.14s & -45d16m36.11s & 257.95676 & -11.97777 & 10.54 \\
5534999297246168320 & 07h35m22.44s & -44d25m02.43s & 257.26085 & -11.42081 & 10.75 \\
5537359398962337280 & 07h56m34.82s & -40d20m27.82s & 255.56685 & -6.02412 & 10.96 \\
5539856596020917376 & 08h15m04.77s & -39d58m08.40s & 257.14142 & -2.83572 & 10.98 \\
5545190739243196672 & 07h58m07.44s & -35d54m56.94s & 251.92065 & -3.47982 & 10.42 \\
5549536180990319616 & 06h25m44.65s & -50d08m47.30s & 258.54324 & -24.4474 & 10.88 \\
5551362018831013376 & 06h43m15.47s & -48d39m59.03s & 257.82336 & -21.30686 & 10.56 \\
5551565291043498496 & 06h41m26.76s & -48d13m11.51s & 257.2697 & -21.46142 & 10.52 \\
5555201689530777344 & 06h28m22.83s & -47d46m42.24s & 256.15226 & -23.43749 & 10.97 \\
5556830959605647360 & 06h29m33.11s & -44d15m03.85s & 252.53326 & -22.27004 & 10.21 \\
5557022343348187392 & 06h42m10.34s & -44d02m26.04s & 253.06044 & -20.05018 & 10.88 \\
5578884070483294976 & 06h52m22.61s & -36d24m18.71s & 246.28301 & -15.52673 & 10.8 \\
5584821364554787456 & 07h25m11.33s & -40d23m42.41s & 252.73421 & -11.30575 & 10.32 \\
5586241315104190848 & 07h28m03.43s & -38d00m45.45s & 250.81879 & -9.74411 & 9.68 \\
5616551552155482880 & 07h18m43.25s & -24d39m26.88s & 237.91749 & -5.40371 & 8.94 \\
5618295476367781504 & 07h30m41.52s & -23d59m08.82s & 238.60724 & -2.68067 & 10.91 \\
\bottomrule
\end{tabular}
```python
# LaTeX source for the LOPN1-field target table.
print(LOPN1_table.to_latex(index=False))
```
\begin{tabular}{rlllll}
\toprule
Gaia DR3 ID & RA & Dec & l & b & V Magnitude \\
\midrule
1340991529725341056 & 17h11m40.84s & 37d49m55.08s & 61.75032 & 35.17462 & 10.59 \\
1342299192648782592 & 17h39m36.23s & 37d10m48.77s & 62.26064 & 29.61662 & 8.36 \\
1423516852416948224 & 16h14m57.15s & 49d46m03.04s & 77.50137 & 45.30106 & 10.98 \\
1622478459328957696 & 15h57m27.57s & 56d40m02.77s & 88.01797 & 45.85473 & 10.68 \\
1644643411153918336 & 15h39m11.90s & 66d48m13.65s & 101.89417 & 42.83402 & 10.31 \\
2026374267595492096 & 19h28m53.81s & 28d22m21.21s & 62.28341 & 5.12122 & 10.29 \\
2039347061671874944 & 19h10m44.00s & 30d05m46.72s & 62.01238 & 9.43357 & 10.76 \\
2051426296414984960 & 19h31m09.21s & 36d09m01.42s & 69.4295 & 8.34166 & 10.2 \\
2077092436860985728 & 19h42m06.28s & 41d41m23.01s & 75.38792 & 9.06408 & 10.62 \\
2083249324019906048 & 20h20m38.47s & 46d26m29.55s & 83.09395 & 5.60762 & 10.81 \\
2104987557947509888 & 18h54m17.01s & 42d59m04.32s & 72.80069 & 17.62626 & 9.93 \\
2107126146721252864 & 18h59m16.93s & 45d06m31.93s & 75.21886 & 17.55053 & 10.89 \\
2126182469941128960 & 19h26m03.33s & 44d21m35.14s & 76.45543 & 12.88873 & 10.27 \\
2142082129629510272 & 19h32m25.97s & 56d36m25.13s & 88.4213 & 17.07098 & 10.32 \\
2203746967971153024 & 21h39m15.35s & 60d17m05.38s & 101.18362 & 5.802 & 10.32 \\
\bottomrule
\end{tabular}
```python
# Highest metallicity among the special targets.
special_targets["[Fe/H]"].max()
```
-0.324
## SAVE SPECIAL TARGET LIST
```python
# Rebuild the special target list from the raw field catalogues so the
# saved CSV keeps the full (unrenamed) set of columns.
LOPS2 = pd.read_csv(get_abspath() + "data/processed/LOPS2_targets.csv")
LOPN1 = pd.read_csv(get_abspath() + "data/processed/LOPN1_targets.csv")
LOPN1["Field"] = "LOPN1"
LOPS2["Field"] = "LOPS2"

fields = pd.concat([LOPS2, LOPN1])
fields = filter_valid_targets(fields).rename(columns={"gaiaV": "Magnitude_V"})
p1_sample = filter_p1_targets(fields).rename(columns={"Magnitude_V": "gaiaV"})

# Fix: removed the accidental duplicated assignment
# (`special_targets = special_targets = ...`) and the pointless f-string
# prefixes on placeholder-free literals.
special_targets = p1_sample[p1_sample["Population"] == "Halo"]
special_targets.to_csv(
    get_abspath() + "data/processed/special_target_list.csv", index=False
)
```
|
ChrisBoettnerREPO_NAMEplatoPATH_START.@plato_extracted@plato-main@notebooks@C_halo_special_target_list.ipynb@.PATH_END.py
|
{
"filename": "PIDOptimizer.py",
"repo_name": "jacotay7/pyRTC",
"repo_path": "pyRTC_extracted/pyRTC-main/pyRTC/hardware/PIDOptimizer.py",
"type": "Python"
}
|
from pyRTC.utils import *
from pyRTC.Pipeline import *
from pyRTC.Optimizer import *
import numpy as np
class PIDOptimizer(Optimizer):
    """Optimizer that tunes the PID gains of the AO control loop.

    Each trial applies candidate (pGain, iGain, dGain) values to the live
    loop, restarts it, and scores the configuration from shared-memory
    telemetry ("strehl" and optionally "tiptilt").
    """

    def __init__(self, conf, loop) -> None:
        self.loop = loop
        # Objective mode: 'strehl' scores the Strehl reading alone;
        # 'tiptilt' subtracts the residual tip/tilt signal as a penalty.
        self.mode = 'strehl'
        self.strehlShm, _, _ = initExistingShm("strehl")
        self.tipTiltShm, _, _ = initExistingShm("tiptilt")
        # Search-space upper bounds (lower bound is 0 in applyTrial).
        self.maxPGain = setFromConfig(conf, "maxPGain", 0.5)
        self.maxIGain = setFromConfig(conf, "maxIGain", 0.05)
        self.maxDGain = setFromConfig(conf, "maxDGain", 0.05)
        # Number of telemetry reads averaged per trial.
        self.numReads = setFromConfig(conf, "numReads", 5)
        # When True, mirror pGain into the loop's leakyGain as well.
        self.isPOL = False
        super().__init__(conf)

    def objective(self, trial):
        """Score one trial: apply gains, restart the loop, average reads."""
        self.applyTrial(trial)
        self.loop.run("stop")
        # Flatten repeatedly to reset the corrector before restarting.
        for i in range(10):
            self.loop.run("flatten")
        self.loop.run("start")
        result = np.empty(self.numReads)
        for i in range(self.numReads):
            if self.mode == 'strehl':
                result[i] = self.strehlShm.read()
            elif self.mode == 'tiptilt':
                result[i] = self.strehlShm.read() -1*self.tipTiltShm.read()
        # NOTE(review): if self.mode is neither 'strehl' nor 'tiptilt',
        # `result` keeps np.empty's uninitialized contents -- confirm that
        # mode is always one of the two values.
        return np.mean(result)

    def applyTrial(self, trial):
        """Push the trial's suggested PID gains to the live loop."""
        # Suggest values for Kp, Ki, Kd
        self.loop.setProperty("pGain", trial.suggest_float('pGain', 0, self.maxPGain))
        self.loop.setProperty("iGain", trial.suggest_float('iGain', 0, self.maxIGain))
        self.loop.setProperty("dGain", trial.suggest_float('dGain', 0, self.maxDGain))
        if self.isPOL:
            self.loop.setProperty("leakyGain", self.loop.getProperty('pGain'))
        return super().applyTrial(trial)

    def applyOptimum(self):
        """Apply the study's best parameters back to the loop."""
        super().applyOptimum()
        self.loop.setProperty("pGain", self.study.best_params["pGain"])
        self.loop.setProperty("iGain", self.study.best_params["iGain"])
        self.loop.setProperty("dGain", self.study.best_params["dGain"])
        if self.isPOL:
            self.loop.setProperty("leakyGain", self.loop.getProperty('pGain'))
        return
if __name__ == "__main__":
    # Prevents camera output from messing with communication
    original_stdout = sys.stdout
    sys.stdout = open(os.devnull, 'w')

    # Create argument parser
    parser = argparse.ArgumentParser(description="Read a config file from the command line.")
    # Add command-line argument for the config file
    parser.add_argument("-c", "--config", required=True, help="Path to the config file")
    parser.add_argument("-p", "--port", required=True, help="Port for communication")
    # Parse command-line arguments
    args = parser.parse_args()

    conf = read_yaml_file(args.config)["optimizer"]
    pid = os.getpid()
    # Pin this process to the configured core and raise its priority.
    set_affinity((conf["affinity"])%os.cpu_count())
    decrease_nice(pid)

    # NOTE(review): PIDOptimizer.__init__ takes a required `loop` argument,
    # but none is passed here -- as written this call raises TypeError.
    # Confirm how the loop object should be obtained in this entry point.
    component = PIDOptimizer(conf=conf)
    component.start()

    # Go back to communicating with the main program through stdout
    sys.stdout = original_stdout

    # Serve remote commands until the listener shuts down.
    l = Listener(component, port = int(args.port))
    while l.running:
        l.listen()
        time.sleep(1e-3)
|
jacotay7REPO_NAMEpyRTCPATH_START.@pyRTC_extracted@pyRTC-main@pyRTC@hardware@PIDOptimizer.py@.PATH_END.py
|
{
"filename": "test_instrument.py",
"repo_name": "ExObsSim/ExoRad2-public",
"repo_path": "ExoRad2-public_extracted/ExoRad2-public-master/tests/test_instrument.py",
"type": "Python"
}
|
import logging
import os
import pathlib
import unittest
import h5py
from test_options import payload_file
from exorad.log import setLogLevel
from exorad.models.instruments import Photometer
from exorad.models.instruments import Spectrometer
from exorad.output.hdf5 import HDF5Output
from exorad.tasks import MergeChannelsOutput
from exorad.tasks.instrumentHandler import BuildChannels
from exorad.tasks.instrumentHandler import LoadPayload
from exorad.tasks.loadOptions import LoadOptions
# Resolve data directories relative to this test file.
path = pathlib.Path(__file__).parent.absolute()
data_dir = os.path.join(path.parent.absolute(), 'examples')
test_dir = os.path.join(path, 'test_data')

# Payload options shared by all test cases below (loaded once at import).
loadOptions = LoadOptions()
options = loadOptions(filename=payload_file())
class PhotometerTest(unittest.TestCase):
    # The instrument is built once, at class-definition (import) time.
    setLogLevel(logging.DEBUG)
    photometer = Photometer('Phot', options['channel']['Phot'], options)
    photometer.build()

    def test_photometer_table(self):
        # Expected single photometric band: 0.55 um wavelength, QE 0.55.
        self.assertEqual(self.photometer.table['Wavelength'].value, 0.55)
        # self.assertEqual(self.photometer.table['TR'].value, 0.5)
        self.assertEqual(self.photometer.table['QE'].value, 0.55)
class SpectrometerTest(unittest.TestCase):
    # The instrument is built once, at class-definition (import) time.
    setLogLevel(logging.DEBUG)
    spectrometer = Spectrometer('Spec', options['channel']['Spec'], options)
    spectrometer.build()

    def test_spectrometer_table(self):
        # Expected binning: 12 spectral bins with a flat QE of 0.7.
        self.assertEqual(self.spectrometer.table['Wavelength'].size, 12)
        # self.assertListEqual(list(self.spectrometer.table['TR'].value), [0.5] * 12)
        self.assertListEqual(list(self.spectrometer.table['QE'].value),
                             [0.7] * 12)

    def test_spectrometer_native_R(self):
        # A 'native' target resolving power must be accepted by build().
        from copy import deepcopy
        opt = deepcopy(options)
        opt['channel']['Spec']['targetR']['value'] = 'native'
        spectrometer = Spectrometer('Spec', opt['channel']['Spec'], opt)
        spectrometer.build()

    def test_spectrometer_no_R(self):
        # A missing targetR entry must be defaulted to 'native' by build().
        from copy import deepcopy
        opt = deepcopy(options)
        opt['channel']['Spec'].pop('targetR')
        spectrometer = Spectrometer('Spec', opt['channel']['Spec'], opt)
        spectrometer.build()
        self.assertEqual(opt['channel']['Spec']['targetR']['value'], 'native')
class Spectrometer_from_pickle_Test(unittest.TestCase):
    # Same checks as SpectrometerTest, but the payload variant loads its
    # channel data from a pickle file.
    setLogLevel(logging.DEBUG)
    loadOptions = LoadOptions()
    options2 = loadOptions(filename=
                           payload_file(
                               name='payload_test_pickle_spec.xml'))
    spectrometer = Spectrometer('Spec', options2['channel']['Spec'], options2)
    spectrometer.build()

    def test_spectrometer_table(self):
        # Expected binning: 12 spectral bins with a flat QE of 0.7.
        self.assertEqual(self.spectrometer.table['Wavelength'].size, 12)
        # self.assertListEqual(list(self.spectrometer.table['TR'].value), [0.5] * 12)
        self.assertListEqual(list(self.spectrometer.table['QE'].value),
                             [0.7] * 12)
class InstrumentBuilderTest(unittest.TestCase):
    # Build all channels from the shared payload (no output written).
    setLogLevel(logging.DEBUG)
    buildChannels = BuildChannels()
    channels = buildChannels(payload=options, write=False, output=None)

    def test_builder_dict(self):
        # The payload defines exactly one photometer and one spectrometer.
        self.assertListEqual(list(self.channels.keys()), ['Phot', 'Spec'])
class IOTest(unittest.TestCase):
    """Round-trip test: write built channels to HDF5, then load them back."""

    setLogLevel(logging.INFO)
    buildChannels = BuildChannels()
    fname = 'test.h5'
    with HDF5Output(fname) as o:
        channels_built = buildChannels(payload=options, write=True, output=o)
    # Fix: open read-only with an explicit mode (recent h5py requires it,
    # and the default 'a' mode was not intended here).
    file = h5py.File(fname, 'r')
    loadPayload = LoadPayload()
    payload_loaded, channels_loaded = loadPayload(input=file)

    @classmethod
    def tearDownClass(cls):
        # Fix: close the HDF5 handle (previously leaked) and remove the
        # scratch file here instead of inside one test method, so cleanup
        # no longer depends on test execution order.
        cls.file.close()
        if os.path.exists(cls.fname):
            os.remove(cls.fname)

    def test_instrument_list(self):
        # Loading must reproduce the same channel names that were written.
        self.assertListEqual(list(self.channels_built.keys()),
                             list(self.channels_loaded.keys()))

    def test_instrument_build_from_file(self):
        # Channels loaded from file are frozen: rebuilding must fail.
        with self.assertRaises(ValueError):
            self.channels_loaded['Phot'].build()
        with self.assertRaises(ValueError):
            self.channels_loaded['Spec'].build()
class MergeOutputTest(unittest.TestCase):
    # Build channels once, then verify their outputs can be merged.
    setLogLevel(logging.INFO)
    buildChannels = BuildChannels()
    mergeChannelsOutput = MergeChannelsOutput()
    channels = buildChannels(payload=options, write=False, output=None)
    setLogLevel(logging.DEBUG)

    def test_table_output(self):
        # Merging must succeed; the merged table content is not inspected here.
        table = self.mergeChannelsOutput(channels=self.channels)
|
ExObsSimREPO_NAMEExoRad2-publicPATH_START.@ExoRad2-public_extracted@ExoRad2-public-master@tests@test_instrument.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterternary/marker/line/__init__.py",
"type": "Python"
}
|
# Lazy-import boilerplate for the scatterternary.marker.line validator
# submodules (appears to be produced by plotly's code generator -- keep in
# sync with the generated validators listed below).
import sys
from typing import TYPE_CHECKING

if sys.version_info < (3, 7) or TYPE_CHECKING:
    # Eager imports: required on Python < 3.7 and for static type checkers.
    from ._widthsrc import WidthsrcValidator
    from ._width import WidthValidator
    from ._reversescale import ReversescaleValidator
    from ._colorsrc import ColorsrcValidator
    from ._colorscale import ColorscaleValidator
    from ._coloraxis import ColoraxisValidator
    from ._color import ColorValidator
    from ._cmin import CminValidator
    from ._cmid import CmidValidator
    from ._cmax import CmaxValidator
    from ._cauto import CautoValidator
    from ._autocolorscale import AutocolorscaleValidator
else:
    # On modern Python, defer submodule imports to first attribute access
    # (PEP 562 module __getattr__) to keep plotly's import time down.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._widthsrc.WidthsrcValidator",
            "._width.WidthValidator",
            "._reversescale.ReversescaleValidator",
            "._colorsrc.ColorsrcValidator",
            "._colorscale.ColorscaleValidator",
            "._coloraxis.ColoraxisValidator",
            "._color.ColorValidator",
            "._cmin.CminValidator",
            "._cmid.CmidValidator",
            "._cmax.CmaxValidator",
            "._cauto.CautoValidator",
            "._autocolorscale.AutocolorscaleValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterternary@marker@line@__init__.py@.PATH_END.py
|
{
"filename": "get_aux_data.py",
"repo_name": "LSSTDESC/lsstdesc-diffsky",
"repo_path": "lsstdesc-diffsky_extracted/lsstdesc-diffsky-main/lsstdesc_diffsky/validation/get_aux_data.py",
"type": "Python"
}
|
import os
import re
import numpy as np
from astropy.table import Table
# Default data locations on the ALCF Eagle filesystem; override through the
# function arguments when running elsewhere.
cosmos_dir = '/lus/eagle/projects/LastJourney/kovacs/COSMOS2020'
restcolor_dir = '/lus/eagle/projects/LastJourney/kovacs/ValidationData/RestColorData'
def get_data(restcolor_dir=restcolor_dir, cosmos_dir=cosmos_dir):
    """Assemble the auxiliary validation data: rest-frame color tables
    plus the cut COSMOS2020 galaxy catalog."""
    aux_data = add_rest_color_data({}, restcolor_dir)
    print(aux_data.keys(), aux_data['BC03_rest'].colnames)
    raw_catalog = read_COSMOS2020(cosmos_dir)
    catalog, selection = select_COSMOS2020_data(raw_catalog)
    aux_data['COSMOS2020'] = catalog[selection]
    return aux_data
def add_rest_color_data(aux_data, restcolor_dir, frame='rest'):
    """Load the BC03/Brown rest-frame color tables into *aux_data*.

    Adds one astropy Table per template library, keyed '<name>_<frame>',
    with 'G-R' and 'R-I' columns.
    """
    templates = [('BC03', '{}_39_rest_colors.out'),
                 ('Brown', '{}_129_rest_colors.out')]
    for name, pattern in templates:
        key = '_'.join([name, frame])
        aux_data[key] = Table()
        rcfile = os.path.join(restcolor_dir, pattern.format(name))
        # Columns 3 and 4 of the ascii file hold the two colors.
        colors = np.loadtxt(rcfile, skiprows=1, usecols=(3, 4))
        aux_data[key]['G-R'] = colors[:, 0]
        aux_data[key]['R-I'] = colors[:, 1]
    return aux_data
def read_COSMOS2020(cosmos_dir, cosmos_fn='COSMOS2020_Farmer_processed_hlin.fits',
                    rename=(('_MAG', ''), ('lp_M', 'lp_M_'))):
    """Read the COSMOS2020 Farmer catalog and keep only galaxies.

    Parameters
    ----------
    cosmos_dir : str
        Directory holding the catalog file.
    cosmos_fn : str
        Catalog file name (FITS table in HDU 1).
    rename : iterable of (pattern, replacement) pairs or None
        Column-name substitutions applied for plotting convenience;
        pass None to skip renaming.
        Fix: immutable tuple default instead of a shared mutable list.

    Returns
    -------
    astropy.table.Table
        The galaxy-only subset of the catalog.
    """
    fname = os.path.join(cosmos_dir, cosmos_fn)
    cosmos2020 = Table.read(fname, format='fits', hdu=1)
    # select galaxies (catalog's lp_type flag == 0)
    sel_galaxies = (cosmos2020['lp_type'] == 0)
    cosmos2020_gals = cosmos2020[sel_galaxies]
    print('{} objects'.format(len(cosmos2020_gals)))
    # rename some columns for plotting convenience
    if rename is not None:
        cosmos2020_gals = rename_columns(cosmos2020_gals, rename=rename)
    print(cosmos2020_gals.colnames)
    return cosmos2020_gals
def rename_columns(cosmos2020_gals, rename=(('_MAG', ''), ('lp_M', 'lp_M_'))):
    """Rename catalog columns via sequential substitutions.

    Each (pattern, replacement) pair is applied in order to every column
    whose name contains *pattern* (plain substring test); the rename itself
    uses ``re.sub``, so *pattern* is interpreted as a regular expression
    and all its occurrences in the name are replaced.

    Fix: immutable tuple default instead of a shared mutable list.
    """
    for pattern, repl in rename:
        # Snapshot the matching names first: renaming mutates colnames.
        cols = [c for c in cosmos2020_gals.colnames if pattern in c]
        for c in cols:
            cosmos2020_gals.rename_column(c, re.sub(pattern, repl, c))
    return cosmos2020_gals
def add_cosmos2020_colors(cosmos2020, filter_names, filter_types, bands, minmax=False):
    """Add adjacent-band color columns (b1 - b2) for each filter system.

    Observed-frame ('obs') systems use lower-case band letters; other
    (rest-frame) systems use upper-case ones, matching the column naming.
    Optionally prints each new column's min/max.
    """
    band_pairs = list(zip(bands[:-1], bands[1:]))
    for fname, ftyp in zip(filter_names, filter_types):
        for b1, b2 in band_pairs:
            if ftyp == 'obs':
                c = b1 + '-' + b2
                col1, col2 = fname.format(b1), fname.format(b2)
            else:
                c = b1.upper() + '-' + b2.upper()
                col1, col2 = fname.format(b1.upper()), fname.format(b2.upper())
            if col1 not in cosmos2020.colnames or col2 not in cosmos2020.colnames:
                continue
            colc = fname.format(c)
            cosmos2020[colc] = cosmos2020[col1] - cosmos2020[col2]
            if minmax:
                print('{} min/max = {:.3g}/{:.3g}'.format(c,
                      np.min(cosmos2020[colc]), np.max(cosmos2020[colc])))
    return cosmos2020
def select_COSMOS2020_data(cosmos2020, keyname='COSMOS2020 ({})',
                           selections_max=(('HSC_{}', 26.), ('lp_M_{}', -15.0),
                                           ('HSC_g-r', 2.0), ('HSC_r-i', 1.6),
                                           ('HSC_i-z', 1.2), ('HSC_z-y', 1.0),
                                           ('lp_M_G-R', 1.1), ('lp_M_R-I', 1.2),
                                           ('lp_M_I-Z', 1.0), ('lp_M_Z-Y', 1.2),
                                           ('photoz', 3.0), ('lp_mass_med', 12.5)),
                           selections_min=(('lp_M_{}', -25.0),
                                           ('HSC_g-r', -0.5), ('HSC_r-i', -0.5),
                                           ('HSC_i-z', -0.5), ('HSC_z-y', -1.0),
                                           ('lp_M_G-R', -1.2), ('lp_M_R-I', -1.2),
                                           ('lp_M_I-Z', -0.5), ('lp_M_Z-Y', -0.5),
                                           ('lp_mass_med', 6.0)),
                           filter_names=('HSC_{}', 'lp_M_{}'),
                           filter_types=('obs', 'rest'),
                           bands=('u', 'g', 'r', 'i', 'z', 'y'),
                           ):
    """Compute colors and apply quality cuts to the COSMOS2020 catalog.

    Parameters
    ----------
    cosmos2020 : astropy.table.Table
        Galaxy catalog; color columns are added in place.
    keyname : str
        NOTE(review): unused in this function — presumably kept for
        callers that label the selection; confirm before removing.
    selections_max, selections_min : sequence of (column template, limit)
        Upper / lower cuts, applied per band by ``make_selections``.
        Defaults are immutable tuples (avoids the mutable-default
        pitfall); lists are still accepted.
    filter_names, filter_types, bands
        Passed through to ``add_cosmos2020_colors``.

    Returns
    -------
    (cosmos2020, mask)
        The catalog with color columns added and a boolean selection
        mask of the same length.
    """
    # compute colors
    cosmos2020 = add_cosmos2020_colors(cosmos2020, filter_names, filter_types, bands)
    # make selections: AND together every lower cut, then every upper cut
    mask = np.ones(len(cosmos2020), dtype=bool)
    for select in selections_min:
        mask = make_selections(cosmos2020, select, mask, bands, minimum=True)
    for select in selections_max:
        mask = make_selections(cosmos2020, select, mask, bands, minimum=False)
    print('Selecting {}/{} objects after all cuts\n'.format(np.count_nonzero(mask),
                                                            len(mask)))
    return cosmos2020, mask
def make_selections(cosmos2020, select, mask, bands, minimum=True):
    """AND one (column-template, threshold) cut into ``mask``, per band.

    ``select[0]`` is formatted with each band letter; a negative
    threshold selects the upper-case band letter (used by the rest-frame
    magnitude columns in this module's defaults), a positive one the
    lower-case letter.  Missing columns are skipped.  ``minimum=True``
    keeps values >= threshold, otherwise values <= threshold.  Prints a
    summary per applied cut and returns the updated mask.
    """
    template, threshold = select
    for band in bands:
        label = band if threshold > 0 else band.upper()
        key = template.format(label)
        if key not in cosmos2020.colnames:
            continue
        if minimum:
            dmask = (cosmos2020[key] >= threshold)
            msg = '>='
        else:
            dmask = (cosmos2020[key] <= threshold)
            msg = '<='
        dcount = np.count_nonzero(dmask)
        print('Selecting {}/{} objects with {} {} {:.1f}'.format(dcount,
                                                                 len(dmask),
                                                                 key, msg,
                                                                 select[1]))
        mask = mask & dmask
    return mask
|
LSSTDESCREPO_NAMElsstdesc-diffskyPATH_START.@lsstdesc-diffsky_extracted@lsstdesc-diffsky-main@lsstdesc_diffsky@validation@get_aux_data.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/tests/logprob/__init__.py",
"type": "Python"
}
|
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@tests@logprob@__init__.py@.PATH_END.py
|
{
"filename": "_heatmapgl.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/template/data/_heatmapgl.py",
"type": "Python"
}
|
from plotly.graph_objs import Heatmapgl
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@template@data@_heatmapgl.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/slider/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound ``layout.slider.font`` attribute.

    Auto-generated plotly code: ``data_docs`` below is the user-facing
    help text for the nested ``Font`` object and must stay verbatim.
    """
    def __init__(self, plotly_name="font", parent_name="layout.slider", **kwargs):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size
            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@slider@_font.py@.PATH_END.py
|
{
"filename": "airglow.py",
"repo_name": "spacetelescope/calcos",
"repo_path": "calcos_extracted/calcos-master/calcos/airglow.py",
"type": "Python"
}
|
from __future__ import absolute_import, print_function
from . import cosutil
from . import dispersion
from .calcosparam import * # parameter definitions
# Half width (pixels) of airglow region to be excluded.
# These are applied on each side of the line position by
# findAirglowLimits (x_left - exclude, x_right + exclude).
AIRGLOW_LyA = 250. # Lyman alpha
AIRGLOW_FUV = 100. # anything but Lyman alpha, but still FUV
AIRGLOW_NUV = 30. # any NUV airglow line
# Wavelengths in Angstroms of airglow lines.
# The values in the tuple are the wavelengths of the lines in the multiplet.
# Keys are the identifiers accepted by findAirglowLimits' airglow_line arg.
AIRGLOW_WAVELENGTHS = {"Lyman_alpha": (1215.67,),
                       "N_I_1200": (1199.550, 1200.223, 1200.710),
                       "O_I_1304": (1302.168, 1304.858, 1306.029),
                       "O_I_1356": (1355.598, 1358.512),
                       "N_I_1134": (1134.165, 1134.415, 1134.980)}
# ? "O_I_2973": (2973.154,)}
def findAirglowLimits(info, segment, disptab, airglow_line):
    """Find the pixel region corresponding to a set of airglow lines.
    Parameters
    ----------
    info: dictionary
        Keywords and values.
    segment: str
        Segment or stripe name: "FUVA", "FUVB", "NUVA", "NUVB", "NUVC".
    disptab: str
        Name of reference table for dispersion solution.
    airglow_line: str
        The key for extracting an element from AIRGLOW_WAVELENGTHS.
    Returns
    -------
    tuple (x0, x1) of floats, or None
        x0 and x1 are the left and right pixel numbers of the region
        that should be omitted to avoid contamination by an airglow line.
        These are inclusive limits (pixels), not the elements of a slice.
        None will be returned if the specified line (or multiplet) is off
        the detector, the mode was not found in a reference table, or
        the obstype is not spectroscopic.
    """
    if info["obstype"] != "SPECTROSCOPIC":
        print("Data is not spectroscopic")
        return None
    wl_airglow = AIRGLOW_WAVELENGTHS[airglow_line]
    # Choose the detector axis length and the half-width to exclude.
    if info["detector"] == "FUV":
        axis_length = FUV_X
        if airglow_line == "Lyman_alpha":
            exclude = AIRGLOW_LyA
        else:
            exclude = AIRGLOW_FUV
    else:
        axis_length = NUV_X
        exclude = AIRGLOW_NUV
    # This filter is used for both xtractab and disptab.
    # (named 'filt' so the Python builtin 'filter' is not shadowed)
    filt = {"opt_elem": info["opt_elem"],
            "cenwave": info["cenwave"],
            "segment": segment,
            "aperture": info["aperture"]}
    # currently not necessary: filt["fpoffset"] = info["fpoffset"]
    disp_rel = dispersion.Dispersion(disptab, filt)
    # try/finally guarantees the dispersion relation is closed on every
    # return path (previously close() was repeated before each return).
    try:
        if not disp_rel.isValid():
            cosutil.printWarning("Dispersion relation is not valid; filter is:")
            cosutil.printContinuation(str(filt))
            return None
        min_wl = min(wl_airglow)
        max_wl = max(wl_airglow)
        # First check whether the airglow line is off the detector.
        # NOTE that we assume that wavelength increases with x.
        wl_left_edge = disp_rel.evalDisp(-exclude)
        if max_wl < wl_left_edge:
            return None
        wl_right_edge = disp_rel.evalDisp(axis_length - 1. + exclude)
        if min_wl > wl_right_edge:
            return None
        # x_left and x_right are the pixel coordinates for the minimum
        # and maximum airglow wavelengths in the multiplet.
        x_left = float(disp_rel.evalInvDisp(min_wl, tiny=1.e-8))
        x_right = float(disp_rel.evalInvDisp(max_wl, tiny=1.e-8))
        return (x_left - exclude, x_right + exclude)
    finally:
        disp_rel.close()
|
spacetelescopeREPO_NAMEcalcosPATH_START.@calcos_extracted@calcos-master@calcos@airglow.py@.PATH_END.py
|
{
"filename": "plot_convergence_bondi.py",
"repo_name": "AFD-Illinois/iharm3d",
"repo_path": "iharm3d_extracted/iharm3d-master/script/test/convergence/plot_convergence_bondi.py",
"type": "Python"
}
|
################################################################################
# #
# BONDI INFLOW CONVERGENCE PLOTS #
# #
################################################################################
from __future__ import print_function, division
import plot as bplt
import util
import hdf5_to_dict as io
import os,sys
import numpy as np
import matplotlib.pyplot as plt
# Resolutions to analyze; one L1 error measurement per resolution.
RES = [32, 64, 128, 256]
NVAR = 8  # NOTE(review): unused in this script — confirm before removing
L1 = np.zeros(len(RES))
# RUN PROBLEM FOR EACH RESOLUTION AND ANALYZE RESULT
for m in range(len(RES)):
    os.chdir('../dumps_' + str(RES[m]))
    dfiles = io.get_dumps_list(".")
    hdr, geom, dump0 = io.load_all(dfiles[0])
    dump1 = io.load_dump(dfiles[-1], hdr, geom)
    # Radial coordinate along the theta-midplane (j = n2//2), phi = 0.
    r = geom['r'][:,hdr['n2']//2,0]
    # print("r_eh is {}".format(hdr['r_eh']))
    # Skip zones inside the event horizon radius r_eh.
    imin = 0
    while r[imin] < hdr['r_eh']:
        imin += 1
    # phi-averaged density from the first and last dump; their mean
    # absolute difference is the L1 error (Bondi flow is presumably a
    # steady state, so any drift is numerical — TODO confirm).
    rho0 = np.mean(dump0['RHO'][imin:,:,0], axis=1)
    rho1 = np.mean(dump1['RHO'][imin:,:,0], axis=1)
    L1[m] = np.mean(np.fabs(rho1 - rho0))
# MEASURE CONVERGENCE
# Slope of log(L1) vs log(N); ~ -2 indicates second-order convergence.
powerfit = np.polyfit(np.log(RES), np.log(L1), 1)[0]
print("Powerfit: {} L1: {}".format(powerfit, L1))
os.chdir('../plots/')
# MAKE PLOTS
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
ax.plot(RES, L1, marker='s', label='RHO')
# Reference N^-2 line for comparison with the measured errors.
amp = 1.0e-3
ax.plot([RES[0]/2., RES[-1]*2.],
        10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
        color='k', linestyle='--', label='N^-2')
# NOTE(review): 'basex' was removed in matplotlib 3.5 (renamed 'base');
# this only runs on older matplotlib — confirm the pinned version.
plt.xscale('log', basex=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
plt.title("BONDI")
plt.legend(loc=1)
plt.savefig('bondi.png', bbox_inches='tight')
|
AFD-IllinoisREPO_NAMEiharm3dPATH_START.@iharm3d_extracted@iharm3d-master@script@test@convergence@plot_convergence_bondi.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/sunburst/marker/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
    # Eager imports: required for static type checkers and for
    # Python < 3.7, which lacks module-level __getattr__ (PEP 562).
    from ._colorbar import ColorBar
    from ._line import Line
    from ._pattern import Pattern
    from . import colorbar
else:
    # Lazy imports: relative_import installs __getattr__/__dir__ so the
    # listed submodules/classes are resolved on first attribute access.
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".colorbar"],
        ["._colorbar.ColorBar", "._line.Line", "._pattern.Pattern"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@sunburst@marker@__init__.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/unselected/marker/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``scatter.unselected.marker.size``: a number >= 0.

    Auto-generated plotly code; defaults may be overridden via kwargs.
    """
    def __init__(
        self, plotly_name="size", parent_name="scatter.unselected.marker", **kwargs
    ):
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "style"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@unselected@marker@_size.py@.PATH_END.py
|
{
"filename": "_ticklabelposition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/colorbar/_ticklabelposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabelpositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``isosurface.colorbar.ticklabelposition``.

    Auto-generated plotly code; accepts one of the enumerated
    position strings listed below.
    """
    def __init__(
        self,
        plotly_name="ticklabelposition",
        parent_name="isosurface.colorbar",
        **kwargs,
    ):
        super(TicklabelpositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop(
                "values",
                [
                    "outside",
                    "inside",
                    "outside top",
                    "inside top",
                    "outside left",
                    "inside left",
                    "outside right",
                    "inside right",
                    "outside bottom",
                    "inside bottom",
                ],
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@colorbar@_ticklabelposition.py@.PATH_END.py
|
{
"filename": "_textfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/scatterpolar/_textfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
    """Auto-generated graph object for ``scatterpolar.textfont``.

    Holds the text font properties (color, family, size) and their
    Chart Studio source references (*src counterparts).
    """
    # class properties
    # --------------------
    _parent_path_str = "scatterpolar"
    _path_str = "scatterpolar.textfont"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  color .
        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["colorsrc"]
    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".
        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  family .
        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["familysrc"]
    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  size .
        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["sizesrc"]
    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """
    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Textfont object
        Sets the text font.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatterpolar.Textfont`
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__("textfont")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatterpolar.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.Textfont`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@scatterpolar@_textfont.py@.PATH_END.py
|
{
"filename": "test_inverse_cdf.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/utils/random/tests/test_inverse_cdf.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import scipy.stats as stats
from numpy.testing import assert_allclose
from gammapy.utils.random import InverseCDFSampler
def uniform_dist(x, a, b):
    """Boxcar pdf: 1/(b-a) for a < x < b, 0 at and outside the edges."""
    height = 1 / (b - a)
    outside = [x <= a, x >= b]
    return np.select(outside, [0, 0], height)
def gauss_dist(x, mu, sigma):
    """Normal pdf with mean ``mu`` and standard deviation ``sigma``."""
    return stats.norm.pdf(x, loc=mu, scale=sigma)
def test_uniform_dist_sampling():
    """Samples drawn from a boxcar pdf must reproduce its mean and std."""
    npoints = 1000
    grid = np.linspace(-2, 2, npoints)
    a, b = -1, 1
    pdf = uniform_dist(grid, a=a, b=b)
    sampler = InverseCDFSampler(pdf=pdf, random_state=0)
    idx = sampler.sample(int(1e4))
    samples = np.interp(idx, np.arange(npoints), grid)
    assert_allclose(np.mean(samples), 0.5 * (a + b), atol=0.01)
    assert_allclose(
        np.std(samples), np.sqrt(1 / 3 * (a**2 + a * b + b**2)), rtol=0.01
    )
def test_norm_dist_sampling():
    """Samples drawn from a narrow Gaussian pdf must reproduce mu and sigma."""
    npoints = 1000
    grid = np.linspace(-2, 2, npoints)
    mu, sigma = 0, 0.1
    pdf = gauss_dist(x=grid, mu=mu, sigma=sigma)
    sampler = InverseCDFSampler(pdf=pdf, random_state=0)
    idx = sampler.sample(int(1e5))
    samples = np.interp(idx, np.arange(npoints), grid)
    assert_allclose(np.mean(samples), mu, atol=0.01)
    assert_allclose(np.std(samples), sigma, atol=0.005)
def test_axis_sampling():
    """Row-wise sampling (axis=1) draws one value per stacked pdf."""
    npoints = 1000
    grid = np.linspace(-2, 2, npoints)
    a, b = -1, 1
    pdf_uniform = uniform_dist(grid, a=a, b=b)
    mu, sigma = 0, 0.1
    pdf_gauss = gauss_dist(x=grid, mu=mu, sigma=sigma)
    pdf = np.vstack([pdf_gauss, pdf_uniform])
    sampler = InverseCDFSampler(pdf, random_state=0, axis=1)
    idx = sampler.sample_axis()
    samples = np.interp(idx, np.arange(npoints), grid)
    # reference values from a fixed random_state
    assert_allclose(samples, [0.012266, 0.43081], rtol=1e-4)
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@utils@random@tests@test_inverse_cdf.py@.PATH_END.py
|
{
"filename": "_textsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmapbox/_textsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for ``choroplethmapbox.textsrc``, a source reference.

    Auto-generated plotly code; defaults may be overridden via kwargs.
    """
    def __init__(self, plotly_name="textsrc", parent_name="choroplethmapbox", **kwargs):
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmapbox@_textsrc.py@.PATH_END.py
|
{
"filename": "test_linear.py",
"repo_name": "dmlc/xgboost",
"repo_path": "xgboost_extracted/xgboost-master/tests/python/test_linear.py",
"type": "Python"
}
|
from hypothesis import given, note, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
# Limit every test in this module to 20 seconds.
pytestmark = tm.timeout(20)
# Hypothesis strategy for gblinear booster hyper-parameters.
parameter_strategy = strategies.fixed_dictionaries({
    'booster': strategies.just('gblinear'),
    'eta': strategies.floats(0.01, 0.25),
    'tolerance': strategies.floats(1e-5, 1e-2),
    'nthread': strategies.integers(1, 4),
})
# Strategy for coordinate-descent-specific parameters.
coord_strategy = strategies.fixed_dictionaries({
    'feature_selector': strategies.sampled_from(['cyclic', 'shuffle',
                                                 'greedy', 'thrifty']),
    'top_k': strategies.integers(1, 10),
})
def train_result(param, dmat, num_rounds):
    """Train ``num_rounds`` on ``dmat`` (also used as the eval set) and
    return the recorded evaluation history dict."""
    history = {}
    xgb.train(
        param,
        dmat,
        num_rounds,
        evals=[(dmat, "train")],
        verbose_eval=False,
        evals_result=history,
    )
    return history
class TestLinear:
    """Property-based tests for the gblinear booster updaters
    (coordinate descent and shotgun), with and without regularisation."""
    @given(
        parameter_strategy,
        strategies.integers(10, 50),
        tm.make_dataset_strategy(),
        coord_strategy
    )
    @settings(deadline=None, max_examples=20, print_blob=True)
    def test_coordinate(self, param, num_rounds, dataset, coord_param):
        param['updater'] = 'coord_descent'
        param.update(coord_param)
        param = dataset.set_params(param)
        result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
        note(result)
        # allow a small tolerance (5e-4) for non-monotonic noise
        assert tm.non_increasing(result, 5e-4)
    # Loss is not guaranteed to always decrease because of regularisation parameters
    # We test a weaker condition that the loss has not increased between the first and last
    # iteration
    @given(
        parameter_strategy,
        strategies.integers(10, 50),
        tm.make_dataset_strategy(),
        coord_strategy,
        strategies.floats(1e-5, 0.8),
        strategies.floats(1e-5, 0.8)
    )
    @settings(deadline=None, max_examples=20, print_blob=True)
    def test_coordinate_regularised(self, param, num_rounds, dataset, coord_param, alpha, lambd):
        param['updater'] = 'coord_descent'
        param['alpha'] = alpha
        param['lambda'] = lambd
        param.update(coord_param)
        param = dataset.set_params(param)
        result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
        note(result)
        assert tm.non_increasing([result[0], result[-1]])
    @given(
        parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy()
    )
    @settings(deadline=None, max_examples=20, print_blob=True)
    def test_shotgun(self, param, num_rounds, dataset):
        param['updater'] = 'shotgun'
        param = dataset.set_params(param)
        result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
        note(result)
        # shotgun is non-deterministic, so we relax the test by only using first and last
        # iteration.
        if len(result) > 2:
            sampled_result = (result[0], result[-1])
        else:
            sampled_result = result
        assert tm.non_increasing(sampled_result)
    @given(
        parameter_strategy,
        strategies.integers(10, 50),
        tm.make_dataset_strategy(),
        strategies.floats(1e-5, 1.0),
        strategies.floats(1e-5, 1.0)
    )
    @settings(deadline=None, max_examples=20, print_blob=True)
    def test_shotgun_regularised(self, param, num_rounds, dataset, alpha, lambd):
        param['updater'] = 'shotgun'
        param['alpha'] = alpha
        param['lambda'] = lambd
        param = dataset.set_params(param)
        result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
        note(result)
        assert tm.non_increasing([result[0], result[-1]])
|
dmlcREPO_NAMExgboostPATH_START.@xgboost_extracted@xgboost-master@tests@python@test_linear.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_Cosmo/__init__.py",
"type": "Python"
}
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_Cosmo@__init__.py@.PATH_END.py
|
|
{
"filename": "eventwrapper.py",
"repo_name": "astroufsc/chimera",
"repo_path": "chimera_extracted/chimera-master/src/chimera/core/eventwrapper.py",
"type": "Python"
}
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# chimera - observatory automation system
# Copyright (C) 2006-2007 P. Henrique Silva <henrique@astro.ufsc.br>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from chimera.core.proxy import ProxyMethod
from chimera.core.methodwrapper import MethodWrapperDispatcher
from chimera.core.constants import EVENTS_PROXY_NAME
import copy
__all__ = ["EventWrapperDispatcher"]
class EventWrapperDispatcher(MethodWrapperDispatcher):
    """Dispatcher that turns a wrapped event attribute into a
    publish/subscribe endpoint: calling the event publishes it through
    the instance's events proxy, and ``+=`` / ``-=`` subscribe or
    unsubscribe a remote handler (a ProxyMethod)."""
    def __init__(self, wrapper, instance, cls):
        MethodWrapperDispatcher.__init__(self, wrapper, instance, cls)
    def call(self, *args, **kwargs):
        # Publish through the events proxy when the instance has one.
        # Only args[1:] are forwarded — args[0] is presumably the bound
        # instance itself (TODO confirm against MethodWrapperDispatcher).
        if hasattr(self.instance, EVENTS_PROXY_NAME):
            getattr(self.instance, EVENTS_PROXY_NAME).publish(
                self.func.__name__, *args[1:], **kwargs
            )
        return True
    def __do(self, other, action):
        # Common implementation for __iadd__/__isub__: build a handler
        # record and forward it to the proxy's subscribe/unsubscribe.
        handler = {"topic": self.func.__name__, "handler": {"proxy": "", "method": ""}}
        # REMEMBER: Return a copy of this wrapper as we are using +=
        # Can't add itself as a subscriber
        if other == self.func:
            return copy.copy(self)
        # passing a proxy method?  (anything else is silently ignored)
        if not isinstance(other, ProxyMethod):
            return copy.copy(self)
        handler["handler"]["proxy"] = other.proxy.location
        handler["handler"]["method"] = str(other.__name__)
        if hasattr(self.instance, EVENTS_PROXY_NAME):
            proxy = getattr(self.instance, EVENTS_PROXY_NAME)
            f = getattr(proxy, action)
            f(handler)
        return copy.copy(self)
    def __iadd__(self, other):
        # event += handler  → subscribe
        return self.__do(other, "subscribe")
    def __isub__(self, other):
        # event -= handler  → unsubscribe
        return self.__do(other, "unsubscribe")
|
astroufscREPO_NAMEchimeraPATH_START.@chimera_extracted@chimera-master@src@chimera@core@eventwrapper.py@.PATH_END.py
|
{
"filename": "test__basinhopping.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/optimize/tests/test__basinhopping.py",
"type": "Python"
}
|
"""
Unit tests for the basin hopping global minimization algorithm.
"""
from __future__ import division, print_function, absolute_import
import copy
from numpy.testing import assert_almost_equal, assert_equal, assert_
from pytest import raises as assert_raises
import numpy as np
from numpy import cos, sin
from scipy.optimize import basinhopping, OptimizeResult
from scipy.optimize._basinhopping import (
Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
def func1d(x):
    """1-D multi-well objective; returns (value, derivative)."""
    value = cos(14.5 * x - 0.3) + (x + 0.2) * x
    deriv = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
    return value, deriv
def func1d_nograd(x):
    """Same objective as func1d.  Despite the name it still returns
    (value, derivative); callers presumably just ignore the gradient."""
    value = cos(14.5 * x - 0.3) + (x + 0.2) * x
    deriv = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
    return value, deriv
def func2d_nograd(x):
    """2-D objective; returns the function value only (no gradient)."""
    u, v = x[0], x[1]
    return cos(14.5 * u - 0.3) + (v + 0.2) * v + (u + 0.2) * u
def func2d(x):
    """2-D objective; returns (value, gradient ndarray of length 2)."""
    u, v = x[0], x[1]
    value = cos(14.5 * u - 0.3) + (v + 0.2) * v + (u + 0.2) * u
    grad = np.array([-14.5 * sin(14.5 * u - 0.3) + 2. * u + 0.2,
                     2. * v + 0.2])
    return value, grad
def func2d_easyderiv(x):
    """Convex quadratic with a simple analytic gradient."""
    u, v = x[0], x[1]
    value = 2.0 * u**2 + 2.0 * u * v + 2.0 * v**2 - 6.0 * u
    grad = np.array([4.0 * u + 2.0 * v - 6.0,
                     2.0 * u + 4.0 * v])
    return value, grad
class MyTakeStep1(RandomDisplacement):
    """use a copy of displace, but have it set a special parameter to
    make sure it's actually being used."""
    def __init__(self):
        # flag flipped on first call so tests can verify the custom step ran
        self.been_called = False
        super(MyTakeStep1, self).__init__()
    def __call__(self, x):
        self.been_called = True
        # delegate the actual displacement to RandomDisplacement
        return super(MyTakeStep1, self).__call__(x)
def myTakeStep2(x):
    """Function-form displacement step with no ``stepsize`` attribute.

    Used to check that basinhopping still works when the user-supplied
    take_step callable lacks the adaptive-stepsize attribute.
    """
    half_width = 0.5
    # in-place uniform perturbation in [-half_width, half_width)
    x += np.random.uniform(-half_width, half_width, np.shape(x))
    return x
class MyAcceptTest(object):
    """Custom accept test for basinhopping.

    Records that it was called and cycles through every return type the
    accept-test protocol supports (bools, numpy bools, 'force accept',
    arbitrary truthy/falsy objects), then keeps returning True.
    """

    def __init__(self):
        self.been_called = False
        self.ncalls = 0
        self.testres = [False, 'force accept', True, np.bool_(True),
                        np.bool_(False), [], {}, 0, 1]

    def __call__(self, **kwargs):
        self.been_called = True
        self.ncalls += 1
        idx = self.ncalls - 1
        # Serve the scripted results first, then default to acceptance.
        return self.testres[idx] if idx < len(self.testres) else True
class MyCallBack(object):
    """Custom basinhopping callback.

    Records that it was called and requests early termination by
    returning True on the 10th invocation.
    """

    def __init__(self):
        self.been_called = False
        self.ncalls = 0

    def __call__(self, x, f, accepted):
        self.been_called = True
        self.ncalls += 1
        # Returning True asks basinhopping to stop; otherwise return None.
        return True if self.ncalls == 10 else None
class TestBasinHopping(object):
    """End-to-end tests running `basinhopping` on the 1-D/2-D objectives above."""

    def setup_method(self):
        """ Tests setup.
        Run tests based on the 1-D and 2-D functions described above.
        """
        # x0/sol are indexed by problem: [0] -> 1-D case, [1] -> 2-D case.
        self.x0 = (1.0, [1.0, 1.0])
        self.sol = (-0.195, np.array([-0.195, -0.1]))
        self.tol = 3  # number of decimal places
        self.niter = 100
        self.disp = False
        # fix random seed
        np.random.seed(1234)
        self.kwargs = {"method": "L-BFGS-B", "jac": True}
        self.kwargs_nograd = {"method": "L-BFGS-B"}

    def test_TypeError(self):
        # test the TypeErrors are raised on bad input
        i = 1
        # if take_step is passed, it must be callable
        assert_raises(TypeError, basinhopping, func2d, self.x0[i],
                      take_step=1)
        # if accept_test is passed, it must be callable
        assert_raises(TypeError, basinhopping, func2d, self.x0[i],
                      accept_test=1)

    def test_1d_grad(self):
        # test 1d minimizations with gradient
        i = 0
        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_2d(self):
        # test 2d minimizations with gradient
        i = 1
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        assert_almost_equal(res.x, self.sol[i], self.tol)
        assert_(res.nfev > 0)

    def test_njev(self):
        # test njev is returned correctly
        i = 1
        minimizer_kwargs = self.kwargs.copy()
        # L-BFGS-B doesn't use njev, but BFGS does
        minimizer_kwargs["method"] = "BFGS"
        res = basinhopping(func2d, self.x0[i],
                           minimizer_kwargs=minimizer_kwargs, niter=self.niter,
                           disp=self.disp)
        assert_(res.nfev > 0)
        assert_equal(res.nfev, res.njev)

    def test_jac(self):
        # test jacobian returned
        minimizer_kwargs = self.kwargs.copy()
        # BFGS returns a Jacobian
        minimizer_kwargs["method"] = "BFGS"
        res = basinhopping(func2d_easyderiv, [0.0, 0.0],
                           minimizer_kwargs=minimizer_kwargs, niter=self.niter,
                           disp=self.disp)
        assert_(hasattr(res.lowest_optimization_result, "jac"))
        # in this case, the jacobian is just [df/dx, df/dy]
        _, jacobian = func2d_easyderiv(res.x)
        assert_almost_equal(res.lowest_optimization_result.jac, jacobian, self.tol)

    def test_2d_nograd(self):
        # test 2d minimizations without gradient
        i = 1
        res = basinhopping(func2d_nograd, self.x0[i],
                           minimizer_kwargs=self.kwargs_nograd,
                           niter=self.niter, disp=self.disp)
        assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_all_minimizers(self):
        # test 2d minimizations with gradient. Nelder-Mead, Powell and COBYLA
        # don't accept jac=True, so aren't included here.
        i = 1
        methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
        minimizer_kwargs = copy.copy(self.kwargs)
        for method in methods:
            minimizer_kwargs["method"] = method
            res = basinhopping(func2d, self.x0[i],
                               minimizer_kwargs=minimizer_kwargs,
                               niter=self.niter, disp=self.disp)
            assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_all_nograd_minimizers(self):
        # test 2d minimizations without gradient. Newton-CG requires jac=True,
        # so not included here.
        i = 1
        methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
                   'Nelder-Mead', 'Powell', 'COBYLA']
        minimizer_kwargs = copy.copy(self.kwargs_nograd)
        for method in methods:
            minimizer_kwargs["method"] = method
            res = basinhopping(func2d_nograd, self.x0[i],
                               minimizer_kwargs=minimizer_kwargs,
                               niter=self.niter, disp=self.disp)
            tol = self.tol
            if method == 'COBYLA':
                # COBYLA converges less tightly; loosen the tolerance
                tol = 2
            assert_almost_equal(res.x, self.sol[i], decimal=tol)

    def test_pass_takestep(self):
        # test that passing a custom takestep works
        # also test that the stepsize is being adjusted
        takestep = MyTakeStep1()
        initial_step_size = takestep.stepsize
        i = 1
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp,
                           take_step=takestep)
        assert_almost_equal(res.x, self.sol[i], self.tol)
        assert_(takestep.been_called)
        # make sure that the built in adaptive step size has been used
        assert_(initial_step_size != takestep.stepsize)

    def test_pass_simple_takestep(self):
        # test that passing a custom takestep without attribute stepsize
        takestep = myTakeStep2
        i = 1
        res = basinhopping(func2d_nograd, self.x0[i],
                           minimizer_kwargs=self.kwargs_nograd,
                           niter=self.niter, disp=self.disp,
                           take_step=takestep)
        assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_pass_accept_test(self):
        # test passing a custom accept test
        # makes sure it's being used and ensures all the possible return values
        # are accepted.
        accept_test = MyAcceptTest()
        i = 1
        # there's no point in running it more than a few steps.
        basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                     niter=10, disp=self.disp, accept_test=accept_test)
        assert_(accept_test.been_called)

    def test_pass_callback(self):
        # test passing a custom callback function
        # This makes sure it's being used. It also returns True after 10 steps
        # to ensure that it's stopping early.
        callback = MyCallBack()
        i = 1
        # there's no point in running it more than a few steps.
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=30, disp=self.disp, callback=callback)
        assert_(callback.been_called)
        assert_("callback" in res.message[0])
        # the callback requested a stop after exactly 10 iterations
        assert_equal(res.nit, 10)

    def test_minimizer_fail(self):
        # test if a minimizer fails
        i = 1
        # maxiter=0 forces every local minimization to fail
        self.kwargs["options"] = dict(maxiter=0)
        self.niter = 10
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        # the number of failed minimizations should be the number of
        # iterations + 1
        assert_equal(res.nit + 1, res.minimization_failures)

    def test_niter_zero(self):
        # gh5915, what happens if you call basinhopping with niter=0
        i = 0
        # result is unused; the test only checks that no exception is raised
        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=0, disp=self.disp)

    def test_seed_reproducibility(self):
        # seed should ensure reproducibility between runs
        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
        f_1 = []
        def callback(x, f, accepted):
            f_1.append(f)
        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
                     niter=10, callback=callback, seed=10)
        f_2 = []
        def callback2(x, f, accepted):
            f_2.append(f)
        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
                     niter=10, callback=callback2, seed=10)
        # identical seeds must reproduce the identical sequence of minima
        assert_equal(np.array(f_1), np.array(f_2))
class Test_Storage(object):
    """Unit tests for the Storage helper that tracks the lowest minimum."""

    def setup_method(self):
        # Seed the storage with a known (x, f) minimum.
        self.x0 = np.array(1)
        self.f0 = 0
        seed_res = OptimizeResult()
        seed_res.x = self.x0
        seed_res.fun = self.f0
        self.storage = Storage(seed_res)

    def test_higher_f_rejected(self):
        # a candidate with larger f must not replace the stored minimum
        candidate = OptimizeResult()
        candidate.x = self.x0 + 1
        candidate.fun = self.f0 + 1
        accepted = self.storage.update(candidate)
        lowest = self.storage.get_lowest()
        assert_equal(self.x0, lowest.x)
        assert_equal(self.f0, lowest.fun)
        assert_(not accepted)

    def test_lower_f_accepted(self):
        # a candidate with smaller f replaces the stored minimum
        candidate = OptimizeResult()
        candidate.x = self.x0 + 1
        candidate.fun = self.f0 - 1
        accepted = self.storage.update(candidate)
        lowest = self.storage.get_lowest()
        assert_(self.x0 != lowest.x)
        assert_(self.f0 != lowest.fun)
        assert_(accepted)
class Test_RandomDisplacement(object):
    """Statistical sanity check of the RandomDisplacement step."""

    def setup_method(self):
        self.stepsize = 1.0
        self.displace = RandomDisplacement(stepsize=self.stepsize)
        self.N = 300000
        self.x0 = np.zeros([self.N])

    def test_random(self):
        # Displacements should look like U(-s, s): mean 0 and
        # variance (2*s)**2 / 12. The check is statistical, so it can
        # fail from time to time.
        displaced = self.displace(self.x0)
        expected_var = (2. * self.stepsize) ** 2 / 12
        assert_almost_equal(np.mean(displaced), 0., 1)
        assert_almost_equal(np.var(displaced), expected_var, 1)
class Test_Metropolis(object):
    """Unit tests for the Metropolis acceptance criterion."""

    def setup_method(self):
        self.T = 2.
        self.met = Metropolis(self.T)

    def test_boolean_return(self):
        # basinhopping requires the criterion to return a plain bool,
        # otherwise an error is raised downstream
        result = self.met(f_new=0., f_old=1.)
        assert isinstance(result, bool)

    def test_lower_f_accepted(self):
        # a downhill move is always accepted
        assert_(self.met(f_new=0., f_old=1.))

    def test_KeyError(self):
        # both f_old and f_new are mandatory keyword arguments
        assert_raises(KeyError, self.met, f_old=1.)
        assert_raises(KeyError, self.met, f_new=1.)

    def test_accept(self):
        # uphill moves (f_new > f_old) must be accepted only some of the
        # time: observe at least one acceptance and one rejection
        seen_accept = seen_reject = False
        for _ in range(1000):
            if seen_accept and seen_reject:
                break
            if self.met(f_new=1., f_old=0.5):
                seen_accept = True
            else:
                seen_reject = True
        assert_(seen_accept)
        assert_(seen_reject)

    def test_GH7495(self):
        # regression test: an overflow in exp used to produce a
        # RuntimeWarning; use a fresh object in case self.T was changed
        met = Metropolis(2)
        with np.errstate(over='raise'):
            met.accept_reject(0, 2000)
class Test_AdaptiveStepsize(object):
    """Unit tests for the AdaptiveStepsize wrapper around a take-step."""

    def setup_method(self):
        self.stepsize = 1.
        self.ts = RandomDisplacement(stepsize=self.stepsize)
        self.target_accept_rate = 0.5
        self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
                                         accept_rate=self.target_accept_rate)

    def test_adaptive_increase(self):
        # when almost every step is accepted, the stepsize should grow
        x = 0.
        self.takestep(x)
        self.takestep.report(False)
        for _ in range(self.takestep.interval):
            self.takestep(x)
            self.takestep.report(True)
        assert_(self.ts.stepsize > self.stepsize)

    def test_adaptive_decrease(self):
        # when almost every step is rejected, the stepsize should shrink
        x = 0.
        self.takestep(x)
        self.takestep.report(True)
        for _ in range(self.takestep.interval):
            self.takestep(x)
            self.takestep.report(False)
        assert_(self.ts.stepsize < self.stepsize)

    def test_all_accepted(self):
        # nothing breaks when every single step is accepted
        x = 0.
        for _ in range(self.takestep.interval + 1):
            self.takestep(x)
            self.takestep.report(True)
        assert_(self.ts.stepsize > self.stepsize)

    def test_all_rejected(self):
        # nothing breaks when every single step is rejected
        x = 0.
        for _ in range(self.takestep.interval + 1):
            self.takestep(x)
            self.takestep.report(False)
        assert_(self.ts.stepsize < self.stepsize)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@optimize@tests@test__basinhopping.py@.PATH_END.py
|
{
"filename": "learnlet_training.py",
"repo_name": "utsav-akhaury/understanding-unets",
"repo_path": "understanding-unets_extracted/understanding-unets-master/learning_wavelets/training_scripts/learnlet_training.py",
"type": "Python"
}
|
import os
import os.path as op
import time
import click
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler
import tensorflow as tf
from learning_wavelets.config import LOGS_DIR, CHECKPOINTS_DIR
from learning_wavelets.data.datasets import im_dataset_div2k, im_dataset_bsd500
from learning_wavelets.keras_utils.normalisation import NormalisationAdjustment
from learning_wavelets.models.learned_wavelet import learnlet
tf.random.set_seed(1)
@click.command()
@click.option(
    'noise_std_train',
    '--ns-train',
    nargs=2,
    default=(0, 55),
    type=float,
    help='The noise standard deviation range for the training set. Defaults to [0, 55]',
)
@click.option(
    'noise_std_val',
    '--ns-val',
    default=30,
    type=float,
    help='The noise standard deviation for the validation set. Defaults to 30',
)
@click.option(
    'n_samples',
    '-n',
    default=None,
    type=int,
    help='The number of samples to use for training. Defaults to None, which means that all samples are used.',
)
@click.option(
    'source',
    '-s',
    default='bsd500',
    type=click.Choice(['bsd500', 'div2k'], case_sensitive=False),
    help='The dataset you wish to use for training and validation, between bsd500 and div2k. Defaults to bsd500',
)
@click.option(
    'cuda_visible_devices',
    '-gpus',
    '--cuda-visible-devices',
    default='0123',
    type=str,
    help='The visible GPU devices. Defaults to 0123',
)
@click.option(
    'denoising_activation',
    '-da',
    '--denoising-activation',
    default='dynamic_soft_thresholding',
    type=click.Choice([
        'dynamic_soft_thresholding',
        'dynamic_hard_thresholding',
        'dynamic_soft_thresholding_per_filter',
        'cheeky_dynamic_hard_thresholding'
    ], case_sensitive=False),
    help='The denoising activation to use. Defaults to dynamic_soft_thresholding',
)
@click.option(
    'n_filters',
    '-nf',
    '--n-filters',
    default=256,
    type=int,
    help='The number of filters in the learnlets. Defaults to 256.',
)
@click.option(
    'decreasing_noise_level',
    '--decr-n-lvl',
    is_flag=True,
    help='Set if you want the noise level distribution to be non uniform, skewed towards low value.',
)
def train_learnlet(noise_std_train, noise_std_val, n_samples, source, cuda_visible_devices, denoising_activation, n_filters, decreasing_noise_level):
    """Train a learnlet denoising model on BSD500 or DIV2K image patches.

    All parameters come from the click options above. Checkpoints are
    written under CHECKPOINTS_DIR and TensorBoard logs under LOGS_DIR.
    """
    # '0123' -> '0,1,2,3': each character of the option is one GPU index.
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(cuda_visible_devices)
    # data preparation
    batch_size = 8
    if source == 'bsd500':
        data_func = im_dataset_bsd500
    elif source == 'div2k':
        data_func = im_dataset_div2k
    im_ds_train = data_func(
        mode='training',
        batch_size=batch_size,
        patch_size=256,
        noise_std=noise_std_train,
        return_noise_level=True,
        n_samples=n_samples,
        decreasing_noise_level=decreasing_noise_level,
    )
    im_ds_val = data_func(
        mode='validation',
        batch_size=batch_size,
        patch_size=256,
        noise_std=noise_std_val,
        return_noise_level=True,
    )
    # model hyper-parameters forwarded verbatim to the learnlet constructor
    run_params = {
        'denoising_activation': denoising_activation,
        'learnlet_analysis_kwargs':{
            'n_tiling': n_filters,
            'mixing_details': False,
            'skip_connection': True,
            'kernel_size': 11,
        },
        'learnlet_synthesis_kwargs': {
            'res': True,
            'kernel_size': 13,
        },
        'wav_type': 'starlet',
        'n_scales': 5,
        'clip': False,
    }
    n_epochs = 500
    # unique run identifier shared by the checkpoint files and the TB logs
    run_id = f'learnlet_dynamic_{n_filters}_{denoising_activation}_{source}_{noise_std_train[0]}_{noise_std_train[1]}_{n_samples}_{int(time.time())}'
    chkpt_path = f'{CHECKPOINTS_DIR}checkpoints/{run_id}' + '-{epoch:02d}.hdf5'
    print(run_id)
    # halve the learning rate every 25 epochs, floored at 1e-5
    def l_rate_schedule(epoch):
        return max(1e-3 / 2**(epoch//25), 1e-5)
    lrate_cback = LearningRateScheduler(l_rate_schedule)
    # period=n_epochs: a checkpoint is only written at the very end of
    # training (NOTE(review): `period` is deprecated in newer Keras —
    # confirm before upgrading TensorFlow)
    chkpt_cback = ModelCheckpoint(chkpt_path, period=n_epochs, save_weights_only=False)
    log_dir = op.join(f'{LOGS_DIR}logs', run_id)
    tboard_cback = TensorBoard(
        log_dir=log_dir,
        histogram_freq=0,
        write_graph=False,
        write_images=False,
        profile_batch=0,
    )
    norm_cback = NormalisationAdjustment(momentum=0.99, n_pooling=5)
    # expose on_batch_end under the on_train_batch_end name —
    # presumably for compatibility with the newer Keras callback API
    norm_cback.on_train_batch_end = norm_cback.on_batch_end
    n_channels = 1
    # run distributed
    mirrored_strategy = tf.distribute.MirroredStrategy()
    with mirrored_strategy.scope():
        model = learnlet(input_size=(None, None, n_channels), lr=1e-3, **run_params)
    print(model.summary(line_length=114))
    model.fit(
        im_ds_train,
        steps_per_epoch=200,
        epochs=n_epochs,
        validation_data=im_ds_val,
        validation_steps=1,
        verbose=0,
        callbacks=[tboard_cback, chkpt_cback, norm_cback, lrate_cback],
        shuffle=False,
    )


if __name__ == '__main__':
    train_learnlet()
|
utsav-akhauryREPO_NAMEunderstanding-unetsPATH_START.@understanding-unets_extracted@understanding-unets-master@learning_wavelets@training_scripts@learnlet_training.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "nickhand/pyRSD",
"repo_path": "pyRSD_extracted/pyRSD-master/pyRSD/rsdfit/plotly/__init__.py",
"type": "Python"
}
|
try:
import plotly.offline as py
import plotly.graph_objs as go
import plotly.tools as pytools
except ImportError as e:
raise ImportError("install 'plotly' to use interactive plotting features")
def enable_latex():
    """
    A workaround in :mod:`plotly` where latex labels are broken in notebooks.
    See also: https://github.com/plotly/plotly.py/issues/515
    """
    from IPython.core.display import display, HTML
    # Inject a script that polls until Plotly is loaded, then switches
    # MathJax to the SVG renderer so LaTeX axis labels render correctly.
    display(HTML(
        '<script>'
        'var waitForPlotly = setInterval( function() {'
        'if( typeof(window.Plotly) !== "undefined" ){'
        'MathJax.Hub.Config({ SVG: { font: "STIX-Web" }, displayAlign: "center" });'
        'MathJax.Hub.Queue(["setRenderer", MathJax.Hub, "SVG"]);'
        'clearInterval(waitForPlotly);'
        '}}, 250 );'
        '</script>'
    ))
    # re-initialise plotly's offline notebook mode after the MathJax tweak
    py.init_notebook_mode(connected=True)
from .fit import plot_fit_comparison
from .mcmc import jointplot_2d, hist_1d, plot_traces, plot_triangle
|
nickhandREPO_NAMEpyRSDPATH_START.@pyRSD_extracted@pyRSD-master@pyRSD@rsdfit@plotly@__init__.py@.PATH_END.py
|
{
"filename": "_showexponent.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolargl/marker/colorbar/_showexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``showexponent`` enumerated property of
    ``scatterpolargl.marker.colorbar`` (one of "all", "first", "last", "none")."""

    def __init__(
        self,
        plotly_name="showexponent",
        parent_name="scatterpolargl.marker.colorbar",
        **kwargs
    ):
        # The generated callers may override any of these through kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", ["all", "first", "last", "none"])
        super(ShowexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolargl@marker@colorbar@_showexponent.py@.PATH_END.py
|
{
"filename": "map_defun.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/data/experimental/ops/map_defun.py",
"type": "Python"
}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for optimizing `tf.data` pipelines."""
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
def map_defun(fn,
              elems,
              output_dtypes,
              output_shapes,
              max_intra_op_parallelism=1):
  """Map a function on the list of tensors unpacked from `elems` on dimension 0.

  Args:
    fn: A function (`function.defun`) that takes a list of tensors and returns
      another list of tensors. The output list has the same types as
      output_dtypes. The elements of the output list have the same dimension 0
      as `elems`, and the remaining dimensions correspond to those of
      `fn_output_shapes`.
    elems: A list of tensors.
    output_dtypes: A list of dtypes corresponding to the output types of the
      function.
    output_shapes: A list of `TensorShape`s corresponding to the output shapes
      from each invocation of the function on slices of inputs.
    max_intra_op_parallelism: An integer. If positive, sets the max parallelism
      limit of each function call to this.

  Raises:
    ValueError: if any of the inputs are malformed.

  Returns:
    A list of `Tensor` objects with the same types as `output_dtypes`.
  """
  # Validate the containers up front so callers get a clear error before
  # any tensor conversion happens.
  if not isinstance(elems, list):
    raise ValueError(f"`elems` must be a list of tensors, but was {elems}.")
  if not isinstance(output_dtypes, list):
    raise ValueError("`output_dtypes` must be a list of `tf.DType` objects, "
                     f"but was {output_dtypes}.")
  if not isinstance(output_shapes, list):
    raise ValueError("`output_shapes` must be a list of `tf.TensorShape` "
                     f"objects, but was {output_shapes}.")

  concrete_fn = fn.get_concrete_function()  # pylint: disable=protected-access
  # TODO(shivaniagrawal/rachelim): what about functions created without
  # input_signature.
  tensor_elems = [ops.convert_to_tensor(elem) for elem in elems]
  shape_objs = [tensor_shape.TensorShape(shape) for shape in output_shapes]
  return gen_dataset_ops.map_defun(tensor_elems, concrete_fn.captured_inputs,
                                   output_dtypes, shape_objs, concrete_fn,
                                   max_intra_op_parallelism)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@data@experimental@ops@map_defun.py@.PATH_END.py
|
{
"filename": "bouwens2017.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/input/litdata/bouwens2017.py",
"type": "Python"
}
|
"""
Bouwens et al., 2017, ApJ, 843, 129
Table 4 and volume estimate from text.
"""
# Citation metadata consumed by the ares litdata machinery.
info = \
{
 'reference': 'Bouwens et al., 2017, ApJ, 843, 129',
 'data': 'Table 5',
 'label': 'Bouwens+ (2017)'
}

import numpy as np

# Redshift bins available in this compilation (z ~ 6 only).
redshifts = [6.]
wavelength = 1600. # I think? (rest-frame wavelength — confirm against paper)

# Sentinel value marking an upper limit in the 'err' entries.
ULIM = -1e10

# Raw luminosity-function table: absolute magnitudes M, number densities
# phi, and (upper, lower) error pairs, straight from the paper.
tmp_data = {}
tmp_data['lf'] = \
{
 6.0: {'M': list(np.arange(-20.75, -12.25, 0.5)),
       'phi': [0.0002, 0.0009, 0.0007, 0.0018, 0.0036,
               0.0060, 0.0071, 0.0111, 0.0170, 0.0142,
               0.0415, 0.0599, 0.0817, 0.1052, 0.1275,
               0.1464, 0.1584],
       'err': [(0.0002, 0.0002), (0.0004, 0.0004),
               (0.0004, 0.0004), (0.0006, 0.0006),
               (0.0009, 0.0009), (0.0012, 0.0012),
               (0.0066, 0.0014), (0.0101, 0.0022),
               (0.0165, 0.0039), (0.0171, 0.0054),
               (0.0354, 0.0069), (0.0757, 0.0106),
               (0.1902, 0.0210), (0.5414, 0.0434),
               (1.6479, 0.0747), (5.4369, 0.1077),
               (19.8047, 0.1343)],
      },
}

units = {'lf': 1.}

# Repackage tmp_data into masked arrays: rows whose error equals ULIM
# (i.e. upper limits) are masked out of M and phi.
data = {}
data['lf'] = {}
for key in tmp_data['lf']:
    #mask = np.array(tmp_data['lf'][key]['err']) == ULIM
    N = len(tmp_data['lf'][key]['M'])
    # element-wise comparison; the err entries are tuples, so compare
    # each against the scalar sentinel individually
    mask = np.array([tmp_data['lf'][key]['err'][i] == ULIM for i in range(N)])
    data['lf'][key] = {}
    data['lf'][key]['M'] = np.ma.array(tmp_data['lf'][key]['M'], mask=mask)
    data['lf'][key]['phi'] = np.ma.array(tmp_data['lf'][key]['phi'], mask=mask)
    data['lf'][key]['err'] = tmp_data['lf'][key]['err']
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@input@litdata@bouwens2017.py@.PATH_END.py
|
{
"filename": "securitymethods.py",
"repo_name": "astropy/pyvo",
"repo_path": "pyvo_extracted/pyvo-main/pyvo/auth/securitymethods.py",
"type": "Python"
}
|
# IVOA Single-Sign-On (SSO) security-method identifiers. These URIs tag
# which authentication mechanism a credential applies to.
ANONYMOUS = 'anonymous'
BASIC = 'ivo://ivoa.net/sso#BasicAA'
CLIENT_CERTIFICATE = 'ivo://ivoa.net/sso#tls-with-certificate'
COOKIE = 'ivo://ivoa.net/sso#cookie'
|
astropyREPO_NAMEpyvoPATH_START.@pyvo_extracted@pyvo-main@pyvo@auth@securitymethods.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "ahmedfgad/GeneticAlgorithmPython",
"repo_path": "GeneticAlgorithmPython_extracted/GeneticAlgorithmPython-master/setup.py",
"type": "Python"
}
|
import setuptools

# Read the long description from the README so PyPI renders the project page.
# Explicit UTF-8: the platform default encoding would break on Windows for
# any non-ASCII character in the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="pygad",
    version="3.3.1",
    author="Ahmed Fawzy Gad",
    install_requires=["numpy", "matplotlib", "cloudpickle",],
    author_email="ahmed.f.gad@gmail.com",
    # Fixed user-facing typo: "Algoithms" -> "Algorithms".
    description="PyGAD: A Python Library for Building the Genetic Algorithm and Training Machine Learning Algorithms (Keras & PyTorch).",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ahmedfgad/GeneticAlgorithmPython",
    packages=setuptools.find_packages())
|
ahmedfgadREPO_NAMEGeneticAlgorithmPythonPATH_START.@GeneticAlgorithmPython_extracted@GeneticAlgorithmPython-master@setup.py@.PATH_END.py
|
{
"filename": "pyright_diff.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/scripts/pyright_diff.py",
"type": "Python"
}
|
import json
import sys
from typing import Any, Dict, NamedTuple
class Diagnostic(NamedTuple):
    """Structured representation of a diagnostic for easier table formatting."""

    file: str        # path of the file the diagnostic was reported in
    line: int        # start line of the diagnostic range (as reported by pyright)
    character: int   # start character/column of the diagnostic range
    severity: str    # pyright severity string, e.g. "error"
    message: str     # human-readable diagnostic message
def normalize_diagnostic(diagnostic: Dict[Any, Any]) -> Dict[Any, Any]:
    """Normalize a diagnostic by removing or standardizing volatile fields.

    Returns a shallow copy; the input dict is left untouched.
    """
    stable = dict(diagnostic)
    # These fields change between runs and would make equal diagnostics
    # compare unequal.
    for volatile_key in ("time", "version"):
        stable.pop(volatile_key, None)
    return stable
def load_and_normalize_file(file_path: str) -> Dict[Any, Any]:
    """Load a JSON file and normalize its contents.

    Args:
        file_path: Path to a pyright JSON output file.

    Returns:
        The parsed top-level JSON object with volatile fields
        (``time``, ``version``) removed.
    """
    # JSON is UTF-8 by specification; be explicit so the platform default
    # encoding (e.g. cp1252 on Windows) cannot corrupt the read.
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    return normalize_diagnostic(data)
def parse_diagnostic(diag: Dict[Any, Any]) -> Diagnostic:
    """Convert a diagnostic dict into a Diagnostic object.

    Missing fields fall back to sentinel defaults so malformed entries
    still produce a well-formed row.
    """
    range_start = diag.get("range", {}).get("start", {})
    return Diagnostic(
        file=diag.get("file", "unknown_file"),
        line=range_start.get("line", 0),
        character=range_start.get("character", 0),
        severity=diag.get("severity", "unknown"),
        message=diag.get("message", "no message"),
    )
def format_markdown_table(diagnostics: list[Diagnostic]) -> str:
    """Format list of diagnostics as a markdown table.

    Rows are sorted by (file, line, character) for a stable diff-friendly
    output.
    """
    if not diagnostics:
        return "\nNo new errors found!"

    rows = ["| File | Location | Message |", "|------|----------|---------|"]
    ordered = sorted(diagnostics, key=lambda d: (d.file, d.line, d.character))
    for diag in ordered:
        # Escape pipe characters and replace newlines with HTML breaks
        safe_message = diag.message.replace("|", "\\|").replace("\n", "<br>")
        rows.append(f"| {diag.file} | L{diag.line}:{diag.character} | {safe_message} |")
    return "\n".join(rows)
def compare_pyright_outputs(base_file: str, new_file: str) -> None:
    """Compare two pyright JSON output files and display only new errors."""
    base_data = load_and_normalize_file(base_file)
    new_data = load_and_normalize_file(new_file)

    base_errors: set = set()
    candidate_errors: set = set()

    # Collect error-severity diagnostics from the type-completeness symbols
    # of each report into its own set.
    for data, bucket in ((base_data, base_errors), (new_data, candidate_errors)):
        for symbol in data.get("typeCompleteness", {}).get("symbols", []):
            for diag in symbol.get("diagnostics", []):
                if diag.get("severity", "") == "error":
                    bucket.add(parse_diagnostic(diag))

    # Anything present in the candidate but not the baseline is "new".
    print("\n## New Pyright Errors\n")
    print(format_markdown_table(list(candidate_errors - base_errors)))
if __name__ == "__main__":
    # CLI entry point: expects exactly two pyright JSON files,
    # the baseline first and the candidate second.
    if len(sys.argv) != 3:
        print("Usage: python pyright_diff.py <base.json> <new.json>")
        sys.exit(1)
    compare_pyright_outputs(sys.argv[1], sys.argv[2])
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@scripts@pyright_diff.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sbi-dev/sbi",
"repo_path": "sbi_extracted/sbi-main/sbi/inference/trainers/npe/__init__.py",
"type": "Python"
}
|
from sbi.inference.trainers.npe.npe_a import NPE_A # noqa: F401
from sbi.inference.trainers.npe.npe_b import NPE_B # noqa: F401
from sbi.inference.trainers.npe.npe_base import PosteriorEstimator # noqa: F401
from sbi.inference.trainers.npe.npe_c import NPE_C # noqa: F401
# Backwards-compatible aliases from when the trainers carried the
# "sequential" SNPE_* names; each alias must point at its own variant.
SNPE_A = NPE_A
SNPE_B = NPE_B  # bug fix: was incorrectly aliased to NPE_C (NPE_B was imported but unused)
SNPE_C = SNPE = NPE = NPE_C
|
sbi-devREPO_NAMEsbiPATH_START.@sbi_extracted@sbi-main@sbi@inference@trainers@npe@__init__.py@.PATH_END.py
|
{
"filename": "generate_Pk_example.py",
"repo_name": "yacobozdalkiran/CLASS_mod",
"repo_path": "CLASS_mod_extracted/CLASS_mod-main/class_public-master/external/external_Pk/generate_Pk_example.py",
"type": "Python"
}
|
#!/usr/bin/python
from __future__ import print_function
import sys
from math import exp  # unused in this example; kept for user-edited P(k) formulas

# README:
#
# This is an example python script for the external_Pk mode of Class.
# It generates the primordial spectrum of LambdaCDM.
# It can be edited and used directly, though keeping a copy of it is recommended.
#
# Two (maybe three) things need to be edited:
#
# 1. The name of the parameters needed for the calculation of Pk.
# "sys.argv[1]" corresponds to "custom1" in Class, an so on
try :
    k_0 = float(sys.argv[1])   # pivot scale (used in k/k_0 below)
    A = float(sys.argv[2])     # spectrum amplitude
    n_s = float(sys.argv[3])   # scalar spectral index
# Error control, no need to touch
except IndexError :
    raise IndexError("It seems you are calling this script with too few arguments.")
except ValueError :
    raise ValueError("It seems some of the arguments are not correctly formatted. "+
                     "Remember that they must be floating point numbers.")

# 2. The function giving P(k), including the necessary import statements.
# Inside this function, you can use the parameters named in the previous step.
def P(k) :
    # standard power-law primordial spectrum: A * (k/k_0)^(n_s - 1)
    return A * (k/k_0)**(n_s-1.)

# 3. Limits for k and precision:
# Check that the boundaries are correct for your case.
# It is safer to set k_per_decade primordial slightly bigger than that of Class.
k_min = 1.e-6
k_max = 10.
k_per_decade_primordial = 200.

#
# And nothing should need to be edited from here on.
#

# Filling the array of k's
# (logarithmically spaced; the loop intentionally emits one point past k_max)
ks = [float(k_min)]
while ks[-1] <= float(k_max) :
    ks.append(ks[-1]*10.**(1./float(k_per_decade_primordial)))

# Filling the array of Pk's
for k in ks :
    P_k = P(k)
    # two-column output read back by CLASS: k and P(k), full double precision
    print("%.18g %.18g" % (k, P_k))
|
yacobozdalkiranREPO_NAMECLASS_modPATH_START.@CLASS_mod_extracted@CLASS_mod-main@class_public-master@external@external_Pk@generate_Pk_example.py@.PATH_END.py
|
{
"filename": "run_and_gather_logs_lib.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/tools/test/run_and_gather_logs_lib.py",
"type": "Python"
}
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
import os
import re
import shlex
import subprocess
import tempfile
import time
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import gfile
from tensorflow.tools.test import gpu_info_lib
from tensorflow.tools.test import system_info_lib
class MissingLogsError(Exception):
  """Raised when expected benchmark log files could not be found."""
  pass
def get_git_commit_sha():
  """Get git commit SHA for this build.

  The SHA is read from the GIT_COMMIT environment variable, which should
  be set on Jenkins build agents.

  Returns:
    SHA hash of the git commit used for the build, or None if unavailable.
  """
  return os.environ.get("GIT_COMMIT")
def process_test_logs(name, test_name, test_args, benchmark_type,
                      start_time, run_time, log_files):
  """Gather test information and put it in a TestResults proto.

  Args:
    name: Benchmark target identifier.
    test_name: A unique bazel target, e.g. "//path/to:test"
    test_args: A string containing all arguments to run the target with.
    benchmark_type: A string representing the BenchmarkType enum; the
      benchmark type for this target.
    start_time: Test starting time (epoch)
    run_time: Wall time that the test ran for
    log_files: Paths to the log files

  Returns:
    A TestResults proto
  """
  results = test_log_pb2.TestResults()
  results.name = name
  results.target = test_name
  results.start_time = start_time
  results.run_time = run_time
  # Map the benchmark-type string onto the proto enum (case-insensitive).
  results.benchmark_type = test_log_pb2.TestResults.BenchmarkType.Value(
      benchmark_type.upper())

  # Gather source code information
  git_sha = get_git_commit_sha()
  if git_sha:
    results.commit_id.hash = git_sha

  results.entries.CopyFrom(process_benchmarks(log_files))
  # NOTE(review): extend() iterates its argument — if test_args really is a
  # single string (as the docstring says) this appends one entry per
  # character; confirm callers pass a list of argument strings.
  results.run_configuration.argument.extend(test_args)
  results.machine_configuration.CopyFrom(
      system_info_lib.gather_machine_configuration())
  return results
def process_benchmarks(log_files):
  """Merge serialized benchmark entries from the given log files.

  Args:
    log_files: paths to files each containing a serialized
      test_log_pb2.BenchmarkEntries proto.

  Returns:
    A single BenchmarkEntries proto with all files' entries merged in.
  """
  benchmarks = test_log_pb2.BenchmarkEntries()
  for f in log_files:
    content = gfile.GFile(f, "rb").read()
    # MergeFromString returns the number of bytes consumed; consuming
    # fewer than len(content) means the file was not a valid proto.
    if benchmarks.MergeFromString(content) != len(content):
      raise Exception("Failed parsing benchmark entry from %s" % f)
  return benchmarks
def run_and_gather_logs(name,
                        test_name,
                        test_args,
                        benchmark_type,
                        skip_processing_logs=False):
  """Run the bazel test given by test_name. Gather and return the logs.

  Args:
    name: Benchmark target identifier.
    test_name: A unique bazel target, e.g. "//path/to:test"
    test_args: A string containing all arguments to run the target with
      (split via shlex before execution).
    benchmark_type: A string representing the BenchmarkType enum; the
      benchmark type for this target.
    skip_processing_logs: Whether to skip processing test results from log
      files.

  Returns:
    A tuple (test_results, test_adjusted_name), where
    test_results: A test_log_pb2.TestResults proto, or None if log processing
      is skipped.
    test_adjusted_name: Unique benchmark name that consists of
      benchmark name optionally followed by GPU type.

  Raises:
    ValueError: If the test_name is not a valid target.
    subprocess.CalledProcessError: If the target itself fails.
    IOError: If there are problems gathering test log output from the test.
    MissingLogsError: If we couldn't find benchmark logs.
  """
  # Reject anything that is not a single, fully-qualified bazel target.
  if not (test_name and test_name.startswith("//") and ".." not in test_name and
          not test_name.endswith(":") and not test_name.endswith(":all") and
          not test_name.endswith("...") and len(test_name.split(":")) == 2):
    raise ValueError("Expected test_name parameter with a unique test, e.g.: "
                     "--test_name=//path/to:test")
  # Map "//path/to:test" onto its build-output executable path.
  test_executable = test_name.rstrip().strip("/").replace(":", "/")
  if gfile.Exists(os.path.join("bazel-bin", test_executable)):
    # Running in standalone mode from core of the repository
    test_executable = os.path.join("bazel-bin", test_executable)
  else:
    # Hopefully running in sandboxed mode
    test_executable = os.path.join(".", test_executable)
  test_adjusted_name = name
  # If a known GPU is present, suffix the benchmark name with its short name.
  gpu_config = gpu_info_lib.gather_gpu_devices()
  if gpu_config:
    gpu_name = gpu_config[0].model
    gpu_short_name_match = re.search(
        r"(Tesla|NVIDIA) (K40|K80|P100|V100|A100)", gpu_name
    )
    if gpu_short_name_match:
      gpu_short_name = gpu_short_name_match.group(0)
      test_adjusted_name = name + "|" + gpu_short_name.replace(" ", "_")
  # Benchmark logs are written under a temp dir; the name is sanitized so it
  # is usable as a filename prefix.
  temp_directory = tempfile.mkdtemp(prefix="run_and_gather_logs")
  mangled_test_name = (
      test_adjusted_name.strip("/").replace("|",
                                            "_").replace("/",
                                                         "_").replace(":", "_"))
  test_file_prefix = os.path.join(temp_directory, mangled_test_name)
  test_file_prefix = "%s." % test_file_prefix
  try:
    if not gfile.Exists(test_executable):
      # Fall back to the ".python3" variant of the binary if present.
      test_executable_py3 = test_executable + ".python3"
      if not gfile.Exists(test_executable_py3):
        raise ValueError("Executable does not exist: %s" % test_executable)
      test_executable = test_executable_py3
    test_args = shlex.split(test_args)
    # This key is defined in tf/core/util/reporter.h as
    # TestReporter::kTestReporterEnv.
    os.environ["TEST_REPORT_FILE_PREFIX"] = test_file_prefix
    start_time = time.time()
    subprocess.check_call([test_executable] + test_args)
    if skip_processing_logs:
      return None, test_adjusted_name
    run_time = time.time() - start_time
    log_files = gfile.Glob("{}*".format(test_file_prefix))
    if not log_files:
      raise MissingLogsError("No log files found at %s." % test_file_prefix)
    return (process_test_logs(
        test_adjusted_name,
        test_name=test_name,
        test_args=test_args,
        benchmark_type=benchmark_type,
        start_time=int(start_time),
        run_time=run_time,
        log_files=log_files), test_adjusted_name)
  finally:
    # Best-effort cleanup of the temp log directory.
    try:
      gfile.DeleteRecursively(temp_directory)
    except OSError:
      pass
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@tools@test@run_and_gather_logs_lib.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "Pranab-JD/iPIC3D-CPU-SPACE-CoE",
"repo_path": "iPIC3D-CPU-SPACE-CoE_extracted/iPIC3D-CPU-SPACE-CoE-main/README.md",
"type": "Markdown"
}
|
# iPIC3D
## Requirements
- gcc/g++ compiler
- cmake (minimum version 2.8)
- MPI (OpenMPI or MPICH)
- HDF5 (optional)
- Paraview/Catalyst (optional)
If you are on a supercomputer or cluster, it's highly possible that you can use tools like `module` to change the compiler, MPI or libraries used.
## Installation
1. Download the code
``` shell
git clone https://github.com/Pranab-JD/iPIC3D-CPU-SPACE-CoE.git
```
2. Create build directory
``` shell
cd iPIC3D-CPU-SPACE-CoE && mkdir build && cd build
```
3. Compile the code
``` shell
cmake ..
make -j # -j = build with max # of threads - fast, recommended
```
4. Run
``` shell
# no_of_proc = XLEN x YLEN x ZLEN (as specified in the input file)
mpirun -np no_of_proc ./iPIC3D inputfilename.inp
```
**Important:** make sure the `number of MPI processes = XLEN x YLEN x ZLEN` as specified in the input file.
If you are on a supercomputer, especially a multi-node system, it is likely that you should use `srun` to launch the program.
# Citation
Markidis, Stefano and Giovanni Lapenta (2010), *Multi-scale simulations of plasma with iPIC3D*, Mathematics and Computers in Simulation, 80, 7, 1509-1519 [[DOI]](https://doi.org/10.1016/j.matcom.2009.08.038)
|
Pranab-JDREPO_NAMEiPIC3D-CPU-SPACE-CoEPATH_START.@iPIC3D-CPU-SPACE-CoE_extracted@iPIC3D-CPU-SPACE-CoE-main@README.md@.PATH_END.py
|
{
"filename": "CompatibleDifferenceSpecificThermalEnergyPolicyInst.cc.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/Hydro/CompatibleDifferenceSpecificThermalEnergyPolicyInst.cc.py",
"type": "Python"
}
|
text = """
//------------------------------------------------------------------------------
// Explicit instantiation.
//------------------------------------------------------------------------------
#include "Geometry/Dimension.hh"
#include "Hydro/CompatibleDifferenceSpecificThermalEnergyPolicy.cc"
namespace Spheral {
template class CompatibleDifferenceSpecificThermalEnergyPolicy<Dim< %(ndim)s > >;
}
"""
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@Hydro@CompatibleDifferenceSpecificThermalEnergyPolicyInst.cc.py@.PATH_END.py
|
{
"filename": "Generator.py",
"repo_name": "COSMOGRAIL/PyCS",
"repo_path": "PyCS_extracted/PyCS-master/pycs/sim/old/Generator.py",
"type": "Python"
}
|
"""
Generate simulated light curve with the PureLightCurve
"""
from PureLightCurve import *
from SimLightCurve import *
from MlCurve import *
from util import *
import sys
sys.path.append("../../")
from pycs.gen import lc
class Generator:
    """
    Main class of this program: it takes a pure light curve and creates N
    simulated light curves in a list.

    The different aspects of each curve (sampling, error bars, seasons, ...)
    are built by the dedicated methods below.

    @todo: Maybe a general method like Construct(all the parameters) would be good
    """

    def __init__(self, OriginLightCurve, NLens=4, names=None, color=None,
                 length=20, period=1, dmag=None):
        """
        Initialise some general variables.

        Mutable defaults (names, color, dmag) are created inside the method so
        that successive Generator instances never share the same list objects.

        @type NLens: int
        @param NLens: number of simulated light curves
        @type names: list of string
        @param names: names of the simulated light curves (default A, B, C, D)
        @type color: list of string
        @param color: plot colors, one per curve
        @type length: float
        @param length: length of the simulated data in days
        @type period: float
        @param period: period of the simulated measurements
        @type dmag: array of float
        @param dmag: array containing the magnitude shift for each curve
        """
        if names is None:
            names = ["A", "B", "C", "D"]
        if color is None:
            color = ["red", "blue", "green", "magenta"]
        if dmag is None:
            dmag = [0]
        self.olcurve = OriginLightCurve
        """
        @type: PureLightCurve
        @ivar: The pure light curve which generates the simulation
        """
        self.length = length
        self.period = period
        # Build the list of simulated curves directly (the original pre-filled
        # the list with the class object itself, which was never used).
        self.curves = [SimLightCurve(names[i], plotcolor=color[i], length=self.length,
                                     period=self.period, originalcurve=self.olcurve,
                                     dmag=dmag[i])
                       for i in range(NLens)]
        """
        @type: list of SimLightCurve
        @ivar: a list of NLens simulated light curves
        """

    def Shift(self, shift=None):
        """
        Pseudo shifting method: it only stores the shift parameter in each
        SimLightCurve object.

        @type shift: array of float
        @param shift: array of NLens length holding all the time-shift
            parameters (default [0, 1, -2, 10]; created here to avoid a
            shared mutable default)
        """
        if shift is None:
            shift = [0, 1, -2, 10]
        self.shift = shift
        for i in range(len(shift)):
            self.curves[i].shift = shift[i]

    def create_timeline(self, jdsamplingstd):
        """
        Create an array with the epoch (in days) of each measurement. It is
        based on the measurement period, with some deviation simulating bad
        weather for a night or something else.

        @type jdsamplingstd: float
        @param jdsamplingstd: standard deviation of the normal distribution
            which simulates the aperiodicity of the measurements
        """
        # Explicit int: numpy size arguments must be integral under Python 3,
        # where "/" is true division.
        npts = int(self.length / self.period)
        if jdsamplingstd != 0:  # we can remove this error for some tests
            std = np.random.normal(0, jdsamplingstd, npts)  # vector of timing errors
        else:
            std = np.array(npts * [0.])
        for curve in self.curves:
            f = np.arange(npts)
            curve.datatime = f * self.period + std

    def create_seasons(self, pos, size):
        """
        Create the seasons. It's a brute-force method because it completely
        deletes the data between seasons. Maybe we can change that...

        @type pos: array of float
        @param pos: positions of the beginning of each inter-season gap
        @type size: array of float
        @param size: length of each inter-season gap
        """
        seas = [0]
        for i in range(len(size)):
            # indices of the inter-season period
            seas = np.append(seas, np.linspace(pos[i], pos[i] + size[i], int(size[i])))
        # np.delete requires integer indices under modern numpy; truncation
        # matches the legacy float-index behaviour.
        seas = np.asarray(seas, dtype=int)
        for curve in self.curves:  # delete them from the data
            curve.datatime = np.delete(curve.datatime, seas)
            curve.datamag = np.delete(curve.datamag, seas)
            curve.dataerr = np.delete(curve.dataerr, seas)

    def create_errorbar(self, low, high):
        """
        Create an array of error bars, uniformly distributed between low and
        high (a normal distribution was used previously; now obsolete).

        @type low: float
        @param low: minimum of the uniform distribution
        @type high: float
        @param high: maximum of the uniform distribution
        """
        npts = int(self.length / self.period)  # numpy size must be an int
        for curve in self.curves:
            curve.dataerr = np.abs(np.random.uniform(low, high, npts))  # uniform generator

    def create_data(self, erramp, mlbeta, mlstd):
        """
        Main method creating the sampled data: it shifts and samples the pure
        curve according to the shift parameter and the timeline, and adds a
        microlensing signal to each curve.

        @type erramp: float
        @param erramp: amplitude of the per-point error (the error bar gives
            the standard deviation); 0 or 1 in general
        @type mlbeta: float
        @param mlbeta: beta parameter of the microlensing curve generator
        @type mlstd: float
        @param mlstd: std parameter of the microlensing generator (equivalent
            to an amplitude parameter)
        """
        for curve in self.curves:
            std = erramp * np.random.randn(len(curve.dataerr)) * curve.dataerr  # error vector
            curve.mlcurve = MlCurve(self.length, self.olcurve.res)  # microlensing vector
            curve.mlcurve.LawNoise(mlbeta, mlstd)  # generate microlensing
            for (i, time) in enumerate(curve.datatime):
                if (time + curve.shift) * self.olcurve.res > self.olcurve.length * self.olcurve.res:
                    break
                if (time + curve.shift) > 0:
                    # NOTE(review): the index below may be a float; presumably
                    # PureLightCurve/MlCurve accept that -- confirm under Python 3.
                    curve.datamag[i] = self.olcurve[(time + np.floor(curve.shift) - 1) * self.olcurve.res] + std[i] + curve.mlcurve.data[(time + np.floor(curve.shift) - 1) * self.olcurve.res]

    def save(self):
        """
        Save the data in a dictionary of lightcurves as used by the shifting
        part of the program, so this pickle can be fed directly to the main
        program for testing.

        @todo: maybe give a different name to each archive
        """
        lcs = {}  # renamed from 'dict' to avoid shadowing the builtin
        for curve in self.curves:
            lcs[curve.name] = lc.factory(curve.datatime, curve.datamag, magerrs=curve.dataerr,
                                         telescopename="simulation", object=curve.name)
        writepickle(lcs, 'lcs.pkl')
|
COSMOGRAILREPO_NAMEPyCSPATH_START.@PyCS_extracted@PyCS-master@pycs@sim@old@Generator.py@.PATH_END.py
|
{
"filename": "image_data_new.py",
"repo_name": "sibirrer/AstroObjectAnalyser",
"repo_path": "AstroObjectAnalyser_extracted/AstroObjectAnalyser-master/astroObjectAnalyser/image_data_new.py",
"type": "Python"
}
|
__author__ = 'sibirrer'
#external modules
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import numpy as np
import pyextract.image_config as ImageConfig
import astropy.coordinates as coords
#internal modules
from astroObjectAnalyser.DataAnalysis.analysis import Analysis
from astroObjectAnalyser.DataAnalysis.catalogues import Catalogue
class ImageData(object):
    """
    Contains all the information associated with a given band image
    (e.g. the r-band frame of a specific lens).

    Headers, pixel counts and derived quantities are read lazily from the
    FITS file(s) on first access and then cached on the instance.
    """

    def __init__(self, image_filename=None, wht_filename=None, data_type='cosmos', wht_extension=1, sci_extension=0):
        """
        Initialize the data class with file names (absolute paths) and data
        type specifications.

        :param image_filename: path to the science FITS image
        :param wht_filename: optional path to a separate weight/exposure map
        :param data_type: descriptive tag of the data origin
        :param wht_extension: FITS extension of the weight map
        :param sci_extension: FITS extension of the science image
        """
        self._image_filename = image_filename
        self._wht_filename = wht_filename
        self.catalogue = Catalogue()
        self.analysis = Analysis()
        self._data_type = data_type
        self._extension_image = sci_extension  # or 'SCI'
        self._extension_wht = wht_extension  # or 'WHT'

    @property
    def header_primary(self):
        """Primary FITS header (does not contain the general WCS information)."""
        if not hasattr(self, '_header_primary'):
            self._load_header()
        return self._header_primary

    @property
    def header(self):
        """Header of the science image extension."""
        if not hasattr(self, '_header'):
            self._load_header()
        return self._header

    @property
    def naxis1(self):
        """Number of pixels along axis 1 of the original image."""
        if not hasattr(self, '_naxis1'):
            self._pixel_number()
        return self._naxis1

    @property
    def naxis2(self):
        """Number of pixels along axis 2 of the original image."""
        if not hasattr(self, '_naxis2'):
            self._pixel_number()
        return self._naxis2

    @property
    def exposure_time(self):
        """Exposure time read from the primary header (EXPTIME)."""
        if not hasattr(self, '_exposure_time'):
            self._exposure_time = self.header_primary.get('EXPTIME')
        return self._exposure_time

    @property
    def CCD_gain(self):
        """CCD gain, read from the CCDGAIN or GAIN header keyword."""
        if not hasattr(self, '_CCD_gain'):
            if 'CCDGAIN' in self.header_primary:
                self._CCD_gain = self.header_primary.get('CCDGAIN')
            elif 'CCDGAIN' in self.header:
                self._CCD_gain = self.header.get('CCDGAIN')
            elif 'GAIN' in self.header:
                self._CCD_gain = self.header.get('GAIN')
            else:
                raise ValueError("CCD gain could not be read from the header. Please manually add it!")
        return self._CCD_gain

    @property
    def background(self):
        """Tuple (mean, rms) of the image background, computed once."""
        # Bug fix: the cache test used '_background', an attribute that was
        # never set, so the background was re-estimated on every access.
        if not hasattr(self, '_background_mean'):
            self._background_mean, self._background_rms = self._get_background()
        return self._background_mean, self._background_rms

    def set_extension(self, ext_image=0, ext_wht=1):
        """Override the FITS extensions of the science image and weight map."""
        self._extension_image = ext_image
        self._extension_wht = ext_wht

    @property
    def get_cat(self):
        """SExtractor source catalogue (computed once, then cached)."""
        if not hasattr(self, '_cat'):
            cat = self._get_cat()
            self._cat = cat
        return self._cat

    def transforms(self, xc, yc):
        """
        Linear pixel<->coordinate transformation matrices evaluated at pixel
        (xc, yc), cached after the first call.

        NOTE(review): the cache is not keyed on (xc, yc); later calls with a
        different center return the matrices of the first call.
        """
        if not hasattr(self, '_pix2coord_transform') or not hasattr(self, '_coord2pix_transform'):
            # Bug fix: transform() returns the matrices but never stored
            # them, so this method previously always raised AttributeError.
            self._pix2coord_transform, self._coord2pix_transform = self.transform(xc, yc)
        return self._pix2coord_transform, self._coord2pix_transform

    @property
    def transforms_undistorted(self):
        """Pixel<->coordinate matrices from the raw header CD values (no distortion)."""
        if not hasattr(self, '_pix2coord_transform_undistorted') or not hasattr(self, '_coord2pix_transform_undistorted'):
            # Bug fix: store the matrices returned by _transform_undistorted().
            self._pix2coord_transform_undistorted, self._coord2pix_transform_undistorted = \
                self._transform_undistorted()
        return self._pix2coord_transform_undistorted, self._coord2pix_transform_undistorted

    def _get_background(self):
        """
        :return: mean and rms value of the background
        """
        HDUFile = self.HDUFile()
        mean, rms = self.catalogue.get_background(HDUFile)
        return mean, rms

    def _get_cat(self):
        """
        :return: SExtractor catalogue
        """
        HDUFile = self.HDUFile()
        cat = self.catalogue.get_source_cat(HDUFile)
        return cat

    def _get_psf_fit(self, psf_type):
        """
        Estimate the PSF of the image.

        :param psf_type: PSF model type passed to the analysis routine
        :return: (kernel, mean_list)
        """
        exp_time = self.exposure_time
        HDUFile = self.HDUFile()
        mean, rms = self.catalogue.get_background(HDUFile)
        cat = self.catalogue.get_source_cat(HDUFile)
        # Bug fix: image_full is a property; "self.image_full()" attempted to
        # call the returned numpy array.
        image = self.image_full
        kernel, mean_list, filter_object = self.analysis.get_psf(image, cat, mean, rms, exp_time, psf_type)
        return kernel, mean_list

    def HDUFile(self, force=False):
        """SExtractor HDU file for this image, cached unless force is True."""
        if not (hasattr(self, '_HDUFile') and (not force)):
            conf_args = ImageConfig.config_arguments(self.exposure_time, self.CCD_gain)
            self._HDUFile = ImageConfig.get_source_cat(imageref=self._image_filename, conf_args=conf_args)
        return self._HDUFile

    def _load_header(self):
        """
        Read in the header info and cache the primary and science-extension
        headers on the instance.
        """
        # Primary header: does not contain general (WCS) information.
        self._header_primary = pyfits.getheader(self._image_filename)
        hdul = pyfits.open(self._image_filename)
        self._header = hdul[self._extension_image].header
        hdul.close()

    def _pixel_number(self):
        """
        Read in the number of pixels per axis of the original image.
        """
        if self.header['NAXIS'] > 2:
            raise TypeError("Too many (%i) dimensions!" % self.header['NAXIS'])
        self._naxis1 = self.header['NAXIS1']
        self._naxis2 = self.header['NAXIS2']

    @property
    def image_full(self):
        """
        Array of one full band; only use this when really needed, as images
        can be quite large. NaN pixels are replaced by 0.
        """
        hdul = pyfits.open(self._image_filename)
        data_full = hdul[self._extension_image].data
        data_full[np.isnan(data_full)] = 0
        hdul.close()
        return data_full

    @property
    def exposure_full(self):
        """
        Array of one full band exposure time; only use this when really
        needed, as images can be quite large. NaN pixels are replaced by 0.
        """
        if self._wht_filename is not None:
            hdul = pyfits.open(self._wht_filename)
            exp_full = hdul[0].data
            print("separate exposure map loaded")
        else:
            hdul = pyfits.open(self._image_filename)
            exp_full = hdul[self._extension_wht].data
        exp_full[np.isnan(exp_full)] = 0
        hdul.close()
        return exp_full

    @property
    def pixel_size(self):
        """
        :return: pixel size along both axes in arc seconds
        """
        # Prefer CDELTn; fall back to the norm of the CD-matrix rows.
        cd1 = self.header.get('CDELT1') if self.header.get('CDELT1') else np.sqrt(
            self.header.get('CD1_1') ** 2 + self.header.get('CD1_2') ** 2)
        cd2 = self.header.get('CDELT2') if self.header.get('CDELT2') else np.sqrt(
            self.header.get('CD2_1') ** 2 + self.header.get('CD2_2') ** 2)
        if cd1 is None or cd2 is None:
            raise Exception("Missing CD or CDELT keywords in header")
        return cd1 * 3600, cd2 * 3600

    def coordinates_grid(self, x_min, x_max, y_min, y_max):
        """
        :param x_min: min pixel x-axis
        :param x_max: max pixel x-axis
        :param y_min: min pixel y-axis
        :param y_max: max pixel y-axis
        :return: ra_coords, dec_coords (degrees) of each pixel in the window
        """
        head = self.header
        wcs = pywcs.WCS(head)
        x_coords = np.linspace(x_min, x_max - 1, x_max - x_min)
        y_coords = np.linspace(y_min, y_max - 1, y_max - y_min)
        x_coords, y_coords = np.meshgrid(x_coords, y_coords)
        ra_coords, dec_coords = wcs.all_pix2world(x_coords, y_coords, 0)
        return ra_coords, dec_coords

    def pix2coord(self, x, y):
        """
        Maps pixel indices to ra/dec coordinates (with distortion terms).

        :param x: pixel x index (or array)
        :param y: pixel y index (or array)
        :return: (ra, dec)
        """
        wcs = pywcs.WCS(self.header)
        return wcs.all_pix2world(x, y, 0)

    def coord2pix(self, ra, dec):
        """
        Maps ra/dec coordinates to pixel indices (with distortion terms).

        :param ra: right ascension (or array)
        :param dec: declination (or array)
        :return: (x, y) pixel indices
        """
        wcs = pywcs.WCS(self.header)
        return wcs.all_world2pix(ra, dec, 0)

    def cutout_range(self, rac, decc, xw, yw, units='pixels', coordsys='galactic'):
        """
        Computes the pixel range (min, max) of a frame centered at
        (rac, decc) with half-widths xw, yw.

        :param rac: center right ascension
        :param decc: center declination
        :param xw: half-width along x ('pixels' or 'arcseconds', see units)
        :param yw: half-width along y
        :param units: 'pixels' or 'arcseconds'
        :return: xmin, xmax, ymin, ymax
        """
        head = self.header
        wcs = pywcs.WCS(head)
        # NOTE(review): the 'wcs' units branch converts the center but is not
        # handled by the range computation below, so it ends in the final
        # raise -- confirm intended behavior before using units='wcs'.
        if units == 'wcs':
            if coordsys == 'celestial' and wcs.wcs.lngtyp == 'GLON':
                rac, decc = coords.Position((rac, decc), system=coordsys).galactic()
            elif coordsys == 'galactic' and wcs.wcs.lngtyp == 'RA':
                rac, decc = coords.Position((rac, decc), system=coordsys).j2000()
            else:
                raise ValueError("problem with wcs instance.")
        xx, yy = wcs.all_world2pix(rac, decc, 0)
        xx = int(xx)
        yy = int(yy)
        print('the center of the image is at pixel coordinates %f, %f.' % (xx, yy))
        if units == 'pixels':
            xmin, xmax = np.max([0, xx - xw]), np.min([self.naxis1, xx + xw])
            ymin, ymax = np.max([0, yy - yw]), np.min([self.naxis2, yy + yw])
        elif units == 'arcseconds':
            cd1, cd2 = self.pixel_size
            xmin, xmax = np.max([0, xx - xw / np.abs(cd1)]), np.min([self.naxis1, xx + xw / np.abs(cd1)])
            ymin, ymax = np.max([0, yy - yw / np.abs(cd2)]), np.min([self.naxis2, yy + yw / np.abs(cd2)])
        else:
            raise Exception("Can't use units %s." % units)
        if xmax < 0 or ymax < 0:
            raise ValueError("Max Coordinate is outside of map: %f,%f." % (xmax, ymax))
        if ymin >= head.get('NAXIS2') or xmin >= head.get('NAXIS1'):
            raise ValueError("Min Coordinate is outside of map: %f,%f." % (xmin, ymin))
        return xmin, xmax, ymin, ymax

    def cutout(self, xc, yc, xw, yw, units='pixels', coordsys='galactic', verbose=False,
               exposure_map=False):
        """
        Cut a rectangular window out of the full image.

        :param xc: center x in the file's coordinate system (CTYPE)
        :param yc: center y in the file's coordinate system (CTYPE)
        :param xw: half-width along x (pixels or arcseconds, see units)
        :param yw: half-width along y
        :param units: 'pixels' or 'arcseconds'
        :param verbose: if True, print the resulting pixel ranges
        :param exposure_map: if True, also cut the exposure map
        :return: (image cutout, exposure-map cutout or None)
        """
        xmin, xmax, ymin, ymax = self.cutout_range(xc, yc, xw, yw, units=units, coordsys=coordsys)
        img = self.image_full[int(ymin):int(ymax), int(xmin):int(xmax)].copy()
        if verbose is True:
            print("Cut image to %s. xrange: %f:%f, yrange: %f:%f" % (img.shape, xmin, xmax, ymin, ymax))
        if exposure_map is True:
            exp_map = self.exposure_full[int(ymin):int(ymax), int(xmin):int(xmax)].copy()
        else:
            exp_map = None
        return img, exp_map

    def _transform_undistorted(self):
        """
        Builds the linear pixel<->ra/dec transformation matrices from the
        header CD values (no distortion corrections applied).

        :return: (pix2coord_transform, coord2pix_transform), in arcsec/pixel
        """
        # self.header is a lazy property; accessing it loads the header.
        CD1_1 = self.header.get('CD1_1') * 3600  # change in arc sec per pixel d(ra)/dx
        CD1_2 = self.header.get('CD1_2') * 3600
        CD2_1 = self.header.get('CD2_1') * 3600
        CD2_2 = self.header.get('CD2_2') * 3600
        pix2coord_transform = np.array([[CD1_1, CD1_2], [CD2_1, CD2_2]])
        det = CD1_1 * CD2_2 - CD1_2 * CD2_1
        # Inverse of the 2x2 matrix above.
        coord2pix_transform = np.array([[CD2_2, -CD1_2], [-CD2_1, CD1_1]]) / det
        return pix2coord_transform, coord2pix_transform

    def transform(self, xc, yc):
        """
        :param xc: pixel of the center
        :param yc: pixel of the center
        :return: linear transformation matrices for a one-pixel shift at the
            given position, computed with the full distortion corrections
        """
        head = self.header
        wcs = pywcs.WCS(head)
        # Finite difference over one pixel in each direction.
        ra_0, dec_0 = wcs.all_pix2world(xc, yc, 0)
        ra_10, dec_10 = wcs.all_pix2world(xc + 1, yc, 0)
        ra_01, dec_01 = wcs.all_pix2world(xc, yc + 1, 0)
        cos_dec = np.cos(dec_0 / 360 * 2 * np.pi)
        factor = 3600.
        CD1_1 = (ra_10 - ra_0) * factor * cos_dec
        CD1_2 = (dec_10 - dec_0) * factor
        CD2_1 = (ra_01 - ra_0) * factor * cos_dec
        CD2_2 = (dec_01 - dec_0) * factor
        pix2coord_transform = np.array([[CD1_1, CD1_2], [CD2_1, CD2_2]])
        det = CD1_1 * CD2_2 - CD1_2 * CD2_1
        coord2pix_transform = np.array([[CD2_2, -CD1_2], [-CD2_1, CD1_1]]) / det
        return pix2coord_transform, coord2pix_transform

    def _transform_large(self, xc, yc, delta_pix=100):
        """
        :param xc: pixel of the center
        :param yc: pixel of the center
        :param delta_pix: baseline (in pixels) of the finite difference
        :return: linear transformation matrices averaged over a delta_pix
            shift, computed with the full distortion corrections
        """
        head = self.header
        wcs = pywcs.WCS(head)
        ra_0, dec_0 = wcs.all_pix2world(xc, yc, 0)
        ra_10, dec_10 = wcs.all_pix2world(xc + delta_pix, yc, 0)
        ra_01, dec_01 = wcs.all_pix2world(xc, yc + delta_pix, 0)
        cos_dec = np.cos(dec_0 / 360 * 2 * np.pi)
        factor = 3600. / delta_pix
        CD1_1 = (ra_10 - ra_0) * factor * cos_dec
        CD1_2 = (dec_10 - dec_0) * factor
        CD2_1 = (ra_01 - ra_0) * factor * cos_dec
        CD2_2 = (dec_01 - dec_0) * factor
        pix2coord_transform = np.array([[CD1_1, CD1_2], [CD2_1, CD2_2]])
        det = CD1_1 * CD2_2 - CD1_2 * CD2_1
        coord2pix_transform = np.array([[CD2_2, -CD1_2], [-CD2_1, CD1_1]]) / det
        return pix2coord_transform, coord2pix_transform
|
sibirrerREPO_NAMEAstroObjectAnalyserPATH_START.@AstroObjectAnalyser_extracted@AstroObjectAnalyser-master@astroObjectAnalyser@image_data_new.py@.PATH_END.py
|
{
"filename": "_increasing.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/ohlc/_increasing.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
# NOTE(review): this module appears to be plotly code-generated graph-object
# code -- keep manual edits minimal so it stays in sync with the generator.
class Increasing(_BaseTraceHierarchyType):
    # class properties
    # --------------------
    _parent_path_str = "ohlc"
    _path_str = "ohlc.increasing"
    _valid_props = {"line"}
    # line
    # ----
    @property
    def line(self):
        """
        The 'line' property is an instance of Line
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.ohlc.increasing.Line`
          - A dict of string/value properties that will be passed
            to the Line constructor

            Supported dict properties:

                color
                    Sets the line color.
                dash
                    Sets the dash style of lines. Set to a dash
                    type string ("solid", "dot", "dash",
                    "longdash", "dashdot", or "longdashdot") or a
                    dash length list in px (eg "5px,10px,2px,2px").
                width
                    Sets the line width (in px).

        Returns
        -------
        plotly.graph_objs.ohlc.increasing.Line
        """
        return self["line"]
    @line.setter
    def line(self, val):
        self["line"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        line
            :class:`plotly.graph_objects.ohlc.increasing.Line`
            instance or dict with compatible properties
        """
    def __init__(self, arg=None, line=None, **kwargs):
        """
        Construct a new Increasing object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.ohlc.Increasing`
        line
            :class:`plotly.graph_objects.ohlc.increasing.Line`
            instance or dict with compatible properties

        Returns
        -------
        Increasing
        """
        super(Increasing, self).__init__("increasing")
        # Internal construction path: reuse the parent reference and skip
        # validation entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Copy so popping properties below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.ohlc.Increasing
constructor must be a dict or
an instance of :class:`plotly.graph_objs.ohlc.Increasing`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in arg.
        _v = arg.pop("line", None)
        _v = line if line is not None else _v
        if _v is not None:
            self["line"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@ohlc@_increasing.py@.PATH_END.py
|
{
"filename": "correct_files.py",
"repo_name": "saltastro/polsalt",
"repo_path": "polsalt_extracted/polsalt-master/scripts/correct_files.py",
"type": "Python"
}
|
import os
import sys
import copy
import numpy as np
from astropy.io import fits
polsaltdir = '/'.join(os.path.realpath(__file__).split('/')[:-2])
datadir = polsaltdir+'/polsalt/data/'
sys.path.extend((polsaltdir+'/polsalt/',))
from specpolwollaston import correct_wollaston, read_wollaston
def correct_files(hdu,tilt=0):
    """For a given input file, apply corrections for wavelength,
       distortion, and bad pixels

    Parameters
    ----------
    hdu: astropy.io.fits.HDUList
        open FITS file whose extensions are corrected in place
    tilt: (float)
        change in row from col = 0 to cols

    Returns
    -------
    hdu: astropy.io.fits.HDUList
        the same HDU list, with each extension row-corrected
    """
    cbin, rbin = [int(x) for x in hdu[0].header['CCDSUM'].split(" ")]
    beams, rows, cols = hdu[1].data.shape

    #temporary cludge: minimal HDU list in the shape read_wollaston expects
    thdu = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(hdu[1].data[0])])
    thdu[0].header = hdu[0].header
    thdu[1].name = 'SCI'

    rpix_oc = read_wollaston(thdu, wollaston_file=datadir+"wollaston.txt")
    # Bug fix: use floor division for the centre column.  Under Python 3,
    # cols/2 is a float and cannot index an array; // also reproduces the
    # original Python 2 integer-division behaviour.
    drow_oc = (rpix_oc-rpix_oc[:,cols//2][:,None])/rbin
    drow_oc += -tilt*(np.arange(cols) - cols//2)/cols

    for i in range(1, len(hdu)):
        for o in range(beams):
            # The bad-pixel mask is interpolated as floats, then thresholded
            # back to an integer mask below.
            if hdu[i].name == 'BPM' :
                tdata = hdu[i].data[o].astype('float')
            else:
                tdata = hdu[i].data[o]
            tdata = correct_wollaston(tdata, -drow_oc[o])
            if hdu[i].name == 'BPM' :
                hdu[i].data[o] = (tdata > 0.1).astype('uint')
            else:
                hdu[i].data[o] = tdata

    return hdu
if __name__=='__main__':
    import glob

    # Accept either a shell-unexpanded glob pattern or an explicit file list.
    if '*' in sys.argv[-1]:
        images = glob.glob(sys.argv[-1])
    else:
        images = sys.argv[1:]

    for img in images:
        hdu = correct_files(fits.open(img))
        # 'clobber' was removed in astropy 2.0; 'overwrite' is the
        # supported keyword for replacing an existing output file.
        hdu.writeto('c' + img, overwrite=True)
|
saltastroREPO_NAMEpolsaltPATH_START.@polsalt_extracted@polsalt-master@scripts@correct_files.py@.PATH_END.py
|
{
"filename": "_opacitysrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/barpolar/marker/_opacitysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacitysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``opacitysrc`` property of ``barpolar.marker``."""

    def __init__(
        self, plotly_name="opacitysrc", parent_name="barpolar.marker", **kwargs
    ):
        # Use the generated defaults unless the caller overrides them.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(OpacitysrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@barpolar@marker@_opacitysrc.py@.PATH_END.py
|
{
"filename": "treering_skybg2.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/devel/lsst/treering_skybg2.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
from __future__ import print_function
import sys
import time
import numpy as np
import galsim
# Build a sky-background image on a silicon sensor with tree rings, then
# verify that the realized photon counts follow Poisson statistics.
seed = 140101
rng = galsim.UniformDeviate(seed)
# Simple tree-ring model: amplitude 0.26, period 47 (units per GalSim API).
treering_func = galsim.SiliconSensor.simple_treerings(0.26, 47.)
treering_center = galsim.PositionD(0,0)
skyCounts = 800.
print('skyCounts = ',skyCounts)
# Not an LSST wcs, but just make sure this works properly with a non-trivial wcs.
wcs = galsim.FitsWCS('../../tests/fits_files/tnx.fits')
t0 = time.time()
image = galsim.ImageF(2000, 500, wcs=wcs)
print('image bounds = ',image.bounds)
# Huge nrecalc: presumably prevents pixel-boundary recalculation during
# accumulation -- confirm against the SiliconSensor docs.
nrecalc = 1.e300
sensor = galsim.SiliconSensor(rng=rng, nrecalc=nrecalc,
                              treering_func=treering_func, treering_center=treering_center)
# For regular sky photons, we can just use the pixel areas to build the sky image.
# At this point the image is blank, so area is just from tree rings.
sensor_area = sensor.calculate_pixel_areas(image)
sensor_area.write('sensor_area.fits')
# We also need to account for the distortion of the wcs across the image.
# This expects sky_level in ADU/arcsec^2, not ADU/pixel.
image.wcs.makeSkyImage(image, sky_level=1.)
image.write('wcs_area.fits')
# Rescale so that the mean sky level per pixel is skyCounts
mean_pixel_area = image.array.mean()
image *= skyCounts / mean_pixel_area
# Now multiply by the area due to the sensor effects.
image *= sensor_area
# Finally, add noise. What we have here so far is the expectation value in each pixel.
# We need to realize this according to Poisson statistics with these means.
noise = galsim.PoissonNoise(rng)
image.addNoise(noise)
t1 = time.time()
print('Time to make sky image = ',t1-t0)
image.write('sky.fits')
# Check that the photons follow Poisson statistics
import matplotlib.pyplot as plt
from scipy.stats import poisson
fig = plt.figure()
ax = fig.add_subplot(111)
bin_width = 5
bins = np.arange(0,2*skyCounts,bin_width)
n, bins, p = ax.hist(image.array.ravel(), bins=bins, histtype='step', color='blue', fill=True)
npix = np.prod(image.array.shape)
# Overlay the expected Poisson pmf, scaled to the histogram normalization.
ax.plot(bins, npix * bin_width * poisson.pmf(bins, skyCounts), color='green')
ax.set_xlabel('photons per pixel')
ax.set_ylabel('n pixels')
plt.tight_layout()
plt.savefig('poisson_test.pdf')
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@devel@lsst@treering_skybg2.py@.PATH_END.py
|
{
"filename": "check_group_membership.py",
"repo_name": "SWIFTSIM/SOAP",
"repo_path": "SOAP_extracted/SOAP-master/check_group_membership.py",
"type": "Python"
}
|
#!/bin/env python
import os
import sys
import numpy as np
import h5py
import virgo.mpi.parallel_sort as psort
import virgo.mpi.parallel_hdf5 as phdf5
from mpi4py import MPI
comm = MPI.COMM_WORLD
comm_size = comm.Get_size()
comm_rank = comm.Get_rank()
def check_membership(
    membership_filenames,
    nr_memb_files,
    snapshot_filenames,
    nr_snap_files,
    hbt_filenames,
):
    """
    Check that membership files are consistent with HBT particle lists

    All three filename arguments are format strings with a ``{file_nr}``
    placeholder; the ``nr_*`` arguments give the number of files in each set.
    This is a collective operation: every MPI rank must call it.  Raises
    RuntimeError if the catalogues disagree.
    """
    # Check on files so we can fail quickly if filenames are wrong
    # Also get number of HBT files
    if comm_rank == 0:
        if not os.path.exists(membership_filenames.format(file_nr=0)):
            raise RuntimeError("Membership files not found")
        if not os.path.exists(snapshot_filenames.format(file_nr=0)):
            raise RuntimeError("Snapshot files not found")
        if not os.path.exists(hbt_filenames.format(file_nr=0)):
            raise RuntimeError("HBT files not found")
        with h5py.File(hbt_filenames.format(file_nr=0)) as infile:
            nr_hbt_files = int(infile["NumberOfFiles"][...])
    else:
        nr_hbt_files = None
    nr_hbt_files = comm.bcast(nr_hbt_files)
    # Membership files are written one per snapshot file, so the counts must match.
    assert nr_memb_files == nr_snap_files
    # Read membership files
    if comm_rank == 0:
        print("Reading membership files")
    memb = phdf5.MultiFile(
        membership_filenames, file_idx=np.arange(nr_memb_files, dtype=int), comm=comm
    )
    memb_grnr = memb.read("PartType1/GroupNr_bound")
    # Read snapshot files
    if comm_rank == 0:
        print("Reading snapshot files")
    snap = phdf5.MultiFile(
        snapshot_filenames, file_idx=np.arange(nr_snap_files, dtype=int), comm=comm
    )
    snap_part_ids = snap.read("PartType1/ParticleIDs")
    # Mask out particles which aren't in a halo (GroupNr_bound == -1)
    mask = memb_grnr != -1
    memb_grnr = memb_grnr[mask]
    snap_part_ids = snap_part_ids[mask]
    # Sort by particle id
    if comm_rank == 0:
        print("Sorting particles from snapshot")
    order = psort.parallel_sort(snap_part_ids, return_index=True, comm=comm)
    # Reorder the group numbers into the same (sorted-by-ID) order.
    memb_grnr = psort.fetch_elements(memb_grnr, order)
    del order
    del snap_part_ids
    # Get track id of each particle
    if comm_rank == 0:
        print("Getting track id of each particle")
    cat = phdf5.MultiFile(hbt_filenames, file_nr_dataset="NumberOfFiles", comm=comm)
    subhalo = cat.read("Subhalos")
    # GroupNr_bound indexes the subhalo table, so this maps particles to TrackIds.
    hbt_track_id_mapping = subhalo["TrackId"]
    del subhalo
    memb_track_ids = psort.fetch_elements(hbt_track_id_mapping, memb_grnr, comm=comm)
    del memb_grnr
    del hbt_track_id_mapping
    # Assign files to MPI ranks: as evenly as possible, remainder to low ranks.
    files_per_rank = np.zeros(comm_size, dtype=int)
    files_per_rank[:] = nr_hbt_files // comm_size
    files_per_rank[: nr_hbt_files % comm_size] += 1
    assert np.sum(files_per_rank) == nr_hbt_files
    first_file_on_rank = np.cumsum(files_per_rank) - files_per_rank
    # Read in the halos from the HBT output
    if comm_rank == 0:
        print("Reading HBT output")
    hbt_track_ids = []
    hbt_part_ids = []
    for file_nr in range(
        first_file_on_rank[comm_rank],
        first_file_on_rank[comm_rank] + files_per_rank[comm_rank],
    ):
        with h5py.File(hbt_filenames.format(file_nr=file_nr), "r") as infile:
            hbt_part_ids.append(infile["SubhaloParticles"][...])
            # Repeat each subhalo's TrackId once per bound particle so the two
            # lists line up element-by-element after concatenation.
            hbt_track_ids.append(
                np.repeat(infile["Subhalos"]["TrackId"], infile["Subhalos"]["Nbound"])
            )
    # Combine arrays of particles in halos
    if len(hbt_part_ids) > 0:
        hbt_track_ids = np.concatenate(hbt_track_ids)
        hbt_part_ids = np.concatenate(
            hbt_part_ids
        )  # Combine arrays of halos from different files
    if len(hbt_part_ids) > 0:
        hbt_part_ids = np.concatenate(
            hbt_part_ids
        )  # Combine arrays of particles from different halos
    # TODO: Handle ranks which didn't read files?
    # NOTE(review): on such ranks hbt_part_ids is still an empty python list
    # here — presumably parallel_sort copes with that; confirm.
    # Sort by particle ID
    if comm_rank == 0:
        print("Sorting particles from HBT")
    order = psort.parallel_sort(hbt_part_ids, return_index=True, comm=comm)
    hbt_track_ids = psort.fetch_elements(hbt_track_ids, order)
    del order
    del hbt_part_ids
    # Ensure both arrays of track ids are partitioned in the same way
    if comm_rank == 0:
        print("Repartitioning particles")
    ndesired = np.asarray(comm.allgather(len(hbt_track_ids)), dtype=int)
    memb_track_ids = psort.repartition(memb_track_ids, ndesired, comm=comm)
    # Check that track ids agree
    if np.any(memb_track_ids != hbt_track_ids):
        raise RuntimeError("HBT catalogues disagree with membership files!")
    comm.barrier()
    if comm_rank == 0:
        print("Track IDs agree.")
if __name__ == "__main__":
# Read in snapshot as input
snap_nr = int(sys.argv[1])
# Location of membership files
membership_dir = "/cosma8/data/dp004/dc-mcgi1/FLAMINGO/Runs/L2800N10080/DMO_FIDUCIAL/SOAP/HBTplus/"
membership_filenames = (
f"{membership_dir}/membership_{snap_nr:04d}/membership_{snap_nr:04d}."
+ "{file_nr}.hdf5"
)
nr_memb_files = 1024
# Location of HBT output
hbt_dir = "/snap8/scratch/dp004/jch/FLAMINGO/HBT/L2800N10080/DMO_FIDUCIAL/hbt/"
hbt_filenames = f"{hbt_dir}/{snap_nr:03d}/SubSnap_{snap_nr:03d}." + "{file_nr}.hdf5"
# Location of snapshot files
snapshot_dir = (
"/cosma8/data/dp004/flamingo/Runs/L2800N10080/DMO_FIDUCIAL/snapshots/"
)
snapshot_filenames = (
f"{snapshot_dir}/flamingo_{snap_nr:04d}/flamingo_{snap_nr:04d}."
+ "{file_nr}.hdf5"
)
nr_snap_files = 1024
check_membership(
membership_filenames,
nr_memb_files,
snapshot_filenames,
nr_snap_files,
hbt_filenames,
)
|
SWIFTSIMREPO_NAMESOAPPATH_START.@SOAP_extracted@SOAP-master@check_group_membership.py@.PATH_END.py
|
{
"filename": "array_like.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/typing/tests/data/pass/array_like.py",
"type": "Python"
}
|
from __future__ import annotations
from typing import Any, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from numpy._typing import NDArray, ArrayLike, _SupportsArray
x1: ArrayLike = True
x2: ArrayLike = 5
x3: ArrayLike = 1.0
x4: ArrayLike = 1 + 1j
x5: ArrayLike = np.int8(1)
x6: ArrayLike = np.float64(1)
x7: ArrayLike = np.complex128(1)
x8: ArrayLike = np.array([1, 2, 3])
x9: ArrayLike = [1, 2, 3]
x10: ArrayLike = (1, 2, 3)
x11: ArrayLike = "foo"
x12: ArrayLike = memoryview(b'foo')
class A:
def __array__(
self, dtype: None | np.dtype[Any] = None
) -> NDArray[np.float64]:
return np.array([1.0, 2.0, 3.0])
x13: ArrayLike = A()
scalar: _SupportsArray[np.dtype[np.int64]] = np.int64(1)
scalar.__array__()
array: _SupportsArray[np.dtype[np.int_]] = np.array(1)
array.__array__()
a: _SupportsArray[np.dtype[np.float64]] = A()
a.__array__()
a.__array__()
# Escape hatch for when you mean to make something like an object
# array.
object_array_scalar: object = (i for i in range(10))
np.array(object_array_scalar)
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@typing@tests@data@pass@array_like.py@.PATH_END.py
|
{
"filename": "SECURITY.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/libs/protobuf/SECURITY.md",
"type": "Markdown"
}
|
To report security concerns or vulnerabilities within protobuf, please use
Google's official channel for reporting these.
https://www.google.com/appserve/security-bugs/m2/new
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@libs@protobuf@SECURITY.md@.PATH_END.py
|
{
"filename": "python_net_approx.ipynb",
"repo_name": "pynucastro/pynucastro",
"repo_path": "pynucastro_extracted/pynucastro-main/examples/python_net_approx.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import pynucastro as pyna
```
```python
reaclib_lib = pyna.ReacLibLibrary()
```
```python
all_reactants = ["p",
"he4", "c12", "o16", "ne20", "mg24", "si28", "s32",
"ar36", "ca40", "ti44", "cr48", "fe52", "ni56",
"al27", "p31", "cl35", "k39", "sc43", "v47", "mn51", "co55",
"n13", "n14", "f18", "ne21", "na22", "na23"]
subch = reaclib_lib.linking_nuclei(all_reactants)
# in this list, we have the reactants, the actual reactants,
# and modified products that we will use instead
other_rates = [(("c12", "c12"), ("mg23", "n"), ("mg24")),
(("o16", "o16"), ("s31", "n"), ("s32")),
(("c12", "o16"), ("si27", "n"), ("si28"))]
for r, p, mp in other_rates:
rfilter = pyna.rates.RateFilter(reactants=r, products=p)
_library = reaclib_lib.filter(rfilter)
r = _library.get_rates()[0]
r.modify_products(mp)
subch += _library
```
```python
rc = pyna.RateCollection(libraries=[subch])
```
```python
fig = rc.plot(rotated=True, size=(1300,800), hide_xalpha=True)
```

```python
rc.make_ap_pg_approx(intermediate_nuclei=["cl35", "k39", "sc43", "v47", "mn51", "co55"])
```
using approximate rate S32 + He4 ⟶ Ar36 + 𝛾
using approximate rate Ar36 ⟶ S32 + He4
using approximate rate Ar36 + He4 ⟶ Ca40 + 𝛾
using approximate rate Ca40 ⟶ Ar36 + He4
using approximate rate Ca40 + He4 ⟶ Ti44 + 𝛾
using approximate rate Ti44 ⟶ Ca40 + He4
using approximate rate Ti44 + He4 ⟶ Cr48 + 𝛾
using approximate rate Cr48 ⟶ Ti44 + He4
using approximate rate Cr48 + He4 ⟶ Fe52 + 𝛾
using approximate rate Fe52 ⟶ Cr48 + He4
using approximate rate Fe52 + He4 ⟶ Ni56 + 𝛾
using approximate rate Ni56 ⟶ Fe52 + He4
removing rate S32 + He4 ⟶ Ar36 + 𝛾
removing rate S32 + He4 ⟶ p + Cl35
removing rate Cl35 + p ⟶ Ar36 + 𝛾
removing rate Ar36 ⟶ He4 + S32
removing rate Ar36 ⟶ p + Cl35
removing rate Cl35 + p ⟶ He4 + S32
removing rate Ar36 + He4 ⟶ Ca40 + 𝛾
removing rate Ar36 + He4 ⟶ p + K39
removing rate K39 + p ⟶ Ca40 + 𝛾
removing rate Ca40 ⟶ He4 + Ar36
removing rate Ca40 ⟶ p + K39
removing rate K39 + p ⟶ He4 + Ar36
removing rate Ca40 + He4 ⟶ Ti44 + 𝛾
removing rate Ca40 + He4 ⟶ p + Sc43
removing rate Sc43 + p ⟶ Ti44 + 𝛾
removing rate Ti44 ⟶ He4 + Ca40
removing rate Ti44 ⟶ p + Sc43
removing rate Sc43 + p ⟶ He4 + Ca40
removing rate Ti44 + He4 ⟶ Cr48 + 𝛾
removing rate Ti44 + He4 ⟶ p + V47
removing rate V47 + p ⟶ Cr48 + 𝛾
removing rate Cr48 ⟶ He4 + Ti44
removing rate Cr48 ⟶ p + V47
removing rate V47 + p ⟶ He4 + Ti44
removing rate Cr48 + He4 ⟶ Fe52 + 𝛾
removing rate Cr48 + He4 ⟶ p + Mn51
removing rate Mn51 + p ⟶ Fe52 + 𝛾
removing rate Fe52 ⟶ He4 + Cr48
removing rate Fe52 ⟶ p + Mn51
removing rate Mn51 + p ⟶ He4 + Cr48
removing rate Fe52 + He4 ⟶ Ni56 + 𝛾
removing rate Fe52 + He4 ⟶ p + Co55
removing rate Co55 + p ⟶ Ni56 + 𝛾
removing rate Ni56 ⟶ He4 + Fe52
removing rate Ni56 ⟶ p + Co55
removing rate Co55 + p ⟶ He4 + Fe52
```python
fig = rc.plot(rotated=True, size=(1300,800), hide_xalpha=True)
```

```python
rc.remove_nuclei(["cl35", "k39", "sc43", "v47", "mn51", "co55"])
```
looking to remove Cl35 ⟶ He4 + P31
looking to remove K39 ⟶ He4 + Cl35
looking to remove P31 + He4 ⟶ Cl35 + 𝛾
looking to remove Cl35 + He4 ⟶ K39 + 𝛾
looking to remove K39 ⟶ He4 + Cl35
looking to remove Sc43 ⟶ He4 + K39
looking to remove Cl35 + He4 ⟶ K39 + 𝛾
looking to remove K39 + He4 ⟶ Sc43 + 𝛾
looking to remove Sc43 ⟶ He4 + K39
looking to remove V47 ⟶ He4 + Sc43
looking to remove K39 + He4 ⟶ Sc43 + 𝛾
looking to remove Sc43 + He4 ⟶ V47 + 𝛾
looking to remove V47 ⟶ He4 + Sc43
looking to remove Mn51 ⟶ He4 + V47
looking to remove Sc43 + He4 ⟶ V47 + 𝛾
looking to remove V47 + He4 ⟶ Mn51 + 𝛾
looking to remove Mn51 ⟶ He4 + V47
looking to remove Co55 ⟶ He4 + Mn51
looking to remove V47 + He4 ⟶ Mn51 + 𝛾
looking to remove Mn51 + He4 ⟶ Co55 + 𝛾
looking to remove Co55 ⟶ He4 + Mn51
looking to remove Mn51 + He4 ⟶ Co55 + 𝛾
```python
fig = rc.plot(rotated=True, size=(1600,1000), hide_xalpha=True, curved_edges=True)
```

```python
len(rc.unique_nuclei)
```
22
```python
```
|
pynucastroREPO_NAMEpynucastroPATH_START.@pynucastro_extracted@pynucastro-main@examples@python_net_approx.ipynb@.PATH_END.py
|
{
"filename": "test_channelizer.ipynb",
"repo_name": "MazinLab/MKIDGen3",
"repo_path": "MKIDGen3_extracted/MKIDGen3-main/notebooks/old/test_channelizer.ipynb",
"type": "Jupyter Notebook"
}
|
# Imports, Definitions, and Instantiating the overlay
```python
from mkidgen3.daccomb import generate as gen_comb
from mkidgen3.testutils import *
import mkidgen3.testutils as tu
from mkidgen3.fixedpoint import *
from mkidgen3.pynq import dma_status ## also activates the drivers
import logging
import numpy as np
import time
import pynq
from pynq import PL, Overlay, DefaultIP, allocate
from fpbinary import FpBinary, OverflowEnum, RoundingEnum
import itertools
import matplotlib.pyplot as plt
import scipy.signal
import os
logging.basicConfig()
FP16_23 = lambda x: FpBinary(int_bits=-7, frac_bits=23, signed=True, value=x)
n_res = 2048
n_bin = 4096
first_good_packet=17 # sixteen to prime the FIR and 1 of latency
latency_shift=3*16
```
Load the overlay and extract the dma core
```python
# mmio = pynq.MMIO(0xFD610000 , 64)
# mmio.write(0x18, 7) # Return slave errors when timeouts occur
# mmio.write(0x20, 0x1020) # Set and enable prescale of 32 which should be about 10 ms
# mmio.write(0x10, 0x7) # Enable transactions tracking
# mmio.write(0x14, 0x7) # Enable timeouts
```
```python
ol = Overlay('/home/xilinx/overlayfiles/test_flat_design/test_flat_design.bit',ignore_version=True)
ol.download()
print(f"PL Bitfile: {PL.bitfile_name}\nPL Timestamp: {PL.timestamp}\n"
f"Overlay timestamp: {ol.timestamp} Loaded: {ol.is_loaded()}")
```
PL Bitfile: /home/xilinx/overlayfiles/test_flat_design/test_flat_design.bit
PL Timestamp: 2020/5/17 18:43:4 +958889
Overlay timestamp: 2020/5/17 18:43:4 +958889 Loaded: True
```python
dma = ol.axi_dma_0
# opfbcapdma = ol.axi_dma_1
# switch=ol.axis_switch_0
bin2res=ol.reschan.bin_to_res_0
dds=ol.reschan.resonator_dds_0
```
# Generate and feed a stream
First create a timeseries of IQ values for a bin in it.
A 1Hz signal would have 2M samples
## Load reference data into python
Read in the matlab golden input, output, and PFB coefficients, as we may want to reference them in various places
```python
coeffs=np.zeros((16,256,8)) #lane, coeff set, c7-0
for i in range(16):
with open(f"data/lane{i}.coe",'r') as f:
lines=f.readlines()[1:]
lines[0]=lines[0].partition('=')[-1]
coeffs[i]=np.array(list(map(float,''.join(lines).replace(';','').replace('\n','').split(',')))).reshape(256,8)
```
```python
with open(f"data/fclowpass.coe",'r') as f:
lines=f.readlines()[1:]
lines[0]=lines[0].partition('=')[-1]
lpcoeffs=np.array(list(map(float,''.join(lines).replace(';','').replace('\n','').split(',')))) #c19-0
```
Import matlab results
```python
try:
with np.load("matlabsim_out.npz") as d:
matlab_sim_out=d['matlab_sim_out']
except IOError:
with open('matlabsim_out.csv') as f:
lines=f.readlines()
matlab_sim_out = np.array([list(map(np.complex, x)) for x in
map(lambda x: x.replace('i','j').split(','), lines)],
dtype=np.complex64)
np.savez(f"matlabsim_out.npz", matlab_sim_out=matlab_sim_out)
matlab_sim_out=matlab_sim_out.reshape(matlab_sim_out.shape[0],256,16)
```
## Prepare the Data
What tones we want to send? Note these don't matter if we set use_matlab_input to True
```python
use_matlab_input=True
n_total_packets=128+16+3 #How many packets
n_samples=256*n_total_packets*8 #How many waveform samples do we need to generate
sample_rate=4.096e9
freq=np.array([-300, 100, 151, 1000.5])*1e6 # -2048-2047
amplitudes = np.ones_like(freq)/n_res
phases=np.zeros_like(freq)
```
Generate the complex waveform
```python
d=np.load("matlabsim_in.npz")
```
```python
if not use_matlab_input:
t = 2 * np.pi * np.arange(n_samples)/sample_rate
comb=np.zeros(n_samples, dtype=np.complex64)
for i in range(freq.size):
comb += amplitudes[i] * np.exp(1j * (t * freq[i] + phases[i]))
else:
try:
with np.load("matlabsim_in.npz") as d:
comb=d['comb']
except IOError:
with open('matlabsim_in.csv') as f:
lines=f.read()
comb=np.array(list(map(np.complex,lines.replace('i','j').split(','))), dtype=np.complex64)
np.savez("matlabsim_in.npz", comb=comb)
freq=np.array([-190.5, 175.632, 183, 185])*1e6
amplitudes = np.ones_like(freq)/n_res
phases=np.zeros_like(freq)
comb=comb.reshape(comb.size//n_res, n_res)
print(f"Comb shape: {comb.shape}. \nTotal Samples: {comb.size}. Memory: {comb.size*8/1024**2:.0f} MB\n"
f"Max value: {np.abs(comb).max()}.\n"
f"Expected tone amplitude factor: ~512 * N_TONES_IN_BIN. (4096/8 as FFT scale dfaults to 8 in last 3 stages)\n"
f"Resulting samples per output bin: {comb.size*2/n_bin}")
```
Comb shape: (1280, 2048).
Total Samples: 2621440. Memory: 20 MB
Max value: 0.0019600000232458115.
Expected tone amplitude factor: ~512 * N_TONES_IN_BIN. (4096/8 as FFT scale dfaults to 8 in last 3 stages)
Resulting samples per output bin: 1280.0
Plot an FFT of the first full timeseries
```python
if False:
plt.figure(figsize=(14,8))
plt.plot(np.fft.fftfreq(comb.size, d=1/sample_rate)/1e9,
np.abs(np.fft.fft(comb.ravel(), norm='ortho')))
plt.xlim(-sample_rate/2e9,sample_rate/2e9);
plt.xlabel('Freq (GHz)');
plt.xlim(-.2,.2)
```
## Configure routing
This overlay supports OPFB only or OPFB+Channelizer
```python
# switch.set_master(0) # Master0=OPFB raw Master1=reschan
# switch.set_master(1, disable=True)
# switch.commit()
```
## Configure the fine channelizer
Note this doesn't matter if the switch is set to OPFB only
```python
print('Writing mapping...')
bins=np.arange(2048,dtype=int)-freq.size
bins[:freq.size]=np.round(freq/1e8)+2048
shft_bins=((np.round(freq/1e6).astype(int)+2048)+2048)%4096
strt_bins=np.round(freq/1e6).astype(int)+2048
# Start by assigning bins in order assuming a shifted fft, leaving enough for 2*nfreq slots at the front
bins_fpga=((np.array(([0]*3*freq.size)+list(range(0,2048-3*freq.size)))+2048)+2048)%4096
bins_fpga[:freq.size]=shft_bins
bins_fpga[freq.size:2*freq.size]=shft_bins
bins_fpga[freq.size*2:3*freq.size]=strt_bins[2] #This bin has a tone at the center
bin2res.bins=bins_fpga #2048 items each 0-4095
print('Reading group 0',bin2res.read_group(0))
print('Reading group 255',bin2res.read_group(255))
f_center_strt=np.arange(-2048, 2048)*1e6 #equivalent to f_center=np.fft.fftshift(np.fft.fftfreq(4096, d=1/4.096e9))
f_center=np.fft.fftfreq(4096, d=1/4.096e9)
toneincs=np.zeros(2048)
#This must be 2MHz NOT 2.048MHz, the sign matters! Use 1MHz as that corresponds to ±PI
toneincs[:freq.size]=(freq-f_center[shft_bins])/1e6 #Tones
toneincs[freq.size:2*freq.size]=0 #Unprocessed
toneincs[freq.size*2:3*freq.size]=np.array([.1,.2,.3,.4]) #introduce a sinusoid
phase0s=np.zeros(2048)
print('Writing tones...') # The core expects normalized increments
tones=np.zeros((2,2048))
tones[0,:]=toneincs
tones[1,:]=phase0s
dds.tones=tones
print('Reading toneinc group 0',dds.read_group(0, dds.offset_tones))
print('Reading toneinc group 1',dds.read_group(1, dds.offset_tones))
print('Reading toneinc group 255',dds.read_group(255, dds.offset_tones))
toneincs[:freq.size]=(f_center[shft_bins]-freq)/1e6 #There is some sort of sign issue
```
Writing mapping...
Reading group 0 [3906, 176, 183, 185, 3906, 176, 183, 185]
Reading group 255 [2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035]
Writing tones...
Reading toneinc group 0 ([-0.5, -0.368011474609375, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Reading toneinc group 1 ([0.100006103515625, 0.20001220703125, 0.29998779296875, 0.399993896484375, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Reading toneinc group 255 ([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
## Send the data
This takes a while. We send 12, then 8, then 12 to enable waveform capture across the FIR windup and transition into normal operation
Prepare an output array
```python
packets_out=np.zeros((comb.shape[0]//2, n_res), dtype=np.complex64)
```
1660±1clocks 6.48 packets to first output tlast
6 packets in gets ~2.8 out
subsequent sets of 6 get 3 tlast
```python
txrx(dma, comb, 6, packets_out, n_total_packets=200, packet_latency=1, bin_out=False, wait=False, show=True,
rxfp=(-6,22))
```
Sent: 6 Pending: 2
Received: 2.
Sent: 12 Pending: 3
Received: 5.
Sent: 18 Pending: 3
Received: 8.
Sent: 24 Pending: 3
Received: 11.
Sent: 30 Pending: 3
Received: 14.
Sent: 36 Pending: 3
Received: 17.
Sent: 42 Pending: 3
Received: 20.
Sent: 48 Pending: 3
Received: 23.
Sent: 54 Pending: 3
Received: 26.
Sent: 60 Pending: 3
Received: 29.
Sent: 66 Pending: 3
Received: 32.
Sent: 72 Pending: 3
Received: 35.
Sent: 78 Pending: 3
Received: 38.
Sent: 84 Pending: 3
Received: 41.
Sent: 90 Pending: 3
Received: 44.
Sent: 96 Pending: 3
Received: 47.
Sent: 102 Pending: 3
Received: 50.
Sent: 108 Pending: 3
Received: 53.
Sent: 114 Pending: 3
Received: 56.
Sent: 120 Pending: 3
Received: 59.
Sent: 126 Pending: 3
Received: 62.
Sent: 132 Pending: 3
Received: 65.
Sent: 138 Pending: 3
Received: 68.
Sent: 144 Pending: 3
Received: 71.
Sent: 150 Pending: 3
Received: 74.
Sent: 156 Pending: 3
Received: 77.
Sent: 162 Pending: 3
Received: 80.
Sent: 168 Pending: 3
Received: 83.
Sent: 174 Pending: 3
Received: 86.
Sent: 180 Pending: 3
Received: 89.
Sent: 186 Pending: 3
Received: 92.
Sent: 192 Pending: 3
Received: 95.
Sent: 198 Pending: 3
Received: 98.
And save the output to disk.
```python
np.savez(f"packets_out_testchan.npz", freq=freq, phases=phases, amplitudes=amplitudes, packets_out=packets_out)
```
## Look at the results
First define a bunch of helpers
```python
# settings from HLS
NBITS=21
NLUT=10
LUTSIZE=1024
NFINE=9
FINESIZE=512
DELTA=np.pi/(2*LUTSIZE*FINESIZE)
fp_cos_lut = fparray(np.cos(2*np.pi*np.arange(LUTSIZE)/(4*LUTSIZE)), FP18_17)
fp_fine_lut = fparray(np.sin(DELTA*np.arange(FINESIZE)), FP18_25)
FPACCUM=lambda x: FpBinary(1,NBITS,True, x)
def fp_phases(tones, phase0, n, i0=0):
    # Fixed-point phase-accumulator model: quantize tone increments and
    # starting phases to FP16_15, accumulate over n samples starting at
    # sample i0, then wrap each phase into the (1, NBITS) accumulator range
    # exactly as the HLS core's wrapping overflow does.
    tones=fparray(fparray(tones, FP16_15), FPACCUM)
    phase0=fparray(phase0, FP16_15)
    t=np.arange(n)+i0
    phases=(tones[:,np.newaxis]*t+ phase0[:,np.newaxis])
    out=np.array(list(map(lambda x: float(x.resize((1,NBITS), overflow_mode=OverflowEnum.wrap)), phases.flat)))
    # Transposed so the result is indexed [sample, tone].
    return fparray(out.reshape(phases.shape), FPACCUM).T
def fp_phase2cossin(phase, ret_fp=False, verbose=False):
"""
Index the sin cos lut with a phase, verify the bit math is right with the code in the docsting.
Don't use slicing on FpBinary numbers as it is slow!
phase = FPACCUM(.43221)
phasebits=phase.__index__()
full_adrfoo = phasebits>>(NBITS-NLUT-2+1) # phase(NBITS, NBITS-NLUT-1) 12 bits 21-10 inclusive
fine_adrfoo = (phasebits>>(NBITS-NLUT-NFINE-1))&0x1ff # phase(NBITS-NLUT-2, NBITS-NLUT-NFINE-1) #9-1, 9 bits
msbfoo = full_adrfoo>>10 # full_adr(NLUT+1,NLUT) #Bits 11-10, 2 total, quadrant
lsbfoo = full_adrfoo&0x3ff # full_adr(NLUT-1,0) #Bits 9-0, 10 total, quadrant ndx
full_adr = phase[NBITS: NBITS-NLUT-1] #12 bits 21-10 inclusive
fine_adr = phase[NBITS-NLUT-2:NBITS-NLUT-NFINE-1] #9-1, 9 bits
msb = full_adr[NLUT+1:NLUT] # full_adr(NLUT+1,NLUT) #Bits 11-10, 2 total, quadrant
lsb = full_adr[NLUT-1:0] # full_adr(NLUT-1,0) #Bits 9-0, 10 total, quadrant ndx
if msb!=msbfoo or lsb!=lsbfoo or fine_adr!=fine_adrfoo or full_adr!=full_adrfoo:
print('mismatch')
print('quad')
print(bin(msb))
print(bin(msbfoo))
print("lsb")
print(bin(lsb))
print(bin(lsbfoo))
print("fine addr")
print(bin(fine_adr))
print(bin(fine_adrfoo))
print("full addr")
print(bin(full_adr))
print(bin(full_adrfoo))
"""
phase = FPACCUM(phase)
phasebits=phase.__index__()
full_adr = phasebits>>(NBITS-NLUT-2+1) # phase(NBITS, NBITS-NLUT-1) 12 bits 21-10 inclusive
fine_adr = (phasebits>>(NBITS-NLUT-NFINE-1))&0x1ff # phase(NBITS-NLUT-2, NBITS-NLUT-NFINE-1) #9-1, 9 bits
msb= full_adr>>10 # full_adr(NLUT+1,NLUT) #Bits 11-10, 2 total, quadrant
lsb= full_adr&0x3ff # full_adr(NLUT-1,0) #Bits 9-0, 10 total, quadrant ndx
def show():
print(f'Phase: {phase}')
print(f'MSB: {msb}')
print(f'LSB: {lsb} (-LSB: {(lsb ^ 0x3ff) +1})')
print(f'Fine: {fine_adr}')
if (msb==0): #right top
if verbose:
print('Top Right')
show()
cos_adr = lsb
cos_lut_word = fp_cos_lut[cos_adr]
if (lsb==0):
sin_lut_word = 0
else:
sin_adr = -lsb #(lsb ^ 0x3ff) +1 #-lsb
sin_lut_word = fp_cos_lut[sin_adr]
elif (msb==1): #left top
if verbose:
print('Top Left')
show()
if (lsb==0):
cos_lut_word = 0
else:
cos_adr = -lsb # (lsb ^ 0x3ff) +1 #-lsb
cos_lut_word = -fp_cos_lut[cos_adr]
sin_adr = lsb
sin_lut_word = fp_cos_lut[sin_adr]
elif (msb==3): # // right bot
if verbose:
print('Bot Right')
show()
if (lsb==0):
cos_lut_word = 0
else:
cos_adr = -lsb # (lsb ^ 0x3ff) +1 #-lsb
cos_lut_word = fp_cos_lut[cos_adr]
sin_adr = lsb
sin_lut_word = -fp_cos_lut[sin_adr]
else: #// left bot
if verbose:
print('Bot Left')
show()
cos_adr = lsb
cos_lut_word = -fp_cos_lut[cos_adr]
if (lsb==0):
sin_lut_word = 0
else:
sin_adr = -lsb # (lsb ^ 0x3ff) +1 #-lsb
sin_lut_word = -fp_cos_lut[sin_adr]
fine_word = fp_fine_lut[fine_adr]
cos_dds = (cos_lut_word - sin_lut_word * fine_word)
sin_dds = (sin_lut_word + cos_lut_word * fine_word)
ret=cos_dds.resize((1,17)), sin_dds.resize((1,17))
return ret if ret_fp else float(ret[0])+float(ret[1])*1j
def fp_cmpy(a, b):
    """Complex multiply for arrays with interleaved real/imag components.

    Even flat positions hold real parts and odd positions imaginary parts;
    the result keeps the same layout (and shape/dtype, since it starts as a
    copy of ``a``).
    """
    result = a.copy()
    a_re = a.flat[::2]
    a_im = a.flat[1::2]
    b_re = b.flat[::2]
    b_im = b.flat[1::2]
    # (a_re + i*a_im) * (b_re + i*b_im)
    result.flat[::2] = a_re * b_re - a_im * b_im
    result.flat[1::2] = a_im * b_re + a_re * b_im
    return result
def tone2phase(tone,n, p0=0):
    # Phase after n samples of a normalized tone increment `tone` starting
    # from phase p0, wrapped into the (1, NBITS) fixed-point accumulator
    # range.  Uses module-level FP16_15/NBITS (HLS bit-width settings).
    return (FP16_15(tone)*n+FP16_15(p0)).resize((1,NBITS), overflow_mode=OverflowEnum.wrap)
def fp_cmpy_2(ar, ai, br, bi):
    """Return the (real, imag) parts of (ar + i*ai) * (br + i*bi)."""
    real_part = ar * br - ai * bi
    imag_part = ai * br + ar * bi
    return real_part, imag_part
def fpddc(i,q, inc, n=0, p0=0, fromint=True):
    # Fixed-point DDC of a single (i, q) sample: build the DDS cos/sin for
    # sample n of tone increment `inc`, then complex-multiply and resize the
    # result to the FP16_23 output format, printing each stage for debugging.
    ddsi,ddsq=fp_phase2cossin(tone2phase(inc,n, p0=p0), ret_fp=True, verbose=True)
    if fromint:
        # Interpret i/q as raw bit fields of a (-9, 25) fixed-point number.
        conv = lambda x: FpBinary(int_bits=-9, frac_bits=25, signed=True, bit_field=int(x))
    else:
        conv = FP15_25
    # NOTE(review): this local FP16_23 shadows the module-level lambda with
    # identical parameters; it looks redundant — confirm before removing.
    FP16_23 = lambda x: FpBinary(int_bits=-7, frac_bits=23, signed=True, value=x)
    ci,cq=conv(i),conv(q)
    out=tuple(map(FP16_23, fp_cmpy_2(ci,cq, ddsi, ddsq)))
    print(f'DDS: {ddsi}, {ddsq}')
    print(f"IQin :{ci}, {cq} ({ci.__index__()}, {cq.__index__()})")
    print(f"IQou :{out[0]}, {out[1]} ({out[0].__index__()}, {out[1].__index__()})")
    return out
def ddc_mlab(mlab, ddsv, lowpass=False):
    """Digitally down-convert the matlab reference data.

    Parameters
    ----------
    mlab : complex array
        PFB bin timeseries from the matlab simulation; per usage in this
        notebook, indexed [sample, channel].
    ddsv : complex array, same shape as ``mlab``
        DDS (local oscillator) values to mix against.
    lowpass : bool
        If True, additionally low-pass filter each channel with the
        module-level FIR coefficients ``lpcoeffs``.

    Returns
    -------
    The mixed (and optionally filtered) timeseries.
    """
    ddced = ddsv*mlab
    if lowpass:
        # Filter each channel independently; mode='valid' trims the FIR transient.
        out = np.array([scipy.signal.convolve(x, lpcoeffs, mode='valid') for x in ddced.T]).T
    else:
        # BUG FIX: this branch previously read `out=ddsed`, an undefined name,
        # so every lowpass=False call raised NameError.
        out = ddced
    return out
```
Show how good the LUT is:
```python
plt.plot(np.arange(-1,1,.001),
np.cos(np.arange(-1,1,.001)*np.pi)-np.array([fp_phase2cossin(p).real for p in np.arange(-1,1,.001)]))
plt.plot(np.arange(-1,1,.001),
np.sin(np.arange(-1,1,.001)*np.pi)-np.array([fp_phase2cossin(p).imag for p in np.arange(-1,1,.001)]));
```

Do double precision matlab calcs
```python
#Matlab data still needs the FFT
mlab_pfb=np.fft.fftshift(np.fft.fft(matlab_sim_out.reshape(matlab_sim_out.shape[0], n_bin), axis=1),1)
#Matlab PFB output
mlab=mlab_pfb[:tu.n_packets_rcvd, strt_bins]
phasemlab=(toneincs[:mlab.shape[1],np.newaxis]*np.arange(mlab.shape[0]) + phase0s[:mlab.shape[1],np.newaxis]).T
# #Also do the DDC on the matlab data
ddsv = np.cos(phasemlab*np.pi)+np.sin(phasemlab*np.pi)*1j
ddcd=ddc_mlab(mlab, ddsv, lowpass=True)
```
Load cache of or do the fixed point calcs
```python
data=packets_out # [sample, resonator]
```
```python
if not os.path.exists('testchan_cache.npz'):
fpcomb16_15=fparray(comb, FP16_15)
fpcoeff16_26=fparray(coeffs, FP16_26)
pfb_fp16=do_fixed_point_pfb(fpcomb16_15, fpcoeff16_26)
fp_opfb=np.fft.fft(pfb_fp16.reshape(pfb_fp16.shape[0], n_bin), axis=1) #shifting here causes problems for plotting
#Get DDS values for fixed point
fp_biniq=fp_opfb[:tu.n_packets_rcvd,shft_bins] #fftshift is required here
fp_phase=fp_phases(toneincs[:fp_biniq.shape[1]],phase0s[:fp_biniq.shape[1]],fp_biniq.shape[0], i0=18) #more accuate than reality by a bit
fp_ddsv=np.array([list(map(fp_phase2cossin, p)) for p in fp_phase.T]).T
fp_ddsv=fparray(fp_ddsv, FP18_17)
fp_biniq=fparray(fp_biniq, FP16_25)
#Do the DDC
fp_ddcd=fp_cmpy(fp_biniq, fp_ddsv)
fp_ddcd=fparray(fp_ddcd, lambda x: FpBinary(-7,23,True,x))
#Back to floating
fp_ddsv = fp_ddsv[:,:,0].astype(float)+1j*fp_ddsv[:,:,1].astype(float)
fp_biniq = fp_biniq[:,:,0].astype(float)+1j*fp_biniq[:,:,1].astype(float)
fp_ddcd = fp_ddcd[:,:,0].astype(float)+1j*fp_ddcd[:,:,1].astype(float)
fp_opfb = fp_opfb[:,:,0].astype(float)+1j*fp_opfb[:,:,1].astype(float)
np.savez('testchan_cache.npz', fp_ddsv=fp_ddsv, fp_ddcd=fp_ddcd, fp_biniq=fp_biniq, fp_opfb=fp_opfb)
else:
with np.load('testchan_cache.npz') as d:
fp_ddsv=d['fp_ddsv']
fp_ddcd=d['fp_ddcd']
fp_biniq=d['fp_biniq']
fp_opfb=d['fp_opfb']
```
Matlab plots
```python
_,axes = plt.subplots(mlab.shape[1],2,figsize=(14,2.83*mlab.shape[1]))
for i,(raw,ddc) in enumerate(zip(mlab.T,ddcd.T)):
plt.sca(axes[i,0])
plt.plot(raw.real)
plt.plot(raw.imag)
plt.title(f"ResID {i}/BinID {bins[i]} {toneincs[i]}")
plt.sca(axes[i,1])
plt.plot(ddc.real)
plt.plot(ddc.imag)
plt.title(f"DDCed. Tone was at {toneincs[i]*1e3} kHz")
plt.subplots_adjust(wspace=.15, hspace=.35)
```

Fixed point simulation plots
```python
lim=(-.00055,.00055)
_,axes = plt.subplots(fp_biniq.shape[1],3,figsize=(15,2.83*fp_biniq.shape[1]))
for i,(a,b,c) in enumerate(zip(fp_biniq[:tu.n_packets_rcvd].T,fp_ddcd[:tu.n_packets_rcvd].T,
ddcd.T)):
plt.sca(axes[i,0])
plt.plot(a.real)
plt.plot(a.imag)
plt.ylim(*lim)
plt.title(f"ResID {i}/BinID {bins[i]} {toneincs[i]}")
plt.sca(axes[i,1])
plt.plot(b.real)
plt.plot(b.imag)
plt.ylim(*lim)
plt.title(f"DDCed. Tone was at {toneincs[i]*1e3} kHz")
plt.sca(axes[i,2])
plt.plot(c.real)
plt.plot(c.imag)
plt.title('DDCed, floating point')
plt.ylim(*lim)
plt.subplots_adjust(wspace=.2, hspace=.35)
```

```python
plt.plot(np.fft.fftshift(np.fft.fftfreq(mlab.shape[0])),np.fft.fftshift(np.abs(np.fft.fft(mlab[:,0]))),
label='double')
plt.plot(np.fft.fftshift(np.fft.fftfreq(ddcd.shape[0])),np.fft.fftshift(np.abs(np.fft.fft(ddcd[:,0]))),
label='double DDCd')
plt.plot(np.fft.fftshift(np.fft.fftfreq(fp_ddcd.shape[0]))+.2,np.fft.fftshift(np.abs(np.fft.fft(fp_ddcd[:,0]))),
label='fixed point, DDCd+.2')
plt.legend();
```

Try and isolate the first valid output
```python
plt.plot(np.arange(15,30),data.imag[15:30,2],'.');
```

```python
lim=(-.00055,.00055)
res_plot=shft_bins#9,1023,1024,2047] #which channels to plot
_,axes = plt.subplots(len(res_plot),3,figsize=(15,2.8*len(res_plot)))
for i,(rID, a,b, c) in enumerate(zip(res_plot,
fp_biniq[first_good_packet:tu.n_packets_rcvd].T,
data[first_good_packet:tu.n_packets_rcvd,:4].T,
ddcd[:tu.n_packets_rcvd].T)):
plt.sca(axes[i,0])
plt.plot(a.real)
plt.plot(a.imag)
plt.ylim(*lim)
plt.title(f"FP input sim")
plt.sca(axes[i,1])
plt.plot(b.real)
plt.plot(b.imag)
plt.ylim(*lim)
plt.title(f"FPGA. Tone was at {toneincs[i]*1e3} kHz")
plt.sca(axes[i,2])
plt.plot(c.real)
plt.plot(c.imag)
plt.title('DDCed, floating point')
plt.ylim(*lim)
plt.subplots_adjust(wspace=.2, hspace=.35)
```

```python
plt.plot((1j*data[first_good_packet+2:tu.n_packets_rcvd,1]).real)
plt.plot(ddcd[:tu.n_packets_rcvd,1].real)
plt.figure()
plt.plot((-1j*data[first_good_packet+2:tu.n_packets_rcvd,1]).imag)
plt.plot(ddcd[:tu.n_packets_rcvd,1].imag)
#,ddcd[:tu.n_packets_rcvd,1]
```
[<matplotlib.lines.Line2D at 0x7f6ee21978>]


```python
```
|
MazinLabREPO_NAMEMKIDGen3PATH_START.@MKIDGen3_extracted@MKIDGen3-main@notebooks@old@test_channelizer.ipynb@.PATH_END.py
|
{
"filename": "subscriptions.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/client/subscriptions.py",
"type": "Python"
}
|
import asyncio
from collections.abc import Iterable
from logging import Logger
from typing import Any, Generic, Optional, TypeVar
import orjson
import websockets
import websockets.exceptions
from starlette.status import WS_1008_POLICY_VIOLATION
from typing_extensions import Self
from prefect._internal.schemas.bases import IDBaseModel
from prefect.logging import get_logger
from prefect.settings import PREFECT_API_KEY
logger: Logger = get_logger(__name__)
S = TypeVar("S", bound=IDBaseModel)
class Subscription(Generic[S]):
    """Async iterator yielding server-pushed objects of type ``S`` from a
    Prefect websocket subscription.

    Connects lazily on first iteration, authenticates with the configured
    ``PREFECT_API_KEY``, subscribes to ``keys``, acks every received message,
    and transparently reconnects (with a 0.5 s backoff) if the connection
    drops.
    """

    def __init__(
        self,
        model: type[S],
        path: str,
        keys: Iterable[str],
        client_id: Optional[str] = None,
        base_url: Optional[str] = None,
    ):
        self.model = model
        self.client_id = client_id
        # Subscriptions ride over websockets: http(s):// -> ws(s)://
        base_url = base_url.replace("http", "ws", 1) if base_url else None
        self.subscription_url: str = f"{base_url}{path}"
        self.keys: list[str] = list(keys)
        # Reusable connect helper; entered/exited manually in the methods below.
        self._connect = websockets.connect(
            self.subscription_url,
            subprotocols=[websockets.Subprotocol("prefect")],
        )
        self._websocket = None

    def __aiter__(self) -> Self:
        return self

    @property
    def websocket(self) -> websockets.WebSocketClientProtocol:
        # Accessor that fails loudly if used before _ensure_connected().
        if not self._websocket:
            raise RuntimeError("Subscription is not connected")
        return self._websocket

    async def __anext__(self) -> S:
        """Return the next subscribed object, reconnecting on connection loss."""
        while True:
            try:
                await self._ensure_connected()
                message = await self.websocket.recv()
                # Ack immediately so the server can release the delivery.
                await self.websocket.send(orjson.dumps({"type": "ack"}).decode())
                return self.model.model_validate_json(message)
            except (
                ConnectionRefusedError,
                websockets.exceptions.ConnectionClosedError,
            ):
                self._websocket = None
                if hasattr(self._connect, "protocol"):
                    # The context manager was entered; close it before retrying.
                    await self._connect.__aexit__(None, None, None)
                await asyncio.sleep(0.5)

    async def _ensure_connected(self):
        """Open the websocket and perform the auth + subscribe handshake."""
        if self._websocket:
            return
        websocket = await self._connect.__aenter__()
        try:
            await websocket.send(
                orjson.dumps(
                    {"type": "auth", "token": PREFECT_API_KEY.value()}
                ).decode()
            )
            auth: dict[str, Any] = orjson.loads(await websocket.recv())
            assert auth["type"] == "auth_success", auth.get("message")
            message: dict[str, Any] = {"type": "subscribe", "keys": self.keys}
            if self.client_id:
                message.update({"client_id": self.client_id})
            await websocket.send(orjson.dumps(message).decode())
        except (
            AssertionError,
            websockets.exceptions.ConnectionClosedError,
        ) as e:
            # An auth rejection shows up either as a failed assertion above or
            # as a close with the policy-violation code; surface a helpful
            # message for those, re-raise anything else unchanged.
            if isinstance(e, AssertionError) or (
                e.rcvd and e.rcvd.code == WS_1008_POLICY_VIOLATION
            ):
                if isinstance(e, AssertionError):
                    reason = e.args[0]
                elif e.rcvd and e.rcvd.reason:
                    reason = e.rcvd.reason
                else:
                    reason = "unknown"
            else:
                reason = None
            if reason:
                raise Exception(
                    "Unable to authenticate to the subscription. Please "
                    "ensure the provided `PREFECT_API_KEY` you are using is "
                    f"valid for this environment. Reason: {reason}"
                ) from e
            raise
        else:
            # Handshake succeeded; only now expose the socket to __anext__.
            self._websocket = websocket

    def __repr__(self) -> str:
        return f"{type(self).__name__}[{self.model.__name__}]"
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@client@subscriptions.py@.PATH_END.py
|
{
"filename": "_namelengthsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choropleth/hoverlabel/_namelengthsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``choropleth.hoverlabel.namelengthsrc`` property."""

    def __init__(
        self, plotly_name="namelengthsrc", parent_name="choropleth.hoverlabel", **kwargs
    ):
        # Default the edit type unless the caller supplied one explicitly.
        kwargs.setdefault("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choropleth@hoverlabel@_namelengthsrc.py@.PATH_END.py
|
{
"filename": "smith_kcorr.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/py/LSS/DESI_ke/smith_kcorr.py",
"type": "Python"
}
|
import os
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.interpolate import interp1d
from pkg_resources import resource_filename
raw_dir = os.environ['CODE_ROOT'] + '/data/'
class GAMA_KCorrection(object):
    """Colour-dependent polynomial GAMA K-correction (Fig. 13 of Smith+17)."""

    def __init__(self, band, kind="linear"):
        """
        Colour-dependent polynomial fit to the GAMA K-correction (Fig. 13 of Smith+17),
        used to convert between SDSS r-band Petrosian apparent magnitudes, and rest
        frame absolute magnitudes at z_ref = 0.1

        Args:
            band: photometric band ('R' or 'G'); selects the coefficient file
            kind: type of interpolation between colour bins,
                  e.g. "linear", "cubic". Default is "linear"
        """
        k_corr_file = raw_dir + '/ajs_kcorr_{}band_z01.dat'.format(band.lower())
        # read file of parameters of polynomial fit to k-correction
        # polynomial k-correction is of the form
        # A*(z-z0)^4 + B*(z-z0)^3 + C*(z-z0)^2 + D*(z-z0) + E
        col_min, col_max, A, B, C, D, E, col_med = \
            np.loadtxt(k_corr_file, unpack=True)
        self.z0 = 0.1              # reference redshift
        self.nbins = len(col_min)  # number of colour bins in file
        self.colour_min = np.min(col_med)
        self.colour_max = np.max(col_med)
        self.colour_med = col_med
        # functions for interpolating polynomial coefficients in rest-frame color.
        self.__A_interpolator = self.__initialize_parameter_interpolator(A, col_med, kind=kind)
        self.__B_interpolator = self.__initialize_parameter_interpolator(B, col_med, kind=kind)
        self.__C_interpolator = self.__initialize_parameter_interpolator(C, col_med, kind=kind)
        self.__D_interpolator = self.__initialize_parameter_interpolator(D, col_med, kind=kind)
        # constant term is shared across colour bins (same E in every row)
        self.__E = E[0]
        # Linear extrapolation for z > 0.5; placeholders are replaced below
        # (they must exist before __initialize_line_interpolators calls self.k).
        self.__X_interpolator = lambda x: None
        self.__Y_interpolator = lambda x: None
        self.__X_interpolator, self.__Y_interpolator = self.__initialize_line_interpolators()

    def __initialize_parameter_interpolator(self, parameter, median_colour, kind="linear"):
        # returns function for interpolating polynomial coefficients, as a function of colour
        return interp1d(median_colour, parameter, kind=kind, fill_value="extrapolate")

    def __initialize_line_interpolators(self):
        # linear coefficients for z>0.5, fitted through the polynomial
        # K-correction evaluated at z=0.48 and z=0.5 for each colour bin
        X = np.zeros(self.nbins)
        Y = np.zeros(self.nbins)
        # find X, Y at each colour
        redshift = np.array([0.48,0.5])
        arr_ones = np.ones(len(redshift))
        for i in range(self.nbins):
            k = self.k(redshift, arr_ones*self.colour_med[i])
            X[i] = (k[1]-k[0]) / (redshift[1]-redshift[0])   # slope
            Y[i] = k[0] - X[i]*redshift[0]                   # intercept
        X_interpolator = interp1d(self.colour_med, X, kind='linear', fill_value="extrapolate")
        Y_interpolator = interp1d(self.colour_med, Y, kind='linear', fill_value="extrapolate")
        return X_interpolator, Y_interpolator

    def __A(self, colour):
        # coefficient of the z**4 term; colour clipped to the fitted range
        colour_clipped = np.clip(colour, self.colour_min, self.colour_max)
        return self.__A_interpolator(colour_clipped)

    def __B(self, colour):
        # coefficient of the z**3 term
        colour_clipped = np.clip(colour, self.colour_min, self.colour_max)
        return self.__B_interpolator(colour_clipped)

    def __C(self, colour):
        # coefficient of the z**2 term
        colour_clipped = np.clip(colour, self.colour_min, self.colour_max)
        return self.__C_interpolator(colour_clipped)

    def __D(self, colour):
        # coefficient of the z**1 term
        colour_clipped = np.clip(colour, self.colour_min, self.colour_max)
        return self.__D_interpolator(colour_clipped)

    def __X(self, colour):
        # slope of the linear extrapolation used for z > 0.5
        colour_clipped = np.clip(colour, self.colour_min, self.colour_max)
        return self.__X_interpolator(colour_clipped)

    def __Y(self, colour):
        # intercept of the linear extrapolation used for z > 0.5
        colour_clipped = np.clip(colour, self.colour_min, self.colour_max)
        return self.__Y_interpolator(colour_clipped)

    def k(self, redshift, restframe_colour, median=False):
        """
        Polynomial fit to the GAMA K-correction for z<0.5
        The K-correction is extrapolated linearly for z>0.5

        Args:
            redshift: array of redshifts
            restframe_colour: array of ^0.1(g-r) colour
            median: if True, ignore the supplied colours and use the
                    median colour 0.603 for every object
        Returns:
            array of K-corrections
        """
        K = np.zeros(len(redshift))
        idx = redshift <= 0.5
        if median:
            restframe_colour = np.copy(restframe_colour)
            # Fig. 13 of https://arxiv.org/pdf/1701.06581.pdf
            restframe_colour = 0.603 * np.ones_like(restframe_colour)
        # polynomial branch for z <= 0.5
        K[idx] = self.__A(restframe_colour[idx])*(redshift[idx]-self.z0)**4 + \
            self.__B(restframe_colour[idx])*(redshift[idx]-self.z0)**3 + \
            self.__C(restframe_colour[idx])*(redshift[idx]-self.z0)**2 + \
            self.__D(restframe_colour[idx])*(redshift[idx]-self.z0) + self.__E
        # linear extrapolation branch for z > 0.5
        idx = redshift > 0.5
        K[idx] = self.__X(restframe_colour[idx])*redshift[idx] + self.__Y(restframe_colour[idx])
        return K

    def k_nonnative_zref(self, refz, redshift, restframe_colour, median=False):
        # K-correction referenced to an arbitrary redshift refz instead of
        # the native z0=0.1 (difference of the two corrections plus the
        # bandpass-stretch term -2.5*log10(1+refz))
        refzs = refz * np.ones_like(redshift)
        return self.k(redshift, restframe_colour, median=median) - self.k(refzs, restframe_colour, median=median) - 2.5 * np.log10(1. + refz)

    def rest_gmr_index(self, rest_gmr, kcoeff=False):
        """Return the colour-bin index (via np.digitize) for each rest-frame g-r."""
        # bin edges bracket the seven GAMA colour bins with open-ended extremes
        bins = np.array([-100., 0.18, 0.35, 0.52, 0.69, 0.86, 1.03, 100.])
        idx = np.digitize(rest_gmr, bins)
        # NOTE(review): the block below is dead/incomplete code (references
        # undefined names col_med, A, B, C, D) kept from an earlier draft;
        # the kcoeff argument currently has no effect.
        '''
        if kcoeff==True:
          for i in enumerate(rest_gmr):
            ddict = {i:{col_med, A[0], B[0], C[0], D[0]}}
        '''
        return idx
class GAMA_KCorrection_color():
    """Combined G- and R-band GAMA K-corrections for transforming g-r colours.

    Wraps two GAMA_KCorrection instances (R and G bands); constructing this
    class reads the coefficient tables for both bands from ``raw_dir``.
    """

    def __init__(self):
        self.kRcorr = GAMA_KCorrection(band='R')
        self.kGcorr = GAMA_KCorrection(band='G')

    def obs_gmr(self, rest_gmr, z=None):
        """Observed-frame g-r colour for rest-frame colours at redshift ``z``.

        BUG FIX: the original body referenced an undefined global ``z`` and
        raised NameError on every call; the redshift array must now be passed
        explicitly (backward compatible: one-argument calls never worked).

        Args:
            rest_gmr: array of rest-frame ^0.1(g-r) colours
            z: array of observed redshifts, same shape as rest_gmr

        Raises:
            ValueError: if ``z`` is not provided.
        """
        if z is None:
            raise ValueError("obs_gmr requires the observed redshift array `z`")
        return rest_gmr + self.kRcorr.k(z, rest_gmr) - self.kGcorr.k(z, rest_gmr)

    def rest_gmr_nonnative(self, native_rest_gmr):
        """Shift a native-frame (z_ref=0.1) rest colour using the z=0 corrections.

        Evaluates both band K-corrections at z=0 and adds their difference.
        """
        refzs = np.zeros_like(native_rest_gmr)
        return native_rest_gmr + self.kGcorr.k(refzs, native_rest_gmr) - self.kRcorr.k(refzs, native_rest_gmr)
def test_plots(axes):
    """Reproduce Fig. 13 of Smith+17: r- and g-band K-corrections vs redshift.

    FIX: the original function repeated the axes[0] styling and the entire
    g-band loop a second time, plotting every g-band curve twice and
    duplicating legend entries; the redundant copy has been removed.

    Args:
        axes: sequence of two matplotlib Axes; axes[0] receives the r-band
              panel, axes[1] the g-band panel.
    """
    kcorr_r = GAMA_KCorrection(band='R')
    kcorr_g = GAMA_KCorrection(band='G')
    z = np.arange(-0.01, 0.601, 0.01)
    # median ^0.1(g-r) colours of the seven GAMA colour bins
    cols = 0.130634, 0.298124, 0.443336, 0.603434, 0.784644, 0.933226, 1.06731
    # make r-band k-correction plot
    for c in cols:
        col = np.ones(len(z)) * c
        k = kcorr_r.k(z, col)
        axes[0].plot(z, k, label=r"$^{0.1}(g-r)_\mathrm{med}=%.3f$"%c)
    axes[0].set_xlabel(r"$z$")
    axes[0].set_ylabel(r"$^{0.1}K_r(z)$")
    axes[0].set_xlim(0, 0.6)
    axes[0].set_ylim(-0.6, 1)
    axes[0].legend(loc="upper left").draw_frame(False)
    # make g-band k-correction plot
    for c in cols:
        col = np.ones(len(z)) * c
        k = kcorr_g.k(z, col)
        axes[1].plot(z, k, label=r"$^{0.1}(g-r)_\mathrm{med}=%.3f$"%c)
    axes[1].set_xlabel(r"$z$")
    axes[1].set_ylabel(r"$^{0.1}K_g(z)$")
    axes[1].set_xlim(-0.01, 0.6)
    axes[1].set_ylim(-0.4, 1.4)
    axes[1].legend(loc="upper left").draw_frame(False)
def test_nonnative_plots(axes, zref):
    """Plot r- and g-band K-corrections re-referenced to redshift ``zref``.

    Args:
        axes: sequence of two matplotlib Axes; axes[0] gets the r band,
              axes[1] the g band.
        zref: reference redshift passed to k_nonnative_zref.
    """
    corr_r = GAMA_KCorrection(band='R')
    corr_g = GAMA_KCorrection(band='G')
    zgrid = np.arange(-0.01, 0.601, 0.01)
    # median ^0.1(g-r) colours of the seven GAMA colour bins
    med_colours = (0.130634, 0.298124, 0.443336, 0.603434, 0.784644, 0.933226, 1.06731)
    # per-panel configuration: (axis, correction, ylabel, xlim, ylim, legend?)
    panels = [
        (axes[0], corr_r, r"$^{0.0}K_r(z)$", (0, 0.6), (-0.6, 1), True),
        (axes[1], corr_g, r"$^{0.0}K_g(z)$", (-0.01, 0.6), (-0.4, 1.4), False),
    ]
    for ax, corr, ylab, xlim, ylim, with_legend in panels:
        for med in med_colours:
            colour = np.ones(len(zgrid)) * med
            kvals = corr.k_nonnative_zref(zref, zgrid, colour)
            ax.plot(zgrid, kvals, label=r"$^{0.0}(g-r)_\mathrm{med}=%.3f$"%med)
        ax.set_xlabel(r"$z$")
        ax.set_ylabel(ylab)
        ax.set_xlim(*xlim)
        ax.set_ylim(*ylim)
        if with_legend:
            ax.legend(loc="upper left").draw_frame(False)
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@py@LSS@DESI_ke@smith_kcorr.py@.PATH_END.py
|
{
"filename": "astropy-bls-tutorial-checkpoint.ipynb",
"repo_name": "noraeisner/LATTE",
"repo_path": "LATTE_extracted/LATTE-master/.ipynb_checkpoints/astropy-bls-tutorial-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```
%matplotlib inline
%config IPython.matplotlib.backend = "retina"
from matplotlib import rcParams
rcParams["figure.dpi"] = 150
rcParams["savefig.dpi"] = 150
```
In this tutorial, we will demonstrate the usage patterns for the `astropy.stats.bls` package.
To run this tutorial, you need to install the `transit-periodogram` branch of [dfm/astropy](https://github.com/dfm/astropy/tree/transit-periodogram).
This is meant as a reference implementation of the "box least-squares" (BLS) method developed [by Kovács et al. (2002)](https://arxiv.org/abs/astro-ph/0206099).
This is the standard algorithm used to detect transiting exoplanets in time series datasets.
We won't go into the details here, but the basic idea is that the transit is modeled as a top hat and, under the assumption of known independent Gaussian uncertainties, this leads to several simplifications that make evaluating the model likelihood (relatively) computationally efficient.
The transit periodogram computes the log likelihood of the "box" fit (maximized over transit depth, duration, and phase) for a list of periods.
Peaks in the periodogram generally indicate transiting planets or other unmodeled noise.
Methods for determining false alarm rates for this kind of search are an active area of research, so we won't go into that here, but users should always be cautious in their interpretation of the results from a method like this.
To demonstrate the code, we start by downloading the K2 light curve for `K2-3`, a system with 3 known transiting planets:
```
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import units as u
url = "https://archive.stsci.edu/hlsps/everest/v2/c01/201300000/67065/hlsp_everest_k2_llc_201367065-c01_kepler_v2.0_lc.fits"
with fits.open(url) as hdus:
data = hdus[1].data
t = data["TIME"]
y = data["FLUX"]
q = data["QUALITY"]
# This is from the EVEREST source. These are the flagged data points
# that should be removed. Ref: https://github.com/rodluger/everest
m = np.isfinite(t) & np.isfinite(y)
for b in [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17]:
m &= (q & (2 ** (b - 1))) == 0
t = np.ascontiguousarray(t[m], dtype=np.float64) * u.day
y = np.ascontiguousarray(y[m], dtype=np.float64)
y = (y / np.median(y) - 1)*1e3
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(6, 3))
ax.plot(t, y, "k")
ax.set_xlim(t.min().value, t.max().value)
ax.set_xlabel("time [days]")
ax.set_ylabel("relative flux [ppt]");
```

Then we'll fit for the long-term trends using a running windowed median filter.
```
from scipy.signal import medfilt
trend = medfilt(y, 45)
mu = np.median(y)
y_filt =(y - trend)
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(6, 6))
ax = axes[0]
ax.plot(t, y, "k")
ax.plot(t, trend)
ax.set_ylabel("relative flux [ppt]")
ax = axes[1]
ax.plot(t, y_filt, "k")
ax.set_xlim(t.min().value, t.max().value)
ax.set_xlabel("time [days]")
ax.set_ylabel("de-trended flux [ppt]");
```

Now, to find a transiting planet in this light curve, we use the `TransitPeriodogram` class.
The interface was designed to follow the conventions of `astropy.stats.LombScargle` so this might seem familiar to those of you who are familiar with that.
First, the user must select a set of durations to test (in the same units as the time variable above) and then a search can be run using the following commands (see the docstrings for all the options):
```
import astropy
print("astropy version: {0}".format(astropy.__version__))
from astropy.stats import BoxLeastSquares
durations = np.linspace(0.05, 0.2, 10) * u.day
model = BoxLeastSquares(t, y_filt)
results = model.autopower(durations, frequency_factor=5.0)
print(results)
```
astropy version: 3.1.1
depth: array([0.01149651, 0.01286837, 0.01267318, ..., 0.69284145, 0.72999508,
0.61095512])
depth_err: array([0.04057764, 0.04061618, 0.04059689, ..., 0.3337471 , 0.37832923,
0.316664 ])
depth_snr: array([0.28332141, 0.3168287 , 0.31217112, ..., 2.07594748, 1.92952335,
1.92934819])
duration: <Quantity [0.085, 0.085, 0.085, ..., 0.085, 0.065, 0.085] d>
log_likelihood: array([0.05095148, 0.06367127, 0.06183461, ..., 2.16013173, 1.86512486,
1.8663308 ])
objective: 'likelihood'
period: <Quantity [ 0.4 , 0.40000624, 0.40001248, ..., 39.90183014,
39.96400821, 40.02638037] d>
power: array([0.05095148, 0.06367127, 0.06183461, ..., 2.16013173, 1.86512486,
1.8663308 ])
transit_time: <Quantity [4.94946699e-03, 3.74111002e-01, 3.43271575e-01, ...,
3.56002726e+01, 1.23745389e+01, 2.93723116e+01] d>
The output from this method has several useful columns, but the most useful ones are probably `period` and `power`.
Using these, we can plot the periodogram:
```
# Find the period of the peak
period = results.period[np.argmax(results.power)]
print (type(period))
fig, ax = plt.subplots(1, 1, figsize=(6, 3))
# Highlight the harmonics of the peak period
ax.axvline(period.value, alpha=0.4, lw=3)
for n in range(2, 10):
ax.axvline(n*period.value, alpha=0.4, lw=1, linestyle="dashed")
ax.axvline(period.value / n, alpha=0.4, lw=1, linestyle="dashed")
# Plot the periodogram
ax.plot(results.period, results.power, "k", lw=0.5)
ax.set_xlim(results.period.min().value, results.period.max().value)
ax.set_xlabel("period [days]")
ax.set_ylabel("log likelihood");
```
<class 'astropy.units.quantity.Quantity'>

The structure that you can see in this periodogram is pretty typical for systems with transiting planets.
The peak period is highlighted with a thick blue line and the integer harmonics of this period are indicated with dashed blue lines.
The code can compute some descriptive stats at the maximum peak that are useful for vetting our transit candidate:
```
index = np.argmax(results.power)
period = results.period[index]
t0 = results.transit_time[index]
duration = results.duration[index]
model.compute_stats(period, duration, t0)
```
{'depth': (1.1936640429140075, 0.16099485890719112),
'depth_even': (1.1560693853875603, 0.22422827420626673),
'depth_half': (0.6417136099202612, 0.11903691765705873),
'depth_odd': (1.2332373666260577, 0.2300215161694426),
'depth_phased': (-0.010477717506668725, 0.1748826154294211),
'harmonic_amplitude': 0.024328101050625044,
'harmonic_delta_log_likelihood': -26.950854196140387,
'per_transit_count': array([5, 4, 5, 5, 5, 5, 5, 5]),
'per_transit_log_likelihood': array([3.52474809, 3.16992881, 2.92304483, 3.24175477, 3.60949244,
4.08972639, 3.2935453 , 3.9320194 ]),
'transit_times': <Quantity [1980.41670509, 1990.47117807, 2000.52565106, 2010.58012404,
2020.63459703, 2030.68907001, 2040.743543 , 2050.79801598] d>}
These statistics are documented in the docstring for the `compute_stats` method:
```
model.compute_stats?
```
[0;31mSignature:[0m [0mmodel[0m[0;34m.[0m[0mcompute_stats[0m[0;34m([0m[0mperiod[0m[0;34m,[0m [0mduration[0m[0;34m,[0m [0mtransit_time[0m[0;34m)[0m[0;34m[0m[0m
[0;31mDocstring:[0m
Compute descriptive statistics for a given transit model
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or Quantity
The period of the transits.
duration : float or Quantity
The duration of the transit.
transit_time : float or Quantity
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
[0;31mFile:[0m ~/research/projects/dfm/astropy/astropy/stats/bls/core.py
[0;31mType:[0m method
We can look more closely at this specific candidate transit using the `TransitPeriodogram.model` method:
```
# Extract the parameters of the best-fit model
index = np.argmax(results.power)
period = results.period[index]
t0 = results.transit_time[index]
duration = results.duration[index]
fig, axes = plt.subplots(2, 1, figsize=(6, 6))
fig.subplots_adjust(hspace=0.3)
# Plot the light curve and best-fit model
ax = axes[0]
ax.plot(t, y_filt, ".k", ms=3)
x = np.linspace(t.min(), t.max(), 3*len(t))
f = model.model(x, period, duration, t0)
ax.plot(x, f, lw=0.75)
ax.set_xlim(t.min().value, t.max().value)
ax.set_ylim(-1.52, 0.4)
ax.set_xlabel("time [days]")
ax.set_ylabel("de-trended flux [ppt]");
# Plot the folded data points within 0.5 days of the transit time
ax = axes[1]
x = (t - t0 + 0.5*period) % period - 0.5*period
m = np.abs(x) < 0.5 * u.day
ax.plot(x[m], y_filt[m], ".k", ms=3)
# Over-plot the best fit model
x = np.linspace(-0.5, 0.5, 1000) * u.day
f = model.model(x + t0, period, duration, t0)
ax.plot(x, f, lw=0.75)
ax.set_xlim(-0.5, 0.5)
ax.set_xlabel("time since transit [days]")
ax.set_ylabel("de-trended flux [ppt]");
```

That looks pretty good!
The standard way to find more planets in a light curve where the highest signal-to-noise transit has been detected is to remove the in-transit data points and then run the algorithm again.
We can do that using the `TransitPeriodogram.transit_mask` method
```
# Find the in-transit points using a longer duration as a buffer to avoid ingress and egress
in_transit = model.transit_mask(t, period, 2*duration, t0)
# Re-run the algorithm, and plot the results
model2 = BoxLeastSquares(t[~in_transit], y_filt[~in_transit])
results2 = model2.autopower(durations, frequency_factor=5.0)
# Extract the parameters of the best-fit model
index = np.argmax(results2.power)
period2 = results2.period[index]
t02 = results2.transit_time[index]
duration2 = results2.duration[index]
fig, axes = plt.subplots(3, 1, figsize=(6, 9))
fig.subplots_adjust(hspace=0.3)
# Highlight the harmonics of the peak period
ax = axes[0]
ax.axvline(period2.value, alpha=0.4, lw=3)
for n in range(2, 15):
ax.axvline(n*period2.value, alpha=0.4, lw=1, linestyle="dashed")
ax.axvline(period2.value / n, alpha=0.4, lw=1, linestyle="dashed")
# Plot the periodogram
ax.plot(results2.period, results2.power, "k", lw=0.5)
ax.set_xlim(results2.period.min().value, results2.period.max().value)
ax.set_xlabel("period [days]")
ax.set_ylabel("log likelihood")
# Plot the light curve and best-fit model
ax = axes[1]
ax.plot(t[~in_transit], y_filt[~in_transit], ".k", ms=3)
x = np.linspace(t.min(), t.max(), 3*len(t))
f = model2.model(x, period2, duration2, t02)
ax.plot(x, f, lw=0.75)
ax.set_xlim(t.min().value, t.max().value)
ax.set_ylim(-0.9, 0.4)
ax.set_xlabel("time [days]")
ax.set_ylabel("de-trended flux [ppt]");
ax = axes[2]
x = (t[~in_transit] - t02 + 0.5*period2) % period2 - 0.5*period2
m = np.abs(x) < 0.5 * u.day
ax.plot(x[m], y_filt[~in_transit][m], ".k", ms=3)
x = np.linspace(-0.5, 0.5, 1000) * u.day
f = model2.model(x + t02, period2, duration2, t02)
ax.plot(x, f, lw=0.75)
ax.set_xlim(-0.5, 0.5)
ax.set_xlabel("time since transit [days]")
ax.set_ylabel("de-trended flux [ppt]");
```

As before, we can also compute some descriptive stats for this candidate.
```
model2.compute_stats(period2, duration2, t02)
```
{'depth': (0.732898987742453, 0.1968368470389144),
'depth_even': (0.7122867471030565, 0.26779054747928266),
'depth_half': (0.4867200382753938, 0.1610132853436982),
'depth_odd': (0.756946601821749, 0.28916524553275047),
'depth_phased': (-0.005596650544266784, 0.2778620689104982),
'harmonic_amplitude': 0.006306899692288489,
'harmonic_delta_log_likelihood': -6.896627831061727,
'per_transit_count': array([7, 6, 7, 6]),
'per_transit_log_likelihood': array([1.9366044 , 1.73704636, 1.61188842, 1.69729286]),
'transit_times': <Quantity [1979.28651304, 2003.92900071, 2028.57148837, 2053.21397603] d>}
Now let's do it one more time to find the third planet.
```
in_transit2 = in_transit | model2.transit_mask(t, period2, 2*duration2, t02)
# Re-run the algorithm, and plot the results
model3 = BoxLeastSquares(t[~in_transit2], y_filt[~in_transit2])
results3 = model3.autopower(durations, maximum_period=50, frequency_factor=5.0)
# Extract the parameters of the best-fit model
index = np.argmax(results3.power)
period3 = results3.period[index]
t03 = results3.transit_time[index]
duration3 = results3.duration[index]
fig, axes = plt.subplots(3, 1, figsize=(6, 9))
fig.subplots_adjust(hspace=0.3)
# Highlight the harmonics of the peak period
ax = axes[0]
ax.axvline(period3.value, alpha=0.4, lw=3)
for n in range(2, 15):
ax.axvline(n*period3.value, alpha=0.4, lw=1, linestyle="dashed")
ax.axvline(period3.value / n, alpha=0.4, lw=1, linestyle="dashed")
# Plot the periodogram
ax.plot(results3.period, results3.power, "k", lw=0.5)
ax.set_xlim(results3.period.min().value, results3.period.max().value)
ax.set_xlabel("period [days]")
ax.set_ylabel("log likelihood")
ax = axes[1]
ax.plot(t[~in_transit2], y_filt[~in_transit2], ".k", ms=3)
x = np.linspace(t.min(), t.max(), 3*len(t))
f = model3.model(x, period3, duration3, t03)
ax.plot(x, f, lw=0.75)
ax.set_xlim(t.min().value, t.max().value)
ax.set_ylim(-0.8, 0.3)
ax.set_xlabel("time [days]")
ax.set_ylabel("de-trended flux [ppt]")
ax = axes[2]
x = (t[~in_transit2] - t03 + 0.5*period3) % period3 - 0.5*period3
m = np.abs(x) < 0.5 * u.day
ax.plot(x[m], y_filt[~in_transit2][m], ".k", ms=3)
x = np.linspace(-0.5, 0.5, 1000) * u.day
f = model3.model(x + t03, period3, duration3, t03)
ax.plot(x, f, lw=0.75)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.8, 0.3)
ax.set_xlabel("time since transit [days]")
ax.set_ylabel("de-trended flux [ppt]");
```

Interestingly, this time the detected period is actually half of the period reported in the literature.
Looking at the second plot above, it should be possible to figure out what is going on here.
In fact, the likelihood of the correct period is actually *identical* to the peak reported here because the middle transit falls in a data gap, but `np.argmax` returns the *first* index when multiple entries have the same value.
This issue can immediately be identified when we run `compute_stats`:
```
model3.compute_stats(period3, duration3, t03)
```
{'depth': (0.6053673142144782, 0.25875268795917816),
'depth_even': (0.0, inf),
'depth_half': (0.18624197808219442, 0.14685206698862918),
'depth_odd': (0.6053673142144782, 0.25875268795917816),
'depth_phased': (-0.010179310188637113, 0.17759204327376826),
'harmonic_amplitude': 0.0058321010514341,
'harmonic_delta_log_likelihood': -2.7080500006912,
'per_transit_count': array([7, 0, 8]),
'per_transit_log_likelihood': array([1.2763854 , 0. , 1.47213649]),
'transit_times': <Quantity [1993.22528534, 2015.51217057, 2037.7990558 ] d>}
In this case, the `per_transit_count` statistic shows us that there are no data points in the middle transit because it falls in a gap.
```
```
|
noraeisnerREPO_NAMELATTEPATH_START.@LATTE_extracted@LATTE-master@.ipynb_checkpoints@astropy-bls-tutorial-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "gc_runfit.py",
"repo_name": "GalacticDynamics-Oxford/Agama",
"repo_path": "Agama_extracted/Agama-master/py/gc_runfit.py",
"type": "Python"
}
|
#!/usr/bin/env python
'''
This file is part of the Gaia Challenge, and contains the main fitting routine.
The Gaia Challenge (or, more specifically, the "Spherical and triaxial" group)
presents the following task:
infer the gravitational potential, created entirely by the dark matter,
from the array of discrete tracers ("stars"), under the assumption of
spherical symmetry and dynamical equilibrium. The tracer population follows
a different density profile than the dark matter and may be anisotropic
in velocity space; the mock data was created by sampling from a physically valid
distribution function which we pretend not to know.
There are several different models (combinations of the potential and
the tracer distribution), and each one can be approached under three different
assumptions about the content of the data:
(1) full 6d phase-space coordinates of tracer particles with no errors;
(2) 5d (except for z-coordinate), velocities have a fixed Gaussian error of 2km/s;
(3) 3d (only x,y and v_z, the latter with an error of 2km/s);
of course, due to spherical symmetry, we only deal with the cylindrical radius,
not x and y separately.
This program, split into several python files, addresses this problem by
constructing a series of models with particular parameters of the gravitational
potential and the distribution function of tracers, and evaluating the likelihood
of each model against the provided data; we seek the parameters that maximize
the likelihood, and derive their uncertainties.
This module performs the actual fitting procedure for the given data file.
First we find a single maximum-likelihood solution with a deterministic
search algorithm, and then launch a Markov Chain Monte Carlo simulation to
explore the range of parameters that are still consistent with the data;
the latter stage uses the EMCEE algorithm with several independent 'walkers'
in the parameter space.
We perform several episodes in which the MCMC is run for several hundred steps,
and compare the mean and dispersion of parameters over the entire ensemble
between two consecutive episodes; when they no longer change, we declare
the procedure to be converged. Intermediate results are also stored in text
files, so that the fit may be restarted after each episode. We also display
the evolution of parameters along the chain, and the covariance plots.
To redo the plots without running the MCMC again, add a second command-line
argument "plot" (after the name of the data file).
The files "gc_modelparamsE.py" and "gc_modelparamsJ.py" specify two possible
families of models (only one of them should be selected to import).
They also define the conversion between the scaled quantities that are
explored in the fitting process and the actual model parameters,
and decode the true parameters from the name of the data file.
The file "gc_resample.py" deals with the missing data: each tracer particle
with missing or imprecise phase-space coordinates is split into a number of
subsamples, where this missing data is drawn from a non-uniform prior
distribution with a known sampling law. The likelihood of each tracer particle
is then computed as the weighted sum of likelihoods of all its subsamples.
In other words, this procedure performs the integration over missing coordinates
using a Monte Carlo approach with a fixed array of subsamples, which remains
the same throughout the entire fitting procedure -- this is necessary
to mitigate the impact of sampling noise (it is still present, but is the same
for all models).
To run the program, one needs to select one of the two possible families of
models: f(E,L) or f(J); select the type of available data (3, 5 or 6 known
phase-space coordinates), and provide the name of the data file as the argument.
If the program has been run previously, it will store the current set of best
parameters in a file <filename>.best, which can be used to hot-restart the fit.
However, if you switch to the other family of model, this file should be deleted,
as the model parameters are incompatible between the two families.
'''
from __future__ import print_function
import sys, numpy, scipy.optimize, scipy.special, matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt, emcee, corner
import agama
# a separate module for sampling over missing coordinates/velocities
from gc_resample import sampleMissingData
# a separate module which contains the description of the actual model
# here are two possible options: f(E,L) or f(J), uncomment one of the following lines:
from gc_modelparamsE import ModelParams
#from gc_modelparamsJ import ModelParams
######################## GLOBAL CONSTANTS ########################
nsteps_deterministic = 500  # number of steps per pass in deterministic minimizer
nsteps_mcmc = 500           # number of MC steps per pass
nwalkers_mcmc = 24          # number of independent MC walkers
nsamples_plot = 200         # number of randomly chosen samples from the MCMC chain to plot
initial_disp_mcmc = 0.01    # initial dispersion of parameters carried by walkers around their best-fit values
phase_space_info_mode = 3   # mode=6: full phase-space information (3 positions and 3 velocities)
                            # mode=5: 5d phase-space (everything except z-coordinate)
                            # mode=3: 3d (x, y, vz)
num_subsamples = 1000       # each input data point is represented by this number of samples
                            # which fill in the missing values of coordinate and velocity components
vel_error = 2.0             # assumed observational error on velocity components (add noise to velocity if it is non-zero)
######################## MODEL-SEARCHER ####################################
def deterministicSearchFnc(params, obj):
    '''
    Objective function for the deterministic minimizer (must be declared at
    module level so it can be pickled/passed by the optimizer).
    Returns the negative log-likelihood of the model; non-finite likelihoods
    are replaced by a large finite penalty proportional to the sample size,
    so the minimizer can keep making progress.
    '''
    penalty = -100 * len(obj.particles)
    loglike = obj.modelLikelihood(params)
    if not numpy.isfinite(loglike):
        loglike = penalty
    return -loglike
def monteCarloSearchFnc(params, obj):
    '''
    Objective function for the MCMC sampler (must be declared at module
    level); simply the model log-likelihood, which EMCEE maximizes.
    '''
    loglike = obj.modelLikelihood(params)
    return loglike
class ModelSearcher:
    '''
    Class that encompasses the computation of likelihood for the given parameters,
    and implements model-searching algorithms (deterministic and MCMC).
    The input file name is taken from the first command-line argument.
    '''
    def __init__(self):
        # the input file must contain at least six columns: x, y, z, vx, vy, vz
        try:
            self.filename  = sys.argv[1]
            self.particles = numpy.loadtxt(self.filename)[:,0:6]
            self.model     = ModelParams(self.filename)
        except Exception as ex:
            print(str(ex)+"\nNeed to provide input text file with stellar coordinates and velocities.")
            exit()
        if vel_error!=0:
            print("Assumed error of %f km/s in velocity" % vel_error)
        # erase the phase-space components that are unobservable in the chosen mode
        if phase_space_info_mode <= 5:
            self.particles[:,2] *= numpy.nan    # remove z-coordinate
        if phase_space_info_mode <= 3:
            self.particles[:,3:5] *= numpy.nan  # remove vx and vy
        if phase_space_info_mode != 6 or vel_error != 0:
            # represent each input point by a cloud of subsamples that fill in
            # the missing phase-space components and/or model the velocity errors
            self.samples, self.weights = sampleMissingData(
                numpy.hstack((self.particles, numpy.ones((self.particles.shape[0], 3)) * vel_error)),
                num_subsamples )
        else:
            self.samples = None

        # check if we may restart the search from already existing parameters
        try:
            self.values = numpy.loadtxt(self.filename+".best")
            if self.values.ndim==1:   # only one set of parameters - this occurs after the deterministic search
                self.values = self.values[:-1]   # the last column is the likelihood, strip it
            else:   # a number of MCMC walkers, each with its own set of parameters
                self.values = self.values[:,:-1]
            print("Loaded from saved file: (nwalkers,nparams)=" + str(self.values.shape))
        except Exception:   # no (or unreadable) saved file - start from scratch
            self.values = None
        return

    def modelLikelihood(self, params):
        '''
        Compute the likelihood of model (df+potential specified by scaled params)
        against the data (array of Nx6 position/velocity coordinates of tracer particles).
        This is the function to be maximized; if parameters are outside the allowed range, return -infinity
        '''
        prior = self.model.prior(params)
        print(params, end=': ')
        if prior == -numpy.inf:
            print("Out of range")
            return prior
        try:
            # Compute log-likelihood of DF with given params against an array of actions
            pot, df = self.model.createModel(params)
            if self.samples is None:   # actions of tracer particles
                if self.particles.shape[0] > 2000:   # create an action finder object for a faster evaluation
                    actions = agama.ActionFinder(pot)(self.particles)
                else:
                    actions = agama.actions(pot, self.particles)
                df_val = df(actions)   # values of DF for these actions
            else:   # have full phase space info for resampled input particles (missing components are filled in)
                af = agama.ActionFinder(pot)
                actions = af(self.samples)   # actions of resampled tracer particles
                # compute values of DF for these actions, multiplied by sample weights
                df_vals = df(actions) * self.weights
                # compute the weighted sum of likelihoods of all samples for a single particle,
                # replacing the improbable samples (with NaN as likelihood) with zeroes
                df_val = numpy.sum(numpy.nan_to_num(df_vals.reshape(-1, num_subsamples)), axis=1)
            loglike = numpy.sum( numpy.log( df_val ) )
            if numpy.isnan(loglike): loglike = -numpy.inf
            loglike += prior
            print("LogL=%.8g" % loglike)
            return loglike
        except ValueError as err:
            print("Exception "+str(err))
            return -numpy.inf

    def deterministicSearch(self):
        '''
        do a deterministic search to find the best-fit parameters of potential and distribution function.
        perform several iterations of search, to avoid getting stuck in a local minimum,
        until the log-likelihood ceases to improve
        '''
        if self.values is None:          # just started
            self.values = self.model.initValues   # get the first guess from the model-scaling object
        elif self.values.ndim == 2:      # entire ensemble of values (after MCMC)
            self.values = self.values[0,:]        # leave only one set of values from the ensemble
        prevloglike = -deterministicSearchFnc(self.values, self)   # initial likelihood
        while True:
            print('Starting deterministic search')
            result = scipy.optimize.minimize(deterministicSearchFnc, \
                self.values, args=(self,), method='Nelder-Mead', \
                options=dict(maxfev=nsteps_deterministic, disp=True))
            self.values = result.x
            loglike = -result.fun
            print('result='+str(result.x)+' LogL='+str(loglike))
            # store the latest best-fit parameters and their likelihood
            numpy.savetxt(self.filename+'.best', numpy.hstack((self.values, loglike)).reshape(1,-1), fmt='%.8g')
            if loglike - prevloglike < 1.0:
                print('Converged')
                return
            else:
                print('Improved log-likelihood by %f' % (loglike - prevloglike))
            prevloglike = loglike

    def monteCarloSearch(self):
        '''
        Explore the parameter space around the best-fit values using the MCMC method
        '''
        if self.values.ndim == 1:
            # initial coverage of parameter space (dispersion around the current best-fit values)
            nparams = len(self.values)
            ensemble = numpy.empty((nwalkers_mcmc, len(self.values)))
            for i in range(nwalkers_mcmc):
                while True:   # ensure that we initialize walkers with feasible values
                    walker = self.values + (numpy.random.randn(nparams)*initial_disp_mcmc if i>0 else 0)
                    prob = monteCarloSearchFnc(walker, self)
                    if numpy.isfinite(prob):
                        ensemble[i,:] = walker
                        break
                    print('*',end='')
            self.values = ensemble
        else:
            # check that all walkers have finite likelihood
            prob = numpy.zeros((self.values.shape[0],1))
            for i in range(self.values.shape[0]):
                prob[i,0] = monteCarloSearchFnc(self.values[i,:], self)
                if not numpy.isfinite(prob[i,0]):
                    print('Invalid parameters for %i-th walker (likelihood is bogus)' % i)
                else: print('%i-th walker: logL=%g' % (i, prob[i,0]))

        nwalkers, nparams = self.values.shape
        sampler = emcee.EnsembleSampler(nwalkers, nparams, monteCarloSearchFnc, args=(self,))
        prevmaxloglike = None
        while True:   # run several passes until convergence
            print('Starting MCMC')
            sampler.run_mcmc(self.values, nsteps_mcmc)
            # restart the next pass from the latest values in the Markov chain
            self.values = sampler.chain[:,-1,:]
            # store the latest best-fit parameters and their likelihood, and the entire chain for the last nsteps_mcmc steps
            numpy.savetxt(self.filename+'.best', \
                numpy.hstack((self.values, sampler.lnprobability[:,-1].reshape(-1,1))), fmt='%.8g')
            numpy.savetxt(self.filename+".chain", \
                numpy.hstack((sampler.chain[:,-nsteps_mcmc:].reshape(-1,nparams),
                sampler.lnprobability[:,-nsteps_mcmc:].reshape(-1,1))), fmt='%.8g')
            print("Acceptance fraction: %g" % numpy.mean(sampler.acceptance_fraction))  # should be in the range 0.2-0.5
            try:
                print("Autocorrelation time: %g" % sampler.get_autocorr_time())
                # should be considerably shorter than the total number of steps
            except Exception: pass   # sometimes it can't be computed, then ignore
            maxloglike = numpy.max(sampler.lnprobability[:,-nsteps_mcmc:])
            avgloglike = numpy.mean(sampler.lnprobability[:,-nsteps_mcmc:])  # avg.log-likelihood during the pass
            avgparams = numpy.array([numpy.mean(sampler.chain[:,-nsteps_mcmc:,i]) for i in range(nparams)])
            rmsparams = numpy.array([numpy.std (sampler.chain[:,-nsteps_mcmc:,i]) for i in range(nparams)])
            print("Max log-likelihood= %.8g, avg log-likelihood= %.8g" % (maxloglike, avgloglike))
            for i in range(nparams):
                sorted_values = numpy.sort(sampler.chain[:,-nsteps_mcmc:,i], axis=None)
                print("Parameter %20s  avg= %8.5g;  one-sigma range = (%8.5f, %8.5f)" % \
                    (self.model.labels[i], avgparams[i], \
                    sorted_values[int(len(sorted_values)*0.16)], \
                    sorted_values[int(len(sorted_values)*0.84)] ))
            # plot the chain evolution and the posterior distribution + correlations between parameters
            self.plot(sampler.chain, sampler.lnprobability, self.model.labels)
            # check for convergence: both the likelihood and the moments of all
            # parameters must have stopped drifting between passes
            # NOTE(review): the parameter checks are not symmetric (no abs());
            # behavior preserved from the original code - confirm intent
            if prevmaxloglike is not None:
                if maxloglike-prevmaxloglike < 1.0 and \
                    abs(avgloglike-prevavgloglike) < 1.0 and \
                    numpy.all(avgparams-prevavgparams < 0.1) and \
                    numpy.all(rmsparams-prevrmsparams < 0.1):
                    print("Converged")
                    return
            prevmaxloglike = maxloglike
            prevavgloglike = avgloglike
            prevavgparams = avgparams
            prevrmsparams = rmsparams

    def plotProfiles(self, chain):
        '''
        plot the radial profiles of various quantities from the set of models in the MCMC chain,
        together with the true profiles, if the latter are known to the model object.
        '''
        axes = plt.subplots(2, 2, figsize=(12,8))[1].T.reshape(-1)
        rmin = 0.01
        rmax = 100.
        radii = numpy.logspace(numpy.log10(rmin), numpy.log10(rmax), 41)
        midradii = (radii[1:] * radii[:-1])**0.5   # geometric means of adjacent grid nodes
        xyz = numpy.column_stack((radii, radii*0, radii*0))
        # compute and store the profiles for each model in the chain, then take 68% and 95% percentiles
        dmdens = numpy.zeros((chain.shape[0], len(radii)))
        dmslope = numpy.zeros((chain.shape[0], len(midradii)))
        trdens = numpy.zeros((chain.shape[0], len(radii)))
        trbeta = numpy.zeros((chain.shape[0], len(radii)))
        print('Plotting profiles...')
        for i in range(len(chain)):
            pot, df = self.model.createModel(chain[i])
            dmdens [i] = pot.density(xyz)
            dmslope[i] = numpy.log(dmdens[i,1:] / dmdens[i,:-1]) / numpy.log(radii[1:] / radii[:-1])
            trdens [i], trvel = agama.GalaxyModel(pot, df).moments(xyz, dens=True, vel=False, vel2=True)
            trbeta [i] = 1 - trvel[:,1] / trvel[:,0]
        # log-slope of the DM density profile  d(log rho) / d(log r)
        cntr = numpy.percentile(dmslope, [2.3, 15.9, 50, 84.1, 97.7], axis=0)
        axes[0].fill_between(midradii, cntr[0], cntr[4], color='lightgray')  # 2 sigma
        axes[0].fill_between(midradii, cntr[1], cntr[3], color='gray')       # 1 sigma
        axes[0].plot(midradii, cntr[2], color='k')  # median
        axes[0].set_xscale('log')
        axes[0].set_xlim(rmin, rmax)
        axes[0].set_ylim(-5, 1)
        axes[0].set_xlabel('$r$')
        axes[0].set_ylabel(r'$d(\ln\rho_{DM}) / d(\ln r)$')
        # DM density profile
        cntr = numpy.percentile(dmdens, [2.3, 15.9, 50, 84.1, 97.7], axis=0)
        axes[1].fill_between(radii, cntr[0], cntr[4], color='lightgray')  # 2 sigma
        axes[1].fill_between(radii, cntr[1], cntr[3], color='gray')       # 1 sigma
        axes[1].plot(radii, cntr[2], color='k')  # median
        axes[1].set_xscale('log')
        axes[1].set_yscale('log')
        axes[1].set_xlim(rmin, rmax)
        axes[1].set_xlabel('$r$')
        axes[1].set_ylabel(r'$\rho_{DM}$')
        # velocity anisotropy coefficient (beta) of tracers
        cntr = numpy.percentile(trbeta, [2.3, 15.9, 50, 84.1, 97.7], axis=0)
        axes[2].fill_between(radii, cntr[0], cntr[4], color='lightgray')  # 2 sigma
        axes[2].fill_between(radii, cntr[1], cntr[3], color='gray')       # 1 sigma
        axes[2].plot(radii, cntr[2], color='k')  # median
        axes[2].set_xscale('log')
        axes[2].set_xlim(rmin, rmax)
        axes[2].set_ylim(-1, 1)
        axes[2].set_xlabel('$r$')
        axes[2].set_ylabel(r'$\beta_\star$')
        # 3d density profile of tracers
        cntr = numpy.percentile(trdens, [2.3, 15.9, 50, 84.1, 97.7], axis=0)
        axes[3].fill_between(radii, cntr[0], cntr[4], color='lightgray')  # 2 sigma
        axes[3].fill_between(radii, cntr[1], cntr[3], color='gray')       # 1 sigma
        axes[3].plot(radii, cntr[2], color='k')  # median
        axes[3].set_xscale('log')
        axes[3].set_yscale('log')
        axes[3].set_xlim(rmin, rmax)
        axes[3].set_xlabel('$r$')
        axes[3].set_ylabel(r'$\rho_\star$')
        # histogram of radial distribution of the original points on each of the four panels
        ptcount = numpy.histogram((self.particles[:,0]**2 + self.particles[:,1]**2)**0.5, bins=radii)[0]
        for ax in axes:
            plt.twinx(ax)
            # staircase line through the histogram bins; bug fix: the original used
            # numpy.hstack(zip(...)), which fails under Python 3 where zip is a lazy iterator
            plt.plot(numpy.repeat(radii, 2)[1:-1], numpy.repeat(ptcount, 2), 'g-', alpha=0.5)
            plt.ylim(0, 2*max(ptcount))
        try:
            true_dmdens = self.model.truePotential.density(xyz)
            true_dmslope= numpy.log(true_dmdens[1:] / true_dmdens[:-1]) / numpy.log(radii[1:] / radii[:-1])
            true_trdens = self.model.tracerDensity.density(xyz)
            true_trbeta = self.model.tracerBeta(radii)
            axes[0].plot(midradii, true_dmslope, color='r', lw=3, linestyle='--')
            axes[1].plot( radii, true_dmdens, color='r', lw=3, linestyle='--')
            axes[2].plot( radii, true_trbeta, color='r', lw=3, linestyle='--')
            axes[3].plot( radii, true_trdens, color='r', lw=3, linestyle='--')
            axes[1].set_ylim(true_dmdens[-1]*0.5, true_dmdens[0]*5)
            axes[3].set_ylim(true_trdens[-1]*0.5, true_trdens[0]*5)
        except AttributeError: pass   # no true values known
        plt.tight_layout()
        plt.savefig(self.filename+"_profiles.png")
        plt.close()

    def plot(self, chain, loglike, labels):
        '''
        Show the time evolution of parameters carried by the ensemble of walkers (time=number of MC steps),
        and the posterior distribution of parameters for the last nsteps_mcmc only
        '''
        ndim = chain.shape[2]
        fig,axes = plt.subplots(ndim+1, 1, sharex=True, figsize=(20,15))
        for i in range(ndim):
            axes[i].plot(chain[:,:,i].T, color='k', alpha=0.5)
            axes[i].set_ylabel(self.model.labels[i])
        # last panel shows the evolution of log-likelihood for the ensemble of walkers
        axes[-1].plot(loglike.T, color='k', alpha=0.5)
        axes[-1].set_ylabel('log(L)')
        maxloglike = numpy.max(loglike)
        # expected max-likelihood for a chi2 distribution with ndim degrees of freedom
        maxexpected = numpy.median(loglike[:,-nsteps_mcmc:])+0.5*ndim-0.33
        axes[-1].set_ylim(maxloglike-5-ndim, maxloglike)   # restrict the range of log-likelihood around its maximum
        plt.tight_layout(h_pad=0.)
        plt.savefig(self.filename+"_chain.png")
        plt.close()
        latest_chain = chain[:,-nsteps_mcmc:].reshape(-1, chain.shape[2])
        try:
            trueParams = self.model.trueParams
        except AttributeError:
            trueParams = None
        try:
            corner.corner(latest_chain, quantiles=[0.16, 0.5, 0.84], labels=labels, truths=trueParams)
            # distribution of log-likelihoods - expected to follow the chi2 law with ndim degrees of freedom
            ax = plt.axes([0.64,0.64,0.32,0.32])
            bins = numpy.linspace(-4-ndim, 1, 101) + maxexpected
            # bug fix: the 'normed' keyword was removed in matplotlib 3.x; 'density' is equivalent
            ax.hist(loglike[:,-nsteps_mcmc:].reshape(-1), bins=bins, density=True, histtype='step')
            xx = numpy.linspace(-4-ndim, 0, 101)
            ax.plot(xx + maxexpected, 1/scipy.special.gamma(0.5*ndim) * (-xx)**(0.5*ndim-1) * numpy.exp(xx), 'r', lw=2)
            ax.set_xlim(bins[0], bins[-1])
            ax.set_xlabel('log(L)')
            plt.savefig(self.filename+"_posterior.png")
            plt.close()
        except ValueError as err:
            print("Can't plot posterior distribution: "+str(err))
        try:
            self.plotProfiles(latest_chain[numpy.random.choice(len(latest_chain), nsamples_plot, replace=False)])
        except Exception as err:
            print("Can't plot profiles: "+str(err))

    def run(self):
        '''Top-level driver: deterministic search first (if starting fresh), then MCMC.'''
        if self.values is None:   # first attempt a deterministic search to find the best-fit params
            self.deterministicSearch()
        self.monteCarloSearch()
################  MAIN PROGRAM  ##################
numpy.set_printoptions(precision=5, linewidth=200, suppress=True)
agama.setUnits(mass=1, length=1, velocity=1)
m=ModelSearcher()
# a second command-line argument containing 'plot' only re-plots the profiles
# from a previously stored chain instead of running the fit
if len(sys.argv)>2 and 'PLOT' in sys.argv[2].upper():
    chain = numpy.loadtxt(m.filename+'.chain')
    m.plotProfiles(chain[numpy.random.choice(len(chain), nsamples_plot, replace=False)])
else:
    m.run()
|
GalacticDynamics-OxfordREPO_NAMEAgamaPATH_START.@Agama_extracted@Agama-master@py@gc_runfit.py@.PATH_END.py
|
{
"filename": "_tickwidth.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/_tickwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``tickwidth`` property of ``scatterternary.marker.colorbar``."""

    def __init__(
        self,
        plotly_name="tickwidth",
        parent_name="scatterternary.marker.colorbar",
        **kwargs,
    ):
        # pull overridable defaults out of kwargs before forwarding the rest
        edit_type = kwargs.pop("edit_type", "colorbars")
        minimum = kwargs.pop("min", 0)
        super(TickwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterternary@marker@colorbar@_tickwidth.py@.PATH_END.py
|
{
"filename": "hod_modeling_tutorial5.ipynb",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/docs/notebooks/hod_modeling/hod_modeling_tutorial5.ipynb",
"type": "Jupyter Notebook"
}
|
# Example 5: An HOD model with cross-component dependencies
This notebook is intended to accompany the corresponding page of the Halotools documentation on HOD-style model building. Although the tutorial on `halotools.readthedocs.io` is in principle comprehensive and stand-alone, there is supplementary material covered here that may provide further clarification. By following along with this notebook you can experiment with variations on the models as you learn the basic syntax. This notebook is not stand-alone, and is intended to be read along with its companion tutorial in the documentation.
## Source code for the new model
```
class Shape(object):
def __init__(self, gal_type):
self.gal_type = gal_type
self._mock_generation_calling_sequence = ['assign_shape']
self._galprop_dtypes_to_allocate = np.dtype([('shape', object)])
def assign_shape(self, **kwargs):
table = kwargs['table']
randomizer = np.random.random(len(table))
table['shape'][:] = np.where(randomizer > 0.5, 'elliptical', 'disk')
class Size(object):
def __init__(self, gal_type):
self.gal_type = gal_type
self._mock_generation_calling_sequence = ['assign_size']
self._galprop_dtypes_to_allocate = np.dtype([('galsize', 'f4')])
self.list_of_haloprops_needed = ['halo_spin']
self.new_haloprop_func_dict = {'halo_custom_size': self.calculate_halo_size}
def assign_size(self, **kwargs):
table = kwargs['table']
disk_mask = table['shape'] == 'disk'
table['galsize'][disk_mask] = table['halo_spin'][disk_mask]
table['galsize'][~disk_mask] = table['halo_custom_size'][~disk_mask]
def calculate_halo_size(self, **kwargs):
table = kwargs['table']
return 2*table['halo_rs']
```
```
from halotools.empirical_models import Leauthaud11Cens, TrivialPhaseSpace
cen_occupation = Leauthaud11Cens()
cen_profile = TrivialPhaseSpace(gal_type = 'centrals')
cen_shape = Shape(gal_type = 'centrals')
cen_size = Size(gal_type = 'centrals')
from halotools.empirical_models import HodModelFactory
model = HodModelFactory(
centrals_occupation = cen_occupation,
centrals_profile = cen_profile,
centrals_shape = cen_shape,
centrals_size = cen_size,
model_feature_calling_sequence = ('centrals_occupation',
'centrals_profile', 'centrals_shape', 'centrals_size')
)
```
```
from halotools.sim_manager import FakeSim
halocat = FakeSim()
model.populate_mock(halocat)
```
## Inspecting our mock
In our mock universe, different halo properties govern the sizes of disk and elliptical galaxies. We can visualize this with a couple of simple scatter plots.
```
from matplotlib import pyplot as plt
%matplotlib inline
```
```
diskmask = model.mock.galaxy_table['shape'] == 'disk'
disk_gals = model.mock.galaxy_table[diskmask]
elliptical_gals = model.mock.galaxy_table[~diskmask]
```
```
plt.scatter(elliptical_gals['halo_spin'], elliptical_gals['galsize'],
color='red', label = 'elliptical galaxies')
plt.scatter(disk_gals['halo_spin'], disk_gals['galsize'],
color='blue', label = 'disk galaxies')
plt.xlim(xmin = -0.02, xmax=0.2)
plt.ylim(ymin = -0.02, ymax=1.25)
plt.xlabel('Halo Spin', fontsize=15)
plt.ylabel('Galaxy Size', fontsize=15)
plt.legend(frameon=False, loc='best')
```
<matplotlib.legend.Legend at 0x128ecaf90>
/usr/local/lib/python2.7/site-packages/matplotlib/collections.py:571: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
if self._edgecolors == str('face'):

```
plt.scatter(disk_gals['halo_custom_size'], disk_gals['galsize'],
color='blue', label = 'disk galaxies')
plt.scatter(elliptical_gals['halo_custom_size'], elliptical_gals['galsize'],
color='red', label = 'elliptical galaxies')
plt.xlim(xmin = -0.02, xmax = 1.25)
plt.ylim(ymin = -0.02, ymax=1.25)
plt.xlabel('Halo Custom Size', fontsize=15)
plt.ylabel('Galaxy Size', fontsize=15)
plt.legend(frameon=False, loc=2)
```
<matplotlib.legend.Legend at 0x103ca5f10>

```
```
```
```
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@docs@notebooks@hod_modeling@hod_modeling_tutorial5.ipynb@.PATH_END.py
|
{
"filename": "archival.py",
"repo_name": "tgrassi/prizmo",
"repo_path": "prizmo_extracted/prizmo-main/src_py/ChiantiPy/tools/archival.py",
"type": "Python"
}
|
"""
Functions for reading pre-v8 CHIANTI files
"""
import os
#from .FortranFormat import *
from ChiantiPy.fortranformat import FortranRecordReader
import ChiantiPy.tools.util as util
#
# -------------------------------------------------------------------------------------
#
def elvlcRead(ions, filename = 0, verbose=0, useTh=0):
    """
    Read a pre-v8 CHIANTI energy level (.elvlc) file and return
    {"lvl","conf","label","term","spin","l","spd","j","mult",
     "ecm","eryd","ecmth","erydth","ref","pretty","ionS","status"}.
    If an observed energy value (ecm or eryd) is zero (= unknown) and `useTh`
    is set, the theoretical values (ecmth and erydth) are inserted.

    Parameters
    ----------
    ions : str
        CHIANTI-style ion name, e.g. 'c_4'; ignored when `filename` is given
        (the ion name is then derived from the file's basename).
    filename : str, optional
        Full path to the .elvlc file, overriding `ions`.
    verbose : int
        If non-zero, print diagnostic output.
    useTh : int
        If non-zero, substitute theoretical energies where observed ones are zero.
    """
    # fixed-column Fortran format of a single level record
    header_line = FortranRecordReader('i3,i6,a15,i3,i3,a3,f4.1,i3,f15.3,f15.6,f15.3,f15.6')
    #
    if filename:
        elvlname = filename
        bname = os.path.basename(filename)
        ions = bname.split('.')[0]
    else:
        fname = util.ion2filename(ions)
        elvlname = fname+'.elvlc'
    if not os.path.isfile(elvlname):
        print((' elvlc file does not exist:  %s'%(elvlname)))
        return {'status':0}
    status = 1
    # context manager closes the file even on errors; also avoids shadowing
    # the builtin name 'input' as the original code did
    with open(elvlname, 'r') as fh:
        s1 = fh.readlines()
    # count data records: they end at the first line with fewer than 2 tokens
    # (the ' -1' separator preceding the reference section)
    nlvls = 0
    ndata = 2
    while ndata > 1:
        s1a = s1[nlvls][:-1]
        s2 = s1a.split()
        ndata = len(s2)
        nlvls = nlvls+1
    nlvls -= 1
    if verbose:
        print((' nlvls = %i'%(nlvls)))
    lvl = [0]*nlvls
    conf = [0]*nlvls
    term = [0]*nlvls
    spin = [0]*nlvls
    l = [0]*nlvls
    spd = [0]*nlvls
    j = [0]*nlvls
    mult = [0]*nlvls
    ecm = [0]*nlvls
    eryd = [0]*nlvls
    ecmth = [0]*nlvls
    erydth = [0]*nlvls
    pretty = [0]*nlvls
    label = []
    for i in range(0,nlvls):
        if verbose:
            print((s1[i][0:115]))
        inpt = header_line.read(s1[i])
        lvl[i] = inpt[0]
        conf[i] = inpt[1]
        label.append(str(inpt[1]))
        term[i] = inpt[2].strip()
        spin[i] = inpt[3]
        l[i] = inpt[4]
        spd[i] = inpt[5].strip()
        j[i] = inpt[6]
        mult[i] = inpt[7]
        ecm[i] = inpt[8]
        eryd[i] = inpt[9]
        ecmth[i] = inpt[10]
        erydth[i] = inpt[11]
        if ecm[i] == 0.:
            if useTh:
                ecm[i] = ecmth[i]
                eryd[i] = erydth[i]
        # human-readable LSJ label, e.g. '2S 2S0.5'
        stuff = term[i].strip() + ' %1i%1s%3.1f'%( spin[i], spd[i], j[i])
        pretty[i] = stuff.strip()
    ref = []
    # reference section follows the ' -1' separator
    # NOTE(review): this loop also skips the file's last line; preserved as-is
    for i in range(nlvls+1,len(s1)-1):
        s1a = s1[i][:-1]
        ref.append(s1a.strip())
    return {"lvl":lvl,"conf":conf,"label":label,"term":term,"spin":spin,"l":l,"spd":spd,"j":j
            ,"mult":mult,"ecm":ecm,"eryd":eryd,"ecmth":ecmth,"erydth":erydth,"ref":ref,"pretty":pretty, 'ionS':ions, 'status':status}
#
# -------------------------------------------------------------------------------------
#
def elvlcWrite(info, outfile=None, addLvl=0):
    '''
    Write Chianti data to .elvlc file.

    Parameters
    ----------
    info : `dict`
        Information about the Chianti data to write. Should contain
        the following keys: ionS, the Chianti style name of the ion such as c_4
        conf, an integer denoting the configuration - not too essential
        term, a string showing the configuration
        spin, an integer of the spin of the state in LS coupling
        l, an integer of the angular momentum quantum number
        spd, a string for the alphabetic symbol of the angular momentum, S, P, D, etc
        j, a floating point number, the total angular momentum
        ecm, the observed energy in inverse cm, if unknown, the value is 0.
        eryd, the observed energy in Rydbergs, if unknown, the value is 0.
        ecmth, the calculated energy from the scattering calculation, in inverse cm
        erydth, the calculated energy from the scattering calculation in Rydbergs
        ref, the references in the literature to the data in the input info
    outfile : `str`
        Output filename. ionS+'.elvlc' (in current directory) if None
    addLvl : `int`
        Add a constant value to the index of all levels

    Notes
    -----
    For use with files created before elvlc format change in November 2012

    See Also
    --------
    ChiantiPy.tools.io.elvlcWrite : Write .elvlc file using the new format.
    '''
    if outfile:
        elvlcName = outfile
    else:
        elvlcName = info['ionS'] + '.elvlc'
    print((' elvlc file name = %s'%(elvlcName)))
    # context manager guarantees the file is closed even if a key is missing
    with open(elvlcName, 'w') as out:
        for i, conf in enumerate(info['conf']):
            mult = int(2.*info['j'][i]+1.)   # statistical weight 2J+1
            thisTerm = info['term'][i].ljust(14)
            # fixed-column format: i3,a6,a15,2i3,a2,f5.1,i3,f15.3,f15.6,f15.3,f15.6
            pstring = '%3i%6s%15s%3i%3i%2s%5.1f%3i%15.3f%15.6f%15.3f%15.6f \n'%(i+1+addLvl, conf, thisTerm, info['spin'][i], info['l'][i], info['spd'][i], info['j'][i], mult, info['ecm'][i], info['eryd'][i], info['ecmth'][i], info['erydth'][i])
            out.write(pstring)
        out.write(' -1\n')
        out.write('%filename: ' + elvlcName + '\n')
        for one in info['ref']:
            out.write(one+'\n')
        out.write(' -1\n')
def wgfaRead(ions, filename=None, elvlcname=-1, total=False, verbose=False):
    """
    Read CHIANTI data from a .wgfa file.

    Parameters
    ----------
    ions : `str`
        Ion, e.g. 'c_5' for C V
    filename : `str`
        Custom filename, will override that specified by `ions`
    elvlcname : `str` or `int`
        If a filename is specified, the lsj term labels are returned in the
        `pretty1` and `pretty2` keys of `Wgfa`; the default integer sentinel
        -1 disables this lookup.
    total : `bool`
        Return the level 2 avalue data in `Wgfa`
    verbose : `bool`
        Print diagnostic output.

    Returns
    -------
    Wgfa : `dict`
        Information read from the .wgfa file. The dictionary structure is
        {"lvl1","lvl2","wvl","gf","avalue","ref","ionS","filename"}

    See Also
    --------
    ChiantiPy.tools.io.wgfaRead : Read .wgfa file with the new format.
    """
    #
    if filename:
        wgfaname = filename
        # guard the sentinel comparison: comparing a string elvlcname with 0
        # would raise TypeError under Python 3
        if isinstance(elvlcname, int) and elvlcname < 0:
            elvlcname = 0
            elvlc = 0
        elif not elvlcname:
            elvlcname = os.path.splitext(wgfaname)[0] + '.elvlc'
            if os.path.isfile(elvlcname):
                elvlc = elvlcRead('', elvlcname)
            else:
                elvlc = 0
        else:
            elvlc = elvlcRead('',elvlcname)
    else:
        fname=util.ion2filename(ions)
        wgfaname=fname+'.wgfa'
        elvlcname = fname + '.elvlc'
        if os.path.isfile(elvlcname):
            elvlc = elvlcRead('', elvlcname)
        else:
            elvlc = 0
    if verbose:
        if elvlc:
            print(' have elvlc data')
        else:
            print(' do not have elvlc data')
    #
    with open(wgfaname,'r') as fh:
        s1 = fh.readlines()
    # count data records: they end at the first line with fewer than 2 tokens
    nwvl=0
    ndata=2
    while ndata > 1:
        s1a=s1[nwvl]
        s2=s1a.split()
        ndata=len(s2)
        nwvl += 1
    nwvl -= 1
    if verbose:
        print(' nwvl = %10i ndata = %4i'%(nwvl, ndata))
    lvl1=[0]*nwvl
    lvl2=[0]*nwvl
    wvl=[0.]*nwvl
    gf=[0.]*nwvl
    avalue=[0.]*nwvl
    if elvlc:
        pretty1 = ['']*nwvl
        pretty2 = ['']*nwvl
    #
    if verbose:
        print(' nwvl = %10i'%(nwvl))
    #
    # bug fix: the original called FortranLine(), whose import is commented out
    # at the top of this module (NameError at runtime); use the
    # FortranRecordReader that this module already imports
    wgfaFormat = FortranRecordReader('2i5,f15.3,2e15.3')
    for ivl in range(nwvl):
        inpt = wgfaFormat.read(s1[ivl])
        lvl1[ivl]=inpt[0]
        lvl2[ivl]=inpt[1]
        wvl[ivl]=inpt[2]
        gf[ivl]=inpt[3]
        avalue[ivl]=inpt[4]
        if elvlc:
            pretty1[ivl] = elvlc['pretty'][inpt[0] - 1]
            pretty2[ivl] = elvlc['pretty'][inpt[1] - 1]
    ref=[]
    # should skip the last '-1' in the file
    for i in range(nwvl+1,len(s1) -1):
        s1a=s1[i][:-1]
        ref.append(s1a.strip())
    Wgfa={"lvl1":lvl1,"lvl2":lvl2,"wvl":wvl,"gf":gf,"avalue":avalue,"ref":ref, 'ionS':ions, 'filename':wgfaname}
    if total:
        # accumulate the total A-value into each upper level
        avalueLvl = [0.]*max(lvl2)
        for iwvl in range(nwvl):
            avalueLvl[lvl2[iwvl] -1] += avalue[iwvl]
        Wgfa['avalueLvl'] = avalueLvl
    if elvlc:
        Wgfa['pretty1'] = pretty1
        Wgfa['pretty2'] = pretty2
    #
    return Wgfa
#
# --------------------------------------
#
|
tgrassiREPO_NAMEprizmoPATH_START.@prizmo_extracted@prizmo-main@src_py@ChiantiPy@tools@archival.py@.PATH_END.py
|
{
"filename": "_traceref.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter3d/error_z/_traceref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TracerefValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the ``traceref`` property of ``scatter3d.error_z``."""

    def __init__(
        self, plotly_name="traceref", parent_name="scatter3d.error_z", **kwargs
    ):
        # pull overridable defaults out of kwargs before forwarding the rest
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        role = kwargs.pop("role", "info")
        super(TracerefValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter3d@error_z@_traceref.py@.PATH_END.py
|
{
"filename": "_labelpadding.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/carpet/aaxis/_labelpadding.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LabelpaddingValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the ``labelpadding`` property of ``carpet.aaxis``."""

    def __init__(
        self, plotly_name="labelpadding", parent_name="carpet.aaxis", **kwargs
    ):
        # caller-supplied kwargs override the default edit type
        edit_type = kwargs.pop("edit_type", "calc")
        super(LabelpaddingValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@carpet@aaxis@_labelpadding.py@.PATH_END.py
|
{
"filename": "TT_lite.py",
"repo_name": "ggalloni/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/planck_2018_highl_plik/TT_lite.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import Planck2018Clik
class TT_lite(Planck2018Clik):
    r"""
    High-$\ell$ temperature-only, foreground-marginalized \textsc{plik\_lite} likelihood
    of Planck's 2018 data release \cite{Aghanim:2019ame}.
    """
    # No code needed: all machinery is inherited from Planck2018Clik;
    # presumably the base class selects the clik data file from the
    # subclass name - confirm in cobaya.likelihoods.base_classes.
    pass
|
ggalloniREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@planck_2018_highl_plik@TT_lite.py@.PATH_END.py
|
{
"filename": "_colorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/marker/_colorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``colorsrc`` property of ``scattercarpet.marker``."""

    def __init__(
        self, plotly_name="colorsrc", parent_name="scattercarpet.marker", **kwargs
    ):
        # pull overridable defaults out of kwargs before forwarding the rest
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@marker@_colorsrc.py@.PATH_END.py
|
{
"filename": "DASpec_reduce.py",
"repo_name": "PuDu-Astro/DASpec",
"repo_path": "DASpec_extracted/DASpec-master/DASpec_reduce.py",
"type": "Python"
}
|
#!/usr/bin/env python
def DASpec_reduce(filename):
    """Keep only the last occurrence of each named '####' section of a DASpec file.

    The input consists of sections delimited by lines containing '####', each
    followed by a line giving the section name.  For every distinct name only
    the last section with that name is kept; the surviving sections are written
    (in order of appearance) to ``filename + '.red'``.

    Bug fixes relative to the original Python-2 version:
    - ``xrange`` replaced by ``range`` (NameError under Python 3);
    - ``zip`` results materialized / replaced (a zip object is exhausted after
      one pass under Python 3, so the old per-name scan silently broke);
    - output path built from the ``filename`` argument instead of
      ``sys.argv[1]`` (which also failed because ``sys`` was never imported
      in this function's scope).
    """
    with open(filename) as fh:
        lines = fh.readlines()
    # indices of the '####' delimiter lines, and the name on the line after each
    sep = [i for i, line in enumerate(lines) if '####' in line]
    names = [lines[i + 1].replace('\n', '') for i in sep]
    # keep only the last starting index for each distinct name
    last = {}
    for idx, name in zip(sep, names):
        last[name] = idx
    index_tot = sorted((idx, name) for name, idx in last.items())
    # each kept section ends just before the next delimiter, or at end of file
    end_tot = []
    for idx, _ in index_tot:
        pos = sep.index(idx)
        if idx != sep[-1]:
            end_tot.append(sep[pos + 1] - 1)
        else:
            end_tot.append(len(lines) - 1)
    with open(filename + '.red', 'w') as output:
        for (start, _), end in zip(index_tot, end_tot):
            output.writelines(lines[start:end + 1])
if __name__ == '__main__':
    import sys
    # usage: DASpec_reduce.py <input file>; writes <input file>.red
    DASpec_reduce(sys.argv[1])
|
PuDu-AstroREPO_NAMEDASpecPATH_START.@DASpec_extracted@DASpec-master@DASpec_reduce.py@.PATH_END.py
|
{
"filename": "show_refs.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py2/IPython/testing/plugin/show_refs.py",
"type": "Python"
}
|
"""Simple script to show reference holding behavior.
This is used by a companion test case.
"""
from __future__ import print_function
import gc
class C(object):
    """Trivial class with a (no-op) finalizer, used to observe reference holding."""
    def __del__(self):
        pass
        #print 'deleting object...' # dbg
if __name__ == '__main__':
    c = C()
    # ask the garbage collector which objects currently refer to c
    c_refs = gc.get_referrers(c)
    ref_ids = list(map(id,c_refs))
    print('c referrers:',list(map(type,c_refs)))
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py2@IPython@testing@plugin@show_refs.py@.PATH_END.py
|
{
"filename": "SNRtools.py",
"repo_name": "CosmoStatGW/DarkSirensStat",
"repo_path": "DarkSirensStat_extracted/DarkSirensStat-master/DarkSirensStat/SNRtools.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 12:56:37 2021
@author: Michi
"""
import numpy as np
import time
import pycbc.waveform
import pycbc.filter
import pycbc.psd
from pycbc.types import FrequencySeries
from scipy.signal import savgol_filter
import h5py
from scipy.interpolate import RectBivariateSpline, interp1d
import os
import argparse
import sys
from globals import *
# Per-detector divisor controlling the Savitzky-Golay smoothing window used
# when extrapolating a tabulated PSD (see oSNR._get_psd_from_file: the window
# length is ~ n_frequency_samples / windows[name]).
windows = { 'H1O3': 10.,
            'L1O3': 10.,
            'L1O2': 2,
            'H1O2': 4,
          }
# ASD (amplitude spectral density) text files, keyed by '<detector><run>'.
filenames = { 'H1O3': 'O3-H1-C01_CLEAN_SUB60HZ-1251752040.0_sensitivity_strain_asd.txt',
              'L1O3': 'O3-L1-C01_CLEAN_SUB60HZ-1240573680.0_sensitivity_strain_asd.txt',
              'L1O2': '2017-08-06_DCH_C02_L1_O2_Sensitivity_strain_asd.txt',
              'H1O2': '2017-06-10_DCH_C02_H1_O2_Sensitivity_strain_asd.txt',
            }
# Inverse lookup: ASD file name -> '<detector><run>' abbreviation.
abbreviations = {v: k for k, v in filenames.items()}
class oSNR(object):
    '''
    Class to compute optimal SNR with a given PSD, that should be passed when
    initialising the class, by giving the path to the file where it is stored
    through the argument psd_path. Tabulates the results by using pycbc if
    pre-computed tables are not available.

    Usage:
        myosnr = oSNR(psd_path, approximant='IMRPhenomXAS')
        myosnr.make_interpolator()
        myosnr.get_oSNR(m1d, m2d, dl)

    psd_path should be the *full path* to the PSD, not just the name of the file.
    '''

    def __init__(self, from_file=True, psd_path=None, psd_name ='aLIGOEarlyHighSensitivityP1200087', psd_base_path=None, approximant='IMRPhenomXAS', force_recompute = False, verbose=False):
        """Configure the SNR calculator.

        from_file=True  : read a tabulated ASD from psd_path.
        from_file=False : use the pycbc analytical PSD named psd_name; the
                          pre-computed grid is stored under psd_base_path.
        force_recompute : rebuild (and overwrite) the tabulated SNR grid even
                          if a pre-computed file exists.
        """
        self.from_file=from_file
        if from_file:
            if psd_path is None:
                raise ValueError('Enter valid file name for the PSD if using from_file=True')
            print('Using PSD from file %s ' %psd_path)
            self.psd_base_path = ('/').join(psd_path.split('/')[:-1])
            self.psd_file_name = psd_path.split('/')[-1]
            # Short '<detector><run>' tag looked up from the known file names;
            # raises KeyError for an unrecognised ASD file.
            self.name=abbreviations[self.psd_file_name]
            self.psd_path = psd_path
            self.path = psd_path+'_optimal_snr_'+approximant+'.h5'
        else:
            self.name = psd_name
            self.psd_path = psd_name
            self.path = os.path.join(psd_base_path,psd_name+'_optimal_snr_'+approximant+'.h5')
        self.approximant=approximant
        # Set to True once the PSD has been built; gates one-time log messages.
        self.psd_computed=False
        self.verbose=verbose
        self.force_recompute=force_recompute
        if self.force_recompute:
            print('!! You choose to re-compute the SNR pre-computed grid even if a pre-comuted one was present! This will overwrite already present files, if any. ')
        # Max length of the generated waveform hp, if computed. Tracked so a
        # diagnostic plot of the extrapolated PSD can be produced later.
        self.maxlen=0

    def _computeSNR(self, m1, m2, npoints=200, mmin=1., mmax=1000., dL_pivot_Gpc=1., deltaf=1./40. , store=True):
        """Optimal SNR for one binary with detector-frame masses m1, m2
        (solar masses) at the pivot luminosity distance dL_pivot_Gpc (Gpc).

        Note: npoints/mmin/mmax/store are accepted for signature compatibility
        with _computeSNRtable but are unused here.
        """
        if not self.psd_computed and self.from_file:
            # Lower frequency cutoff = lowest frequency tabulated in the file.
            psdvals = np.loadtxt(self.psd_path)
            self.flow = min(psdvals[:,0])
        elif not self.from_file:
            self.flow = 10.
        hp, hc = pycbc.waveform.get_fd_waveform( approximant=self.approximant,
                                                 mass1=m1,
                                                 mass2=m2,
                                                 delta_f = deltaf,
                                                 f_lower=self.flow,
                                                 distance=1e03*dL_pivot_Gpc ) # get_fd_waveform wants distance in Mpc
        if len(hp)>self.maxlen:
            self.maxlen=len(hp)
        if not self.psd_computed:
            print('Computing PSD with spectral density from %s ...' %self.psd_path)
            print('Wf length: %s ' %len(hp))
            self.psd_computed=True
        psd = self._get_psd(len(hp), deltaf, self.flow, is_asd_file=True, plot=False)
        # Matched-filter sigma of hp against the PSD = optimal SNR.
        snr = pycbc.filter.sigma(hp, psd=psd, low_frequency_cutoff=self.flow)
        return snr

    def _get_psd(self, length, delta_f, low_freq_cutoff, is_asd_file=True, plot=False):
        """Dispatch to the file-based or pycbc-analytical PSD builder."""
        if self.from_file:
            return self._get_psd_from_file( length, delta_f, low_freq_cutoff, is_asd_file=True, plot=False)
        else:
            return pycbc.psd.analytical.from_string( self.name, length, delta_f, low_freq_cutoff)

    def _get_psd_from_file(self, length, delta_f, low_freq_cutoff, is_asd_file=True, plot=False):
        '''
        This function adapts pycbc.psd.from_txt to extrapolate above the max
        frequency of the tabulated psd: it interpolates the values smoothly,
        and pads the original psd by extrapolating this smooth function.
        '''
        if not self.psd_computed:
            print('Reading PSD from %s...' %self.psd_path)
        file_data = np.loadtxt(self.psd_path)
        if (file_data < 0).any() or np.logical_not(np.isfinite(file_data)).any():
            raise ValueError('Invalid data in ' + self.psd_path)
        freq_data = file_data[:, 0]
        noise_data = file_data[:, 1]
        if is_asd_file:
            # The file stores an amplitude spectral density: square to get the PSD.
            noise_data = noise_data ** 2
        kmin = int(low_freq_cutoff / delta_f)
        flow = kmin * delta_f
        data_start = (0 if freq_data[0]==low_freq_cutoff else np.searchsorted(freq_data, flow) - 1)
        # If the cutoff is exactly in the file, start there
        if freq_data[data_start+1] == low_freq_cutoff:
            if not self.psd_computed:
                print('Starting at +1')
            data_start += 1
        freq_data = freq_data[data_start:]
        noise_data = noise_data[data_start:]
        # Interpolate log(PSD) vs log(f): smoother and safer to extrapolate.
        flog = np.log(freq_data)
        slog = np.log(noise_data)
        psd_interp = interp1d(flog, slog)
        kmin = int(low_freq_cutoff / delta_f)
        vals = np.log(np.arange(kmin, length) * delta_f)
        psd = np.zeros(length, dtype=np.float64)
        pad=True
        try:
            # First requested bin above the tabulated range; IndexError if the
            # request never exceeds the table (then no extrapolation needed).
            max_exact = np.argwhere(vals>=flog.max())[0][0]
            # Smooth the tabulated log-PSD, then extrapolate the smooth curve.
            filtered = savgol_filter(slog, int(flog.shape[0]/windows[self.name])+1, 3, deriv=0)
            filter_interp = interp1d(flog, filtered, bounds_error=False, fill_value='extrapolate')
            # Blend a few bins below the table edge to avoid a discontinuity.
            switch=10
            psd[kmin:kmin+max_exact-switch] = np.exp(psd_interp(vals[:max_exact-switch]))
            psd[kmin+max_exact-switch:] = np.exp(filter_interp(vals[max_exact-switch:]))
        except IndexError:
            pad=False
            psd[kmin:] = np.exp(psd_interp(vals))
        if plot:
            import matplotlib.pyplot as plt
            plt.rcParams["font.family"] = 'serif'
            plt.rcParams["mathtext.fontset"] = "cm"
            fig, ax = plt.subplots(1,2, figsize=(15, 4))
            if pad:
                ax[0].plot(vals[:max_exact-1], psd_interp(vals[:max_exact-1]), alpha=0.2, color='green',label='Interpolation of the original')
                ax[0].plot(vals[8000:], filter_interp(vals[8000:]), alpha=1, label='Extrapolation', color='orange')
                ax[0].axvline(vals.min(), color='k', ls='--')
                ax[0].axvline(vals.max(), color='k', ls='--')
                ax[0].legend()
            ax[1].plot(vals, np.log(psd[kmin:]), alpha=1., label='Full')
            ax[1].legend()
            fig.suptitle(os.path.join(self.psd_base_path,self.name+', '+self.approximant) )
            fig.savefig( os.path.join(self.psd_base_path, self.name+'_interpolation.pdf') )
        return FrequencySeries(psd, delta_f=delta_f)

    def _computeSNRtable(self, npoints=200, mmin=1., mmax=1000., dL_pivot_Gpc=1., deltaf=1./40 , store=True):
        """Tabulate the optimal SNR on an npoints x npoints log-spaced grid of
        detector-frame masses in (mmin, mmax), at distance dL_pivot_Gpc.

        Returns (ms, osnrs); optionally stores them (with metadata) in an
        HDF5 file at self.path.
        """
        npoints=int(npoints)
        self.dL_pivot_Gpc=dL_pivot_Gpc
        self.mmin = mmin
        self.mmax = mmax
        # Grid of detector frame masses (log-spaced).
        ms = np.geomspace(mmin, mmax, npoints)
        print('Computing optimal SNR for detector-frame masses in (%s, %s ) solar masses at distance of %s Gpc...' %(mmin, mmax, dL_pivot_Gpc))
        osnrs = np.zeros((npoints, npoints))
        in_time=time.time()
        for i, m1 in enumerate(ms):
            # SNR is symmetric in (m1, m2): compute the lower triangle only.
            for j in range(i+1):
                m2 = ms[j]
                snr_ = self._computeSNR( m1, m2, npoints=npoints, mmin=mmin, mmax=mmax, dL_pivot_Gpc=dL_pivot_Gpc, deltaf=deltaf , store=store)
                osnrs[i,j] = snr_
                osnrs[j,i] = snr_
                # Progress marker roughly every 50 evaluations.
                # BUGFIX: the original `i+j % 50 == 0` parsed as `i + (j % 50)`
                # due to operator precedence, so it only fired at i == j == 0.
                if (i + j) % 50 == 0:
                    print('#', end='', flush=True)
        print('\nDone in %.2fs ' %(time.time() - in_time))
        if store:
            print('Saving result...')
            with h5py.File(self.path, 'w') as out:
                out.create_dataset('ms', data=ms, compression='gzip', shuffle=True)
                out.create_dataset('SNR', data=osnrs, compression='gzip', shuffle=True)
                out.attrs['dL'] = dL_pivot_Gpc
                out.attrs['npoints'] = npoints
                out.attrs['approximant'] = self.approximant
                out.attrs['mmin'] = mmin
                out.attrs['mmax'] = mmax
                out.attrs['deltaf'] = deltaf
                out.attrs['flow'] = self.flow
        return ms, osnrs

    def make_interpolator(self, **kwargs) :
        """Load (or compute) the SNR grid and build a bivariate spline
        interpolator over (m1, m2), stored as self.interpolator."""
        if os.path.exists(self.path) and not self.force_recompute:
            if self.verbose:
                print('Pre-computed optimal SNR grid is present for this PSD. Loading...')
            with h5py.File(self.path, 'r') as inp:
                ms = np.array(inp['ms'])
                SNRgrid = np.array(inp['SNR'])
                self.dL_pivot_Gpc = inp.attrs['dL']
                self.flow = inp.attrs['flow']
                self.mmin = inp.attrs['mmin']
                self.mmax = inp.attrs['mmax']
                if self.verbose:
                    print('Attributes of pre-computed SNRs: ')
                    print([(k, inp.attrs[k]) for k in inp.attrs.keys() ])
        else:
            print('Tabulating SNRs...')
            ms, SNRgrid = self._computeSNRtable(**kwargs)
        self.interpolator = RectBivariateSpline(ms, ms, SNRgrid)

    def get_oSNR(self, m1det, m2det, dL):
        '''
        m1det, m2det : masses in detector frame in units of solar mass
        dL : luminosity distance in Gpc

        Requires m1det >= m2det and masses inside the tabulated range.
        The pivot-distance SNR is rescaled by dL_pivot/dL (SNR ~ 1/dL).
        '''
        if np.any(m2det>m1det):
            raise ValueError('Optimal SNR called with m2>m1')
        if np.any(m2det<self.mmin):
            raise ValueError('get_oSNR called with value of m2 below the interpolation range')
        if np.any(m1det>self.mmax):
            raise ValueError('get_oSNR called with value of m1 above the interpolation range')
        return self.interpolator.ev(m1det, m2det)*self.dL_pivot_Gpc/dL
# Human-readable interferometer names keyed by detector code.
ifos = {'L1': 'Livingston', 'H1': 'Hanford'}
def tabulate_SNR_from_files(approximant='IMRPhenomXAS', npoints=200, mmin=1., mmax=1000., dL_pivot_Gpc=1., deltaf=1./40 , store=True, force_recompute=False):
    """Tabulate optimal-SNR grids for every (observing run, detector) ASD file.

    For each run in O3, O2 and each interferometer in L1, H1, builds an oSNR
    object from the corresponding ASD file, computes (or loads) its
    interpolation grid, and saves a diagnostic plot of the PSD actually used.
    """
    for obs_run in ('O3', 'O2'):
        for det in ["L1", "H1"]:
            print('\n --------- %s obs run, %s interferometer' %(obs_run, ifos[det]))
            asd_file = os.path.join(detectorPath, filenames[det + obs_run])
            snr_calc = oSNR(from_file=True, psd_path=asd_file, approximant=approximant, force_recompute=force_recompute)
            snr_calc.make_interpolator(npoints=npoints, mmin=mmin, mmax=mmax, dL_pivot_Gpc=dL_pivot_Gpc, deltaf=deltaf, store=store)
            # Diagnostic plot for the longest waveform generated so far.
            _ = snr_calc._get_psd(snr_calc.maxlen, deltaf, snr_calc.flow, is_asd_file=True, plot=True)
def tabulate_SNR_from_analytic(psd_name, approximant='IMRPhenomXAS', npoints=200, mmin=1., mmax=1000., dL_pivot_Gpc=1., deltaf=1./40 , store=True, force_recompute=False):
    """Tabulate an optimal-SNR grid for the pycbc analytical PSD *psd_name*."""
    # NOTE(review): this function uses Globals.detectorPath while
    # tabulate_SNR_from_files uses the bare name detectorPath (both come via
    # `from globals import *`) — confirm these resolve to the same location.
    myosnr = oSNR(from_file=False, psd_name =psd_name, psd_base_path=Globals.detectorPath, approximant=approximant, force_recompute=force_recompute)
    myosnr.make_interpolator(npoints=npoints, mmin=mmin, mmax=mmax, dL_pivot_Gpc=dL_pivot_Gpc, deltaf=deltaf, store=store)
    ##make plot
    #length = myosnr.maxlen
    #_ = myosnr._get_psd( length, deltaf, myosnr.flow, is_asd_file=True, plot=True)
if __name__=='__main__':
    # Command-line driver: parse options, echo them, then dispatch to the
    # file-based or analytical-PSD tabulation. Boolean-like flags are passed
    # as floats (1. = True) for historical compatibility.
    parser = argparse.ArgumentParser()
    parser.add_argument("--from_file", default=0., type=float, required=True)
    parser.add_argument("--psd_name", default='aLIGOEarlyHighSensitivityP1200087', type=str, required=False)
    parser.add_argument("--approximant", default='IMRPhenomXAS', type=str, required=False)
    parser.add_argument("--npoints", default=200, type=float, required=False)
    parser.add_argument("--mmin", default=1., type=float, required=False)
    parser.add_argument("--mmax", default=1000., type=float, required=False)
    parser.add_argument("--dL_pivot_Gpc", default=1., type=float, required=False)
    parser.add_argument("--deltaf", default=1./40, type=float, required=False)
    parser.add_argument("--force_recompute", default=0., type=float, required=False)
    cli_args = parser.parse_args()
    print('Arguments: ')
    for opt_name, opt_value in vars(cli_args).items():
        print(opt_name, opt_value)
    force_recompute = (cli_args.force_recompute == 1.)
    if cli_args.from_file == 1:
        print('Computing from file')
        tabulate_SNR_from_files(approximant=cli_args.approximant, npoints=cli_args.npoints, mmin=cli_args.mmin, mmax=cli_args.mmax, dL_pivot_Gpc=cli_args.dL_pivot_Gpc, deltaf=cli_args.deltaf, store=True, force_recompute=force_recompute)
    else:
        if cli_args.psd_name is None:
            raise ValueError()
        tabulate_SNR_from_analytic(cli_args.psd_name, approximant=cli_args.approximant, npoints=cli_args.npoints, mmin=cli_args.mmin, mmax=cli_args.mmax, dL_pivot_Gpc=cli_args.dL_pivot_Gpc, deltaf=cli_args.deltaf, store=True, force_recompute=force_recompute)
|
CosmoStatGWREPO_NAMEDarkSirensStatPATH_START.@DarkSirensStat_extracted@DarkSirensStat-master@DarkSirensStat@SNRtools.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "danhey/echelle",
"repo_path": "echelle_extracted/echelle-master/docs/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
# Core project metadata shown in the generated documentation.
project = "echelle"
copyright = "2021, Daniel R Hey"
author = "Daniel R Hey"
# The full version, including alpha/beta/rc tags
# release = "1.4"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.mathjax",
    "sphinx.ext.viewcode",
    "sphinx.ext.napoleon",
    # "sphinx_rtd_theme",
    "nbsphinx",  # renders Jupyter notebooks into the docs
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**/.ipynb_checkpoints"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = "press"
html_theme = "furo"
html_logo = "logo.png"
# html_theme_options = {
#     "logo_only": False,
#     "display_version": True,
# }
|
danheyREPO_NAMEechellePATH_START.@echelle_extracted@echelle-master@docs@conf.py@.PATH_END.py
|
{
"filename": "paper.md",
"repo_name": "ali-beheshti/Astro-Paint",
"repo_path": "Astro-Paint_extracted/Astro-Paint-master/paper/paper.md",
"type": "Markdown"
}
|
---
title: 'AstroPaint: A Python Package for Painting Halo Catalogs into
Celestial Maps'
tags:
- python
- astrophysics
- simulation
- visualization
- extragalactic foregrounds
authors:
- name: Siavash Yasini^[corresponding author]
orcid: 0000-0003-1978-6325
affiliation: 1
- name: Marcelo Alvarez
affiliation: "2, 3"
- name: Emmanuel Schaan
orcid: 0000-0002-4619-8927
affiliation: "2, 3"
- name: Karime Maamari
affiliation: "1, 5"
- name: Shobeir K. S. Mazinani
affiliation: 4
- name: Nareg Mirzatuny
affiliation: 1
- name: Elena Pierpaoli
affiliation: 1
affiliations:
- name: University of Southern California
index: 1
- name: Lawrence Berkeley National Laboratory
index: 2
- name: University of California, Berkeley
index: 3
- name: Aetna Inc.
index: 4
- name: Argonne National Lab
index: 5
date: 31 July 2020
bibliography: paper.bib
# Optional fields if submitting to a AAS journal too, see this blog post:
# https://blog.joss.theoj.org/2018/12/a-new-collaboration-with-aas-publishing
# aas-doi: 10.3847/xxxxx <- update this with the DOI from AAS once you know it.
# aas-journal: Astrophysical Journal <- The name of the AAS journal.
---
# Overview
`AstroPaint` is a python package for generating and visualizing
sky maps of a wide range of astrophysical signals originating from dark
matter halos or the gas that they host. `AstroPaint` creates a whole-sky mock map of
the target signal/observable, at a desired resolution, by combining an input
halo catalog and the radial/angular profile of the astrophysical effect
(see the
workflow
section for details).
The package also provides a suite of tools that
can facilitate analysis routines such as catalog filtering, map manipulation,
and cutout stacking. The simulation suite has an Object-Oriented design and
runs in parallel, making it both easy to use and readily scalable for
production of high resolution maps with large underlying catalogs. Although the package has been
primarily developed to simulate signals pertinent to galaxy clusters, its application extends to halos of arbitrary size or even point
sources.

# Statement of Need
Studying the large scale structure of the universe heavily relies on
observations of astrophysical signals at various frequencies. Examples of such
studies include detection or characterization of objects such as galaxies, clusters, or voids
through either gravitational lensing, electromagnetic scattering, absorption or emission events in the optical, radio, or x-ray
frequency bands. Such studies typically require simulated high resolution
maps of various astrophysical effects to emulate both the signal and
noise (foregrounds) components. For example, in a study that aims
to evaluate the detection significance of the Birkinshaw-Gull (BG)
effect – a probe of the transverse
velocities of halos [@Birkinshaw:1983; @Yasini:2018] – using the Simons
Observatory [@SO:2019] or CMB-S4 [@CMB-S4:2019], one needs a mock
map of the BG effect
\autoref{fig:BG}
as well as maps of potential contaminants such as kinetic and
thermal Sunyaev-Zeldovich effects (kSZ and tSZ) [@Sunyaev:1970] for the
same set of objects.
While it is possible to create realistic maps of astrophysical effects through
hydrodynamical simulations [@Dolag:2015], these methods are numerically
expensive for large numbers of objects and reproducing them under different
cosmologies and initial conditions can be prohibitive. An alternative
strategy for creating mock observations of extended objects
such as galaxies and galaxy cluster halos is to simulate the
positions of these objects (either semi-analytically or through N-body
simulations [@Stein:2020; @Stein:2018; @Sehgal:2010]) and then synthetically
paint the desired signal at the location of the halos. `AstroPaint` is
developed to help researchers in creating mock maps using the latter
strategy.
AstroPaint can also be used to create templates for detecting astrophysical
effects in image data. For example, to detect kSZ for an ensemble of
galaxies in a CMB map, one needs a
template of this effect for the observed patch of the sky. Such a template can
be generated by taking the catalog of the target galaxies along with their
velocities and painting kSZ profiles around them on a map using
`AstroPaint`.
# Package Structure and Workflow
`AstroPaint` consists of three main objects that interact with each other: `Catalog`, `Canvas`, and `Painter`.
`Catalog` contains the locations, velocities, and masses of the objects.
`Canvas` contains the map of the astrophysical signal in HEALPix format
[@Healpy:2019].
`Painter` contains the template for the radial profile of the signal to be
painted on the `Canvas` in circular discs centered at the location of the
halos in the
`Catalog`.
These objects are sequentially passed into each other according to the
following workflow:
```python
from astropaint import Catalog, Canvas, Painter
catalog = Catalog(data=input_data)
canvas = Canvas(catalog, nside)
painter = Painter(template=radial_profile)
painter.spray(canvas)
```
The output map array can be accessed via `canvas.pixels` or directly
visualized using `canvas.show_map()`. Here `input_data` is the dataframe that
holds the locations, velocities, and
masses of the halos. `nside` is a parameter in `healpy` that determines the
total number of pixels (`npix = 12 * nside ** 2`) and
consequently the resolution of the map. Finally, `radial_profile` is a one-dimensional function that determines the shape
of the profile. A mind map visualization of the package structure can be
found in [here](https://www.mindmeister.com/1417665103/astropaint-astropaint-py?fullscreen=1).
# Acknowledgements
We would like to thank Simone Ferraro, Tim Morton, Vera Gluscevic, George Stein, Mathew Madhavacheril,
Zack Li, and Alex van Engelen for their incredibly helpful comments and
feedback. SY is grateful to the BCCP
group at UC Berkeley for their
hospitality during
Summer 2019 where this project was inaugurated. EP is supported by NASA
80NSSC18K0403 and the Simons Foundation award number 615662; EP and SY are supported by NSF AST-1910678.
# References
|
ali-beheshtiREPO_NAMEAstro-PaintPATH_START.@Astro-Paint_extracted@Astro-Paint-master@paper@paper.md@.PATH_END.py
|
{
"filename": "data_operations.py",
"repo_name": "bcalden/ClusterPyXT",
"repo_path": "ClusterPyXT_extracted/ClusterPyXT-master/data_operations.py",
"type": "Python"
}
|
import numpy as np
def normalize_data(image: np.ndarray):
    """Min-max normalize *image* into the [0, 1] range.

    NaNs are replaced with 0 (and +/-inf with large finite values) via
    ``np.nan_to_num`` before scaling.

    BUGFIX: a constant image (max == min) previously caused a division by
    zero, yielding NaN/inf output; it is now returned as all zeros.
    """
    image = np.nan_to_num(image)
    lo = image.min()
    span = image.max() - lo
    if span == 0:
        return np.zeros_like(image, dtype=float)
    return (image - lo) / span
def make_sizes_match(image1, image2):
    """Return ``(image1, image2)`` coerced to a common shape.

    The smaller array is resized to the larger array's shape with
    ``np.resize``. Note that ``np.resize`` repeats the flattened data to
    fill the new shape — it does not zero-pad (the original comment claiming
    zero-fill was incorrect).

    BUGFIX: when the two arrays had equal size but different shapes (e.g.
    (2, 3) vs (3, 2)), every branch was skipped and the function returned
    ``None``; image1 is now resized to image2's shape in that case.
    """
    # Quit early if the image shapes already match.
    if image1.shape == image2.shape:
        return image1, image2
    # image1 is smaller (or equal-sized but differently shaped).
    if image1.size <= image2.size:
        return np.resize(image1, image2.shape), image2
    # image2 is smaller: resize it to image1's shape.
    return image1, np.resize(image2, image1.shape)
|
bcaldenREPO_NAMEClusterPyXTPATH_START.@ClusterPyXT_extracted@ClusterPyXT-master@data_operations.py@.PATH_END.py
|
{
"filename": "PULL_REQUEST_TEMPLATE.md",
"repo_name": "joshspeagle/dynesty",
"repo_path": "dynesty_extracted/dynesty-master/.github/PULL_REQUEST_TEMPLATE.md",
"type": "Markdown"
}
|
<!--
Thanks for contributing a pull request!
Before submitting a PR please take a look at the [CONTRIBUTING.md](CONTRIBUTING.md) page.
-->
### Reference issue
### What does your PR implement/fix?
|
joshspeagleREPO_NAMEdynestyPATH_START.@dynesty_extracted@dynesty-master@.github@PULL_REQUEST_TEMPLATE.md@.PATH_END.py
|
{
"filename": "plot_adapt_rgb.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/doc/examples/color_exposure/plot_adapt_rgb.py",
"type": "Python"
}
|
"""
=========================================
Adapting gray-scale filters to RGB images
=========================================
There are many filters that are designed to work with gray-scale images but not
with color images. To simplify the process of creating functions that can adapt
to RGB images, scikit-image provides the ``adapt_rgb`` decorator.
To actually use the ``adapt_rgb`` decorator, you have to decide how you want to
adapt the RGB image for use with the gray-scale filter. There are two
pre-defined handlers:
``each_channel``
Pass each of the RGB channels to the filter one-by-one, and stitch the
results back into an RGB image.
``hsv_value``
Convert the RGB image to HSV and pass the value channel to the filter.
The filtered result is inserted back into the HSV image and converted
back to RGB.
Below, we demonstrate the use of ``adapt_rgb`` on a couple of gray-scale
filters:
"""
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage import filters
@adapt_rgb(each_channel)
def sobel_each(image):
    """Sobel edge filter applied to each RGB channel independently;
    the filtered channels are stitched back into an RGB image."""
    return filters.sobel(image)
@adapt_rgb(hsv_value)
def sobel_hsv(image):
    """Sobel edge filter applied to the (V)alue channel of the
    HSV-converted image, then converted back to RGB."""
    return filters.sobel(image)
######################################################################
# We can use these functions as we would normally use them, but now they work
# with both gray-scale and color images. Let's plot the results with a color
# image:
from skimage import data
from skimage.exposure import rescale_intensity
import matplotlib.pyplot as plt
# Side-by-side comparison: per-channel Sobel vs HSV-value Sobel.
image = data.astronaut()
fig, (ax_each, ax_hsv) = plt.subplots(ncols=2, figsize=(14, 7))
# We use 1 - sobel_each(image) but this won't work if image is not normalized
ax_each.imshow(rescale_intensity(1 - sobel_each(image)))
ax_each.set_xticks([]), ax_each.set_yticks([])
ax_each.set_title("Sobel filter computed\n on individual RGB channels")
# We use 1 - sobel_hsv(image) but this won't work if image is not normalized
ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image)))
ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")
######################################################################
# Notice that the result for the value-filtered image preserves the color of
# the original image, but channel filtered image combines in a more
# surprising way. In other common cases, smoothing for example, the channel
# filtered image will produce a better result than the value-filtered image.
#
# You can also create your own handler functions for ``adapt_rgb``. To do so,
# just create a function with the following signature::
#
# def handler(image_filter, image, *args, **kwargs):
# # Manipulate RGB image here...
# image = image_filter(image, *args, **kwargs)
# # Manipulate filtered image here...
# return image
#
# Note that ``adapt_rgb`` handlers are written for filters where the image is
# the first argument.
#
# As a very simple example, we can just convert any RGB image to grayscale
# and then return the filtered result:
from skimage.color import rgb2gray
def as_gray(image_filter, image, *args, **kwargs):
    """adapt_rgb handler: convert the RGB input to grayscale before
    forwarding it (with any extra arguments) to *image_filter*."""
    return image_filter(rgb2gray(image), *args, **kwargs)
######################################################################
# It's important to create a signature that uses ``*args`` and ``**kwargs``
# to pass arguments along to the filter so that the decorated function is
# allowed to have any number of positional and keyword arguments.
#
# Finally, we can use this handler with ``adapt_rgb`` just as before:
@adapt_rgb(as_gray)
def sobel_gray(image):
    """Sobel edge filter applied to the grayscale-converted image
    (via the custom ``as_gray`` handler defined above)."""
    return filters.sobel(image)
# Show the grayscale-handler result on its own figure.
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(7, 7))
# We use 1 - sobel_gray(image) but this won't work if image is not normalized
ax.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)
ax.set_xticks([]), ax.set_yticks([])
ax.set_title("Sobel filter computed\n on the converted grayscale image")
plt.show()
######################################################################
#
# .. note::
#
# A very simple check of the array shape is used for detecting RGB
# images, so ``adapt_rgb`` is not recommended for functions that support
# 3D volumes or color images in non-RGB spaces.
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@doc@examples@color_exposure@plot_adapt_rgb.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.