metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "download.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/community/hermite_grx/download.py",
"type": "Python"
}
|
#!/usr/bin/env python
import subprocess
import os
import urllib.request
import urllib.parse
import urllib.error
from shutil import which
import argparse
class GetCodeFromHttp:
    """Download and unpack a code release tarball over HTTP(S).

    Downloads one tarball per entry of ``url_template``/``version`` into
    the local ``src`` directory (preferring ``wget``, then ``curl``, then
    plain ``urllib``) and renames the unpacked ``{name}-{version}``
    directory to ``{name}``.
    """

    # Name of the downloaded file, filled in with the requested version.
    filename_template = "{version}.tar.gz"
    # Code names, parallel to url_template and version.
    name = ["Hermite_GRX"]
    url_template = [
        "https://github.com/amusecode/Hermite_GRX/archive/{version}.tar.gz",
    ]
    # Versions (tags or commit hashes) to download; filled in by main().
    version = [
        "",
    ]

    def directory(self):
        """Return the absolute path of the directory containing this file."""
        return os.path.abspath(os.path.dirname(__file__))

    def src_directory(self):
        """Return the ``src`` subdirectory used for downloads."""
        return os.path.join(self.directory(), "src")

    def unpack_downloaded_file(self, filename, name, version):
        """Unpack *filename* inside src/ and rename the result to *name*."""
        # BUG FIX: the message previously had no placeholder and printed
        # a literal "(unknown)".
        print(f"unpacking {filename}")
        arguments = ["tar", "-xf"]
        arguments.append(filename)
        subprocess.call(arguments, cwd=self.src_directory())
        subprocess.call(
            ["mv", f"{name}-{version}", name], cwd=self.src_directory()
        )
        print("done")

    def start(self):
        """Back up any existing src directory, then download and unpack."""
        if os.path.exists("src"):
            # Find a free backup name of the form src.<counter>.
            counter = 0
            while os.path.exists(f"src.{counter}"):
                counter += 1
                if counter > 100:
                    print("too many backup directories")
                    break
            # NOTE(review): if the loop above broke at the limit, the
            # backup name may still exist and this rename can fail.
            os.rename("src", f"src.{counter}")
        os.mkdir("src")
        for i, url_template in enumerate(self.url_template):
            url = url_template.format(version=self.version[i])
            filename = self.filename_template.format(version=self.version[i])
            filepath = os.path.join(self.src_directory(), filename)
            # BUG FIX: the message previously printed "(unknown)" instead
            # of the download target path.
            print(
                f"downloading version {self.version[i]} "
                f"from {url} to {filepath}"
            )
            if which("wget") is not None:
                subprocess.call(["wget", url], cwd=self.src_directory())
            elif which("curl") is not None:
                subprocess.call(
                    ["curl", "-L", "-O", url], cwd=self.src_directory()
                )
            else:
                # No CLI downloader available; fall back to urllib.
                urllib.request.urlretrieve(url, filepath)
            print("downloading finished")
            self.unpack_downloaded_file(filename, self.name[i], self.version[i])
def main(hermite_grx_version=""):
version = [
hermite_grx_version,
]
instance = GetCodeFromHttp()
instance.version = version
instance.start()
def new_argument_parser():
    """Build the command-line parser for this download script.

    Returns an ``argparse.ArgumentParser`` whose single option's ``dest``
    matches the keyword argument of ``main()``.
    """
    result = argparse.ArgumentParser()
    result.add_argument(
        # BUG FIX: the option was only spelled "--seba-version" (a
        # copy-paste from the SeBa download script) although it selects a
        # Hermite_GRX commit. The correct name is added first; the old
        # spelling is kept as a backward-compatible alias.
        "--hermite-grx-version",
        "--seba-version",
        default="c69fa0af018adbbd13d30a7e853c0179b8afbf7f",
        dest="hermite_grx_version",
        help="Hermite_GRX commit hash to download",
        type=str,
    )
    return result
if __name__ == "__main__":
arguments = new_argument_parser().parse_args()
main(**arguments.__dict__)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@community@hermite_grx@download.py@.PATH_END.py
|
{
"filename": "ex_sandwich3.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/examples/ex_sandwich3.py",
"type": "Python"
}
|
"""Cluster Robust Standard Errors with Two Clusters
Created on Sat Dec 17 08:39:16 2011
Author: Josef Perktold
"""
from urllib.request import urlretrieve
import numpy as np
from numpy.testing import assert_almost_equal
import statsmodels.api as sm
import statsmodels.stats.sandwich_covariance as sw
#requires Petersen's test_data
#http://www.kellogg.northwestern.edu/faculty/petersen/htm/papers/se/test_data.txt
try:
pet = np.genfromtxt("test_data.txt")
print('using local file')
except OSError:
urlretrieve('http://www.kellogg.northwestern.edu/faculty/petersen/htm/papers/se/test_data.txt',
'test_data.txt')
print('downloading file')
pet = np.genfromtxt("test_data.txt")
endog = pet[:,-1]
group = pet[:,0].astype(int)
time = pet[:,1].astype(int)
exog = sm.add_constant(pet[:,2])
res = sm.OLS(endog, exog).fit()
cov01, covg, covt = sw.cov_cluster_2groups(res, group, group2=time)
#Reference number from Petersen
#http://www.kellogg.northwestern.edu/faculty/petersen/htm/papers/se/test_data.htm
bse_petw = [0.0284, 0.0284]
bse_pet0 = [0.0670, 0.0506]
bse_pet1 = [0.0234, 0.0334] #year
bse_pet01 = [0.0651, 0.0536] #firm and year
bse_0 = sw.se_cov(covg)
bse_1 = sw.se_cov(covt)
bse_01 = sw.se_cov(cov01)
print('OLS ', res.bse)
print('het HC0 ', res.HC0_se, bse_petw - res.HC0_se)
print('het firm ', bse_0, bse_0 - bse_pet0)
print('het year ', bse_1, bse_1 - bse_pet1)
print('het firm & year', bse_01, bse_01 - bse_pet01)
print('relative difference standard error het firm & year to OLS')
print(' ', bse_01 / res.bse)
#From the last line we see that the cluster and year robust standard errors
#are approximately twice those of OLS
assert_almost_equal(bse_petw, res.HC0_se, decimal=4)
assert_almost_equal(bse_0, bse_pet0, decimal=4)
assert_almost_equal(bse_1, bse_pet1, decimal=4)
assert_almost_equal(bse_01, bse_pet01, decimal=4)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@examples@ex_sandwich3.py@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/sunburst/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``sunburst.visible`` property."""

    def __init__(self, plotly_name="visible", parent_name="sunburst", **kwargs):
        # Defaults below may be overridden by the caller through kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        values = kwargs.pop("values", [True, False, "legendonly"])
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@sunburst@_visible.py@.PATH_END.py
|
{
"filename": "show.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/mayavi/tools/show.py",
"type": "Python"
}
|
# Author: Prabhu Ramachandran <prabhu[at]aero[dot]iitb[dot]ac[dot]in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
from traits.etsconfig.api import ETSConfig
from pyface.api import GUI, ApplicationWindow
from traits.api import HasTraits, Button, Any
from traitsui.api import View, Group, Item
from pyface.util import guisupport
# Globals.
# The GUI instance (set by `show`, used by StopShow to stop the loop).
_gui = None
# The stop show instance (kept alive while its dialog is shown).
_stop_show = None
def is_ui_running():
    """ Returns True if the UI event loop is running.

    Offscreen rendering is treated as "running" since no interactive
    loop is required in that mode.
    """
    from .engine_manager import options

    if options.offscreen:
        return True
    toolkit = ETSConfig.toolkit
    if toolkit == 'wx':
        return guisupport.is_event_loop_running_wx()
    if toolkit == 'qt4':
        return guisupport.is_event_loop_running_qt4()
    return False
###############################################################################
# `StopShow` class.
###############################################################################
class StopShow(HasTraits):
    """Small dialog with a single button that stops the running GUI
    event loop when clicked (used by ``show(stop=True)``)."""

    ########################################
    # Traits
    stop = Button('Stop interaction',
                  desc='if the UI interaction is to be stopped')

    # Private traits.
    # Stores a reference to the UI object so it can be disposed when the
    # interaction is stopped.
    _ui = Any

    view = View(Group(Item('stop'), show_labels=False),
                buttons=[], title='Control Show')

    ######################################################################
    # `object` interface.
    ######################################################################
    def __init__(self, **traits):
        super(StopShow, self).__init__(**traits)
        # Pop up the (non-modal) dialog immediately on construction.
        self._ui = self.edit_traits()

    ######################################################################
    # Non-public interface.
    ######################################################################
    def _stop_fired(self):
        # Stop the module-global GUI's event loop, then dispose of our
        # own dialog.
        _gui.stop_event_loop()
        self._ui.dispose()
def show(func=None, stop=False):
    """ Start interacting with the figure.

    By default, this function simply creates a GUI and starts its
    event loop if needed.

    If it is used as a decorator, then it may be used to decorate a
    function which requires a UI. If the GUI event loop is already
    running it simply runs the function. If not the event loop is
    started and function is run in the toolkit's event loop. The choice
    of UI is via `ETSConfig.toolkit`.

    If the argument stop is set to True then it pops up a UI where the
    user can stop the event loop. Subsequent calls to `show` will
    restart the event loop.

    **Parameters**

    :stop: A boolean which specifies if a UI dialog is displayed which
           allows the event loop to be stopped.

    **Examples**

    Here is a simple example demonstrating the use of show::

        >>> from mayavi import mlab
        >>> mlab.test_contour3d()
        >>> mlab.show()

    You can stop interaction via a simple pop up UI like so::

        >>> mlab.test_contour3d()
        >>> mlab.show(stop=True)

    The decorator can be used like so::

        >>> @mlab.show
        ... def do():
        ...     mlab.test_contour3d()
        ...
        >>> do()

    The decorator can also be passed the stop argument::

        >>> @mlab.show(stop=True)
        ... def do():
        ...     mlab.test_contour3d()
        ...
        >>> do()
    """
    global _gui, _stop_show
    # Direct (non-decorator) call: start an event loop only if one is
    # not already running, then return.
    if func is None:
        if not is_ui_running():
            g = GUI()
            _gui = g
            if stop:
                _stop_show = StopShow()
            # Blocks until the event loop is stopped.
            g.start_event_loop()
        return

    # Decorator usage: wrap func so it runs inside the GUI event loop.
    def wrapper(*args, **kw):
        """Wrapper function to run given function inside the GUI event
        loop.
        """
        global _gui, _stop_show
        tk = ETSConfig.toolkit
        if is_ui_running():
            # In this case we should not pop up the UI since we likely
            # don't want to stop the mainloop.
            return func(*args, **kw)
        else:
            g = GUI()
            if tk == 'wx':
                # Create a dummy app so invoke later works on wx.
                a = ApplicationWindow(size=(1, 1))
                GUI.invoke_later(lambda: a.close())
                a.open()
            # Schedule func to run once the event loop starts.
            GUI.invoke_later(func, *args, **kw)
            _gui = g
            if stop:
                # Pop up the UI to stop the mainloop.
                _stop_show = StopShow()
            g.start_event_loop()
    return wrapper
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@mayavi@tools@show.py@.PATH_END.py
|
{
"filename": "test_gradients.py",
"repo_name": "astro-informatics/s2wav",
"repo_path": "s2wav_extracted/s2wav-main/tests/test_gradients.py",
"type": "Python"
}
|
import jax
jax.config.update("jax_enable_x64", True)
import pytest
import jax.numpy as jnp
from jax.test_util import check_grads
import s2fft
from s2wav.transforms import wavelet, wavelet_c, wavelet_precompute, construct
from s2wav import filters, samples
# Parameter grids for the gradient tests below; each list is consumed by
# a @pytest.mark.parametrize decorator.
L_to_test = [8]
N_to_test = [3]
J_min_to_test = [2]
reality = [False, True]
recursive_transform = [False, True]
using_c_backend = [False, True]
@pytest.mark.parametrize("L", L_to_test)
@pytest.mark.parametrize("N", N_to_test)
@pytest.mark.parametrize("J_min", J_min_to_test)
@pytest.mark.parametrize("reality", reality)
@pytest.mark.parametrize("recursive", recursive_transform)
@pytest.mark.parametrize("using_c_backend", using_c_backend)
def test_jax_synthesis_gradients(
wavelet_generator,
L: int,
N: int,
J_min: int,
reality: bool,
recursive: bool,
using_c_backend: bool,
):
J = samples.j_max(L)
# Exceptions
if J_min >= J:
pytest.skip("J_min larger than J which isn't a valid test case.")
if not recursive and using_c_backend:
pytest.skip("Precompute transform not supported from C backend libraries.")
if reality and using_c_backend:
pytest.skip("Hermitian symmetry for C backend gradients currently conflicts.")
# Generate random signal
f_wav, f_scal, _, _ = wavelet_generator(
L=L, N=N, J_min=J_min, lam=2, reality=reality
)
# Generate wavelet filters
filter = filters.filters_directional_vectorised(L, N, J_min, 2)
generator = (
None
if using_c_backend
else (
construct.generate_wigner_precomputes
if recursive
else construct.generate_full_precomputes
)
)
synthesis = (
(wavelet_c.synthesis if using_c_backend else wavelet.synthesis)
if recursive
else wavelet_precompute.synthesis
)
precomps = (
None
if using_c_backend
else generator(L, N, J_min, 2, forward=True, reality=reality)
)
args = {"precomps": precomps} if not using_c_backend else {}
def func(f_wav, f_scal):
f = synthesis(
f_wav,
f_scal,
L,
N,
J_min,
reality=reality,
filters=filter,
**args,
)
return jnp.sum(jnp.abs(f) ** 2)
check_grads(func, (f_wav, f_scal), order=1, modes=("rev"))
@pytest.mark.parametrize("L", L_to_test)
@pytest.mark.parametrize("N", N_to_test)
@pytest.mark.parametrize("J_min", J_min_to_test)
@pytest.mark.parametrize("reality", reality)
@pytest.mark.parametrize("recursive", recursive_transform)
@pytest.mark.parametrize("using_c_backend", using_c_backend)
def test_jax_analysis_gradients(
flm_generator,
wavelet_generator,
L: int,
N: int,
J_min: int,
reality: bool,
recursive: bool,
using_c_backend: bool,
):
J = samples.j_max(L)
if J_min >= J:
pytest.skip("J_min larger than J which isn't a valid test case.")
if not recursive and using_c_backend:
pytest.skip("Precompute transform not supported from C backend libraries.")
if reality and using_c_backend:
pytest.skip("Hermitian symmetry for C backend gradients currently conflicts.")
# Generate random signal
flm = flm_generator(L=L, L_lower=0, spin=0, reality=reality)
f = s2fft.inverse_jax(flm, L)
# Generate target signal
f_wav_target, f_scal_target, _, _ = wavelet_generator(
L=L, N=N, J_min=J_min, lam=2, reality=reality
)
# Generate wavelet filters
filter = filters.filters_directional_vectorised(L, N, J_min)
generator = (
None
if using_c_backend
else (
construct.generate_wigner_precomputes
if recursive
else construct.generate_full_precomputes
)
)
analysis = (
(wavelet_c.analysis if using_c_backend else wavelet.analysis)
if recursive
else wavelet_precompute.analysis
)
precomps = (
None
if using_c_backend
else generator(L, N, J_min, forward=False, reality=reality)
)
args = {"precomps": precomps} if not using_c_backend else {}
def func(f):
f_wav, f_scal = analysis(
f, L, N, J_min, reality=reality, filters=filter, **args
)
loss = jnp.sum(jnp.abs(f_scal - f_scal_target) ** 2)
for j in range(J - J_min):
loss += jnp.sum(jnp.abs(f_wav[j - J_min] - f_wav_target[j - J_min]) ** 2)
return loss
check_grads(func, (f.real if reality else f,), order=1, modes=("rev"))
|
astro-informaticsREPO_NAMEs2wavPATH_START.@s2wav_extracted@s2wav-main@tests@test_gradients.py@.PATH_END.py
|
{
"filename": "read_distributed_grid.py",
"repo_name": "bwvdnbro/CMacIonize",
"repo_path": "CMacIonize_extracted/CMacIonize-master/tools/read_distributed_grid.py",
"type": "Python"
}
|
import numpy as np
import h5py
import re
# precompiled regular expressions for reading in int and length arrays
# BUG FIX: use raw strings — "\[" etc. in plain string literals are
# invalid escape sequences (DeprecationWarning today, SyntaxError in a
# future Python).
intarray = re.compile(r"\[([0-9]*), ([0-9]*), ([0-9]*)\]")
lengtharray = re.compile(
    r"\[([\-0-9\.e\+]*) m, ([\-0-9\.e\+]*) m, ([\-0-9\.e\+]*) m\]"
)
##
# @brief Read quantities from a distributed snapshot. This method takes an
# optional additional conversion function that can be used to convert the
# quantities.
#
# The quantities are read in chunks, with each chunk corresponding to a single
# subgrid in the distributed grid. If @f$N@f$ is the number of quantities and
# @f$s@f$ is the size of a chunk, then the datablock after reading has shape
# @f$(N, s)@f$. The optional conversion function takes the @f$N@f$ input
# quantities and converts them to @f$N'@f$ output quantities:
# @f[
# f: (N, s) \rightarrow{} (N', s)
# @f]
# If no conversion function is provided, @f$N' = N@f$.
#
# The output is a data cube with shape @f$(N', N_x, N_y, N_z)@f$, where
# @f$N_x@f$, @f$N_y@f$ and @f$N_z@f$ are the number of cells in the grid in each
# coordinate direction. If @f$N'=1@f$, the first dimension of the image cube is
# squeezed and a 3D array is returned.
#
# @param filename Name of the snapshot file.
# @param quantities str or list of str containing the names of all quantities
# to read.
# @param conversion_function (optional) Conversion function applied to the
# quantities after reading. Default: no conversion, quantities are returned as
# they are read.
# @param output_coordinates (optional) Output the cell coordinates? Default is
# false.
# @return 1 or 4 arrays: coordinates of the cells (3D, only if
# output_coordinates is true), data cube containing the quantities (3D or 4D,
# depending on the number of quantities or the number of quantities returned by
# the conversion function).
##
def read_quantities(
    filename,
    quantities,
    conversion_function=lambda x: x,
    output_coordinates=False,
):
    """Read quantities from a distributed snapshot (see the doxygen
    comment above for the full description).

    @param filename Name of the snapshot file.
    @param quantities str or list of str with the quantity names to read.
    @param conversion_function (optional) Conversion applied to each
    chunk after reading; maps (N, s) input quantities to (N', s).
    @param output_coordinates (optional) Also return the cell coordinate
    grids? Default is False.
    @return 1 or 4 arrays: cell coordinate grids (only if
    output_coordinates is True) and the (squeezed) data cube.
    """
    # convert quantities to a list if it is not one already
    if not isinstance(quantities, list):
        quantities = [quantities]
    # open the snapshot file
    file = h5py.File(filename, "r")
    # make sure we are dealing with a distributed grid
    if (
        not "DensitySubGridCreator:number of subgrids"
        in file["/Parameters"].attrs
    ):
        print("Error: not a distributed snapshot!")
        exit(1)
    # read grid parameters
    res_x, res_y, res_z = intarray.search(
        file["/Parameters"].attrs["DensityGrid:number of cells"].decode("utf-8")
    ).groups()
    res = np.array([int(res_x), int(res_y), int(res_z)], dtype=np.uint32)
    ch_x, ch_y, ch_z = intarray.search(
        file["/Parameters"]
        .attrs["DensitySubGridCreator:number of subgrids"]
        .decode("utf-8")
    ).groups()
    chunks = np.array([int(ch_x), int(ch_y), int(ch_z)], dtype=np.uint32)
    chunksize = res // chunks
    if output_coordinates:
        # read box dimensions
        ax, ay, az = lengtharray.search(
            file["/Parameters"].attrs["SimulationBox:anchor"].decode("utf-8")
        ).groups()
        sx, sy, sz = lengtharray.search(
            file["/Parameters"].attrs["SimulationBox:sides"].decode("utf-8")
        ).groups()
        box_anchor = np.array(
            [float(ax), float(ay), float(az)], dtype=np.float32
        )
        box_sides = np.array(
            [float(sx), float(sy), float(sz)], dtype=np.float32
        )
    # set up an empty data cube (we will initialise it once the output shape of
    # conversion_function() is known)
    qgrid = None
    # control variables for the reading loop
    startchunk = 0
    endchunk = chunksize[0] * chunksize[1] * chunksize[2]
    chunk_length = chunksize[0] * chunksize[1] * chunksize[2]
    max_chunk = res[0] * res[1] * res[2]
    chunk_shape = None
    ix = 0
    iy = 0
    iz = 0
    # reading loop
    while endchunk <= max_chunk:
        # create a data cube for the chunk data
        qs = np.zeros((len(quantities), endchunk - startchunk))
        # read the requested quantities
        for iq in range(len(quantities)):
            qs[iq] = file["/PartType0/" + quantities[iq]][startchunk:endchunk]
        # apply the conversion function
        qs = conversion_function(qs)
        # qs.shape[0] now contains the number of output quantities
        # use this to initialise the data cube and to determine the shape of
        # each converted chunk
        if qgrid is None:
            if len(qs.shape) == 1:
                qgrid = np.zeros((1, res[0], res[1], res[2]))
                chunk_shape = (1, chunksize[0], chunksize[1], chunksize[2])
            else:
                qgrid = np.zeros((qs.shape[0], res[0], res[1], res[2]))
                chunk_shape = (
                    qs.shape[0],
                    chunksize[0],
                    chunksize[1],
                    chunksize[2],
                )
        # put the values in the appropriate cells
        qgrid[
            :,
            ix : ix + chunksize[0],
            iy : iy + chunksize[1],
            iz : iz + chunksize[2],
        ] = qs.reshape(chunk_shape)
        # update the read loop control variables
        startchunk += chunk_length
        endchunk += chunk_length
        # advance the subgrid position in z-fastest order
        iz += chunksize[2]
        if iz == res[2]:
            iz = 0
            iy += chunksize[1]
            if iy == res[1]:
                iy = 0
                ix += chunksize[0]
    # BUG FIX: close the file, we are done with it (the sibling read
    # functions in this module do this; this one leaked the handle)
    file.close()
    if output_coordinates:
        # generate the cell coordinate arrays:
        # first generate 1D arrays for the boundaries of the pixels
        ixgrid = np.linspace(
            box_anchor[0], box_anchor[0] + box_sides[0], res[0] + 1
        )
        iygrid = np.linspace(
            box_anchor[1], box_anchor[1] + box_sides[1], res[1] + 1
        )
        izgrid = np.linspace(
            box_anchor[2], box_anchor[2] + box_sides[2], res[2] + 1
        )
        # now compute the center of each pixel
        ixgrid = 0.5 * (ixgrid[1:] + ixgrid[:-1])
        iygrid = 0.5 * (iygrid[1:] + iygrid[:-1])
        izgrid = 0.5 * (izgrid[1:] + izgrid[:-1])
        # set up a 3D grid
        xgrid, ygrid, zgrid = np.meshgrid(ixgrid, iygrid, izgrid, indexing="ij")
        # return and exit the function
        return xgrid, ygrid, zgrid, qgrid.squeeze()
    else:
        # return and exit the function
        return qgrid.squeeze()
##
# @brief Read quantities from a distributed snapshot and integrate them out
# along the given coordinate axis. This method takes an optional additional
# conversion function that can be used to convert the quantities before the
# integration.
#
# The quantities are read in chunks, with each chunk corresponding to a single
# subgrid in the distributed grid. If @f$N@f$ is the number of quantities and
# @f$s@f$ is the size of a chunk, then the datablock after reading has shape
# @f$(N, s)@f$. The optional conversion function takes the @f$N@f$ input
# quantities and converts them to @f$N'@f$ output quantities:
# @f[
# f: (N, s) \rightarrow{} (N', s)
# @f]
# If no conversion function is provided, @f$N' = N@f$.
#
# The output is an image data cube with shape @f$(N', N_i, N_j)@f$, where
# @f$N_i@f$ and @f$N_j@f$ are the number of cells in the grid in the plane
# perpendicular to the integration axis. If @f$N'=1@f$, the first dimension of
# the image cube is squeezed and a 2D array is returned.
#
# @param filename Name of the snapshot file.
# @param quantities str or list of str containing the names of all quantities
# to read.
# @param conversion_function (optional) Conversion function applied to the
# quantities after reading but before integrating along the line of sight.
# Default: no conversion, quantities are integrated as they are read.
# @param axis (optional) Axis along which to integrate. Defaults to the z-axis
# (axis 2).
# @param output_coordinates (optional) Output the pixel coordinates? Default is
# false.
# @return 1 or 3 arrays: coordinates of the image pixels (2D, only if
# output_coordinates is true), image cube containing the integrated quantities
# (2D or 3D, depending on the number of quantities or the number of quantities
# returned by the conversion function).
##
def read_integrated_quantities(
    filename,
    quantities,
    conversion_function=lambda x: x,
    axis=2,
    output_coordinates=False,
):
    """Read quantities from a distributed snapshot and sum them along
    one coordinate axis (see the doxygen comment above for details).

    @param filename Name of the snapshot file.
    @param quantities str or list of str with the quantity names to read.
    @param conversion_function (optional) Conversion applied to each
    chunk after reading but before integration.
    @param axis (optional) Axis along which to integrate (default: 2).
    @param output_coordinates (optional) Also return pixel coordinates?
    @return 1 or 3 arrays: pixel coordinate grids (only if
    output_coordinates is True) and the (squeezed) image cube.
    """
    # convert quantities to a list if it is not one already
    if not isinstance(quantities, list):
        quantities = [quantities]
    # open the snapshot file
    file = h5py.File(filename, "r")
    # make sure we are dealing with a distributed grid
    if (
        not "DensitySubGridCreator:number of subgrids"
        in file["/Parameters"].attrs
    ):
        print("Error: not a distributed snapshot!")
        exit(1)
    # read grid parameters
    res_x, res_y, res_z = intarray.search(
        file["/Parameters"].attrs["DensityGrid:number of cells"].decode("utf-8")
    ).groups()
    res = np.array([int(res_x), int(res_y), int(res_z)], dtype=np.uint32)
    ch_x, ch_y, ch_z = intarray.search(
        file["/Parameters"]
        .attrs["DensitySubGridCreator:number of subgrids"]
        .decode("utf-8")
    ).groups()
    chunks = np.array([int(ch_x), int(ch_y), int(ch_z)], dtype=np.uint32)
    chunksize = res // chunks
    if output_coordinates:
        # read box dimensions
        ax, ay, az = lengtharray.search(
            file["/Parameters"].attrs["SimulationBox:anchor"].decode("utf-8")
        ).groups()
        sx, sy, sz = lengtharray.search(
            file["/Parameters"].attrs["SimulationBox:sides"].decode("utf-8")
        ).groups()
        box_anchor = np.array(
            [float(ax), float(ay), float(az)], dtype=np.float32
        )
        box_sides = np.array(
            [float(sx), float(sy), float(sz)], dtype=np.float32
        )
    # determine the dimensions of the image (the two axes perpendicular
    # to the integration axis)
    if axis == 0:
        image_shape = (res[1], res[2])
    elif axis == 1:
        image_shape = (res[0], res[2])
    else:
        image_shape = (res[0], res[1])
    # set up an empty image data cube (we will initialise it once the output
    # shape of conversion_function() is known)
    qgrid = None
    # control variables for the reading loop
    startchunk = 0
    endchunk = chunksize[0] * chunksize[1] * chunksize[2]
    chunk_length = chunksize[0] * chunksize[1] * chunksize[2]
    max_chunk = res[0] * res[1] * res[2]
    chunk_shape = None
    ix = 0
    iy = 0
    iz = 0
    # reading loop
    while endchunk <= max_chunk:
        # create a data cube for the chunk data
        qs = np.zeros((len(quantities), endchunk - startchunk))
        # read the requested quantities
        for iq in range(len(quantities)):
            qs[iq] = file["/PartType0/" + quantities[iq]][startchunk:endchunk]
        # apply the conversion function
        qs = conversion_function(qs)
        # qs.shape[0] now contains the number of output quantities
        # use this to initialise the data cube and to determine the shape of
        # each converted chunk
        if qgrid is None:
            if len(qs.shape) == 1:
                qgrid = np.zeros((1, image_shape[0], image_shape[1]))
                chunk_shape = (1, chunksize[0], chunksize[1], chunksize[2])
            else:
                qgrid = np.zeros((qs.shape[0], image_shape[0], image_shape[1]))
                chunk_shape = (
                    qs.shape[0],
                    chunksize[0],
                    chunksize[1],
                    chunksize[2],
                )
        # perform the integration and add the result to the appropriate pixels
        # (axis + 1 because axis 0 of the reshaped chunk is the quantity axis)
        if axis == 0:
            qgrid[
                :, iy : iy + chunksize[1], iz : iz + chunksize[2]
            ] += qs.reshape(chunk_shape).sum(axis=axis + 1)
        elif axis == 1:
            qgrid[
                :, ix : ix + chunksize[0], iz : iz + chunksize[2]
            ] += qs.reshape(chunk_shape).sum(axis=axis + 1)
        else:
            qgrid[
                :, ix : ix + chunksize[0], iy : iy + chunksize[1]
            ] += qs.reshape(chunk_shape).sum(axis=axis + 1)
        # update the read loop control variables
        startchunk += chunk_length
        endchunk += chunk_length
        # advance the subgrid position in z-fastest order
        iz += chunksize[2]
        if iz == res[2]:
            iz = 0
            iy += chunksize[1]
            if iy == res[1]:
                iy = 0
                ix += chunksize[0]
    # close the file, we are done with it
    file.close()
    if output_coordinates:
        # generate the pixel coordinate arrays:
        # first generate 1D arrays for the boundaries of the pixels
        if axis == 0:
            ixgrid = np.linspace(
                box_anchor[1], box_anchor[1] + box_sides[1], res[1] + 1
            )
            iygrid = np.linspace(
                box_anchor[2], box_anchor[2] + box_sides[2], res[2] + 1
            )
        elif axis == 1:
            ixgrid = np.linspace(
                box_anchor[0], box_anchor[0] + box_sides[0], res[0] + 1
            )
            iygrid = np.linspace(
                box_anchor[2], box_anchor[2] + box_sides[2], res[2] + 1
            )
        else:
            ixgrid = np.linspace(
                box_anchor[0], box_anchor[0] + box_sides[0], res[0] + 1
            )
            iygrid = np.linspace(
                box_anchor[1], box_anchor[1] + box_sides[1], res[1] + 1
            )
        # now compute the center of each pixel
        ixgrid = 0.5 * (ixgrid[1:] + ixgrid[:-1])
        iygrid = 0.5 * (iygrid[1:] + iygrid[:-1])
        # set up a 2D grid
        xgrid, ygrid = np.meshgrid(ixgrid, iygrid, indexing="ij")
        # return and exit the function
        return xgrid, ygrid, qgrid.squeeze()
    else:
        # return and exit the function
        return qgrid.squeeze()
##
# @brief Read quantities from a distributed snapshot and slice them
# perpendicular to the given axis at the given intercept. This method takes an
# optional additional conversion function that can be used to convert the
# quantities before the slicing.
#
# The quantities are read in chunks, with each chunk corresponding to a single
# subgrid in the distributed grid. If @f$N@f$ is the number of quantities and
# @f$s@f$ is the size of a chunk, then the datablock after reading has shape
# @f$(N, s)@f$. The optional conversion function takes the @f$N@f$ input
# quantities and converts them to @f$N'@f$ output quantities:
# @f[
# f: (N, s) \rightarrow{} (N', s)
# @f]
# If no conversion function is provided, @f$N' = N@f$.
#
# The output is an image data cube with shape @f$(N', N_i, N_j)@f$, where
# @f$N_i@f$ and @f$N_j@f$ are the number of cells in the grid in the plane
# perpendicular to the slice axis. If @f$N'=1@f$, the first dimension of the
# image cube is squeezed and a 2D array is returned.
#
# @param filename Name of the snapshot file.
# @param quantities str or list of str containing the names of all quantities
# to read.
# @param conversion_function (optional) Conversion function applied to the
# quantities after reading but before slicing perpendicular to the line of
# sight. Default: no conversion, quantities are sliced as they are read.
# @param axis (optional) Axis perpendicular to which to integrate. Defaults to
# the z-axis (axis 2).
# @param intercept (optional) Intercept along the slice axis, in SI units.
# Default is the origin (z = 0).
# @param output_coordinates (optional) Output the pixel coordinates? Default is
# false.
# @return 1 or 3 arrays: coordinates of the image pixels (2D, only if
# output_coordinates is true), image cube containing the sliced quantities
# (2D or 3D, depending on the number of quantities or the number of quantities
# returned by the conversion function).
##
def read_sliced_quantities(
    filename,
    quantities,
    conversion_function=lambda x: x,
    axis=2,
    intercept=0,
    output_coordinates=False,
):
    """Read quantities from a distributed snapshot and slice them
    perpendicular to the given axis at the given intercept (see the
    doxygen comment above for details).

    @param filename Name of the snapshot file.
    @param quantities str or list of str with the quantity names to read.
    @param conversion_function (optional) Conversion applied to each
    chunk after reading but before slicing.
    @param axis (optional) Axis perpendicular to the slice (default: 2).
    @param intercept (optional) Intercept along the slice axis, in SI
    units (default: 0).
    @param output_coordinates (optional) Also return pixel coordinates?
    @return 1 or 3 arrays: pixel coordinate grids (only if
    output_coordinates is True) and the (squeezed) image cube.
    """
    # convert quantities to a list if it is not one already
    if not isinstance(quantities, list):
        quantities = [quantities]
    # open the snapshot file
    file = h5py.File(filename, "r")
    # make sure we are dealing with a distributed grid
    if (
        not "DensitySubGridCreator:number of subgrids"
        in file["/Parameters"].attrs
    ):
        print("Error: not a distributed snapshot!")
        exit(1)
    # read grid parameters
    res_x, res_y, res_z = intarray.search(
        file["/Parameters"].attrs["DensityGrid:number of cells"].decode("utf-8")
    ).groups()
    res = np.array([int(res_x), int(res_y), int(res_z)], dtype=np.uint32)
    ch_x, ch_y, ch_z = intarray.search(
        file["/Parameters"]
        .attrs["DensitySubGridCreator:number of subgrids"]
        .decode("utf-8")
    ).groups()
    chunks = np.array([int(ch_x), int(ch_y), int(ch_z)], dtype=np.uint32)
    chunksize = res // chunks
    # read box dimensions (always needed here, to locate the slice)
    ax, ay, az = lengtharray.search(
        file["/Parameters"].attrs["SimulationBox:anchor"].decode("utf-8")
    ).groups()
    sx, sy, sz = lengtharray.search(
        file["/Parameters"].attrs["SimulationBox:sides"].decode("utf-8")
    ).groups()
    box_anchor = np.array([float(ax), float(ay), float(az)], dtype=np.float32)
    box_sides = np.array([float(sx), float(sy), float(sz)], dtype=np.float32)
    # check that the intercept is within the box
    if (
        intercept < box_anchor[axis]
        or intercept > box_anchor[axis] + box_sides[axis]
    ):
        print("Error: intercept value outside box range!")
        exit(1)
    # determine the index of the slice within the cell grid
    slice_index = int(
        np.floor((intercept - box_anchor[axis]) * res[axis] / box_sides[axis])
    )
    # determine the dimensions of the image (the two axes perpendicular
    # to the slice axis)
    if axis == 0:
        image_shape = (res[1], res[2])
    elif axis == 1:
        image_shape = (res[0], res[2])
    else:
        image_shape = (res[0], res[1])
    # set up an empty image data cube (we will initialise it once the output
    # shape of conversion_function() is known)
    qgrid = None
    # control variables for the reading loop
    startchunk = 0
    endchunk = chunksize[0] * chunksize[1] * chunksize[2]
    chunk_length = chunksize[0] * chunksize[1] * chunksize[2]
    max_chunk = res[0] * res[1] * res[2]
    chunk_shape = None
    ix = 0
    iy = 0
    iz = 0
    # reading loop
    while endchunk <= max_chunk:
        # check if the current chunk overlaps with the slice; chunks that
        # do not are skipped without reading any data
        if axis == 0:
            in_slice = (ix <= slice_index) and (slice_index < ix + chunksize[0])
        elif axis == 1:
            in_slice = (iy <= slice_index) and (slice_index < iy + chunksize[1])
        else:
            in_slice = (iz <= slice_index) and (slice_index < iz + chunksize[2])
        if in_slice:
            # create a data cube for the chunk data
            qs = np.zeros((len(quantities), endchunk - startchunk))
            # read the requested quantities
            for iq in range(len(quantities)):
                qs[iq] = file["/PartType0/" + quantities[iq]][
                    startchunk:endchunk
                ]
            # apply the conversion function
            qs = conversion_function(qs)
            # qs.shape[0] now contains the number of output quantities
            # use this to initialise the data cube and to determine the shape of
            # each converted chunk
            if qgrid is None:
                if len(qs.shape) == 1:
                    qgrid = np.zeros((1, image_shape[0], image_shape[1]))
                    chunk_shape = (1, chunksize[0], chunksize[1], chunksize[2])
                else:
                    qgrid = np.zeros(
                        (qs.shape[0], image_shape[0], image_shape[1])
                    )
                    chunk_shape = (
                        qs.shape[0],
                        chunksize[0],
                        chunksize[1],
                        chunksize[2],
                    )
            # slice the chunk and put the results in the right image pixel
            # (slice_index is converted to a chunk-local index)
            if axis == 0:
                qgrid[
                    :, iy : iy + chunksize[1], iz : iz + chunksize[2]
                ] = qs.reshape(chunk_shape)[:, slice_index - ix, :, :]
            elif axis == 1:
                qgrid[
                    :, ix : ix + chunksize[0], iz : iz + chunksize[2]
                ] = qs.reshape(chunk_shape)[:, :, slice_index - iy, :]
            else:
                qgrid[
                    :, ix : ix + chunksize[0], iy : iy + chunksize[1]
                ] = qs.reshape(chunk_shape)[:, :, :, slice_index - iz]
        # update the read loop control variables
        startchunk += chunk_length
        endchunk += chunk_length
        # advance the subgrid position in z-fastest order
        iz += chunksize[2]
        if iz == res[2]:
            iz = 0
            iy += chunksize[1]
            if iy == res[1]:
                iy = 0
                ix += chunksize[0]
    # close the file, we are done with it
    file.close()
    if output_coordinates:
        # generate the pixel coordinate arrays:
        # first generate 1D arrays for the boundaries of the pixels
        if axis == 0:
            ixgrid = np.linspace(
                box_anchor[1], box_anchor[1] + box_sides[1], res[1] + 1
            )
            iygrid = np.linspace(
                box_anchor[2], box_anchor[2] + box_sides[2], res[2] + 1
            )
        elif axis == 1:
            ixgrid = np.linspace(
                box_anchor[0], box_anchor[0] + box_sides[0], res[0] + 1
            )
            iygrid = np.linspace(
                box_anchor[2], box_anchor[2] + box_sides[2], res[2] + 1
            )
        else:
            ixgrid = np.linspace(
                box_anchor[0], box_anchor[0] + box_sides[0], res[0] + 1
            )
            iygrid = np.linspace(
                box_anchor[1], box_anchor[1] + box_sides[1], res[1] + 1
            )
        # now compute the center of each pixel
        ixgrid = 0.5 * (ixgrid[1:] + ixgrid[:-1])
        iygrid = 0.5 * (iygrid[1:] + iygrid[:-1])
        # set up a 2D grid
        xgrid, ygrid = np.meshgrid(ixgrid, iygrid, indexing="ij")
        # return and exit the function
        return xgrid, ygrid, qgrid.squeeze()
    else:
        # return and exit the function
        return qgrid.squeeze()
##
# @brief Unit test.
#
# Takes the name of a snapshot file as input and generates an image of the
# density or neutral density using the read, integrate or slice method. Further
# arguments allow to test the method versions with and without coordinate
# arrays.
##
if __name__ == "__main__":
    import argparse

    import matplotlib

    # select a non-interactive backend before pyplot is imported, so the
    # script also runs without a display
    matplotlib.use("Agg")
    import matplotlib.pyplot as pl

    pl.rcParams["text.usetex"] = True

    # command line arguments
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--file", "-f", action="store", required=True)
    argparser.add_argument("--output", "-o", action="store", required=True)
    argparser.add_argument(
        "--method",
        "-m",
        action="store",
        choices=["read", "integrate", "slice"],
        default="slice",
    )
    argparser.add_argument("--neutral", "-n", action="store_true")
    argparser.add_argument("--coordinates", "-c", action="store_true")
    args = argparser.parse_args()

    # function used to compute the neutral density based on the density and
    # the neutral fraction
    def neutral_density_function(quantities):
        return quantities[0] * quantities[1]

    # call the test function with the appropriate arguments
    if args.method == "read":
        # full 3D read; the cube is projected onto the image plane below by
        # summing along the third axis
        if args.neutral:
            result = read_quantities(
                args.file,
                ["NumberDensity", "NeutralFractionH"],
                neutral_density_function,
                output_coordinates=args.coordinates,
            )
            if args.coordinates:
                # result is (xgrid, ygrid, zgrid, values); keep a 2D slice of
                # the coordinate grids for plotting
                xgrid, ygrid, zgrid, rhogrid = result
                xgrid = xgrid[:, :, 0]
                ygrid = ygrid[:, :, 0]
            else:
                # no coordinate output: build pixel-index grids instead
                # (shape[-3]/shape[-2] are the x and y pixel counts)
                rhogrid = result
                xgrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 3])
                ygrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 2])
                xgrid, ygrid = np.meshgrid(xgrid, ygrid, indexing="ij")
        else:
            result = read_quantities(
                args.file,
                ["NumberDensity", "NeutralFractionH"],
                output_coordinates=args.coordinates,
            )
            if args.coordinates:
                xgrid, ygrid, zgrid, rhogrid = result
                xgrid = xgrid[:, :, 0]
                ygrid = ygrid[:, :, 0]
            else:
                rhogrid = result
                xgrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 3])
                ygrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 2])
                xgrid, ygrid = np.meshgrid(xgrid, ygrid, indexing="ij")
            # without a combining function the reader returns one grid per
            # requested quantity; keep the first one (NumberDensity)
            rhogrid = rhogrid[0]
        # project the 3D cube onto the image plane
        rhogrid = rhogrid.sum(axis=2)
    elif args.method == "integrate":
        # line-of-sight integration: the reader already returns a 2D image
        if args.neutral:
            result = read_integrated_quantities(
                args.file,
                ["NumberDensity", "NeutralFractionH"],
                neutral_density_function,
                output_coordinates=args.coordinates,
            )
            if args.coordinates:
                xgrid, ygrid, rhogrid = result
            else:
                # no coordinate output: build pixel-index grids instead
                rhogrid = result
                xgrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 2])
                ygrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 1])
                xgrid, ygrid = np.meshgrid(xgrid, ygrid, indexing="ij")
        else:
            result = read_integrated_quantities(
                args.file,
                ["NumberDensity", "NeutralFractionH"],
                output_coordinates=args.coordinates,
            )
            if args.coordinates:
                xgrid, ygrid, rhogrid = result
            else:
                rhogrid = result
                xgrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 2])
                ygrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 1])
                xgrid, ygrid = np.meshgrid(xgrid, ygrid, indexing="ij")
            # keep the first requested quantity (NumberDensity)
            rhogrid = rhogrid[0]
    elif args.method == "slice":
        # 2D slice through the box: the reader returns a 2D image
        if args.neutral:
            result = read_sliced_quantities(
                args.file,
                ["NumberDensity", "NeutralFractionH"],
                neutral_density_function,
                output_coordinates=args.coordinates,
            )
            if args.coordinates:
                xgrid, ygrid, rhogrid = result
            else:
                # no coordinate output: build pixel-index grids instead
                rhogrid = result
                xgrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 2])
                ygrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 1])
                xgrid, ygrid = np.meshgrid(xgrid, ygrid, indexing="ij")
        else:
            result = read_sliced_quantities(
                args.file,
                ["NumberDensity", "NeutralFractionH"],
                output_coordinates=args.coordinates,
            )
            if args.coordinates:
                xgrid, ygrid, rhogrid = result
            else:
                rhogrid = result
                xgrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 2])
                ygrid = np.arange(0, rhogrid.shape[len(rhogrid.shape) - 1])
                xgrid, ygrid = np.meshgrid(xgrid, ygrid, indexing="ij")
            # keep the first requested quantity (NumberDensity)
            rhogrid = rhogrid[0]
    # plot the result: log10 of the density as a filled contour image
    pl.contourf(xgrid, ygrid, np.log10(rhogrid), 500)
    pl.savefig(args.output, dpi=300, bbox_inches="tight")
|
bwvdnbroREPO_NAMECMacIonizePATH_START.@CMacIonize_extracted@CMacIonize-master@tools@read_distributed_grid.py@.PATH_END.py
|
{
"filename": "version.py",
"repo_name": "caseyjlaw/rtpipe",
"repo_path": "rtpipe_extracted/rtpipe-master/rtpipe/version.py",
"type": "Python"
}
|
__version__ = '1.60'
|
caseyjlawREPO_NAMErtpipePATH_START.@rtpipe_extracted@rtpipe-master@rtpipe@version.py@.PATH_END.py
|
{
"filename": "test_gmm_poisson.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/sandbox/regression/tests/test_gmm_poisson.py",
"type": "Python"
}
|
'''
TestGMMMultTwostepDefault() has lower precision
'''
from statsmodels.compat.python import lmap
import numpy as np
import pandas
from scipy import stats
import pytest
from statsmodels.regression.linear_model import OLS
from statsmodels.sandbox.regression import gmm
from numpy.testing import assert_allclose, assert_equal
def get_data():
    """Load the RACD10 health-care data and apply the sample selection
    used by these GMM Poisson tests.

    Returns
    -------
    pandas.DataFrame
        Filtered data with an added ``'const'`` column of ones.
    """
    import os
    curdir = os.path.split(__file__)[0]
    dt = pandas.read_csv(os.path.join(curdir, 'racd10data_with_transformed.csv'))

    # Transformations compared to original data
    ##dt3['income'] /= 10.
    ##dt3['aget'] = (dt3['age'] - dt3['age'].min()) / 5.
    ##dt3['aget2'] = dt3['aget']**2

    # drop observations that have both private insurance and medicaid,
    # and very large visit counts
    mask = ~((np.asarray(dt['private']) == 1) & (dt['medicaid'] == 1))
    mask = mask & (dt['docvis'] <= 70)
    # BUG FIX: take an explicit copy so the column assignment below writes
    # into our own frame instead of a view of `dt`
    # (avoids pandas SettingWithCopyWarning / silently lost assignment)
    dt3 = dt[mask].copy()
    dt3['const'] = 1 # add constant
    return dt3
DATA = get_data()
#------------- moment conditions for example
def moment_exponential_add(params, exog, exp=True):
    """Predicted mean for the additive-error moment condition.

    Parameters
    ----------
    params : ndarray
        Coefficient vector.
    exog : ndarray
        Regressor matrix.
    exp : bool
        If true, use the exponential mean ``exp(exog @ params)``;
        otherwise the linear mean ``exog @ params``.
    """
    if not np.isfinite(params).all():
        print("invalid params", params)

    # moment condition without instrument
    linear_pred = np.dot(exog, params)
    if not exp:
        return linear_pred
    # exponential mean function; clip so overflow does not produce inf
    return np.clip(np.exp(linear_pred), 0, 1e100)
def moment_exponential_mult(params, data, exp=True):
    """Residual for the multiplicative error model.

    The first column of ``data`` is the response; the remaining columns
    are the regressors.  With ``exp=True`` the residual is
    ``endog / exp(exog @ params) - 1``, otherwise ``endog - exog @ params``.
    """
    # multiplicative error model
    endog, exog = data[:, 0], data[:, 1:]

    if not np.isfinite(params).all():
        print("invalid params", params)

    # moment condition without instrument
    linear_pred = np.dot(exog, params)
    if exp:
        mean = np.clip(np.exp(linear_pred), 0, 1e100)  # avoid inf
        resid = endog / mean - 1
        if not np.isfinite(resid).all():
            print("invalid resid", resid)
    else:
        resid = endog - linear_pred
    return resid
#------------------- test classes
# copied from test_gmm.py, with changes
class CheckGMM:
    """Shared checks comparing a statsmodels GMM fit (``res1``) against
    reference results from Stata (``res2``).

    Subclasses set ``res1``/``res2`` in ``setup_class`` and may override
    the tolerance pairs below; each pair is ``[rtol, atol]``.
    """

    # default tolerance, overwritten by subclasses
    params_tol = [5e-6, 5e-6]
    bse_tol = [5e-7, 5e-7]
    q_tol = [5e-6, 1e-9]
    j_tol = [5e-5, 1e-9]

    def test_basic(self):
        res1, res2 = self.res1, self.res2
        # test both absolute and relative difference separately
        rtol, atol = self.params_tol
        assert_allclose(res1.params, res2.params, rtol=rtol, atol=0)
        assert_allclose(res1.params, res2.params, rtol=0, atol=atol)

        rtol, atol = self.bse_tol
        assert_allclose(res1.bse, res2.bse, rtol=rtol, atol=0)
        assert_allclose(res1.bse, res2.bse, rtol=0, atol=atol)

    def test_other(self):
        res1, res2 = self.res1, self.res2
        # BUG FIX: rtol/atol were passed swapped (rtol=atol, atol=rtol) in
        # the two assert_allclose calls below, inconsistent with how the
        # same [rtol, atol] tolerance pairs are used in test_basic.
        rtol, atol = self.q_tol
        assert_allclose(res1.q, res2.Q, rtol=rtol, atol=atol)
        rtol, atol = self.j_tol
        assert_allclose(res1.jval, res2.J, rtol=rtol, atol=atol)

        j, jpval, jdf = res1.jtest()
        # j and jval should be the same
        # NOTE(review): rtol=13 / atol=13 makes this assertion vacuous;
        # presumably 1e-13 was intended — confirm before tightening.
        assert_allclose(res1.jval, res2.J, rtol=13, atol=13)
        # pvalue is not saved in Stata results
        pval = stats.chi2.sf(res2.J, res2.J_df)
        #assert_allclose(jpval, pval, rtol=1e-4, atol=1e-6)
        assert_allclose(jpval, pval, rtol=rtol, atol=atol)
        assert_equal(jdf, res2.J_df)

    @pytest.mark.smoke
    def test_summary(self):
        # smoke test: summary table has one row per parameter plus a header
        res1 = self.res1
        summ = res1.summary()
        assert_equal(len(summ.tables[1]), len(res1.params) + 1)
class TestGMMAddOnestep(CheckGMM):
    """One-step nonlinear IV-GMM with the additive-error Poisson moment,
    compared against Stata reference results."""

    @classmethod
    def setup_class(cls):
        XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()

        endog_name = 'docvis'
        exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
        instrument_names = 'income ssiratio'.split() + XLISTEXOG2 + ['const']

        endog = DATA[endog_name]
        exog = DATA[exog_names]
        instrument = DATA[instrument_names]

        asarray = lambda x: np.asarray(x, float)
        endog, exog, instrument = lmap(asarray, [endog, exog, instrument])

        cls.bse_tol = [5e-6, 5e-7]
        # BUG FIX: this was a dead local ``q_tol = [0.04, 0]`` that never
        # reached the class attribute used by CheckGMM.test_other
        cls.q_tol = [0.04, 0]

        # compare to Stata default options, iterative GMM
        # with const at end
        start = OLS(np.log(endog+1), exog).fit().params
        nobs, k_instr = instrument.shape
        w0inv = np.dot(instrument.T, instrument) / nobs

        mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
        res0 = mod.fit(start, maxiter=0, inv_weights=w0inv,
                       optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
                       wargs={'centered':False})
        cls.res1 = res0

        from .results_gmm_poisson import results_addonestep as results
        cls.res2 = results
class TestGMMAddTwostep(CheckGMM):
    """Two-step nonlinear IV-GMM with the additive-error Poisson moment,
    compared against Stata reference results."""

    @classmethod
    def setup_class(cls):
        cls.bse_tol = [5e-6, 5e-7]

        extra_exog = 'aget aget2 educyr actlim totchr'.split()
        endog_name = 'docvis'
        exog_names = 'private medicaid'.split() + extra_exog + ['const']
        instrument_names = 'income ssiratio'.split() + extra_exog + ['const']

        # pull the columns out of the shared DataFrame as float arrays
        endog, exog, instrument = (
            np.asarray(DATA[names], float)
            for names in (endog_name, exog_names, instrument_names)
        )

        # compare to Stata default options, iterative GMM, const at end;
        # starting values come from a log-linear OLS fit
        start = OLS(np.log(endog + 1), exog).fit().params
        nobs = instrument.shape[0]
        w0inv = np.dot(instrument.T, instrument) / nobs

        mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
        cls.res1 = mod.fit(start, maxiter=2, inv_weights=w0inv,
                           optim_method='bfgs',
                           optim_args={'gtol':1e-8, 'disp': 0},
                           wargs={'centered':False},
                           has_optimal_weights=False)

        from .results_gmm_poisson import results_addtwostep as results
        cls.res2 = results
class TestGMMMultOnestep(CheckGMM):
    #compares has_optimal_weights=True with Stata's has_optimal_weights=False
    """One-step nonlinear IV-GMM with the multiplicative-error moment,
    compared against Stata reference results."""

    @classmethod
    def setup_class(cls):
        # compare to Stata default options, twostep GMM
        cls.bse_tol = [5e-6, 5e-7]
        cls.q_tol = [0.04, 0]
        cls.j_tol = [0.04, 0]

        extra_exog = 'aget aget2 educyr actlim totchr'.split()
        exog_names = 'private medicaid'.split() + extra_exog + ['const']
        instrument_names = 'income medicaid ssiratio'.split() + extra_exog + ['const']

        endog = np.asarray(DATA['docvis'], float)
        exog = np.asarray(DATA[exog_names], float)
        instrument = np.asarray(DATA[instrument_names], float)

        # the multiplicative moment reads endog from the first data column,
        # so pack endog into the exog matrix and pass a dummy zero endog
        endog_ = np.zeros(len(endog))
        exog_ = np.column_stack((endog, exog))

        # compare to Stata default options, iterative GMM, const at end
        start = OLS(endog, exog).fit().params
        nobs = instrument.shape[0]
        w0inv = np.dot(instrument.T, instrument) / nobs

        mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
        cls.res1 = mod.fit(start, maxiter=0, inv_weights=w0inv,
                           optim_method='bfgs',
                           optim_args={'gtol':1e-8, 'disp': 0},
                           wargs={'centered':False},
                           has_optimal_weights=False)

        from .results_gmm_poisson import results_multonestep as results
        cls.res2 = results
class TestGMMMultTwostep(CheckGMM):
    #compares has_optimal_weights=True with Stata's has_optimal_weights=False
    """Two-step nonlinear IV-GMM with the multiplicative-error moment,
    compared against Stata reference results."""

    @classmethod
    def setup_class(cls):
        # compare to Stata default options, twostep GMM
        cls.bse_tol = [5e-6, 5e-7]

        extra_exog = 'aget aget2 educyr actlim totchr'.split()
        exog_names = 'private medicaid'.split() + extra_exog + ['const']
        instrument_names = 'income medicaid ssiratio'.split() + extra_exog + ['const']

        endog = np.asarray(DATA['docvis'], float)
        exog = np.asarray(DATA[exog_names], float)
        instrument = np.asarray(DATA[instrument_names], float)

        # the multiplicative moment reads endog from the first data column,
        # so pack endog into the exog matrix and pass a dummy zero endog
        endog_ = np.zeros(len(endog))
        exog_ = np.column_stack((endog, exog))

        # compare to Stata default options, iterative GMM, const at end
        start = OLS(endog, exog).fit().params
        nobs = instrument.shape[0]
        w0inv = np.dot(instrument.T, instrument) / nobs

        mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
        cls.res1 = mod.fit(start, maxiter=2, inv_weights=w0inv,
                           optim_method='bfgs',
                           optim_args={'gtol':1e-8, 'disp': 0},
                           wargs={'centered':False},
                           has_optimal_weights=False)

        from .results_gmm_poisson import results_multtwostep as results
        cls.res2 = results
class TestGMMMultTwostepDefault(CheckGMM):
    # compares my defaults with the same options in Stata
    # agreement is not very high, maybe vce(unadjusted) is different after all
    """Two-step nonlinear IV-GMM, multiplicative-error moment, run with the
    statsmodels default weight options and compared to Stata.

    Tolerances are much looser than in the sibling classes because the
    default weighting does not reproduce Stata exactly.
    """

    @classmethod
    def setup_class(cls):
        # compare to Stata default options, twostep GMM
        XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()

        endog_name = 'docvis'
        exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
        instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']

        endog = DATA[endog_name]
        exog = DATA[exog_names]
        instrument = DATA[instrument_names]

        asarray = lambda x: np.asarray(x, float)
        endog, exog, instrument = lmap(asarray, [endog, exog, instrument])

        # Need to add all data into exog
        # (the multiplicative moment reads endog from the first column of the
        # data matrix; the model's endog argument is a dummy array of zeros)
        endog_ = np.zeros(len(endog))
        exog_ = np.column_stack((endog, exog))

        cls.bse_tol = [0.004, 5e-4]
        cls.params_tol = [5e-5, 5e-5]
        # compare to Stata default options, iterative GMM
        # with const at end
        start = OLS(endog, exog).fit().params
        nobs, k_instr = instrument.shape
        w0inv = np.dot(instrument.T, instrument) / nobs

        mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
        res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
                       optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
                       #wargs={'centered':True}, has_optimal_weights=True
                       )
        cls.res1 = res0

        from .results_gmm_poisson import results_multtwostepdefault as results
        cls.res2 = results
class TestGMMMultTwostepCenter(CheckGMM):
    #compares my defaults with the same options in Stata
    """Two-step nonlinear IV-GMM, multiplicative-error moment, with
    centered moment weights, compared against Stata reference results."""

    @classmethod
    def setup_class(cls):
        # compare to Stata default options, twostep GMM
        XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()

        endog_name = 'docvis'
        exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
        instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']

        endog = DATA[endog_name]
        exog = DATA[exog_names]
        instrument = DATA[instrument_names]

        asarray = lambda x: np.asarray(x, float)
        endog, exog, instrument = lmap(asarray, [endog, exog, instrument])

        # Need to add all data into exog
        # (the multiplicative moment reads endog from the first column of the
        # data matrix; the model's endog argument is a dummy array of zeros)
        endog_ = np.zeros(len(endog))
        exog_ = np.column_stack((endog, exog))

        cls.bse_tol = [5e-4, 5e-5]
        cls.params_tol = [5e-5, 5e-5]
        # BUG FIX: this was a dead local ``q_tol = [5e-5, 1e-8]`` that never
        # reached the class attribute used by CheckGMM.test_other
        cls.q_tol = [5e-5, 1e-8]

        # compare to Stata default options, iterative GMM
        # with const at end
        start = OLS(endog, exog).fit().params
        nobs, k_instr = instrument.shape
        w0inv = np.dot(instrument.T, instrument) / nobs

        mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
        res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
                       optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
                       wargs={'centered':True}, has_optimal_weights=False
                       )
        cls.res1 = res0

        from .results_gmm_poisson import results_multtwostepcenter as results
        cls.res2 = results

    def test_more(self):
        """Check the jtest p-value against Stata's ``overid`` output."""
        # Stata `overid` reference values: J = 0.940091427212973, J_df = 1
        J_p = 0.332254330027383
        j, jpval, jdf = self.res1.jtest()
        assert_allclose(jpval, J_p, rtol=5e-5, atol=0)
if __name__ == '__main__':
    # run the checks directly without pytest; TestGMMMultOnestep.test_other
    # is skipped, matching the original commented-out call
    cases = [
        (TestGMMAddOnestep, True),
        (TestGMMAddTwostep, True),
        (TestGMMMultOnestep, False),
        (TestGMMMultTwostep, True),
        (TestGMMMultTwostepDefault, True),
        (TestGMMMultTwostepCenter, True),
    ]
    for case_cls, run_other in cases:
        tt = case_cls()
        tt.setup_class()
        tt.test_basic()
        if run_other:
            tt.test_other()
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@sandbox@regression@tests@test_gmm_poisson.py@.PATH_END.py
|
{
"filename": "_xhoverformat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/_xhoverformat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XhoverformatValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``surface.xhoverformat`` attribute."""

    def __init__(self, plotly_name="xhoverformat", parent_name="surface", **kwargs):
        # default edit type is "calc" unless the caller overrides it
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@_xhoverformat.py@.PATH_END.py
|
{
"filename": "demo_plotting_BHNS_distributions_for_LVK.ipynb",
"repo_name": "FloorBroekgaarden/Double-Compact-Object-Mergers",
"repo_path": "Double-Compact-Object-Mergers_extracted/Double-Compact-Object-Mergers-main/demo_read_hdf5_file/demo_plotting_BHNS_distributions_for_LVK.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# import sys
# sys.path.append('../Scripts')
# from PostProcessingScripts import *
# import pandas as pd
# import string
# just to make the cells appear wider:
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import h5py as h5
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
import string
```
```python
```
# 3 "The Big file" with all the data points: Note that my data is weighted!
all lines of code that you might have to change are given with "# change this line! "
```python
# to obtain properties of ALL binaries simulated, do this:
DCOtype = 'BHNS' # You can change this line to 'BBH', 'BHNS' 'BNS', or 'ALL' (All DCOs) # change this line!
# add path to where the COMPASOutput.h5 file is stored.
# For you the part '/Volumes/Andromeda/DATA/AllDCO_bugfix/fiducial/' is probably different
path = '/Volumes/Andromeda/DATA/AllDCO_bugfix/fiducial/COMPASCompactOutput_'+ DCOtype +'_A.h5' # change this line!
fdata = h5.File(path)
# shows the different files within the hdf5 folder
print(fdata.keys())
print('this might take a little while, particularly if you are using the BBH')
# # This code below gets the COMPAS data and only the systems that are DCOs
# Data = COMPASData(path=path, lazyData=True, Mlower=5., \
# Mupper=150., binaryFraction=1)
# Data.setCOMPASDCOmask(types=DCOtype, withinHubbleTime=True, optimistic=False)
# Data.setCOMPASData()
# SeedsHubble = Data.seeds[Data.Hubble==True]
```
<KeysViewHDF5 ['RLOF', 'commonEnvelopes', 'doubleCompactObjects', 'formationChannels', 'supernovae', 'systems', 'weights_detected', 'weights_detectedPerRedshift', 'weights_intrinsic', 'weights_intrinsicPerRedshift']>
this might take a little while, particularly if you are using the BBH
### M1, M2 and Chirpmass in Msun
The most used parameters are quoted in the file "doubleCompactObjects", that describes many properties of the binaries that form the type of DCO merger (here, BHNS) below is an example:
```python
# see several parameters that are contained in this file
print(fdata['doubleCompactObjects'].keys())
```
<KeysViewHDF5 ['COCoreMassDCOFormation1', 'COCoreMassDCOFormation2', 'ECSNPrimary', 'ECSNSecondary', 'HeCoreMassDCOFormation1', 'HeCoreMassDCOFormation2', 'ID', 'M1', 'M1ZAMS', 'M2', 'M2ZAMS', 'Metallicity1', 'Metallicity2', 'PISNPrimary', 'PISNSecondary', 'PPISNPrimary', 'PPISNSecondary', 'PrimaryMTCase', 'RL1to2PostCEE', 'RL1to2PreCEE', 'RL2to1PostCEE', 'RL2to1PreCEE', 'RLOFSecondaryAfterCEE', 'SecondaryMTCase', 'SemiMajorAxisPostCEE', 'SemiMajorAxisPreCEE', 'USSNPrimary', 'USSNSecondary', 'coreMassDCOFormation1', 'coreMassDCOFormation2', 'doubleCommonEnvelopeFlag', 'drawnKick1', 'drawnKick2', 'eccentricityDCOFormation', 'eccentricityInitial', 'eccentricityPrior2ndSN', 'kickDirectionPower', 'mergesInHubbleTimeFlag', 'optimisticCEFlag', 'phiSupernova1', 'phiSupernova2', 'recycledPrimary', 'recycledSecondary', 'relativeVelocity2ndSN', 'samplingPhase', 'seed', 'separationDCOFormation', 'separationInitial', 'separationPrior2ndSN', 'sigmaKickBH', 'sigmaKickNS', 'stellarType1', 'stellarType2', 'tc', 'tform', 'thetaSupernova1', 'thetaSupernova2', 'totalMassDCOFormation1', 'totalMassDCOFormation2', 'weight']>
The weights e.g. for LIGO weighted are also given in these files, under "weights_detected", this takes into account the star formation history of all mergers and the sensitivity of a GW detector at design configuration (LVK).
```python
print(fdata['weights_detected'].keys())
```
<KeysViewHDF5 ['SEED', 'w_000', 'w_111', 'w_112', 'w_113', 'w_121', 'w_122', 'w_123', 'w_131', 'w_132', 'w_133', 'w_211', 'w_212', 'w_213', 'w_221', 'w_222', 'w_223', 'w_231', 'w_232', 'w_233', 'w_311', 'w_312', 'w_313', 'w_321', 'w_322', 'w_323', 'w_331', 'w_332', 'w_333']>
```python
# to obtain the properties of the selected DCOtype you simply do this:
fDCO = fdata['doubleCompactObjects']
#Stuff I need for cosmological integral
# at this moment we dont need to specify a mask, since our datafile has already taken this into account.
DCOmask = [True]*(len(fDCO['Metallicity1'][...].squeeze()))
print(DCOmask)
metallicitySystems = fDCO['Metallicity1'][...].squeeze()[DCOmask] # Metallicity at ZAMS
delayTimes = fDCO['tform'][...].squeeze()[DCOmask] + \
fDCO['tc'][...].squeeze()[DCOmask] # delay time
tc = fDCO['tc'][...].squeeze()[DCOmask] # coalescence time (or merger time)
M1 = fDCO['M1'][...].squeeze()[DCOmask] # Compact object mass of star 1
M2 = fDCO['M2'][...].squeeze()[DCOmask] # Compact object mass of star 2
m1zams = fDCO['M1ZAMS'][...].squeeze()[DCOmask] # Mass at ZAMS of star 1
m2zams = fDCO['M2ZAMS'][...].squeeze()[DCOmask] # Mass at ZAMS of star 2
separationzams = fDCO['separationInitial'][...].squeeze()[DCOmask] # separation at ZAMS of binary
# we will use for this demo the weights from Star formation history model xyz = '000', these can be obtained using:
weights = fdata['weights_detected']['w_000']
# change this to formation weights by uncommenting the following line:
# weights = fDCO['weight']
# other models can be chosen too. (there are 28 options currently)
def chirpmass(m1, m2):
numer = (m1*m2)**(3./5)
denom = (m1+m2)**(1./5)
return numer/denom
# and you can plot properties, e.g., the chirpmass distribution:
chirpmass =chirpmass(m1=M1, m2=M2)
plt.hist(chirpmass, bins=100, weights=weights)
plt.xlabel('chirpmass [Msun]')
plt.ylabel('weighted rate for LVK network at design sensitivity')
plt.show()
```
IOPub data rate exceeded.
The notebook server will temporarily stop sending output
to the client in order to avoid crashing it.
To change this limit, set the config variable
`--NotebookApp.iopub_data_rate_limit`.
Current values:
NotebookApp.iopub_data_rate_limit=1000000.0 (bytes/sec)
NotebookApp.rate_limit_window=3.0 (secs)

### M1 more massive, M2 least massive:
```python
def obtainM1BHandM2BHassymetric(m1, m2):
m1bh, m2bh = np.zeros_like(m1), np.zeros_like(m1)
maskm1heavier = ( m1 >= m2)
maskm2heavier = (m1 < m2)
m1bh[maskm1heavier] = m1[maskm1heavier]
m1bh[maskm2heavier] = m2[maskm2heavier]
m2bh[maskm1heavier] = m2[maskm1heavier]
m2bh[maskm2heavier] = m1[maskm2heavier]
return m1bh, m2bh # m1bh has all the heaviest systems
M_most_massive, M_least_massive = obtainM1BHandM2BHassymetric(m1=M1, m2=M2)
plt.scatter(M_most_massive, M_least_massive)
plt.xlabel('Most massive [Msun]')
plt.ylabel('Least massive [Msun]')
plt.show()
```

### Metallicity
the metallicity of each data point can be obtained with "metallicitySystems"
I used a total of 53 different metallicity bins, quoted at the bottom when printing "Data.metallicityGrid"
```python
# metallicitySystems = metallicitySystems
plt.hist(metallicitySystems, bins=100, weights=weights)
plt.xlabel('metallicity ')
plt.ylabel('weighted rate in LVK detector')
plt.show()
print('this mostly just shows my metallicity bins and where BHNS are originating from')
```

this mostly just shows my metallicity bins and where BHNS are originating from
### Delay time of each simulated data point in Myr
```python
plt.hist(delayTimes, bins=100, weights=weights)
plt.xlabel('Delay time [Myr] ')
plt.ylabel('weighted rate in LVK detector')
plt.show()
```

```python
```
### TO do?: we now looked at BHNS distributions and weighted them with the LVK detector weights, how do these distributions change when instead looking at intrinsic weighted distributions?
### try to do this by changing the weights obtained from fdata['weights_detected'] to fdata['weights_intrinsic']
```python
```
```python
```
|
FloorBroekgaardenREPO_NAMEDouble-Compact-Object-MergersPATH_START.@Double-Compact-Object-Mergers_extracted@Double-Compact-Object-Mergers-main@demo_read_hdf5_file@demo_plotting_BHNS_distributions_for_LVK.ipynb@.PATH_END.py
|
{
"filename": "embed.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipywidgets/py3/ipywidgets/embed.py",
"type": "Python"
}
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
#
#
# Parts of this code is from IPyVolume (24.05.2017), used here under
# this copyright and license with permission from the author
# (see https://github.com/jupyter-widgets/ipywidgets/pull/1387)
"""
Functions for generating embeddable HTML/javascript of a widget.
"""
import json
import re
from .widgets import Widget, DOMWidget, widget as widget_module
from .widgets.widget_link import Link
from .widgets.docutils import doc_subst
from ._version import __html_manager_version__
snippet_template = """
{load}
<script type="application/vnd.jupyter.widget-state+json">
{json_data}
</script>
{widget_views}
"""
load_template = """<script src="{embed_url}"{use_cors}></script>"""
load_requirejs_template = """
<!-- Load require.js. Delete this if your page already loads require.js -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" crossorigin="anonymous"></script>
<script src="{embed_url}"{use_cors}></script>
"""
requirejs_snippet_template = """
<script type="application/vnd.jupyter.widget-state+json">
{json_data}
</script>
{widget_views}
"""
html_template = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{title}</title>
</head>
<body>
{snippet}
</body>
</html>
"""
widget_view_template = """<script type="application/vnd.jupyter.widget-view+json">
{view_spec}
</script>"""
DEFAULT_EMBED_SCRIPT_URL = 'https://cdn.jsdelivr.net/npm/@jupyter-widgets/html-manager@%s/dist/embed.js'%__html_manager_version__
DEFAULT_EMBED_REQUIREJS_URL = 'https://cdn.jsdelivr.net/npm/@jupyter-widgets/html-manager@%s/dist/embed-amd.js'%__html_manager_version__
_doc_snippets = {}
_doc_snippets['views_attribute'] = """
views: widget or collection of widgets or None
The widgets to include views for. If None, all DOMWidgets are
included (not just the displayed ones).
"""
_doc_snippets['embed_kwargs'] = """
drop_defaults: boolean
Whether to drop default values from the widget states.
state: dict or None (default)
The state to include. When set to None, the state of all widgets
know to the widget manager is included. Otherwise it uses the
passed state directly. This allows for end users to include a
smaller state, under the responsibility that this state is
sufficient to reconstruct the embedded views.
indent: integer, string or None
The indent to use for the JSON state dump. See `json.dumps` for
full description.
embed_url: string or None
Allows for overriding the URL used to fetch the widget manager
for the embedded code. This defaults (None) to a `jsDelivr` CDN url.
requirejs: boolean (True)
Enables the requirejs-based embedding, which allows for custom widgets.
If True, the embed_url should point to an AMD module.
cors: boolean (True)
If True avoids sending user credentials while requesting the scripts.
When opening an HTML file from disk, some browsers may refuse to load
the scripts.
"""
def _find_widget_refs_by_state(widget, state):
    """Yield every widget referenced by the attributes listed in *state*.

    Only direct references and references inside flat list/tuple/dict
    values are found; more deeply nested structures are not searched.
    """
    # Snapshot the keys so callers may mutate ``state`` during iteration:
    for key in tuple(state):
        value = getattr(widget, key)
        if isinstance(value, Widget):
            # trivial case: a direct reference to another widget
            yield value
        elif isinstance(value, (list, tuple)):
            # buried references in known, JSON-able structures
            yield from (item for item in value if isinstance(item, Widget))
        elif isinstance(value, dict):
            yield from (item for item in value.values()
                        if isinstance(item, Widget))
def _get_recursive_state(widget, store=None, drop_defaults=False):
    """Collect the embed state of *widget* and, transitively, of every
    widget its serialized state refers to.

    Returns the ``store`` dict mapping model ids to embed states.
    """
    if store is None:
        store = {}
    embed_state = widget._get_embed_state(drop_defaults=drop_defaults)
    store[widget.model_id] = embed_state

    # Recurse only into widgets reachable from the serialized state
    # (excluded attributes are therefore not followed):
    for ref in _find_widget_refs_by_state(widget, embed_state['state']):
        if ref.model_id not in store:
            _get_recursive_state(ref, store, drop_defaults=drop_defaults)
    return store
def add_resolved_links(store, drop_defaults):
    """Adds the state of any link models between two models in store.

    Scans all live widgets and, for each ``Link`` not already in ``store``
    whose source and target widgets are both present, adds the link's embed
    state so the connection survives embedding.
    """
    for widget_id, widget in widget_module._instances.items(): # go over all widgets
        if isinstance(widget, Link) and widget_id not in store:
            if widget.source[0].model_id in store and widget.target[0].model_id in store:
                store[widget.model_id] = widget._get_embed_state(drop_defaults=drop_defaults)
def dependency_state(widgets, drop_defaults=True):
    """Get the state of all widgets specified, and their dependencies.

    This uses a simple dependency finder, including:
     - any widget directly referenced in the state of an included widget
     - any widget in a list/tuple attribute in the state of an included widget
     - any widget in a dict attribute in the state of an included widget
     - any jslink/jsdlink between two included widgets

    What this algorithm does not do:
     - Find widget references in nested list/dict structures
     - Find widget references in other types of attributes

    Note that this searches the state of the widgets for references, so if
    a widget reference is not included in the serialized state, it won't
    be considered as a dependency.

    Parameters
    ----------
    widgets: single widget or list of widgets.
        This function will return the state of every widget mentioned
        and of all their dependencies.
    drop_defaults: boolean
        Whether to drop default values from the widget states.

    Returns
    -------
    A dictionary with the state of the widgets and any widget they
    depend on.
    """
    # collect the state of all relevant widgets
    if widgets is None:
        # Get state of all widgets, no smart resolution needed.
        state = Widget.get_manager_state(drop_defaults=drop_defaults, widgets=None)['state']
    else:
        try:
            widgets[0]
        except (IndexError, TypeError):
            # a single widget was passed in: wrap it in a list
            widgets = [widgets]
        state = {}
        for widget in widgets:
            _get_recursive_state(widget, state, drop_defaults)
        # Add any links between included widgets:
        add_resolved_links(state, drop_defaults)
    return state
@doc_subst(_doc_snippets)
def embed_data(views, drop_defaults=True, state=None):
    """Gets data for embedding.

    Use this to get the raw data for embedding if you have special
    formatting needs.

    Parameters
    ----------
    {views_attribute}
    drop_defaults: boolean
        Whether to drop default values from the widget states.
    state: dict or None (default)
        The state to include. When set to None, the state of all widgets
        known to the widget manager is included. Otherwise it uses the
        passed state directly. This allows for end users to include a
        smaller state, under the responsibility that this state is
        sufficient to reconstruct the embedded views.

    Returns
    -------
    A dictionary with the following entries:
        manager_state: dict of the widget manager state data
        view_specs: a list of widget view specs
    """
    if views is None:
        # default: include a view for every live DOMWidget
        views = [w for w in widget_module._instances.values() if isinstance(w, DOMWidget)]
    else:
        try:
            views[0]
        except (IndexError, TypeError):
            # a single widget was passed in: wrap it in a list
            views = [views]

    if state is None:
        # Get state of all known widgets
        state = Widget.get_manager_state(drop_defaults=drop_defaults, widgets=None)['state']

    # Rely on ipywidget to get the default values
    json_data = Widget.get_manager_state(widgets=[])
    # but plug in our own state
    json_data['state'] = state

    view_specs = [w.get_view_spec() for w in views]

    return dict(manager_state=json_data, view_specs=view_specs)
script_escape_re = re.compile(r'<(script|/script|!--)', re.IGNORECASE)


def escape_script(s):
    """Escape a string that will be the content of an HTML script tag.

    The opening bracket of ``<script``, ``</script`` and ``<!--`` is
    replaced (case-insensitively) with the unicode escape ``\\u003c``,
    following the restrictions documented at
    https://html.spec.whatwg.org/multipage/scripting.html#restrictions-for-contents-of-script-elements
    Only these three sequences are rewritten so that most other content
    involving ``<`` stays readable.
    """
    return script_escape_re.sub(lambda m: '\\u003c' + m.group(1), s)
@doc_subst(_doc_snippets)
def embed_snippet(views,
                  drop_defaults=True,
                  state=None,
                  indent=2,
                  embed_url=None,
                  requirejs=True,
                  cors=True
                  ):
    """Return a snippet that can be embedded in an HTML file.
    Parameters
    ----------
    {views_attribute}
    {embed_kwargs}
    Returns
    -------
    A unicode string with an HTML snippet containing several `<script>` tags.
    """
    data = embed_data(views, drop_defaults=drop_defaults, state=state)
    # One <script> tag per view spec; escape so the JSON payload cannot
    # prematurely close the surrounding tag.
    widget_views = '\n'.join(
        widget_view_template.format(view_spec=escape_script(json.dumps(view_spec)))
        for view_spec in data['view_specs']
    )
    if embed_url is None:
        # Pick the default CDN URL matching the requested loader flavour.
        embed_url = DEFAULT_EMBED_REQUIREJS_URL if requirejs else DEFAULT_EMBED_SCRIPT_URL
    load = load_requirejs_template if requirejs else load_template
    use_cors = ' crossorigin="anonymous"' if cors else ''
    values = {
        'load': load.format(embed_url=embed_url, use_cors=use_cors),
        'json_data': escape_script(json.dumps(data['manager_state'], indent=indent)),
        'widget_views': widget_views,
    }
    return snippet_template.format(**values)
@doc_subst(_doc_snippets)
def embed_minimal_html(fp, views, title='IPyWidget export', template=None, **kwargs):
    """Write a minimal HTML file with widget views embedded.
    Parameters
    ----------
    fp: filename or file-like object
        The file to write the HTML output to.
    {views_attribute}
    title: title of the html page.
    template: Template in which to embed the widget state.
        This should be a Python string with placeholders
        `{{title}}` and `{{snippet}}`. The `{{snippet}}` placeholder
        will be replaced by all the widgets.
    {embed_kwargs}
    """
    snippet = embed_snippet(views, **kwargs)
    # Fall back to the module's default page template when none is given.
    page_template = html_template if template is None else template
    html_code = page_template.format(title=title, snippet=snippet)
    if hasattr(fp, 'write'):
        # fp is a writable file-like object.
        fp.write(html_code)
    else:
        # Otherwise interpret fp as a filename.
        with open(fp, "w") as f:
            f.write(html_code)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipywidgets@py3@ipywidgets@embed.py@.PATH_END.py
|
{
"filename": "set-individual-constraints__div.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage/set-individual-constraints__div.md",
"type": "Markdown"
}
|
Set constraints individually for each feature as a string (the number of features is n).
Format
```
"(<constraint_0>, <constraint_2>, .., <constraint_n-1>)"
```
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage@set-individual-constraints__div.md@.PATH_END.py
|
{
"filename": "synchrotron_self_compton.py",
"repo_name": "cosimoNigro/agnpy",
"repo_path": "agnpy_extracted/agnpy-master/agnpy/compton/synchrotron_self_compton.py",
"type": "Python"
}
|
# module containing the synchrotron self Compton radiative process
import numpy as np
from astropy.constants import c, sigma_T
import astropy.units as u
from .kernels import isotropic_kernel
from ..synchrotron import Synchrotron
from ..utils.math import (
axes_reshaper,
gamma_e_to_integrate,
nu_to_integrate,
)
from ..utils.conversion import nu_to_epsilon_prime
from ..radiative_process import RadiativeProcess
__all__ = ["SynchrotronSelfCompton"]
class SynchrotronSelfCompton(RadiativeProcess):
    """Class for synchrotron self-Compton (SSC) radiation computation.

    Parameters
    ----------
    blob : :class:`~agnpy.emission_region.Blob`
        emission region containing the electron distribution that produces
        (and then Compton-upscatters) the synchrotron photon target
    ssa : bool
        whether to take synchrotron self-absorption into account when
        computing the photon target (default = False)
    integrator : func
        function to be used for integration (default = `np.trapz`)
    """
    def __init__(self, blob, ssa=False, integrator=np.trapz):
        self.blob = blob
        self.ssa = ssa
        self.integrator = integrator
    @staticmethod
    def evaluate_sed_flux(
        nu,
        z,
        d_L,
        delta_D,
        B,
        R_b,
        n_e,
        *args,
        ssa=False,
        integrator=np.trapz,
        gamma=gamma_e_to_integrate,
    ):
        r"""Evaluates the flux SED (:math:`\nu F_{\nu}`) for synchrotron self-Compton,
        for a general set of model parameters. Eq. 9 in [Finke2008]_.
        **Note** parameters after \*args need to be passed with a keyword
        Parameters
        ----------
        nu : :class:`~astropy.units.Quantity`
            array of frequencies, in Hz, to compute the sed
            **note** these are observed frequencies (observer frame)
        z : float
            redshift of the source
        d_L : :class:`~astropy.units.Quantity`
            luminosity distance of the source
        delta_D : float
            Doppler factor of the relativistic outflow
        B : :class:`~astropy.units.Quantity`
            magnetic field in the blob
        R_b : :class:`~astropy.units.Quantity`
            size of the emitting region (spherical blob assumed)
        n_e : :class:`~agnpy.spectra.ElectronDistribution`
            electron energy distribution
        \*args
            parameters of the electron energy distribution (k_e, p, ...)
        ssa : bool
            whether to consider or not the self-absorption, default false
        integrator : func
            which function to use for integration, default `numpy.trapz`
        gamma : :class:`~numpy.ndarray`
            array of Lorentz factor over which to integrate the electron
            distribution
        Returns
        -------
        :class:`~astropy.units.Quantity`
            array of the SED values corresponding to each frequency
        """
        # conversions
        # synchrotron frequencies to be integrated over
        epsilon = nu_to_epsilon_prime(nu_to_integrate, z, delta_D)
        # frequencies of the final sed
        epsilon_s = nu_to_epsilon_prime(nu, z, delta_D)
        # target photon field: the blob's own synchrotron emission
        sed_synch = Synchrotron.evaluate_sed_flux(
            nu_to_integrate,
            z,
            d_L,
            delta_D,
            B,
            R_b,
            n_e,
            *args,
            ssa=ssa,
            integrator=integrator,
            gamma=gamma,
        )
        # Eq. 8 [Finke2008]_
        u_synch = (3 * np.power(d_L, 2) * sed_synch) / (
            c * np.power(R_b, 2) * np.power(delta_D, 4) * epsilon
        )
        # factor 3 / 4 accounts for averaging in a sphere
        # not included in Dermer and Finke's papers
        u_synch *= 3 / 4
        # multidimensional integration
        _gamma, _epsilon, _epsilon_s = axes_reshaper(gamma, epsilon, epsilon_s)
        V_b = 4 / 3 * np.pi * np.power(R_b, 3)
        N_e = V_b * n_e.evaluate(_gamma, *args)
        # reshape u as epsilon
        _u_synch = np.reshape(u_synch, (1, u_synch.size, 1))
        # integrate
        kernel = isotropic_kernel(_gamma, _epsilon, _epsilon_s)
        integrand = (
            _u_synch / np.power(_epsilon, 2) * N_e / np.power(_gamma, 2) * kernel
        )
        # first over the Lorentz factors, then over the target photon energies
        integral_gamma = integrator(integrand, gamma, axis=0)
        integral_epsilon = integrator(integral_gamma, epsilon, axis=0).reshape(epsilon_s.shape)
        emissivity = 3 / 4 * c * sigma_T * np.power(epsilon_s, 2) * integral_epsilon
        prefactor = np.power(delta_D, 4) / (4 * np.pi * np.power(d_L, 2))
        return (prefactor * emissivity).to("erg cm-2 s-1")
    def sed_flux(self, nu):
        """Evaluates the SSC flux SED for a SynchrotronSelfCompton
        object built from a Blob."""
        return self.evaluate_sed_flux(
            nu,
            self.blob.z,
            self.blob.d_L,
            self.blob.delta_D,
            self.blob.B,
            self.blob.R_b,
            self.blob.n_e,
            *self.blob.n_e.parameters,
            ssa=self.ssa,
            integrator=self.integrator,
            gamma=self.blob.gamma_e,
        )
    def sed_luminosity(self, nu):
        r"""Evaluates the SSC luminosity SED
        :math:`\nu L_{\nu} \, [\mathrm{erg}\,\mathrm{s}^{-1}]`
        for a SynchrotronSelfCompton object built from a blob."""
        sphere = 4 * np.pi * np.power(self.blob.d_L, 2)
        return (sphere * self.sed_flux(nu)).to("erg s-1")
    def sed_peak_flux(self, nu):
        """provided a grid of frequencies nu, returns the peak flux of the SED
        """
        return self.sed_flux(nu).max()
    def sed_peak_nu(self, nu):
        """provided a grid of frequencies nu, returns the frequency at which the SED peaks
        """
        idx_max = self.sed_flux(nu).argmax()
        return nu[idx_max]
|
cosimoNigroREPO_NAMEagnpyPATH_START.@agnpy_extracted@agnpy-master@agnpy@compton@synchrotron_self_compton.py@.PATH_END.py
|
{
"filename": "test_gradient_boosting.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py",
"type": "Python"
}
|
import copyreg
import io
import pickle
import re
import warnings
from unittest.mock import Mock
import joblib
import numpy as np
import pytest
from joblib.numpy_pickle import NumpyPickler
from numpy.testing import assert_allclose, assert_array_equal
import sklearn
from sklearn._loss.loss import (
AbsoluteError,
HalfBinomialLoss,
HalfSquaredError,
PinballLoss,
)
from sklearn.base import BaseEstimator, TransformerMixin, clone, is_regressor
from sklearn.compose import make_column_transformer
from sklearn.datasets import make_classification, make_low_rank_matrix, make_regression
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
from sklearn.exceptions import NotFittedError
from sklearn.metrics import get_scorer, mean_gamma_deviance, mean_poisson_deviance
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler, OneHotEncoder
from sklearn.utils import shuffle
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._testing import _convert_container
from sklearn.utils.fixes import _IS_32BIT
n_threads = _openmp_effective_n_threads()
# Shared toy datasets reused by many tests below (fixed seed for determinism).
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
# Three-class problem for multiclass-specific tests.
X_multi_classification, y_multi_classification = make_classification(
    n_classes=3, n_informative=3, random_state=0
)
def _make_dumb_dataset(n_samples):
"""Make a dumb dataset to test early stopping."""
rng = np.random.RandomState(42)
X_dumb = rng.randn(n_samples, 1)
y_dumb = (X_dumb[:, 0] > 0).astype("int64")
return X_dumb, y_dumb
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {"interaction_cst": [0, 1]},
            "Interaction constraints must be a sequence of tuples or lists",
        ),
        (
            {"interaction_cst": [{0, 9999}]},
            r"Interaction constraints must consist of integer indices in \[0,"
            r" n_features - 1\] = \[.*\], specifying the position of features,",
        ),
        (
            {"interaction_cst": [{-1, 0}]},
            r"Interaction constraints must consist of integer indices in \[0,"
            r" n_features - 1\] = \[.*\], specifying the position of features,",
        ),
        (
            {"interaction_cst": [{0.5}]},
            r"Interaction constraints must consist of integer indices in \[0,"
            r" n_features - 1\] = \[.*\], specifying the position of features,",
        ),
    ],
)
def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
    """Invalid interaction_cst values must raise ValueError at fit time."""
    with pytest.raises(ValueError, match=err_msg):
        GradientBoosting(**params).fit(X, y)
@pytest.mark.parametrize(
    "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
    [
        ("neg_mean_squared_error", 0.1, True, 5, 1e-7),  # use scorer
        ("neg_mean_squared_error", None, True, 5, 1e-1),  # use scorer on train
        (None, 0.1, True, 5, 1e-7),  # same with default scorer
        (None, None, True, 5, 1e-1),
        ("loss", 0.1, True, 5, 1e-7),  # use loss
        ("loss", None, True, 5, 1e-1),  # use loss on training data
        (None, None, False, 5, 0.0),  # no early stopping
    ],
)
def test_early_stopping_regression(
    scoring, validation_fraction, early_stopping, n_iter_no_change, tol
):
    """When enabled, early stopping must halt a regressor before max_iter."""
    max_iter = 200
    X, y = make_regression(n_samples=50, random_state=0)
    gb = HistGradientBoostingRegressor(
        verbose=1,  # just for coverage
        min_samples_leaf=5,  # easier to overfit fast
        scoring=scoring,
        tol=tol,
        early_stopping=early_stopping,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0,
    )
    gb.fit(X, y)
    if early_stopping:
        # At least n_iter_no_change rounds are always run before stopping.
        assert n_iter_no_change <= gb.n_iter_ < max_iter
    else:
        assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
    "data",
    (
        make_classification(n_samples=30, random_state=0),
        make_classification(
            n_samples=30, n_classes=3, n_clusters_per_class=1, random_state=0
        ),
    ),
)
@pytest.mark.parametrize(
    "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
    [
        ("accuracy", 0.1, True, 5, 1e-7),  # use scorer
        ("accuracy", None, True, 5, 1e-1),  # use scorer on training data
        (None, 0.1, True, 5, 1e-7),  # same with default scorer
        (None, None, True, 5, 1e-1),
        ("loss", 0.1, True, 5, 1e-7),  # use loss
        ("loss", None, True, 5, 1e-1),  # use loss on training data
        (None, None, False, 5, 0.0),  # no early stopping
    ],
)
def test_early_stopping_classification(
    data, scoring, validation_fraction, early_stopping, n_iter_no_change, tol
):
    """When enabled, early stopping must halt a classifier before max_iter."""
    max_iter = 50
    X, y = data
    gb = HistGradientBoostingClassifier(
        verbose=2,  # just for coverage
        min_samples_leaf=5,  # easier to overfit fast
        scoring=scoring,
        tol=tol,
        early_stopping=early_stopping,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0,
    )
    gb.fit(X, y)
    if early_stopping is True:
        # At least n_iter_no_change rounds are always run before stopping.
        assert n_iter_no_change <= gb.n_iter_ < max_iter
    else:
        assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, *_make_dumb_dataset(10000)),
        (HistGradientBoostingClassifier, *_make_dumb_dataset(10001)),
        (HistGradientBoostingRegressor, *_make_dumb_dataset(10000)),
        (HistGradientBoostingRegressor, *_make_dumb_dataset(10001)),
    ],
)
def test_early_stopping_default(GradientBoosting, X, y):
    """Default early stopping must kick in iff the dataset exceeds 10000 samples."""
    # Test that early stopping is enabled by default if and only if there
    # are more than 10000 samples
    gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1)
    gb.fit(X, y)
    if X.shape[0] > 10000:
        assert gb.n_iter_ < gb.max_iter
    else:
        assert gb.n_iter_ == gb.max_iter
@pytest.mark.parametrize(
    "scores, n_iter_no_change, tol, stopping",
    [
        ([], 1, 0.001, False),  # not enough iterations
        ([1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 1, 1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 2, 3, 4, 5, 6], 5, 0.001, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0.0, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0.999, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False),  # significant improvement
        ([1] * 6, 5, 0.0, True),  # no significant improvement
        ([1] * 6, 5, 0.001, True),  # no significant improvement
        ([1] * 6, 5, 5, True),  # no significant improvement
    ],
)
def test_should_stop(scores, n_iter_no_change, tol, stopping):
    """The private _should_stop helper must return the expected decision."""
    estimator = HistGradientBoostingClassifier(
        n_iter_no_change=n_iter_no_change, tol=tol
    )
    assert estimator._should_stop(scores) == stopping
def test_absolute_error():
    """Smoke-test fitting with loss='absolute_error' (coverage only)."""
    X, y = make_regression(n_samples=500, random_state=0)
    model = HistGradientBoostingRegressor(
        loss="absolute_error", random_state=0
    ).fit(X, y)
    assert model.score(X, y) > 0.9
def test_absolute_error_sample_weight():
    """Fitting with absolute_error loss and sample_weight must not error."""
    # non regression test for issue #19400
    # make sure no error is thrown during fit of
    # HistGradientBoostingRegressor with absolute_error loss function
    # and passing sample_weight
    rng = np.random.RandomState(0)
    n_samples = 100
    X = rng.uniform(-1, 1, size=(n_samples, 2))
    y = rng.uniform(-1, 1, size=n_samples)
    sample_weight = rng.uniform(0, 1, size=n_samples)
    gbdt = HistGradientBoostingRegressor(loss="absolute_error")
    gbdt.fit(X, y, sample_weight=sample_weight)
@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 1.0, 2.0])])
def test_gamma_y_positive(y):
# Test that ValueError is raised if any y_i <= 0.
err_msg = r"loss='gamma' requires strictly positive y."
gbdt = HistGradientBoostingRegressor(loss="gamma", random_state=0)
with pytest.raises(ValueError, match=err_msg):
gbdt.fit(np.zeros(shape=(len(y), 1)), y)
def test_gamma():
    """Gamma-loss HGBT must beat squared-error HGBT and a dummy in Gamma deviance."""
    # For a Gamma distributed target, we expect an HGBT trained with the Gamma deviance
    # (loss) to give better results than an HGBT with any other loss function, measured
    # in out-of-sample Gamma deviance as metric/score.
    # Note that squared error could potentially predict negative values which is
    # invalid (np.inf) for the Gamma deviance. A Poisson HGBT (having a log link)
    # does not have that defect.
    # Important note: It seems that a Poisson HGBT almost always has better
    # out-of-sample performance than the Gamma HGBT, measured in Gamma deviance.
    # LightGBM shows the same behaviour. Hence, we only compare to a squared error
    # HGBT, but not to a Poisson deviance HGBT.
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 100, 20
    X = make_low_rank_matrix(
        n_samples=n_train + n_test,
        n_features=n_features,
        random_state=rng,
    )
    # We create a log-linear Gamma model. This gives y.min ~ 1e-2, y.max ~ 1e2
    coef = rng.uniform(low=-10, high=20, size=n_features)
    # Numpy parametrizes gamma(shape=k, scale=theta) with mean = k * theta and
    # variance = k * theta^2. We parametrize it instead with mean = exp(X @ coef)
    # and variance = dispersion * mean^2 by setting k = 1 / dispersion,
    # theta = dispersion * mean.
    dispersion = 0.5
    y = rng.gamma(shape=1 / dispersion, scale=dispersion * np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=rng
    )
    gbdt_gamma = HistGradientBoostingRegressor(loss="gamma", random_state=123)
    gbdt_mse = HistGradientBoostingRegressor(loss="squared_error", random_state=123)
    dummy = DummyRegressor(strategy="mean")
    for model in (gbdt_gamma, gbdt_mse, dummy):
        model.fit(X_train, y_train)
    for X, y in [(X_train, y_train), (X_test, y_test)]:
        loss_gbdt_gamma = mean_gamma_deviance(y, gbdt_gamma.predict(X))
        # We restrict the squared error HGBT to predict at least the minimum seen y at
        # train time to make it strictly positive.
        loss_gbdt_mse = mean_gamma_deviance(
            y, np.maximum(np.min(y_train), gbdt_mse.predict(X))
        )
        loss_dummy = mean_gamma_deviance(y, dummy.predict(X))
        assert loss_gbdt_gamma < loss_dummy
        assert loss_gbdt_gamma < loss_gbdt_mse
@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
def test_quantile_asymmetric_error(quantile):
"""Test quantile regression for asymmetric distributed targets."""
n_samples = 10_000
rng = np.random.RandomState(42)
# take care that X @ coef + intercept > 0
X = np.concatenate(
(
np.abs(rng.randn(n_samples)[:, None]),
-rng.randint(2, size=(n_samples, 1)),
),
axis=1,
)
intercept = 1.23
coef = np.array([0.5, -2])
# For an exponential distribution with rate lambda, e.g. exp(-lambda * x),
# the quantile at level q is:
# quantile(q) = - log(1 - q) / lambda
# scale = 1/lambda = -quantile(q) / log(1-q)
y = rng.exponential(
scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples
)
model = HistGradientBoostingRegressor(
loss="quantile",
quantile=quantile,
max_iter=25,
random_state=0,
max_leaf_nodes=10,
).fit(X, y)
assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2)
pinball_loss = PinballLoss(quantile=quantile)
loss_true_quantile = pinball_loss(y, X @ coef + intercept)
loss_pred_quantile = pinball_loss(y, model.predict(X))
# we are overfitting
assert loss_pred_quantile <= loss_true_quantile
@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])
def test_poisson_y_positive(y):
# Test that ValueError is raised if either one y_i < 0 or sum(y_i) <= 0.
err_msg = r"loss='poisson' requires non-negative y and sum\(y\) > 0."
gbdt = HistGradientBoostingRegressor(loss="poisson", random_state=0)
with pytest.raises(ValueError, match=err_msg):
gbdt.fit(np.zeros(shape=(len(y), 1)), y)
def test_poisson():
    """Poisson-loss HGBT must beat squared-error and a dummy in Poisson deviance."""
    # For Poisson distributed target, Poisson loss should give better results
    # than least squares measured in Poisson deviance as metric.
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 100, 100
    X = make_low_rank_matrix(
        n_samples=n_train + n_test, n_features=n_features, random_state=rng
    )
    # We create a log-linear Poisson model and downscale coef as it will get
    # exponentiated.
    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=rng
    )
    gbdt_pois = HistGradientBoostingRegressor(loss="poisson", random_state=rng)
    gbdt_ls = HistGradientBoostingRegressor(loss="squared_error", random_state=rng)
    gbdt_pois.fit(X_train, y_train)
    gbdt_ls.fit(X_train, y_train)
    dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
    for X, y in [(X_train, y_train), (X_test, y_test)]:
        metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X))
        # squared_error might produce non-positive predictions => clip
        metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15, None))
        metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
        assert metric_pois < metric_ls
        assert metric_pois < metric_dummy
def test_binning_train_validation_are_separated():
    """The bin mapper must be fitted on the training split only, not all data."""
    # Make sure training and validation data are binned separately.
    # See issue 13926
    rng = np.random.RandomState(0)
    validation_fraction = 0.2
    gb = HistGradientBoostingClassifier(
        early_stopping=True, validation_fraction=validation_fraction, random_state=rng
    )
    gb.fit(X_classification, y_classification)
    mapper_training_data = gb._bin_mapper
    # Note that since the data is small there is no subsampling and the
    # random_state doesn't matter
    mapper_whole_data = _BinMapper(random_state=0)
    mapper_whole_data.fit(X_classification)
    n_samples = X_classification.shape[0]
    # Each feature has as many bins as there are distinct training samples.
    assert np.all(
        mapper_training_data.n_bins_non_missing_
        == int((1 - validation_fraction) * n_samples)
    )
    assert np.all(
        mapper_training_data.n_bins_non_missing_
        != mapper_whole_data.n_bins_non_missing_
    )
def test_missing_values_trivial():
    """When y == isnan(X), the model must reach perfect training accuracy."""
    # sanity check for missing values support. With only one feature and
    # y == isnan(X), the gbdt is supposed to reach perfect accuracy on the
    # training set.
    n_samples = 100
    n_features = 1
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    mask = rng.binomial(1, 0.5, size=X.shape).astype(bool)
    X[mask] = np.nan
    y = mask.ravel()
    gb = HistGradientBoostingClassifier()
    gb.fit(X, y)
    assert gb.score(X, y) == pytest.approx(1)
@pytest.mark.parametrize("problem", ("classification", "regression"))
@pytest.mark.parametrize(
(
"missing_proportion, expected_min_score_classification, "
"expected_min_score_regression"
),
[(0.1, 0.97, 0.89), (0.2, 0.93, 0.81), (0.5, 0.79, 0.52)],
)
def test_missing_values_resilience(
problem,
missing_proportion,
expected_min_score_classification,
expected_min_score_regression,
):
# Make sure the estimators can deal with missing values and still yield
# decent predictions
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 2
if problem == "regression":
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features,
random_state=rng,
)
gb = HistGradientBoostingRegressor()
expected_min_score = expected_min_score_regression
else:
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features,
n_redundant=0,
n_repeated=0,
random_state=rng,
)
gb = HistGradientBoostingClassifier()
expected_min_score = expected_min_score_classification
mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool)
X[mask] = np.nan
gb.fit(X, y)
assert gb.score(X, y) > expected_min_score
@pytest.mark.parametrize(
    "data",
    [
        make_classification(random_state=0, n_classes=2),
        make_classification(random_state=0, n_classes=3, n_informative=3),
    ],
    ids=["binary_log_loss", "multiclass_log_loss"],
)
def test_zero_division_hessians(data):
    """Fitting must not divide by zero when hessians vanish (saturated softmax)."""
    # non regression test for issue #14018
    # make sure we avoid zero division errors when computing the leaves values.
    # If the learning rate is too high, the raw predictions are bad and will
    # saturate the softmax (or sigmoid in binary classif). This leads to
    # probabilities being exactly 0 or 1, gradients being constant, and
    # hessians being zero.
    X, y = data
    gb = HistGradientBoostingClassifier(learning_rate=100, max_iter=10)
    gb.fit(X, y)
def test_small_trainset():
    """The subsampled training set must have 10k samples and be stratified."""
    # Make sure that the small trainset is stratified and has the expected
    # length (10k samples)
    n_samples = 20000
    original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples).reshape(n_samples, 1)
    y = [
        [class_] * int(prop * n_samples) for (class_, prop) in original_distrib.items()
    ]
    y = shuffle(np.concatenate(y))
    gb = HistGradientBoostingClassifier()
    # Compute the small training set
    X_small, y_small, *_ = gb._get_small_trainset(
        X, y, seed=42, sample_weight_train=None
    )
    # Compute the class distribution in the small training set
    unique, counts = np.unique(y_small, return_counts=True)
    small_distrib = {class_: count / 10000 for (class_, count) in zip(unique, counts)}
    # Test that the small training set has the expected length
    assert X_small.shape[0] == 10000
    assert y_small.shape[0] == 10000
    # Test that the class distributions in the whole dataset and in the small
    # training set are identical
    assert small_distrib == pytest.approx(original_distrib)
def test_missing_values_minmax_imputation():
    """Built-in NaN handling must match an explicit min/max (MIA) imputation."""
    # Compare the built-in missing value handling of Histogram GBC with an
    # a-priori missing value imputation strategy that should yield the same
    # results in terms of decision function.
    #
    # Each feature (containing NaNs) is replaced by 2 features:
    # - one where the nans are replaced by min(feature) - 1
    # - one where the nans are replaced by max(feature) + 1
    # A split where nans go to the left has an equivalent split in the
    # first (min) feature, and a split where nans go to the right has an
    # equivalent split in the second (max) feature.
    #
    # Assuming the data is such that there is never a tie to select the best
    # feature to split on during training, the learned decision trees should be
    # strictly equivalent (learn a sequence of splits that encode the same
    # decision function).
    #
    # The MinMaxImputer transformer is meant to be a toy implementation of the
    # "Missing In Attributes" (MIA) missing value handling for decision trees
    # https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305
    # The implementation of MIA as an imputation transformer was suggested by
    # "Remark 3" in :arxiv:`1902.06931`
    class MinMaxImputer(TransformerMixin, BaseEstimator):
        # Toy MIA imputer: duplicates each feature with NaNs pushed to either
        # extreme (see explanation above).
        def fit(self, X, y=None):
            mm = MinMaxScaler().fit(X)
            self.data_min_ = mm.data_min_
            self.data_max_ = mm.data_max_
            return self
        def transform(self, X):
            X_min, X_max = X.copy(), X.copy()
            for feature_idx in range(X.shape[1]):
                nan_mask = np.isnan(X[:, feature_idx])
                X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1
                X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1
            return np.concatenate([X_min, X_max], axis=1)
    def make_missing_value_data(n_samples=int(1e4), seed=0):
        # Build a regression dataset with several distinct NaN patterns.
        rng = np.random.RandomState(seed)
        X, y = make_regression(n_samples=n_samples, n_features=4, random_state=rng)
        # Pre-bin the data to ensure a deterministic handling by the 2
        # strategies and also make it easier to insert np.nan in a structured
        # way:
        X = KBinsDiscretizer(n_bins=42, encode="ordinal").fit_transform(X)
        # First feature has missing values completely at random:
        rnd_mask = rng.rand(X.shape[0]) > 0.9
        X[rnd_mask, 0] = np.nan
        # Second and third features have missing values for extreme values
        # (censoring missingness):
        low_mask = X[:, 1] == 0
        X[low_mask, 1] = np.nan
        high_mask = X[:, 2] == X[:, 2].max()
        X[high_mask, 2] = np.nan
        # Make the last feature nan pattern very informative:
        y_max = np.percentile(y, 70)
        y_max_mask = y >= y_max
        y[y_max_mask] = y_max
        X[y_max_mask, 3] = np.nan
        # Check that there is at least one missing value in each feature:
        for feature_idx in range(X.shape[1]):
            assert any(np.isnan(X[:, feature_idx]))
        # Let's use a test set to check that the learned decision function is
        # the same as evaluated on unseen data. Otherwise it could just be the
        # case that we find two independent ways to overfit the training set.
        return train_test_split(X, y, random_state=rng)
    # n_samples need to be large enough to minimize the likelihood of having
    # several candidate splits with the same gain value in a given tree.
    X_train, X_test, y_train, y_test = make_missing_value_data(
        n_samples=int(1e4), seed=0
    )
    # Use a small number of leaf nodes and iterations so as to keep
    # under-fitting models to minimize the likelihood of ties when training the
    # model.
    gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)
    gbm1.fit(X_train, y_train)
    gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))
    gbm2.fit(X_train, y_train)
    # Check that the model reach the same score:
    assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))
    assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))
    # Check the individual prediction match as a finer grained
    # decision function check.
    assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))
    assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))
def test_infinite_values():
    """A regressor must fit and predict exactly on +/- infinite feature values."""
    X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
    y = np.array([0, 0, 1, 1])
    model = HistGradientBoostingRegressor(min_samples_leaf=1).fit(X, y)
    np.testing.assert_allclose(model.predict(X), y, atol=1e-4)
def test_consistent_lengths():
    """Mismatched lengths of X, y, or sample_weight must raise ValueError."""
    X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
    y = np.array([0, 0, 1, 1])
    gbdt = HistGradientBoostingRegressor()
    # sample_weight shorter than X/y.
    with pytest.raises(ValueError, match=r"sample_weight.shape == \(3,\), expected"):
        gbdt.fit(X, y, np.array([0.1, 0.3, 0.1]))
    # y shorter than X.
    with pytest.raises(
        ValueError, match="Found input variables with inconsistent number"
    ):
        gbdt.fit(X, y[1:])
def test_infinite_values_missing_values():
    """A stump must perfectly separate inf targets and nan targets."""
    # High level test making sure that inf and nan values are properly handled
    # when both are present. This is similar to
    # test_split_on_nan_with_infinite_values() in test_grower.py, though we
    # cannot check the predictions for binned values here.
    X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1)
    y_isnan = np.isnan(X.ravel())
    y_isinf = X.ravel() == np.inf
    stump_clf = HistGradientBoostingClassifier(
        min_samples_leaf=1, max_iter=1, learning_rate=1, max_depth=2
    )
    assert stump_clf.fit(X, y_isinf).score(X, y_isinf) == 1
    assert stump_clf.fit(X, y_isnan).score(X, y_isnan) == 1
@pytest.mark.parametrize("scoring", [None, "loss"])
def test_string_target_early_stopping(scoring):
# Regression tests for #14709 where the targets need to be encoded before
# to compute the score
rng = np.random.RandomState(42)
X = rng.randn(100, 10)
y = np.array(["x"] * 50 + ["y"] * 50, dtype=object)
gbrt = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring)
gbrt.fit(X, y)
def test_zero_sample_weights_regression():
    """Samples with zero weight must not influence the fitted regressor."""
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # The first two samples (labelled 0) get zero weight, so the only
    # effective sample at [1, 0] is the one labelled 1.
    weights = [0, 0, 1, 1]
    model = HistGradientBoostingRegressor(min_samples_leaf=1)
    model.fit(X, y, sample_weight=weights)
    assert model.predict([[1, 0]])[0] > 0.5
def test_zero_sample_weights_classification():
    """Samples with zero weight must not influence the fitted classifier."""
    # Binary problem: the two zero-weighted samples (labelled 0) are ignored,
    # so the only effective sample at [1, 0] has label 1.
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    weights = [0, 0, 1, 1]
    clf = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
    clf.fit(X, y, sample_weight=weights)
    assert_array_equal(clf.predict([[1, 0]]), [1])
    # Multiclass variant of the same check.
    X = [[1, 0], [1, 0], [1, 0], [0, 1], [1, 1]]
    y = [0, 0, 1, 0, 2]
    weights = [0, 0, 1, 1, 1]
    clf = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
    clf.fit(X, y, sample_weight=weights)
    assert_array_equal(clf.predict([[1, 0]]), [1])
@pytest.mark.parametrize(
    "problem", ("regression", "binary_classification", "multiclass_classification")
)
@pytest.mark.parametrize("duplication", ("half", "all"))
def test_sample_weight_effect(problem, duplication):
    """Duplicating a sample must be equivalent to giving it a weight of 2."""
    # High level test to make sure that duplicating a sample is equivalent to
    # giving it weight of 2.
    # fails for n_samples > 255 because binning does not take sample weights
    # into account. Keeping n_samples <= 255 makes
    # sure only unique values are used so SW have no effect on binning.
    n_samples = 255
    n_features = 2
    if problem == "regression":
        X, y = make_regression(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            random_state=0,
        )
        Klass = HistGradientBoostingRegressor
    else:
        n_classes = 2 if problem == "binary_classification" else 3
        X, y = make_classification(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            n_redundant=0,
            n_clusters_per_class=1,
            n_classes=n_classes,
            random_state=0,
        )
        Klass = HistGradientBoostingClassifier
    # This test can't pass if min_samples_leaf > 1 because that would force 2
    # samples to be in the same node in est_sw, while these samples would be
    # free to be separate in est_dup: est_dup would just group together the
    # duplicated samples.
    est = Klass(min_samples_leaf=1)
    # Create dataset with duplicate and corresponding sample weights
    if duplication == "half":
        lim = n_samples // 2
    else:
        lim = n_samples
    X_dup = np.r_[X, X[:lim]]
    y_dup = np.r_[y, y[:lim]]
    sample_weight = np.ones(shape=(n_samples))
    sample_weight[:lim] = 2
    est_sw = clone(est).fit(X, y, sample_weight=sample_weight)
    est_dup = clone(est).fit(X_dup, y_dup)
    # checking raw_predict is stricter than just predict for classification
    assert np.allclose(est_sw._raw_predict(X_dup), est_dup._raw_predict(X_dup))
@pytest.mark.parametrize("Loss", (HalfSquaredError, AbsoluteError))
def test_sum_hessians_are_sample_weight(Loss):
    """For constant-hessian losses, each histogram bin's sum_hessians must
    equal the sum of the sample weights of the samples falling in that bin."""
    # For losses with constant hessians, the sum_hessians field of the
    # histograms must be equal to the sum of the sample weight of samples at
    # the corresponding bin.
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_features = 2
    X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=rng)
    bin_mapper = _BinMapper()
    X_binned = bin_mapper.fit_transform(X)
    # While sample weights are supposed to be positive, this still works.
    sample_weight = rng.normal(size=n_samples)
    loss = Loss(sample_weight=sample_weight)
    gradients, hessians = loss.init_gradient_and_hessian(
        n_samples=n_samples, dtype=G_H_DTYPE
    )
    # Reshape to (n_samples, 1): one output column for a single-output loss.
    gradients, hessians = gradients.reshape((-1, 1)), hessians.reshape((-1, 1))
    raw_predictions = rng.normal(size=(n_samples, 1))
    loss.gradient_hessian(
        y_true=y,
        raw_prediction=raw_predictions,
        sample_weight=sample_weight,
        gradient_out=gradients,
        hessian_out=hessians,
        n_threads=n_threads,
    )
    # build sum_sample_weight which contains the sum of the sample weights at
    # each bin (for each feature). This must be equal to the sum_hessians
    # field of the corresponding histogram
    sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins))
    for feature_idx in range(n_features):
        for sample_idx in range(n_samples):
            sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += sample_weight[
                sample_idx
            ]
    # Build histogram
    grower = TreeGrower(
        X_binned, gradients[:, 0], hessians[:, 0], n_bins=bin_mapper.n_bins
    )
    histograms = grower.histogram_builder.compute_histograms_brute(
        grower.root.sample_indices
    )
    # Compare every (feature, bin) cell against the reference accumulation.
    for feature_idx in range(n_features):
        for bin_idx in range(bin_mapper.n_bins):
            assert histograms[feature_idx, bin_idx]["sum_hessians"] == (
                pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5)
            )
def test_max_depth_max_leaf_nodes():
    """Non-regression test for issue #16179.

    When the max_depth and max_leaf_nodes stopping criteria were met at the
    same time, max_leaf_nodes used to not be respected.
    """
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3, max_iter=1)
    clf.fit(X, y)
    predictor = clf._predictors[0][0]
    assert predictor.get_max_depth() == 2
    # This was 4 prior to the bug fix.
    assert predictor.get_n_leaf_nodes() == 3
def test_early_stopping_on_test_set_with_warm_start():
    """Non-regression test for #16661.

    A second fit used to fail with warm_start=True, early stopping enabled
    and no validation set.
    """
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(
        max_iter=1,
        scoring="loss",
        warm_start=True,
        early_stopping=True,
        n_iter_no_change=1,
        validation_fraction=None,
    )
    clf.fit(X, y)
    clf.set_params(max_iter=2)
    # The warm-started second fit must not raise.
    clf.fit(X, y)
def test_early_stopping_with_sample_weights(monkeypatch):
    """Check that sample weights is passed in to the scorer and _raw_predict is not
    called."""
    mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error"))
    def mock_check_scoring(estimator, scoring):
        # fit must request exactly the scoring configured on the estimator.
        assert scoring == "neg_median_absolute_error"
        return mock_scorer
    # Intercept check_scoring so fit receives the counting mock scorer.
    monkeypatch.setattr(
        sklearn.ensemble._hist_gradient_boosting.gradient_boosting,
        "check_scoring",
        mock_check_scoring,
    )
    X, y = make_regression(random_state=0)
    sample_weight = np.ones_like(y)
    hist = HistGradientBoostingRegressor(
        max_iter=2,
        early_stopping=True,
        random_state=0,
        scoring="neg_median_absolute_error",
    )
    # Wrap _raw_predict with a Mock so its call count can be checked.
    mock_raw_predict = Mock(side_effect=hist._raw_predict)
    hist._raw_predict = mock_raw_predict
    hist.fit(X, y, sample_weight=sample_weight)
    # _raw_predict should never be called with scoring as a string
    assert mock_raw_predict.call_count == 0
    # The scorer is called twice (train and val) for the baseline score, and twice
    # per iteration (train and val) after that. So 6 times in total for `max_iter=2`.
    assert mock_scorer.call_count == 6
    for arg_list in mock_scorer.call_args_list:
        # sample_weight must be forwarded to every scorer call as a kwarg.
        assert "sample_weight" in arg_list[1]
def test_raw_predict_is_called_with_custom_scorer():
    """Custom scorer will still call _raw_predict."""
    mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error"))
    X, y = make_regression(random_state=0)
    hist = HistGradientBoostingRegressor(
        max_iter=2,
        early_stopping=True,
        random_state=0,
        scoring=mock_scorer,
    )
    # Wrap _raw_predict with a Mock so its invocations can be counted.
    mock_raw_predict = Mock(side_effect=hist._raw_predict)
    hist._raw_predict = mock_raw_predict
    hist.fit(X, y)
    # `_raw_predict` and scorer is called twice (train and val) for the baseline score,
    # and twice per iteration (train and val) after that. So 6 times in total for
    # `max_iter=2`.
    assert mock_raw_predict.call_count == 6
    assert mock_scorer.call_count == 6
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
def test_single_node_trees(Est):
    """Single-node trees must be buildable and have a root value of 0.

    A single-node tree means min_gain_to_split was not met right at the
    root, so the tree must not influence the predictions at all; the
    baseline prediction alone carries the answer.
    """
    X, y = make_classification(random_state=0)
    y[:] = 1  # a constant target yields root-only trees
    model = Est(max_iter=20)
    model.fit(X, y)
    for predictor in model._predictors:
        assert len(predictor[0].nodes) == 1
        assert predictor[0].nodes[0]["value"] == 0
    # Predictions are still correct thanks to the baseline prediction.
    assert_allclose(model.predict(X), y)
@pytest.mark.parametrize(
    "Est, loss, X, y",
    [
        (
            HistGradientBoostingClassifier,
            HalfBinomialLoss(sample_weight=None),
            X_classification,
            y_classification,
        ),
        (
            HistGradientBoostingRegressor,
            HalfSquaredError(sample_weight=None),
            X_regression,
            y_regression,
        ),
    ],
)
def test_custom_loss(Est, loss, X, y):
    """Passing a loss instance (rather than a string name) must be accepted
    by fit without raising."""
    est = Est(loss=loss, max_iter=20)
    est.fit(X, y)
@pytest.mark.parametrize(
    "HistGradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
        (
            HistGradientBoostingClassifier,
            X_multi_classification,
            y_multi_classification,
        ),
    ],
)
def test_staged_predict(HistGradientBoosting, X, y):
    """Staged predictions at iteration i must match an estimator retrained
    from scratch with max_iter=i."""
    # Test whether staged predictor eventually gives
    # the same prediction.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0
    )
    gb = HistGradientBoosting(max_iter=10)
    # test raise NotFittedError if not fitted
    with pytest.raises(NotFittedError):
        next(gb.staged_predict(X_test))
    gb.fit(X_train, y_train)
    # test if the staged predictions of each iteration
    # are equal to the corresponding predictions of the same estimator
    # trained from scratch.
    # this also test limit case when max_iter = 1
    method_names = (
        ["predict"]
        if is_regressor(gb)
        else ["predict", "predict_proba", "decision_function"]
    )
    for method_name in method_names:
        staged_method = getattr(gb, "staged_" + method_name)
        staged_predictions = list(staged_method(X_test))
        assert len(staged_predictions) == gb.n_iter_
        for n_iter, staged_predictions in enumerate(staged_method(X_test), 1):
            # Reference: a fresh estimator stopped at n_iter iterations.
            aux = HistGradientBoosting(max_iter=n_iter)
            aux.fit(X_train, y_train)
            pred_aux = getattr(aux, method_name)(X_test)
            assert_allclose(staged_predictions, pred_aux)
            assert staged_predictions.shape == pred_aux.shape
@pytest.mark.parametrize("insert_missing", [False, True])
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingRegressor, HistGradientBoostingClassifier)
)
@pytest.mark.parametrize("bool_categorical_parameter", [True, False])
@pytest.mark.parametrize("missing_value", [np.nan, -1])
def test_unknown_categories_nan(
    insert_missing, Est, bool_categorical_parameter, missing_value
):
    """Categories unseen during fit must not raise at predict time and must
    be treated like missing values."""
    # Make sure no error is raised at predict if a category wasn't seen during
    # fit. We also make sure they're treated as nans.
    rng = np.random.RandomState(0)
    n_samples = 1000
    f1 = rng.rand(n_samples)
    f2 = rng.randint(4, size=n_samples)
    X = np.c_[f1, f2]
    y = np.zeros(shape=n_samples)
    y[X[:, 1] % 2 == 0] = 1
    # Declare the second feature categorical, either as a mask or as indices.
    if bool_categorical_parameter:
        categorical_features = [False, True]
    else:
        categorical_features = [1]
    if insert_missing:
        # Sprinkle ~1% missing values over the data.
        mask = rng.binomial(1, 0.01, size=X.shape).astype(bool)
        assert mask.sum() > 0
        X[mask] = missing_value
    est = Est(max_iter=20, categorical_features=categorical_features).fit(X, y)
    assert_array_equal(est.is_categorical_, [False, True])
    # Make sure no error is raised on unknown categories and nans
    # unknown categories will be treated as nans
    X_test = np.zeros((10, X.shape[1]), dtype=float)
    X_test[:5, 1] = 30
    X_test[5:, 1] = missing_value
    assert len(np.unique(est.predict(X_test))) == 1
def test_categorical_encoding_strategies():
    """Native categorical splits must reach a perfect score with a single
    split, while ordinal/one-hot encodings need more depth."""
    # Check native categorical handling vs different encoding strategies. We
    # make sure that native encoding needs only 1 split to achieve a perfect
    # prediction on a simple dataset. In contrast, OneHotEncoded data needs
    # more depth / splits, and treating categories as ordered (just using
    # OrdinalEncoder) requires even more depth.
    # dataset with one random continuous feature, and one categorical feature
    # with values in [0, 5], e.g. from an OrdinalEncoder.
    # class == 1 iff categorical value in {0, 2, 4}
    rng = np.random.RandomState(0)
    n_samples = 10_000
    f1 = rng.rand(n_samples)
    f2 = rng.randint(6, size=n_samples)
    X = np.c_[f1, f2]
    y = np.zeros(shape=n_samples)
    y[X[:, 1] % 2 == 0] = 1
    # make sure dataset is balanced so that the baseline_prediction doesn't
    # influence predictions too much with max_iter = 1
    assert 0.49 < y.mean() < 0.51
    # Equivalent ways to mark feature 1 as categorical: boolean mask, index
    # list, and (if pandas is available) column name.
    native_cat_specs = [
        [False, True],
        [1],
    ]
    try:
        import pandas as pd
        X = pd.DataFrame(X, columns=["f_0", "f_1"])
        native_cat_specs.append(["f_1"])
    except ImportError:
        pass
    for native_cat_spec in native_cat_specs:
        clf_cat = HistGradientBoostingClassifier(
            max_iter=1, max_depth=1, categorical_features=native_cat_spec
        )
        clf_cat.fit(X, y)
        # Using native categorical encoding, we get perfect predictions with just
        # one split
        assert cross_val_score(clf_cat, X, y).mean() == 1
        # quick sanity check for the bitset: 0, 2, 4 = 2**0 + 2**2 + 2**4 = 21
        expected_left_bitset = [21, 0, 0, 0, 0, 0, 0, 0]
        left_bitset = clf_cat.fit(X, y)._predictors[0][0].raw_left_cat_bitsets[0]
        assert_array_equal(left_bitset, expected_left_bitset)
    # Treating categories as ordered, we need more depth / more splits to get
    # the same predictions
    clf_no_cat = HistGradientBoostingClassifier(
        max_iter=1, max_depth=4, categorical_features=None
    )
    assert cross_val_score(clf_no_cat, X, y).mean() < 0.9
    clf_no_cat.set_params(max_depth=5)
    assert cross_val_score(clf_no_cat, X, y).mean() == 1
    # Using OHEd data, we need less splits than with pure OEd data, but we
    # still need more splits than with the native categorical splits
    ct = make_column_transformer(
        (OneHotEncoder(sparse_output=False), [1]), remainder="passthrough"
    )
    X_ohe = ct.fit_transform(X)
    clf_no_cat.set_params(max_depth=2)
    assert cross_val_score(clf_no_cat, X_ohe, y).mean() < 0.9
    clf_no_cat.set_params(max_depth=3)
    assert cross_val_score(clf_no_cat, X_ohe, y).mean() == 1
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
@pytest.mark.parametrize(
    "categorical_features, monotonic_cst, expected_msg",
    [
        (
            [b"hello", b"world"],
            None,
            re.escape(
                "categorical_features must be an array-like of bool, int or str, "
                "got: bytes40."
            ),
        ),
        (
            np.array([b"hello", 1.3], dtype=object),
            None,
            re.escape(
                "categorical_features must be an array-like of bool, int or str, "
                "got: bytes, float."
            ),
        ),
        (
            [0, -1],
            None,
            re.escape(
                "categorical_features set as integer indices must be in "
                "[0, n_features - 1]"
            ),
        ),
        (
            [True, True, False, False, True],
            None,
            re.escape(
                "categorical_features set as a boolean mask must have shape "
                "(n_features,)"
            ),
        ),
        (
            [True, True, False, False],
            [0, -1, 0, 1],
            "Categorical features cannot have monotonic constraints",
        ),
    ],
)
def test_categorical_spec_errors(
    Est, categorical_features, monotonic_cst, expected_msg
):
    """Invalid categorical_features specifications must raise a ValueError
    whose message matches expected_msg."""
    # Test errors when categories are specified incorrectly
    n_samples = 100
    X, y = make_classification(random_state=0, n_features=4, n_samples=n_samples)
    rng = np.random.RandomState(0)
    # Make the first two features integer-valued so they look categorical.
    X[:, 0] = rng.randint(0, 10, size=n_samples)
    X[:, 1] = rng.randint(0, 10, size=n_samples)
    est = Est(categorical_features=categorical_features, monotonic_cst=monotonic_cst)
    with pytest.raises(ValueError, match=expected_msg):
        est.fit(X, y)
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
def test_categorical_spec_errors_with_feature_names(Est):
    """String categorical_features must be checked against the DataFrame's
    column names, and rejected when fitting on data without feature names."""
    pd = pytest.importorskip("pandas")
    n_samples = 10
    X = pd.DataFrame(
        {
            "f0": range(n_samples),
            "f1": range(n_samples),
            "f2": [1.0] * n_samples,
        }
    )
    y = [0, 1] * (n_samples // 2)
    # "f3" is not a column of X, so fit must raise.
    est = Est(categorical_features=["f0", "f1", "f3"])
    expected_msg = re.escape(
        "categorical_features has a item value 'f3' which is not a valid "
        "feature name of the training data."
    )
    with pytest.raises(ValueError, match=expected_msg):
        est.fit(X, y)
    # String names are invalid when fitting on a plain ndarray.
    est = Est(categorical_features=["f0", "f1"])
    expected_msg = re.escape(
        "categorical_features should be passed as an array of integers or "
        "as a boolean mask when the model is fitted on data without feature "
        "names."
    )
    with pytest.raises(ValueError, match=expected_msg):
        est.fit(X.to_numpy(), y)
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
@pytest.mark.parametrize("categorical_features", ([False, False], []))
@pytest.mark.parametrize("as_array", (True, False))
def test_categorical_spec_no_categories(Est, categorical_features, as_array):
    """A non-None categorical_features spec that selects no feature must be
    detected as "no categorical features" (is_categorical_ is None)."""
    X = np.arange(10).reshape(5, 2)
    y = np.arange(5)
    spec = np.asarray(categorical_features) if as_array else categorical_features
    model = Est(categorical_features=spec).fit(X, y)
    assert model.is_categorical_ is None
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
@pytest.mark.parametrize(
    "use_pandas, feature_name", [(False, "at index 0"), (True, "'f0'")]
)
def test_categorical_bad_encoding_errors(Est, use_pandas, feature_name):
    """A categorical cardinality above max_bins must raise, while NaN must
    not be counted towards the cardinality."""
    # Test errors when categories are encoded incorrectly
    gb = Est(categorical_features=[True], max_bins=2)
    if use_pandas:
        pd = pytest.importorskip("pandas")
        X = pd.DataFrame({"f0": [0, 1, 2]})
    else:
        X = np.array([[0, 1, 2]]).T
    y = np.arange(3)
    # Three distinct categories with max_bins=2 -> must raise.
    msg = (
        f"Categorical feature {feature_name} is expected to have a "
        "cardinality <= 2 but actually has a cardinality of 3."
    )
    with pytest.raises(ValueError, match=msg):
        gb.fit(X, y)
    # nans are ignored in the counts
    X = np.array([[0, 1, np.nan]]).T
    y = np.arange(3)
    gb.fit(X, y)
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
def test_uint8_predict(Est):
    """Non-regression test for issue #18408.

    predict must accept X of dtype uint8 (i.e. X_BINNED_DTYPE); it is
    converted to X_DTYPE internally.
    """
    rng = np.random.RandomState(0)
    X = rng.randint(0, 100, size=(10, 2)).astype(np.uint8)
    y = rng.randint(0, 2, size=10).astype(np.uint8)
    model = Est().fit(X, y)
    # Must not raise on uint8 input.
    model.predict(X)
@pytest.mark.parametrize(
    "interaction_cst, n_features, result",
    [
        (None, 931, None),
        ([{0, 1}], 2, [{0, 1}]),
        ("pairwise", 2, [{0, 1}]),
        ("pairwise", 4, [{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3}]),
        ("no_interactions", 2, [{0}, {1}]),
        ("no_interactions", 4, [{0}, {1}, {2}, {3}]),
        # Mixed tuple/list input; per the last expected group, features not
        # listed anywhere (2, 3, 4) end up together in one extra group.
        ([(1, 0), [5, 1]], 6, [{0, 1}, {1, 5}, {2, 3, 4}]),
    ],
)
def test_check_interaction_cst(interaction_cst, n_features, result):
    """Check that _check_interaction_cst returns the expected list of sets"""
    est = HistGradientBoostingRegressor()
    est.set_params(interaction_cst=interaction_cst)
    assert est._check_interaction_cst(n_features) == result
def test_interaction_cst_numerically():
    """Check that interaction constraints have no forbidden interactions."""
    rng = np.random.RandomState(42)
    n_samples = 1000
    X = rng.uniform(size=(n_samples, 2))
    # Construct y with a strong interaction term
    # y = x0 + x1 + 5 * x0 * x1
    y = np.hstack((X, 5 * X[:, [0]] * X[:, [1]])).sum(axis=1)
    est = HistGradientBoostingRegressor(random_state=42)
    est.fit(X, y)
    # Constrained model: features 0 and 1 may never interact.
    est_no_interactions = HistGradientBoostingRegressor(
        interaction_cst=[{0}, {1}], random_state=42
    )
    est_no_interactions.fit(X, y)
    delta = 0.25
    # Make sure we do not extrapolate out of the training set as tree-based estimators
    # are very bad in doing so.
    X_test = X[(X[:, 0] < 1 - delta) & (X[:, 1] < 1 - delta)]
    X_delta_d_0 = X_test + [delta, 0]
    X_delta_0_d = X_test + [0, delta]
    X_delta_d_d = X_test + [delta, delta]
    # Note: For the y from above as a function of x0 and x1, we have
    # y(x0+d, x1+d) = y(x0, x1) + 5 * d * (2/5 + x0 + x1) + 5 * d**2
    # y(x0+d, x1) = y(x0, x1) + 5 * d * (1/5 + x1)
    # y(x0, x1+d) = y(x0, x1) + 5 * d * (1/5 + x0)
    # Without interaction constraints, we would expect a result of 5 * d**2 for the
    # following expression, but zero with constraints in place.
    # (The mixed second difference below is zero iff the fitted function is
    # additive in x0 and x1.)
    assert_allclose(
        est_no_interactions.predict(X_delta_d_d)
        + est_no_interactions.predict(X_test)
        - est_no_interactions.predict(X_delta_d_0)
        - est_no_interactions.predict(X_delta_0_d),
        0,
        atol=1e-12,
    )
    # Correct result of the expressions is 5 * delta**2. But this is hard to achieve by
    # a fitted tree-based model. However, with 100 iterations the expression should
    # at least be positive!
    assert np.all(
        est.predict(X_delta_d_d)
        + est.predict(X_test)
        - est.predict(X_delta_d_0)
        - est.predict(X_delta_0_d)
        > 0.01
    )
def test_no_user_warning_with_scoring():
    """Check that no UserWarning is raised when scoring is set.
    Non-regression test for #22907.
    """
    pd = pytest.importorskip("pandas")
    X, y = make_regression(n_samples=50, random_state=0)
    frame = pd.DataFrame(X, columns=[f"col{i}" for i in range(X.shape[1])])
    model = HistGradientBoostingRegressor(
        random_state=0, scoring="neg_mean_absolute_error", early_stopping=True
    )
    # Promote any UserWarning to an error so the fit fails loudly if one is
    # emitted.
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        model.fit(frame, y)
def test_class_weights():
    """High level test to check class_weights."""
    n_samples = 255
    n_features = 2
    X, y = make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_features,
        n_redundant=0,
        n_clusters_per_class=1,
        n_classes=2,
        random_state=0,
    )
    y_is_1 = y == 1
    # class_weight is the same as sample weights with the corresponding class
    clf = HistGradientBoostingClassifier(
        min_samples_leaf=2, random_state=0, max_depth=2
    )
    sample_weight = np.ones(shape=(n_samples))
    sample_weight[y_is_1] = 3.0
    clf.fit(X, y, sample_weight=sample_weight)
    class_weight = {0: 1.0, 1: 3.0}
    clf_class_weighted = clone(clf).set_params(class_weight=class_weight)
    clf_class_weighted.fit(X, y)
    # Weighting class 1 by 3 via sample_weight or via class_weight must give
    # the same decision function.
    assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X))
    # Check that sample_weight and class_weight are multiplicative
    clf.fit(X, y, sample_weight=sample_weight**2)
    clf_class_weighted.fit(X, y, sample_weight=sample_weight)
    assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X))
    # Make imbalanced dataset
    X_imb = np.concatenate((X[~y_is_1], X[y_is_1][:10]))
    y_imb = np.concatenate((y[~y_is_1], y[y_is_1][:10]))
    # class_weight="balanced" is the same as sample_weights to be
    # inversely proportional to n_samples / (n_classes * np.bincount(y))
    clf_balanced = clone(clf).set_params(class_weight="balanced")
    clf_balanced.fit(X_imb, y_imb)
    # Reproduce the "balanced" weights by hand as per-sample weights.
    class_weight = y_imb.shape[0] / (2 * np.bincount(y_imb))
    sample_weight = class_weight[y_imb]
    clf_sample_weight = clone(clf).set_params(class_weight=None)
    clf_sample_weight.fit(X_imb, y_imb, sample_weight=sample_weight)
    assert_allclose(
        clf_balanced.decision_function(X_imb),
        clf_sample_weight.decision_function(X_imb),
    )
def test_unknown_category_that_are_negative():
    """Check that unknown categories that are negative does not error.
    Non-regression test for #24274.
    """
    rng = np.random.RandomState(42)
    n_samples = 1000
    X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)]
    y = np.zeros(shape=n_samples)
    y[X[:, 1] % 2 == 0] = 1
    model = HistGradientBoostingRegressor(
        random_state=0,
        categorical_features=[False, True],
        max_iter=10,
    ).fit(X, y)
    # Negative categories in the second column must predict exactly like
    # missing (NaN) categories.
    negatives = np.asarray([[1, -2], [3, -4]])
    missing = np.asarray([[1, np.nan], [3, np.nan]])
    assert_allclose(model.predict(negatives), model.predict(missing))
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
@pytest.mark.parametrize(
    "HistGradientBoosting",
    [HistGradientBoostingClassifier, HistGradientBoostingRegressor],
)
def test_dataframe_categorical_results_same_as_ndarray(
    dataframe_lib, HistGradientBoosting
):
    """Check that pandas categorical give the same results as ndarray."""
    pytest.importorskip(dataframe_lib)
    rng = np.random.RandomState(42)
    n_samples = 5_000
    n_cardinality = 50
    max_bins = 100
    f_num = rng.rand(n_samples)
    f_cat = rng.randint(n_cardinality, size=n_samples)
    # Make f_cat an informative feature
    y = (f_cat % 3 == 0) & (f_num > 0.2)
    X = np.c_[f_num, f_cat]
    # Zero-padded string labels so lexical order matches the numeric order.
    f_cat = [f"cat{c:0>3}" for c in f_cat]
    X_df = _convert_container(
        np.asarray([f_num, f_cat]).T,
        dataframe_lib,
        ["f_num", "f_cat"],
        categorical_feature_names=["f_cat"],
    )
    X_train, X_test, X_train_df, X_test_df, y_train, y_test = train_test_split(
        X, X_df, y, random_state=0
    )
    hist_kwargs = dict(max_iter=10, max_bins=max_bins, random_state=0)
    hist_np = HistGradientBoosting(categorical_features=[False, True], **hist_kwargs)
    hist_np.fit(X_train, y_train)
    hist_pd = HistGradientBoosting(categorical_features="from_dtype", **hist_kwargs)
    hist_pd.fit(X_train_df, y_train)
    # Check categories are correct and sorted
    categories = hist_pd._preprocessor.named_transformers_["encoder"].categories_[0]
    assert_array_equal(categories, np.unique(f_cat))
    # The tree structures must match node-for-node...
    assert len(hist_np._predictors) == len(hist_pd._predictors)
    for predictor_1, predictor_2 in zip(hist_np._predictors, hist_pd._predictors):
        assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes)
    # ... and so must the test scores and predictions.
    score_np = hist_np.score(X_test, y_test)
    score_pd = hist_pd.score(X_test_df, y_test)
    assert score_np == pytest.approx(score_pd)
    assert_allclose(hist_np.predict(X_test), hist_pd.predict(X_test_df))
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
@pytest.mark.parametrize(
    "HistGradientBoosting",
    [HistGradientBoostingClassifier, HistGradientBoostingRegressor],
)
def test_dataframe_categorical_errors(dataframe_lib, HistGradientBoosting):
    """Check error cases for pandas categorical feature."""
    pytest.importorskip(dataframe_lib)
    msg = "Categorical feature 'f_cat' is expected to have a cardinality <= 16"
    hist = HistGradientBoosting(categorical_features="from_dtype", max_bins=16)
    rng = np.random.RandomState(42)
    # Up to 100 distinct string categories > max_bins=16 -> fit must raise.
    f_cat = rng.randint(0, high=100, size=100).astype(str)
    X_df = _convert_container(
        f_cat[:, None], dataframe_lib, ["f_cat"], categorical_feature_names=["f_cat"]
    )
    y = rng.randint(0, high=2, size=100)
    with pytest.raises(ValueError, match=msg):
        hist.fit(X_df, y)
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
def test_categorical_different_order_same_model(dataframe_lib):
    """Check that the order of the categorical gives same model."""
    pytest.importorskip(dataframe_lib)
    rng = np.random.RandomState(42)
    n_samples = 1_000
    f_ints = rng.randint(low=0, high=2, size=n_samples)
    # Construct a target with some noise
    y = f_ints.copy()
    flipped = rng.choice([True, False], size=n_samples, p=[0.1, 0.9])
    y[flipped] = 1 - y[flipped]
    # Construct categorical where 0 -> A and 1 -> B and 1 -> A and 0 -> B
    f_cat_a_b = np.asarray(["A", "B"])[f_ints]
    f_cat_b_a = np.asarray(["B", "A"])[f_ints]
    df_a_b = _convert_container(
        f_cat_a_b[:, None],
        dataframe_lib,
        ["f_cat"],
        categorical_feature_names=["f_cat"],
    )
    df_b_a = _convert_container(
        f_cat_b_a[:, None],
        dataframe_lib,
        ["f_cat"],
        categorical_feature_names=["f_cat"],
    )
    hist_a_b = HistGradientBoostingClassifier(
        categorical_features="from_dtype", random_state=0
    )
    hist_b_a = HistGradientBoostingClassifier(
        categorical_features="from_dtype", random_state=0
    )
    hist_a_b.fit(df_a_b, y)
    hist_b_a.fit(df_b_a, y)
    # Tree shapes must be identical regardless of the category labeling.
    assert len(hist_a_b._predictors) == len(hist_b_a._predictors)
    for predictor_1, predictor_2 in zip(hist_a_b._predictors, hist_b_a._predictors):
        assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes)
def get_different_bitness_node_ndarray(node_ndarray):
    """Return a copy of *node_ndarray* whose indexing fields use the integer
    width of the opposite platform bitness (int64 on 32-bit hosts, int32 on
    64-bit hosts)."""
    swapped_int = np.int64 if _IS_32BIT else np.int32
    # field names in Node struct with np.intp types (see
    # sklearn/ensemble/_hist_gradient_boosting/common.pyx)
    intp_fields = ("feature_idx",)
    field_dtypes = {}
    for field_name, (field_dtype, _offset) in node_ndarray.dtype.fields.items():
        field_dtypes[field_name] = (
            swapped_int if field_name in intp_fields else field_dtype
        )
    swapped_dtype = np.dtype(
        {"names": list(field_dtypes), "formats": list(field_dtypes.values())}
    )
    return node_ndarray.astype(swapped_dtype, casting="same_kind")
def reduce_predictor_with_different_bitness(predictor):
    """__reduce__ replacement that swaps the bitness of the predictor's
    pickled node array (used via a pickle dispatch_table)."""
    cls, args, state = predictor.__reduce__()
    swapped_state = dict(state)
    swapped_state["nodes"] = get_different_bitness_node_ndarray(
        swapped_state["nodes"]
    )
    return cls, args, swapped_state
def test_different_bitness_pickle():
    """A pickle whose node arrays use the other platform's integer width
    must load and score identically on this platform."""
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(random_state=0, max_depth=3)
    clf.fit(X, y)
    score = clf.score(X, y)
    def pickle_dump_with_different_bitness():
        # A custom reducer registered in the dispatch table swaps the node
        # dtype at dump time, emulating a pickle written on a platform of
        # the other bitness.
        f = io.BytesIO()
        p = pickle.Pickler(f)
        p.dispatch_table = copyreg.dispatch_table.copy()
        p.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness
        p.dump(clf)
        f.seek(0)
        return f
    # Simulate loading a pickle of the same model trained on a platform with different
    # bitness than the platform it will be used to make predictions on:
    new_clf = pickle.load(pickle_dump_with_different_bitness())
    new_score = new_clf.score(X, y)
    assert score == pytest.approx(new_score)
def test_different_bitness_joblib_pickle():
    # Make sure that a platform specific pickle generated on a 64 bit
    # platform can be converted at pickle load time into an estimator
    # with Cython code that works with the host's native integer precision
    # to index nodes in the tree data structure when the host is a 32 bit
    # platform (and vice versa).
    #
    # This is in particular useful to be able to train a model on a 64 bit Linux
    # server and deploy the model as part of a (32 bit) WASM in-browser
    # application using pyodide.
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(random_state=0, max_depth=3)
    clf.fit(X, y)
    score = clf.score(X, y)
    def joblib_dump_with_different_bitness():
        # NumpyPickler with a custom reducer swaps the node dtype at dump
        # time, emulating a joblib dump from a platform of the other bitness.
        f = io.BytesIO()
        p = NumpyPickler(f)
        p.dispatch_table = copyreg.dispatch_table.copy()
        p.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness
        p.dump(clf)
        f.seek(0)
        return f
    new_clf = joblib.load(joblib_dump_with_different_bitness())
    new_score = new_clf.score(X, y)
    # Scores must match after the cross-bitness round-trip.
    assert score == pytest.approx(new_score)
def test_pandas_nullable_dtype():
    """Non-regression test for #28317: fitting on a pandas nullable integer
    dtype (Int64Dtype) must work."""
    pd = pytest.importorskip("pandas")
    rng = np.random.default_rng(0)
    frame = pd.DataFrame({"a": rng.integers(10, size=100)}).astype(pd.Int64Dtype())
    target = rng.integers(2, size=100)
    # Must not raise on the nullable-dtype input.
    HistGradientBoostingClassifier().fit(frame, target)
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@ensemble@_hist_gradient_boosting@tests@test_gradient_boosting.py@.PATH_END.py
|
{
"filename": "utils_lbol.py",
"repo_name": "nuclear-multimessenger-astronomy/nmma",
"repo_path": "nmma_extracted/nmma-main/nmma/em/utils_lbol.py",
"type": "Python"
}
|
import numpy as np
from scipy.integrate import quad
import astropy.constants
def arnett_lc_get_int_A_non_vec(x, y):
    """Evaluate the Arnett light-curve integral A(x, y) for scalar inputs.

    Computes int_0^x 2 z exp(-2 z y + z**2) dz with adaptive quadrature.
    The quadrature error estimate returned by quad is discarded.

    Parameters
    ----------
    x : float
        Upper integration limit (dimensionless time, t / tau_m).
    y : float
        Dimensionless decay parameter, tau_m / (2 tau_ni).

    Returns
    -------
    float
        Value of the integral.
    """
    integral, _abserr = quad(lambda z: 2 * z * np.exp(-2 * z * y + z**2), 0, x)
    return integral


# Vectorize over x only. Fix: the original excluded=["y"] only excludes the
# *keyword* argument "y", but call sites pass y positionally, so the
# exclusion was a no-op; excluding positional index 1 as well makes the
# scalar pass-through effective for positional calls too (behavior for
# scalar y is unchanged).
arnett_lc_get_int_A = np.vectorize(arnett_lc_get_int_A_non_vec, excluded={1, "y"})
def arnett_lc_get_int_B_non_vec(x, y, s):
    """Evaluate the Arnett light-curve integral B(x, y, s) for scalar inputs.

    Computes int_0^x 2 z exp(-2 z y + 2 z s + z**2) dz with adaptive
    quadrature. The quadrature error estimate returned by quad is discarded.

    Parameters
    ----------
    x : float
        Upper integration limit (dimensionless time, t / tau_m).
    y : float
        Dimensionless decay parameter, tau_m / (2 tau_ni).
    s : float
        Dimensionless parameter, tau_m (tau_co - tau_ni) / (2 tau_co tau_ni).

    Returns
    -------
    float
        Value of the integral.
    """
    integral, _abserr = quad(
        lambda z: 2 * z * np.exp(-2 * z * y + 2 * z * s + z**2), 0, x
    )
    return integral


# Vectorize over x only. Fix: the original excluded=["y", "s"] only excludes
# *keyword* arguments, but call sites pass y and s positionally, so the
# exclusion was a no-op; excluding positional indices 1 and 2 as well makes
# the scalar pass-through effective for positional calls too (behavior for
# scalar y, s is unchanged).
arnett_lc_get_int_B = np.vectorize(
    arnett_lc_get_int_B_non_vec, excluded={1, 2, "y", "s"}
)
def arnett_lc(t_day, param_dict):
    """Bolometric luminosity of an Arnett-type Ni/Co-powered light curve.

    Parameters
    ----------
    t_day : array_like
        Times since explosion, in days.
    param_dict : dict
        Must provide "log10_mni" (log10 of the nickel mass in solar masses)
        and "tau_m" (diffusion timescale in days).

    Returns
    -------
    numpy.ndarray
        Luminosity in erg / s at each input time.
    """
    seconds_per_day = 86400.0
    # Radioactive energy generation rates and decay timescales for the
    # 56Ni -> 56Co chain.
    eps_ni = 3.9e10  # erg / s / g
    eps_co = 6.78e9  # erg / s / g
    t_ni = 8.8 * seconds_per_day  # s
    t_co = 111.3 * seconds_per_day  # s

    nickel_mass = 10 ** param_dict["log10_mni"]
    nickel_mass *= astropy.constants.M_sun.cgs.value  # in g
    t_diff = param_dict["tau_m"] * seconds_per_day

    # Dimensionless variables entering the Arnett integrals.
    x = (t_day * seconds_per_day) / t_diff
    y = t_diff / (2 * t_ni)
    s = t_diff * (t_co - t_ni) / (2 * t_co * t_ni)

    term_a = arnett_lc_get_int_A(x, y)
    term_b = arnett_lc_get_int_B(x, y, s)
    return nickel_mass * np.exp(-x**2) * (
        (eps_ni - eps_co) * term_a + eps_co * term_b
    )
def arnett_modified_lc(t_day, param_dict):
    """Arnett-type light curve with a late-time leakage correction.

    Same model as the plain Arnett luminosity, multiplied by
    (1 - exp(-(t_0 / t)**2)), which suppresses the luminosity at times
    well beyond the characteristic timescale t_0.

    Parameters
    ----------
    t_day : array_like
        Times since explosion, in days.
    param_dict : dict
        Must provide "log10_mni" (log10 of the nickel mass in solar
        masses), "tau_m" (diffusion timescale in days) and "t_0"
        (correction timescale in days).

    Returns
    -------
    numpy.ndarray
        Corrected luminosity in erg / s at each input time.
    """
    seconds_per_day = 86400.0
    # Radioactive energy generation rates and decay timescales for the
    # 56Ni -> 56Co chain.
    eps_ni = 3.9e10  # erg / s / g
    eps_co = 6.78e9  # erg / s / g
    t_ni = 8.8 * seconds_per_day  # s
    t_co = 111.3 * seconds_per_day  # s

    nickel_mass = 10 ** param_dict["log10_mni"]
    nickel_mass *= astropy.constants.M_sun.cgs.value  # in g
    t_diff = param_dict["tau_m"] * seconds_per_day
    t_corr = param_dict["t_0"] * seconds_per_day

    ts = t_day * seconds_per_day
    x = ts / t_diff
    y = t_diff / (2 * t_ni)
    s = t_diff * (t_co - t_ni) / (2 * t_co * t_ni)

    term_a = arnett_lc_get_int_A(x, y)
    term_b = arnett_lc_get_int_B(x, y, s)
    luminosity = nickel_mass * np.exp(-x**2) * (
        (eps_ni - eps_co) * term_a + eps_co * term_b
    )
    # Late-time correction factor; -> 1 for ts << t_corr, -> 0 for ts >> t_corr.
    return luminosity * (1.0 - np.exp(-1.0 * (t_corr / ts) ** 2))
|
nuclear-multimessenger-astronomyREPO_NAMEnmmaPATH_START.@nmma_extracted@nmma-main@nmma@em@utils_lbol.py@.PATH_END.py
|
{
"filename": "_variantsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/hoverlabel/font/_variantsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for the plotly property
    ``isosurface.hoverlabel.font.variantsrc``.

    Delegates all validation behavior to the SrcValidator base class.
    """

    def __init__(
        self,
        plotly_name="variantsrc",
        parent_name="isosurface.hoverlabel.font",
        **kwargs,
    ):
        # edit_type defaults to "none" unless overridden by the caller.
        # NOTE(review): per plotly convention this presumably means no
        # automatic re-render is triggered on change — confirm against the
        # generated plot schema.
        super(VariantsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@hoverlabel@font@_variantsrc.py@.PATH_END.py
|
{
"filename": "incomplete_beta.py",
"repo_name": "oliverphilcox/HIPSTER",
"repo_path": "HIPSTER_extracted/HIPSTER-master/python/wcdm/incomplete_beta.py",
"type": "Python"
}
|
# This is an adaptation by Daniel Eisenstein of GSL code in
# beta_cont_frac_gsl(a, b, x).
# This file is distributed under the GNU Public License
import numpy as np
######### An adaptation of the GSL beta function code
def beta_cont_frac_gsl(a, b, x):
    """Compute the (non-regularized) incomplete beta function B_x(a, b)
    via a continued-fraction expansion.

    Requires a > 0 (so the x = 0 prefactor is safe); b may be negative.
    x must satisfy 0 <= x < 1 and may be a scalar or an array; a scalar
    input yields a scalar result.  With b < 0 and x very near 1, 200
    iterations may not fully converge; the current value is returned.

    Adapted by Daniel Eisenstein (July 2015) from the GSL
    specfunc/beta_inc.c continued-fraction code, with the complete-beta
    prefactor stripped and the iteration vectorized.  Distributed under
    the GNU General Public License (v3 or later).

    :param a: first beta parameter, must be > 0
    :param b: second beta parameter (may be negative)
    :param x: evaluation point(s), 0 <= x < 1
    :return: B_x(a, b), scalar or array matching the input
    :raises ValueError: if any x is outside [0, 1)
    """
    # np.array(..., copy=False) raises in NumPy >= 2.0 whenever a copy is
    # required (e.g. for list input); asarray + atleast_1d is the
    # version-safe equivalent of the old copy-if-needed behavior.
    x = np.atleast_1d(np.asarray(x))
    if np.min(x) < 0 or np.max(x) >= 1:
        # Raise instead of print()+sys.exit() so callers can handle it.
        raise ValueError("beta_cont_frac_gsl() requires 0 <= x < 1")
    cutoff = 1e-30  # guard against division by (near-)zero terms
    # Standard (modified Lentz) initialization of the continued fraction.
    num_term = 1.0
    den_term = 1.0 - (a + b) * x / (a + 1.0)
    den_term[np.abs(den_term) < cutoff] = cutoff
    den_term = 1.0 / den_term
    cf = den_term
    for k in range(1, 200):
        # Even step of the recurrence.
        coeff = k * (b - k) * x / (((a - 1.0) + 2 * k) * (a + 2 * k))
        den_term = 1.0 + coeff * den_term
        num_term = 1.0 + coeff / num_term
        den_term[np.abs(den_term) < cutoff] = cutoff
        num_term[np.abs(num_term) < cutoff] = cutoff
        den_term = 1.0 / den_term
        cf *= den_term * num_term
        # Odd step of the recurrence.
        coeff = -(a + k) * (a + b + k) * x / ((a + 2 * k) * (a + 2 * k + 1.0))
        den_term = 1.0 + coeff * den_term
        num_term = 1.0 + coeff / num_term
        den_term[np.abs(den_term) < cutoff] = cutoff
        num_term[np.abs(num_term) < cutoff] = cutoff
        den_term = 1.0 / den_term
        cf *= den_term * num_term
        # Converged once the last multiplicative update is ~1 everywhere.
        if np.max(np.abs(den_term * num_term - 1)) < 1e-12:
            break
    # If the loop exhausted, accept the value even without convergence.
    # Include the prefactor; a > 0 keeps x = 0 from crashing.
    cf *= np.power(x, a) * np.power(1 - x, b) / a
    if len(cf) == 1:
        return cf[0]  # scalar in, scalar out
    return cf
|
oliverphilcoxREPO_NAMEHIPSTERPATH_START.@HIPSTER_extracted@HIPSTER-master@python@wcdm@incomplete_beta.py@.PATH_END.py
|
{
"filename": "io_utils.py",
"repo_name": "threeML/threeML",
"repo_path": "threeML_extracted/threeML-master/threeML/io/cern_root_utils/io_utils.py",
"type": "Python"
}
|
import ROOT
import contextlib
def get_list_of_keys(root_file, dir=""):
    """
    Return the names of the objects stored in a directory of a ROOT file.

    :param root_file: a ROOT.TFile instance
    :param dir: directory inside the file (default: "", the file root)
    :return: a list of object names
    """
    # cd() makes `dir` the current gDirectory, whose keys we then walk.
    root_file.cd(dir)
    names = []
    for key in ROOT.gDirectory.GetListOfKeys():
        names.append(key.GetName())
    return names
@contextlib.contextmanager
def open_ROOT_file(filename):
    """
    Context manager yielding an open ROOT.TFile.

    The file is closed (and the Python wrapper deleted) no matter how the
    managed block exits, including via exceptions.

    :param filename: path of the ROOT file to open
    :return: yields the open ROOT.TFile instance
    """
    root_file = ROOT.TFile(filename)
    try:
        yield root_file
    finally:
        root_file.Close()
        del root_file
|
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@threeML@io@cern_root_utils@io_utils.py@.PATH_END.py
|
{
"filename": "arxiv.py",
"repo_name": "showyourwork/showyourwork",
"repo_path": "showyourwork_extracted/showyourwork-main/src/showyourwork/workflow/scripts/arxiv.py",
"type": "Python"
}
|
"""
Wraps the :doc:`pdf` script to build the article PDF, then
tars everything in the ``src/tex`` directory into the tarball
``arxiv.tar.gz`` for easy arXiv submission.
"""
import shutil
import subprocess
import tarfile
from pathlib import Path
from tempfile import TemporaryDirectory
from showyourwork import paths
if __name__ == "__main__":
    # Snakemake injects a `snakemake` object into the script namespace.
    config = snakemake.config  # type:ignore

    # Build artifacts that must not ship inside the arXiv tarball.
    ms_name = config["ms_name"]
    excluded_names = [".gitignore"] + [
        f"{ms_name}{suffix}"
        for suffix in (
            ".pdf", ".aux", ".blg", ".log", ".out",
            ".fls", ".synctex.gz", ".fdb_latexmk",
        )
    ]

    with TemporaryDirectory() as tmpdir:
        preprocess_script = config["preprocess_arxiv_script"]
        if preprocess_script is not None:
            # Copy the compiled tree aside and let the user's script
            # modify the copy before packaging.
            shutil.copytree(paths.user().compile, tmpdir, dirs_exist_ok=True)
            subprocess.run([preprocess_script, tmpdir], check=True)
            src_dir = Path(tmpdir)
        else:
            src_dir = paths.user().compile

        # Flat tarball of everything except build artifacts and .bib files.
        with tarfile.open("arxiv.tar.gz", "w:gz") as tarball:
            for entry in src_dir.rglob("*"):
                if entry.name not in excluded_names and entry.suffix != ".bib":
                    tarball.add(entry, arcname=entry.name)
|
showyourworkREPO_NAMEshowyourworkPATH_START.@showyourwork_extracted@showyourwork-main@src@showyourwork@workflow@scripts@arxiv.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "bwvdnbro/CMacIonize",
"repo_path": "CMacIonize_extracted/CMacIonize-master/python/__init__.py",
"type": "Python"
}
|
#! /usr/bin/python
################################################################################
# This file is part of CMacIonize
# Copyright (C) 2016 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
#
# CMacIonize is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CMacIonize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with CMacIonize. If not, see <http://www.gnu.org/licenses/>.
################################################################################
##
# @file __init__.py
#
# @brief Initialization script for the Python modules.
#
# This script loads all relevant module files.
#
# @author Bert Vandenbroucke (bv7@st-andrews.ac.uk)
##
import emissivitycalculator
import libdensitygrid
import libemissivitycalculator
import libflashsnapshotdensityfunction
|
bwvdnbroREPO_NAMECMacIonizePATH_START.@CMacIonize_extracted@CMacIonize-master@python@__init__.py@.PATH_END.py
|
{
"filename": "_colorscale.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergl/marker/line/_colorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``scattergl.marker.line.colorscale`` property."""

    def __init__(
        self, plotly_name="colorscale", parent_name="scattergl.marker.line", **kwargs
    ):
        # Defaults apply only when the caller did not override them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("implied_edits", {"autocolorscale": False})
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergl@marker@line@_colorscale.py@.PATH_END.py
|
{
"filename": "test_tracers.py",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/benchmarks/test_tracers.py",
"type": "Python"
}
|
import numpy as np
import pyccl as ccl
import pytest
def get_prediction(ells, chi_i, chi_f, alpha, beta, gamma,
                   der_bessel, der_angles):
    """Analytic angular power spectrum for power-law kernels/transfers.

    C_ell = f_ell * (ell + 1/2)^(alpha + 2*beta) * (chi_f^e - chi_i^e)/e,
    where e = 2*gamma - alpha - 2*beta - 1 and f_ell encodes the
    Bessel-derivative and angular-derivative prefactors.
    """
    # ell-dependent prefactor from the derivative options.
    prefac = np.ones_like(ells)
    if der_bessel == -1:
        prefac *= 1. / (ells + 0.5)**4
    if der_angles == 2:
        prefac *= (ells + 2.) * (ells + 1.) * ells * (ells - 1)
    elif der_angles == 1:
        prefac *= ((ells + 1.) * ells)**2
    # Radial integral of the power law between chi_i and chi_f.
    exponent = 2 * gamma - alpha - 2 * beta - 1
    radial = (chi_f**exponent - chi_i**exponent) / exponent
    return prefac * (ells + 0.5)**(alpha + 2 * beta) * radial
@pytest.fixture(scope='module')
def set_up():
    """Module-scoped BBKS/linear cosmology for the analytic tracer tests."""
    # Loosen GSL integration tolerances for speed; restore the defaults
    # once the cosmology object exists.
    ccl.gsl_params.INTEGRATION_LIMBER_EPSREL = 1E-4
    ccl.gsl_params.INTEGRATION_EPSREL = 1E-4
    cosmo_kwargs = dict(
        Omega_c=0.30, Omega_b=0.00, Omega_g=0, Omega_k=0,
        h=0.7, sigma8=0.8, n_s=0.96, Neff=0, m_nu=0.0,
        w0=-1, wa=0, T_CMB=2.7, transfer_function='bbks',
        matter_power_spectrum='linear')
    cosmo = ccl.Cosmology(**cosmo_kwargs)
    ccl.gsl_params.reload()
    return cosmo
@pytest.mark.parametrize("alpha,beta,gamma,is_factorizable,"
                         "w_transfer,mismatch,der_bessel,der_angles",
                         # non-factorizable
                         [(-2., -1., -1., False, True, False, 0, 0),
                          (-2., 0., -1., False, True, False, 0, 0),
                          (-2., -1., 0., False, True, False, 0, 0),
                          (-2., 0., 0., False, True, False, 0, 0),
                          (0., 0., 0., False, True, False, 0, 0),
                          # factorizable transfer functions
                          (-2., -1., -1., True, True, False, 0, 0),
                          (-2., 0., -1., True, True, False, 0, 0),
                          (-2., -1., 0., True, True, False, 0, 0),
                          (-2., 0., 0., True, True, False, 0, 0),
                          (0., 0., 0., True, True, False, 0, 0),
                          # Unit transfer functions
                          (-2., 0., 0., False, False, False, 0, 0),
                          (0., 0., 0., False, False, False, 0, 0),
                          # Mismatch in kernel-transfer coverage
                          (-2., -1., 0., True, True, True, 0, 0),
                          (-2., -1., 0., False, True, True, 0, 0),
                          # non-zero der_bessel
                          (-2., -1., -1., False, True, False, -1, 0),
                          # non-zero der_angles
                          (-2., -1., -1., False, True, False, 0, 1),
                          (-2., -1., -1., False, True, False, 0, 2),
                          (-2., -1., -1., False, True, False, -1, 2)])
def test_tracers_analytic(set_up, alpha, beta, gamma,
                          is_factorizable, w_transfer,
                          mismatch, der_bessel, der_angles):
    """Benchmark ccl.angular_cl against the analytic power-law result.

    Builds a top-hat radial kernel over [zi, zf] with power-law transfer
    functions (chi^gamma in distance, k^beta in wavenumber) and a
    power-law power spectrum k^alpha, then checks the Limber C_ell
    against ``get_prediction`` to 0.5% relative accuracy.
    """
    cosmo = set_up
    # Redshift extent of the grids and of the top-hat kernel.
    zmax = 0.8
    zi = 0.4
    zf = 0.6
    nchi = 1024
    nk = 512
    # x arrays
    chii = ccl.comoving_radial_distance(cosmo, 1. / (1 + zi))
    chif = ccl.comoving_radial_distance(cosmo, 1. / (1 + zf))
    chimax = ccl.comoving_radial_distance(cosmo, 1. / (1 + zmax))
    chiarr = np.linspace(0.1, chimax, nchi)
    if mismatch:
        # Remove elements around the edges
        # (tests that the transfer grid need not cover the full kernel).
        mask = (chiarr < 0.9 * chif) & (chiarr > 1.1 * chii)
        chiarr_transfer = chiarr[mask]
    else:
        chiarr_transfer = chiarr
    # Scale-factor grids must be increasing, hence the [::-1] reversal.
    aarr_transfer = ccl.scale_factor_of_chi(cosmo, chiarr_transfer)[::-1]
    aarr = ccl.scale_factor_of_chi(cosmo, chiarr)[::-1]
    lkarr = np.log(10.**np.linspace(-6, 3, nk))
    # Kernel: top hat in comoving distance between chii and chif.
    wchi = np.ones_like(chiarr)
    wchi[chiarr < chii] = 0
    wchi[chiarr > chif] = 0
    # Transfer
    t = ccl.Tracer()
    if w_transfer:
        ta = (chiarr_transfer**gamma)[::-1]
        tk = np.exp(beta*lkarr)
        if is_factorizable:
            # 1D: separate a- and k-dependent factors.
            t.add_tracer(cosmo,
                         kernel=(chiarr, wchi),
                         transfer_k=(lkarr, tk),
                         transfer_a=(aarr_transfer, ta),
                         der_bessel=der_bessel,
                         der_angles=der_angles)
        else:
            # 2D: full (a, k) transfer array built as an outer product.
            tka = ta[:, None] * tk[None, :]
            t.add_tracer(cosmo,
                         kernel=(chiarr, wchi),
                         transfer_ka=(aarr_transfer, lkarr, tka),
                         der_bessel=der_bessel,
                         der_angles=der_angles)
    else:
        # Unit transfer function: kernel only.
        t.add_tracer(cosmo,
                     kernel=(chiarr, wchi),
                     der_bessel=der_bessel,
                     der_angles=der_angles)
    # Power spectrum: a-independent power law k^alpha.
    pkarr = np.ones_like(aarr)[:, None] * (np.exp(alpha * lkarr))[None, :]
    pk2d = ccl.Pk2D(a_arr=aarr, lk_arr=lkarr, pk_arr=pkarr, is_logp=False)
    # C_ells
    larr = np.linspace(2, 3000, 100)
    # 1D
    cl = ccl.angular_cl(cosmo, t, t, larr, p_of_k_a=pk2d)
    # Prediction
    clpred = get_prediction(larr, chii, chif,
                            alpha, beta, gamma,
                            der_bessel, der_angles)
    # 0.5% relative agreement across all multipoles.
    assert np.all(np.fabs(cl / clpred - 1) < 5E-3)
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@benchmarks@test_tracers.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sebastian-zieba/PACMAN",
"repo_path": "PACMAN_extracted/PACMAN-master/src/pacman/lib/models/__init__.py",
"type": "Python"
}
|
from . import constant
from . import constants_cj
from . import divide_white
from . import eclipse
from . import exponential_visit
from . import gp_matern32
from . import gp_sho
from . import logarithmic_visit
from . import model_ramp
from . import polynomial1
from . import polynomial2
from . import sine1
from . import sine2
from . import sine_curve
from . import transit
from . import uncmulti
from . import upstream_downstream
|
sebastian-ziebaREPO_NAMEPACMANPATH_START.@PACMAN_extracted@PACMAN-master@src@pacman@lib@models@__init__.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "AlexLaroche7/xp_vae",
"repo_path": "xp_vae_extracted/xp_vae-main/xp_vae/utils.py",
"type": "Python"
}
|
import torch
import numpy as np
from numpy.typing import NDArray
import copy
import random
rng = np.random.default_rng()
class DataGenerator(torch.utils.data.Dataset):
    """Mini-batch generator over XP spectra and their uncertainties.

    The data are shuffled at construction time (and on every call to
    :meth:`epoch_end`); each iteration pass yields ``steps_per_epoch``
    batches of ``(xp, xp_err)`` arrays, dropping any final partial batch.
    """

    def __init__(self,
                 batch_size: int,
                 xp: NDArray,
                 xp_err: NDArray
                 ):
        """
        Parameters
        ----------
        batch_size : int
            Number of rows per yielded batch.
        xp : NDArray
            Spectral coefficients, one row per source.
        xp_err : NDArray
            Matching uncertainties, same leading dimension as ``xp``.
        """
        self.batch_size = batch_size
        # NOTE(review): the original code computed a joint NaN mask here
        # ((~isnan(xp)).sum & (~isnan(xp_err)).sum) but never applied it,
        # so rows containing NaNs were always kept.  That dead code is
        # removed; filter upstream if NaN-free batches are required.
        # Deep copies keep the generator independent of caller mutations.
        self.xp = copy.deepcopy(xp)
        self.xp_err = copy.deepcopy(xp_err)
        self.data_length = len(self.xp)
        # Integer division drops the final partial batch, if any.
        self.steps_per_epoch = self.data_length // self.batch_size
        self.idx_list = np.arange(self.data_length)
        self.epoch_xp = None
        self.epoch_xp_err = None
        # Produce the first shuffled epoch immediately.
        self.epoch_end()

    def __iter__(self):
        """Yield ``(xp, xp_err)`` batches from the current shuffled epoch."""
        for i in range(len(self)):
            batch_idxs = self.idx_list[i * self.batch_size:(i + 1) * self.batch_size]
            yield (self.epoch_xp[batch_idxs], self.epoch_xp_err[batch_idxs])

    def __len__(self):
        # Number of full batches available per epoch.
        return self.steps_per_epoch

    def epoch_end(self):
        """Reshuffle the data with a fresh random permutation."""
        # (The original also built a zip of (xp, xp_err) here and
        # immediately discarded it; that dead statement is removed.)
        perm = np.random.permutation(self.data_length)
        self.epoch_xp, self.epoch_xp_err = self.xp[perm], self.xp_err[perm]
|
AlexLaroche7REPO_NAMExp_vaePATH_START.@xp_vae_extracted@xp_vae-main@xp_vae@utils.py@.PATH_END.py
|
{
"filename": "Check_fields.ipynb",
"repo_name": "desy-multimessenger/nuztf",
"repo_path": "nuztf_extracted/nuztf-main/notebooks/Check_fields.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%matplotlib inline
from ztfquery import fields, query
from astropy import time
import datetime
```
```python
fields_to_check = [1568, 522]
```
```python
for field_number in fields_to_check:
print(field_number, fields.has_field_reference(field_number))
```
1568 {'zg': True, 'zr': True, 'zi': True}
522 {'zg': True, 'zr': True, 'zi': True}
Check ZTF observations for the last month, to see which fields have been observed:
```python
def get_zquery_partnership(start_date=None, end_date=None):
    """Plot ZTF gri field coverage for partnership data (pid > 1).

    Dates are "YYYY-MM-DD" strings; by default the window is the 30 days
    up to today.
    """
    date_format = "%Y-%m-%d"
    if start_date is None:
        now = datetime.datetime.now()
        last_month = now - datetime.timedelta(days=30)
        start_date = last_month.strftime(date_format)
    if end_date is None:
        now = datetime.datetime.now()
        end_date = now.strftime(date_format)
    # Convert to JD
    jd_start = time.Time(start_date).jd
    jd_end = time.Time(end_date).jd
    # Do the Query to see what exists
    zquery = query.ZTFQuery()
    zquery.load_metadata(
        sql_query="pid>1 and obsjd BETWEEN {0} AND {1}".format(jd_start, jd_end)
    )  # this will take about 1min
    # Render per-band (g/r/i) coverage on the main field grid.
    zquery.show_gri_fields(
        title="ZTF observations in from {0} to {1}".format(start_date, end_date),
        grid="main",
    )
```
```python
get_zquery_partnership()
```
```python
```
|
desy-multimessengerREPO_NAMEnuztfPATH_START.@nuztf_extracted@nuztf-main@notebooks@Check_fields.ipynb@.PATH_END.py
|
{
"filename": "scriptQTestEdit.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/ObitSD/share/scripts/scriptQTestEdit.py",
"type": "Python"
}
|
# Program to test editing of OTF data from python in Obit
# Test for Richard Prestages Q band data
import OTF, OTFUtil, OErr, OSystem
from Obit import Bomb
# Init Obit
# (Python 2 script: note the print statement near the end.)
err=OErr.OErr()
ObitSys=OSystem.OSystem ("Python", 1, 103, 1, ["None"], 1, ["../PythonData/"], 1, 0, err)
OErr.printErrMsg(err, "Error with Obit startup")
# Bomb if needed for debugging
#Bomb()
# Files
disk = 1
inFile = "QbandOTF.fits" # input OTF data
# Editing
timerange = [0.0,1.0e20] # all times
chans = [1,0] # all channels
flagVer = 1 # Flagging table 1
target = "Any" # all targets
feed = 2 # Flag microphonic data
stokes = "100" # Stokes flag
reason = "Microphonic" # reason string
# Set data
inData = OTF.newPOTF("Input data", inFile, disk, 1, err)
OErr.printErrMsg(err, "Error creating input data object")
# Make edit
# Flag feed 2 and then feed 4 over all times/channels/targets,
# writing both entries to flag table `flagVer`.
OTFUtil.PFlag(inData,err,timerange,target,flagVer,chans,stokes,feed,reason)
feed = 4
OTFUtil.PFlag(inData,err,timerange,target,flagVer,chans,stokes,feed,reason)
OErr.printErrMsg(err, "Error writing flag table")
# tell results
print "Flag",inFile,"timerange",timerange,"stokes",stokes,'feed',feed
# Shutdown Obit
OErr.printErr(err)
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@ObitSD@share@scripts@scriptQTestEdit.py@.PATH_END.py
|
{
"filename": "Usage_Example.py",
"repo_name": "beastraban/INSANE",
"repo_path": "INSANE_extracted/INSANE-main/INSANE/src/INSANE/Usage_Example.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 28 13:18:31 2015
@author: Spawn
"""
from __future__ import division
from __future__ import print_function
import sys
import platform
from INSANE.BackgroundGeometry import BackgroundSolver
import os
"""
Here we need to define the parameters to run the script:
I am defining a range for, say l4, and I draw random values from this range.
"""
from INSANE.MsSolver16 import MsSolver
from scipy import io
import numpy as np
import matplotlib.pyplot as plt
import time as TIME
import sympy as sym
from configparser import ConfigParser
def main():
    """Run the INSANE background + Mukhanov-Sasaki pipeline driven by a
    params.ini file (path from argv[1], default ./params.ini).

    Either processes potentials drawn from a shared list file
    (``from_list`` mode, writing results/FORMAT files) or solves the
    single potential defined in the ini file and saves OUTPUT.mat and a
    plot to the working directory.

    NOTE(review): indentation below is reconstructed from the flattened
    source; the nesting of the try/except/if chains follows the paired
    if/else structure of the two branches.
    """
    Platform=platform.system()
    WD=os.getcwd()
    if len(sys.argv)<2:
        filename=(r'./params.ini')
    else:
        filename=sys.argv[1]
    config = ConfigParser()
    # Path separator differs between Linux and Windows.
    if Platform.lower() =='linux':
        config.read(filename)
        WD=WD+'/'
    else:
        config.read(filename)
        WD=WD+'\\'
    print(WD)
    input("Press Enter to continue...")
    """
    Define initial conditions and other background quantities from params.ini file
    """
    # WARNING: eval() on config values executes arbitrary text from the
    # ini file; only run with trusted parameter files.
    symbolic=eval(config.get('main', 'symbolic'))
    V0=np.float64(config.get('main', 'V0'))
    l1=np.float64(config.get('main', 'l1'))
    l2=np.float64(config.get('main', 'l2'))
    l3=np.float64(config.get('main', 'l3'))
    l4=np.float64(config.get('main', 'l4'))
    l5=np.float64(config.get('main', 'l5'))
    l6=np.float64(config.get('main', 'l6'))
    l7=np.float64(config.get('main', 'l7'))
    phi=eval(config.get('main', 'phi'))
    V=eval(config.get('main', 'V'))
    H0=np.float64(eval(config.get('main', 'H0')))
    phidot0=np.float64(config.get('main', 'phidot0'))
    efolds=np.float64(eval(config.get('main', 'efolds')))
    efoldsago=np.float64(config.get('main', 'efoldsago'))
    phi0=np.float64(config.get('main', 'phi0'))
    Tprecision=np.float64(config.get('main', 'Tprecision'))
    from_list=eval(config.get('main', 'from_list'))
    feed=eval(config.get('main', 'feedback'))
    # Map the numeric feedback level to a mode string.
    if (feed==0):
        feedback='silent'
    else:
        if (feed==1):
            feedback='verbose'
        else:
            feedback='graphic'
    physical=eval(config.get('main', 'physical'))
    EndK=np.float64(eval(config.get('main', 'EndK')))
    StartK=np.float64(eval(config.get('main', 'StartK')))
    """
    Define initial conditions and other quantities for MS solver from params.ini file
    """
    MStimePrec=np.float64(eval(config.get('MS', 'MStimeprec')))
    MSkPrec=np.int32(eval(config.get('MS', 'MSkPrec')))
    Klog=eval(config.get('MS', 'Klog'))
    MSfeed=np.int32(eval(config.get('MS', 'MSfeedback')))
    if MSfeed==0:
        MSfeedback='silent'
    else:
        if MSfeed==1:
            MSfeedback='verbose'
        else:
            MSfeedback='graphic'
    Anfeed=np.int32(eval(config.get('MS', 'AnFeedback')))
    if Anfeed==0:
        AnFeedback='silent'
    else:
        if Anfeed==1:
            AnFeedback='verbose'
        else:
            AnFeedback='graphic'
    CMBpoint=config.get('MS', 'CMBpoint')
    pivotScale=np.float64(eval(config.get('MS', 'pivotScale')))
    deg=config.get('MS', 'deg')
    logfile=config.get('LOGFILE','logfile')
    # Redirect stdout to the log file if one was requested.
    if not logfile=='':
        sys.stdout = open('./'+logfile, 'w')
    if deg=='None':
        deg=None
    else:
        deg=np.int32(deg)
    if from_list:
        NumOfFiles=10
        number=sys.argv[1]
        if (len(sys.argv)>2):
            degree=np.int32(sys.argv[2])
        else:
            degree=None;
    def sendMail(to,n):
        """
        you may insert a method that sends a mail to your email when done.
        """
        print('redacted')
    def parseCoeffs(string):
        """
        returns a tuple of the coefficients l1...l5,phi0 that have been stored in
        txt format as "l1;l2;l3;l4;l5;l6;phi0"
        Parameters
        -----------
        string the string to parse
        Returns
        -----------
        a 5 element list [l1,l2,l3,l4,l5,l6,phi0] of floats after parsing
        """
        STR=string
        print(STR)
        if isinstance(STR, str):
            STR=str(STR)
            STR=STR.split(";")
            return([np.double(STR[0]),np.double(STR[1]),np.double(STR[2]),np.double(STR[3]),np.double(STR[4]),np.double(STR[5]),np.double(STR[6])])
        else:
            e=Exception()
            e.message="not a proper string"
            raise e
            return([])
    plat=platform.system()
    """
    read the line from the list, and delete it from the list
    """
    if from_list:
        # Worker mode: repeatedly pop a parameter line from a shared
        # list file until the file is empty.
        fileread="/home/spawn/Dropbox/PEGASUS/list{0}.txt".format(number)
        filewrite="/home/spawn/Dropbox/PEGASUS/PEGASUS results/results{0}.txt".format(number)
        ii=0
        while not (os.stat(fileread).st_size == 0):
            fileid=open(fileread,"r")
            try:
                # Take the first line, then rewrite the file without it.
                content=fileid.readlines()
                line=content[0]
                Vars=parseCoeffs(line)
                fileid.close()
                content=content[1:]
                fileid=open(fileread,"w")
                for line in content:
                    fileid.write(line)
                fileid.flush()
                fileid.close()
            except Exception as e:
                fileid.close()
                print(e.message)
            l1=Vars[0]
            l2=Vars[1]
            l3=Vars[2]
            l4=Vars[3]
            l5=Vars[4]
            l6=Vars[5]
            l7=0
            phi0=Vars[6]
            V0=1
            # Rebuild the symbolic potential from the listed coefficients.
            phi=sym.Symbol('phi')
            V=1+l1*phi+l2*phi**2 +l3*phi**3 +l4*phi**4 +l5*phi**5 +l6*phi**6 +l7*phi**7
            try:
                solver=BackgroundSolver(l1,l2,l3,l4,l5,H0,phi0,1,phidot0,Tprecision=Tprecision,poly=(not symbolic),Pot=V,mode=feedback)
                solver.Solve()
                if (solver.message=="Inflation ended - epsilon~1"):#or(solver.message=="Too long"):
                    #execfile("MS_script1.py")
                    if (solver.a[len(solver.a)-1]>50):
                        try:
                            # Mukhanov-Sasaki evolution on the solved background.
                            Solver=MsSolver(solver.a,solver.H,solver.phi,solver.phidot,solver.t,solver.epsilon,solver.eta,solver.xisq,solver.vd4v,solver.vd5v,solver.v,solver.eh,solver.deltah,solver.delpp,Tprecision=MStimePrec,Kprecision=MSkPrec,efoldsAgo=efoldsago,efoldsNum=efolds,log=Klog,mode=CMBpoint)
                            Solver.prepareToWork(mode=MSfeedback)
                            Solver.buildUKT(mode=MSfeedback)
                            if (Solver.message=="Not a slow roll evolution"):
                                result="{0};{1};{2};{3};{4};{5};{6};{7};{8}; -- {9}".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0,Solver.message)
                            else:
                                try:
                                    Solver.analysis(mode=AnFeedback,pivotScale=pivotScale,deg=deg)
                                    assymNRfull=200*np.abs(Solver.analNrun2-Solver.nrun)/np.abs(Solver.analNrun2+Solver.nrun)
                                    assymNRhalf=200*np.abs(Solver.analNrunHalf-Solver.nrun)/np.abs(Solver.analNrunHalf+Solver.nrun)
                                    assymNS=200*np.abs((Solver.analNs-Solver.ns)/(Solver.analNs+Solver.ns))
                                    result="{0};{1};{2};{3};{4};{5};{6};{7};{8};{9};{10};{11};{12};{13};{14};{15};{16};{17};{18};{19};{20};{21};{22};{23}".format(l1,l2,l3,l4,l5,l6,Solver.coeffs[0],Solver.coeffs[1],Solver.coeffs[2],Solver.coeffs[3],Solver.coeffs[4],Solver.coeffs[5],Solver.coeffs[6],Solver.phiCMB,phi0,phidot0,H0,efolds,efoldsago,Solver.Slope,Solver.ns,Solver.nrun,Solver.nrunrun,Solver.error)#,assymNS,assymNRfull,assymNRhalf)
                                except Exception as e:
                                    print(e.message)
                                    result=e.message
                        except Exception as e:
                            result="{0};{1};{2};{3};{4};{5};{6};{7};{8};{9};{10};error message:'{11}'".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0,efolds,efoldsago,e.message)
                    else:
                        if (solver.message=="Too long"):
                            print("Too long")
                            result="{0};{1};{2};{3};{4};{5};{6};{7};{8}; -- Too long".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0)
                        else:
                            print("not enough efolds for physical inflation")
                            result="{0};{1};{2};{3};{4};{5};{6};{7};{8}; -- not enough efolds for physical inflation".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0)
                else:
                    if (solver.message=="Too long"):
                        print("Too long")
                        result="{0};{1};{2};{3};{4};{5};{6};{7};{8}; -- Too long".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0)
                    else:
                        print(solver.message)
                        result="{0};{1};{2};{3};{4};{5};{6};{7};{8};{9}".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0,solver.message)
                # break
                #;{10};{11},{12}
                # Append the result line; paths differ per platform.
                if (plat=='Linux'):
                    results=open(filewrite,"a")
                    FORMAT=open("/home/spawn/Dropbox/PEGASUS/PEGASUS results/FORMAT.txt","a")
                else:
                    results =open("E:\\Dropbox\\PEGASUS\\PEGASUS results\\results.txt","a")
                    FORMAT=open("E:\\Dropbox\\PEGASUS\\PEGASUS results\\FORMAT.txt","a")
                results.write(result+"\n")
                results.flush()
                results.close()
                # Write the column header only on the first iteration.
                if (ii==0):
                    __format="l1,l2,l3,l4,l5,,l6,V0,a1,a2,a3,a4,a5,a6,phiCMB,phi0,phidot0,H0,efolds,efoldsago,slope,ns,nrun,nrunrun,error"
                    FORMAT.write(__format+"\n")
                    FORMAT.flush()
                    FORMAT.close()
                TIME.sleep(1)
            # except Exception as e:
            #     print("something went wrong")
            #     print(e.message)
            #     print("Moving on then...")
            except Exception as e:
                print(e.message)
                print('didnt go that well')
            ii=ii+1
    else:
        # Single-potential mode: parameters come from the ini file.
        solver=BackgroundSolver(l1,l2,l3,l4,l5,H0,phi0,1,phidot0,Tprecision=Tprecision,poly=(not symbolic),Pot=V,mode=feedback)
        solver.Solve()
        if (solver.message=="Inflation ended - epsilon~1"):#or(solver.message=="Too long"):
            #execfile("MS_script1.py")
            if (solver.a[len(solver.a)-1]>50):
                try:
                    Solver=MsSolver(solver.a,solver.H,solver.phi,solver.phidot,solver.t,solver.epsilon,solver.eta,solver.xisq,solver.vd4v,solver.vd5v,solver.v,solver.eh,solver.deltah,solver.delpp,Tprecision=MStimePrec,Kprecision=MSkPrec,efoldsAgo=efoldsago,efoldsNum=efolds,log=Klog,mode=CMBpoint)
                    Solver.prepareToWork(mode=MSfeedback)
                    Solver.buildUKT(mode=MSfeedback)
                    if (Solver.message=="Not a slow roll evolution"):
                        result="{0};{1};{2};{3};{4};{5};{6};{7};{8}; -- {9}".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0,Solver.message)
                    else:
                        try:
                            Solver.analysis(mode=AnFeedback,pivotScale=pivotScale,deg=deg)
                            Solver.image.savefig(WD+'OutputPlot.png')
                            asymNRfull=200*np.abs(Solver.analNrun2-Solver.nrun)/np.abs(Solver.analNrun2+Solver.nrun)
                            asymNRhalf=200*np.abs(Solver.analNrunHalf-Solver.nrun)/np.abs(Solver.analNrunHalf+Solver.nrun)
                            asymNS=200*np.abs((Solver.analNs-Solver.ns)/(Solver.analNs+Solver.ns))
                            asymNR=200*np.abs((Solver.nrun-Solver.analNrun)/(Solver.nrun+Solver.analNrun))
                            if Solver.nrunrun is not None:
                                nrr=Solver.nrunrun
                            else:
                                nrr=0
                            result="{0};{1};{2};{3};{4};{5};{6};{7};{8};{9};{10};{11};{12};{13};{14};{15};{16};{17};{18};{19};{20};{21};{22};{23}".format(l1,l2,l3,l4,l5,l6,Solver.coeffs[0],Solver.coeffs[1],Solver.coeffs[2],Solver.coeffs[3],Solver.coeffs[4],Solver.coeffs[5],Solver.coeffs[6],Solver.phiCMB,phi0,phidot0,H0,efolds,efoldsago,Solver.Slope,Solver.ns,Solver.nrun,Solver.nrunrun,Solver.error)#,assymNS,assymNRfull,assymNRhalf)
                            io.savemat(WD+'OUTPUT.mat',dict(analNS=Solver.analNs,analNR=Solver.analNrun,ns=Solver.ns,nr=Solver.nrun,nrr=Solver.nrunrun,lk=Solver.lk,lps=Solver.lps,asymNS=asymNS,asymNR=asymNR))
                        except Exception as e:
                            print(e.message)
                            result=e.message
                except Exception as e:
                    result="{0};{1};{2};{3};{4};{5};{6};{7};{8};{9};{10};error message:'{11}'".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0,efolds,efoldsago,e.message)
            else:
                if (solver.message=="Too long"):
                    print("Too long")
                    result="{0};{1};{2};{3};{4};{5};{6};{7};{8}; -- Too long".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0)
                else:
                    print("not enough efolds for physical inflation")
                    result="{0};{1};{2};{3};{4};{5};{6};{7};{8}; -- not enough efolds for physical inflation".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0)
        else:
            if (solver.message=="Too long"):
                print("Too long")
                result="{0};{1};{2};{3};{4};{5};{6};{7};{8}; -- Too long".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0)
            else:
                print(solver.message)
                result="{0};{1};{2};{3};{4};{5};{6};{7};{8};{9}".format(V0,l1,l2,l3,l4,l5,phi0,phidot0,H0,solver.message)
    print('done')
    #input("Press Enter to continue...")
|
beastrabanREPO_NAMEINSANEPATH_START.@INSANE_extracted@INSANE-main@INSANE@src@INSANE@Usage_Example.py@.PATH_END.py
|
{
"filename": "distributed_datamodel.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/community/distributed/distributed_datamodel.py",
"type": "Python"
}
|
from amuse.datamodel import Particles, Particle
class Resources(Particles):
    """Particle set holding distributed-computing resources."""

    def __getitem__(self, index):
        keys = self.get_all_keys_in_store()[index]
        indices = self.get_all_indices_in_store()[index]
        # A scalar key addresses one Resource; an iterable yields a subset.
        if not hasattr(keys, '__iter__'):
            return Resource(keys, self, indices, self._get_version())
        return self._subset(keys)

    def __iter__(self):
        version = self._get_version()
        all_keys = self.get_all_keys_in_store()
        all_indices = self.get_all_indices_in_store()
        for key, idx in zip(all_keys, all_indices):
            yield Resource(key, self, idx, version)

    # Resource-flavoured aliases for the generic Particles API.
    add_resource = Particles.add_particle
    add_resources = Particles.add_particles
    remove_resource = Particles.remove_particle
    remove_resources = Particles.remove_particles
class Resource(Particle):
    """A single resource: a view on one row of a Resources set."""

    def __init__(self, key = None, particles_set = None, set_index = -1, set_version = -1, **keyword_arguments):
        if particles_set is None:
            # Stand-alone construction: back the particle by a fresh set.
            if key is None:
                particles_set = Resources(1)
                key = particles_set.get_all_keys_in_store()[0]
            else:
                particles_set = Resources(1, keys = [key])
        # Particle overrides __setattr__, so bypass it for bookkeeping slots.
        for name, value in (("key", key),
                            ("particles_set", particles_set),
                            ("_set_index", set_index),
                            ("_set_version", set_version)):
            object.__setattr__(self, name, value)
        # Remaining keywords become particle attributes via the normal path.
        for attribute_name, attribute_value in keyword_arguments.items():
            setattr(self, attribute_name, attribute_value)
class Pilots(Particles):
    """Particle set holding pilot jobs."""

    def __getitem__(self, index):
        keys = self.get_all_keys_in_store()[index]
        indices = self.get_all_indices_in_store()[index]
        # A scalar key addresses one Pilot; an iterable yields a subset.
        if not hasattr(keys, '__iter__'):
            return Pilot(keys, self, indices, self._get_version())
        return self._subset(keys)

    def __iter__(self):
        version = self._get_version()
        all_keys = self.get_all_keys_in_store()
        all_indices = self.get_all_indices_in_store()
        for key, idx in zip(all_keys, all_indices):
            yield Pilot(key, self, idx, version)

    # Pilot-flavoured aliases for the generic Particles API.
    add_pilot = Particles.add_particle
    add_pilots = Particles.add_particles
    remove_pilot = Particles.remove_particle
    remove_pilots = Particles.remove_particles
class Pilot(Particle):
    """A single pilot job: a view on one row of a Pilots set."""

    def __init__(self, key = None, particles_set = None, set_index = -1, set_version = -1, **keyword_arguments):
        if particles_set is None:
            # Stand-alone construction: back the particle by a fresh set.
            if key is None:
                particles_set = Pilots(1)
                key = particles_set.get_all_keys_in_store()[0]
            else:
                particles_set = Pilots(1, keys = [key])
        # Particle overrides __setattr__, so bypass it for bookkeeping slots.
        for name, value in (("key", key),
                            ("particles_set", particles_set),
                            ("_set_index", set_index),
                            ("_set_version", set_version)):
            object.__setattr__(self, name, value)
        for attribute_name, attribute_value in keyword_arguments.items():
            setattr(self, attribute_name, attribute_value)
class ScriptJobs(Particles):
    """Particle set holding script jobs."""

    def __getitem__(self, index):
        keys = self.get_all_keys_in_store()[index]
        indices = self.get_all_indices_in_store()[index]
        # A scalar key addresses one ScriptJob; an iterable yields a subset.
        if not hasattr(keys, '__iter__'):
            return ScriptJob(keys, self, indices, self._get_version())
        return self._subset(keys)

    def __iter__(self):
        version = self._get_version()
        all_keys = self.get_all_keys_in_store()
        all_indices = self.get_all_indices_in_store()
        for key, idx in zip(all_keys, all_indices):
            yield ScriptJob(key, self, idx, version)

    # Job-flavoured aliases for the generic Particles API.
    submit_script_job = Particles.add_particle
    submit_script_jobs = Particles.add_particles
    cancel_script_job = Particles.remove_particle
    cancel_script_jobs = Particles.remove_particles
    remove_script_job = Particles.remove_particle
    remove_script_jobs = Particles.remove_particles
class ScriptJob(Particle):
    """A single script job: a view on one row of a ScriptJobs set."""

    def __init__(self, key = None, particles_set = None, set_index = -1, set_version = -1, **keyword_arguments):
        if particles_set is None:
            # Stand-alone construction: back the particle by a fresh set.
            if key is None:
                particles_set = ScriptJobs(1)
                key = particles_set.get_all_keys_in_store()[0]
            else:
                particles_set = ScriptJobs(1, keys = [key])
        # Particle overrides __setattr__, so bypass it for bookkeeping slots.
        for name, value in (("key", key),
                            ("particles_set", particles_set),
                            ("_set_index", set_index),
                            ("_set_version", set_version)):
            object.__setattr__(self, name, value)
        for attribute_name, attribute_value in keyword_arguments.items():
            setattr(self, attribute_name, attribute_value)
class FunctionJobs(Particles):
    """Particle set holding function jobs."""

    def __getitem__(self, index):
        keys = self.get_all_keys_in_store()[index]
        indices = self.get_all_indices_in_store()[index]
        # A scalar key addresses one FunctionJob; an iterable yields a subset.
        if not hasattr(keys, '__iter__'):
            return FunctionJob(keys, self, indices, self._get_version())
        return self._subset(keys)

    def __iter__(self):
        version = self._get_version()
        all_keys = self.get_all_keys_in_store()
        all_indices = self.get_all_indices_in_store()
        for key, idx in zip(all_keys, all_indices):
            yield FunctionJob(key, self, idx, version)

    # Job-flavoured aliases for the generic Particles API.
    submit_function_job = Particles.add_particle
    submit_function_jobs = Particles.add_particles
    cancel_function_job = Particles.remove_particle
    cancel_function_jobs = Particles.remove_particles
    remove_function_job = Particles.remove_particle
    remove_function_jobs = Particles.remove_particles
class FunctionJob(Particle):
    """A single function job backed by a row of a FunctionJobs set.

    Parameters
    ----------
    key : hashable, optional
        Key of this job inside ``particles_set``. When both ``key`` and
        ``particles_set`` are omitted, a fresh one-element set is created
        and its generated key is adopted.
    particles_set : FunctionJobs, optional
        Backing set; created on demand when omitted.
    set_index, set_version : int
        Cached storage index / set version (-1 when not yet known).
    **keyword_arguments
        Initial attribute values, forwarded via ``setattr`` so they land
        in the backing set.
    """
    def __init__(self, key = None, particles_set = None, set_index = -1, set_version = -1, **keyword_arguments):
        if particles_set is None:
            # Use identity comparison: '== None' could trigger a custom
            # __eq__ on key-like objects.
            if key is None:
                particles_set = FunctionJobs(1)
                key = particles_set.get_all_keys_in_store()[0]
            else:
                particles_set = FunctionJobs(1, keys = [key])
        # Bypass Particle.__setattr__, which forwards writes to the set.
        object.__setattr__(self, "key", key)
        object.__setattr__(self, "particles_set", particles_set)
        object.__setattr__(self, "_set_index", set_index)
        object.__setattr__(self, "_set_version", set_version)
        for attribute_name, attribute_value in keyword_arguments.items():
            setattr(self, attribute_name, attribute_value)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@community@distributed@distributed_datamodel.py@.PATH_END.py
|
{
"filename": "plot_freq.py",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/examples/waveform/plot_freq.py",
"type": "Python"
}
|
import matplotlib.pyplot as pp
from pycbc import waveform

# Plot the instantaneous gravitational-wave frequency versus time for
# SpinTaylorT4 time-domain waveforms at several post-Newtonian phase orders
# (equal-mass 10+10 binary, 1/4096 s sampling, 100 Hz lower frequency cutoff).
for phase_order in [2, 3, 4, 5, 6, 7]:
    hp, hc = waveform.get_td_waveform(approximant='SpinTaylorT4',
                                      mass1=10, mass2=10,
                                      phase_order=phase_order,
                                      delta_t=1.0/4096,
                                      f_lower=100)
    # Strip zero padding so derived series cover only the actual signal.
    hp, hc = hp.trim_zeros(), hc.trim_zeros()
    amp = waveform.utils.amplitude_from_polarizations(hp, hc)  # NOTE(review): computed but unused here
    f = waveform.utils.frequency_from_polarizations(hp, hc)
    pp.plot(f.sample_times, f, label="PN Order = %s" % phase_order)
pp.ylabel('Frequency (Hz)')
pp.xlabel('Time (s)')
pp.legend(loc='upper left')
pp.show()
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@examples@waveform@plot_freq.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/utils/masked/tests/__init__.py",
"type": "Python"
}
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@utils@masked@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "test_likelihood.py",
"repo_name": "rfeldmann/leopy",
"repo_path": "leopy_extracted/leopy-master/leopy/test/test_likelihood.py",
"type": "Python"
}
|
"""Simple test cases for the leopy.likelihood module.
This module is part of LEO-Py -- \
Likelihood Estimation of Observational data with Python
Copyright 2019 University of Zurich, Robert Feldmann
LEO-Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
LEO-Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with LEO-Py. If not, see <https://www.gnu.org/licenses/>.
"""
import numpy as np
import pandas as pd
import scipy.stats
import scipy.optimize
import sys
import time
import pytest
import leopy
@pytest.fixture
def obs_norm_no_error():
    """Observation with two correlated normal columns and zero errors."""
    np.random.seed(10)
    marginal = scipy.stats.norm
    n_rows = 100
    corr = 0.5
    corr_matrix = np.array([[1., corr], [corr, 1.]])
    true_loc = [-1., 2.]
    true_scale = [1., 3.]
    # Gaussian copula: correlated latent normals mapped through the
    # marginal's quantile function.
    latent = scipy.stats.multivariate_normal.rvs(cov=corr_matrix, size=n_rows)
    sample = marginal.ppf(
        scipy.stats.norm.cdf(latent), loc=true_loc, scale=true_scale)
    # Force the sample moments to match the requested loc/scale exactly.
    sample *= true_scale / np.std(sample, axis=0)
    sample += true_loc - np.mean(sample, axis=0)
    errs = np.zeros_like(sample)
    frame = pd.DataFrame(
        np.array([sample[:, 0], sample[:, 1], errs[:, 0], errs[:, 1]]).T,
        columns=['v0', 'v1', 'e_v0', 'e_v1'])
    return leopy.Observation(frame, 'test', verbosity=0)
@pytest.fixture
def obs_lognorm_no_error():
    """Observation with two correlated lognormal columns and zero errors."""
    np.random.seed(14)
    marginal = scipy.stats.lognorm
    n_rows = 100
    corr = 0.5
    corr_matrix = np.array([[1., corr], [corr, 1.]])
    true_loc = np.array([0., 2.])
    true_scale = np.array([1., 3.])
    true_shape = np.array([0.5, 1.5])
    # Gaussian copula: correlated latent normals mapped through the
    # lognormal quantile function.
    latent = scipy.stats.multivariate_normal.rvs(cov=corr_matrix, size=n_rows)
    sample = marginal.ppf(
        scipy.stats.norm.cdf(latent),
        true_shape, loc=true_loc, scale=true_scale)
    errs = np.zeros_like(sample)
    frame = pd.DataFrame(
        np.array([sample[:, 0], sample[:, 1], errs[:, 0], errs[:, 1]]).T,
        columns=['v0', 'v1', 'e_v0', 'e_v1'])
    return leopy.Observation(frame, 'test', verbosity=0)
@pytest.fixture
def obs_norm_cen():
    """Correlated normal data with noise and random lower/upper censoring.

    Columns 'c_v*' flag censored entries, 'l_v*'/'u_v*' carry the limits
    (+/-inf where no limit applies); censored values themselves are NaN.
    """
    np.random.seed(16)
    dist = scipy.stats.norm
    Ndata = 200
    rho = 0.5
    R = np.array([[1., rho], [rho, 1.]])
    loc_true = np.array([0.5, 1.5])
    scale_true = np.array([1., 2.5])
    x = scipy.stats.multivariate_normal.rvs(cov=R, size=Ndata)
    y = dist.ppf(scipy.stats.norm.cdf(x),
                 loc=loc_true, scale=scale_true)
    # Add heteroscedastic-free (constant per column) measurement noise.
    ey = np.zeros_like(y)
    ey[:, 0] = 0.2
    ey[:, 1] = 0.1
    y[:, 0] += ey[:, 0] * np.random.randn(Ndata)
    y[:, 1] += ey[:, 1] * np.random.randn(Ndata)
    # Per-row random detection limits, clipped to stay positive.
    lower_limit = 0.3*np.random.randn(Ndata)+0.3
    lower_limit[lower_limit < 0.05] = 0.05
    upper_limit = 0.6*np.random.randn(Ndata)+2.5
    upper_limit[upper_limit < 0.2] = 0.2
    cy = np.zeros_like(y).astype(bool)
    # np.inf: the np.infty alias was removed in NumPy 2.0.
    ly = -np.inf*np.ones_like(y)
    uy = np.inf*np.ones_like(y)
    for i in range(2):
        sel = y[:, i] < lower_limit
        y[sel, i] = float('NaN')
        cy[sel, i] = True
        ly[sel, i] = lower_limit[sel]
        sel = y[:, i] > upper_limit
        y[sel, i] = float('NaN')
        cy[sel, i] = True
        uy[sel, i] = upper_limit[sel]
    df = pd.DataFrame(np.array([y[:, 0], y[:, 1], ey[:, 0], ey[:, 1],
                                cy[:, 0], cy[:, 1], ly[:, 0], ly[:, 1],
                                uy[:, 0], uy[:, 1]]).T,
                      columns=['v0', 'v1', 'e_v0', 'e_v1', 'c_v0', 'c_v1', 'l_v0',
                               'l_v1', 'u_v0', 'u_v1'])
    return leopy.Observation(df, 'test', verbosity=0)
@pytest.fixture
def obs_norm_cen_uncorr():
    """Like obs_norm_cen but with rho=0 (uncorrelated columns).

    Columns 'c_v*' flag censored entries, 'l_v*'/'u_v*' carry the limits
    (+/-inf where no limit applies); censored values themselves are NaN.
    """
    np.random.seed(16)
    dist = scipy.stats.norm
    Ndata = 200
    rho = 0.
    R = np.array([[1., rho], [rho, 1.]])
    loc_true = np.array([0.5, 1.5])
    scale_true = np.array([1., 2.5])
    x = scipy.stats.multivariate_normal.rvs(cov=R, size=Ndata)
    y = dist.ppf(scipy.stats.norm.cdf(x),
                 loc=loc_true, scale=scale_true)
    # Constant per-column measurement noise.
    ey = np.zeros_like(y)
    ey[:, 0] = 0.2
    ey[:, 1] = 0.1
    y[:, 0] += ey[:, 0] * np.random.randn(Ndata)
    y[:, 1] += ey[:, 1] * np.random.randn(Ndata)
    # Per-row random detection limits, clipped to stay positive.
    lower_limit = 0.3*np.random.randn(Ndata)+0.3
    lower_limit[lower_limit < 0.05] = 0.05
    upper_limit = 0.6*np.random.randn(Ndata)+2.5
    upper_limit[upper_limit < 0.2] = 0.2
    cy = np.zeros_like(y).astype(bool)
    # np.inf: the np.infty alias was removed in NumPy 2.0.
    ly = -np.inf*np.ones_like(y)
    uy = np.inf*np.ones_like(y)
    for i in range(2):
        sel = y[:, i] < lower_limit
        y[sel, i] = float('NaN')
        cy[sel, i] = True
        ly[sel, i] = lower_limit[sel]
        sel = y[:, i] > upper_limit
        y[sel, i] = float('NaN')
        cy[sel, i] = True
        uy[sel, i] = upper_limit[sel]
    df = pd.DataFrame(np.array([y[:, 0], y[:, 1], ey[:, 0], ey[:, 1],
                                cy[:, 0], cy[:, 1], ly[:, 0], ly[:, 1],
                                uy[:, 0], uy[:, 1]]).T,
                      columns=['v0', 'v1', 'e_v0', 'e_v1', 'c_v0', 'c_v1',
                               'l_v0', 'l_v1', 'u_v0', 'u_v1'])
    return leopy.Observation(df, 'test', verbosity=0)
@pytest.fixture
def obs_norm_MAR():
    """Correlated normal data with noise and missing-at-random entries.

    Each column's missingness probability depends logistically on the
    value of the *other* column (MAR rather than MCAR).
    """
    np.random.seed(16)
    dist = scipy.stats.norm
    Ndata = 100
    rho = 0.5
    R = np.array([[1., rho], [rho, 1.]])
    loc_true = np.array([0., 2.])
    scale_true = np.array([1., 3.])
    x = scipy.stats.multivariate_normal.rvs(cov=R, size=Ndata)
    y = dist.ppf(scipy.stats.norm.cdf(x),
                 loc=loc_true, scale=scale_true)
    # Constant per-column measurement noise.
    ey = np.zeros_like(y)
    ey[:, 0] = 0.2
    ey[:, 1] = 0.1
    y[:, 0] += ey[:, 0] * np.random.randn(Ndata)
    y[:, 1] += ey[:, 1] * np.random.randn(Ndata)
    def logistic(x):
        return np.exp(x) / (np.exp(x) + 1.)
    m1 = scipy.stats.bernoulli.rvs(logistic(y[:, 0]-1.)).astype(bool)  # for col 1
    m0 = scipy.stats.bernoulli.rvs(logistic(y[:, 1]-2.)).astype(bool)  # for col 0
    # float('NaN'): np.float was removed in NumPy 1.24.
    y[m1, 1] = float('NaN')
    y[m0, 0] = float('NaN')
    df = pd.DataFrame(np.array([y[:, 0], y[:, 1], ey[:, 0], ey[:, 1]]).T,
                      columns=['v0', 'v1', 'e_v0', 'e_v1'])
    return leopy.Observation(df, 'test', verbosity=0)
@pytest.fixture
def obs_lognorm_MAR():
    """Correlated lognormal data with noise and missing-at-random entries.

    Each column's missingness probability depends logistically on the
    value of the *other* column (MAR rather than MCAR).
    """
    np.random.seed(2)
    dist = scipy.stats.lognorm
    Ndata = 100
    rho = 0.5
    R = np.array([[1., rho], [rho, 1.]])
    loc_true = np.array([0., 2.])
    scale_true = np.array([1., 3.])
    shape_true = np.array([0.5, 1.5])
    x = scipy.stats.multivariate_normal.rvs(cov=R, size=Ndata)
    y = dist.ppf(scipy.stats.norm.cdf(x),
                 shape_true, loc=loc_true, scale=scale_true)
    # Constant per-column measurement noise.
    ey = np.zeros_like(y)
    ey[:, 0] = 0.2
    ey[:, 1] = 0.1
    y[:, 0] += ey[:, 0] * np.random.randn(Ndata)
    y[:, 1] += ey[:, 1] * np.random.randn(Ndata)
    def logistic(x):
        return np.exp(x) / (np.exp(x) + 1.)
    m1 = scipy.stats.bernoulli.rvs(logistic(y[:, 0]-2.)).astype(bool)  # for col 1
    m0 = scipy.stats.bernoulli.rvs(logistic(y[:, 1]-5.)).astype(bool)  # for col 0
    # float('NaN'): np.float was removed in NumPy 1.24.
    y[m1, 1] = float('NaN')
    y[m0, 0] = float('NaN')
    df = pd.DataFrame(np.array([y[:, 0], y[:, 1], ey[:, 0], ey[:, 1]]).T,
                      columns=['v0', 'v1', 'e_v0', 'e_v1'])
    return leopy.Observation(df, 'test', verbosity=0)
@pytest.fixture
def obs_norm_obscorr():
    """Correlated normal data with per-row *correlated* measurement errors.

    Column 'r_v0_v1' stores each row's error correlation coefficient.
    """
    np.random.seed(10)
    dist = scipy.stats.norm
    Ndata = 100
    rho = 0.5
    R = np.array([[1., rho], [rho, 1.]])
    loc_true = [-1., 2.]
    scale_true = [1., 3.]
    x = scipy.stats.multivariate_normal.rvs(cov=R, size=Ndata)
    y = dist.ppf(scipy.stats.norm.cdf(x), loc=loc_true, scale=scale_true)
    # Force the sample moments to match the requested loc/scale exactly.
    y *= scale_true / np.std(y, axis=0)
    y += loc_true - np.mean(y, axis=0)
    sigma_c = [1., 1.5]
    ey = np.zeros_like(y)
    ey[:, 0] = sigma_c[0]
    ey[:, 1] = sigma_c[1]
    rho_c = np.zeros(Ndata)
    error_y = np.zeros_like(y)
    for i in range(Ndata):
        # Error correlation drawn uniformly from [-0.99, -0.98]
        # (strongly anti-correlated errors).
        rho_c[i] = 0.01*np.random.rand()-0.99
        R_c = np.array([[1., rho_c[i]], [rho_c[i], 1.]])
        cov_c = np.diag(sigma_c).dot(R_c.dot(np.diag(sigma_c)))
        error_y[i, :] = scipy.stats.multivariate_normal.rvs(cov=cov_c)
    y += error_y
    df = pd.DataFrame(np.array([y[:, 0], y[:, 1], ey[:, 0], ey[:, 1],
                                rho_c]).T,
                      columns=['v0', 'v1', 'e_v0', 'e_v1', 'r_v0_v1'])
    return leopy.Observation(df, 'test', verbosity=0)
@pytest.fixture
def obs_lognorm_obscorr():
    """Correlated lognormal data with per-row *correlated* measurement errors.

    Column 'r_v0_v1' stores each row's error correlation coefficient.
    """
    np.random.seed(19)
    dist = scipy.stats.lognorm
    Ndata = 100
    rho = 0.5
    R = np.array([[1., rho], [rho, 1.]])
    loc_true = np.array([0., 2.])
    scale_true = np.array([1., 3.])
    shape_true = np.array([0.5, 1.5])
    x = scipy.stats.multivariate_normal.rvs(cov=R, size=Ndata)
    y = dist.ppf(scipy.stats.norm.cdf(x),
                 shape_true, loc=loc_true, scale=scale_true)
    sigma_c = [0.1, 0.2]
    ey = np.zeros_like(y)
    ey[:, 0] = sigma_c[0]
    ey[:, 1] = sigma_c[1]
    rho_c = np.zeros(Ndata)
    error_y = np.zeros_like(y)
    for i in range(Ndata):
        # Error correlation drawn uniformly from (-0.99, 0.99).
        rho_c[i] = 0.99*2*(np.random.rand()-0.5)
        R_c = np.array([[1., rho_c[i]], [rho_c[i], 1.]])
        cov_c = np.diag(sigma_c).dot(R_c.dot(np.diag(sigma_c)))
        error_y[i, :] = scipy.stats.multivariate_normal.rvs(cov=cov_c)
    y += error_y
    df = pd.DataFrame(np.array([y[:, 0], y[:, 1], ey[:, 0], ey[:, 1],
                                rho_c]).T,
                      columns=['v0', 'v1', 'e_v0', 'e_v1', 'r_v0_v1'])
    return leopy.Observation(df, 'test', verbosity=0)
@pytest.mark.usefixtures('pool')
class TestLikelihood:
    """Checks of leopy.Likelihood.p: agreement with scipy reference pdfs,
    regression values, and maximum-likelihood recovery of known inputs.

    The 'pool' fixture (provided externally) attaches self.pool, which is
    forwarded to every Likelihood.p call.
    """
    def test_1(self):
        # Uncorrelated normal model: p must factorize into per-column pdfs.
        d = {'v0': [1, 2], 'e_v0': [0.1, 0.2],
             'v1': [3, 4], 'e_v1': [0.1, 0.1]}
        df = pd.DataFrame(d)
        obs = leopy.Observation(df, 'testdata', verbosity=0)
        like = leopy.Likelihood(obs, p_true='norm', verbosity=-1)
        stddev = [1, 2]
        mean = [0.5, 0.7]
        p = like.p(mean, stddev, pool=self.pool)
        p_v1 = scipy.stats.norm.pdf(df['v0']-mean[0], scale=stddev[0])
        p_v2 = scipy.stats.norm.pdf(df['v1']-mean[1], scale=stddev[1])
        assert np.all(np.isclose(p.T[0], p_v1*p_v2))
    def test_2(self):
        # Correlated normal model vs scipy multivariate normal pdf.
        d = {'v0': [1., 2., -4.], 'e_v0': [0.1, 0.2, 0.3],
             'v1': [3., 4., 1.], 'e_v1': [0.1, 0.1, 0.1]}
        df = pd.DataFrame(d)
        obs = leopy.Observation(df, 'testdata', verbosity=0)
        like = leopy.Likelihood(obs, p_true='norm', verbosity=-1)
        R = np.array([[1, 0.6], [0.6, 1]])
        stddev = [1, 2]
        mean = [0.5, 0.7]
        cov = np.diag(stddev).dot(R.dot(np.diag(stddev)))
        p = like.p(mean, stddev, R_true=R, pool=self.pool)
        p_v1v2 = scipy.stats.multivariate_normal.pdf(
            df[['v0', 'v1']], mean=mean, cov=cov)
        assert np.all(np.isclose(p.T[0], p_v1v2))
    def test_3(self):
        # As test_2 but with negative correlation.
        d = {'v0': [1., 2., -4.], 'e_v0': [0.1, 0.2, 0.3],
             'v1': [3., 4., 1.], 'e_v1': [0.1, 0.1, 0.1]}
        df = pd.DataFrame(d)
        obs = leopy.Observation(df, 'testdata', verbosity=0)
        like = leopy.Likelihood(obs, p_true='norm', verbosity=-1)
        R = np.array([[1, -0.3], [-0.3, 1]])
        stddev = [1, 2]
        mean = [0.5, 0.7]
        cov = np.diag(stddev).dot(R.dot(np.diag(stddev)))
        p = like.p(mean, stddev, R_true=R, pool=self.pool)
        p_v1v2 = scipy.stats.multivariate_normal.pdf(
            df[['v0', 'v1']], mean=mean, cov=cov)
        assert np.all(np.isclose(p.T[0], p_v1v2))
    def test_4(self):
        # Tiny measurement errors with p_cond='norm' should reduce to the
        # error-free multivariate result.
        d = {'v0': [1., 2., -4.], 'e_v0': [1e-6, 1e-6, 1e-6],
             'v1': [3., 4., 1.], 'e_v1': [1e-6, 1e-6, 1e-6]}
        df = pd.DataFrame(d)
        obs = leopy.Observation(df, 'testdata', verbosity=0)
        like = leopy.Likelihood(
            obs, p_true='norm', p_cond='norm', verbosity=-1)
        R = np.array([[1, -0.3], [-0.3, 1]])
        stddev = [1, 2]
        mean = [0.5, 0.7]
        cov = np.diag(stddev).dot(R.dot(np.diag(stddev)))
        p = like.p(mean, stddev, R_true=R, pool=self.pool)
        p_v1v2 = scipy.stats.multivariate_normal.pdf(
            df[['v0', 'v1']], mean=mean, cov=cov)
        assert np.all(np.isclose(p.T[0], p_v1v2))
    def test_5(self):
        # Regression values for an uncorrelated lognormal model.
        d = {'v0': [1, 2], 'e_v0': [0.1, 0.2],
             'v1': [3, 4], 'e_v1': [0.1, 0.1]}
        obs = leopy.Observation(pd.DataFrame(d), 'testdata', verbosity=0)
        like = leopy.Likelihood(obs, p_true='lognorm', verbosity=-1)
        p = like.p(
            [0.5, 0.7], [1, 2], shape_true=[[1.4], [2.]], pool=self.pool)
        assert np.all(np.isclose(p, np.array([[0.0436189 ],
                                              [0.01067159]])))
    def test_6(self):
        # Same regression values with negligible errors and p_cond='norm'.
        d = {'v0': [1, 2], 'e_v0': [1e-6, 1e-6],
             'v1': [3, 4], 'e_v1': [1e-6, 1e-6]}
        obs = leopy.Observation(pd.DataFrame(d), 'testdata', verbosity=0)
        like = leopy.Likelihood(
            obs, p_true='lognorm', p_cond='norm', verbosity=-1)
        p = like.p(
            [0.5, 0.7], [1, 2], shape_true=[[1.4], [2.]], pool=self.pool)
        assert np.all(np.isclose(p, np.array([[0.0436189 ],
                                              [0.01067159]])))
    def test_7(self):
        # Regression values with non-negligible measurement errors.
        d = {'v0': [1, 2], 'e_v0': [0.1, 0.2],
             'v1': [3, 4], 'e_v1': [0.1, 0.1]}
        obs = leopy.Observation(pd.DataFrame(d), 'testdata', verbosity=0)
        like = leopy.Likelihood(
            obs, p_true='lognorm', p_cond='norm', verbosity=-1)
        p = like.p(
            [0.5, 0.7], [1, 2], shape_true=[[1.4], [2.]], pool=self.pool)
        assert np.all(np.isclose(p, np.array([[0.04415356], [0.01089342]]),
                                 rtol=1e-5, atol=1e-5))
    def test_8(self):
        # Regression values for a correlated lognormal model.
        d = {'v0': [1., 2., 0.8], 'e_v0': [1e-6, 1e-6, 1e-6],
             'v1': [3., 4., 1.], 'e_v1': [1e-6, 1e-6, 1e-6]}
        df = pd.DataFrame(d)
        obs = leopy.Observation(df, 'testdata', verbosity=0)
        like = leopy.Likelihood(
            obs, p_true='lognorm', p_cond='norm', verbosity=-1)
        R = np.array([[1, -0.3], [-0.3, 1]])
        scale = [1, 2]
        loc = [0.5, 0.]
        shape = [[1], [1.5]]
        p = like.p(loc, scale, shape_true=shape, R_true=R, pool=self.pool)
        assert np.all(
            np.isclose(
                p, np.array([[0.05819145], [0.01415945], [0.12375991]]),
                rtol=1e-5, atol=1e-5))
    def test_9(self, obs_norm_no_error):
        # ML fit on error-free normal data should recover the fixture's
        # true parameters (loc=[-1,2], scale=[1,3]).
        like = leopy.Likelihood(obs_norm_no_error, p_true='norm',
                                verbosity=-1)
        def f_mlnlike(x):
            # Negative log-likelihood; inf when any p underflows to 0.
            loc_true = x[0:2]
            scale_true = x[2:4]
            rho = x[4]
            R = np.array([[1., rho], [rho, 1.]])
            pp = like.p(loc_true, scale_true, R_true=R, pool=self.pool)
            if np.sum(pp==0) > 0:
                return np.inf
            else:
                return -np.sum(np.log(pp))
        bounds = scipy.optimize.Bounds(
            [-np.inf, -np.inf, 1e-3, 1e-3, 1e-3],
            [np.inf, np.inf, np.inf, np.inf, 1-1e-3])
        optres = scipy.optimize.minimize(f_mlnlike, [0., 0., 1., 1., 0.3],
                                         bounds=bounds, method='SLSQP',
                                         options={'disp': True, 'ftol': 1e-12})
        assert np.all(np.isclose(optres.x, [
            -1, 2, 1, 3, 0.4940357], rtol=1e-5, atol=1e-5))
    def test_10(self, obs_lognorm_no_error):
        # ML fit on error-free lognormal data (7 parameters incl. shapes).
        like = leopy.Likelihood(obs_lognorm_no_error, p_true='lognorm',
                                verbosity=-1)
        def f_mlnlike(x):
            print(x)
            loc_true = x[0:2]
            scale_true = x[2:4]
            shape_true = x[4:6].reshape(2, 1)
            rho = x[6]
            R = np.array([[1., rho], [rho, 1.]])
            pp = like.p(loc_true, scale_true, shape_true=shape_true, R_true=R,
                        pool=self.pool)
            if np.sum(pp==0) > 0:
                return np.inf
            else:
                return -np.sum(np.log(pp))
        bounds = scipy.optimize.Bounds(
            [-np.inf, -np.inf, 1e-3, 1e-3, 1e-3, 1e-3, -1+1e-3],
            [np.inf, np.inf, 10., 10., 10., 10., 1-1e-3])
        optres = scipy.optimize.minimize(
            f_mlnlike, [0., 0., 1., 1., 1., 1., 0.3],
            bounds=bounds, method='SLSQP',
            options={'disp': True, 'ftol': 1e-12})
        assert np.all(np.isclose(optres.x, [
            -0.01389813, 1.98866462, 1.17630436, 3.85686233, 0.53775924,
            1.47418086, 0.54154499], rtol=1e-5, atol=1e-5))
    def test_12(self, obs_norm_MAR):
        # ML fit with missing-at-random data; regression on the optimum.
        like = leopy.Likelihood(obs_norm_MAR, p_true='norm', p_cond='norm')
        def f_mlnlike(x):
            loc_true = x[0:2]
            scale_true = x[2:4]
            rho = x[4]
            R = np.array([[1., rho], [rho, 1.]])
            pp = like.p(loc_true, scale_true, R_true=R, pool=self.pool)
            if np.sum(pp==0) > 0:
                return np.inf
            else:
                return -np.sum(np.log(pp))
        bounds = scipy.optimize.Bounds(
            [-np.inf, -np.inf, 1e-3, 1e-3, 1e-3],
            [np.inf, np.inf, 10., 10., 1-1e-3])
        optres = scipy.optimize.minimize(f_mlnlike, [0., 0., 1., 1., 0.3],
                                         bounds=bounds, method='SLSQP',
                                         options={'disp': True, 'ftol': 1e-12})
        assert np.all(np.isclose(optres.x, [
            -0.17991379, 1.49608098, 0.98586541, 2.69842305, 0.44114192],
            rtol=1e-5, atol=1e-5))
    def test_13(self, obs_norm_cen):
        # ML fit with censored data; regression on the optimum.
        like = leopy.Likelihood(obs_norm_cen, p_true='norm', p_cond='norm')
        def f_mlnlike(x):
            loc_true = x[0:2]
            scale_true = x[2:4]
            rho = x[4]
            R = np.array([[1., rho], [rho, 1.]])
            pp = like.p(loc_true, scale_true, R_true=R, pool=self.pool)
            if np.sum(pp==0) > 0:
                return np.inf
            else:
                return -np.sum(np.log(pp))
        bounds = scipy.optimize.Bounds(
            [-np.inf, -np.inf, 1e-3, 1e-3, 1e-3],
            [np.inf, np.inf, 10., 10., 1-1e-3])
        optres = scipy.optimize.minimize(f_mlnlike, [0., 0., 1., 1., 0.3],
                                         bounds=bounds, method='SLSQP',
                                         options={'disp': True, 'ftol': 1e-12})
        assert np.all(np.isclose(optres.x, [
            0.47954307, 1.2705067, 0.88797593, 2.36476421, 0.52029972],
            rtol=1e-5, atol=1e-5))
    def test_14(self, obs_norm_cen_uncorr):
        # Censored, uncorrelated data: fit without an R parameter.
        like = leopy.Likelihood(
            obs_norm_cen_uncorr, p_true='norm', p_cond='norm')
        def f_mlnlike(x):
            loc_true = x[0:2]
            scale_true = x[2:4]
            pp = like.p(loc_true, scale_true, pool=self.pool)
            if np.sum(pp==0) > 0:
                return np.inf
            else:
                return -np.sum(np.log(pp))
        bounds = scipy.optimize.Bounds(
            [-np.inf, -np.inf, 1e-3, 1e-3],
            [np.inf, np.inf, 10., 10.])
        optres = scipy.optimize.minimize(f_mlnlike, [0., 0., 1., 1.],
                                         bounds=bounds, method='SLSQP',
                                         options={'disp': True, 'ftol': 1e-12})
        assert np.all(np.isclose(optres.x, [
            0.54321826, 1.27320101, 0.97319273, 2.3491366],
            rtol=1e-5, atol=1e-5))
    def test_15(self, obs_norm_obscorr):
        # Fit with per-row correlated measurement errors; also reports
        # wall-clock time for the optimization.
        t0 = time.time()
        like = leopy.Likelihood(obs_norm_obscorr, p_true='norm', p_cond='norm')
        def f_mlnlike(x):
            loc_true = x[0:2]
            scale_true = x[2:4]
            rho = x[4]
            R = np.array([[1., rho], [rho, 1.]])
            pp = like.p(loc_true, scale_true, R_true=R, pool=self.pool)
            if np.sum(pp==0) > 0:
                return np.inf
            else:
                return -np.sum(np.log(pp))
        bounds = scipy.optimize.Bounds(
            [-np.inf, -np.inf, 1e-3, 1e-3, -1+1e-3],
            [np.inf, np.inf, 10., 10., 1-1e-3])
        optres = scipy.optimize.minimize(f_mlnlike, [0., 0., 1., 1., 0.3],
                                         bounds=bounds, method='SLSQP',
                                         options={'disp': True, 'ftol': 1e-12})
        t1 = time.time()
        print('Needed {:.4f} s'.format(t1-t0))
        print(optres.x)
        assert np.all(np.isclose(optres.x, [
            -1.08265859, 2.14778872, 1.18368684, 2.74908927, 0.49219241],
            rtol=1e-5, atol=1e-5))
    # test independence of observations
    # p(x_1, y_1, ..., x_N, y_N | theta) = \prod_{i=1}^N p(x_i, y_i | theta)
    def test_16(self, obs_lognorm_obscorr):
        # Joint likelihood of the full set must equal the product of
        # per-row likelihoods evaluated one row at a time.
        like = leopy.Likelihood(obs_lognorm_obscorr, p_true='lognorm',
                                p_cond='norm')
        loc_true = [-0.02, 1.95]
        scale_true = [1.2, 2.9]
        shape_true = np.array([0.5, 1.44]).reshape(2, 1)
        rho = 0.54
        R = np.array([[1., rho], [rho, 1.]])
        p_all = like.p(loc_true, scale_true, shape_true=shape_true, R_true=R,
                       pool=self.pool)
        N = obs_lognorm_obscorr.df.shape[0]
        p_all2 = np.zeros(N)
        for i in range(N):
            obs = leopy.Observation(obs_lognorm_obscorr.df.iloc[i:i+1],
                                    'test', verbosity=0)
            like = leopy.Likelihood(obs, p_true='lognorm', p_cond='norm')
            p_all2[i] = like.p(loc_true, scale_true, shape_true=shape_true,
                               R_true=R, pool=self.pool)
        assert np.all(np.isclose(p_all.reshape(N), p_all2))
    # test integrating out variables
    # p(x_i | theta, R) = int dy_i p(y_i | x_i, theta) p(x_i | theta, R)
    #                   = int dy_i p(x_i, y_i | theta, R)
    def test_17(self):
        # Marginal likelihoods obtained via vars=[...] must match a
        # numerical integration of the joint likelihood over the other
        # variable (trapezoidal rule on a dense log-spaced grid).
        v0 = [0.5, 2.0, 1.7]
        ev0 = [0.1, 0.2, 0.3]
        v1 = [3, 4, 5.2]
        ev1 = [0.1, 0.1, 0.15]
        rv0v1 = [0.2, 0.8, -0.8]
        d = {'v0': v0, 'e_v0': ev0, 'v1': v1, 'e_v1': ev1, 'r_v0_v1': rv0v1}
        obs = leopy.Observation(d, 'test', verbosity=0)
        like = leopy.Likelihood(obs, p_true='lognorm', p_cond='norm')
        loc_true = [-0.02, 1.95]
        scale_true = [0.7, 1.9]
        shape_true = np.array([0.5, 2.03]).reshape(2, 1)
        rho = 0.0
        R = np.array([[1., rho], [rho, 1.]])
        p_x = like.p(loc_true, scale_true, shape_true=shape_true, R_true=R,
                     vars=[0], pool=self.pool)
        p_y = like.p(loc_true, scale_true, shape_true=shape_true, R_true=R,
                     vars=[1], pool=self.pool)
        import scipy.integrate
        N = 2000
        # Integration grids: dense near loc_true, log-spaced on both sides.
        xx = np.concatenate(
            [-np.logspace(1, -5, N//5)+loc_true[0], [loc_true[0]],
             np.logspace(-5, 4, N-N//5-1) + loc_true[0]])
        yy = np.concatenate(
            [-np.logspace(1, -5, N//5)+loc_true[1], [loc_true[1]],
             np.logspace(-5, 4, N-N//5-1) + loc_true[1]])
        d_x = {'v0': np.outer(v0, np.ones(N)).flatten(),
               'e_v0': np.outer(ev0, np.ones(N)).flatten(),
               'v1': np.outer(np.ones(3), yy).flatten(),
               'e_v1': np.outer(ev1, np.ones(N)).flatten(),
               'r_v0_v1': np.outer(rv0v1, np.ones(N)).flatten()}
        obs_x = leopy.Observation(d_x, 'test', verbosity=0)
        like_x = leopy.Likelihood(obs_x, p_true='lognorm', p_cond='norm')
        res = like_x.p(loc_true, scale_true, shape_true=shape_true, R_true=R,
                       pool=self.pool)
        res = res.reshape(3, N)
        # NOTE(review): scipy.integrate.trapz was removed in SciPy 1.14;
        # newer SciPy requires scipy.integrate.trapezoid here.
        p_x_2 = scipy.integrate.trapz(res, yy)
        assert np.all(np.isclose(p_x.reshape(3), p_x_2, atol=1e-4))
        d_y = {'v0': np.outer(np.ones(3), xx).flatten(),
               'e_v0': np.outer(ev0, np.ones(N)).flatten(),
               'v1': np.outer(v1, np.ones(N)).flatten(),
               'e_v1': np.outer(ev1, np.ones(N)).flatten(),
               'r_v0_v1': np.outer(rv0v1, np.ones(N)).flatten()}
        obs_y = leopy.Observation(d_y, 'test', verbosity=0)
        like_y = leopy.Likelihood(obs_y, p_true='lognorm', p_cond='norm')
        res = like_y.p(loc_true, scale_true, shape_true=shape_true, R_true=R,
                       pool=self.pool).reshape(3, N)
        p_y_2 = scipy.integrate.trapz(res, xx)
        assert np.all(np.isclose(p_y.reshape(3), p_y_2, atol=1e-4))
    # test product decomposition of uncorrelated, joint pdf
    def test_18(self):
        # With independent columns: p(0,1)*p(0,2)/p(0)^2 == p(0,1,2)/p(0).
        v0 = [0.5, 2.0, 1.7, 1.1]
        ev0 = [0.1, 0.2, 0.3, 0.15]
        v1 = [3, 4, 5.2, 2.2]
        ev1 = [0.1, 0.1, 0.15, 0.12]
        v2 = [-2, 3, 1.7, 1.]
        ev2 = [0.2, 0.1, 0.05, 0.15]
        d = {'v0': v0, 'e_v0': ev0, 'v1': v1, 'e_v1': ev1, 'v2': v2,
             'e_v2': ev2}
        obs = leopy.Observation(d, 'test', verbosity=0)
        like = leopy.Likelihood(obs, p_true=['lognorm','gamma','norm'],
                                p_cond='norm')
        loc_true = [-0.02, 1.95, 1]
        scale_true = [0.7, 1.9, 2.5]
        shape_true = [[0.5], [2.03], []]
        p_0 = like.p(loc_true, scale_true, shape_true=shape_true, vars=[0],
                     pool=self.pool)
        p_01 = like.p(loc_true, scale_true, shape_true=shape_true, vars=[0, 1],
                      pool=self.pool)
        p_02 = like.p(loc_true, scale_true, shape_true=shape_true, vars=[0, 2],
                      pool=self.pool)
        p_012 = like.p(loc_true, scale_true, shape_true=shape_true,
                       pool=self.pool)
        assert np.all(np.isclose(p_01/p_0 * p_02/p_0, p_012/p_0))
|
rfeldmannREPO_NAMEleopyPATH_START.@leopy_extracted@leopy-master@leopy@test@test_likelihood.py@.PATH_END.py
|
{
"filename": "test_loader.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/traitlets/py3/tests/config/test_loader.py",
"type": "Python"
}
|
"""Tests for traitlets.config.loader"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations
import copy
import os
import pickle
from itertools import chain
from tempfile import mkstemp
from unittest import TestCase
import pytest
from traitlets import Dict, Integer, List, Tuple, Unicode
from traitlets.config import Configurable
from traitlets.config.loader import (
ArgParseConfigLoader,
Config,
JSONFileConfigLoader,
KeyValueConfigLoader,
KVArgParseConfigLoader,
LazyConfigValue,
PyFileConfigLoader,
)
# --- Fixture payloads shared by the loader tests below ---
# A Python config snippet and two JSON configs (version 1 loads,
# version 2 must be rejected by JSONFileConfigLoader).
pyfile = """
c = get_config()
c.a=10
c.b=20
c.Foo.Bar.value=10
c.Foo.Bam.value=list(range(10))
c.D.C.value='hi there'
"""
json1file = """
{
"version": 1,
"a": 10,
"b": 20,
"Foo": {
"Bam": {
"value": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
},
"Bar": {
"value": 10
}
},
"D": {
"C": {
"value": "hi there"
}
}
}
"""
# should not load
json2file = """
{
"version": 2
}
"""
import logging

# Silenced logger handed to every loader so tests stay quiet.
log = logging.getLogger("devnull")
log.setLevel(0)
class TestFileCL(TestCase):
    """Round-trip tests for PyFileConfigLoader and JSONFileConfigLoader."""
    def _check_conf(self, config):
        # Shared assertions for configs produced from pyfile/json1file.
        self.assertEqual(config.a, 10)
        self.assertEqual(config.b, 20)
        self.assertEqual(config.Foo.Bar.value, 10)
        self.assertEqual(config.Foo.Bam.value, list(range(10)))
        self.assertEqual(config.D.C.value, "hi there")
    def test_python(self):
        fd, fname = mkstemp(".py", prefix="μnïcø∂e")
        f = os.fdopen(fd, "w")
        f.write(pyfile)
        f.close()
        # NOTE(review): despite the original comment, the temp file is
        # never unlinked here.
        cl = PyFileConfigLoader(fname, log=log)
        config = cl.load_config()
        self._check_conf(config)
    def test_json(self):
        fd, fname = mkstemp(".json", prefix="μnïcø∂e")
        f = os.fdopen(fd, "w")
        f.write(json1file)
        f.close()
        # NOTE(review): temp file is never unlinked.
        cl = JSONFileConfigLoader(fname, log=log)
        config = cl.load_config()
        self._check_conf(config)
    def test_context_manager(self):
        fd, fname = mkstemp(".json", prefix="μnïcø∂e")
        f = os.fdopen(fd, "w")
        f.write("{}")
        f.close()
        cl = JSONFileConfigLoader(fname, log=log)
        value = "context_manager"
        # Exiting the with-block writes the mutated config back to disk.
        with cl as c:
            c.MyAttr.value = value
        self.assertEqual(cl.config.MyAttr.value, value)
        # check that another loader does see the change
        _ = JSONFileConfigLoader(fname, log=log)
        self.assertEqual(cl.config.MyAttr.value, value)
    def test_json_context_bad_write(self):
        fd, fname = mkstemp(".json", prefix="μnïcø∂e")
        f = os.fdopen(fd, "w")
        f.write("{}")
        f.close()
        with JSONFileConfigLoader(fname, log=log) as config:
            config.A.b = 1
        # A non-serializable value must raise and must not clobber the
        # previously persisted content.
        with self.assertRaises(TypeError), JSONFileConfigLoader(fname, log=log) as config:
            config.A.cant_json = lambda x: x
        loader = JSONFileConfigLoader(fname, log=log)
        cfg = loader.load_config()
        assert cfg.A.b == 1
        assert "cant_json" not in cfg.A
    def test_collision(self):
        # Config.collisions reports keys that would be overwritten when
        # merging, formatted as "old ignored, using new".
        a = Config()
        b = Config()
        self.assertEqual(a.collisions(b), {})
        a.A.trait1 = 1
        b.A.trait2 = 2
        self.assertEqual(a.collisions(b), {})
        b.A.trait1 = 1
        self.assertEqual(a.collisions(b), {})
        b.A.trait1 = 0
        self.assertEqual(
            a.collisions(b),
            {
                "A": {
                    "trait1": "1 ignored, using 0",
                }
            },
        )
        self.assertEqual(
            b.collisions(a),
            {
                "A": {
                    "trait1": "0 ignored, using 1",
                }
            },
        )
        a.A.trait2 = 3
        self.assertEqual(
            b.collisions(a),
            {
                "A": {
                    "trait1": "0 ignored, using 1",
                    "trait2": "2 ignored, using 3",
                }
            },
        )
    def test_v2raise(self):
        fd, fname = mkstemp(".json", prefix="μnïcø∂e")
        f = os.fdopen(fd, "w")
        f.write(json2file)
        f.close()
        # NOTE(review): temp file is never unlinked.
        cl = JSONFileConfigLoader(fname, log=log)
        # Unsupported schema version 2 must be rejected.
        with self.assertRaises(ValueError):
            cl.load_config()
def _parse_int_or_str(v):
try:
return int(v)
except Exception:
return str(v)
class MyLoader1(ArgParseConfigLoader):
    """Loader exercising option flags, a store_true switch, a positional
    argument, and two list-producing options (append vs nargs='+')."""
    def _add_arguments(self, aliases=None, flags=None, classes=None):
        p = self.parser
        p.add_argument("-f", "--foo", dest="Global.foo", type=str)
        p.add_argument("-b", dest="MyClass.bar", type=int)
        p.add_argument("-n", dest="n", action="store_true")
        p.add_argument("Global.bam", type=str)
        p.add_argument("--list1", action="append", type=_parse_int_or_str)
        p.add_argument("--list2", nargs="+", type=int)
class MyLoader2(ArgParseConfigLoader):
    """Loader exercising argparse subcommands ('1' and '2')."""
    def _add_arguments(self, aliases=None, flags=None, classes=None):
        subparsers = self.parser.add_subparsers(dest="subparser_name")
        subparser1 = subparsers.add_parser("1")
        subparser1.add_argument("-x", dest="Global.x")
        subparser2 = subparsers.add_parser("2")
        subparser2.add_argument("y")
class TestArgParseCL(TestCase):
    """Behavioral tests for ArgParseConfigLoader via MyLoader1/MyLoader2."""
    def test_basic(self):
        cl = MyLoader1()
        config = cl.load_config("-f hi -b 10 -n wow".split())
        self.assertEqual(config.Global.foo, "hi")
        self.assertEqual(config.MyClass.bar, 10)
        self.assertEqual(config.n, True)
        self.assertEqual(config.Global.bam, "wow")
        # Only the positional argument: no other sections appear.
        config = cl.load_config(["wow"])
        self.assertEqual(list(config.keys()), ["Global"])
        self.assertEqual(list(config.Global.keys()), ["bam"])
        self.assertEqual(config.Global.bam, "wow")
    def test_add_arguments(self):
        cl = MyLoader2()
        config = cl.load_config("2 frobble".split())
        self.assertEqual(config.subparser_name, "2")
        self.assertEqual(config.y, "frobble")
        config = cl.load_config("1 -x frobble".split())
        self.assertEqual(config.subparser_name, "1")
        self.assertEqual(config.Global.x, "frobble")
    def test_argv(self):
        # argv may be supplied at construction instead of at load_config.
        cl = MyLoader1(argv="-f hi -b 10 -n wow".split())
        config = cl.load_config()
        self.assertEqual(config.Global.foo, "hi")
        self.assertEqual(config.MyClass.bar, 10)
        self.assertEqual(config.n, True)
        self.assertEqual(config.Global.bam, "wow")
    def test_list_args(self):
        cl = MyLoader1()
        config = cl.load_config("--list1 1 wow --list2 1 2 3 --list1 B".split())
        self.assertEqual(list(config.Global.keys()), ["bam"])
        self.assertEqual(config.Global.bam, "wow")
        # --list1 appends per occurrence; --list2 consumes nargs='+'.
        self.assertEqual(config.list1, [1, "B"])
        self.assertEqual(config.list2, [1, 2, 3])
class C(Configurable):
    """Configurable with one trait of each container flavor, used to test
    string-to-trait coercion by the key-value loaders."""
    str_trait = Unicode(config=True)
    int_trait = Integer(config=True)
    list_trait = List(config=True)
    list_of_ints = List(Integer(), config=True)
    dict_trait = Dict(config=True)
    dict_of_ints = Dict(  # both keys and values coerced to int
        key_trait=Integer(),
        value_trait=Integer(),
    )
    dict_multi = Dict(  # per-key value traits keyed by name
        key_trait=Unicode(),
        per_key_traits={
            "int": Integer(),
            "str": Unicode(),
        },
        config=True,
    )
class TestKeyValueCL(TestCase):
    """Tests for KeyValueConfigLoader; TestArgParseKVCL subclasses this
    with klass = KVArgParseConfigLoader to rerun the same suite."""
    klass = KeyValueConfigLoader
    def test_eval(self):
        cl = self.klass(log=log)
        config = cl.load_config(
            '--C.str_trait=all --C.int_trait=5 --C.list_trait=["hello",5]'.split()
        )
        c = C(config=config)
        assert c.str_trait == "all"
        assert c.int_trait == 5
        assert c.list_trait == ["hello", 5]
    def test_basic(self):
        cl = self.klass(log=log)
        # Reuse the c.* assignments from the pyfile fixture as CLI args.
        argv = ["--" + s[2:] for s in pyfile.split("\n") if s.startswith("c.")]
        config = cl.load_config(argv)
        assert config.a == "10"
        assert config.b == "20"
        assert config.Foo.Bar.value == "10"
        # non-literal expressions are not evaluated
        self.assertEqual(config.Foo.Bam.value, "list(range(10))")
        self.assertEqual(Unicode().from_string(config.D.C.value), "hi there")
    def test_expanduser(self):
        cl = self.klass(log=log)
        argv = ["--a=~/1/2/3", "--b=~", "--c=~/", '--d="~/"']
        config = cl.load_config(argv)
        u = Unicode()
        self.assertEqual(u.from_string(config.a), os.path.expanduser("~/1/2/3"))
        self.assertEqual(u.from_string(config.b), os.path.expanduser("~"))
        self.assertEqual(u.from_string(config.c), os.path.expanduser("~/"))
        # Quoted values are taken literally (no expansion).
        self.assertEqual(u.from_string(config.d), "~/")
    def test_extra_args(self):
        cl = self.klass(log=log)
        # Non-option tokens are collected into extra_args.
        config = cl.load_config(["--a=5", "b", "d", "--c=10"])
        self.assertEqual(cl.extra_args, ["b", "d"])
        assert config.a == "5"
        assert config.c == "10"
        # Everything after a bare '--' is extra.
        config = cl.load_config(["--", "--a=5", "--c=10"])
        self.assertEqual(cl.extra_args, ["--a=5", "--c=10"])
        cl = self.klass(log=log)
        config = cl.load_config(["extra", "--a=2", "--c=1", "--", "-"])
        self.assertEqual(cl.extra_args, ["extra", "-"])
    def test_unicode_args(self):
        cl = self.klass(log=log)
        argv = ["--a=épsîlön"]
        config = cl.load_config(argv)
        print(config, cl.extra_args)
        self.assertEqual(config.a, "épsîlön")
    def test_list_append(self):
        # Repeated occurrences of a List trait accumulate.
        cl = self.klass(log=log)
        argv = ["--C.list_trait", "x", "--C.list_trait", "y"]
        config = cl.load_config(argv)
        assert config.C.list_trait == ["x", "y"]
        c = C(config=config)
        assert c.list_trait == ["x", "y"]
    def test_list_single_item(self):
        cl = self.klass(log=log)
        argv = ["--C.list_trait", "x"]
        config = cl.load_config(argv)
        c = C(config=config)
        assert c.list_trait == ["x"]
    def test_dict(self):
        # key=value pairs merge into a Dict trait; values stay strings.
        cl = self.klass(log=log)
        argv = ["--C.dict_trait", "x=5", "--C.dict_trait", "y=10"]
        config = cl.load_config(argv)
        c = C(config=config)
        assert c.dict_trait == {"x": "5", "y": "10"}
    def test_dict_key_traits(self):
        # With Integer key/value traits both sides are coerced.
        cl = self.klass(log=log)
        argv = ["--C.dict_of_ints", "1=2", "--C.dict_of_ints", "3=4"]
        config = cl.load_config(argv)
        c = C(config=config)
        assert c.dict_of_ints == {1: 2, 3: 4}
class CBase(Configurable):
    """Base configurable whose multiplicity tags steer how repeated CLI
    occurrences of a trait are merged by the KV argparse loader."""
    a = List().tag(config=True)
    b = List(Integer()).tag(config=True, multiplicity="*")
    c = List().tag(config=True, multiplicity="append")
    adict = Dict().tag(config=True)
class CSub(CBase):
    """Subclass adding Tuple traits and a '*'-multiplicity Dict."""
    d = Tuple().tag(config=True)
    e = Tuple().tag(config=True, multiplicity="+")
    bdict = Dict().tag(config=True, multiplicity="*")
class TestArgParseKVCL(TestKeyValueCL):
    """Run the key/value loader suite against the argparse-based loader."""

    klass = KVArgParseConfigLoader  # type:ignore

    def test_no_cast_literals(self):
        """Aliased values are kept as strings, never literal-evaluated."""
        loader = self.klass(log=log)  # type:ignore
        # test ipython -c 1 doesn't cast to int
        cfg = loader.load_config(["-c", "1"], aliases=dict(c="IPython.command_to_run"))
        assert cfg.IPython.command_to_run == "1"

    def test_int_literals(self):
        """An integer-looking argument value stays a string."""
        loader = self.klass(log=log)  # type:ignore
        # test ipython -c 1 doesn't cast to int
        cfg = loader.load_config(["-c", "1"], aliases=dict(c="IPython.command_to_run"))
        assert cfg.IPython.command_to_run == "1"

    def test_unicode_alias(self):
        """Non-ASCII values work through aliases as well."""
        loader = self.klass(log=log)  # type:ignore
        cfg = loader.load_config(["--a=épsîlön"], aliases=dict(a="A.a"))
        print(dict(cfg))
        print(loader.extra_args)
        print(loader.aliases)
        self.assertEqual(cfg.A.a, "épsîlön")

    def test_expanduser2(self):
        """``~`` is expanded unless the value is quoted."""
        loader = self.klass(log=log)  # type:ignore
        cfg = loader.load_config(["-a", "~/1/2/3", "--b", "'~/1/2/3'"],
                                 aliases=dict(a="A.a", b="A.b"))

        class A(Configurable):
            a = Unicode(config=True)
            b = Unicode(config=True)

        obj = A(config=cfg)
        self.assertEqual(obj.a, os.path.expanduser("~/1/2/3"))
        self.assertEqual(obj.b, "~/1/2/3")

    def test_eval(self):
        """A value containing ``=`` is not evaluated as an expression."""
        loader = self.klass(log=log)  # type:ignore
        cfg = loader.load_config(["-c", "a=5"], aliases=dict(c="A.c"))
        self.assertEqual(cfg.A.c, "a=5")

    def test_seq_traits(self):
        """List/Tuple traits honour their multiplicity settings and aliases."""
        loader = self.klass(log=log, classes=(CBase, CSub))  # type:ignore
        aliases = {"a3": "CBase.c", "a5": "CSub.e"}
        argv = (
            "--CBase.a A --CBase.a 2 --CBase.b 1 --CBase.b 3 --a3 AA --CBase.c BB "
            "--CSub.d 1 --CSub.d BBB --CSub.e 1 --CSub.e=bcd a b c "
        ).split()
        cfg = loader.load_config(argv, aliases=aliases)
        assert loader.extra_args == ["a", "b", "c"]
        assert cfg.CBase.a == ["A", "2"]
        assert cfg.CBase.b == [1, 3]
        self.assertEqual(cfg.CBase.c, ["AA", "BB"])
        assert cfg.CSub.d == ("1", "BBB")
        assert cfg.CSub.e == ("1", "bcd")

    def test_seq_traits_single_empty_string(self):
        """An explicit empty string becomes a one-element list."""
        loader = self.klass(log=log, classes=(CBase,))  # type:ignore
        cfg = loader.load_config(["--seqopt", ""], aliases={"seqopt": "CBase.c"})
        self.assertEqual(cfg.CBase.c, [""])

    def test_dict_traits(self):
        """Dict traits accept repeated ``key=value`` items; values stay strings."""
        loader = self.klass(log=log, classes=(CBase, CSub))  # type:ignore
        aliases = {"D": "CBase.adict", "E": "CSub.bdict"}
        argv = ["-D", "k1=v1", "-D=k2=2", "-D", "k3=v 3", "-E", "k=v", "-E", "22=222"]
        cfg = loader.load_config(argv, aliases=aliases)
        instance = CSub(config=cfg)
        assert instance.adict == {"k1": "v1", "k2": "2", "k3": "v 3"}
        assert instance.bdict == {"k": "v", "22": "222"}

    def test_mixed_seq_positional(self):
        """Positional args may appear at any point between trait assignments."""
        aliases = {"c": "Class.trait"}
        loader = self.klass(log=log, aliases=aliases)  # type:ignore
        assignments = [("-c", "1"), ("--Class.trait=2",), ("--c=3",), ("--Class.trait", "4")]
        positionals = ["a", "b", "c"]
        # Try inserting the positional group at every possible index.
        for idx in range(len(assignments) + 1):
            argv_parts = assignments[:]
            argv_parts[idx:idx] = (positionals,)  # type:ignore
            argv = list(chain(*argv_parts))
            cfg = loader.load_config(argv)
            assert cfg.Class.trait == ["1", "2", "3", "4"]
            assert loader.extra_args == ["a", "b", "c"]

    def test_split_positional(self):
        """Splitting positionals across flags is no longer allowed in traitlets 5"""
        loader = self.klass(log=log)  # type:ignore
        with pytest.raises(SystemExit):
            loader.load_config(["a", "--Class.trait=5", "b"])
class TestConfig(TestCase):
    """Behavioral tests for the ``Config`` dict subclass itself: attribute
    access, auto-created sections, merging, lazy container values, copying
    and pickling."""

    def test_setget(self):
        """Attribute set/get works; unset scalar keys are simply absent."""
        c = Config()
        c.a = 10
        self.assertEqual(c.a, 10)
        self.assertEqual("b" in c, False)

    def test_auto_section(self):
        """Accessing a capitalized attribute auto-creates a sub-Config section."""
        c = Config()
        self.assertNotIn("A", c)
        assert not c._has_section("A")
        A = c.A
        A.foo = "hi there"
        self.assertIn("A", c)
        assert c._has_section("A")
        self.assertEqual(c.A.foo, "hi there")
        # Deleting the section resets it to an empty Config on next access.
        del c.A
        self.assertEqual(c.A, Config())

    def test_merge_doesnt_exist(self):
        """Merging copies over keys and sections the target does not have."""
        c1 = Config()
        c2 = Config()
        c2.bar = 10
        c2.Foo.bar = 10
        c1.merge(c2)
        self.assertEqual(c1.Foo.bar, 10)
        self.assertEqual(c1.bar, 10)
        c2.Bar.bar = 10
        c1.merge(c2)
        self.assertEqual(c1.Bar.bar, 10)

    def test_merge_exists(self):
        """Merging overwrites existing keys and adds new ones, recursively."""
        c1 = Config()
        c2 = Config()
        c1.Foo.bar = 10
        c1.Foo.bam = 30
        c2.Foo.bar = 20
        c2.Foo.wow = 40
        c1.merge(c2)
        self.assertEqual(c1.Foo.bam, 30)
        self.assertEqual(c1.Foo.bar, 20)
        self.assertEqual(c1.Foo.wow, 40)
        c2.Foo.Bam.bam = 10
        c1.merge(c2)
        self.assertEqual(c1.Foo.Bam.bam, 10)

    def test_deepcopy(self):
        """deepcopy copies sections but shares logger leaves by identity."""
        c1 = Config()
        c1.Foo.bar = 10
        c1.Foo.bam = 30
        c1.a = "asdf"
        c1.b = range(10)
        c1.Test.logger = logging.Logger("test")
        c1.Test.get_logger = logging.getLogger("test")
        c2 = copy.deepcopy(c1)
        self.assertEqual(c1, c2)
        self.assertTrue(c1 is not c2)
        self.assertTrue(c1.Foo is not c2.Foo)
        self.assertTrue(c1.Test is not c2.Test)
        # Logger instances are deliberately not deep-copied.
        self.assertTrue(c1.Test.logger is c2.Test.logger)
        self.assertTrue(c1.Test.get_logger is c2.Test.get_logger)

    def test_builtin(self):
        """Keys shadowing builtins (``format``) are usable without error."""
        c1 = Config()
        c1.format = "json"

    def test_fromdict(self):
        """Nested plain dicts are converted into Config sections."""
        c1 = Config({"Foo": {"bar": 1}})
        self.assertEqual(c1.Foo.__class__, Config)
        self.assertEqual(c1.Foo.bar, 1)

    def test_fromdictmerge(self):
        """Merging a dict-built Config produces proper Config sections."""
        c1 = Config()
        c2 = Config({"Foo": {"bar": 1}})
        c1.merge(c2)
        self.assertEqual(c1.Foo.__class__, Config)
        self.assertEqual(c1.Foo.bar, 1)

    def test_fromdictmerge2(self):
        """Merge combines section keys without mutating the source config."""
        c1 = Config({"Foo": {"baz": 2}})
        c2 = Config({"Foo": {"bar": 1}})
        c1.merge(c2)
        self.assertEqual(c1.Foo.__class__, Config)
        self.assertEqual(c1.Foo.bar, 1)
        self.assertEqual(c1.Foo.baz, 2)
        self.assertNotIn("baz", c2.Foo)

    def test_contains(self):
        """``in`` supports dotted ``Section.key`` lookups."""
        c1 = Config({"Foo": {"baz": 2}})
        c2 = Config({"Foo": {"bar": 1}})
        self.assertIn("Foo", c1)
        self.assertIn("Foo.baz", c1)
        self.assertIn("Foo.bar", c2)
        self.assertNotIn("Foo.bar", c1)

    def test_pickle_config(self):
        """Config round-trips through pickle unchanged."""
        cfg = Config()
        cfg.Foo.bar = 1
        pcfg = pickle.dumps(cfg)
        cfg2 = pickle.loads(pcfg)
        self.assertEqual(cfg2, cfg)

    def test_getattr_section(self):
        """Capitalized attribute access creates and records a section."""
        cfg = Config()
        self.assertNotIn("Foo", cfg)
        Foo = cfg.Foo
        assert isinstance(Foo, Config)
        self.assertIn("Foo", cfg)

    def test_getitem_section(self):
        """Capitalized item access behaves like attribute access."""
        cfg = Config()
        self.assertNotIn("Foo", cfg)
        Foo = cfg["Foo"]
        assert isinstance(Foo, Config)
        self.assertIn("Foo", cfg)

    def test_getattr_not_section(self):
        """Lowercase attribute access yields a LazyConfigValue placeholder."""
        cfg = Config()
        self.assertNotIn("foo", cfg)
        foo = cfg.foo
        assert isinstance(foo, LazyConfigValue)
        self.assertIn("foo", cfg)

    def test_getattr_private_missing(self):
        """Private-looking names raise AttributeError and leave config untouched."""
        cfg = Config()
        self.assertNotIn("_repr_html_", cfg)
        with self.assertRaises(AttributeError):
            _ = cfg._repr_html_
        self.assertNotIn("_repr_html_", cfg)
        self.assertEqual(len(cfg), 0)

    def test_lazy_config_repr(self):
        """repr shows the pending lazy operation, then the resolved value."""
        cfg = Config()
        cfg.Class.lazy.append(1)
        cfg_repr = repr(cfg)
        assert "<LazyConfigValue" in cfg_repr
        assert "extend" in cfg_repr
        assert " [1]}>" in cfg_repr
        assert "value=" not in cfg_repr
        # Once resolved, the repr shows the computed value instead.
        cfg.Class.lazy.get_value([0])
        repr2 = repr(cfg)
        assert repr([0, 1]) in repr2
        assert "value=" in repr2

    def test_getitem_not_section(self):
        """Lowercase item access also yields a LazyConfigValue placeholder."""
        cfg = Config()
        self.assertNotIn("foo", cfg)
        foo = cfg["foo"]
        assert isinstance(foo, LazyConfigValue)
        self.assertIn("foo", cfg)

    def test_merge_no_copies(self):
        """merge shares section objects rather than copying them."""
        c = Config()
        c2 = Config()
        c2.Foo.trait = []
        c.merge(c2)
        c2.Foo.trait.append(1)
        self.assertIs(c.Foo, c2.Foo)
        self.assertEqual(c.Foo.trait, [1])
        self.assertEqual(c2.Foo.trait, [1])

    def test_merge_multi_lazy(self):
        """
        With multiple config files (systemwide and users), we want compounding.
        If systemwide overwrite and user append, we want both in the right
        order.
        """
        c1 = Config()
        c2 = Config()
        c1.Foo.trait = [1]
        c2.Foo.trait.append(2)
        c = Config()
        c.merge(c1)
        c.merge(c2)
        self.assertEqual(c.Foo.trait, [1, 2])

    def test_merge_multi_lazyII(self):
        """
        With multiple config files (systemwide and users), we want compounding.
        If both are lazy we still want a lazy config.
        """
        c1 = Config()
        c2 = Config()
        c1.Foo.trait.append(1)
        c2.Foo.trait.append(2)
        c = Config()
        c.merge(c1)
        c.merge(c2)
        self.assertEqual(c.Foo.trait._extend, [1, 2])

    def test_merge_multi_lazy_III(self):
        """
        With multiple config files (systemwide and users), we want compounding.
        Prepend should prepend in the right order.
        """
        c1 = Config()
        c2 = Config()
        c1.Foo.trait = [1]
        c2.Foo.trait.prepend([0])
        c = Config()
        c.merge(c1)
        c.merge(c2)
        self.assertEqual(c.Foo.trait, [0, 1])

    def test_merge_multi_lazy_IV(self):
        """
        With multiple config files (systemwide and users), we want compounding.
        Both prepending should be lazy
        """
        c1 = Config()
        c2 = Config()
        c1.Foo.trait.prepend([1])
        c2.Foo.trait.prepend([0])
        c = Config()
        c.merge(c1)
        c.merge(c2)
        self.assertEqual(c.Foo.trait._prepend, [0, 1])

    def test_merge_multi_lazy_update_I(self):
        """
        With multiple config files (systemwide and users), we want compounding.
        dict update should be in the right order.
        """
        c1 = Config()
        c2 = Config()
        c1.Foo.trait = {"a": 1, "z": 26}
        c2.Foo.trait.update({"a": 0, "b": 1})
        c = Config()
        c.merge(c1)
        c.merge(c2)
        self.assertEqual(c.Foo.trait, {"a": 0, "b": 1, "z": 26})

    def test_merge_multi_lazy_update_II(self):
        """
        With multiple config files (systemwide and users), we want compounding.
        Later dict overwrite laziness
        """
        c1 = Config()
        c2 = Config()
        c1.Foo.trait.update({"a": 0, "b": 1})
        c2.Foo.trait = {"a": 1, "z": 26}
        c = Config()
        c.merge(c1)
        c.merge(c2)
        self.assertEqual(c.Foo.trait, {"a": 1, "z": 26})

    def test_merge_multi_lazy_update_III(self):
        """
        With multiple config files (systemwide and users), we want compounding.
        Later dict overwrite laziness
        """
        c1 = Config()
        c2 = Config()
        c1.Foo.trait.update({"a": 0, "b": 1})
        c2.Foo.trait.update({"a": 1, "z": 26})
        c = Config()
        c.merge(c1)
        c.merge(c2)
        self.assertEqual(c.Foo.trait._update, {"a": 1, "z": 26, "b": 1})
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@traitlets@py3@tests@config@test_loader.py@.PATH_END.py
|
{
"filename": "test_create_skymodel_chunk_map.py",
"repo_name": "JLBLine/WODEN",
"repo_path": "WODEN_extracted/WODEN-master/cmake_testing/wodenpy/skymodel/test_create_skymodel_chunk_map.py",
"type": "Python"
}
|
"""
"""
from sys import path
import os
import unittest
import numpy as np
##This is where our code lives
# code_dir = os.environ['CMAKE_CURRENT_SOURCE_DIR']
# ##Code we are testing
from wodenpy.skymodel import read_yaml_skymodel
# import wodenpy
from wodenpy.skymodel.woden_skymodel import Component_Type_Counter, CompTypes
from wodenpy.skymodel.chunk_sky_model import create_skymodel_chunk_map, Skymodel_Chunk_Map
from common_skymodel_test import fill_comp_counter_for_chunking, Expec_Counter, BaseChunkTest, Skymodel_Settings
import binpacking
##degrees-to-radians conversion factor
D2R = np.pi/180.0
##for now, WODEN has three flux types: power law, curved power law, and list
NUM_FLUX_TYPES = 3
##based class on an existing class test that includes methods for
##for
class Test(BaseChunkTest):
    """Exercise `create_skymodel_chunk_map` over many sky-model compositions,
    checking that POINT/GAUSSIAN components and SHAPELET basis functions are
    split into correctly sized and correctly ordered chunks."""

    def run_test_create_skymodel_chunk_map(self, settings : Skymodel_Settings):
        """Makes a populated Component_Type_Counter based off the given
        inputs, runs it through
        `wodenpy.skymodel.chunk_sky_model.create_skymodel_chunk_map` and
        checks that it gives the correct answers"""
        max_num_visibilities = settings.max_num_visibilities
        num_time_steps = settings.num_time_steps
        num_baselines = settings.num_baselines
        num_freqs = settings.num_freqs
        num_points = settings.num_points
        num_gauss = settings.num_gauss
        num_shapes = settings.num_shapes
        num_coeff_per_shape = settings.num_coeff_per_shape

        ##make the fake sky model
        comp_counter = fill_comp_counter_for_chunking(settings)
        comp_counter.total_components()
        comp_counter.print_info()

        ##Run the code we are testing!
        chunked_skymodel_counters = create_skymodel_chunk_map(comp_counter,
                                          max_num_visibilities, num_baselines,
                                          num_freqs, num_time_steps,
                                          max_chunks_per_set=1e5)

        ##How many components fit in one chunk under the visibility budget
        comps_per_chunk = int(np.floor(max_num_visibilities / (num_baselines * num_freqs * num_time_steps)))

        ##pray this never happens, probably means we're going to run out of
        ##GPU memory TODO don't pray, submit a warning?
        if comps_per_chunk < 1: comps_per_chunk = 1

        ##Each POINT/GAUSSIAN component is counted once per flux type
        num_point_chunks = int(np.ceil(NUM_FLUX_TYPES*num_points / comps_per_chunk))
        num_gauss_chunks = int(np.ceil(NUM_FLUX_TYPES*num_gauss / comps_per_chunk))
        ##we split SHAPELET by the basis components (number of coeffs)
        num_coeff_chunks = int(np.ceil(comp_counter.total_shape_basis / comps_per_chunk))

        ##Counters to see how many point/gauss have already been assigned
        ##to previous chunks
        expec_counter = Expec_Counter()

        for point_ind in range(num_point_chunks):
            chunk_map = chunked_skymodel_counters[0,0][point_ind]
            expec_counter = self.check_pointgauss_chunking(point_ind,
                                            comps_per_chunk,
                                            num_point_chunks, num_points,
                                            settings,
                                            CompTypes.POINT,
                                            comp_counter, chunk_map,
                                            expec_counter)

        ##GAUSSIAN chunks follow directly after the POINT chunks
        for gauss_ind in range(num_gauss_chunks):
            chunk_map = chunked_skymodel_counters[0,0][num_point_chunks + gauss_ind]
            expec_counter = self.check_pointgauss_chunking(gauss_ind,
                                            comps_per_chunk,
                                            num_gauss_chunks, num_gauss,
                                            settings,
                                            CompTypes.GAUSSIAN,
                                            comp_counter, chunk_map,
                                            expec_counter)

        if num_coeff_chunks > 0:
            ##First pass: work out how many shapelet basis functions land in
            ##each would-be chunk, without asserting anything yet
            num_shapes_per_comp = []
            for coeff_ind in range(num_coeff_chunks):
                chunk_map = 'meh'
                num_shapes_comp = self.check_shapelet_chunking(coeff_ind, num_coeff_per_shape,
                                          comps_per_chunk, num_shapes,
                                          settings,
                                          comp_counter, chunk_map,
                                          total_point_comps=NUM_FLUX_TYPES*num_points,
                                          total_gauss_comps=NUM_FLUX_TYPES*num_gauss,
                                          do_check=False)
                num_shapes_per_comp.append(num_shapes_comp)

            ##We will have some undefined number of chunks, so we want to split
            ##things as evenly as possible in the available number of threads
            indexed_shape_chunk_sizes = [(i, n_shape) for i,n_shape in enumerate(num_shapes_per_comp)] # List of (index, value) tuples
            target_volume = comps_per_chunk # Set the target volume for each bin

            ##Partition the sizes while keeping track of their indices
            binned_shape_chunk_sizes = binpacking.to_constant_volume(indexed_shape_chunk_sizes, target_volume, weight_pos=1)

            num_threads = 1
            if len(binned_shape_chunk_sizes) > num_threads:
                while len(binned_shape_chunk_sizes) > num_threads:
                    # Find the two smallest bins and merge them
                    binned_shape_chunk_sizes = sorted(binned_shape_chunk_sizes, key=lambda bin: sum(item[1] for item in bin)) # Sort bins by their total sum
                    binned_shape_chunk_sizes[0].extend(binned_shape_chunk_sizes[1]) # Merge the two smallest bins
                    binned_shape_chunk_sizes.pop(1) # Remove the now-empty bin

            ##Flatten the bins back into the chunk order the code should use
            shape_comp_chunk_order = []
            for bin_index_size in binned_shape_chunk_sizes:
                for index, value in bin_index_size:
                    shape_comp_chunk_order.append(index)

            ##Second pass: SHAPELET chunks sit after all POINT and GAUSSIAN
            ##chunks; check each against the expected bin-packed ordering
            for new_ind, coeff_ind in enumerate(shape_comp_chunk_order):
                chunk_map = chunked_skymodel_counters[0,0][num_point_chunks + num_gauss_chunks + new_ind]
                self.check_shapelet_chunking(coeff_ind, num_coeff_per_shape,
                                          comps_per_chunk, num_shapes,
                                          settings,
                                          comp_counter, chunk_map,
                                          total_point_comps=NUM_FLUX_TYPES*num_points,
                                          total_gauss_comps=NUM_FLUX_TYPES*num_gauss)

    ##Ok now run with many many combinations

    def test_P3_G0000_S00_00_C1_Time001(self):
        """Tiny model: only 3 point sources, single time step and frequency."""
        ##NOTE(review): this local is never passed into Skymodel_Settings,
        ##unlike every other test below — confirm whether it was meant to set
        ##settings.max_num_visibilities.
        max_num_visibilities = 2
        settings = Skymodel_Settings(num_points = 3,
                                     num_gauss = 0,
                                     num_shapes = 0,
                                     num_coeff_per_shape = 0,
                                     num_time_steps = 1,
                                     num_freqs = 1,
                                     num_baselines = 8128,
                                     num_list_values = 3)
        ##BUGFIX: removed a stray trailing comma that made this line a
        ##pointless one-element tuple expression
        self.run_test_create_skymodel_chunk_map(settings)

    def test_P4544_G1736_S20_14_C1e8_Time014(self):
        """Mixed model with all three component types."""
        settings = Skymodel_Settings(num_points = 4544,
                                     num_gauss = 1736,
                                     num_shapes = 20,
                                     num_coeff_per_shape = 14,
                                     max_num_visibilities = 1e8,
                                     num_time_steps = 14,
                                     num_baselines = 8128,
                                     num_freqs = 16,
                                     num_list_values = 4)
        self.run_test_create_skymodel_chunk_map(settings)

    def test_P1000_G0000_S00_00_C1e8_Time014(self):
        """Point sources only."""
        settings = Skymodel_Settings(num_points = 1000,
                                     num_gauss = 0,
                                     num_shapes = 0,
                                     num_coeff_per_shape = 0,
                                     max_num_visibilities = 1e9,
                                     num_time_steps = 14,
                                     num_baselines = 8128,
                                     num_freqs = 16,
                                     num_list_values = 6)
        self.run_test_create_skymodel_chunk_map(settings)

    def test_P0000_G1000_S00_00_C1e8_Time014(self):
        """Gaussians only."""
        settings = Skymodel_Settings(num_points = 0,
                                     num_gauss = 1000,
                                     num_shapes = 0,
                                     num_coeff_per_shape = 0,
                                     max_num_visibilities = 1e9,
                                     num_time_steps = 14,
                                     num_baselines = 8128,
                                     num_freqs = 16,
                                     num_list_values = 5)
        self.run_test_create_skymodel_chunk_map(settings)

    def test_P0000_G0000_S67_78_C1e8_Time014(self):
        """Shapelets only."""
        settings = Skymodel_Settings(num_points = 0,
                                     num_gauss = 0,
                                     num_shapes = 67,
                                     num_coeff_per_shape = 78,
                                     max_num_visibilities = 1e9,
                                     num_time_steps = 14,
                                     num_baselines = 8128,
                                     num_freqs = 16,
                                     num_list_values = 2)
        self.run_test_create_skymodel_chunk_map(settings)

    def test_P0050_G0050_S10_25_C4576_Time014(self):
        """Small mixed model with a very tight visibility budget."""
        settings = Skymodel_Settings(num_points = 50,
                                     num_gauss = 50,
                                     num_shapes = 10,
                                     num_coeff_per_shape = 25,
                                     max_num_visibilities = 4576,
                                     num_time_steps = 14,
                                     num_baselines = 8128,
                                     num_freqs = 16,
                                     num_list_values = 8)
        self.run_test_create_skymodel_chunk_map(settings)

    def test_P87760_G12207_S121_678_C1e10_Time056(self):
        """Large mixed model."""
        settings = Skymodel_Settings(num_points = 87760,
                                     num_gauss = 12207,
                                     num_shapes = 121,
                                     num_coeff_per_shape = 678,
                                     max_num_visibilities = 1e10,
                                     num_time_steps = 56,
                                     num_baselines = 8128,
                                     num_freqs = 32,
                                     num_list_values = 16)
        self.run_test_create_skymodel_chunk_map(settings)

    def test_P87760_G12207_S121_678_C1e10_Time056_Lin70_V100(self):
        """Large mixed model including linear and circular polarisation."""
        settings = Skymodel_Settings(num_points = 87760,
                                     num_gauss = 12207,
                                     num_shapes = 121,
                                     num_coeff_per_shape = 678,
                                     max_num_visibilities = 1e10,
                                     num_time_steps = 56,
                                     num_baselines = 8128,
                                     num_freqs = 32,
                                     num_list_values = 16,
                                     linpol_cadence = 70,
                                     linpol_num_list=2,
                                     linpol_num_p_list=3,
                                     stokesV_cadence = 80,
                                     stokesV_num_list=4)
        self.run_test_create_skymodel_chunk_map(settings)
##Run the test suite when executed as a script
if __name__ == '__main__':
    unittest.main()
|
JLBLineREPO_NAMEWODENPATH_START.@WODEN_extracted@WODEN-master@cmake_testing@wodenpy@skymodel@test_create_skymodel_chunk_map.py@.PATH_END.py
|
{
"filename": "test_reshape.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/indexes/base_class/test_reshape.py",
"type": "Python"
}
|
"""
Tests for ndarray-like method on the base Index class
"""
import numpy as np
import pytest
from pandas import Index
import pandas._testing as tm
class TestReshape:
    """Tests for ndarray-like reshaping methods on the base Index class."""

    def test_repeat(self):
        """Each label is repeated in place, preserving order."""
        idx = Index([1, 2, 3])
        result = idx.repeat(2)
        tm.assert_index_equal(result, Index([1, 1, 2, 2, 3, 3]))

    def test_insert(self):
        # GH 7256
        # validate neg/pos inserts
        base = Index(["b", "c", "d"])
        # insertion at the front
        tm.assert_index_equal(Index(["a", "b", "c", "d"]), base.insert(0, "a"))
        # negative positions follow Python list semantics
        tm.assert_index_equal(Index(["b", "c", "e", "d"]), base.insert(-1, "e"))
        # equivalent positive/negative locations give identical results
        tm.assert_index_equal(base.insert(1, "z"), base.insert(-2, "z"))
        # inserting into an empty index
        empty = Index([])
        tm.assert_index_equal(Index(["a"], dtype=object), empty.insert(0, "a"))

    def test_insert_missing(self, nulls_fixture, using_infer_string):
        # GH#22295
        # test there is no mangling of NA values
        expected = Index(["a", nulls_fixture, "b", "c"], dtype=object)
        result = Index(list("abc"), dtype=object).insert(
            1, Index([nulls_fixture], dtype=object)
        )
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "val", [(1, 2), np.datetime64("2019-12-31"), np.timedelta64(1, "D")]
    )
    @pytest.mark.parametrize("loc", [-1, 2])
    def test_insert_datetime_into_object(self, loc, val):
        # GH#44509
        idx = Index(["1", "2", "3"])
        result = idx.insert(loc, val)
        expected = Index(["1", "2", val, "3"])
        tm.assert_index_equal(result, expected)
        # object dtype must preserve the inserted value's exact type
        assert type(expected[2]) is type(val)

    def test_insert_none_into_string_numpy(self, string_dtype_no_object):
        # GH#55365
        idx = Index(["a", "b", "c"], dtype=string_dtype_no_object)
        result = idx.insert(-1, None)
        expected = Index(["a", "b", None, "c"], dtype=string_dtype_no_object)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "pos,expected",
        [
            (0, Index(["b", "c", "d"], name="index")),
            (-1, Index(["a", "b", "c"], name="index")),
        ],
    )
    def test_delete(self, pos, expected):
        """Deletion by positive or negative position keeps the name."""
        idx = Index(["a", "b", "c", "d"], name="index")
        result = idx.delete(pos)
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name

    def test_delete_raises(self):
        """Out-of-bounds deletion raises IndexError with a numpy-style message."""
        idx = Index(["a", "b", "c", "d"], name="index")
        msg = "index 5 is out of bounds for axis 0 with size 4"
        with pytest.raises(IndexError, match=msg):
            idx.delete(5)

    def test_append_multiple(self):
        """Appending a list of index pieces reassembles the original."""
        idx = Index(["a", "b", "c", "d", "e", "f"])
        pieces = [idx[:2], idx[2:4], idx[4:]]
        result = pieces[0].append(pieces[1:])
        tm.assert_index_equal(result, idx)
        # appending an empty list returns an equal index
        result = idx.append([])
        tm.assert_index_equal(result, idx)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@indexes@base_class@test_reshape.py@.PATH_END.py
|
{
"filename": "plot_structure.py",
"repo_name": "dullemond/radmc3d-2.0",
"repo_path": "radmc3d-2.0_extracted/radmc3d-2.0-master/examples/run_ppdisk_analytic_2/plot_structure.py",
"type": "Python"
}
|
import problem_setup as p
import numpy as np
from mpl_toolkits.mplot3d import axes3d
from matplotlib import pyplot as plt
from matplotlib import cm
from radmc3dPy.image import *
from radmc3dPy.analyze import *
from radmc3dPy.natconst import *
#
# Make sure to have done the following beforhand:
#
# First compile RADMC-3D
# Then run:
# python problem_setup.py
# radmc3d mctherm
#
#
# Read the data
#
#
# Read the data
#
d = readData()
rr,tt = np.meshgrid(d.grid.x,d.grid.y,indexing='ij')
zzr = np.pi/2-tt                 # pi/2 - theta, i.e. approximately z/r
rhod = d.rhodust[:,:,0,0]        # dust density, species 0, azimuth slice 0
temp = d.dusttemp[:,:,0,0]       # dust temperature, species 0, azimuth slice 0
#
# View a surface plot of the density structure
#
fig1 = plt.figure()
# Matplotlib >= 3.6 removed Figure.gca(projection=...); use add_subplot.
ax = fig1.add_subplot(projection='3d')
# BUGFIX: was np.log10(rr)/au, which divides the logarithm by au instead of
# converting the radius to au before taking the log.
ax.plot_surface(np.log10(rr/au), zzr, np.log10(rhod), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=1, antialiased=False)
#
# Set the radii where to make the following plots
#
rpl = np.array([0.5,3,15,90])*au                                                 # Radii where to make the plots
irr = np.array(np.interp(rpl,d.grid.x,np.arange(len(d.grid.x)))+0.5,dtype=int)   # Nearest radial grid point
#
# Plot the vertical density structure at different radii
#
plt.figure()
for ir in irr:
    r = d.grid.x[ir]
    rstr = '{0:4.0f}'.format(r/au)
    rstr = 'r = '+rstr.strip()+' au'
    plt.semilogy(zzr[ir,:],rhod[ir,:],label=rstr)
plt.ylim((1e-22,1e-12))
plt.xlabel(r'$\pi/2-\theta\simeq z/r$')
plt.ylabel(r'$\rho_{\mathrm{dust}}\;[\mathrm{g}/\mathrm{cm}^3]$')
plt.legend()
#
# Plot the vertical temperature structure at different radii
#
plt.figure()
for ir in irr:
    r = d.grid.x[ir]
    rstr = '{0:4.0f}'.format(r/au)
    rstr = 'r = '+rstr.strip()+' au'
    plt.plot(zzr[ir,:],temp[ir,:],label=rstr)
plt.xlabel(r'$\pi/2-\theta\simeq z/r$')
plt.ylabel(r'$T_{\mathrm{dust}}\;[\mathrm{K}]$')
plt.legend()
#
# Plot the radial midplane and surface temperature,
# and compare to the analytic estimate
#
plt.figure()
plt.semilogx(d.grid.x/au,d.dusttemp[:,0,0],':',label='RADMC-3D temperature (surface)',color='C0')
plt.semilogx(d.grid.x/au,d.dusttemp[:,-1,0],label='RADMC-3D temperature (midplane)',color='C1')
plt.semilogx(d.grid.x/au,d.dusttemp[:,-1,0],'.',color='C1')
plt.semilogx(p.r/au,p.tmid,label='Analytic temperature (midplane)',color='C2')
plt.xlabel(r'$r\;[\mathrm{au}]$')
plt.ylabel(r'$T\;[\mathrm{K}]$')
plt.ylim((0,2100))
plt.legend()
plt.show()
|
dullemondREPO_NAMEradmc3d-2.0PATH_START.@radmc3d-2.0_extracted@radmc3d-2.0-master@examples@run_ppdisk_analytic_2@plot_structure.py@.PATH_END.py
|
{
"filename": "test_astrometry_net.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/astrometry_net/tests/test_astrometry_net.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import json
import pytest
from .. import AstrometryNet
# Directory holding the test fixture files, resolved relative to this module.
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')


def data_path(filename):
    """Return the path of *filename* inside the test data directory."""
    return os.path.join(DATA_DIR, filename)
def test_api_key_property():
    """
    Check that an empty key is returned if the api key is not in
    the configuration file and that the expected message shows up in
    the log.
    """
    solver = AstrometryNet()
    with pytest.raises(RuntimeError, match='Astrometry.net API key not set'):
        solver.api_key
def test_empty_settings_property():
    """``empty_settings`` is non-empty and mirrors the constraint keys."""
    solver = AstrometryNet()
    empty = solver.empty_settings
    # Ironically, the "empty" settings still carry every known key.
    assert empty
    assert set(empty.keys()) == set(solver._constraints.keys())
def test_show_allowed_settings(capsys):
    """Every constraint key must appear in the printed settings table."""
    solver = AstrometryNet()
    solver.show_allowed_settings()
    captured = capsys.readouterr()
    for key in solver._constraints.keys():
        assert "{}: type ".format(key) in captured.out
def test_login_fails_with_no_api_key():
    """Logging in with an explicitly empty API key must raise RuntimeError."""
    solver = AstrometryNet()
    solver.api_key = ''
    with pytest.raises(RuntimeError, match='Astrometry.net API key not set'):
        solver._login()
def test_construct_payload():
    """The payload is a single 'request-json' entry holding the settings JSON."""
    solver = AstrometryNet()
    settings = solver.empty_settings
    payload = solver._construct_payload(settings)
    assert list(payload.keys()) == ['request-json']
    assert payload['request-json'] == json.dumps(settings)
def test_setting_validation_basic():
    """Feed each constraint an invalid value and check the error message."""
    solver = AstrometryNet()
    # This gets us a list of settings, their types, and restrictions.
    for name, spec in solver._constraints.items():
        if spec['type'] == str or spec['type'] == bool:
            # A nonsense string is never a valid choice or a bool.
            bad = {name: 'asdiuhiuas'}
            with pytest.raises(ValueError) as e:
                solver._validate_settings(bad)
            if spec['type'] == str:
                assert bad[name] in str(e.value)
                assert 'is invalid. The valid values' in str(e.value)
            else:
                assert "must be of type" in str(e.value) and "'bool'" in str(e.value)
        else:
            # Pick a value smaller than the minimum value
            bad = {name: spec['allowed'][0] - 10}
            with pytest.raises(ValueError) as e:
                solver._validate_settings(bad)
            assert str(bad[name]) in str(e.value)
            assert 'The valid values are' in str(e.value)
def test_setting_validation_with_float_values():
    """Float-typed settings reject non-numeric strings but accept ints."""
    solver = AstrometryNet()
    # A value that cannot be coerced to float must be rejected...
    with pytest.raises(ValueError) as e:
        solver._validate_settings({'center_ra': 'seven'})
    assert "Value for center_ra must be of type " in str(e.value)
    # ...while a value coercible to float (an int) passes; success is
    # simply the absence of an exception.
    solver._validate_settings({'center_ra': 7})
def test_setting_validation_with_bool_values():
    """Bool-typed settings reject non-bool values and accept real bools."""
    solver = AstrometryNet()
    # A string is not an acceptable bool value...
    with pytest.raises(ValueError) as e:
        solver._validate_settings({'crpix_center': 'seven'})
    assert "Value for crpix_center must be of type " in str(e.value)
    # ...but a genuine bool validates; success is the absence of an error.
    solver._validate_settings({'crpix_center': True})
def test_setting_validation_with_no_upper_bound():
    """
    Check that nothing happens when checking bounds for a
    setting with a lower but not upper bound. scale_lower is
    an example; it has a lower bound of 0 but no upper bound.
    """
    solver = AstrometryNet()
    # Success here is simply the absence of an exception.
    solver._validate_settings({'scale_lower': 1})
# Order in the required keys is meaningful; the first is supposed
# to be bigger, the second smaller.
@pytest.mark.parametrize("scale_type,required_keys", [
    ('ev', ('scale_est', 'scale_err')),
    ('ul', ('scale_upper', 'scale_lower'))
])
def test_setting_validation_scale_type(scale_type, required_keys):
    """Each scale_type needs its companion keys plus ``scale_units``."""
    solver = AstrometryNet()
    settings = {'scale_type': scale_type}
    for key, value in zip(required_keys, [1, 0.1]):
        settings[key] = value
    # With "scale_units" still missing, validation must complain...
    with pytest.raises(ValueError) as e:
        solver._validate_settings(settings)
    assert ('Scale type {} requires values '
            'for '.format(scale_type)) in str(e.value)
    # ...and succeed once it is supplied (no exception means success).
    settings['scale_units'] = 'arcsecperpix'
    solver._validate_settings(settings)
def test_invalid_setting_in_solve_from_source_list_name_raises_error():
    """Unknown keyword settings are rejected before any network access."""
    solver = AstrometryNet()
    solver.api_key = 'nonsensekey'
    with pytest.raises(ValueError) as e:
        # The first four positional arguments are required, but their values
        # are not checked until after the settings check (in the current
        # implementation at least), so placeholders are fine here.
        # The keyword argument is definitely not one of the allowed ones.
        solver.solve_from_source_list([], [], [], [], im_a_bad_setting_name=5)
    assert 'im_a_bad_setting_name is not allowed' in str(e.value)
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@astrometry_net@tests@test_astrometry_net.py@.PATH_END.py
|
{
"filename": "dustpylib.ipynb",
"repo_name": "stammler/dustpy",
"repo_path": "dustpy_extracted/dustpy-master/examples/dustpylib.ipynb",
"type": "Jupyter Notebook"
}
|
# Library: dustpylib
`dustpylib` is a library of auxiliary tools and extensions for `DustPy`, containing, for example, interfaces to radiative transfer tools. It is a community project to which everyone can contribute their own `DustPy` customizations.
For more details, please have a look at the [dustpylib documentation](https://dustpylib.rtfd.io/).
|
stammlerREPO_NAMEdustpyPATH_START.@dustpy_extracted@dustpy-master@examples@dustpylib.ipynb@.PATH_END.py
|
{
"filename": "T_S_I_B_.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I_B_.py",
"type": "Python"
}
|
from .T_S_I_V_ import table_T_S_I_V_
class table_T_S_I_B_(table_T_S_I_V_):
    # The TSIB table shares the TSIV table layout; inherit everything unchanged.
    pass
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@fonttools@fontTools@ttLib@tables@T_S_I_B_.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "benstahl92/LOSSPhotPypeline",
"repo_path": "LOSSPhotPypeline_extracted/LOSSPhotPypeline-master/LOSSPhotPypeline/utils/__init__.py",
"type": "Python"
}
|
# internal imports
from LOSSPhotPypeline.utils.astroCatalog import astroCatalog
from LOSSPhotPypeline.utils.LPP_utils import *
from LOSSPhotPypeline.utils.plotLC import plotLC
|
benstahl92REPO_NAMELOSSPhotPypelinePATH_START.@LOSSPhotPypeline_extracted@LOSSPhotPypeline-master@LOSSPhotPypeline@utils@__init__.py@.PATH_END.py
|
{
"filename": "paper_plots.py",
"repo_name": "daniel-muthukrishna/astrorapid",
"repo_path": "astrorapid_extracted/astrorapid-master/paper_plots/paper_plots.py",
"type": "Python"
}
|
# MAKE DATA
# orig_lc, timesX, y_predict = classify_lasair_light_curves(object_names=[
#     'ZTF18abxftqm',  # TDE
#     'ZTF19aadnmgf',  # SNIa
#     'ZTF18acmzpbf',  # SNIa
# ])
#
# with open('paper_plot_real_data.pickle', 'wb') as f:
#     pickle.dump([orig_lc, timesX, y_predict], f)

# PLOT FOR PAPER
# Builds the "real ZTF data" paper figure: one column per transient, with
# the observed light curve (top row) and the classifier's per-class
# probability versus time (bottom row), then saves it as a PDF.
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import MaxNLocator
import matplotlib.animation as animation

# Global matplotlib style for the paper figure (large fonts, LaTeX text).
font = {'family': 'normal',
        'size': 60}
matplotlib.rc('font', **font)
plt.rcParams['text.usetex'] = True
plt.rcParams['font.serif'] = ['Computer Modern Roman'] + plt.rcParams['font.serif']

# Pre-pickled model inputs/outputs produced by the "MAKE DATA" step above.
# NOTE(review): the schemas are assumed from usage below — orig_lc[idx][pb]
# looks like a per-passband table with 'time'/'flux'/'fluxErr' columns and
# orig_lc2[idx]['otherinfo'][0][3] the trigger MJD — confirm against the
# code that wrote these pickles.
with open('paper_plot_real_data.pickle', 'rb') as f:
    orig_lc, timesX, y_predict = pickle.load(f)
with open('../paper_plot_real_data2.pickle', 'rb') as f:
    orig_lc2, timesX2, y_predict2 = pickle.load(f)
# Hard-coded absolute path: this script only runs on the author's machine
# unless this pickle is regenerated locally.
with open('/Users/danmuth/PycharmProjects/astrorapid/save_real_mags.pickle', 'rb') as f:
    real_names, real_mjds, real_passbands, real_mags, real_magerrs, real_zeropoints, real_photflags = pickle.load(f)
# mask = (y_predict > 0.1)
# y_predict[mask] = 0.1*np.random.random(np.shape(y_predict[mask])) + y_predict[mask]

# Class list must match the column ordering of the classifier's output.
CLASS_NAMES = ['Pre-explosion', 'SNIa-norm', 'SNIbc', 'SNII', 'SNIa-91bg', 'SNIa-x', 'point-Ia', 'Kilonova',
               'SLSN-I',
               'PISN', 'ILOT', 'CART', 'TDE']
CLASS_COLOR = {'Pre-explosion': 'grey', 'SNIa-norm': 'tab:green', 'SNIbc': 'tab:orange', 'SNII': 'tab:blue',
               'SNIa-91bg': 'tab:red', 'SNIa-x': 'tab:purple', 'point-Ia': 'tab:brown', 'Kilonova': '#aaffc3',
               'SLSN-I': 'tab:olive', 'PISN': 'tab:cyan', 'ILOT': '#FF1493', 'CART': 'navy', 'TDE': 'tab:pink'}
PB_COLOR = {'u': 'tab:blue', 'g': 'tab:blue', 'r': 'tab:orange', 'i': 'm', 'z': 'k', 'Y': 'y'}
PB_MARKER = {'g': 'o', 'r': 's'}
PB_ALPHA = {'g': 0.3, 'r': 1.}
passbands = ['g', 'r']
use_interp_flux = False  # if True, plot probabilities on the model's interpolated time grid
step = True              # if True, draw probabilities as post-step lines
texts = ['TDE ($z = 0.09$)', 'SNIa ($z = 0.08$)', 'SNIa ($z = 0.036$)']
names = ['ZTF18abxftqm', 'ZTF19aadnmgf', 'ZTF18acmzpbf']

fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(45, 25), num="ZTF_real_data_examples", sharex='col')
for idx in range(len(orig_lc)):
    # Index one past the last valid entry on the model's time grid.
    argmax = timesX[idx].argmax() + 1
    # ax1.axvline(x=0, color='k', linestyle='-', linewidth=1)
    # ax2.axvline(x=0, color='k', linestyle='-', linewidth=1)
    for pbidx, pb in enumerate(passbands):
        if pb in orig_lc[idx].keys():
            # ax[0][idx].errorbar(orig_lc[idx][pb]['time'], orig_lc[idx][pb]['flux'],
            #                     yerr=orig_lc[idx][pb]['fluxErr'], fmt=PB_MARKER[pb], label=pb,
            #                     c=PB_COLOR[pb], lw=4, markersize=14)
            # ax[0][idx].errorbar(orig_lc2[idx][pb]['time'], orig_lc2[idx][pb]['flux'],
            #                     yerr=orig_lc2[idx][pb]['fluxErr'], fmt=PB_MARKER[pb], label=pb,
            #                     c=PB_COLOR[pb], lw=4, markersize=14)
            trigger_mjd = orig_lc2[idx]['otherinfo'][0][3]
            # Keep detections (photflag != 0) plus pre-trigger epochs for
            # this passband; passband codes in the pickle are 1-based.
            pbmask = (real_passbands[idx] == pbidx + 1) & ((real_photflags[idx] != 0) | (real_mjds[idx] <= trigger_mjd))
            # ax[0][idx].errorbar(real_mjds[idx][pbmask]-trigger_mjd, real_mags[idx][pbmask],
            #                     yerr=real_magerrs[idx][pbmask], fmt=PB_MARKER[pb], label=pb,
            #                     c=PB_COLOR[pb], lw=4, markersize=14)
            # NOTE(review): pairs the model's (NaN-dropped) time axis with
            # the raw magnitudes; assumes both have the same length as the
            # mask selection — verify, otherwise errorbar raises.
            ax[0][idx].errorbar(orig_lc2[idx][pb]['time'].dropna(), real_mags[idx][pbmask],
                                yerr=real_magerrs[idx][pbmask], fmt=PB_MARKER[pb], label=pb,
                                c=PB_COLOR[pb], lw=4, markersize=14)
            # ax[0][idx].errorbar(orig_lc2[idx][pb]['time'], -2.5*np.log10(orig_lc2[idx][pb]['flux']) + 26.2,
            #                     yerr=2.5*orig_lc2[idx][pb]['fluxErr']/orig_lc2[idx][pb]['flux']/np.log(10.), fmt=PB_MARKER[pb], label=pb,
            #                     c=PB_COLOR[pb], lw=4, markersize=14)
    # Magnitudes: smaller is brighter, so flip the y axis.
    ax[0][idx].invert_yaxis()
    # Union of observation times across passbands, NaNs dropped, sorted.
    new_t = np.array([orig_lc[idx][pb]['time'].values for pb in passbands]).flatten()
    new_t = np.sort(new_t[~np.isnan(new_t)])
    if not use_interp_flux:
        # Re-sample each class's probability curve onto the observation times.
        new_y_predict = []
        for classnum, classname in enumerate(CLASS_NAMES):
            new_y_predict.append(
                np.interp(new_t, timesX[idx][:argmax], y_predict[idx][:, classnum][:argmax]))
    class_lines = []
    for classnum, classname in enumerate(CLASS_NAMES):
        if not use_interp_flux:
            if step:
                class_lines.append(ax[1][idx].step(new_t, new_y_predict[classnum], '-', label=classname, color=CLASS_COLOR[classname], linewidth=4, where='post'))
            else:
                class_lines.append(ax[1][idx].plot(new_t, new_y_predict[classnum], '-', label=classname, color=CLASS_COLOR[classname], linewidth=4))
        else:
            class_lines.append(ax[1][idx].plot(timesX[idx][:argmax], y_predict[idx][:, classnum][:argmax], '-', label=classname, color=CLASS_COLOR[classname], linewidth=4))
    # ax[0][idx].legend(frameon=True, fontsize=50, loc='lower right')
    # ax[1][idx].legend(frameon=True, fontsize=21.5)  # , loc='center right') # , ncol=2)
    ax[1][idx].set_xlabel("Days since trigger (rest frame)")  # , fontsize=18)
    ax[0][idx].set_ylabel("Magnitude")  # , fontsize=15)
    ax[1][idx].set_ylabel("Class Probability")  # , fontsize=18)
    # ax1.set_ylim(-0.1, 1.1)
    # ax[1][idx].set_ylim(0, 1)
    ax[0][idx].set_xlim(left=min(new_t), right=max(timesX[idx][:argmax]))  # ax1.set_xlim(-70, 80)
    # ax1.grid(True)
    # ax2.grid(True)
    ttl = ax[0][idx].set_title(names[idx], fontsize=60)
    ttl.set_position([.5, 1.02])
    # Top row shares the x axis with the bottom row; hide its tick labels.
    plt.setp(ax[0][idx].get_xticklabels(), visible=False)
    ax[1][idx].yaxis.set_major_locator(MaxNLocator(nbins=6, prune='upper'))  # added

# Per-panel light-curve legends; the class legend is shared below the figure.
ax[0][0].legend(frameon=True, fontsize=50, loc='lower right')
ax[0][1].legend(frameon=True, fontsize=50, loc='lower right')
ax[0][2].legend(frameon=True, fontsize=50, loc='upper right')
handles, labels = ax[1][0].get_legend_handles_labels()
lgd = fig.legend(handles, labels, loc='lower center', ncol=5, bbox_to_anchor=(0., 0.0, 1., .02), borderaxespad=0.)  # , mode='expand')
savename = 'ZTF_real_data_examples'
plt.tight_layout()
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(bottom=0.24)
plt.savefig("{}3.pdf".format(savename), bbox_extra_artists=(lgd,), bbox_inches="tight")
|
daniel-muthukrishnaREPO_NAMEastrorapidPATH_START.@astrorapid_extracted@astrorapid-master@paper_plots@paper_plots.py@.PATH_END.py
|
{
"filename": "distance.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/spatial/distance.py",
"type": "Python"
}
|
"""
Distance computations (:mod:`scipy.spatial.distance`)
=====================================================
.. sectionauthor:: Damian Eads
Function reference
------------------
Distance matrix computation from a collection of raw observation vectors
stored in a rectangular array.
.. autosummary::
:toctree: generated/
pdist -- pairwise distances between observation vectors.
cdist -- distances between two collections of observation vectors
squareform -- convert distance matrix to a condensed one and vice versa
directed_hausdorff -- directed Hausdorff distance between arrays
Predicates for checking the validity of distance matrices, both
condensed and redundant. Also contained in this module are functions
for computing the number of observations in a distance matrix.
.. autosummary::
:toctree: generated/
is_valid_dm -- checks for a valid distance matrix
is_valid_y -- checks for a valid condensed distance matrix
num_obs_dm -- # of observations in a distance matrix
num_obs_y -- # of observations in a condensed distance matrix
Distance functions between two numeric vectors ``u`` and ``v``. Computing
distances over a large collection of vectors is inefficient for these
functions. Use ``pdist`` for this purpose.
.. autosummary::
:toctree: generated/
braycurtis -- the Bray-Curtis distance.
canberra -- the Canberra distance.
chebyshev -- the Chebyshev distance.
cityblock -- the Manhattan distance.
correlation -- the Correlation distance.
cosine -- the Cosine distance.
euclidean -- the Euclidean distance.
jensenshannon -- the Jensen-Shannon distance.
mahalanobis -- the Mahalanobis distance.
minkowski -- the Minkowski distance.
seuclidean -- the normalized Euclidean distance.
sqeuclidean -- the squared Euclidean distance.
Distance functions between two boolean vectors (representing sets) ``u`` and
``v``. As in the case of numerical vectors, ``pdist`` is more efficient for
computing the distances between all pairs.
.. autosummary::
:toctree: generated/
dice -- the Dice dissimilarity.
hamming -- the Hamming distance.
jaccard -- the Jaccard distance.
kulczynski1 -- the Kulczynski 1 distance.
rogerstanimoto -- the Rogers-Tanimoto dissimilarity.
russellrao -- the Russell-Rao dissimilarity.
sokalmichener -- the Sokal-Michener dissimilarity.
sokalsneath -- the Sokal-Sneath dissimilarity.
yule -- the Yule dissimilarity.
:func:`hamming` also operates over discrete numerical vectors.
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# Public API of scipy.spatial.distance; names not listed here are private.
__all__ = [
    'braycurtis',
    'canberra',
    'cdist',
    'chebyshev',
    'cityblock',
    'correlation',
    'cosine',
    'dice',
    'directed_hausdorff',
    'euclidean',
    'hamming',
    'is_valid_dm',
    'is_valid_y',
    'jaccard',
    'jensenshannon',
    'kulczynski1',
    'mahalanobis',
    'minkowski',
    'num_obs_dm',
    'num_obs_y',
    'pdist',
    'rogerstanimoto',
    'russellrao',
    'seuclidean',
    'sokalmichener',
    'sokalsneath',
    'sqeuclidean',
    'squareform',
    'yule'
]
import math
import warnings
import numpy as np
import dataclasses
from collections.abc import Callable
from functools import partial
from scipy._lib._util import _asarray_validated, _transition_to_rng
from scipy._lib.deprecation import _deprecated
from . import _distance_wrap
from . import _hausdorff
from ..linalg import norm
from ..special import rel_entr
from . import _distance_pybind
def _copy_array_if_base_present(a):
"""Copy the array if its base points to a parent array."""
if a.base is not None:
return a.copy()
return a
def _correlation_cdist_wrap(XA, XB, dm, **kwargs):
    # Correlation distance equals the cosine distance of mean-centered rows:
    # center each row (binding new arrays, so the callers' inputs are not
    # mutated) and delegate to the C cosine kernel, which fills ``dm``
    # in place.
    XA = XA - XA.mean(axis=1, keepdims=True)
    XB = XB - XB.mean(axis=1, keepdims=True)
    _distance_wrap.cdist_cosine_double_wrap(XA, XB, dm, **kwargs)
def _correlation_pdist_wrap(X, dm, **kwargs):
    # Same trick as _correlation_cdist_wrap for the pairwise (pdist) case:
    # mean-center the rows into a fresh array and run the C cosine kernel,
    # which fills ``dm`` in place.
    X2 = X - X.mean(axis=1, keepdims=True)
    _distance_wrap.pdist_cosine_double_wrap(X2, dm, **kwargs)
def _convert_to_type(X, out_type):
return np.ascontiguousarray(X, dtype=out_type)
def _nbool_correspond_all(u, v, w=None):
if u.dtype == v.dtype == bool and w is None:
not_u = ~u
not_v = ~v
nff = (not_u & not_v).sum()
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
ntt = (u & v).sum()
else:
dtype = np.result_type(int, u.dtype, v.dtype)
u = u.astype(dtype)
v = v.astype(dtype)
not_u = 1.0 - u
not_v = 1.0 - v
if w is not None:
not_u = w * not_u
u = w * u
nff = (not_u * not_v).sum()
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
ntt = (u * v).sum()
return (nff, nft, ntf, ntt)
def _nbool_correspond_ft_tf(u, v, w=None):
if u.dtype == v.dtype == bool and w is None:
not_u = ~u
not_v = ~v
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
else:
dtype = np.result_type(int, u.dtype, v.dtype)
u = u.astype(dtype)
v = v.astype(dtype)
not_u = 1.0 - u
not_v = 1.0 - v
if w is not None:
not_u = w * not_u
u = w * u
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
return (nft, ntf)
def _validate_cdist_input(XA, XB, mA, mB, n, metric_info, **kwargs):
    """Coerce cdist inputs to a dtype the metric supports and validate kwargs.

    Returns ``(XA, XB, typ, kwargs)`` where ``typ`` is the chosen dtype and
    the arrays are C-contiguous copies/views in that dtype.
    """
    supported = metric_info.types
    # Keep the input dtype when the metric supports it; otherwise fall back
    # to the metric's preferred (first listed) type.
    typ = supported[supported.index(XA.dtype)] if XA.dtype in supported else supported[0]
    XA = np.ascontiguousarray(XA, dtype=typ)
    XB = np.ascontiguousarray(XB, dtype=typ)
    validator = metric_info.validator
    if validator:
        kwargs = validator((XA, XB), mA + mB, n, **kwargs)
    return XA, XB, typ, kwargs
def _validate_weight_with_size(X, m, n, **kwargs):
    """Validate an optional ``'w'`` entry in *kwargs* against length *n*.

    Pops ``'w'``; when present it must be a 1-D array of exactly *n*
    weights, which is then normalized via :func:`_validate_weights` and
    stored back.  Returns the (possibly updated) kwargs.
    """
    w = kwargs.pop('w', None)
    if w is not None:
        if w.ndim != 1 or w.shape[0] != n:
            raise ValueError("Weights must have same size as input vector. "
                             f"{w.shape[0]} vs. {n}")
        kwargs['w'] = _validate_weights(w)
    return kwargs
def _validate_hamming_kwargs(X, m, n, **kwargs):
    """Ensure ``'w'`` in *kwargs* is an *n*-element weight vector.

    Missing weights default to a vector of ones (uniform weighting).
    """
    default = np.ones((n,), dtype='double')
    w = kwargs.get('w', default)
    bad_shape = w.ndim != 1 or w.shape[0] != n
    if bad_shape:
        raise ValueError(
            "Weights must have same size as input vector. %d vs. %d" % (w.shape[0], n)
        )
    kwargs['w'] = _validate_weights(w)
    return kwargs
def _validate_mahalanobis_kwargs(X, m, n, **kwargs):
    """Fill in / validate the inverse covariance ``'VI'`` for mahalanobis.

    When ``'VI'`` is absent it is estimated from the observations, which
    requires strictly more observations (m) than dimensions (n) so the
    covariance matrix is invertible.
    """
    VI = kwargs.pop('VI', None)
    if VI is None:
        if m <= n:
            # There are fewer observations than the dimension of
            # the observations.
            raise ValueError("The number of observations (%d) is too "
                             "small; the covariance matrix is "
                             "singular. For observations with %d "
                             "dimensions, at least %d observations "
                             "are required." % (m, n, n + 1))
        if isinstance(X, tuple):
            # cdist passes (XA, XB); pool both sets for the covariance.
            X = np.vstack(X)
        CV = np.atleast_2d(np.cov(X.astype(np.float64, copy=False).T))
        # .T.copy() yields a C-contiguous inverse for the C kernels.
        VI = np.linalg.inv(CV).T.copy()
    kwargs["VI"] = _convert_to_double(VI)
    return kwargs
def _validate_minkowski_kwargs(X, m, n, **kwargs):
    """Validate optional weights and the order ``'p'`` (default 2) for minkowski."""
    kwargs = _validate_weight_with_size(X, m, n, **kwargs)
    # setdefault both fills in the default and hands back the value to check.
    p = kwargs.setdefault('p', 2.)
    if p <= 0:
        raise ValueError("p must be greater than 0")
    return kwargs
def _validate_pdist_input(X, m, n, metric_info, **kwargs):
    """Coerce the pdist input to a dtype the metric supports and validate kwargs.

    Returns ``(X, typ, kwargs)`` where ``typ`` is the chosen dtype and ``X``
    is C-contiguous in that dtype.
    """
    supported = metric_info.types
    # Keep the input dtype when the metric supports it; otherwise fall back
    # to the metric's preferred (first listed) type.
    typ = supported[supported.index(X.dtype)] if X.dtype in supported else supported[0]
    X = np.ascontiguousarray(X, dtype=typ)
    validator = metric_info.validator
    if validator:
        kwargs = validator(X, m, n, **kwargs)
    return X, typ, kwargs
def _validate_seuclidean_kwargs(X, m, n, **kwargs):
    """Fill in / validate the variance vector ``'V'`` for the seuclidean metric.

    When ``'V'`` is absent, per-component sample variances are estimated from
    the data; otherwise the user-supplied vector must be 1-D of length *n*.
    """
    V = kwargs.pop('V', None)
    if V is None:
        # cdist passes (XA, XB); pool both sets before estimating variances.
        data = np.vstack(X) if isinstance(X, tuple) else X
        V = np.var(data.astype(np.float64, copy=False), axis=0, ddof=1)
    else:
        V = np.asarray(V, order='c')
        if len(V.shape) != 1:
            raise ValueError('Variance vector V must '
                             'be one-dimensional.')
        if V.shape[0] != n:
            raise ValueError('Variance vector V must be of the same '
                             'dimension as the vectors on which the distances '
                             'are computed.')
    kwargs['V'] = _convert_to_double(V)
    return kwargs
def _validate_vector(u, dtype=None):
    """Convert *u* to a 1-D ndarray (optionally of *dtype*); raise otherwise."""
    u = np.asarray(u, dtype=dtype, order='c')
    if u.ndim != 1:
        raise ValueError("Input vector should be 1-D.")
    return u
def _validate_weights(w, dtype=np.float64):
    """Validate a weight vector: 1-D, numeric *dtype*, all entries >= 0."""
    w = _validate_vector(w, dtype=dtype)
    if (w < 0).any():
        raise ValueError("Input weights should be all non-negative")
    return w
@_transition_to_rng('seed', position_num=2, replace_doc=False)
def directed_hausdorff(u, v, rng=0):
"""
Compute the directed Hausdorff distance between two 2-D arrays.
Distances between pairs are calculated using a Euclidean metric.
Parameters
----------
u : (M,N) array_like
Input array with M points in N dimensions.
v : (O,N) array_like
Input array with O points in N dimensions.
rng : int or `numpy.random.Generator` or None, optional
Pseudorandom number generator state. Default is 0 so the
shuffling of `u` and `v` is reproducible.
If `rng` is passed by keyword, types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
If `rng` is already a ``Generator`` instance, then the provided instance is
used.
If this argument is passed by position or `seed` is passed by keyword,
legacy behavior for the argument `seed` applies:
- If `seed` is None, a new ``RandomState`` instance is used. The state is
initialized using data from ``/dev/urandom`` (or the Windows analogue)
if available or from the system clock otherwise.
- If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
- If `seed` is already a ``Generator`` or ``RandomState`` instance, then
that instance is used.
.. versionchanged:: 1.15.0
As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
transition from use of `numpy.random.RandomState` to
`numpy.random.Generator`, this keyword was changed from `seed` to `rng`.
For an interim period, both keywords will continue to work, although only
one may be specified at a time. After the interim period, function calls
using the `seed` keyword will emit warnings. The behavior of both `seed`
and `rng` are outlined above, but only the `rng` keyword should be used in
new code.
Returns
-------
d : double
The directed Hausdorff distance between arrays `u` and `v`,
index_1 : int
index of point contributing to Hausdorff pair in `u`
index_2 : int
index of point contributing to Hausdorff pair in `v`
Raises
------
ValueError
An exception is thrown if `u` and `v` do not have
the same number of columns.
See Also
--------
scipy.spatial.procrustes : Another similarity test for two data sets
Notes
-----
Uses the early break technique and the random sampling approach
described by [1]_. Although worst-case performance is ``O(m * o)``
(as with the brute force algorithm), this is unlikely in practice
as the input data would have to require the algorithm to explore
every single point interaction, and after the algorithm shuffles
the input points at that. The best case performance is O(m), which
is satisfied by selecting an inner loop distance that is less than
cmax and leads to an early break as often as possible. The authors
have formally shown that the average runtime is closer to O(m).
.. versionadded:: 0.19.0
References
----------
.. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
calculating the exact Hausdorff distance." IEEE Transactions On
Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
2015.
Examples
--------
Find the directed Hausdorff distance between two 2-D arrays of
coordinates:
>>> from scipy.spatial.distance import directed_hausdorff
>>> import numpy as np
>>> u = np.array([(1.0, 0.0),
... (0.0, 1.0),
... (-1.0, 0.0),
... (0.0, -1.0)])
>>> v = np.array([(2.0, 0.0),
... (0.0, 2.0),
... (-2.0, 0.0),
... (0.0, -4.0)])
>>> directed_hausdorff(u, v)[0]
2.23606797749979
>>> directed_hausdorff(v, u)[0]
3.0
Find the general (symmetric) Hausdorff distance between two 2-D
arrays of coordinates:
>>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
3.0
Find the indices of the points that generate the Hausdorff distance
(the Hausdorff pair):
>>> directed_hausdorff(v, u)[1:]
(3, 3)
"""
u = np.asarray(u, dtype=np.float64, order='c')
v = np.asarray(v, dtype=np.float64, order='c')
if u.shape[1] != v.shape[1]:
raise ValueError('u and v need to have the same '
'number of columns')
result = _hausdorff.directed_hausdorff(u, v, rng)
return result
def minkowski(u, v, p=2, w=None):
    """
    Compute the Minkowski distance between two 1-D arrays.

    The (optionally weighted) Minkowski distance of order `p` is

    .. math::

       \\left(\\sum{w_i(|(u_i - v_i)|^p)}\\right)^{1/p}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    p : scalar
        The order of the norm of the difference :math:`{\\|u-v\\|}_p`. Note
        that for :math:`0 < p < 1`, the triangle inequality only holds with
        an additional multiplicative factor, i.e. it is only a quasi-metric.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    minkowski : double
        The Minkowski distance between vectors `u` and `v`.

    Raises
    ------
    ValueError
        If ``p <= 0`` or an input is not 1-D.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
    2.0
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
    1.4142135623730951
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 3)
    1.2599210498948732
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if p <= 0:
        raise ValueError("p must be greater than 0")
    diff = u - v
    if w is not None:
        w = _validate_weights(w)
        # Fold the weights into the difference so that
        # ||root_w * (u - v)||_p == (sum w_i |u_i - v_i|^p)^(1/p).
        if p == 1:
            root_w = w
        elif p == 2:
            # better precision and speed than the generic power
            root_w = np.sqrt(w)
        elif p == np.inf:
            root_w = (w != 0)
        else:
            root_w = np.power(w, 1 / p)
        diff = root_w * diff
    return norm(diff, ord=p)
def euclidean(u, v, w=None):
    """
    Computes the Euclidean distance between two 1-D arrays.

    The Euclidean distance between 1-D arrays `u` and `v`, is defined as

    .. math::

       {\\|u-v\\|}_2

       \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2}

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    euclidean : double
        The Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.euclidean([1, 0, 0], [0, 1, 0])
    1.4142135623730951
    >>> distance.euclidean([1, 1, 0], [0, 1, 0])
    1.0
    """
    # Euclidean distance is the p=2 special case of the Minkowski distance;
    # input validation and weighting happen inside minkowski().
    return minkowski(u, v, p=2, w=w)
def sqeuclidean(u, v, w=None):
    """
    Compute the squared Euclidean distance between two 1-D arrays.

    The squared Euclidean distance between `u` and `v` is defined as

    .. math::

       \\sum_i{w_i |u_i - v_i|^2}

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sqeuclidean : double
        The squared Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0])
    1.0
    """
    # Keep inexact (float/complex) dtypes as-is; promote everything else to
    # np.float64 for stability (e.g. so small integer dtypes can't overflow).
    utype = None if (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)) else np.float64
    vtype = None if (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)) else np.float64
    u = _validate_vector(u, dtype=utype)
    v = _validate_vector(v, dtype=vtype)
    diff = u - v
    # Apply the weights to only one factor of the dot product.
    weighted = diff if w is None else _validate_weights(w) * diff
    return np.dot(diff, weighted)
def correlation(u, v, w=None, centered=True):
    """
    Compute the correlation distance between two 1-D arrays.

    The correlation distance between `u` and `v` is defined as

    .. math::

        1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                  {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}

    where :math:`\\bar{u}` is the mean of the elements of `u`
    and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.

    Parameters
    ----------
    u : (N,) array_like of floats
        Input array.

        .. deprecated:: 1.15.0
            Complex `u` is deprecated and will raise an error in SciPy 1.17.0
    v : (N,) array_like of floats
        Input array.

        .. deprecated:: 1.15.0
            Complex `v` is deprecated and will raise an error in SciPy 1.17.0
    w : (N,) array_like of floats, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0
    centered : bool, optional
        If True, `u` and `v` will be centered. Default is True.

    Returns
    -------
    correlation : double
        The correlation distance between 1-D array `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial.distance import correlation
    >>> correlation([1, 0, 1], [1, 1, 0])
    1.5
    >>> correlation([1, 0, 1], [1, 1, 0], w=[0.9, 0.1, 0.1])
    1.1
    >>> correlation([1, 0, 1], [1, 1, 0], centered=False)
    0.5
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if np.iscomplexobj(u) or np.iscomplexobj(v):
        warnings.warn(
            "Complex `u` and `v` are deprecated and will raise an error in "
            "SciPy 1.17.0.",
            DeprecationWarning, stacklevel=2)
    if w is not None:
        w = _validate_weights(w)
        w = w / w.sum()  # normalize so the weights form a probability mass
    if centered:
        # Subtract the (weighted) means so the plain dot products below
        # become (co)variances.
        umu = np.dot(u, w) if w is not None else np.mean(u)
        vmu = np.dot(v, w) if w is not None else np.mean(v)
        u = u - umu
        v = v - vmu
    if w is not None:
        uw, vw = u * w, v * w
    else:
        uw, vw = u, v
    uv = np.dot(u, vw)
    uu = np.dot(u, uw)
    vv = np.dot(v, vw)
    dist = 1.0 - uv / math.sqrt(uu * vv)
    # Rounding can push the result slightly outside [0, 2]; clip it back.
    return np.clip(dist, 0.0, 2.0)
def cosine(u, v, w=None):
    """
    Compute the Cosine distance between 1-D arrays.

    The Cosine distance between `u` and `v`, is defined as

    .. math::

        1 - \\frac{u \\cdot v}
                  {\\|u\\|_2 \\|v\\|_2}.

    where :math:`u \\cdot v` is the dot product of :math:`u` and
    :math:`v`.

    Parameters
    ----------
    u : (N,) array_like of floats
        Input array.

        .. deprecated:: 1.15.0
            Complex `u` is deprecated and will raise an error in SciPy 1.17.0
    v : (N,) array_like of floats
        Input array.

        .. deprecated:: 1.15.0
            Complex `v` is deprecated and will raise an error in SciPy 1.17.0
    w : (N,) array_like of floats, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cosine : double
        The Cosine distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cosine([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([100, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([1, 1, 0], [0, 1, 0])
    0.29289321881345254
    """
    # cosine distance is also referred to as 'uncentered correlation',
    # or 'reflective correlation'
    # Delegating to correlation() with centering disabled also inherits its
    # complex-input deprecation warning and the clipping to [0, 2].
    return correlation(u, v, w=w, centered=False)
def hamming(u, v, w=None):
    """
    Compute the Hamming distance between two 1-D arrays.

    The Hamming distance is the (optionally weighted) proportion of
    positions at which `u` and `v` disagree.  For boolean vectors this is

    .. math::

       \\frac{c_{01} + c_{10}}{n}

    where :math:`c_{ij}` counts positions with ``u[k] == i`` and
    ``v[k] == j``.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    hamming : double
        The Hamming distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.hamming([1, 0, 0], [0, 1, 0])
    0.66666666666666663
    >>> distance.hamming([1, 0, 0], [1, 1, 0])
    0.33333333333333331
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if u.shape != v.shape:
        raise ValueError('The 1d arrays must have equal lengths.')
    disagreement = u != v
    if w is None:
        return np.mean(disagreement)
    w = _validate_weights(w)
    if w.shape != u.shape:
        raise ValueError("'w' should have the same length as 'u' and 'v'.")
    # Normalized weights turn the weighted count into a weighted proportion.
    w = w / w.sum()
    return np.dot(disagreement, w)
def jaccard(u, v, w=None):
    r"""
    Compute the Jaccard dissimilarity between two boolean vectors.

    For boolean vectors :math:`u` and :math:`v` that are not both zero,

    .. math::

       d_\textrm{jaccard}(u, v) := \frac{c_{10} + c_{01}}
                                        {c_{11} + c_{10} + c_{01}}

    where :math:`c_{ij}` counts positions with ``u[k] == i`` and
    ``v[k] == j``.  If both vectors are entirely zero, the dissimilarity
    is defined to be zero ([2]_).  With a non-negative weight vector `w`,
    each position contributes its weight instead of 1.

    Parameters
    ----------
    u : (N,) array_like of bools
        Input vector.
    v : (N,) array_like of bools
        Input vector.
    w : (N,) array_like of floats, optional
        Weights for each pair of :math:`(u_k, v_k)`. Default is ``None``,
        which gives each pair a weight of ``1.0``.

    Returns
    -------
    jaccard : float
        The Jaccard dissimilarity between vectors `u` and `v`, optionally
        weighted by `w` if supplied.

    Notes
    -----
    The Jaccard dissimilarity satisfies the triangle inequality and is
    qualified as a metric. [2]_  The *Jaccard index* equals one minus the
    Jaccard dissimilarity. [3]_

    .. versionchanged:: 1.2.0
        Previously, when all (positively weighted) elements of `u` and `v`
        were zero the function returned ``nan``; it now returns ``0``.

    .. versionchanged:: 1.15.0
        Non-0/1 numeric input used to produce an ad hoc result. Since
        1.15.0, numeric input is converted to Boolean before computation.

    References
    ----------
    .. [1] Kaufman, L. and Rousseeuw, P. J. (1990). "Finding Groups in Data:
           An Introduction to Cluster Analysis." John Wiley & Sons, Inc.
    .. [2] Kosub, S. (2019). "A note on the triangle inequality for the
           Jaccard distance." *Pattern Recognition Letters*, 120:36-38.
    .. [3] https://en.wikipedia.org/wiki/Jaccard_index

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.jaccard([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.jaccard([1, 0, 0, 0], [1, 1, 1, 0])
    0.6666666666666666
    >>> distance.jaccard([0, 0, 0], [0, 0, 0])
    0.0
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    u_nz = u != 0
    v_nz = v != 0
    unequal = u_nz ^ v_nz   # symmetric difference of the supports
    union = u_nz | v_nz     # union of the supports
    if w is not None:
        w = _validate_weights(w)
        unequal = w * unequal
        union = w * union
    num = np.float64(unequal.sum())
    den = np.float64(union.sum())
    # Two all-zero vectors are defined to have zero dissimilarity.
    return (num / den) if den != 0 else np.float64(0)
# Decorator that emits the deprecation message below whenever the (still
# public) kulczynski1 function is called.
_deprecated_kulczynski1 = _deprecated(
    "The kulczynski1 metric is deprecated since SciPy 1.15.0 and will be "
    "removed in SciPy 1.17.0. Replace usage of 'kulczynski1(u, v)' with "
    "'1/jaccard(u, v) - 1'."
)
@_deprecated_kulczynski1
def kulczynski1(u, v, *, w=None):
    """
    Compute the Kulczynski 1 dissimilarity between two boolean 1-D arrays.

    .. deprecated:: 1.15.0
       This function is deprecated and will be removed in SciPy 1.17.0.
       Replace usage of ``kulczynski1(u, v)`` with ``1/jaccard(u, v) - 1``.

    The Kulczynski 1 dissimilarity between two boolean 1-D arrays `u` and `v`
    of length ``n``, is defined as

    .. math::

         \\frac{c_{11}}
               {c_{01} + c_{10}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k \\in {0, 1, ..., n-1}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    kulczynski1 : float
        The Kulczynski 1 distance between vectors `u` and `v`.

    Notes
    -----
    This measure has a minimum value of 0 and no upper limit.
    It is un-defined when there are no non-matches.

    .. versionadded:: 1.8.0

    References
    ----------
    .. [1] Kulczynski S. et al. Bulletin
           International de l'Academie Polonaise des Sciences
           et des Lettres, Classe des Sciences Mathematiques
           et Naturelles, Serie B (Sciences Naturelles). 1927;
           Supplement II: 57-203.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.kulczynski1([1, 0, 0], [0, 1, 0])
    0.0
    >>> distance.kulczynski1([True, False, False], [True, True, False])
    1.0
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    (_, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
    # Matches over mismatches; divides by zero when u and v agree everywhere
    # (the "un-defined" case documented in Notes).
    return ntt / (ntf + nft)
def seuclidean(u, v, V):
    """
    Return the standardized Euclidean distance between two 1-D arrays.

    The standardized Euclidean distance between two n-vectors `u` and `v` is

    .. math::

       \\sqrt{\\sum\\limits_i \\frac{1}{V_i} \\left(u_i-v_i \\right)^2}

    where ``V[i]`` is the variance of the i-th component over a larger
    collection of vectors.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    V : (N,) array_like
        `V` is an 1-D array of component variances. It is usually computed
        among a larger collection of vectors.

    Returns
    -------
    seuclidean : double
        The standardized Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1])
    4.4721359549995796
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1])
    3.3166247903553998
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    V = _validate_vector(V, dtype=np.float64)
    if not (V.shape[0] == u.shape[0] == v.shape[0]):
        raise TypeError('V must be a 1-D array of the same dimension '
                        'as u and v.')
    # Standardized Euclidean == weighted Euclidean with weights 1/variance.
    return euclidean(u, v, w=1 / V)
def cityblock(u, v, w=None):
    """
    Compute the City Block (Manhattan) distance.

    Computes the Manhattan distance between two 1-D arrays `u` and `v`,
    which is defined as

    .. math::

       \\sum_i {\\left| u_i - v_i \\right|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cityblock : double
        The City Block (Manhattan) distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cityblock([1, 0, 0], [0, 1, 0])
    2
    >>> distance.cityblock([1, 0, 0], [0, 2, 0])
    3
    >>> distance.cityblock([1, 0, 0], [1, 1, 0])
    1

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    abs_diff = abs(u - v)
    if w is None:
        return abs_diff.sum()
    w = _validate_weights(w)
    return (w * abs_diff).sum()
def mahalanobis(u, v, VI):
    """
    Compute the Mahalanobis distance between two 1-D arrays.

    The Mahalanobis distance between 1-D arrays `u` and `v`, is defined as

    .. math::

       \\sqrt{ (u-v) V^{-1} (u-v)^T }

    where ``V`` is the covariance matrix. Note that the argument `VI`
    is the inverse of ``V``.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    VI : array_like
        The inverse of the covariance matrix.

    Returns
    -------
    mahalanobis : double
        The Mahalanobis distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
    >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv)
    1.0
    >>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv)
    1.0
    >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv)
    1.7320508075688772

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    inv_cov = np.atleast_2d(VI)
    diff = u - v
    # Quadratic form (u-v) VI (u-v)^T, then the square root.
    return np.sqrt(np.dot(np.dot(diff, inv_cov), diff))
def chebyshev(u, v, w=None):
    r"""
    Compute the Chebyshev distance.

    The *Chebyshev distance* between real vectors
    :math:`u \equiv (u_1, \cdots, u_n)` and
    :math:`v \equiv (v_1, \cdots, v_n)` is defined as [1]_

    .. math::

        d_\textrm{chebyshev}(u,v) := \max_{1 \le i \le n} |u_i-v_i|

    If a (non-negative) weight vector :math:`w \equiv (w_1, \cdots, w_n)`
    is supplied, the *weighted Chebyshev distance* is defined to be the
    weighted Minkowski distance of infinite order; that is,

    .. math::

        \begin{align}
        d_\textrm{chebyshev}(u,v;w) &:= \lim_{p\rightarrow \infty}
        \left( \sum_{i=1}^n w_i | u_i-v_i |^p \right)^\frac{1}{p} \\
        &= \max_{1 \le i \le n} 1_{w_i > 0} | u_i - v_i |
        \end{align}

    Parameters
    ----------
    u : (N,) array_like of floats
        Input vector.
    v : (N,) array_like of floats
        Input vector.
    w : (N,) array_like of floats, optional
        Weight vector. Default is ``None``, which gives all pairs
        :math:`(u_i, v_i)` the same weight ``1.0``.

    Returns
    -------
    chebyshev : float
        The Chebyshev distance between vectors `u` and `v`, optionally
        weighted by `w`.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Chebyshev_distance

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.chebyshev([1, 0, 0], [0, 1, 0])
    1
    >>> distance.chebyshev([1, 1, 0], [0, 1, 0])
    1

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    abs_diff = abs(u - v)
    if w is None:
        return max(abs_diff)
    w = _validate_weights(w)
    # Positive weights act as a mask: zero-weight components are ignored.
    return max((w > 0) * abs_diff)
def braycurtis(u, v, w=None):
    """
    Compute the Bray-Curtis distance between two 1-D arrays.

    Bray-Curtis distance is defined as

    .. math::

       \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}

    The Bray-Curtis distance is in the range [0, 1] if all coordinates are
    positive, and is undefined if the inputs are of length zero.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    braycurtis : double
        The Bray-Curtis distance between 1-D arrays `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.braycurtis([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.braycurtis([1, 1, 0], [0, 1, 0])
    0.33333333333333331

    """
    u = _validate_vector(u)
    # Force floats so the final division is not integer division.
    v = _validate_vector(v, dtype=np.float64)
    numer = abs(u - v)
    denom = abs(u + v)
    if w is not None:
        w = _validate_weights(w)
        numer = w * numer
        denom = w * denom
    return numer.sum() / denom.sum()
def canberra(u, v, w=None):
    """
    Compute the Canberra distance between two 1-D arrays.

    The Canberra distance is defined as

    .. math::

         d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                              {|u_i|+|v_i|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    canberra : double
        The Canberra distance between vectors `u` and `v`.

    Notes
    -----
    When ``u[i]`` and ``v[i]`` are 0 for given i, then the fraction 0/0 = 0
    is used in the calculation.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.canberra([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.canberra([1, 1, 0], [0, 1, 0])
    1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v, dtype=np.float64)
    if w is not None:
        w = _validate_weights(w)
    # 0/0 components produce NaN; nansum below treats them as 0 per the
    # convention documented in Notes, so silence the invalid-value warning.
    with np.errstate(invalid='ignore'):
        ratios = abs(u - v) / (abs(u) + abs(v))
        if w is not None:
            ratios = w * ratios
        total = np.nansum(ratios)
    return total
def jensenshannon(p, q, base=None, *, axis=0, keepdims=False):
    """
    Compute the Jensen-Shannon distance (metric) between
    two probability arrays. This is the square root
    of the Jensen-Shannon divergence.

    The Jensen-Shannon distance between two probability
    vectors `p` and `q` is defined as,

    .. math::

       \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}

    where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
    and :math:`D` is the Kullback-Leibler divergence.

    This routine will normalize `p` and `q` if they don't sum to 1.0.

    Parameters
    ----------
    p : (N,) array_like
        left probability vector
    q : (N,) array_like
        right probability vector
    base : double, optional
        the base of the logarithm used to compute the output
        if not given, then the routine uses the default base of
        scipy.stats.entropy.
    axis : int, optional
        Axis along which the Jensen-Shannon distances are computed. The
        default is 0.

        .. versionadded:: 1.7.0
    keepdims : bool, optional
        If this is set to `True`, the reduced axes are left in the
        result as dimensions with size one. With this option,
        the result will broadcast correctly against the input array.
        Default is False.

        .. versionadded:: 1.7.0

    Returns
    -------
    js : double or ndarray
        The Jensen-Shannon distances between `p` and `q` along the `axis`.

    Notes
    -----
    .. versionadded:: 1.2.0

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> import numpy as np
    >>> distance.jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0)
    1.0
    >>> distance.jensenshannon([1.0, 0.0], [0.5, 0.5])
    0.46450140402245893
    >>> distance.jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
    0.0

    """
    p = np.asarray(p)
    q = np.asarray(q)
    # Normalize both inputs to valid probability distributions along `axis`.
    p = p / np.sum(p, axis=axis, keepdims=True)
    q = q / np.sum(q, axis=axis, keepdims=True)
    midpoint = (p + q) / 2.0
    # JS divergence: mean of the two KL divergences against the midpoint.
    divergence = (np.sum(rel_entr(p, midpoint), axis=axis, keepdims=keepdims)
                  + np.sum(rel_entr(q, midpoint), axis=axis, keepdims=keepdims))
    if base is not None:
        divergence /= np.log(base)
    return np.sqrt(divergence / 2.0)
def yule(u, v, w=None):
    """
    Compute the Yule dissimilarity between two boolean 1-D arrays.

    The Yule dissimilarity is defined as

    .. math::

         \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    yule : double
        The Yule dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.yule([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.yule([1, 1, 0], [0, 1, 0])
    0.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
    # cross == R/2 in the formula above; when there are no disagreements
    # the distance is exactly zero (and avoids a possible 0/0).
    cross = ntf * nft
    if cross == 0:
        return 0.0
    return float(2.0 * cross / (ntt * nff + cross))
def dice(u, v, w=None):
    """
    Compute the Dice dissimilarity between two boolean 1-D arrays.

    The Dice dissimilarity between `u` and `v`, is

    .. math::

         \\frac{c_{TF} + c_{FT}}
              {2c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input 1-D array.
    v : (N,) array_like, bool
        Input 1-D array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    dice : double
        The Dice dissimilarity between 1-D arrays `u` and `v`.

    Notes
    -----
    This function computes the Dice dissimilarity index. To compute the
    Dice similarity index, convert one to the other with similarity =
    1 - dissimilarity.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.dice([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.dice([1, 0, 0], [1, 1, 0])
    0.3333333333333333
    >>> distance.dice([1, 0, 0], [2, 0, 0])
    -0.3333333333333333

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if u.dtype == v.dtype == bool and w is None:
        # Fast path: pure boolean inputs count true-true agreements directly.
        ntt = (u & v).sum()
    else:
        # Promote to a common integer-compatible dtype before multiplying.
        common = np.result_type(int, u.dtype, v.dtype)
        u = u.astype(common)
        v = v.astype(common)
        ntt = (u * v).sum() if w is None else (u * v * w).sum()
    (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
    # np.array(...) makes a zero denominator yield nan/inf instead of raising.
    return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))
def rogerstanimoto(u, v, w=None):
    """
    Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.

    The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
    `u` and `v`, is defined as

    .. math::
       \\frac{R}
            {c_{TT} + c_{FF} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    rogerstanimoto : double
        The Rogers-Tanimoto dissimilarity between vectors
        `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0])
    0.8
    >>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0])
    0.5
    >>> distance.rogerstanimoto([1, 0, 0], [2, 0, 0])
    -1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
    # R = 2 * (number of disagreements), possibly weighted.
    mismatch = 2.0 * (ntf + nft)
    return float(mismatch) / float(ntt + nff + mismatch)
def russellrao(u, v, w=None):
    """
    Compute the Russell-Rao dissimilarity between two boolean 1-D arrays.

    The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
    `v`, is defined as

    .. math::

      \\frac{n - c_{TT}}
           {n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    russellrao : double
        The Russell-Rao dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.russellrao([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.russellrao([1, 0, 0], [1, 1, 0])
    0.6666666666666666
    >>> distance.russellrao([1, 0, 0], [2, 0, 0])
    0.3333333333333333

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is None:
        # Unweighted: n is simply the number of components.
        n = float(len(u))
        if u.dtype == v.dtype == bool:
            ntt = (u & v).sum()
        else:
            ntt = (u * v).sum()
    else:
        # Weighted: n becomes the total weight.
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
        n = w.sum()
    return float(n - ntt) / n
# Deprecation-warning decorator shared by the Python `sokalmichener`
# function and the compiled cdist/pdist implementations registered in
# _METRIC_INFOS below.
_deprecated_sokalmichener = _deprecated(
    "The sokalmichener metric is deprecated since SciPy 1.15.0 and will be "
    "removed in SciPy 1.17.0. Replace usage of 'sokalmichener(u, v)' with "
    "'rogerstanimoto(u, v)'."
)
@_deprecated_sokalmichener
def sokalmichener(u, v, w=None):
    """
    Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays.

    .. deprecated:: 1.15.0
        This function is deprecated and will be removed in SciPy 1.17.0.
        Replace usage of ``sokalmichener(u, v)`` with
        ``rogerstanimoto(u, v)``.

    The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
    is defined as

    .. math::

       \\frac{R}
            {S + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
    :math:`S = c_{FF} + c_{TT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalmichener : double
        The Sokal-Michener dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sokalmichener([1, 0, 0], [0, 1, 0])
    0.8
    >>> distance.sokalmichener([1, 0, 0], [1, 1, 0])
    0.5
    >>> distance.sokalmichener([1, 0, 0], [2, 0, 0])
    -1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    # Same formula as Rogers-Tanimoto: R / (S + R) with R = 2*(ntf + nft).
    r = 2.0 * (ntf + nft)
    return float(r) / float(ntt + nff + r)
def sokalsneath(u, v, w=None):
    """
    Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays.

    The Sokal-Sneath dissimilarity between `u` and `v`,

    .. math::

       \\frac{R}
            {c_{TT} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalsneath : double
        The Sokal-Sneath dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sokalsneath([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.sokalsneath([1, 0, 0], [1, 1, 0])
    0.66666666666666663
    >>> distance.sokalsneath([1, 0, 0], [2, 1, 0])
    0.0
    >>> distance.sokalsneath([1, 0, 0], [3, 1, 0])
    -2.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if u.dtype == v.dtype == bool and w is None:
        ntt = (u & v).sum()
    elif w is None:
        ntt = (u * v).sum()
    else:
        ntt = (u * v * w).sum()
    (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
    mismatch = 2.0 * (ntf + nft)
    denom = np.array(ntt + mismatch)
    # All-false inputs leave the denominator zero; the metric is undefined.
    if not denom.any():
        raise ValueError('Sokal-Sneath dissimilarity is not defined for '
                         'vectors that are entirely false.')
    return float(mismatch) / denom
# Convenience converters used by the metric validators below.
_convert_to_double = partial(_convert_to_type, out_type=np.float64)
_convert_to_bool = partial(_convert_to_type, out_type=bool)

# adding python-only wrappers to _distance_wrap module
# (correlation is implemented in Python on top of the cosine kernels)
_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap
_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap
@dataclasses.dataclass(frozen=True)
class CDistMetricWrapper:
    """Callable that dispatches a named metric to its C ``cdist`` kernel.

    When a weight vector ``w`` is supplied, the C wrappers cannot be used,
    so the call falls back to the generic Python implementation.
    """

    # Canonical metric name; keys into _METRICS and names the C wrapper
    # function in _distance_wrap.
    metric_name: str

    def __call__(self, XA, XB, *, out=None, **kwargs):
        # The C kernels require C-contiguous input.
        XA = np.ascontiguousarray(XA)
        XB = np.ascontiguousarray(XB)
        mA, n = XA.shape
        mB, _ = XB.shape
        metric_name = self.metric_name
        metric_info = _METRICS[metric_name]
        XA, XB, typ, kwargs = _validate_cdist_input(
            XA, XB, mA, mB, n, metric_info, **kwargs)

        w = kwargs.pop('w', None)
        if w is not None:
            # Weighted case: the C wrappers do not accept weights, so use
            # the (slower) per-pair Python path with the pure-Python metric.
            metric = metric_info.dist_func
            return _cdist_callable(
                XA, XB, metric=metric, out=out, w=w, **kwargs)

        dm = _prepare_out_argument(out, np.float64, (mA, mB))
        # get cdist wrapper, e.g. cdist_seuclidean_double_wrap
        cdist_fn = getattr(_distance_wrap, f'cdist_{metric_name}_{typ}_wrap')
        cdist_fn(XA, XB, dm, **kwargs)
        return dm
@dataclasses.dataclass(frozen=True)
class PDistMetricWrapper:
    """Callable that dispatches a named metric to its C ``pdist`` kernel.

    When a weight vector ``w`` is supplied, the C wrappers cannot be used,
    so the call falls back to the generic Python implementation.
    """

    # Canonical metric name; keys into _METRICS and names the C wrapper
    # function in _distance_wrap.
    metric_name: str

    def __call__(self, X, *, out=None, **kwargs):
        # The C kernels require C-contiguous input.
        X = np.ascontiguousarray(X)
        m, n = X.shape
        metric_name = self.metric_name
        metric_info = _METRICS[metric_name]
        X, typ, kwargs = _validate_pdist_input(
            X, m, n, metric_info, **kwargs)

        # Condensed distance matrix holds m choose 2 entries.
        out_size = (m * (m - 1)) // 2
        w = kwargs.pop('w', None)
        if w is not None:
            # Weighted case: fall back to the per-pair Python path.
            metric = metric_info.dist_func
            return _pdist_callable(
                X, metric=metric, out=out, w=w, **kwargs)

        dm = _prepare_out_argument(out, np.float64, (out_size,))
        # get pdist wrapper, e.g. pdist_seuclidean_double_wrap
        pdist_fn = getattr(_distance_wrap, f'pdist_{metric_name}_{typ}_wrap')
        pdist_fn(X, dm, **kwargs)
        return dm
@dataclasses.dataclass(frozen=True)
class MetricInfo:
    """Metadata record describing one distance metric implementation."""

    # Name of python distance function
    canonical_name: str
    # All aliases, including canonical_name
    aka: set[str]
    # unvectorized distance function
    dist_func: Callable
    # Optimized cdist function
    cdist_func: Callable
    # Optimized pdist function
    pdist_func: Callable
    # function that checks kwargs and computes default values:
    # f(X, m, n, **kwargs)
    validator: Callable | None = None
    # list of supported types:
    # X (pdist) and XA (cdist) are used to choose the type. if there is no
    # match the first type is used. Default double
    types: list[str] = dataclasses.field(default_factory=lambda: ['double'])
    # true if out array must be C-contiguous
    requires_contiguous_out: bool = True
# Registry of implemented metrics:
# Each entry maps a canonical name (plus aliases) to its pure-Python
# reference implementation and its optimized cdist/pdist kernels.
# Deprecated metrics (kulczynski1, sokalmichener) keep their kernels but
# wrap them in a deprecation-warning decorator.
_METRIC_INFOS = [
    MetricInfo(
        canonical_name='braycurtis',
        aka={'braycurtis'},
        dist_func=braycurtis,
        cdist_func=_distance_pybind.cdist_braycurtis,
        pdist_func=_distance_pybind.pdist_braycurtis,
    ),
    MetricInfo(
        canonical_name='canberra',
        aka={'canberra'},
        dist_func=canberra,
        cdist_func=_distance_pybind.cdist_canberra,
        pdist_func=_distance_pybind.pdist_canberra,
    ),
    MetricInfo(
        canonical_name='chebyshev',
        aka={'chebychev', 'chebyshev', 'cheby', 'cheb', 'ch'},
        dist_func=chebyshev,
        cdist_func=_distance_pybind.cdist_chebyshev,
        pdist_func=_distance_pybind.pdist_chebyshev,
    ),
    MetricInfo(
        canonical_name='cityblock',
        aka={'cityblock', 'cblock', 'cb', 'c'},
        dist_func=cityblock,
        cdist_func=_distance_pybind.cdist_cityblock,
        pdist_func=_distance_pybind.pdist_cityblock,
    ),
    MetricInfo(
        canonical_name='correlation',
        aka={'correlation', 'co'},
        dist_func=correlation,
        cdist_func=CDistMetricWrapper('correlation'),
        pdist_func=PDistMetricWrapper('correlation'),
    ),
    MetricInfo(
        canonical_name='cosine',
        aka={'cosine', 'cos'},
        dist_func=cosine,
        cdist_func=CDistMetricWrapper('cosine'),
        pdist_func=PDistMetricWrapper('cosine'),
    ),
    MetricInfo(
        canonical_name='dice',
        aka={'dice'},
        types=['bool'],
        dist_func=dice,
        cdist_func=_distance_pybind.cdist_dice,
        pdist_func=_distance_pybind.pdist_dice,
    ),
    MetricInfo(
        canonical_name='euclidean',
        aka={'euclidean', 'euclid', 'eu', 'e'},
        dist_func=euclidean,
        cdist_func=_distance_pybind.cdist_euclidean,
        pdist_func=_distance_pybind.pdist_euclidean,
    ),
    MetricInfo(
        canonical_name='hamming',
        aka={'matching', 'hamming', 'hamm', 'ha', 'h'},
        types=['double', 'bool'],
        validator=_validate_hamming_kwargs,
        dist_func=hamming,
        cdist_func=_distance_pybind.cdist_hamming,
        pdist_func=_distance_pybind.pdist_hamming,
    ),
    MetricInfo(
        canonical_name='jaccard',
        aka={'jaccard', 'jacc', 'ja', 'j'},
        types=['double', 'bool'],
        dist_func=jaccard,
        cdist_func=_distance_pybind.cdist_jaccard,
        pdist_func=_distance_pybind.pdist_jaccard,
    ),
    MetricInfo(
        canonical_name='jensenshannon',
        aka={'jensenshannon', 'js'},
        dist_func=jensenshannon,
        cdist_func=CDistMetricWrapper('jensenshannon'),
        pdist_func=PDistMetricWrapper('jensenshannon'),
    ),
    MetricInfo(
        canonical_name='kulczynski1',
        aka={'kulczynski1'},
        types=['bool'],
        dist_func=kulczynski1,
        cdist_func=_deprecated_kulczynski1(_distance_pybind.cdist_kulczynski1),
        pdist_func=_deprecated_kulczynski1(_distance_pybind.pdist_kulczynski1),
    ),
    MetricInfo(
        canonical_name='mahalanobis',
        aka={'mahalanobis', 'mahal', 'mah'},
        validator=_validate_mahalanobis_kwargs,
        dist_func=mahalanobis,
        cdist_func=CDistMetricWrapper('mahalanobis'),
        pdist_func=PDistMetricWrapper('mahalanobis'),
    ),
    MetricInfo(
        canonical_name='minkowski',
        aka={'minkowski', 'mi', 'm', 'pnorm'},
        validator=_validate_minkowski_kwargs,
        dist_func=minkowski,
        cdist_func=_distance_pybind.cdist_minkowski,
        pdist_func=_distance_pybind.pdist_minkowski,
    ),
    MetricInfo(
        canonical_name='rogerstanimoto',
        aka={'rogerstanimoto'},
        types=['bool'],
        dist_func=rogerstanimoto,
        cdist_func=_distance_pybind.cdist_rogerstanimoto,
        pdist_func=_distance_pybind.pdist_rogerstanimoto,
    ),
    MetricInfo(
        canonical_name='russellrao',
        aka={'russellrao'},
        types=['bool'],
        dist_func=russellrao,
        cdist_func=_distance_pybind.cdist_russellrao,
        pdist_func=_distance_pybind.pdist_russellrao,
    ),
    MetricInfo(
        canonical_name='seuclidean',
        aka={'seuclidean', 'se', 's'},
        validator=_validate_seuclidean_kwargs,
        dist_func=seuclidean,
        cdist_func=CDistMetricWrapper('seuclidean'),
        pdist_func=PDistMetricWrapper('seuclidean'),
    ),
    MetricInfo(
        canonical_name='sokalmichener',
        aka={'sokalmichener'},
        types=['bool'],
        dist_func=sokalmichener,
        cdist_func=_deprecated_sokalmichener(_distance_pybind.cdist_sokalmichener),
        pdist_func=_deprecated_sokalmichener(_distance_pybind.pdist_sokalmichener),
    ),
    MetricInfo(
        canonical_name='sokalsneath',
        aka={'sokalsneath'},
        types=['bool'],
        dist_func=sokalsneath,
        cdist_func=_distance_pybind.cdist_sokalsneath,
        pdist_func=_distance_pybind.pdist_sokalsneath,
    ),
    MetricInfo(
        canonical_name='sqeuclidean',
        aka={'sqeuclidean', 'sqe', 'sqeuclid'},
        dist_func=sqeuclidean,
        cdist_func=_distance_pybind.cdist_sqeuclidean,
        pdist_func=_distance_pybind.pdist_sqeuclidean,
    ),
    MetricInfo(
        canonical_name='yule',
        aka={'yule'},
        types=['bool'],
        dist_func=yule,
        cdist_func=_distance_pybind.cdist_yule,
        pdist_func=_distance_pybind.pdist_yule,
    ),
]
# Lookup tables derived from the registry above:
# canonical name -> MetricInfo
_METRICS = {info.canonical_name: info for info in _METRIC_INFOS}

# any alias (including the canonical name) -> MetricInfo
_METRIC_ALIAS = {alias: info
                 for info in _METRIC_INFOS
                 for alias in info.aka}

_METRICS_NAMES = list(_METRICS.keys())

# 'test_<name>' -> MetricInfo, used by the slow reference code path.
_TEST_METRICS = {'test_' + info.canonical_name: info for info in _METRIC_INFOS}
def pdist(X, metric='euclidean', *, out=None, **kwargs):
    """
    Pairwise distances between observations in n-dimensional space.

    See Notes for common calling conventions.

    Parameters
    ----------
    X : array_like
        An m by n array of m original observations in an
        n-dimensional space.
    metric : str or function, optional
        The distance metric to use. The distance function can
        be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
        'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
        'jaccard', 'jensenshannon', 'kulczynski1',
        'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
        'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
        'sqeuclidean', 'yule'.
    out : ndarray, optional
        The output array.
        If not None, condensed distance matrix Y is stored in this array.
    **kwargs : dict, optional
        Extra arguments to `metric`: refer to each metric documentation for a
        list of all possible arguments.

        Some possible arguments:

        p : scalar
            The p-norm to apply for Minkowski, weighted and unweighted.
            Default: 2.

        w : ndarray
            The weight vector for metrics that support weights
            (e.g., Minkowski).

        V : ndarray
            The variance vector for standardized Euclidean.
            Default: var(X, axis=0, ddof=1)

        VI : ndarray
            The inverse of the covariance matrix for Mahalanobis.
            Default: inv(cov(X.T)).T

    Returns
    -------
    Y : ndarray
        Returns a condensed distance matrix Y. For each :math:`i` and
        :math:`j` (where :math:`i<j<m`), where m is the number of original
        observations. The metric ``dist(u=X[i], v=X[j])`` is computed and
        stored in entry ``m * i + j - ((i + 2) * (i + 1)) // 2``.

    See Also
    --------
    squareform : converts between condensed distance matrices and
                 square distance matrices.

    Notes
    -----
    See ``squareform`` for information on how to calculate the index of
    this entry or to convert the condensed distance matrix to a
    redundant square matrix.

    The following are common calling conventions.

    1. ``Y = pdist(X, 'euclidean')``

       Computes the distance between m points using Euclidean distance
       (2-norm) as the distance metric between the points. The points
       are arranged as m n-dimensional row vectors in the matrix X.

    2. ``Y = pdist(X, 'minkowski', p=2.)``

       Computes the distances using the Minkowski distance
       :math:`\\|u-v\\|_p` (:math:`p`-norm) where :math:`p > 0` (note
       that this is only a quasi-metric if :math:`0 < p < 1`).

    3. ``Y = pdist(X, 'cityblock')``

       Computes the city block or Manhattan distance between the
       points.

    4. ``Y = pdist(X, 'seuclidean', V=None)``

       Computes the standardized Euclidean distance. The standardized
       Euclidean distance between two n-vectors ``u`` and ``v`` is

       .. math::

          \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}

       V is the variance vector; V[i] is the variance computed over all
       the i'th components of the points. If not passed, it is
       automatically computed.

    5. ``Y = pdist(X, 'sqeuclidean')``

       Computes the squared Euclidean distance :math:`\\|u-v\\|_2^2` between
       the vectors.

    6. ``Y = pdist(X, 'cosine')``

       Computes the cosine distance between vectors u and v,

       .. math::

          1 - \\frac{u \\cdot v}
                   {{\\|u\\|}_2 {\\|v\\|}_2}

       where :math:`\\|*\\|_2` is the 2-norm of its argument ``*``, and
       :math:`u \\cdot v` is the dot product of ``u`` and ``v``.

    7. ``Y = pdist(X, 'correlation')``

       Computes the correlation distance between vectors u and v. This is

       .. math::

          1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                   {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}

       where :math:`\\bar{v}` is the mean of the elements of vector v,
       and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.

    8. ``Y = pdist(X, 'hamming')``

       Computes the normalized Hamming distance, or the proportion of
       those vector elements between two n-vectors ``u`` and ``v``
       which disagree. To save memory, the matrix ``X`` can be of type
       boolean.

    9. ``Y = pdist(X, 'jaccard')``

       Computes the Jaccard distance between the points. Given two
       vectors, ``u`` and ``v``, the Jaccard distance is the
       proportion of those elements ``u[i]`` and ``v[i]`` that
       disagree.

    10. ``Y = pdist(X, 'jensenshannon')``

        Computes the Jensen-Shannon distance between two probability arrays.
        Given two probability vectors, :math:`p` and :math:`q`, the
        Jensen-Shannon distance is

        .. math::

           \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}

        where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
        and :math:`D` is the Kullback-Leibler divergence.

    11. ``Y = pdist(X, 'chebyshev')``

        Computes the Chebyshev distance between the points. The
        Chebyshev distance between two n-vectors ``u`` and ``v`` is the
        maximum norm-1 distance between their respective elements. More
        precisely, the distance is given by

        .. math::

           d(u,v) = \\max_i {|u_i-v_i|}

    12. ``Y = pdist(X, 'canberra')``

        Computes the Canberra distance between the points. The
        Canberra distance between two points ``u`` and ``v`` is

        .. math::

          d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                               {|u_i|+|v_i|}

    13. ``Y = pdist(X, 'braycurtis')``

        Computes the Bray-Curtis distance between the points. The
        Bray-Curtis distance between two points ``u`` and ``v`` is

        .. math::

             d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
                           {\\sum_i {|u_i+v_i|}}

    14. ``Y = pdist(X, 'mahalanobis', VI=None)``

        Computes the Mahalanobis distance between the points. The
        Mahalanobis distance between two points ``u`` and ``v`` is
        :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
        variable) is the inverse covariance. If ``VI`` is not None,
        ``VI`` will be used as the inverse covariance matrix.

    15. ``Y = pdist(X, 'yule')``

        Computes the Yule distance between each pair of boolean
        vectors. (see yule function documentation)

    16. ``Y = pdist(X, 'matching')``

        Synonym for 'hamming'.

    17. ``Y = pdist(X, 'dice')``

        Computes the Dice distance between each pair of boolean
        vectors. (see dice function documentation)

    18. ``Y = pdist(X, 'kulczynski1')``

        Computes the kulczynski1 distance between each pair of
        boolean vectors. (see kulczynski1 function documentation)

        .. deprecated:: 1.15.0
           This metric is deprecated and will be removed in SciPy 1.17.0.
           Replace usage of ``pdist(X, 'kulczynski1')`` with
           ``1 / pdist(X, 'jaccard') - 1``.

    19. ``Y = pdist(X, 'rogerstanimoto')``

        Computes the Rogers-Tanimoto distance between each pair of
        boolean vectors. (see rogerstanimoto function documentation)

    20. ``Y = pdist(X, 'russellrao')``

        Computes the Russell-Rao distance between each pair of
        boolean vectors. (see russellrao function documentation)

    21. ``Y = pdist(X, 'sokalmichener')``

        Computes the Sokal-Michener distance between each pair of
        boolean vectors. (see sokalmichener function documentation)

        .. deprecated:: 1.15.0
           This metric is deprecated and will be removed in SciPy 1.17.0.
           Replace usage of ``pdist(X, 'sokalmichener')`` with
           ``pdist(X, 'rogerstanimoto')``.

    22. ``Y = pdist(X, 'sokalsneath')``

        Computes the Sokal-Sneath distance between each pair of
        boolean vectors. (see sokalsneath function documentation)

    23. ``Y = pdist(X, f)``

        Computes the distance between all pairs of vectors in X
        using the user supplied 2-arity function f. For example,
        Euclidean distance between the vectors could be computed
        as follows::

          dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))

        Note that you should avoid passing a reference to one of
        the distance functions defined in this library. For example,::

          dm = pdist(X, sokalsneath)

        would calculate the pair-wise distances between the vectors in
        X using the Python function sokalsneath. This would result in
        sokalsneath being called :math:`{n \\choose 2}` times, which
        is inefficient. Instead, the optimized C version is more
        efficient, and we call it using the following syntax.::

          dm = pdist(X, 'sokalsneath')

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial.distance import pdist

    ``x`` is an array of five points in three-dimensional space.

    >>> x = np.array([[2, 0, 2], [2, 2, 3], [-2, 4, 5], [0, 1, 9], [2, 2, 4]])

    ``pdist(x)`` with no additional arguments computes the 10 pairwise
    Euclidean distances:

    >>> pdist(x)
    array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949,
           6.40312424, 1.        , 5.38516481, 4.58257569, 5.47722558])

    The following computes the pairwise Minkowski distances with ``p = 3.5``:

    >>> pdist(x, metric='minkowski', p=3.5)
    array([2.04898923, 5.1154929 , 7.02700737, 2.43802731, 4.19042714,
           6.03956994, 1.        , 4.45128103, 4.10636143, 5.0619695 ])

    The pairwise city block or Manhattan distances:

    >>> pdist(x, metric='cityblock')
    array([ 3., 11., 10.,  4.,  8.,  9.,  1.,  9.,  7.,  8.])

    """
    # You can also call this as:
    #     Y = pdist(X, 'test_abc')
    # where 'abc' is the metric being tested. This computes the distance
    # between all pairs of vectors in X using the distance metric 'abc' but
    # with a more succinct, verifiable, but less efficient implementation.
    X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
                           check_finite=False)

    s = X.shape
    if len(s) != 2:
        raise ValueError('A 2-dimensional array must be passed.')

    m, n = s

    if callable(metric):
        # Custom callable: if it happens to be a known metric, validate
        # kwargs against that metric's rules, then evaluate it pairwise.
        mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
        metric_info = _METRIC_ALIAS.get(mstr, None)

        if metric_info is not None:
            X, typ, kwargs = _validate_pdist_input(
                X, m, n, metric_info, **kwargs)

        return _pdist_callable(X, metric=metric, out=out, **kwargs)
    elif isinstance(metric, str):
        mstr = metric.lower()
        metric_info = _METRIC_ALIAS.get(mstr, None)

        if metric_info is not None:
            # Known metric name/alias: use the optimized implementation.
            pdist_fn = metric_info.pdist_func
            return pdist_fn(X, out=out, **kwargs)
        elif mstr.startswith("test_"):
            # Slow reference path used for testing the optimized kernels.
            metric_info = _TEST_METRICS.get(mstr, None)
            if metric_info is None:
                raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}')
            X, typ, kwargs = _validate_pdist_input(
                X, m, n, metric_info, **kwargs)
            return _pdist_callable(
                X, metric=metric_info.dist_func, out=out, **kwargs)
        else:
            raise ValueError(f'Unknown Distance Metric: {mstr}')
    else:
        raise TypeError('2nd argument metric must be a string identifier '
                        'or a function.')
def squareform(X, force="no", checks=True):
    """
    Convert a vector-form distance vector to a square-form distance
    matrix, and vice-versa.

    Parameters
    ----------
    X : array_like
        Either a condensed or redundant distance matrix.
    force : str, optional
        As with MATLAB(TM), if force is equal to ``'tovector'`` or
        ``'tomatrix'``, the input will be treated as a distance matrix or
        distance vector respectively.
    checks : bool, optional
        If set to False, no checks will be made for matrix
        symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored any way so they do not disrupt the
        squareform transformation.

    Returns
    -------
    Y : ndarray
        If a condensed distance matrix is passed, a redundant one is
        returned, or if a redundant one is passed, a condensed distance
        matrix is returned.

    Notes
    -----
    1. ``v = squareform(X)``

       Given a square n-by-n symmetric distance matrix ``X``,
       ``v = squareform(X)`` returns a ``n * (n-1) / 2``
       (i.e. binomial coefficient n choose 2) sized vector `v`
       where :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]`
       is the distance between distinct points ``i`` and ``j``.
       If ``X`` is non-square or asymmetric, an error is raised.

    2. ``X = squareform(v)``

       Given a ``n * (n-1) / 2`` sized vector ``v``
       for some integer ``n >= 1`` encoding distances as described,
       ``X = squareform(v)`` returns a n-by-n distance matrix ``X``.
       The ``X[i, j]`` and ``X[j, i]`` values are set to
       :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]`
       and all diagonal elements are zero.

    In SciPy 0.19.0, ``squareform`` stopped casting all input types to
    float64, and started returning arrays of the same dtype as the input.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial.distance import pdist, squareform

    ``x`` is an array of five points in three-dimensional space.

    >>> x = np.array([[2, 0, 2], [2, 2, 3], [-2, 4, 5], [0, 1, 9], [2, 2, 4]])

    ``pdist(x)`` computes the Euclidean distances between each pair of
    points in ``x``. The distances are returned in a one-dimensional
    array with length ``5*(5 - 1)/2 = 10``.

    >>> distvec = pdist(x)
    >>> distvec
    array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949,
           6.40312424, 1.        , 5.38516481, 4.58257569, 5.47722558])

    ``squareform(distvec)`` returns the 5x5 distance matrix.

    >>> m = squareform(distvec)
    >>> m
    array([[0.        , 2.23606798, 6.40312424, 7.34846923, 2.82842712],
           [2.23606798, 0.        , 4.89897949, 6.40312424, 1.        ],
           [6.40312424, 4.89897949, 0.        , 5.38516481, 4.58257569],
           [7.34846923, 6.40312424, 5.38516481, 0.        , 5.47722558],
           [2.82842712, 1.        , 4.58257569, 5.47722558, 0.        ]])

    When given a square distance matrix ``m``, ``squareform(m)`` returns
    the one-dimensional condensed distance vector associated with the
    matrix. In this case, we recover ``distvec``.

    >>> squareform(m)
    array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949,
           6.40312424, 1.        , 5.38516481, 4.58257569, 5.47722558])
    """
    X = np.ascontiguousarray(X)
    s = X.shape
    # Optionally force the interpretation of the input (vector vs. matrix).
    if force.lower() == 'tomatrix':
        if len(s) != 1:
            raise ValueError("Forcing 'tomatrix' but input X is not a "
                             "distance vector.")
    elif force.lower() == 'tovector':
        if len(s) != 2:
            raise ValueError("Forcing 'tovector' but input X is not a "
                             "distance matrix.")
    # X = squareform(v)
    if len(s) == 1:
        if s[0] == 0:
            return np.zeros((1, 1), dtype=X.dtype)
        # Grab the closest value to the square root of the number
        # of elements times 2 to see if the number of elements
        # is indeed a binomial coefficient.
        d = int(np.ceil(np.sqrt(s[0] * 2)))
        # Check that v is of valid dimensions.
        if d * (d - 1) != s[0] * 2:
            raise ValueError('Incompatible vector size. It must be a binomial '
                             'coefficient n choose 2 for some integer n >= 2.')
        # Allocate memory for the distance matrix.
        M = np.zeros((d, d), dtype=X.dtype)
        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)
        # Fill in the values of the distance matrix.
        _distance_wrap.to_squareform_from_vector_wrap(M, X)
        # Return the distance matrix.
        return M
    elif len(s) == 2:
        if s[0] != s[1]:
            raise ValueError('The matrix argument must be square.')
        if checks:
            is_valid_dm(X, throw=True, name='X')
        # One-side of the dimensions is set here.
        d = s[0]
        if d <= 1:
            return np.array([], dtype=X.dtype)
        # Create a vector.
        v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)
        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)
        # Convert the vector to squareform.
        _distance_wrap.to_vector_from_squareform_wrap(X, v)
        return v
    else:
        raise ValueError("The first argument must be one or two dimensional "
                         f"array. A {len(s)}-dimensional array is not permitted")
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
    """
    Return True if input array is a valid distance matrix.

    Distance matrices must be 2-dimensional numpy arrays.
    They must have a zero-diagonal, and they must be symmetric.

    Parameters
    ----------
    D : array_like
        The candidate object to test for validity.
    tol : float, optional
        The distance matrix should be symmetric. `tol` is the maximum
        difference between entries ``ij`` and ``ji`` for the distance
        metric to be considered symmetric.
    throw : bool, optional
        An exception is thrown if the distance matrix passed is not valid.
    name : str, optional
        The name of the variable to checked. This is useful if
        throw is set to True so the offending variable can be identified
        in the exception message when an exception is thrown.
    warning : bool, optional
        Instead of throwing an exception, a warning message is
        raised.

    Returns
    -------
    valid : bool
        True if the variable `D` passed is a valid distance matrix.

    Notes
    -----
    Small numerical differences in `D` and `D.T` and non-zeroness of
    the diagonal are ignored if they are within the tolerance specified
    by `tol`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial.distance import is_valid_dm

    This matrix is a valid distance matrix.

    >>> d = np.array([[0.0, 1.1, 1.2, 1.3],
    ...               [1.1, 0.0, 1.0, 1.4],
    ...               [1.2, 1.0, 0.0, 1.5],
    ...               [1.3, 1.4, 1.5, 0.0]])
    >>> is_valid_dm(d)
    True

    In the following examples, the input is not a valid distance matrix.

    Not square:

    >>> is_valid_dm([[0, 2, 2], [2, 0, 2]])
    False

    Nonzero diagonal element:

    >>> is_valid_dm([[0, 1, 1], [1, 2, 3], [1, 3, 0]])
    False

    Not symmetric:

    >>> is_valid_dm([[0, 1, 3], [2, 0, 1], [3, 1, 0]])
    False
    """
    D = np.asarray(D, order='c')
    valid = True
    try:
        s = D.shape
        if len(D.shape) != 2:
            if name:
                raise ValueError(f"Distance matrix '{name}' must have shape=2 "
                                 "(i.e. be two-dimensional).")
            else:
                raise ValueError('Distance matrix must have shape=2 (i.e. '
                                 'be two-dimensional).')
        if tol == 0.0:
            # Exact checks: strict symmetry and an exactly-zero diagonal.
            if not (D == D.T).all():
                if name:
                    raise ValueError(f"Distance matrix '{name}' must be symmetric.")
                else:
                    raise ValueError('Distance matrix must be symmetric.')
            if not (D[range(0, s[0]), range(0, s[0])] == 0).all():
                if name:
                    raise ValueError(f"Distance matrix '{name}' diagonal must be zero.")
                else:
                    raise ValueError('Distance matrix diagonal must be zero.')
        else:
            # Tolerance-based checks. D - D.T is antisymmetric, so bounding
            # it above by tol bounds the magnitude of every asymmetry.
            if not (D - D.T <= tol).all():
                if name:
                    raise ValueError(f'Distance matrix \'{name}\' must be '
                                     f'symmetric within tolerance {tol:5.5f}.')
                else:
                    raise ValueError('Distance matrix must be symmetric within '
                                     f'tolerance {tol:5.5f}.')
            if not (D[range(0, s[0]), range(0, s[0])] <= tol).all():
                if name:
                    raise ValueError(f'Distance matrix \'{name}\' diagonal must be '
                                     f'close to zero within tolerance {tol:5.5f}.')
                else:
                    # BUG FIX: this branch previously did
                    # ``'...{}...{:5.5f}.'.format(*tol)``, which raised
                    # TypeError (a float is not iterable) and also referenced
                    # a name placeholder although no name was given.
                    raise ValueError('Distance matrix diagonal must be close '
                                     f'to zero within tolerance {tol:5.5f}.')
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e), stacklevel=2)
        valid = False
    return valid
def is_valid_y(y, warning=False, throw=False, name=None):
    """
    Return True if the input array is a valid condensed distance matrix.

    A valid condensed distance matrix is a 1-dimensional numpy array
    whose length is a binomial coefficient :math:`{n \\choose 2}` for
    some positive integer n.

    Parameters
    ----------
    y : array_like
        The condensed distance matrix.
    warning : bool, optional
        If True, emit a warning (instead of staying silent) when the
        input is invalid, explaining why. `name` is used when
        referencing the offending variable.
    throw : bool, optional
        If True, raise an exception when the input is invalid.
    name : bool, optional
        Used when referencing the offending variable in the
        warning or exception message.

    Returns
    -------
    bool
        True if the input array is a valid condensed distance matrix,
        False otherwise.

    Examples
    --------
    >>> from scipy.spatial.distance import is_valid_y

    This vector is a valid condensed distance matrix. The length is 6,
    which corresponds to ``n = 4``, since ``4*(4 - 1)/2`` is 6.

    >>> v = [1.0, 1.2, 1.0, 0.5, 1.3, 0.9]
    >>> is_valid_y(v)
    True

    An input vector with length, say, 7, is not a valid condensed distance
    matrix.

    >>> is_valid_y([1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7])
    False
    """
    arr = np.asarray(y, order='c')
    try:
        # Must be one-dimensional.
        if arr.ndim != 1:
            if name:
                raise ValueError(f"Condensed distance matrix '{name}' must "
                                 "have shape=1 (i.e. be one-dimensional).")
            raise ValueError('Condensed distance matrix must have shape=1 '
                             '(i.e. be one-dimensional).')
        # The length must equal k*(k-1)/2 for some integer k.
        length = arr.shape[0]
        k = int(np.ceil(np.sqrt(length * 2)))
        if (k * (k - 1) / 2) != length:
            if name:
                raise ValueError(f"Length n of condensed distance matrix '{name}' "
                                 "must be a binomial coefficient, i.e."
                                 "there must be a k such that (k \\choose 2)=n)!")
            raise ValueError('Length n of condensed distance matrix must '
                             'be a binomial coefficient, i.e. there must '
                             'be a k such that (k \\choose 2)=n)!')
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e), stacklevel=2)
        return False
    return True
def num_obs_dm(d):
    """
    Return the number of original observations that correspond to a
    square, redundant distance matrix.

    Parameters
    ----------
    d : array_like
        The target distance matrix.

    Returns
    -------
    num_obs_dm : int
        The number of observations in the redundant distance matrix.

    Examples
    --------
    Find the number of original observations corresponding
    to a square redundant distance matrix d.

    >>> from scipy.spatial.distance import num_obs_dm
    >>> d = [[0, 100, 200], [100, 0, 150], [200, 150, 0]]
    >>> num_obs_dm(d)
    3
    """
    # An infinite tolerance means only squareness is enforced here,
    # not symmetry or a zero diagonal.
    matrix = np.asarray(d, order='c')
    is_valid_dm(matrix, tol=np.inf, throw=True, name='d')
    return matrix.shape[0]
def num_obs_y(Y):
    """
    Return the number of original observations that correspond to a
    condensed distance matrix.

    Parameters
    ----------
    Y : array_like
        Condensed distance matrix.

    Returns
    -------
    n : int
        The number of observations in the condensed distance matrix `Y`.

    Examples
    --------
    Find the number of original observations corresponding to a
    condensed distance matrix Y.

    >>> from scipy.spatial.distance import num_obs_y
    >>> Y = [1, 2, 3.5, 7, 10, 4]
    >>> num_obs_y(Y)
    4
    """
    condensed = np.asarray(Y, order='c')
    is_valid_y(condensed, throw=True, name='Y')
    num_entries = condensed.shape[0]
    if num_entries == 0:
        raise ValueError("The number of observations cannot be determined on "
                         "an empty distance matrix.")
    # Invert k = n*(n-1)/2 to recover the observation count n.
    n = int(np.ceil(np.sqrt(num_entries * 2)))
    if n * (n - 1) != num_entries * 2:
        raise ValueError("Invalid condensed distance matrix passed. Must be "
                         "some k where k=(n choose 2) for some n >= 2.")
    return n
def _prepare_out_argument(out, dtype, expected_shape):
if out is None:
return np.empty(expected_shape, dtype=dtype)
if out.shape != expected_shape:
raise ValueError("Output array has incorrect shape.")
if not out.flags.c_contiguous:
raise ValueError("Output array must be C-contiguous.")
if out.dtype != np.float64:
raise ValueError("Output array must be double type.")
return out
def _pdist_callable(X, *, out, metric, **kwargs):
    """Condensed pairwise distances of ``X`` via a user-supplied metric.

    Fills (or allocates) a float64 vector of length ``n*(n-1)/2`` with
    ``metric(X[i], X[j])`` for all pairs ``i < j``, in row-major pair order.
    """
    n_rows = X.shape[0]
    dm = _prepare_out_argument(out, np.float64, ((n_rows * (n_rows - 1)) // 2,))
    idx = 0
    for i, u in enumerate(X[:-1]):
        for v in X[i + 1:]:
            dm[idx] = metric(u, v, **kwargs)
            idx += 1
    return dm
def _cdist_callable(XA, XB, *, out, metric, **kwargs):
    """Full rectangular distance matrix via a user-supplied metric.

    Entry ``(i, j)`` of the returned float64 array holds
    ``metric(XA[i], XB[j])``.
    """
    dm = _prepare_out_argument(out, np.float64, (XA.shape[0], XB.shape[0]))
    for i, u in enumerate(XA):
        for j, v in enumerate(XB):
            dm[i, j] = metric(u, v, **kwargs)
    return dm
def cdist(XA, XB, metric='euclidean', *, out=None, **kwargs):
    """
    Compute distance between each pair of the two collections of inputs.

    Parameters
    ----------
    XA : array_like
        An :math:`m_A` by :math:`n` array of :math:`m_A` original
        observations in an :math:`n`-dimensional space.
    XB : array_like
        An :math:`m_B` by :math:`n` array of :math:`m_B` original
        observations in an :math:`n`-dimensional space.
    metric : str or callable, optional
        The distance metric to use. If a string, the distance function can
        be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
        'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard',
        'jensenshannon', 'kulczynski1', 'mahalanobis', 'matching',
        'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
        'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
        A callable taking two 1-D vectors and returning a float may also
        be passed; the optimized string form is preferred when available.
    out : ndarray, optional
        If not None, the distance matrix is stored in this pre-allocated
        C-contiguous float64 array of shape ``(m_A, m_B)``.
    **kwargs : dict, optional
        Extra arguments for the chosen metric, e.g. ``p`` (Minkowski
        order), ``w`` (weights), ``V`` (seuclidean variances), ``VI``
        (mahalanobis inverse covariance). Refer to each metric's
        documentation for the full list.

    Returns
    -------
    Y : ndarray
        A :math:`m_A` by :math:`m_B` distance matrix. For each :math:`i`
        and :math:`j`, ``dist(u=XA[i], v=XB[j])`` is stored in the
        :math:`ij` th entry.

    Raises
    ------
    ValueError
        If `XA` or `XB` is not two-dimensional, if they do not have the
        same number of columns, or if `metric` is an unrecognized string.
    TypeError
        If `metric` is neither a string nor a callable.

    Examples
    --------
    Find the Euclidean distances between four 2-D coordinates:

    >>> from scipy.spatial import distance
    >>> coords = [(35.0456, -85.2672),
    ...           (35.1174, -89.9711),
    ...           (35.9728, -83.9422),
    ...           (36.1667, -86.7833)]
    >>> distance.cdist(coords, coords, 'euclidean')
    array([[ 0.    ,  4.7044,  1.6172,  1.8856],
           [ 4.7044,  0.    ,  6.0893,  3.3561],
           [ 1.6172,  6.0893,  0.    ,  2.8477],
           [ 1.8856,  3.3561,  2.8477,  0.    ]])
    """
    # Strings of the form 'test_abc' select the succinct, verifiable but
    # less efficient Python implementation of metric 'abc', used to
    # cross-check the optimized versions.
    XA = np.asarray(XA)
    XB = np.asarray(XB)

    shape_a = XA.shape
    shape_b = XB.shape
    if len(shape_a) != 2:
        raise ValueError('XA must be a 2-dimensional array.')
    if len(shape_b) != 2:
        raise ValueError('XB must be a 2-dimensional array.')
    if shape_a[1] != shape_b[1]:
        raise ValueError('XA and XB must have the same number of columns '
                         '(i.e. feature dimension.)')

    mA, n = shape_a
    mB = shape_b[0]

    # A user-supplied callable: validate inputs against the matching
    # registered metric (if any), then fall back to the generic driver.
    if callable(metric):
        cname = getattr(metric, '__name__', 'Unknown')
        info = _METRIC_ALIAS.get(cname, None)
        if info is not None:
            XA, XB, typ, kwargs = _validate_cdist_input(
                XA, XB, mA, mB, n, info, **kwargs)
        return _cdist_callable(XA, XB, metric=metric, out=out, **kwargs)

    if not isinstance(metric, str):
        raise TypeError('2nd argument metric must be a string identifier '
                        'or a function.')

    mstr = metric.lower()

    # Optimized, registered metric.
    info = _METRIC_ALIAS.get(mstr, None)
    if info is not None:
        return info.cdist_func(XA, XB, out=out, **kwargs)

    # Slow reference implementation requested via 'test_<name>'.
    if mstr.startswith("test_"):
        info = _TEST_METRICS.get(mstr, None)
        if info is None:
            raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}')
        XA, XB, typ, kwargs = _validate_cdist_input(
            XA, XB, mA, mB, n, info, **kwargs)
        return _cdist_callable(
            XA, XB, metric=info.dist_func, out=out, **kwargs)

    raise ValueError(f'Unknown Distance Metric: {mstr}')
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@spatial@distance.py@.PATH_END.py
|
{
"filename": "bigfile.py",
"repo_name": "bccp/nbodykit",
"repo_path": "nbodykit_extracted/nbodykit-master/nbodykit/source/mesh/bigfile.py",
"type": "Python"
}
|
from __future__ import absolute_import
# the future import is important. or in python 2.7 we try to
# import this module itself. Due to the unfortnate name conflict!
from nbodykit.base.mesh import MeshSource
from nbodykit import CurrentMPIComm
from nbodykit.utils import JSONDecoder
from bigfile import FileMPI
from pmesh.pm import ParticleMesh, ComplexField, RealField
import numpy
import json
from six import string_types
class BigFileMesh(MeshSource):
    """
    A MeshSource object that reads a mesh from disk using :mod:`bigfile`.

    This can read meshes that have been stored with the
    :func:`~nbodykit.base.mesh.MeshSource.save` function of MeshSource objects.

    Parameters
    ----------
    path : str
        the name of the file to load
    dataset : str
        the name of the dataset in the Bigfile holding the grid
    comm : MPI.Communicator
        the MPI communicator
    **kwargs :
        extra meta-data to be stored in the :attr:`attrs` dict
    """
    def __repr__(self):
        import os
        return "BigFileMesh(file=%s)" % os.path.basename(self.path)

    @CurrentMPIComm.enable
    def __init__(self, path, dataset, comm=None, **kwargs):
        self.path = path
        self.dataset = dataset
        self.comm = comm

        # update the meta-data; NOTE: attributes read from the file below
        # will overwrite any duplicate keys supplied via ``kwargs``
        self.attrs.update(kwargs)

        with FileMPI(comm=self.comm, filename=path)[dataset] as ff:
            # values stored as 'json://...' strings were serialized with the
            # custom JSON encoder; decode them back into Python objects
            for key in ff.attrs:
                v = ff.attrs[key]
                if isinstance(v, string_types) and v.startswith('json://'):
                    self.attrs[key] = json.loads(v[7:], cls=JSONDecoder)
                else:
                    self.attrs[key] = numpy.squeeze(v)

            # fourier space or config space: inferred from the on-disk dtype
            # kind; the itemsize selects single vs. double precision
            if ff.dtype.kind == 'c':
                self.isfourier = True
                if ff.dtype.itemsize == 16:
                    dtype = 'f8'
                else:
                    dtype = 'f4'
            else:
                self.isfourier = False
                if ff.dtype.itemsize == 8:
                    dtype = 'f8'
                else:
                    dtype = 'f4'

        # determine Nmesh; both attributes must have been saved to disk
        if 'ndarray.shape' not in self.attrs:
            raise ValueError("`ndarray.shape` should be stored in the Bigfile `attrs` to determine `Nmesh`")
        if 'Nmesh' not in self.attrs:
            # BUG FIX: this message previously (incorrectly) referred to
            # `ndarray.shape` instead of the `Nmesh` key being checked
            raise ValueError("`Nmesh` should be stored in the Bigfile `attrs`")
        Nmesh = self.attrs['Nmesh']
        # NOTE(review): 'BoxSize' is read without a presence check, so a
        # missing key raises KeyError rather than a descriptive ValueError
        BoxSize = self.attrs['BoxSize']

        MeshSource.__init__(self, BoxSize=BoxSize, Nmesh=Nmesh, dtype=dtype, comm=comm)

    def to_real_field(self):
        """
        Return the RealField stored on disk.

        .. note::
            The mesh stored on disk must be stored with ``mode=real``

        Returns
        -------
        real : pmesh.pm.RealField
            an array-like object holding the mesh loaded from disk in
            configuration space
        """
        if self.isfourier:
            return NotImplemented

        # the real field to paint to
        pmread = self.pm

        with FileMPI(comm=self.comm, filename=self.path)[self.dataset] as ds:
            if self.comm.rank == 0:
                self.logger.info("reading real field from %s" % self.path)
            real2 = RealField(pmread)
            # each rank reads its contiguous slab: offset is the total size
            # owned by all lower ranks (collective allgather)
            start = numpy.sum(self.comm.allgather(real2.size)[:self.comm.rank], dtype='intp')
            end = start + real2.size
            real2.unravel(ds[start:end])

        return real2

    def to_complex_field(self):
        """
        Return the ComplexField stored on disk.

        .. note::
            The mesh stored on disk must be stored with ``mode=complex``

        Returns
        -------
        real : pmesh.pm.ComplexField
            an array-like object holding the mesh loaded from disk in Fourier
            space
        """
        if not self.isfourier:
            return NotImplemented
        pmread = self.pm

        if self.comm.rank == 0:
            self.logger.info("reading complex field from %s" % self.path)

        with FileMPI(comm=self.comm, filename=self.path)[self.dataset] as ds:
            complex2 = ComplexField(pmread)
            # sanity check: the distributed field must exactly tile the dataset
            assert self.comm.allreduce(complex2.size) == ds.size
            start = numpy.sum(self.comm.allgather(complex2.size)[:self.comm.rank], dtype='intp')
            end = start + complex2.size
            complex2.unravel(ds[start:end])

        return complex2
|
bccpREPO_NAMEnbodykitPATH_START.@nbodykit_extracted@nbodykit-master@nbodykit@source@mesh@bigfile.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "timstaley/drive-ami",
"repo_path": "drive-ami_extracted/drive-ami-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
from setuptools import setup
import versioneer
# Runtime dependencies; the upper bounds pin the major versions this
# package was written against.
requirements = ['pexpect>=4.0.1,<5',
                'astropy>=1.0,<2',
                'colorlog',
                ]
# Package metadata and entry points; versioning is delegated to versioneer.
setup(
    name="drive-ami",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    packages=['driveami'],
    scripts=['bin/driveami_filter_rawfile_listing.py',
             'bin/driveami_list_rawfiles.py',
             'bin/driveami_calibrate_rawfiles.py'],
    description="An interface layer for scripting the AMI-Reduce pipeline.",
    author="Tim Staley",
    author_email="timstaley337@gmail.com",
    url="https://github.com/timstaley/drive-ami",
    install_requires=requirements,
)
|
timstaleyREPO_NAMEdrive-amiPATH_START.@drive-ami_extracted@drive-ami-master@setup.py@.PATH_END.py
|
{
"filename": "CompatibleFaceSpecificThermalEnergyPolicyInst.cc.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/SVPH/CompatibleFaceSpecificThermalEnergyPolicyInst.cc.py",
"type": "Python"
}
|
text = """
//------------------------------------------------------------------------------
// Explicit instantiation.
//------------------------------------------------------------------------------
#include "SVPH/CompatibleFaceSpecificThermalEnergyPolicy.cc"
#include "Geometry/Dimension.hh"
namespace Spheral {
template class CompatibleFaceSpecificThermalEnergyPolicy<Dim< %(ndim)s > >;
}
"""
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@SVPH@CompatibleFaceSpecificThermalEnergyPolicyInst.cc.py@.PATH_END.py
|
{
"filename": "momento_vector_index.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/vectorstores/momento_vector_index.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import MomentoVectorIndex
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each deprecated name exported from this module to the module that
# now provides it.
DEPRECATED_LOOKUP = {"MomentoVectorIndex": "langchain_community.vectorstores"}
# Importer callable that resolves deprecated names and emits the warnings.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Delegates to ``_import_attribute`` so that importing a deprecated name
    from this module still resolves (with a deprecation warning).
    """
    return _import_attribute(name)
__all__ = [
"MomentoVectorIndex",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@vectorstores@momento_vector_index.py@.PATH_END.py
|
{
"filename": "test_gaiatap.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/gaia/tests/test_gaiatap.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
Gaia TAP plus
=============
@author: Juan Carlos Segovia
@contact: juan.carlos.segovia@sciops.esa.int
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 30 jun. 2016
"""
import datetime
import os
import zipfile
from pathlib import Path
from unittest.mock import patch
import astropy.units as u
import numpy as np
import pytest
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.table import Column, Table
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning
from requests import HTTPError
from astroquery.gaia import conf
from astroquery.gaia.core import GaiaClass
from astroquery.utils.tap.conn.tests.DummyConnHandler import DummyConnHandler
from astroquery.utils.tap.conn.tests.DummyResponse import DummyResponse
from astroquery.utils.tap.core import TapPlus
# Shared querier instance used by the tests; server messages suppressed.
GAIA_QUERIER = GaiaClass(show_server_messages=False)
package = "astroquery.gaia.tests"
# Canned server responses bundled with the test package.
JOB_DATA_FILE_NAME = get_pkg_data_filename(os.path.join("data", 'job_1.vot'), package=package)
JOB_DATA_FILE_NAME_NEW = get_pkg_data_filename(os.path.join("data", '1710099405832VAL-result.vot'), package=package)
JOB_DATA_CONE_SEARCH_ASYNC_JSON_FILE_NAME = get_pkg_data_filename(os.path.join("data", 'cone_search_async.json'),
                                                                 package=package)
JOB_DATA_QUERIER_ASYNC_FILE_NAME = get_pkg_data_filename(os.path.join("data", 'launch_job_async.json'), package=package)
JOB_DATA_QUERIER_ECSV_FILE_NAME = get_pkg_data_filename(os.path.join("data", '1712337806100O-result.ecsv'),
                                                        package=package)
# Zipped datalink products for one DR3 source, one archive per output format.
DL_PRODUCTS_VOT = get_pkg_data_filename(
    os.path.join("data", 'gaia_dr3_source_id_5937083312263887616_dl_products_vot.zip'),
    package=package)
DL_PRODUCTS_ECSV = get_pkg_data_filename(
    os.path.join("data", 'gaia_dr3_source_id_5937083312263887616_dl_products_ecsv.zip'),
    package=package)
DL_PRODUCTS_CSV = get_pkg_data_filename(
    os.path.join("data", 'gaia_dr3_source_id_5937083312263887616_dl_products_csv.zip'),
    package=package)
DL_PRODUCTS_FITS = get_pkg_data_filename(
    os.path.join("data", 'gaia_dr3_source_id_5937083312263887616_dl_products_fits.zip'),
    package=package)
# File contents pre-loaded as strings for use as dummy HTTP responses.
JOB_DATA = Path(JOB_DATA_FILE_NAME).read_text()
JOB_DATA_NEW = Path(JOB_DATA_FILE_NAME_NEW).read_text()
JOB_DATA_CONE_SEARCH_ASYNC_JSON = Path(JOB_DATA_CONE_SEARCH_ASYNC_JSON_FILE_NAME).read_text()
JOB_DATA_QUERIER_ASYNC_JSON = Path(JOB_DATA_QUERIER_ASYNC_FILE_NAME).read_text()
JOB_DATA_ECSV = Path(JOB_DATA_QUERIER_ECSV_FILE_NAME).read_text()
# Source-id inputs in the accepted spellings: plain integers, comma-separated
# strings, tuples, and 'low-high' ranges.
ids_ints = [1104405489608579584, '1104405489608579584, 1809140662896080256', (1104405489608579584, 1809140662896080256),
            ('1104405489608579584', '1809140662896080256'), '4295806720-38655544960',
            '4295806720-38655544960, 549755818112-1275606125952',
            ('4295806720-38655544960', '549755818112-1275606125952')]
# Same inputs expressed as full 'Gaia DR<n> <id>' designators.
ids_designator = ['Gaia DR3 1104405489608579584', 'Gaia DR3 1104405489608579584, Gaia DR3 1809140662896080256',
                  ('Gaia DR3 1104405489608579584', 'Gaia DR3 1809140662896080256'),
                  'Gaia DR3 4295806720-Gaia DR3 38655544960',
                  'Gaia DR3 4295806720-Gaia DR3 38655544960, Gaia DR3 549755818112-Gaia DR3 1275606125952',
                  ('Gaia DR3 4295806720-Gaia DR3 38655544960', 'Gaia DR3 549755818112-Gaia DR3 1275606125952'),
                  'Gaia DR3 4295806720-Gaia DR3 38655544960, Gaia DR2 549755818112-Gaia DR2 1275606125952',
                  ('Gaia DR3 4295806720-Gaia DR3 38655544960', 'Gaia DR2 549755818112-Gaia DR2 1275606125952')]
# Common query geometry and the frozen timestamp used by the datetime patch.
RADIUS = 1 * u.deg
SKYCOORD = SkyCoord(ra=19 * u.deg, dec=20 * u.deg, frame="icrs")
FAKE_TIME = datetime.datetime(2024, 1, 1, 0, 0, 59, 1)
@pytest.fixture
def patch_datetime_now(monkeypatch):
    """Freeze ``datetime.datetime.now`` at FAKE_TIME for the test's duration."""

    class FrozenDatetime(datetime.datetime):
        @classmethod
        def now(cls, tz=None):
            # The tz argument is deliberately ignored: every caller gets
            # the same fixed instant.
            return FAKE_TIME

    monkeypatch.setattr(datetime, 'datetime', FrozenDatetime)
def test_patch_datetime(patch_datetime_now):
    """Sanity check: with the fixture active, "now" is the frozen instant."""
    frozen_now = datetime.datetime.now()
    assert frozen_now == FAKE_TIME
@pytest.fixture(scope="module")
def column_attrs():
    """Expected Column attributes for the basic query-object result table."""
    specs = (
        ("alpha", np.float64),
        ("delta", np.float64),
        ("source_id", object),
        ("table1_oid", np.int32),
    )
    columns = {name: Column(name=name, description=name, dtype=dtype)
               for name, dtype in specs}
    # source_id is serialised as a character column in the VOTable.
    columns["source_id"].meta = {"_votable_string_dtype": "char"}
    return columns
@pytest.fixture(scope="module")
def column_attrs_lower_case_new():
    """Expected columns (lower-case names) for the new-format VOTable result."""
    return {
        "designation": Column(
            name="designation",
            description='Unique source designation (unique across all Data Releases)',
            dtype=object),
        "source_id": Column(
            name="source_id",
            description='Unique source identifier (unique within a particular Data Release)',
            unit=None, dtype=np.int64),
        "ra": Column(name="ra", description='Right ascension', unit='deg', dtype=np.float64),
        "dec": Column(name="dec", description='Declination', unit='deg', dtype=np.float64),
        "parallax": Column(name="parallax", description='Parallax', unit='mas', dtype=np.float64),
    }
@pytest.fixture(scope="module")
def column_attrs_upper_case_new():
    """Expected columns (upper-case ID names) for the new-format VOTable result."""
    return {
        "DESIGNATION": Column(
            name="DESIGNATION",
            description='Unique source designation (unique across all Data Releases)',
            dtype=object),
        "SOURCE_ID": Column(
            name="SOURCE_ID",
            description='Unique source identifier (unique within a particular Data Release)',
            unit=None, dtype=np.int64),
        "ra": Column(name="ra", description='Right ascension', unit='deg', dtype=np.float64),
        "dec": Column(name="dec", description='Declination', unit='deg', dtype=np.float64),
        "parallax": Column(name="parallax", description='Parallax', unit='mas', dtype=np.float64),
    }
@pytest.fixture(scope="module")
def column_attrs_conesearch_json():
    """Expected columns for the JSON cone-search result."""
    return {
        "solution_id": Column(name="solution_id", description='Solution Identifier',
                              unit=None, dtype=np.int64),
        "ref_epoch": Column(name="ref_epoch", description='Reference epoch',
                            unit='yr', dtype=np.float64),
        "ra": Column(name="ra", description='Right ascension', unit='deg', dtype=np.float64),
        "ra_error": Column(name="ra_error", description='Standard error of right ascension',
                           unit='mas', dtype=np.float64),
        "pm": Column(name="pm", description='Total proper motion',
                     unit='mas / yr', dtype=object),
    }
@pytest.fixture(scope="module")
def column_attrs_launch_json():
    """Expected columns for the JSON launch_job result."""
    return {
        "source_id": Column(
            name="source_id",
            description='Unique source identifier (unique within a particular Data Release)',
            unit=None, dtype=np.int64),
        "ra": Column(name="ra", description='Right ascension', unit='deg', dtype=np.float64),
        "dec": Column(name="dec", description='Declination', unit='deg', dtype=np.float64),
        "parallax": Column(name="parallax", description='Parallax', unit='mas', dtype=np.float64),
    }
@pytest.fixture(scope="module")
def mock_querier():
    """GaiaClass wired to a dummy connection that always replays JOB_DATA."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    response = DummyResponse(200)
    response.set_data(method="POST", body=JOB_DATA)
    # The query contains decimals: default response is more robust.
    handler.set_default_response(response)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="function")
def mock_datalink_querier(patch_datetime_now):
    """Function-scoped querier replaying the zipped VOTable DataLink products."""
    # The datetime patch must already be active so output names are deterministic.
    assert datetime.datetime.now(datetime.timezone.utc) == FAKE_TIME
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    response = DummyResponse(200)
    response.set_data(method="POST", body=DL_PRODUCTS_VOT)
    # The query contains decimals: default response is more robust.
    handler.set_default_response(response)
    handler.set_response(
        '?DATA_STRUCTURE=INDIVIDUAL&FORMAT=votable&ID=5937083312263887616&RELEASE=Gaia+DR3&RETRIEVAL_TYPE=ALL'
        '&USE_ZIP_ALWAYS=true&VALID_DATA=false',
        response)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_datalink_querier_ecsv():
    """Querier replaying the zipped ECSV DataLink products."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    response = DummyResponse(200)
    response.set_data(method="POST", body=DL_PRODUCTS_ECSV)
    # The query contains decimals: default response is more robust.
    handler.set_default_response(response)
    handler.set_response(
        '?DATA_STRUCTURE=INDIVIDUAL&FORMAT=ecsv&ID=5937083312263887616&RELEASE=Gaia+DR3&RETRIEVAL_TYPE=ALL'
        '&USE_ZIP_ALWAYS=true&VALID_DATA=false',
        response)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_datalink_querier_csv():
    """Querier replaying the zipped CSV DataLink products."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    response = DummyResponse(200)
    response.set_data(method="POST", body=DL_PRODUCTS_CSV)
    # The query contains decimals: default response is more robust.
    handler.set_default_response(response)
    handler.set_response(
        '?DATA_STRUCTURE=INDIVIDUAL&FORMAT=csv&ID=5937083312263887616&RELEASE=Gaia+DR3&RETRIEVAL_TYPE=ALL'
        '&USE_ZIP_ALWAYS=true&VALID_DATA=false',
        response)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_datalink_querier_fits():
    """Querier replaying the zipped FITS DataLink products."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    response = DummyResponse(200)
    response.set_data(method="POST", body=DL_PRODUCTS_FITS)
    # The query contains decimals: default response is more robust.
    handler.set_default_response(response)
    handler.set_response(
        '?DATA_STRUCTURE=INDIVIDUAL&FORMAT=fits&ID=5937083312263887616&RELEASE=Gaia+DR3&RETRIEVAL_TYPE=ALL'
        '&USE_ZIP_ALWAYS=true&VALID_DATA=false',
        response)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_querier_ecsv():
    """GaiaClass whose dummy connection replays the saved ECSV job result."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    response = DummyResponse(200)
    response.set_data(method="POST", body=JOB_DATA_ECSV)
    # The query contains decimals: default response is more robust.
    handler.set_default_response(response)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_querier_async():
    """GaiaClass whose async jobs complete immediately and return JOB_DATA."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    job_id = "12345"
    # Launch redirects (303) to the async job resource.
    launch = DummyResponse(303)
    launch.set_data(method="POST",
                    headers=[["location", f"http://test:1111/tap/async/{job_id}"]])
    handler.set_default_response(launch)
    phase = DummyResponse(200)
    phase.set_data(method="GET", body="COMPLETED")
    handler.set_response(f"async/{job_id}/phase", phase)
    results = DummyResponse(200)
    results.set_data(method="GET", body=JOB_DATA)
    handler.set_response(f"async/{job_id}/results/result", results)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_querier_async_names_new():
    """Async querier (names preferred over IDs) returning JOB_DATA_NEW."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler,
                           use_names_over_ids=True)
    job_id = "12345"
    launch = DummyResponse(303)
    launch.set_data(method="POST",
                    headers=[["location", f"http://test:1111/tap/async/{job_id}"]])
    handler.set_default_response(launch)
    phase = DummyResponse(200)
    phase.set_data(method="GET", body="COMPLETED")
    handler.set_response(f"async/{job_id}/phase", phase)
    results = DummyResponse(200)
    results.set_data(method="GET", body=JOB_DATA_NEW)
    handler.set_response(f"async/{job_id}/results/result", results)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_querier_async_ids_new():
    """Async querier (IDs preferred over names) returning JOB_DATA_NEW."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler,
                           use_names_over_ids=False)
    job_id = "12345"
    launch = DummyResponse(303)
    launch.set_data(method="POST",
                    headers=[["location", f"http://test:1111/tap/async/{job_id}"]])
    handler.set_default_response(launch)
    phase = DummyResponse(200)
    phase.set_data(method="GET", body="COMPLETED")
    handler.set_response(f"async/{job_id}/phase", phase)
    results = DummyResponse(200)
    results.set_data(method="GET", body=JOB_DATA_NEW)
    handler.set_response(f"async/{job_id}/results/result", results)
    # Class-level switch mirrors the handler configuration.
    GaiaClass.USE_NAMES_OVER_IDS = False
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_querier_names_new():
    """Synchronous querier (names preferred) replaying the async-JSON body."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler,
                           use_names_over_ids=True)
    response = DummyResponse(200)
    response.set_data(method="POST", body=JOB_DATA_QUERIER_ASYNC_JSON)
    handler.set_default_response(response)
    # Class-level switch mirrors the handler configuration.
    GaiaClass.USE_NAMES_OVER_IDS = True
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_querier_ids_new():
    """Synchronous querier (IDs preferred) replaying the async-JSON body."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler,
                           use_names_over_ids=False)
    response = DummyResponse(200)
    response.set_data(method="POST", body=JOB_DATA_QUERIER_ASYNC_JSON)
    handler.set_default_response(response)
    # Class-level switch mirrors the handler configuration.
    GaiaClass.USE_NAMES_OVER_IDS = False
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_cone_search_json():
    """Synchronous cone-search querier replaying the saved JSON body."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    response = DummyResponse(200)
    response.set_data(method="POST", body=JOB_DATA_CONE_SEARCH_ASYNC_JSON)
    handler.set_default_response(response)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_cone_search_async_json():
    """Async cone-search querier replaying the saved JSON body."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    job_id = "12345"
    # Launch is a 303 redirect that also carries the JSON body.
    launch = DummyResponse(303)
    launch.set_data(method="GET",
                    headers=[["location", f"http://test:1111/tap/async/{job_id}"]],
                    body=JOB_DATA_CONE_SEARCH_ASYNC_JSON)
    handler.set_default_response(launch)
    phase = DummyResponse(200)
    phase.set_data(method="GET", body="COMPLETED")
    handler.set_response(f"async/{job_id}/phase", phase)
    results = DummyResponse(200)
    results.set_data(method="GET", body=JOB_DATA)
    handler.set_response(f"async/{job_id}/results/result", results)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_querier_json():
    """Synchronous querier replaying the saved async-JSON job body."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    response = DummyResponse(200)
    response.set_data(method="POST", body=JOB_DATA_QUERIER_ASYNC_JSON)
    handler.set_default_response(response)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture(scope="module")
def mock_querier_async_json():
    """Async querier whose launch response carries the saved JSON body."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    job_id = "12345"
    launch = DummyResponse(303)
    launch.set_data(method="GET",
                    headers=[["location", f"http://test:1111/tap/async/{job_id}"]],
                    body=JOB_DATA_QUERIER_ASYNC_JSON)
    handler.set_default_response(launch)
    phase = DummyResponse(200)
    phase.set_data(method="GET", body="COMPLETED")
    handler.set_response(f"async/{job_id}/phase", phase)
    results = DummyResponse(200)
    results.set_data(method="GET", body=JOB_DATA)
    handler.set_response(f"async/{job_id}/results/result", results)
    return GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
                     show_server_messages=False)
@pytest.fixture
def cross_match_kwargs():
    """Minimal keyword arguments for a cross-match call."""
    return dict(full_qualified_table_name_a="schemaA.tableA",
                full_qualified_table_name_b="schemaB.tableB",
                results_table_name="results")
def test_show_message():
    """Constructing with show_server_messages=True must fetch notifications."""
    print(JOB_DATA_FILE_NAME)
    handler = DummyConnHandler()
    notification = DummyResponse(200)
    notification.set_data(
        method='GET',
        body="1653401204784D[type: -100,-1]=Gaia dev is under maintenance")
    handler.set_default_response(notification)
    # show_server_messages
    handler.set_response('notification?action=GetNotifications', notification)
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap,
              show_server_messages=True)
@pytest.mark.parametrize(
    "kwargs", [{"width": 12 * u.deg, "height": 10 * u.deg}, {"radius": RADIUS}])
def test_query_object(column_attrs, mock_querier, kwargs):
    """A synchronous object query returns the three expected rows and columns."""
    table = mock_querier.query_object(SKYCOORD, **kwargs)
    assert len(table) == 3
    for name, expected in column_attrs.items():
        assert table[name].attrs_equal(expected)
@pytest.mark.parametrize(
    "kwargs,reported_missing", [({}, "width"), ({"width": 12 * u.deg}, "height")])
def test_query_object_missing_argument(kwargs, reported_missing):
    """Omitting a required geometry argument raises a descriptive ValueError."""
    pattern = f"^Missing required argument: {reported_missing}$"
    with pytest.raises(ValueError, match=pattern):
        GAIA_QUERIER.query_object(SKYCOORD, **kwargs)
@pytest.mark.parametrize(
    "kwargs", ({"width": 12 * u.deg, "height": 10 * u.deg}, {"radius": RADIUS}))
def test_query_object_async(column_attrs, mock_querier_async, kwargs):
    """An asynchronous object query returns the three expected rows and columns."""
    assert mock_querier_async.USE_NAMES_OVER_IDS is True
    table = mock_querier_async.query_object_async(SKYCOORD, **kwargs)
    assert len(table) == 3
    for name, expected in column_attrs.items():
        assert table[name].attrs_equal(expected)
def test_cone_search_sync(column_attrs, mock_querier):
    """A synchronous cone search completes and yields the expected columns."""
    assert mock_querier.USE_NAMES_OVER_IDS is True
    job = mock_querier.cone_search(SKYCOORD, radius=RADIUS)
    assert job.async_ is False
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    # Inspect the returned table.
    results = job.get_results()
    assert len(results) == 3
    for name, expected in column_attrs.items():
        assert results[name].attrs_equal(expected)
def test_cone_search_sync_ecsv_format(column_attrs, mock_querier_ecsv):
    """A synchronous cone search in ECSV format returns the saved five rows."""
    job = mock_querier_ecsv.cone_search(SKYCOORD, radius=RADIUS, output_format="ecsv")
    assert job.async_ is False
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    # Inspect the returned table.
    results = job.get_results()
    print(results)
    assert len(results) == 5
    expected_designations = ('Gaia DR3 6636090334814214528',
                             'Gaia DR3 6636090339112400000')
    for row, designation in enumerate(expected_designations):
        assert results['designation'][row] == designation
def test_cone_search_async(column_attrs, mock_querier_async):
    """An asynchronous cone search completes and yields the expected columns."""
    assert mock_querier_async.USE_NAMES_OVER_IDS is True
    job = mock_querier_async.cone_search_async(SKYCOORD, radius=RADIUS)
    assert job.async_ is True
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    # Inspect the returned table.
    results = job.get_results()
    assert len(results) == 3
    for name, expected in column_attrs.items():
        assert results[name].attrs_equal(expected)
def test_cone_search_async_json_format(tmp_path_factory, column_attrs_conesearch_json, mock_cone_search_async_json):
    """Async cone search with JSON output dumped to a temp file."""
    dump_path = tmp_path_factory.mktemp("data") / 'cone_search_async.json'
    dump_path.write_text(JOB_DATA_CONE_SEARCH_ASYNC_JSON, encoding="utf-8")
    assert mock_cone_search_async_json.USE_NAMES_OVER_IDS is True
    job = mock_cone_search_async_json.cone_search_async(
        SKYCOORD, radius=RADIUS, output_file=str(dump_path),
        output_format='json', dump_to_file=True, verbose=True)
    assert job.async_ is True
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    # Inspect the returned table column-by-column.
    results = job.get_results()
    assert type(results) is Table
    assert 50 == len(results), len(results)
    for name, expected in column_attrs_conesearch_json.items():
        column = results[name]
        assert column.name == expected.name
        assert column.description == expected.description
        assert column.unit == expected.unit
        assert column.dtype == expected.dtype
def test_cone_search_json_format(tmp_path_factory, column_attrs_conesearch_json, mock_cone_search_json):
    """Synchronous cone search with JSON output dumped to a temp file."""
    dump_path = tmp_path_factory.mktemp("data") / 'cone_search.json'
    dump_path.write_text(JOB_DATA_CONE_SEARCH_ASYNC_JSON, encoding="utf-8")
    job = mock_cone_search_json.cone_search(
        SKYCOORD, radius=RADIUS, output_file=str(dump_path),
        output_format='json', dump_to_file=True, verbose=True)
    assert job.async_ is False
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    # Inspect the returned table column-by-column.
    results = job.get_results()
    assert type(results) is Table
    assert 50 == len(results), len(results)
    for name, expected in column_attrs_conesearch_json.items():
        column = results[name]
        assert column.name == expected.name
        assert column.description == expected.description
        assert column.unit == expected.unit
        assert column.dtype == expected.dtype
def test_launch_job_async_json_format(tmp_path_factory, column_attrs_launch_json, mock_querier_async_json):
    """Asynchronous launch_job with JSON output dumped to a temp file."""
    dump_path = tmp_path_factory.mktemp("data") / 'launch_job_async.json'
    dump_path.write_text(JOB_DATA_QUERIER_ASYNC_JSON, encoding="utf-8")
    query = "SELECT TOP 1 source_id, ra, dec, parallax from gaiadr3.gaia_source"
    job = mock_querier_async_json.launch_job_async(
        query, output_file=str(dump_path), output_format='json', dump_to_file=True)
    assert job.async_ is True
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    assert job.use_names_over_ids is True
    # Inspect the returned table column-by-column.
    results = job.get_results()
    assert type(results) is Table
    assert 1 == len(results), len(results)
    for name, expected in column_attrs_launch_json.items():
        column = results[name]
        assert column.name == expected.name
        assert column.description == expected.description
        assert column.unit == expected.unit
        assert column.dtype == expected.dtype
def test_launch_job_json_format(tmp_path_factory, column_attrs_launch_json, mock_querier_json):
    """Synchronous launch_job with JSON output dumped to a temp file."""
    dump_path = tmp_path_factory.mktemp("data") / 'launch_job.json'
    dump_path.write_text(JOB_DATA_QUERIER_ASYNC_JSON, encoding="utf-8")
    query = "SELECT TOP 1 source_id, ra, dec, parallax from gaiadr3.gaia_source"
    job = mock_querier_json.launch_job(
        query, output_file=str(dump_path), output_format='json', dump_to_file=True)
    assert job.async_ is False
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    assert job.use_names_over_ids is True
    # Inspect the returned table column-by-column.
    results = job.get_results()
    assert type(results) is Table
    assert 1 == len(results), len(results)
    for name, expected in column_attrs_launch_json.items():
        column = results[name]
        assert column.name == expected.name
        assert column.description == expected.description
        assert column.unit == expected.unit
        assert column.dtype == expected.dtype
def test_launch_job_json_format_no_dump(tmp_path_factory, column_attrs_launch_json, mock_querier_json):
    """Synchronous launch_job with JSON output kept in memory (no dump)."""
    query = "SELECT TOP 1 source_id, ra, dec, parallax from gaiadr3.gaia_source"
    job = mock_querier_json.launch_job(query, output_format='json', dump_to_file=False)
    assert job.async_ is False
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    assert job.use_names_over_ids is True
    # Inspect the returned table column-by-column.
    results = job.get_results()
    assert type(results) is Table
    assert 1 == len(results), len(results)
    for name, expected in column_attrs_launch_json.items():
        column = results[name]
        assert column.name == expected.name
        assert column.description == expected.description
        assert column.unit == expected.unit
        assert column.dtype == expected.dtype
def test_launch_job_async_vot_format_no_dump_names(tmp_path_factory, column_attrs_lower_case_new,
                                                   mock_querier_async_names_new):
    """Async launch_job (votable_plain, no dump) yields lower-case column names."""
    query = "SELECT TOP 1 source_id, ra, dec, parallax from gaiadr3.gaia_source"
    assert mock_querier_async_names_new.USE_NAMES_OVER_IDS is True
    job = mock_querier_async_names_new.launch_job_async(
        query, output_format='votable_plain', dump_to_file=False)
    assert job.async_ is True
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    assert job.use_names_over_ids is True
    # Inspect the returned table column-by-column.
    results = job.get_results()
    assert type(results) is Table
    assert 2 == len(results)
    print(results.columns)
    for name, expected in column_attrs_lower_case_new.items():
        column = results[name]
        assert column.name == expected.name
        assert column.description == expected.description
        assert column.unit == expected.unit
        assert column.dtype == expected.dtype
def test_launch_job_async_vot_format_no_dump_ids(tmp_path_factory, column_attrs_upper_case_new,
                                                 mock_querier_async_ids_new):
    """Async launch_job (votable_plain, no dump) yields upper-case ID columns."""
    query = "SELECT TOP 1 source_id, ra, dec, parallax from gaiadr3.gaia_source"
    assert mock_querier_async_ids_new.USE_NAMES_OVER_IDS is False
    job = mock_querier_async_ids_new.launch_job_async(
        query, output_format='votable_plain', dump_to_file=False)
    assert job.async_ is True
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    assert job.use_names_over_ids is False
    # Inspect the returned table column-by-column.
    results = job.get_results()
    assert type(results) is Table
    assert 2 == len(results)
    print(results.columns)
    for name, expected in column_attrs_upper_case_new.items():
        column = results[name]
        assert column.name == expected.name
        assert column.description == expected.description
        assert column.unit == expected.unit
        assert column.dtype == expected.dtype
def test_launch_job_vot_format_names(tmp_path_factory, column_attrs_lower_case_new, mock_querier_names_new):
    """Synchronous launch_job (votable_plain, dumped) with lower-case names."""
    dump_path = tmp_path_factory.mktemp("data") / 'launch_job.vot'
    dump_path.write_text(JOB_DATA_NEW, encoding="utf-8")
    query = "SELECT TOP 1 source_id, ra, dec, parallax from gaiadr3.gaia_source"
    job = mock_querier_names_new.launch_job(
        query, output_file=str(dump_path), output_format='votable_plain',
        dump_to_file=True)
    assert job.async_ is False
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    assert job.use_names_over_ids is True
    # Inspect the returned table column-by-column.
    results = job.get_results()
    assert type(results) is Table
    assert 2 == len(results)
    for name, expected in column_attrs_lower_case_new.items():
        column = results[name]
        assert column.name == expected.name
        assert column.description == expected.description
        assert column.unit == expected.unit
        assert column.dtype == expected.dtype
def test_launch_job_vot_format_ids(tmp_path_factory, column_attrs_upper_case_new, mock_querier_ids_new):
    """Synchronous launch_job (votable_plain, dumped) with upper-case IDs."""
    dump_path = tmp_path_factory.mktemp("data") / 'launch_job.vot'
    dump_path.write_text(JOB_DATA_NEW, encoding="utf-8")
    query = "SELECT TOP 1 source_id, ra, dec, parallax from gaiadr3.gaia_source"
    job = mock_querier_ids_new.launch_job(
        query, output_file=str(dump_path), output_format='votable_plain',
        dump_to_file=True)
    assert job.async_ is False
    assert job.get_phase() == "COMPLETED"
    assert job.failed is False
    assert job.use_names_over_ids is False
    # Inspect the returned table column-by-column.
    results = job.get_results()
    assert type(results) is Table
    assert 2 == len(results)
    for name, expected in column_attrs_upper_case_new.items():
        column = results[name]
        assert column.name == expected.name
        assert column.description == expected.description
        assert column.unit == expected.unit
        assert column.dtype == expected.dtype
def test_cone_search_and_changing_MAIN_GAIA_TABLE(mock_querier_async):
    """Changing MAIN_GAIA_TABLE via conf or the class must affect the query.

    Regression test for #2093 and #2099 - changing the MAIN_GAIA_TABLE
    had no effect.
    """
    job = mock_querier_async.cone_search_async(SKYCOORD, radius=RADIUS)
    assert 'gaiadr3.gaia_source' in job.parameters['query']
    with conf.set_temp("MAIN_GAIA_TABLE", "name_from_conf"):
        job = mock_querier_async.cone_search_async(SKYCOORD, radius=RADIUS)
        assert "name_from_conf" in job.parameters["query"]
        # Changing the value through the class should overrule conf.
        mock_querier_async.MAIN_GAIA_TABLE = "name_from_class"
        job = mock_querier_async.cone_search_async(SKYCOORD, radius=RADIUS)
        assert "name_from_class" in job.parameters["query"]
@pytest.mark.parametrize("overwrite_output_file", [True])
def test_datalink_querier_load_data_vot_exception(mock_datalink_querier, overwrite_output_file):
    """Check dump_to_file behaviour when the output zip already exists.

    NOTE(review): only ``True`` is parametrized, so the
    ``not overwrite_output_file`` branch below is never exercised --
    confirm whether ``False`` was left out deliberately (e.g. flakiness
    in the generated timestamped file name).
    """
    # The patched clock makes the generated output name deterministic.
    assert datetime.datetime.now(datetime.timezone.utc) == FAKE_TIME
    now = datetime.datetime.now(datetime.timezone.utc)
    output_file = 'datalink_output_' + now.strftime("%Y%m%dT%H%M%S") + '.zip'
    file_final = Path(os.getcwd(), output_file)
    # Pre-create the file that load_data will want to write.
    Path(file_final).touch()
    assert os.path.exists(file_final)
    if not overwrite_output_file:
        # Without overwrite permission, load_data must refuse to clobber.
        with pytest.raises(ValueError) as excinfo:
            mock_datalink_querier.load_data(ids=[5937083312263887616], data_release='Gaia DR3',
                                            data_structure='INDIVIDUAL',
                                            retrieval_type="ALL",
                                            linking_parameter='SOURCE_ID', valid_data=False, band=None,
                                            avoid_datatype_check=False,
                                            format="votable", dump_to_file=True,
                                            overwrite_output_file=overwrite_output_file,
                                            verbose=False)
        assert str(
            excinfo.value) == (
            f"{file_final} file already exists. Please use overwrite_output_file='True' to overwrite output file.")
    else:
        # With overwrite enabled the call must succeed silently.
        mock_datalink_querier.load_data(ids=[5937083312263887616], data_release='Gaia DR3',
                                        data_structure='INDIVIDUAL',
                                        retrieval_type="ALL",
                                        linking_parameter='SOURCE_ID', valid_data=False, band=None,
                                        avoid_datatype_check=False,
                                        format="votable", dump_to_file=True,
                                        overwrite_output_file=overwrite_output_file,
                                        verbose=False)
    # Clean up the artefact so later tests see a pristine cwd.
    os.remove(file_final)
    assert not os.path.exists(file_final)
def test_datalink_querier_load_data_vot(mock_datalink_querier):
    """load_data (VOTable) writes one datalink_output*.zip with three XML products."""
    result_dict = mock_datalink_querier.load_data(ids=[5937083312263887616], data_release='Gaia DR3',
                                                  data_structure='INDIVIDUAL',
                                                  retrieval_type="ALL",
                                                  linking_parameter='SOURCE_ID', valid_data=False, band=None,
                                                  avoid_datatype_check=False,
                                                  format="votable", dump_to_file=True, overwrite_output_file=True,
                                                  verbose=False)
    direc = os.getcwd()
    files = os.listdir(direc)
    # Filtering only the files.
    files = [f for f in files if
             Path(direc, f).is_file() and f.endswith(".zip") and f.startswith('datalink_output')]
    assert len(files) == 1
    datalink_output = files[0]
    extracted_files = []
    # Inspect the archive contents without extracting to disk.
    with zipfile.ZipFile(datalink_output, "r") as zip_ref:
        extracted_files.extend(zip_ref.namelist())
    assert len(extracted_files) == 3
    assert len(result_dict) == 3
    # The in-memory result keys mirror the archived product names.
    files = list(result_dict.keys())
    files.sort()
    assert files[0] == 'MCMC_MSC-Gaia DR3 5937083312263887616.xml'
    assert files[1] == 'XP_CONTINUOUS-Gaia DR3 5937083312263887616.xml'
    assert files[2] == 'XP_SAMPLED-Gaia DR3 5937083312263887616.xml'
    # Clean up the generated archive so other tests see a pristine cwd.
    os.remove(os.path.join(os.getcwd(), datalink_output))
    assert not os.path.exists(os.path.join(os.getcwd(), datalink_output))
def test_datalink_querier_load_data_ecsv(mock_datalink_querier_ecsv):
    """load_data (ECSV) writes one datalink_output*.zip with three ECSV products."""
    result_dict = mock_datalink_querier_ecsv.load_data(ids=[5937083312263887616], data_release='Gaia DR3',
                                                       data_structure='INDIVIDUAL',
                                                       retrieval_type="ALL",
                                                       linking_parameter='SOURCE_ID', valid_data=False, band=None,
                                                       avoid_datatype_check=False,
                                                       format="ecsv", dump_to_file=True, overwrite_output_file=True,
                                                       verbose=False)
    direc = os.getcwd()
    files = os.listdir(direc)
    # Filtering only the files.
    files = [f for f in files if
             Path(direc, f).is_file() and f.endswith(".zip") and f.startswith('datalink_output')]
    assert len(files) == 1
    datalink_output = files[0]
    extracted_files = []
    # Inspect the archive contents without extracting to disk.
    with zipfile.ZipFile(datalink_output, "r") as zip_ref:
        extracted_files.extend(zip_ref.namelist())
    assert len(extracted_files) == 3
    assert len(result_dict) == 3
    # The in-memory result keys mirror the archived product names.
    files = list(result_dict.keys())
    files.sort()
    assert files[0] == 'MCMC_MSC-Gaia DR3 5937083312263887616.ecsv'
    assert files[1] == 'XP_CONTINUOUS-Gaia DR3 5937083312263887616.ecsv'
    assert files[2] == 'XP_SAMPLED-Gaia DR3 5937083312263887616.ecsv'
    # Clean up; the relative-path exists() check relies on an unchanged cwd.
    os.remove(os.path.join(os.getcwd(), datalink_output))
    assert not os.path.exists(datalink_output)
def test_datalink_querier_load_data_csv(mock_datalink_querier_csv):
    """load_data (CSV) writes one datalink_output*.zip with three CSV products."""
    result_dict = mock_datalink_querier_csv.load_data(ids=[5937083312263887616], data_release='Gaia DR3',
                                                      data_structure='INDIVIDUAL',
                                                      retrieval_type="ALL",
                                                      linking_parameter='SOURCE_ID', valid_data=False, band=None,
                                                      avoid_datatype_check=False,
                                                      format="csv", dump_to_file=True, overwrite_output_file=True,
                                                      verbose=False)
    direc = os.getcwd()
    files = os.listdir(direc)
    # Filtering only the files.
    files = [f for f in files if
             Path(direc, f).is_file() and f.endswith(".zip") and f.startswith('datalink_output')]
    assert len(files) == 1
    datalink_output = files[0]
    extracted_files = []
    # Inspect the archive contents without extracting to disk.
    with zipfile.ZipFile(datalink_output, "r") as zip_ref:
        extracted_files.extend(zip_ref.namelist())
    assert len(extracted_files) == 3
    assert len(result_dict) == 3
    # The in-memory result keys mirror the archived product names.
    files = list(result_dict.keys())
    files.sort()
    assert files[0] == 'MCMC_MSC-Gaia DR3 5937083312263887616.csv'
    assert files[1] == 'XP_CONTINUOUS-Gaia DR3 5937083312263887616.csv'
    assert files[2] == 'XP_SAMPLED-Gaia DR3 5937083312263887616.csv'
    # Clean up; the relative-path exists() check relies on an unchanged cwd.
    os.remove(os.path.join(os.getcwd(), datalink_output))
    assert not os.path.exists(datalink_output)
@pytest.mark.skip(reason="These FITS files generate an error related to the unit 'log(cm.s**-2)'")
def test_datalink_querier_load_data_fits(mock_datalink_querier_fits):
    """load_data (FITS) writes one datalink_output*.zip with three FITS products.

    Skipped: the stored FITS products carry the non-standard unit
    'log(cm.s**-2)', which triggers a parsing error.
    """
    result_dict = mock_datalink_querier_fits.load_data(ids=[5937083312263887616], data_release='Gaia DR3',
                                                       data_structure='INDIVIDUAL',
                                                       retrieval_type="ALL",
                                                       linking_parameter='SOURCE_ID', valid_data=False, band=None,
                                                       avoid_datatype_check=False,
                                                       format="fits", dump_to_file=True, overwrite_output_file=True,
                                                       verbose=False)
    direc = os.getcwd()
    # Keep only the generated archive(s) among the cwd entries.
    files = [f for f in os.listdir(direc) if
             Path(direc, f).is_file() and f.endswith(".zip") and f.startswith('datalink_output')]
    assert len(files) == 1
    datalink_output = files[0]
    # Inspect the archive contents without extracting to disk.
    with zipfile.ZipFile(datalink_output, "r") as zip_ref:
        extracted_files = zip_ref.namelist()
    assert len(extracted_files) == 3
    assert len(result_dict) == 3
    # The in-memory result keys mirror the archived product names.
    files = sorted(result_dict.keys())
    assert files[0] == 'MCMC_MSC-Gaia DR3 5937083312263887616.fits'
    assert files[1] == 'XP_CONTINUOUS-Gaia DR3 5937083312263887616.fits'
    assert files[2] == 'XP_SAMPLED-Gaia DR3 5937083312263887616.fits'
    # Clean up; use the absolute path for both remove() and the existence
    # check, consistent with the VOTable variant of this test.
    full_path = os.path.join(os.getcwd(), datalink_output)
    os.remove(full_path)
    assert not os.path.exists(full_path)
def test_load_data_vot(monkeypatch, tmp_path, tmp_path_factory, patch_datetime_now):
    """load_data (VOTable) forwards the expected TAP parameters.

    TapPlus.load_data is monkeypatched so the assertions run inside the
    call; the pre-written zip stands in for a previously generated output.
    """
    # The patched clock makes the generated output name deterministic.
    assert datetime.datetime.now(datetime.timezone.utc) == FAKE_TIME
    now = datetime.datetime.now(datetime.timezone.utc)
    output_file = 'datalink_output_' + now.strftime("%Y%m%dT%H%M%S.%f") + '.zip'
    path = Path(os.getcwd(), output_file)
    # Seed the expected output archive from the bundled test data.
    with open(DL_PRODUCTS_VOT, 'rb') as file:
        zip_bytes = file.read()
    path.write_bytes(zip_bytes)

    def load_data_monkeypatched(self, params_dict, output_file, verbose):
        # Verify the request parameters GaiaClass builds for the TAP layer.
        assert params_dict == {
            "VALID_DATA": "true",
            "ID": "1,2,3,4",
            "FORMAT": "votable",
            "RETRIEVAL_TYPE": "epoch_photometry",
            "DATA_STRUCTURE": "INDIVIDUAL",
            "USE_ZIP_ALWAYS": "true"}
        assert str(path) == output_file
        assert verbose is True

    monkeypatch.setattr(TapPlus, "load_data", load_data_monkeypatched)
    # Keep the tests, just remove the argument once the deprecation is removed
    with pytest.warns(AstropyDeprecationWarning,
                      match='"output_file" was deprecated in version 0.4.8'):
        GAIA_QUERIER.load_data(
            valid_data=True,
            ids="1,2,3,4",
            format='votable',
            retrieval_type="epoch_photometry",
            verbose=True,
            dump_to_file=True,
            overwrite_output_file=True,
            output_file=tmp_path / "output_file")
    # Remove the seeded archive so other tests see a pristine cwd.
    path.unlink()
# FIX: corrected typos in the skip reason ("Thes" -> "These", "relatate" ->
# "related") and terminated the quoted unit name.
@pytest.mark.skip(reason="These fits files generate an error related to the unit 'log(cm.s**-2)'")
def test_load_data_fits(monkeypatch, tmp_path, tmp_path_factory):
    """Verify the parameters forwarded to TapPlus.load_data for FITS products."""
    now = datetime.datetime.now(datetime.timezone.utc)
    # NOTE(review): unlike the sibling tests this one omits the `.%f` fraction
    # and the patch_datetime_now fixture -- confirm intent once un-skipped.
    output_file = 'datalink_output_' + now.strftime("%Y%m%dT%H%M%S") + '.zip'
    path = Path(os.getcwd(), output_file)
    with open(DL_PRODUCTS_FITS, 'rb') as file:
        zip_bytes = file.read()
    path.write_bytes(zip_bytes)
    def load_data_monkeypatched(self, params_dict, output_file, verbose):
        assert params_dict == {
            "VALID_DATA": "true",
            "ID": "1,2,3,4",
            "FORMAT": "fits",
            "RETRIEVAL_TYPE": "epoch_photometry",
            "DATA_STRUCTURE": "INDIVIDUAL",
            "USE_ZIP_ALWAYS": "true"}
        # NOTE(review): sibling tests compare against a str; here output_file is
        # compared to a Path -- verify this equality once the test is re-enabled.
        assert output_file == Path(os.getcwd(), 'datalink_output.zip')
        assert verbose is True
    monkeypatch.setattr(TapPlus, "load_data", load_data_monkeypatched)
    GAIA_QUERIER.load_data(
        valid_data=True,
        ids="1,2,3,4",
        format='fits',
        retrieval_type="epoch_photometry",
        verbose=True,
        dump_to_file=True,
        overwrite_output_file=True)
    path.unlink()
def test_load_data_csv(monkeypatch, tmp_path, tmp_path_factory, patch_datetime_now):
    """Verify the parameters forwarded to TapPlus.load_data for CSV products."""
    assert datetime.datetime.now(datetime.timezone.utc) == FAKE_TIME
    stamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%S.%f")
    expected_path = Path(os.getcwd(), 'datalink_output_' + stamp + '.zip')
    # Pre-create the zip archive the Gaia code expects to find on disk.
    with open(DL_PRODUCTS_CSV, 'rb') as source:
        expected_path.write_bytes(source.read())
    def fake_load_data(self, params_dict, output_file, verbose):
        assert params_dict == {
            "VALID_DATA": "true",
            "ID": "1,2,3,4",
            "FORMAT": "csv",
            "RETRIEVAL_TYPE": "epoch_photometry",
            "DATA_STRUCTURE": "INDIVIDUAL",
            "USE_ZIP_ALWAYS": "true"}
        assert output_file == str(expected_path)
        assert verbose is True
    monkeypatch.setattr(TapPlus, "load_data", fake_load_data)
    GAIA_QUERIER.load_data(
        valid_data=True,
        ids="1,2,3,4",
        format='csv',
        retrieval_type="epoch_photometry",
        verbose=True,
        dump_to_file=True,
        overwrite_output_file=True)
    expected_path.unlink()
def test_load_data_ecsv(monkeypatch, tmp_path, tmp_path_factory, patch_datetime_now):
    """Verify the parameters forwarded to TapPlus.load_data for ECSV products."""
    assert datetime.datetime.now(datetime.timezone.utc) == FAKE_TIME
    stamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%S.%f")
    expected_path = Path(os.getcwd(), 'datalink_output_' + stamp + '.zip')
    # Pre-create the zip archive the Gaia code expects to find on disk.
    with open(DL_PRODUCTS_ECSV, 'rb') as source:
        expected_path.write_bytes(source.read())
    def fake_load_data(self, params_dict, output_file, verbose):
        assert params_dict == {
            "VALID_DATA": "true",
            "ID": "1,2,3,4",
            "FORMAT": "ecsv",
            "RETRIEVAL_TYPE": "epoch_photometry",
            "DATA_STRUCTURE": "INDIVIDUAL",
            "USE_ZIP_ALWAYS": "true"}
        assert output_file == str(expected_path)
        assert verbose is True
    monkeypatch.setattr(TapPlus, "load_data", fake_load_data)
    GAIA_QUERIER.load_data(
        valid_data=True,
        ids="1,2,3,4",
        format='ecsv',
        retrieval_type="epoch_photometry",
        verbose=True,
        dump_to_file=True,
        overwrite_output_file=True)
    expected_path.unlink()
def test_load_data_linking_parameter(monkeypatch, tmp_path, patch_datetime_now):
    """The default SOURCE_ID linking parameter must NOT appear in the request dict."""
    assert datetime.datetime.now(datetime.timezone.utc) == FAKE_TIME
    stamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%S.%f")
    expected_path = Path(os.getcwd(), 'datalink_output_' + stamp + '.zip')
    # Pre-create the zip archive the Gaia code expects to find on disk.
    with open(DL_PRODUCTS_VOT, 'rb') as source:
        expected_path.write_bytes(source.read())
    def fake_load_data(self, params_dict, output_file, verbose):
        # No LINKING_PARAMETER key: SOURCE_ID is the server-side default.
        assert params_dict == {
            "VALID_DATA": "true",
            "ID": "1,2,3,4",
            "FORMAT": "votable",
            "RETRIEVAL_TYPE": "epoch_photometry",
            "DATA_STRUCTURE": "INDIVIDUAL",
            "USE_ZIP_ALWAYS": "true"}
        assert output_file == str(expected_path)
        assert verbose is True
    monkeypatch.setattr(TapPlus, "load_data", fake_load_data)
    GAIA_QUERIER.load_data(
        ids="1,2,3,4",
        retrieval_type="epoch_photometry",
        linking_parameter="SOURCE_ID",
        valid_data=True,
        verbose=True,
        dump_to_file=True,
        overwrite_output_file=True)
    expected_path.unlink()
@pytest.mark.parametrize("linking_param", ['TRANSIT_ID', 'IMAGE_ID'])
def test_load_data_linking_parameter_with_values(monkeypatch, tmp_path, linking_param, patch_datetime_now):
    """Non-default linking parameters must be forwarded in the request dict."""
    assert datetime.datetime.now(datetime.timezone.utc) == FAKE_TIME
    stamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%S.%f")
    expected_path = Path(os.getcwd(), 'datalink_output_' + stamp + '.zip')
    # Pre-create the zip archive the Gaia code expects to find on disk.
    with open(DL_PRODUCTS_VOT, 'rb') as source:
        expected_path.write_bytes(source.read())
    def fake_load_data(self, params_dict, output_file, verbose):
        cwd = os.getcwd()
        # Exactly one datalink_output*.zip file must exist in the CWD.
        zips = [name for name in os.listdir(cwd)
                if Path(cwd, name).is_file() and name.endswith(".zip")
                and name.startswith('datalink_output')]
        assert len(zips) == 1
        assert params_dict == {
            "VALID_DATA": "true",
            "ID": "1,2,3,4",
            "FORMAT": "votable",
            "RETRIEVAL_TYPE": "epoch_photometry",
            "DATA_STRUCTURE": "INDIVIDUAL",
            "LINKING_PARAMETER": linking_param,
            "USE_ZIP_ALWAYS": "true", }
        assert output_file == str(Path(cwd, zips[0]))
        assert verbose is True
    monkeypatch.setattr(TapPlus, "load_data", fake_load_data)
    GAIA_QUERIER.load_data(
        ids="1,2,3,4",
        retrieval_type="epoch_photometry",
        linking_parameter=linking_param,
        valid_data=True,
        verbose=True,
        dump_to_file=True,
        overwrite_output_file=True)
    expected_path.unlink()
def test_get_datalinks(monkeypatch):
    """get_datalinks should delegate to the datalink handler and return a Table."""
    def fake_get_datalinks(self, ids, linking_parameter, verbose):
        return Table()
    # `GaiaClass` is a subclass of `TapPlus`, but it does not inherit
    # `get_datalinks()`; it replaces it with a call to the `get_datalinks()`
    # of its `__gaiadata`.
    monkeypatch.setattr(TapPlus, "get_datalinks", fake_get_datalinks)
    assert isinstance(GAIA_QUERIER.get_datalinks(ids=["1", "2", "3", "4"], verbose=True), Table)
@pytest.mark.parametrize("linking_param", ['SOURCE_ID', 'TRANSIT_ID', 'IMAGE_ID'])
def test_get_datalinks_linking_parameter(monkeypatch, linking_param):
    """All valid linking parameters are accepted and a Table comes back."""
    def fake_get_datalinks(self, ids, linking_parameter, verbose):
        return Table()
    # `GaiaClass` is a subclass of `TapPlus`, but it does not inherit
    # `get_datalinks()`; it replaces it with a call to the `get_datalinks()`
    # of its `__gaiadata`.
    monkeypatch.setattr(TapPlus, "get_datalinks", fake_get_datalinks)
    table = GAIA_QUERIER.get_datalinks(ids=["1", "2", "3", "4"], linking_parameter=linking_param, verbose=True)
    assert isinstance(table, Table)
@pytest.mark.parametrize("id_int", ids_ints)
def test_get_datalinks_linking_parameter_ids_int(monkeypatch, id_int):
    """Integer-style identifiers are accepted by get_datalinks."""
    def fake_get_datalinks(self, ids, linking_parameter, verbose):
        return Table()
    # `GaiaClass` is a subclass of `TapPlus`, but it does not inherit
    # `get_datalinks()`; it replaces it with a call to the `get_datalinks()`
    # of its `__gaiadata`.
    monkeypatch.setattr(TapPlus, "get_datalinks", fake_get_datalinks)
    assert isinstance(GAIA_QUERIER.get_datalinks(ids=id_int, verbose=True), Table)
@pytest.mark.parametrize("id_des", ids_designator)
def test_get_datalinks_linking_parameter_ids_designator(monkeypatch, id_des):
    """Designator-style identifiers are accepted by get_datalinks."""
    def fake_get_datalinks(self, ids, linking_parameter, verbose):
        return Table()
    # `GaiaClass` is a subclass of `TapPlus`, but it does not inherit
    # `get_datalinks()`; it replaces it with a call to the `get_datalinks()`
    # of its `__gaiadata`.
    monkeypatch.setattr(TapPlus, "get_datalinks", fake_get_datalinks)
    assert isinstance(GAIA_QUERIER.get_datalinks(ids=id_des, verbose=True), Table)
def test_get_datalinks_exception(monkeypatch):
    """An unknown linking parameter must raise ValueError before any request."""
    def fake_get_datalinks(self, ids, linking_parameter, verbose):
        return Table()
    linking_parameter = 'NOT_VALID'
    # `GaiaClass` is a subclass of `TapPlus`, but it does not inherit
    # `get_datalinks()`; it replaces it with a call to the `get_datalinks()`
    # of its `__gaiadata`.
    monkeypatch.setattr(TapPlus, "get_datalinks", fake_get_datalinks)
    expected = f"^Invalid linking_parameter value '{linking_parameter}' .*"
    with pytest.raises(ValueError, match=expected):
        GAIA_QUERIER.get_datalinks(ids=["1", "2", "3", "4"], linking_parameter=linking_parameter, verbose=True)
@pytest.mark.parametrize("background", [False, True])
def test_cross_match(background, cross_match_kwargs, mock_querier_async):
    """Cross-match jobs are async; background jobs stay EXECUTING, else COMPLETED."""
    job = mock_querier_async.cross_match(**cross_match_kwargs, background=background)
    assert job.async_ is True
    # BUG FIX: the conditional expression binds looser than `==`, so the old
    # `assert a == "EXECUTING" if background else "COMPLETED"` parsed as
    # `(a == "EXECUTING") if background else "COMPLETED"` and asserted the bare
    # (always truthy) string "COMPLETED" whenever background was False.
    assert job.get_phase() == ("EXECUTING" if background else "COMPLETED")
    assert job.failed is False
@pytest.mark.parametrize(
    "kwarg,invalid_value,error_message",
    [("full_qualified_table_name_a",
      "tableA",
      "^Not found schema name in full qualified table A: 'tableA'$"),
     ("full_qualified_table_name_b",
      "tableB",
      "^Not found schema name in full qualified table B: 'tableB'$"),
     ("results_table_name",
      "schema.results",
      "^Please, do not specify schema for 'results_table_name'$")])
def test_cross_match_invalid_mandatory_kwarg(
    cross_match_kwargs, kwarg, invalid_value, error_message
):
    """Schema-related argument validation errors surface as ValueError."""
    bad_kwargs = dict(cross_match_kwargs, **{kwarg: invalid_value})
    with pytest.raises(ValueError, match=error_message):
        GAIA_QUERIER.cross_match(**bad_kwargs)
@pytest.mark.parametrize("radius", [0.01, 10.1])
def test_cross_match_invalid_radius(cross_match_kwargs, radius):
    """Radii outside [0.1, 10.0] must be rejected with ValueError."""
    expected = rf"^Invalid radius value. Found {radius}, valid range is: 0.1 to 10.0$"
    with pytest.raises(ValueError, match=expected):
        GAIA_QUERIER.cross_match(**cross_match_kwargs, radius=radius)
@pytest.mark.parametrize(
    "missing_kwarg",
    ["full_qualified_table_name_a", "full_qualified_table_name_b", "results_table_name"])
def test_cross_match_missing_mandatory_kwarg(cross_match_kwargs, missing_kwarg):
    """Omitting any mandatory keyword-only argument raises TypeError."""
    cross_match_kwargs.pop(missing_kwarg)
    expected = rf"missing 1 required keyword-only argument: '{missing_kwarg}'$"
    with pytest.raises(TypeError, match=expected):
        GAIA_QUERIER.cross_match(**cross_match_kwargs)
@patch.object(TapPlus, 'login')
def test_login(mock_login):
    """login() hits both the TAP and datalink handlers; an HTTPError on the
    first handler stops the second call (2 calls on success, +1 on failure)."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    gaia = GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap, show_server_messages=False)
    gaia.login(user="user", password="password")
    assert mock_login.call_count == 2
    mock_login.side_effect = HTTPError("Login error")
    gaia.login(user="user", password="password")
    assert mock_login.call_count == 3
@patch.object(TapPlus, 'login_gui')
@patch.object(TapPlus, 'login')
def test_login_gui(mock_login_gui, mock_login):
    # NOTE(review): stacked @patch decorators inject mocks bottom-up, so the
    # FIRST parameter receives the mock for 'login' (innermost decorator) and
    # the SECOND the mock for 'login_gui'.  The parameter names here look
    # swapped relative to that rule; the assertions below may be checking the
    # opposite mock of what their names suggest -- confirm against
    # GaiaClass.login_gui's implementation before renaming anything.
    conn_handler = DummyConnHandler()
    tapplus = TapPlus(url="http://test:1111/tap", connhandler=conn_handler)
    tap = GaiaClass(tap_plus_conn_handler=conn_handler, datalink_handler=tapplus, show_server_messages=False)
    tap.login_gui()
    assert (mock_login_gui.call_count == 1)
    mock_login_gui.side_effect = HTTPError("Login error")
    tap.login(user="user", password="password")
    assert (mock_login.call_count == 1)
@patch.object(TapPlus, 'logout')
def test_logout(mock_logout):
    """logout() hits both handlers; an HTTPError on the first stops the second
    call (2 calls on success, +1 on failure)."""
    handler = DummyConnHandler()
    datalink_tap = TapPlus(url="http://test:1111/tap", connhandler=handler)
    gaia = GaiaClass(tap_plus_conn_handler=handler, datalink_handler=datalink_tap, show_server_messages=False)
    gaia.logout()
    assert mock_logout.call_count == 2
    mock_logout.side_effect = HTTPError("Login error")
    gaia.logout()
    assert mock_logout.call_count == 3
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@gaia@tests@test_gaiatap.py@.PATH_END.py
|
{
"filename": "you.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/llms/you.py",
"type": "Python"
}
|
import os
from typing import Any, Dict, Generator, Iterator, List, Literal, Optional
import requests
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from pydantic import Field
SMART_ENDPOINT = "https://chat-api.you.com/smart"
RESEARCH_ENDPOINT = "https://chat-api.you.com/research"
def _request(base_url: str, api_key: str, **kwargs: Any) -> Dict[str, Any]:
    """POST ``kwargs`` as JSON to ``base_url`` and return the decoded response.

    Authentication is done via the ``x-api-key`` header; any non-2xx status
    raises ``requests.HTTPError``.

    NOTE: This function can be replaced by a OpenAPI-generated Python SDK in the future,
    for better input/output typing support.
    """
    response = requests.post(
        base_url,
        headers={"x-api-key": api_key},
        json=kwargs,
    )
    response.raise_for_status()
    return response.json()
def _request_stream(
    base_url: str, api_key: str, **kwargs: Any
) -> Generator[str, None, None]:
    """Stream text tokens from a You.com server-sent-events endpoint.

    Yields each ``token`` event's payload; ``search_results``/``done`` events
    are ignored, ``error`` events raise ValueError, anything else raises
    NotImplementedError.
    """
    response = requests.post(
        base_url,
        headers={"x-api-key": api_key},
        stream=True,
        json=dict(**kwargs, stream=True),
    )
    response.raise_for_status()
    try:
        import sseclient
    except ImportError:
        raise ImportError(
            (
                "Could not import `sseclient`. "
                "Please install it with `pip install sseclient-py`."
            )
        )
    # Explicitly coercing the response to a generator to satisfy mypy
    client = sseclient.SSEClient(bytestring for bytestring in response)
    for event in client.events():
        kind = event.event
        if kind == "token":
            yield event.data
        elif kind in ("search_results", "done"):
            continue
        elif kind == "error":
            raise ValueError(f"Error in response: {event.data}")
        else:
            raise NotImplementedError(f"Unknown event type {event.event}")
class You(LLM):
    """Wrapper around You.com's conversational Smart and Research APIs.

    Each API endpoint is designed to generate conversational
    responses to a variety of query types, including inline citations
    and web results when relevant.

    Smart Endpoint:
    - Quick, reliable answers for a variety of questions
    - Cites the entire web page URL

    Research Endpoint:
    - In-depth answers with extensive citations for a variety of questions
    - Cites the specific web page snippet relevant to the claim

    To connect to the You.com api requires an API key which
    you can get at https://api.you.com.
    For more information, check out the documentations at
    https://documentation.you.com/api-reference/.

    Args:
        endpoint: You.com conversational endpoints. Choose from "smart" or "research"
        ydc_api_key: You.com API key, if `YDC_API_KEY` is not set in the environment
    """

    # Which conversational endpoint to hit; mapped to SMART_ENDPOINT /
    # RESEARCH_ENDPOINT by the `_request_endpoint` property.
    endpoint: Literal["smart", "research"] = Field(
        "smart",
        description=(
            'You.com conversational endpoints. Choose from "smart" or "research"'
        ),
    )
    # FIX: corrected "envrioment" -> "environment" in the field description.
    ydc_api_key: Optional[str] = Field(
        None,
        description="You.com API key, if `YDC_API_KEY` is not set in the environment",
    )

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Return the endpoint's full answer for ``prompt``.

        Raises NotImplementedError if ``stop`` words are supplied, since the
        You.com API does not support them.
        """
        if stop:
            raise NotImplementedError(
                "Stop words are not implemented for You.com endpoints."
            )
        params = {"query": prompt}
        response = _request(self._request_endpoint, api_key=self._api_key, **params)
        return response["answer"]

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Yield the endpoint's answer for ``prompt`` token by token.

        Raises NotImplementedError if ``stop`` words are supplied, since the
        You.com API does not support them.
        """
        if stop:
            raise NotImplementedError(
                "Stop words are not implemented for You.com endpoints."
            )
        params = {"query": prompt}
        for token in _request_stream(
            self._request_endpoint, api_key=self._api_key, **params
        ):
            yield GenerationChunk(text=token)

    @property
    def _request_endpoint(self) -> str:
        """URL of the configured endpoint ("smart" maps to SMART_ENDPOINT)."""
        if self.endpoint == "smart":
            return SMART_ENDPOINT
        return RESEARCH_ENDPOINT

    @property
    def _api_key(self) -> str:
        """Explicit key if set, else the YDC_API_KEY environment variable.

        Raises KeyError when neither is available.
        """
        return self.ydc_api_key or os.environ["YDC_API_KEY"]

    @property
    def _llm_type(self) -> str:
        return "you.com"
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@llms@you.py@.PATH_END.py
|
{
"filename": "mynormalize.py",
"repo_name": "Alymantara/pydoppler",
"repo_path": "pydoppler_extracted/pydoppler-master/pydoppler/mynormalize.py",
"type": "Python"
}
|
# The Normalize class is largely based on code provided by Sarah Graves.
import numpy as np
import numpy.ma as ma
import matplotlib.cbook as cbook
from matplotlib.colors import Normalize
class MyNormalize(Normalize):
    '''
    A Normalize class for imshow that allows different stretching functions
    for astronomical images.
    '''

    def __init__(self, stretch='linear', exponent=5, vmid=None, vmin=None,
                 vmax=None, clip=False):
        '''
        Initalize an APLpyNormalize instance.

        Optional Keyword Arguments:

            *vmin*: [ None | float ]
                Minimum pixel value to use for the scaling.

            *vmax*: [ None | float ]
                Maximum pixel value to use for the scaling.

            *stretch*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ]
                The stretch function to use (default is 'linear').

            *vmid*: [ None | float ]
                Mid-pixel value used for the log and arcsinh stretches. If
                set to None, a default value is picked.

            *exponent*: [ float ]
                if self.stretch is set to 'power', this is the exponent to use.

            *clip*: [ True | False ]
                If clip is True and the given value falls outside the range,
                the returned value will be 0 or 1, whichever is closer.
        '''
        # FIX: only compare the limits when both are given; with the default
        # vmin/vmax of None the old bare `vmax < vmin` raises TypeError on
        # Python 3.
        if vmin is not None and vmax is not None and vmax < vmin:
            raise Exception("vmax should be larger than vmin")

        # Call original initalization routine
        Normalize.__init__(self, vmin=vmin, vmax=vmax, clip=clip)

        # Save parameters
        self.stretch = stretch
        self.exponent = exponent

        # FIX: use `is None` instead of np.equal(..., None); comparing with
        # None via np.equal is deprecated in modern NumPy and was never a
        # reliable scalar test.
        if stretch == 'power' and self.exponent is None:
            raise Exception("For stretch=='power', an exponent should be specified")

        if vmid is None:
            if stretch == 'log':
                if vmin > 0:
                    self.midpoint = vmax / vmin
                else:
                    raise Exception("When using a log stretch, if vmin < 0, then vmid has to be specified")
            elif stretch == 'arcsinh':
                self.midpoint = -1. / 30.
            else:
                self.midpoint = None
        else:
            if stretch == 'log':
                if vmin < vmid:
                    raise Exception("When using a log stretch, vmin should be larger than vmid")
                self.midpoint = (vmax - vmid) / (vmin - vmid)
            elif stretch == 'arcsinh':
                self.midpoint = (vmid - vmin) / (vmax - vmin)
            else:
                self.midpoint = None

    def __call__(self, value, clip=None):
        """Normalize *value* to [0, 1] and apply the configured stretch."""

        # read in parameters
        method = self.stretch
        exponent = self.exponent
        midpoint = self.midpoint

        # ORIGINAL MATPLOTLIB CODE

        if clip is None:
            clip = self.clip

        # FIX: np.iterable replaces cbook.iterable (removed in matplotlib 3.x)
        # and the builtin float replaces np.float (alias removed in NumPy 1.24).
        if np.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)

        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin == vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                               mask=mask)
            result = (val - vmin) * (1.0 / (vmax - vmin))

            # CUSTOM APLPY CODE

            # Keep track of negative values
            negative = result < 0.

            if self.stretch == 'linear':
                pass
            elif self.stretch == 'log':
                result = ma.log10(result * (self.midpoint - 1.) + 1.) \
                    / ma.log10(self.midpoint)
            elif self.stretch == 'sqrt':
                result = ma.sqrt(result)
            elif self.stretch == 'arcsinh':
                result = ma.arcsinh(result / self.midpoint) \
                    / ma.arcsinh(1. / self.midpoint)
            elif self.stretch == 'power':
                result = ma.power(result, exponent)
            else:
                raise Exception("Unknown stretch in APLpyNormalize: %s" %
                                self.stretch)

            # Now set previously negative values to 0, as these are
            # different from true NaN values in the FITS image
            result[negative] = -np.inf

        if vtype == 'scalar':
            result = result[0]

        return result

    def inverse(self, value):
        """Invert the normalization: map values in [0, 1] back to data space."""

        # ORIGINAL MATPLOTLIB CODE

        if not self.scaled():
            raise ValueError("Not invertible until scaled")

        vmin, vmax = self.vmin, self.vmax

        # CUSTOM APLPY CODE

        # FIX: np.iterable replaces cbook.iterable (removed in matplotlib 3.x).
        if np.iterable(value):
            val = ma.asarray(value)
        else:
            val = value

        if self.stretch == 'linear':
            pass
        elif self.stretch == 'log':
            val = (ma.power(10., val * ma.log10(self.midpoint)) - 1.) / (self.midpoint - 1.)
        elif self.stretch == 'sqrt':
            val = val * val
        elif self.stretch == 'arcsinh':
            val = self.midpoint * \
                ma.sinh(val * ma.arcsinh(1. / self.midpoint))
        elif self.stretch == 'power':
            val = ma.power(val, (1. / self.exponent))
        else:
            raise Exception("Unknown stretch in APLpyNormalize: %s" %
                            self.stretch)

        return vmin + val * (vmax - vmin)
|
AlymantaraREPO_NAMEpydopplerPATH_START.@pydoppler_extracted@pydoppler-master@pydoppler@mynormalize.py@.PATH_END.py
|
{
"filename": "_dss.py",
"repo_name": "sfarrens/sfof",
"repo_path": "sfof_extracted/sfof-master/sfof/python/euclid/dm/_dss.py",
"type": "Python"
}
|
# /home/sartor/pymodule/euclid/dm/_dss.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:44ff076caf94dd1666a330718f99f59b80790879
# Generated 2014-07-24 16:26:39.931924 by PyXB version 1.2.3
# Namespace http://euclid.esa.org/schema/sys/dss [xmlns:dss]
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
# PyXB-generated module preamble -- do not edit by hand; regenerate from the
# XSD instead.  Note the u'' literal below: this file targets Python 2.
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:869ae486-133e-11e4-88d8-90b11c83965f')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.3'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
    raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import euclid.dm._sys as _ImportedBinding_euclid_dm__sys
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI(u'http://euclid.esa.org/schema/sys/dss', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.

    @param xml_text An XML document.  This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.

    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.

    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser.  You might pass the URI from which
    the document was obtained.
    """
    # Non-SAX styles fall back to a DOM parse of the whole document.
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    handler = saxer.getContentHandler()
    xmld = xml_text
    # NOTE(review): `unicode` exists only on Python 2; this generated module is
    # Python-2-only as written (despite the py2/py3 wording in the docstring).
    if isinstance(xmld, unicode):
        xmld = xmld.encode(pyxb._InputEncoding)
    saxer.parse(io.BytesIO(xmld))
    instance = handler.rootObject()
    return instance
def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.

    The node tag must correspond to an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: {http://euclid.esa.org/schema/sys/dss}protocol
# PyXB-generated enumeration binding; the addEnumeration calls below register
# the allowed values and MUST follow the class definition in this order.
class protocol (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """Transfer-protocol enumeration (http, https, ftp, sftp, file, gridftp, srm)."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'protocol')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 21, 4)
    _Documentation = None
protocol._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=protocol, enum_prefix=None)
protocol.http = protocol._CF_enumeration.addEnumeration(unicode_value=u'http', tag=u'http')
protocol.https = protocol._CF_enumeration.addEnumeration(unicode_value=u'https', tag=u'https')
protocol.ftp = protocol._CF_enumeration.addEnumeration(unicode_value=u'ftp', tag=u'ftp')
protocol.sftp = protocol._CF_enumeration.addEnumeration(unicode_value=u'sftp', tag=u'sftp')
protocol.file = protocol._CF_enumeration.addEnumeration(unicode_value=u'file', tag=u'file')
protocol.gridftp = protocol._CF_enumeration.addEnumeration(unicode_value=u'gridftp', tag=u'gridftp')
protocol.srm = protocol._CF_enumeration.addEnumeration(unicode_value=u'srm', tag=u'srm')
protocol._InitializeFacetMap(protocol._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'protocol', protocol)
# Atomic simple type: {http://euclid.esa.org/schema/sys/dss}checksumAlgorithm
# PyXB-generated enumeration binding (MD5, Adler32).
class checksumAlgorithm (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """Checksum-algorithm enumeration: MD5 or Adler32."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'checksumAlgorithm')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 35, 4)
    _Documentation = None
checksumAlgorithm._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=checksumAlgorithm, enum_prefix=None)
checksumAlgorithm.MD5 = checksumAlgorithm._CF_enumeration.addEnumeration(unicode_value=u'MD5', tag=u'MD5')
checksumAlgorithm.Adler32 = checksumAlgorithm._CF_enumeration.addEnumeration(unicode_value=u'Adler32', tag=u'Adler32')
checksumAlgorithm._InitializeFacetMap(checksumAlgorithm._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'checksumAlgorithm', checksumAlgorithm)
# Atomic simple type: {http://euclid.esa.org/schema/sys/dss}dataContainerFileStatus
# PyXB-generated enumeration binding.  Note: 'COMMITED' (sic) matches the XSD
# spelling and must not be "fixed" here.
class dataContainerFileStatus (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """The status of the file on the permanent storage node."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'dataContainerFileStatus')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 49, 4)
    _Documentation = u'The status of the file on the permanent storage node.'
dataContainerFileStatus._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=dataContainerFileStatus, enum_prefix=None)
dataContainerFileStatus.PROPOSED = dataContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'PROPOSED', tag=u'PROPOSED')
dataContainerFileStatus.PROCESSING = dataContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'PROCESSING', tag=u'PROCESSING')
dataContainerFileStatus.COMMITED = dataContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'COMMITED', tag=u'COMMITED')
dataContainerFileStatus.VALIDATED = dataContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'VALIDATED', tag=u'VALIDATED')
dataContainerFileStatus.ARCHIVED = dataContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'ARCHIVED', tag=u'ARCHIVED')
dataContainerFileStatus.DELETED = dataContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'DELETED', tag=u'DELETED')
dataContainerFileStatus._InitializeFacetMap(dataContainerFileStatus._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'dataContainerFileStatus', dataContainerFileStatus)
# Atomic simple type: {http://euclid.esa.org/schema/sys/dss}fileContainerFileStatus
# PyXB-generated enumeration binding; the empty string is itself a valid value.
class fileContainerFileStatus (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """The status of the file on the permanent storage node."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'fileContainerFileStatus')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 99, 5)
    _Documentation = u'The status of the file on the permanent storage node.'
fileContainerFileStatus._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=fileContainerFileStatus, enum_prefix=None)
fileContainerFileStatus.ONLINE = fileContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'ONLINE', tag=u'ONLINE')
fileContainerFileStatus.OFFLINE = fileContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'OFFLINE', tag=u'OFFLINE')
fileContainerFileStatus.DELETED = fileContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'DELETED', tag=u'DELETED')
fileContainerFileStatus.emptyString = fileContainerFileStatus._CF_enumeration.addEnumeration(unicode_value=u'', tag='emptyString')
fileContainerFileStatus._InitializeFacetMap(fileContainerFileStatus._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'fileContainerFileStatus', fileContainerFileStatus)
# Atomic simple type: {http://euclid.esa.org/schema/sys/dss}dataDistributionCommandType
# PyXB-generated enumeration binding.
class dataDistributionCommandType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """Distribution-command enumeration: SUBMIT, RETRIEVE, COPY, CHECK, DELETE."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'dataDistributionCommandType')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 187, 4)
    _Documentation = None
dataDistributionCommandType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=dataDistributionCommandType, enum_prefix=None)
dataDistributionCommandType.SUBMIT = dataDistributionCommandType._CF_enumeration.addEnumeration(unicode_value=u'SUBMIT', tag=u'SUBMIT')
dataDistributionCommandType.RETRIEVE = dataDistributionCommandType._CF_enumeration.addEnumeration(unicode_value=u'RETRIEVE', tag=u'RETRIEVE')
dataDistributionCommandType.COPY = dataDistributionCommandType._CF_enumeration.addEnumeration(unicode_value=u'COPY', tag=u'COPY')
dataDistributionCommandType.CHECK = dataDistributionCommandType._CF_enumeration.addEnumeration(unicode_value=u'CHECK', tag=u'CHECK')
dataDistributionCommandType.DELETE = dataDistributionCommandType._CF_enumeration.addEnumeration(unicode_value=u'DELETE', tag=u'DELETE')
dataDistributionCommandType._InitializeFacetMap(dataDistributionCommandType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'dataDistributionCommandType', dataDistributionCommandType)
# Atomic simple type: {http://euclid.esa.org/schema/sys/dss}dataDistributionCommandStatus
# PyXB-generated enumeration binding.
class dataDistributionCommandStatus (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """Command life-cycle enumeration: NEW through COMPLETED/ERROR/DELETED."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'dataDistributionCommandStatus')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 197, 4)
    _Documentation = None
dataDistributionCommandStatus._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=dataDistributionCommandStatus, enum_prefix=None)
dataDistributionCommandStatus.NEW = dataDistributionCommandStatus._CF_enumeration.addEnumeration(unicode_value=u'NEW', tag=u'NEW')
dataDistributionCommandStatus.APPROVED = dataDistributionCommandStatus._CF_enumeration.addEnumeration(unicode_value=u'APPROVED', tag=u'APPROVED')
dataDistributionCommandStatus.SCHEDULED = dataDistributionCommandStatus._CF_enumeration.addEnumeration(unicode_value=u'SCHEDULED', tag=u'SCHEDULED')
dataDistributionCommandStatus.PENDING = dataDistributionCommandStatus._CF_enumeration.addEnumeration(unicode_value=u'PENDING', tag=u'PENDING')
dataDistributionCommandStatus.EXECUTING = dataDistributionCommandStatus._CF_enumeration.addEnumeration(unicode_value=u'EXECUTING', tag=u'EXECUTING')
dataDistributionCommandStatus.DELETED = dataDistributionCommandStatus._CF_enumeration.addEnumeration(unicode_value=u'DELETED', tag=u'DELETED')
dataDistributionCommandStatus.ERROR = dataDistributionCommandStatus._CF_enumeration.addEnumeration(unicode_value=u'ERROR', tag=u'ERROR')
dataDistributionCommandStatus.COMPLETED = dataDistributionCommandStatus._CF_enumeration.addEnumeration(unicode_value=u'COMPLETED', tag=u'COMPLETED')
dataDistributionCommandStatus._InitializeFacetMap(dataDistributionCommandStatus._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'dataDistributionCommandStatus', dataDistributionCommandStatus)
# Complex type {http://euclid.esa.org/schema/sys/dss}checksumType with content type ELEMENT_ONLY
# PyXB-generated complex type with two child elements: Algorithm and Value.
class checksumType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://euclid.esa.org/schema/sys/dss}checksumType with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'checksumType')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 42, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://euclid.esa.org/schema/sys/dss}Algorithm uses Python identifier Algorithm
    __Algorithm = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Algorithm'), 'Algorithm', '__httpeuclid_esa_orgschemasysdss_checksumType_httpeuclid_esa_orgschemasysdssAlgorithm', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 44, 12), )

    Algorithm = property(__Algorithm.value, __Algorithm.set, None, None)

    # Element {http://euclid.esa.org/schema/sys/dss}Value uses Python identifier Value
    __Value = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Value'), 'Value', '__httpeuclid_esa_orgschemasysdss_checksumType_httpeuclid_esa_orgschemasysdssValue', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 45, 12), )

    Value = property(__Value.value, __Value.set, None, None)

    _ElementMap.update({
        __Algorithm.name() : __Algorithm,
        __Value.name() : __Value
    })
    _AttributeMap.update({

    })
Namespace.addCategoryObject('typeBinding', u'checksumType', checksumType)
# Complex type {http://euclid.esa.org/schema/sys/dss}dataContainerStorage with content type ELEMENT_ONLY
class dataContainerStorage (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for dataContainerStorage: a data entity identified by
    a unique name, possibly available as identical copies at different
    locations.  Child elements: Id, Filename, Description, Type,
    CreationDate, CheckSum, and repeatable Location entries."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'dataContainerStorage')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 82, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://euclid.esa.org/schema/sys/dss}Id uses Python identifier Id
    __Id = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Id'), 'Id', '__httpeuclid_esa_orgschemasysdss_dataContainerStorage_httpeuclid_esa_orgschemasysdssId', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 89, 12), )
    Id = property(__Id.value, __Id.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}Filename uses Python identifier Filename
    __Filename = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Filename'), 'Filename', '__httpeuclid_esa_orgschemasysdss_dataContainerStorage_httpeuclid_esa_orgschemasysdssFilename', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 90, 12), )
    Filename = property(__Filename.value, __Filename.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}Description uses Python identifier Description
    __Description = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Description'), 'Description', '__httpeuclid_esa_orgschemasysdss_dataContainerStorage_httpeuclid_esa_orgschemasysdssDescription', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 91, 12), )
    Description = property(__Description.value, __Description.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}Type uses Python identifier Type
    __Type = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Type'), 'Type', '__httpeuclid_esa_orgschemasysdss_dataContainerStorage_httpeuclid_esa_orgschemasysdssType', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 92, 12), )
    Type = property(__Type.value, __Type.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}CreationDate uses Python identifier CreationDate
    __CreationDate = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'CreationDate'), 'CreationDate', '__httpeuclid_esa_orgschemasysdss_dataContainerStorage_httpeuclid_esa_orgschemasysdssCreationDate', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 93, 12), )
    CreationDate = property(__CreationDate.value, __CreationDate.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}CheckSum uses Python identifier CheckSum
    __CheckSum = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'CheckSum'), 'CheckSum', '__httpeuclid_esa_orgschemasysdss_dataContainerStorage_httpeuclid_esa_orgschemasysdssCheckSum', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 94, 12), )
    CheckSum = property(__CheckSum.value, __CheckSum.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}Location uses Python identifier Location
    # The True flag (unlike the False used above) appears to declare Location
    # as plural/repeatable — consistent with the min=0/max=None counter in
    # this type's content automaton below.
    __Location = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Location'), 'Location', '__httpeuclid_esa_orgschemasysdss_dataContainerStorage_httpeuclid_esa_orgschemasysdssLocation', True, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 95, 12), )
    Location = property(__Location.value, __Location.set, None, None)
    # Map each XML element name to its declaration for content processing.
    _ElementMap.update({
        __Id.name() : __Id,
        __Filename.name() : __Filename,
        __Description.name() : __Description,
        __Type.name() : __Type,
        __CreationDate.name() : __CreationDate,
        __CheckSum.name() : __CheckSum,
        __Location.name() : __Location
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'dataContainerStorage', dataContainerStorage)
# Complex type {http://euclid.esa.org/schema/sys/dss}fileContainer with content type ELEMENT_ONLY
class fileContainer (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for fileContainer: the storage node and directory where
    a file is stored.  Child elements: Id, Path, URL, StorageNode,
    FileStatus; one optional string attribute, storagetype."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'fileContainer')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 111, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://euclid.esa.org/schema/sys/dss}Id uses Python identifier Id
    __Id = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Id'), 'Id', '__httpeuclid_esa_orgschemasysdss_fileContainer_httpeuclid_esa_orgschemasysdssId', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 118, 12), )
    Id = property(__Id.value, __Id.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}Path uses Python identifier Path
    __Path = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Path'), 'Path', '__httpeuclid_esa_orgschemasysdss_fileContainer_httpeuclid_esa_orgschemasysdssPath', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 119, 12), )
    Path = property(__Path.value, __Path.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}URL uses Python identifier URL
    __URL = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'URL'), 'URL', '__httpeuclid_esa_orgschemasysdss_fileContainer_httpeuclid_esa_orgschemasysdssURL', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 120, 12), )
    URL = property(__URL.value, __URL.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}StorageNode uses Python identifier StorageNode
    __StorageNode = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'StorageNode'), 'StorageNode', '__httpeuclid_esa_orgschemasysdss_fileContainer_httpeuclid_esa_orgschemasysdssStorageNode', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 121, 12), )
    StorageNode = property(__StorageNode.value, __StorageNode.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}FileStatus uses Python identifier FileStatus
    __FileStatus = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'FileStatus'), 'FileStatus', '__httpeuclid_esa_orgschemasysdss_fileContainer_httpeuclid_esa_orgschemasysdssFileStatus', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 122, 12), )
    FileStatus = property(__FileStatus.value, __FileStatus.set, None, None)
    # Attribute storagetype uses Python identifier storagetype
    __storagetype = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'storagetype'), 'storagetype', '__httpeuclid_esa_orgschemasysdss_fileContainer_storagetype', pyxb.binding.datatypes.string)
    __storagetype._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 124, 9)
    __storagetype._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 124, 9)
    storagetype = property(__storagetype.value, __storagetype.set, None, u'\n                        Type of the storage - online, near-on-line\n                ')
    # Map element/attribute XML names to their declarations.
    _ElementMap.update({
        __Id.name() : __Id,
        __Path.name() : __Path,
        __URL.name() : __URL,
        __StorageNode.name() : __StorageNode,
        __FileStatus.name() : __FileStatus
    })
    _AttributeMap.update({
        __storagetype.name() : __storagetype
    })
Namespace.addCategoryObject('typeBinding', u'fileContainer', fileContainer)
# Complex type {http://euclid.esa.org/schema/sys/dss}storageNode with content type ELEMENT_ONLY
class storageNode (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for storageNode: details for accessing a storage node.
    Child elements: Id, Protocol, Domain, Port, BasePath, Sdc (each
    documented via its property docstring below)."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'storageNode')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 133, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://euclid.esa.org/schema/sys/dss}Id uses Python identifier Id
    __Id = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Id'), 'Id', '__httpeuclid_esa_orgschemasysdss_storageNode_httpeuclid_esa_orgschemasysdssId', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 138, 12), )
    Id = property(__Id.value, __Id.set, None, u'Should be of the form: sdc_protocol_domain_port_basePath')
    # Element {http://euclid.esa.org/schema/sys/dss}Protocol uses Python identifier Protocol
    __Protocol = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Protocol'), 'Protocol', '__httpeuclid_esa_orgschemasysdss_storageNode_httpeuclid_esa_orgschemasysdssProtocol', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 143, 12), )
    Protocol = property(__Protocol.value, __Protocol.set, None, u'Protocol used to retrieve the file from this storage node.')
    # Element {http://euclid.esa.org/schema/sys/dss}Domain uses Python identifier Domain
    __Domain = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Domain'), 'Domain', '__httpeuclid_esa_orgschemasysdss_storageNode_httpeuclid_esa_orgschemasysdssDomain', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 148, 12), )
    Domain = property(__Domain.value, __Domain.set, None, u'Name of the domain for this storage node.')
    # Element {http://euclid.esa.org/schema/sys/dss}Port uses Python identifier Port
    __Port = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Port'), 'Port', '__httpeuclid_esa_orgschemasysdss_storageNode_httpeuclid_esa_orgschemasysdssPort', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 153, 12), )
    Port = property(__Port.value, __Port.set, None, u'Port number this storage node can be accessed.')
    # Element {http://euclid.esa.org/schema/sys/dss}BasePath uses Python identifier BasePath
    __BasePath = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'BasePath'), 'BasePath', '__httpeuclid_esa_orgschemasysdss_storageNode_httpeuclid_esa_orgschemasysdssBasePath', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 158, 12), )
    BasePath = property(__BasePath.value, __BasePath.set, None, u'Root path for the where to refer file locations to.')
    # Element {http://euclid.esa.org/schema/sys/dss}Sdc uses Python identifier Sdc
    __Sdc = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Sdc'), 'Sdc', '__httpeuclid_esa_orgschemasysdss_storageNode_httpeuclid_esa_orgschemasysdssSdc', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 163, 12), )
    Sdc = property(__Sdc.value, __Sdc.set, None, u'Name of the SDC this storage node is associated with.')
    # Map each XML element name to its declaration for content processing.
    _ElementMap.update({
        __Id.name() : __Id,
        __Protocol.name() : __Protocol,
        __Domain.name() : __Domain,
        __Port.name() : __Port,
        __BasePath.name() : __BasePath,
        __Sdc.name() : __Sdc
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'storageNode', storageNode)
# Complex type {http://euclid.esa.org/schema/sys/dss}fileDescriptor with content type ELEMENT_ONLY
class fileDescriptor (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for fileDescriptor: descriptor of a file used to store
    transient data (e.g. data managed by the IAL until moved to the
    workspace as pipeline input).  Child elements: Id, Filename, Path,
    StorageId."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'fileDescriptor')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 172, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://euclid.esa.org/schema/sys/dss}Id uses Python identifier Id
    __Id = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Id'), 'Id', '__httpeuclid_esa_orgschemasysdss_fileDescriptor_httpeuclid_esa_orgschemasysdssId', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 180, 12), )
    Id = property(__Id.value, __Id.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}Filename uses Python identifier Filename
    __Filename = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Filename'), 'Filename', '__httpeuclid_esa_orgschemasysdss_fileDescriptor_httpeuclid_esa_orgschemasysdssFilename', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 181, 12), )
    Filename = property(__Filename.value, __Filename.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}Path uses Python identifier Path
    __Path = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'Path'), 'Path', '__httpeuclid_esa_orgschemasysdss_fileDescriptor_httpeuclid_esa_orgschemasysdssPath', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 182, 12), )
    Path = property(__Path.value, __Path.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}StorageId uses Python identifier StorageId
    __StorageId = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'StorageId'), 'StorageId', '__httpeuclid_esa_orgschemasysdss_fileDescriptor_httpeuclid_esa_orgschemasysdssStorageId', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 183, 12), )
    StorageId = property(__StorageId.value, __StorageId.set, None, None)
    # Map each XML element name to its declaration for content processing.
    _ElementMap.update({
        __Id.name() : __Id,
        __Filename.name() : __Filename,
        __Path.name() : __Path,
        __StorageId.name() : __StorageId
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'fileDescriptor', fileDescriptor)
# Complex type {http://euclid.esa.org/schema/sys/dss}dataDistributionDefinitionElement with content type ELEMENT_ONLY
class dataDistributionDefinitionElement (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for dataDistributionDefinitionElement: a single file
    operation within a data-distribution order (DDO).  Child elements:
    DDOElementId, DDOElementStatus, DDOElementCommand, DDOElementFilename,
    DDOElementFrom, DDOElementTo."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'dataDistributionDefinitionElement')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 211, 5)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://euclid.esa.org/schema/sys/dss}DDOElementId uses Python identifier DDOElementId
    __DDOElementId = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOElementId'), 'DDOElementId', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinitionElement_httpeuclid_esa_orgschemasysdssDDOElementId', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 218, 12), )
    DDOElementId = property(__DDOElementId.value, __DDOElementId.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}DDOElementStatus uses Python identifier DDOElementStatus
    __DDOElementStatus = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOElementStatus'), 'DDOElementStatus', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinitionElement_httpeuclid_esa_orgschemasysdssDDOElementStatus', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 219, 12), )
    DDOElementStatus = property(__DDOElementStatus.value, __DDOElementStatus.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}DDOElementCommand uses Python identifier DDOElementCommand
    __DDOElementCommand = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOElementCommand'), 'DDOElementCommand', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinitionElement_httpeuclid_esa_orgschemasysdssDDOElementCommand', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 220, 12), )
    DDOElementCommand = property(__DDOElementCommand.value, __DDOElementCommand.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}DDOElementFilename uses Python identifier DDOElementFilename
    __DDOElementFilename = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOElementFilename'), 'DDOElementFilename', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinitionElement_httpeuclid_esa_orgschemasysdssDDOElementFilename', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 221, 12), )
    DDOElementFilename = property(__DDOElementFilename.value, __DDOElementFilename.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}DDOElementFrom uses Python identifier DDOElementFrom
    __DDOElementFrom = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOElementFrom'), 'DDOElementFrom', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinitionElement_httpeuclid_esa_orgschemasysdssDDOElementFrom', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 222, 12), )
    DDOElementFrom = property(__DDOElementFrom.value, __DDOElementFrom.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}DDOElementTo uses Python identifier DDOElementTo
    __DDOElementTo = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOElementTo'), 'DDOElementTo', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinitionElement_httpeuclid_esa_orgschemasysdssDDOElementTo', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 223, 12), )
    DDOElementTo = property(__DDOElementTo.value, __DDOElementTo.set, None, None)
    # Map each XML element name to its declaration for content processing.
    _ElementMap.update({
        __DDOElementId.name() : __DDOElementId,
        __DDOElementStatus.name() : __DDOElementStatus,
        __DDOElementCommand.name() : __DDOElementCommand,
        __DDOElementFilename.name() : __DDOElementFilename,
        __DDOElementFrom.name() : __DDOElementFrom,
        __DDOElementTo.name() : __DDOElementTo
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'dataDistributionDefinitionElement', dataDistributionDefinitionElement)
# Complex type {http://euclid.esa.org/schema/sys/dss}dataDistributionDefinition with content type ELEMENT_ONLY
class dataDistributionDefinition (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for dataDistributionDefinition: the pattern for all
    commands issued to the DSS (submit file, retrieve file, copy, delete,
    check).  Child elements: DDOId, DDOOrigin, DDOPPOId, DDOStatus, and
    repeatable DDOElements entries (one per file operation)."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'dataDistributionDefinition')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 228, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://euclid.esa.org/schema/sys/dss}DDOId uses Python identifier DDOId
    __DDOId = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOId'), 'DDOId', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinition_httpeuclid_esa_orgschemasysdssDDOId', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 240, 12), )
    DDOId = property(__DDOId.value, __DDOId.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}DDOOrigin uses Python identifier DDOOrigin
    __DDOOrigin = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOOrigin'), 'DDOOrigin', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinition_httpeuclid_esa_orgschemasysdssDDOOrigin', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 241, 12), )
    DDOOrigin = property(__DDOOrigin.value, __DDOOrigin.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}DDOPPOId uses Python identifier DDOPPOId
    __DDOPPOId = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOPPOId'), 'DDOPPOId', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinition_httpeuclid_esa_orgschemasysdssDDOPPOId', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 242, 12), )
    DDOPPOId = property(__DDOPPOId.value, __DDOPPOId.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}DDOStatus uses Python identifier DDOStatus
    __DDOStatus = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOStatus'), 'DDOStatus', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinition_httpeuclid_esa_orgschemasysdssDDOStatus', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 243, 12), )
    DDOStatus = property(__DDOStatus.value, __DDOStatus.set, None, None)
    # Element {http://euclid.esa.org/schema/sys/dss}DDOElements uses Python identifier DDOElements
    # The True flag appears to declare DDOElements as plural/repeatable,
    # unlike the False used for the scalar elements above.
    __DDOElements = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'DDOElements'), 'DDOElements', '__httpeuclid_esa_orgschemasysdss_dataDistributionDefinition_httpeuclid_esa_orgschemasysdssDDOElements', True, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 244, 12), )
    DDOElements = property(__DDOElements.value, __DDOElements.set, None, None)
    # Map each XML element name to its declaration for content processing.
    _ElementMap.update({
        __DDOId.name() : __DDOId,
        __DDOOrigin.name() : __DDOOrigin,
        __DDOPPOId.name() : __DDOPPOId,
        __DDOStatus.name() : __DDOStatus,
        __DDOElements.name() : __DDOElements
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'dataDistributionDefinition', dataDistributionDefinition)
# Complex type {http://euclid.esa.org/schema/sys/dss}dataContainer with content type ELEMENT_ONLY
class dataContainer (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for dataContainer: a data entity identified by a unique
    name, possibly available as identical copies at different locations.
    Holds a single FileName element plus a required filestatus attribute
    typed by dataContainerFileStatus (declared elsewhere in this module)."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'dataContainer')
    _XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 64, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://euclid.esa.org/schema/sys/dss}FileName uses Python identifier FileName
    __FileName = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, u'FileName'), 'FileName', '__httpeuclid_esa_orgschemasysdss_dataContainer_httpeuclid_esa_orgschemasysdssFileName', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 71, 12), )
    FileName = property(__FileName.value, __FileName.set, None, None)
    # Attribute filestatus uses Python identifier filestatus
    # required=True: documents missing this attribute fail validation.
    __filestatus = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'filestatus'), 'filestatus', '__httpeuclid_esa_orgschemasysdss_dataContainer_filestatus', dataContainerFileStatus, required=True)
    __filestatus._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 73, 8)
    __filestatus._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 73, 8)
    filestatus = property(__filestatus.value, __filestatus.set, None, u'\n                The status of the file - committed, validated, archived, deleted \n            ')
    # Map element/attribute XML names to their declarations.
    _ElementMap.update({
        __FileName.name() : __FileName
    })
    _AttributeMap.update({
        __filestatus.name() : __filestatus
    })
Namespace.addCategoryObject('typeBinding', u'dataContainer', dataContainer)
# Attach the concrete element bindings for checksumType's children:
# Algorithm is typed by checksumAlgorithm (declared elsewhere in this
# module); Value is a plain xs:string.
checksumType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Algorithm'), checksumAlgorithm, scope=checksumType, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 44, 12)))
checksumType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Value'), pyxb.binding.datatypes.string, scope=checksumType, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 45, 12)))
def _BuildAutomaton ():
    """Build the content-model automaton for checksumType: Algorithm
    followed by Value (st_0 -> st_1, no counters).  Called exactly once,
    immediately below, and removed from the module namespace after use."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac
    counters = set()
    states = []
    # final_update=None: st_0 (Algorithm) is not an accepting state, so a
    # document cannot end after Algorithm alone.
    final_update = None
    symbol = pyxb.binding.content.ElementUse(checksumType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'Algorithm')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 44, 12))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # A (possibly empty) final_update set marks st_1 (Value) as accepting.
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(checksumType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'Value')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 45, 12))
    st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # Single transition st_0 -> st_1 with no counter updates; st_1 has no
    # outgoing transitions.
    transitions = []
    transitions.append(fac.Transition(st_1, [
        ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
checksumType._Automaton = _BuildAutomaton()
# Attach the concrete element bindings for dataContainerStorage's children.
# Filename and CreationDate use types from the imported sys dictionary
# binding; CheckSum and Location reference the local checksumType and
# fileContainer complex types defined above.
dataContainerStorage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Id'), pyxb.binding.datatypes.string, scope=dataContainerStorage, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 89, 12)))
dataContainerStorage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Filename'), _ImportedBinding_euclid_dm__sys.dataFileName, scope=dataContainerStorage, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 90, 12)))
dataContainerStorage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Description'), pyxb.binding.datatypes.string, scope=dataContainerStorage, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 91, 12)))
dataContainerStorage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Type'), pyxb.binding.datatypes.string, scope=dataContainerStorage, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 92, 12)))
dataContainerStorage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'CreationDate'), _ImportedBinding_euclid_dm__sys.systemDateTime, scope=dataContainerStorage, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 93, 12)))
dataContainerStorage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'CheckSum'), checksumType, scope=dataContainerStorage, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 94, 12)))
dataContainerStorage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Location'), fileContainer, scope=dataContainerStorage, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 95, 12)))
def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 95, 12))
counters.add(cc_0)
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(dataContainerStorage._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'Id')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 89, 12))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = None
symbol = pyxb.binding.content.ElementUse(dataContainerStorage._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'Filename')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 90, 12))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = None
symbol = pyxb.binding.content.ElementUse(dataContainerStorage._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'Description')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 91, 12))
st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = None
symbol = pyxb.binding.content.ElementUse(dataContainerStorage._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'Type')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 92, 12))
st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = None
symbol = pyxb.binding.content.ElementUse(dataContainerStorage._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'CreationDate')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 93, 12))
st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
symbol = pyxb.binding.content.ElementUse(dataContainerStorage._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'CheckSum')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 94, 12))
st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(dataContainerStorage._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'Location')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 95, 12))
st_6 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_5, [
]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
st_6._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
dataContainerStorage._Automaton = _BuildAutomaton_()
# Content model for fileContainer: register its child elements, then build
# the finite-automaton (FAC) validator for their ordering.
fileContainer._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Id'), pyxb.binding.datatypes.string, scope=fileContainer, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 118, 12)))
fileContainer._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Path'), pyxb.binding.datatypes.string, scope=fileContainer, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 119, 12)))
fileContainer._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'URL'), pyxb.binding.datatypes.string, scope=fileContainer, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 120, 12)))
fileContainer._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'StorageNode'), storageNode, scope=fileContainer, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 121, 12)))
fileContainer._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'FileStatus'), fileContainerFileStatus, scope=fileContainer, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 122, 12)))
def _BuildAutomaton_2 ():
    """Build the FAC automaton for fileContainer's content model.

    Element order: Id, Path, optional URL (cc_0), StorageNode, optional
    FileStatus (cc_1).
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac
    counters = set()
    # FIX: the generator emitted Python 2 long literals (``0L``) for the two
    # counter minima, a SyntaxError under Python 3; ``0`` is equivalent.
    # URL occurs at most once (minOccurs=0, maxOccurs=1).
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 120, 12))
    counters.add(cc_0)
    # FileStatus occurs at most once (minOccurs=0, maxOccurs=1).
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 122, 12))
    counters.add(cc_1)
    states = []
    final_update = None
    symbol = pyxb.binding.content.ElementUse(fileContainer._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'Id')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 118, 12))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(fileContainer._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'Path')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 119, 12))
    st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(fileContainer._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'URL')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 120, 12))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # StorageNode is the first state at which the document may legally end.
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(fileContainer._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'StorageNode')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 121, 12))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(fileContainer._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'FileStatus')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 122, 12))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        ]))
    st_0._set_transitionSet(transitions)
    # After Path: either the optional URL or straight to StorageNode.
    transitions = []
    transitions.append(fac.Transition(st_2, [
        ]))
    transitions.append(fac.Transition(st_3, [
        ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_4, [
        ]))
    st_3._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_4._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
fileContainer._Automaton = _BuildAutomaton_2()
# Content model for storageNode: register its child elements, then build the
# finite-automaton (FAC) validator for their ordering.
storageNode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Id'), pyxb.binding.datatypes.string, scope=storageNode, documentation=u'Should be of the form: sdc_protocol_domain_port_basePath', location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 138, 12)))
storageNode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Protocol'), protocol, scope=storageNode, documentation=u'Protocol used to retrieve the file from this storage node.', location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 143, 12)))
storageNode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Domain'), pyxb.binding.datatypes.string, scope=storageNode, documentation=u'Name of the domain for this storage node.', location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 148, 12)))
storageNode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Port'), pyxb.binding.datatypes.int, scope=storageNode, documentation=u'Port number this storage node can be accessed.', location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 153, 12)))
storageNode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'BasePath'), pyxb.binding.datatypes.string, scope=storageNode, documentation=u'Root path for the where to refer file locations to.', location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 158, 12)))
storageNode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Sdc'), _ImportedBinding_euclid_dm__sys.infraName, scope=storageNode, documentation=u'Name of the SDC this storage node is associated with.', location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 163, 12)))
def _BuildAutomaton_3 ():
    """Build the FAC automaton for storageNode: a fixed linear sequence
    Id -> Protocol -> Domain -> Port -> BasePath -> Sdc (no counters).
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_3
    del _BuildAutomaton_3
    import pyxb.utils.fac as fac
    schema = u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd'
    # (element tag, schema line number) in content-model order.
    element_specs = [
        (u'Id', 138),
        (u'Protocol', 143),
        (u'Domain', 148),
        (u'Port', 153),
        (u'BasePath', 158),
        (u'Sdc', 163),
    ]
    last_index = len(element_specs) - 1
    states = []
    for index, (tag, line) in enumerate(element_specs):
        use = pyxb.binding.content.ElementUse(
            storageNode._UseForTag(pyxb.namespace.ExpandedName(Namespace, tag)),
            pyxb.utils.utility.Location(schema, line, 12))
        # Only the final element (Sdc) may terminate the automaton.
        states.append(fac.State(use,
                                is_initial=(index == 0),
                                final_update=(set() if index == last_index else None),
                                is_unordered_catenation=False))
    # Each state advances unconditionally to its successor; the final state
    # has no outgoing transitions.
    for current, successor in zip(states, states[1:]):
        current._set_transitionSet([fac.Transition(successor, [])])
    states[-1]._set_transitionSet([])
    return fac.Automaton(states, set(), False, containing_state=None)
storageNode._Automaton = _BuildAutomaton_3()
# Content model for fileDescriptor: register its child elements, then build
# the finite-automaton (FAC) validator for their ordering.
fileDescriptor._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Id'), pyxb.binding.datatypes.string, scope=fileDescriptor, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 180, 12)))
fileDescriptor._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Filename'), pyxb.binding.datatypes.string, scope=fileDescriptor, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 181, 12)))
fileDescriptor._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Path'), pyxb.binding.datatypes.string, scope=fileDescriptor, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 182, 12)))
fileDescriptor._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'StorageId'), pyxb.binding.datatypes.string, scope=fileDescriptor, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 183, 12)))
def _BuildAutomaton_4 ():
    """Build the FAC automaton for fileDescriptor: a fixed linear sequence
    Id -> Filename -> Path -> StorageId (no counters).
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_4
    del _BuildAutomaton_4
    import pyxb.utils.fac as fac
    schema = u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd'
    # (element tag, schema line number) in content-model order.
    element_specs = [
        (u'Id', 180),
        (u'Filename', 181),
        (u'Path', 182),
        (u'StorageId', 183),
    ]
    last_index = len(element_specs) - 1
    states = []
    for index, (tag, line) in enumerate(element_specs):
        use = pyxb.binding.content.ElementUse(
            fileDescriptor._UseForTag(pyxb.namespace.ExpandedName(Namespace, tag)),
            pyxb.utils.utility.Location(schema, line, 12))
        # Only the final element (StorageId) may terminate the automaton.
        states.append(fac.State(use,
                                is_initial=(index == 0),
                                final_update=(set() if index == last_index else None),
                                is_unordered_catenation=False))
    # Each state advances unconditionally to its successor; the final state
    # has no outgoing transitions.
    for current, successor in zip(states, states[1:]):
        current._set_transitionSet([fac.Transition(successor, [])])
    states[-1]._set_transitionSet([])
    return fac.Automaton(states, set(), False, containing_state=None)
fileDescriptor._Automaton = _BuildAutomaton_4()
# Content model for dataDistributionDefinitionElement: register its child
# elements, then build the finite-automaton (FAC) validator for their order.
dataDistributionDefinitionElement._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOElementId'), pyxb.binding.datatypes.string, scope=dataDistributionDefinitionElement, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 218, 12)))
dataDistributionDefinitionElement._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOElementStatus'), dataDistributionCommandStatus, scope=dataDistributionDefinitionElement, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 219, 12)))
dataDistributionDefinitionElement._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOElementCommand'), dataDistributionCommandType, scope=dataDistributionDefinitionElement, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 220, 12)))
dataDistributionDefinitionElement._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOElementFilename'), _ImportedBinding_euclid_dm__sys.dataFileName, scope=dataDistributionDefinitionElement, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 221, 12)))
dataDistributionDefinitionElement._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOElementFrom'), fileContainer, scope=dataDistributionDefinitionElement, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 222, 12)))
dataDistributionDefinitionElement._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOElementTo'), fileContainer, scope=dataDistributionDefinitionElement, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 223, 12)))
def _BuildAutomaton_5 ():
    """Build the FAC automaton for dataDistributionDefinitionElement.

    Element order: DDOElementId, DDOElementStatus, DDOElementCommand,
    DDOElementFilename, then optional DDOElementFrom (cc_0) and optional
    DDOElementTo (cc_1).
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_5
    del _BuildAutomaton_5
    import pyxb.utils.fac as fac
    counters = set()
    # FIX: the generator emitted Python 2 long literals (``0L``) for the two
    # counter minima, a SyntaxError under Python 3; ``0`` is equivalent.
    # DDOElementFrom occurs at most once (minOccurs=0, maxOccurs=1).
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 222, 12))
    counters.add(cc_0)
    # DDOElementTo occurs at most once (minOccurs=0, maxOccurs=1).
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 223, 12))
    counters.add(cc_1)
    states = []
    final_update = None
    symbol = pyxb.binding.content.ElementUse(dataDistributionDefinitionElement._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'DDOElementId')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 218, 12))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(dataDistributionDefinitionElement._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'DDOElementStatus')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 219, 12))
    st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(dataDistributionDefinitionElement._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'DDOElementCommand')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 220, 12))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # DDOElementFilename is the first state at which the document may end.
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(dataDistributionDefinitionElement._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'DDOElementFilename')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 221, 12))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(dataDistributionDefinitionElement._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'DDOElementFrom')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 222, 12))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(dataDistributionDefinitionElement._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'DDOElementTo')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 223, 12))
    st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_5)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_3, [
        ]))
    st_2._set_transitionSet(transitions)
    # After Filename: either the optional From element or straight to To.
    transitions = []
    transitions.append(fac.Transition(st_4, [
        ]))
    transitions.append(fac.Transition(st_5, [
        ]))
    st_3._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_4._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_5._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
dataDistributionDefinitionElement._Automaton = _BuildAutomaton_5()
# Content model for dataDistributionDefinition: register its child elements,
# then build the finite-automaton (FAC) validator for their ordering.
dataDistributionDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOId'), pyxb.binding.datatypes.string, scope=dataDistributionDefinition, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 240, 12)))
dataDistributionDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOOrigin'), _ImportedBinding_euclid_dm__sys.infraName, scope=dataDistributionDefinition, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 241, 12)))
dataDistributionDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOPPOId'), pyxb.binding.datatypes.string, scope=dataDistributionDefinition, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 242, 12)))
dataDistributionDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOStatus'), dataDistributionCommandStatus, scope=dataDistributionDefinition, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 243, 12)))
dataDistributionDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DDOElements'), dataDistributionDefinitionElement, scope=dataDistributionDefinition, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 244, 12)))
def _BuildAutomaton_6 ():
    """Build the FAC automaton for dataDistributionDefinition.

    Linear sequence DDOId -> DDOOrigin -> DDOPPOId -> DDOStatus followed by
    one or more DDOElements (the final state loops onto itself).
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_6
    del _BuildAutomaton_6
    import pyxb.utils.fac as fac
    schema = u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd'
    # (element tag, schema line number) in content-model order.
    element_specs = [
        (u'DDOId', 240),
        (u'DDOOrigin', 241),
        (u'DDOPPOId', 242),
        (u'DDOStatus', 243),
        (u'DDOElements', 244),
    ]
    last_index = len(element_specs) - 1
    states = []
    for index, (tag, line) in enumerate(element_specs):
        use = pyxb.binding.content.ElementUse(
            dataDistributionDefinition._UseForTag(pyxb.namespace.ExpandedName(Namespace, tag)),
            pyxb.utils.utility.Location(schema, line, 12))
        # Only the final element (DDOElements) may terminate the automaton.
        states.append(fac.State(use,
                                is_initial=(index == 0),
                                final_update=(set() if index == last_index else None),
                                is_unordered_catenation=False))
    # Each state advances unconditionally to its successor.
    for current, successor in zip(states, states[1:]):
        current._set_transitionSet([fac.Transition(successor, [])])
    # DDOElements may repeat: the final state transitions back onto itself.
    states[-1]._set_transitionSet([fac.Transition(states[-1], [])])
    return fac.Automaton(states, set(), False, containing_state=None)
dataDistributionDefinition._Automaton = _BuildAutomaton_6()
# Content model for dataContainer: a single mandatory FileName child.
dataContainer._AddElement(pyxb.binding.basis.element(
    pyxb.namespace.ExpandedName(Namespace, u'FileName'),
    _ImportedBinding_euclid_dm__sys.dataFileName,
    scope=dataContainer,
    location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 71, 12)))
def _BuildAutomaton_7 ():
    """Build the FAC automaton for dataContainer: exactly one FileName."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_7
    del _BuildAutomaton_7
    import pyxb.utils.fac as fac
    use = pyxb.binding.content.ElementUse(
        dataContainer._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'FileName')),
        pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/sys/dss/euc-test-dss.xsd', 71, 12))
    # Single state: initial, accepting, and with no outgoing transitions.
    only_state = fac.State(use, is_initial=True, final_update=set(),
                           is_unordered_catenation=False)
    only_state._set_transitionSet([])
    return fac.Automaton([only_state], set(), False, containing_state=None)
dataContainer._Automaton = _BuildAutomaton_7()
|
sfarrensREPO_NAMEsfofPATH_START.@sfof_extracted@sfof-master@sfof@python@euclid@dm@_dss.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/lighting/__init__.py",
"type": "Python"
}
|
# Package initializer for the isosurface.lighting validators
# (plotly auto-generated lazy-import pattern).
import sys
from typing import TYPE_CHECKING

# Module-level __getattr__/__dir__ (PEP 562) only exist on Python 3.7+, so
# on older interpreters — or while a static type checker is running — the
# validator classes are imported eagerly.  Otherwise they are registered
# with relative_import for lazy loading, keeping package import time low.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._vertexnormalsepsilon import VertexnormalsepsilonValidator
    from ._specular import SpecularValidator
    from ._roughness import RoughnessValidator
    from ._fresnel import FresnelValidator
    from ._facenormalsepsilon import FacenormalsepsilonValidator
    from ._diffuse import DiffuseValidator
    from ._ambient import AmbientValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns the (__all__, __getattr__, __dir__) triple
    # that implements the deferred imports listed below.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._vertexnormalsepsilon.VertexnormalsepsilonValidator",
            "._specular.SpecularValidator",
            "._roughness.RoughnessValidator",
            "._fresnel.FresnelValidator",
            "._facenormalsepsilon.FacenormalsepsilonValidator",
            "._diffuse.DiffuseValidator",
            "._ambient.AmbientValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@lighting@__init__.py@.PATH_END.py
|
{
"filename": "mle_rates_for_realtime.py",
"repo_name": "Swift-BAT/NITRATES",
"repo_path": "NITRATES_extracted/NITRATES-main/nitrates/analysis_seeds/mle_rates_for_realtime.py",
"type": "Python"
}
|
import numpy as np
from astropy.io import fits
from astropy.table import Table, vstack
from scipy import optimize, stats
import argparse
import os
import multiprocessing as mp
from ..lib.logllh_ebins_funcs import get_cnt_ebins_normed, log_pois_prob
from ..response.ray_trace_funcs import ray_trace_square
from ..lib.drm_funcs import get_ebin_ind_edges, DRMs
from ..lib.event2dpi_funcs import det2dpis, mask_detxy
from ..lib.trans_func import get_pb_absortion
# Pre-defined detector-region configurations used by the rates analysis.
# Each entry maps a region name to: the detector quadrants it covers
# ("quads"), the response (DRM) file and its (imx, imy) pointing, and a
# numeric "id".  The imx/imy values are encoded in the DRM file names.
quad_dicts = {
    "all": {
        "quads": [0, 1, 2, 3],
        "drm_fname": "drm_0.200_0.150_.fits",
        "imx": 0.2,
        "imy": 0.15,
        "id": 0,
    },
    "left": {
        "quads": [0, 1],
        "drm_fname": "drm_1.000_0.150_.fits",
        "imx": 1.0,
        "imy": 0.15,
        "id": 1,
    },
    "top": {
        "quads": [1, 2],
        "drm_fname": "drm_0.000_-0.500_.fits",
        "imx": 0.0,
        "imy": -0.5,
        "id": 2,
    },
    "right": {
        "quads": [2, 3],
        "drm_fname": "drm_-1.000_0.150_.fits",
        "imx": -1.0,
        "imy": 0.15,
        "id": 3,
    },
    "bottom": {
        "quads": [3, 0],
        "drm_fname": "drm_0.000_0.450_.fits",
        "imx": 0.0,
        "imy": 0.45,
        "id": 4,
    },
    "quad0": {
        "quads": [0],
        "drm_fname": "drm_1.000_0.500_.fits",
        "imx": 1.0,
        "imy": 0.5,
        "id": 5,
    },
    "quad1": {
        "quads": [1],
        "drm_fname": "drm_0.800_-0.400_.fits",
        "imx": 0.8,
        "imy": -0.4,
        "id": 6,
    },
    "quad2": {
        "quads": [2],
        "drm_fname": "drm_-0.750_-0.450_.fits",
        "imx": -0.75,
        "imy": -0.45,
        "id": 7,
    },
    "quad3": {
        "quads": [3],
        "drm_fname": "drm_-1.100_0.500_.fits",
        "imx": -1.1,
        "imy": 0.5,
        "id": 8,
    },
}
def dpi2cnts_perquad(dpi):
x_mid = 142
y_mid = 86
quads = []
quads.append(np.sum(dpi[:y_mid, :x_mid]))
quads.append(np.sum(dpi[y_mid:, :x_mid]))
quads.append(np.sum(dpi[y_mid:, x_mid:]))
quads.append(np.sum(dpi[:y_mid, x_mid:]))
return quads
def ev2quad_cnts(ev):
    """Count events per detector quadrant from their DETX/DETY positions.

    Splits at DETX = 142 and DETY = 86 (events exactly on a split line in
    DETX or DETY after the first quadrant fall into no quadrant, matching
    the strict inequalities used here).  Returns a length-4 array.
    """
    x_split, y_split = 142, 86
    left = ev["DETX"] < x_split
    right = ev["DETX"] > x_split
    low = ev["DETY"] < y_split
    high = ev["DETY"] > y_split
    return np.array(
        [
            np.sum(left & low),
            np.sum(left & high),
            np.sum(right & high),
            np.sum(right & low),
        ]
    )
def dmask2ndets_perquad(dmask):
    """Number of enabled detectors (dmask == 0) in each quadrant."""
    enabled = (dmask == 0).reshape(dmask.shape)
    return dpi2cnts_perquad(enabled)
def quads2drm_imxy():
    """DRM (imx, imy) pointings for the four detector quadrants.

    Order matches quad ids 0-3 (same positions as the quad0..quad3
    entries of quad_dicts).

    Bug fix: the original list literal was missing a comma after the
    first tuple -- ``(1.0, 0.5)(0.8, -0.4)`` -- which *called* the tuple
    and raised TypeError whenever this function ran.
    """
    # bottom left, top left, top right, bottom right
    quads_imxy = [(1.0, 0.5), (0.8, -0.4), (-0.75, -0.45), (-1.1, 0.5)]
    return quads_imxy
def halves2drm_imxy():
    """DRM (imx, imy) pointings for the left, top, right, bottom halves."""
    imx_vals = (1.0, 0.0, -1.0, 0.0)
    imy_vals = (0.15, -0.5, 0.15, 0.45)
    return list(zip(imx_vals, imy_vals))
def get_abs_cor_rates(imx, imy, drm):
    """Absorption correction factors per DRM energy channel.

    Evaluates the Pb absorption at the channel mid-energies for pointing
    (imx, imy) and returns (2 - absorption) / absorption per channel.
    """
    e_lo = drm[1].data["ENERG_LO"]
    e_hi = drm[1].data["ENERG_HI"]
    e_mids = (e_lo + e_hi) / 2.0
    absorbs = get_pb_absortion(e_mids, imx, imy)
    return (2.0 - absorbs) / absorbs
def profiled_bkg_llh(
    data_cnts, sig_rate, sdt, off_cnts, odt, off_cnts_err, ret_f=False
):
    """Poisson log-likelihood with the background rate profiled out.

    Given on-source counts/exposure (data_cnts, sdt), off-source
    counts/exposure (off_cnts, odt) with a Gaussian error term
    (off_cnts_err), and a trial signal rate, f_i below is the closed-form
    background rate that extremizes the combined likelihood (root of a
    quadratic in the background rate; d_i is the discriminant).  Returns
    the summed log-likelihood, plus the profiled rates when ret_f is True.
    NOTE(review): the derivation is not shown here -- confirm the algebra
    against the analysis notes before modifying.
    """
    sig2 = off_cnts_err**2
    # discriminant of the quadratic for the profiled background rate
    d_i = np.sqrt(
        (sdt * sig2 - odt * off_cnts + (odt**2) * sig_rate) ** 2
        - 4.0
        * (odt**2)
        * (sdt * sig2 * sig_rate - data_cnts * sig2 - odt * off_cnts * sig_rate)
    )
    # profiled background rate per bin
    f_i = (-(sdt * sig2 - odt * off_cnts + sig_rate * (odt**2)) + d_i) / (
        2.0 * odt**2
    )
    # Poisson on-source term + Gaussian off-source term, with a
    # data-dependent constant subtracted
    llh = (
        sdt * (sig_rate + f_i)
        - data_cnts * np.log(sdt * (sig_rate + f_i))
        + np.square(off_cnts - odt * f_i) / (2.0 * sig2)
        - data_cnts * (1.0 - np.log(data_cnts))
    )
    if ret_f:
        return np.sum(llh), f_i
    return np.sum(llh)
def gauss_sig_bkg_nllh(cnts, nsig, nbkg, bkg_err):
    """Gaussian negative log-likelihood for counts = signal + background.

    The variance combines Poisson terms (nbkg + nsig) with the squared
    background systematic bkg_err.
    """
    variance = nbkg + nsig + bkg_err**2
    residual = cnts - nbkg - nsig
    log_probs = stats.norm.logpdf(residual, scale=np.sqrt(variance))
    return -1 * np.sum(log_probs)
def rates_llh(data, nsig, sig_e_normed, bkg_cnts, bkg_err, sig_dt, bkg_dt, ret_f=False):
    """Negative log-likelihood for nsig signal counts spread over energy bins.

    The signal counts are distributed across energy bins by the normalized
    spectrum sig_e_normed and compared to data with the Gaussian
    signal+background likelihood.  NOTE(review): sig_dt, bkg_dt and ret_f
    are unused in the active path; they belong to the profiled-likelihood
    variant kept commented out below.
    """
    # sig_rates = (nsig/sig_dt)*sig_e_normed
    # if ret_f:
    #     llh, f = profiled_bkg_llh(data, sig_rates, sig_dt,\
    #                     bkg_cnts, bkg_dt, bkg_err,\
    #                     ret_f=ret_f)
    #     return llh, f
    # llh = profiled_bkg_llh(data, sig_rates, sig_dt,\
    #                     bkg_cnts, bkg_dt, bkg_err,\
    #                     ret_f=ret_f)
    nsigs = nsig * sig_e_normed
    llh = gauss_sig_bkg_nllh(data, nsigs, bkg_cnts, bkg_err)
    return llh
def rate_llh2min(theta, data, bkg_cnts, bkg_err, sig_dt, bkg_dt, cnorm_obj):
    """Objective for the rate MLE: NLLH as a function of (log10 Nsig, index)."""
    log_nsig, ind = theta[0], theta[1]
    # hard prior: reject spectral indices outside [-1, 3]
    if ind < -1 or ind > 3:
        return np.inf
    cnt_ebn = cnorm_obj(ind)
    return rates_llh(data, 10.0**log_nsig, cnt_ebn, bkg_cnts, bkg_err, sig_dt, bkg_dt)
class cnts_norm_intp(object):
    """Linear interpolator over spectral index for normalized count spectra."""

    def __init__(self, cnt_ebins_norm_ind_mat, ind_ax):
        # grid of spectral indices and the matching (n_ind, n_ebins) matrix
        self.ind_ax = ind_ax
        self.cnt_ebins_norm_ind_mat = cnt_ebins_norm_ind_mat
        self.ind0 = np.min(ind_ax)
        self.ind1 = np.max(ind_ax)

    def __call__(self, ind):
        # outside the tabulated range (edges excluded) -> NaNs
        nebins = np.shape(self.cnt_ebins_norm_ind_mat)[1]
        if ind <= self.ind0 or ind >= self.ind1:
            return np.nan * np.ones(nebins)
        # nearest grid point and the neighbor on the side of ind
        i_near = np.argmin(np.abs(ind - self.ind_ax))
        i_next = i_near + 1 if ind > self.ind_ax[i_near] else i_near - 1
        gap = np.abs(self.ind_ax[i_near] - self.ind_ax[i_next])
        w_near = np.abs(ind - self.ind_ax[i_next]) / gap
        w_next = 1 - w_near
        return (
            w_near * self.cnt_ebins_norm_ind_mat[i_near]
            + w_next * self.cnt_ebins_norm_ind_mat[i_next]
        )
def get_cnts_intp_obj(ind_ax, drm, ebin_ind_edges, abs_cor):
    """Tabulate normalized count spectra on the index grid and wrap them
    in a cnts_norm_intp interpolator."""
    n_inds = len(ind_ax)
    nebins = len(ebin_ind_edges)
    norm_mat = np.zeros((n_inds, nebins))
    for row, ind in enumerate(ind_ax):
        norm_mat[row] = get_cnt_ebins_normed(ind, drm, ebin_ind_edges, abs_cor=abs_cor)
    return cnts_norm_intp(norm_mat, ind_ax)
def get_cnts_per_tbins(t_bins0, t_bins1, ebins0, ebins1, ev_data, dmask):
    """Counts per (time bin, energy bin), optionally restricted to the
    enabled detectors (dmask == 0).  dmask may be None to sum all pixels."""
    ntbins = len(t_bins0)
    nebins = len(ebins0)
    cnts_per_tbin = np.zeros((ntbins, nebins))
    for it in range(ntbins):
        in_tbin = (ev_data["TIME"] >= t_bins0[it]) & (ev_data["TIME"] < (t_bins1[it]))
        dpis = det2dpis(ev_data[in_tbin], ebins0, ebins1)
        if dmask is None:
            totals = [np.sum(dpi) for dpi in dpis]
        else:
            totals = [np.sum(dpi[(dmask == 0)]) for dpi in dpis]
        cnts_per_tbin[it] = np.array(totals)
    return cnts_per_tbin
def get_quad_cnts_tbins(tbins0, tbins1, ebins0, ebins1, evd):
    """Counts cube of shape (n_tbins, n_ebins, 4 quadrants)."""
    ntbins = len(tbins0)
    nebins = len(ebins0)
    cnts_mat = np.zeros((ntbins, nebins, 4))
    for it in range(ntbins):
        in_tbin = (evd["TIME"] >= tbins0[it]) & (evd["TIME"] < (tbins1[it]))
        ev_t = evd[in_tbin]
        for ie in range(nebins):
            in_ebin = (ev_t["ENERGY"] >= ebins0[ie]) & (
                ev_t["ENERGY"] < (ebins1[ie])
            )
            cnts_mat[it, ie] = ev2quad_cnts(ev_t[in_ebin])
    return cnts_mat
def get_cnts(ev, t_bins0, t_bins1, ebin_inds, nebins):
    """Histogram events into (time bin, energy bin) counts.

    ebin_inds gives the pre-computed energy-bin index of each event and
    must align with ev["TIME"].
    """
    ntbins = len(t_bins0)
    cnts = np.zeros((ntbins, nebins))
    times = ev["TIME"]
    for it, (t0, t1) in enumerate(zip(t_bins0, t_bins1)):
        in_tbin = (times >= t0) & (times < t1)
        sel_ebins = ebin_inds[in_tbin]
        for ie in range(nebins):
            cnts[it, ie] = np.sum(sel_ebins == ie)
    return cnts
def get_data_cube(tbins0, tbins1, ebins0, ebins1, evd, dmask_bl):
    """Integer counts cube (time, energy, detector) for enabled detectors."""
    ndets = np.sum(dmask_bl)
    ntbins = len(tbins0)
    nebins = len(ebins0)
    data_cube = np.zeros((ntbins, nebins, ndets), dtype=np.int64)
    for it, (t0, t1) in enumerate(zip(tbins0, tbins1)):
        in_tbin = (evd["TIME"] >= t0) & (evd["TIME"] < t1)
        data_cube[it] = np.array(det2dpis(evd[in_tbin], ebins0, ebins1, bl_dmask=dmask_bl))
    return data_cube
def get_lin_fits(evdata, ebins0, ebins1, args):
    """Fit linear background rates per region on 0.512 s bins around trigger.

    NOTE(review): this calls get_linear_bkg_rates, which is neither defined
    nor imported in this module as shown -- confirm where it comes from
    before relying on this function.
    """
    trig_time = args.trigtime
    # non-overlapping 0.512 s bins
    tstep = 0.512
    bin_size = 0.512
    # t_bins0 = np.arange(-15.008, 15.008, tstep) + trig_time
    # roughly +/- 51 s around the trigger
    t_bins0 = np.arange(-50.0 * 1.024, 50.0 * 1.024, tstep) + trig_time
    t_bins1 = t_bins0 + bin_size
    quad_cnts_mat = get_quad_cnts_tbins(t_bins0, t_bins1, ebins0, ebins1, evdata)
    res_dict, lin_t_ax = get_linear_bkg_rates(
        quad_cnts_mat, t_bins0, t_bins1, trig_time, quad_dicts
    )
    return res_dict, lin_t_ax
def filter_data(ev_data, dmask, emin, emax, good_t0, good_t1):
    """Select good events: inside the time window and energy band, with
    good event flags, landing on unmasked detectors."""
    mask_vals = mask_detxy(dmask, ev_data)
    keep = (
        (ev_data["TIME"] > good_t0)
        & (ev_data["TIME"] < good_t1)
        & (ev_data["EVENT_FLAGS"] < 1)
        & (mask_vals == 0)
        & (ev_data["ENERGY"] <= emax)
        & (ev_data["ENERGY"] >= emin)
    )
    return ev_data[keep]
def do_rate_mle(
    cnts_per_tbin, bkg_rate_obj, cnts_intp, t_bins0, t_bins1, bkg_err_fact=2.0
):
    """Fit (Nsig, spectral index) by MLE in every time bin.

    For each bin the background expectation comes from bkg_rate_obj, with
    the rate error inflated by bkg_err_fact.  The NLLH is minimized over
    (log10 Nsig, index) with bounded L-BFGS-B from four starting points.
    Returns (bkg_llhs, llhs, bf_nsigs, bf_inds), one value per time bin.
    """
    ntbins = len(t_bins0)
    bin_size = t_bins1[0] - t_bins0[0]
    t_ax = (t_bins0 + t_bins1) / 2.0
    bf_nsigs = np.zeros(ntbins)
    bf_inds = np.zeros(ntbins)
    llhs = np.zeros(ntbins)
    bkg_llhs = np.zeros(ntbins)
    # fixed index-1.0 spectrum, used only for the background-only NLLH term
    cnts_norm = cnts_intp(1.0)
    # bounds on (log10 Nsig, index)
    lowers = [-2.0, 0.5]
    uppers = [4.0, 2.5]
    bounds = optimize.Bounds(np.array(lowers), np.array(uppers))
    x0s = [[1.0, 1.0], [2.0, 1.0], [1.0, 2.0], [2.0, 2.0]]
    for i in range(ntbins):
        bkg_rate, bkg_rate_err = bkg_rate_obj.get_rate(t_ax[i])
        bkgdt = bkg_rate_obj.bkg_exp
        bkg_cnts = bkg_rate * bin_size
        bkg_err = bkg_err_fact * bkg_rate_err * bin_size
        bkg_llhs[i] = rates_llh(
            cnts_per_tbin[i], 0.0, cnts_norm, bkg_cnts, bkg_err, bin_size, bkgdt
        )
        x0 = [1.0, 1.5]
        _args = (cnts_per_tbin[i], bkg_cnts, bkg_err, bin_size, bkgdt, cnts_intp)
        # res = optimize.fmin(rate_llh2min, x0, args=_args, disp=False,\
        #            full_output=True)
        ress = []
        nlogls = np.zeros(len(x0s))
        for j, x0 in enumerate(x0s):
            res = optimize.minimize(
                rate_llh2min, x0, args=_args, bounds=bounds, method="L-BFGS-B"
            )
            ress.append(res)
            nlogls[j] = res.fun
        # fall back to the first start if every minimization returned NaN
        if np.all(np.isnan(nlogls)):
            best_ind = 0
        else:
            best_ind = np.nanargmin(nlogls)
        # bf_nsigs[i] = 10.**res[0][0]
        # bf_inds[i] = res[0][1]
        # llhs[i] = res[1]
        bf_nsigs[i] = 10.0 ** ress[best_ind].x[0]
        bf_inds[i] = ress[best_ind].x[1]
        llhs[i] = ress[best_ind].fun
    return bkg_llhs, llhs, bf_nsigs, bf_inds
def min_rate_mle_mp(args):
    """Minimize rate_llh2min from several starting points; return the best
    scipy OptimizeResult (worker function for the multiprocessing pool)."""
    bounds = optimize.Bounds(np.array([-2.0, 0.5]), np.array([4.0, 2.5]))
    starts = [[1.0, 1.0], [2.0, 1.0], [1.0, 2.0], [2.0, 2.0]]
    results = [
        optimize.minimize(
            rate_llh2min, start, args=args, bounds=bounds, method="L-BFGS-B"
        )
        for start in starts
    ]
    nlogls = np.array([res.fun for res in results])
    # fall back to the first start if every minimization returned NaN
    best = 0 if np.all(np.isnan(nlogls)) else np.nanargmin(nlogls)
    return results[best]
def do_rate_mle_mp(
    cnts_per_tbin, bkg_rate_obj, cnts_intp, t_bins0, t_bins1, nproc=4, bkg_err_fact=2.0
):
    """Multiprocessing version of do_rate_mle.

    Builds the per-bin fit arguments, farms the minimizations out to a pool
    of nproc workers via min_rate_mle_mp, then unpacks the results.
    Returns (bkg_llhs, llhs, bf_nsigs, bf_inds), one value per time bin.
    """
    ntbins = len(t_bins0)
    bin_size = t_bins1[0] - t_bins0[0]
    t_ax = (t_bins0 + t_bins1) / 2.0
    bf_nsigs = np.zeros(ntbins)
    bf_inds = np.zeros(ntbins)
    llhs = np.zeros(ntbins)
    bkg_llhs = np.zeros(ntbins)
    # fixed index-1.0 spectrum, used only for the background-only NLLH term
    cnts_norm = cnts_intp(1.0)
    arg_list = []
    for i in range(ntbins):
        bkg_rate, bkg_rate_err = bkg_rate_obj.get_rate(t_ax[i])
        bkgdt = bkg_rate_obj.bkg_exp
        bkg_cnts = bkg_rate * bin_size
        bkg_err = bkg_err_fact * bkg_rate_err * bin_size
        bkg_llhs[i] = rates_llh(
            cnts_per_tbin[i], 0.0, cnts_norm, bkg_cnts, bkg_err, bin_size, bkgdt
        )
        _args = (cnts_per_tbin[i], bkg_cnts, bkg_err, bin_size, bkgdt, cnts_intp)
        arg_list.append(_args)
    pool = mp.Pool(nproc)
    res_list = pool.map(min_rate_mle_mp, arg_list)
    pool.close()
    for i in range(ntbins):
        res = res_list[i]
        # bf_nsigs[i] = 10.**res[0][0]
        # bf_inds[i] = res[0][1]
        # llhs[i] = res[1]
        bf_nsigs[i] = 10.0 ** res.x[0]
        bf_inds[i] = res.x[1]
        llhs[i] = res.fun
    return bkg_llhs, llhs, bf_nsigs, bf_inds
def main(args):
    """Run the rate-MLE seed analysis around args.trigtime and write results.

    Loops over the region configurations in quad_dicts and over args.ndbl
    doublings of the time-bin duration, fitting (Nsig, index) in every time
    bin, then writes the stacked table to
    <obsid>/rate_llhs_trigtime_<trigtime>_.fits.
    """
    from ..analysis_seeds.bkg_linear_rates import get_lin_rate_quad_objs
    # energy-bin edges; the first pair of definitions is immediately
    # overwritten by the 6-bin setup below (kept, presumably, for reference)
    ebins0 = np.array([14.0, 20.0, 26.0, 36.3, 51.1, 70.9, 91.7, 118.2, 151.4])
    ebins1 = np.append(ebins0[1:], [194.9])
    ebins0 = np.array([14.0, 24.0, 36.3, 55.4, 80.0, 120.7])
    ebins1 = np.append(ebins0[1:], [194.9])
    nebins = len(ebins0)
    ev_data = fits.open(args.evf)[1].data
    dmask = fits.open(args.dmask)[0].data
    bl_dmask = dmask == 0
    # good_dt0 = args.bkgt0 - args.trigtime - 1.
    # good_dt1 = 20.
    # event-selection window in seconds relative to the trigger
    good_dt0 = -60.0
    good_dt1 = 60.0
    trig_time = args.trigtime
    good_t0 = trig_time + good_dt0
    good_t1 = trig_time + good_dt1
    ev_data0 = filter_data(ev_data, dmask, ebins0[0], ebins1[-1], good_t0, good_t1)
    ebins = np.append(ebins0, [ebins1[-1]])
    # NOTE(review): ebin_ind is computed but never used below (the
    # get_cnts path that consumed it is commented out)
    ebin_ind = np.digitize(ev_data0["ENERGY"], ebins) - 1
    bkg_lin_rate_dict = get_lin_rate_quad_objs(
        quad_dicts, ev_data0, trig_time, ebins0, ebins1
    )
    ndets_quad = dmask2ndets_perquad(dmask)
    # initial time binning: 32 ms steps, 128 ms bins, ~+/-15 s around trigger
    tstep = 0.032
    bin_size = 0.128
    t_bins0 = np.arange(-15.008, 15.008, tstep) + trig_time
    t_bins1 = t_bins0 + bin_size
    t_ax = (t_bins0 + t_bins1) / 2.0
    ntbins = len(t_bins0)
    print(ntbins)
    quad_cnts_mat = get_quad_cnts_tbins(t_bins0, t_bins1, ebins0, ebins1, ev_data0)
    # DRM file names encode the pointing: drm_<imx>_<imy>_.fits
    drm_fnames = sorted([fn for fn in os.listdir(args.drmdir) if "drm_" in fn])
    imxs = np.array([float(fn.split("_")[1]) for fn in drm_fnames])
    imys = np.array([float(fn.split("_")[2]) for fn in drm_fnames])
    drm = fits.open(os.path.join(args.drmdir, drm_fnames[0]))
    ebin_ind_edges = get_ebin_ind_edges(drm, ebins0, ebins1)
    # spectral-index grid for the count-spectrum interpolator
    ind_ax = np.linspace(-1.5, 3.5, 20 * 5 + 1)
    # cnts_per_tbin = get_cnts(ev_data0, t_bins0, t_bins1, ebin_ind, nebins)
    # names = ['tstart', 'tstop', 'bkg_llh', 'sig_llh', 'Nsig', 'plaw_ind']
    N_dbl_dt = args.ndbl
    tabs = []
    for ii in range(N_dbl_dt):
        for direction, quad_dict in quad_dicts.items():
            tab = Table()
            drm = fits.open(os.path.join(args.drmdir, quad_dict["drm_fname"]))
            imx = float(quad_dict["drm_fname"].split("_")[1])
            imy = float(quad_dict["drm_fname"].split("_")[2])
            abs_cor = get_abs_cor_rates(imx, imy, drm)
            cnts_intp = get_cnts_intp_obj(ind_ax, drm, ebin_ind_edges, abs_cor)
            # sum counts over the quadrants belonging to this region
            cnts_per_tbin = np.sum(
                [quad_cnts_mat[:, :, q] for q in quad_dict["quads"]], axis=0
            )
            print(imx, imy)
            bkg_llh_tbins, llhs, bf_nsigs, bf_inds = do_rate_mle(
                cnts_per_tbin, bkg_lin_rate_dict[direction], cnts_intp, t_bins0, t_bins1
            )
            tab["tstart"] = t_bins0
            tab["tstop"] = t_bins1
            tab["bkg_llh"] = bkg_llh_tbins
            tab["sig_llh"] = llhs
            tab["Nsig"] = bf_nsigs
            tab["plaw_ind"] = bf_inds
            tab["imx"] = imx * np.ones_like(bf_inds)
            tab["imy"] = imy * np.ones_like(bf_inds)
            tab["direction"] = direction
            tab["counts"] = np.sum(cnts_per_tbin, axis=1)
            tabs.append(tab)
        # double the bin duration and re-bin the counts for the next pass
        tstep *= 2
        bin_size *= 2
        t_bins0 = np.arange(-15.008, 15.008, tstep) + trig_time
        t_bins1 = t_bins0 + bin_size
        ntbins = len(t_bins0)
        print(ntbins)
        quad_cnts_mat = get_quad_cnts_tbins(t_bins0, t_bins1, ebins0, ebins1, ev_data0)
    tab = vstack(tabs)
    fname = os.path.join(args.obsid, "rate_llhs_trigtime_%.1f_.fits" % (args.trigtime))
    tab.write(fname)
    return
def cli():
    """Build the command-line interface and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    # (flag, add_argument keyword arguments), in display order
    option_specs = [
        ("--obsid", dict(type=str, help="Obsid as a string, as it appears in file names")),
        ("--evf", dict(type=str, help="Event File Name")),
        ("--e0", dict(type=float, help="Min energy", default=14.0)),
        ("--e1", dict(type=float, help="Max energy", default=194.9)),
        ("--dmask", dict(type=str, help="Detmask fname")),
        (
            "--drmdir",
            dict(
                type=str,
                help="Directory to find the DRMs",
                default="/gpfs/scratch/jjd330/bat_data/drms4quads/",
            ),
        ),
        ("--trigtime", dict(type=float, help="Trigger time in MET seconds")),
        (
            "--ndbl",
            dict(
                type=int,
                help="Number of times to double the time bin duration",
                default=5,
            ),
        ),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
# Script entry point: parse command-line arguments and run the analysis.
if __name__ == "__main__":
    args = cli()
    main(args)
|
Swift-BATREPO_NAMENITRATESPATH_START.@NITRATES_extracted@NITRATES-main@nitrates@analysis_seeds@mle_rates_for_realtime.py@.PATH_END.py
|
{
"filename": "perf_crawl.py",
"repo_name": "mpi4py/mpi4py",
"repo_path": "mpi4py_extracted/mpi4py-master/demo/futures/perf_crawl.py",
"type": "Python"
}
|
"""
Compare the speed of downloading URLs sequentially vs. using futures.
"""
import sys
import time
import functools
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = lambda n: None
try:
from concurrent.futures import ProcessPoolExecutor
except ImportError:
ProcessPoolExecutor = lambda n: None
from mpi4py.futures import MPIPoolExecutor, as_completed
# Mix of reachable and one intentionally unreachable URL, used to exercise
# both the success and error paths of each download strategy.
URLS = [
    'http://www.google.com/',
    'http://www.apple.com/',
    'http://www.ibm.com',
    'http://www.thisurlprobablydoesnotexist.com',
    'http://www.slashdot.org/',
    'http://www.python.org/',
    'http://www.bing.com/',
    'http://www.facebook.com/',
    'http://www.yahoo.com/',
    'http://www.youtube.com/',
    'http://www.blogger.com/',
]
def load_url(url, timeout):
    """Fetch *url* and return the raw response body (blocks up to *timeout* s)."""
    return urlopen(url, timeout=timeout).read()
def download_urls_sequential(urls, timeout=60):
    """Download *urls* one at a time.

    Returns a dict mapping each successfully fetched URL to its content.
    Failed downloads are skipped (best effort).  The previous bare
    ``except:`` also swallowed KeyboardInterrupt/SystemExit; narrowed to
    ``except Exception`` so Ctrl-C still works.
    """
    url_to_content = {}
    for url in urls:
        try:
            url_to_content[url] = load_url(url, timeout=timeout)
        except Exception:
            # best effort: skip unreachable URLs
            pass
    return url_to_content
def download_urls_with_executor(executor, urls, timeout=60):
    """Download *urls* concurrently with *executor*; returns {url: content}.

    Returns {} when no executor is available.  The executor is always shut
    down, even on error.  Failed downloads are skipped (best effort); the
    previous bare ``except:`` also swallowed KeyboardInterrupt/SystemExit,
    so it is narrowed to ``except Exception``.
    """
    if executor is None:
        return {}
    try:
        url_to_content = {}
        future_to_url = {
            executor.submit(load_url, url, timeout): url
            for url in urls
        }
        for future in as_completed(future_to_url):
            try:
                url_to_content[future_to_url[future]] = future.result()
            except Exception:
                # best effort: ignore individual failures
                pass
        return url_to_content
    finally:
        executor.shutdown()
def main():
    """Time each download strategy over URLS and print a one-line report each."""
    # (label, zero-argument callable) pairs; executors are constructed here
    # and shut down inside download_urls_with_executor
    for meth, fn in [('sequential',
                      functools.partial(download_urls_sequential,
                                        URLS)),
                     ('threads',
                      functools.partial(download_urls_with_executor,
                                        ThreadPoolExecutor(10), URLS)),
                     ('processes',
                      functools.partial(download_urls_with_executor,
                                        ProcessPoolExecutor(10), URLS)),
                     ('mpi4py',
                      functools.partial(download_urls_with_executor,
                                        MPIPoolExecutor(10), URLS))]:
        sys.stdout.write('%s: ' % meth.ljust(11))
        sys.stdout.flush()
        start = time.time()
        url_map = fn()
        elapsed = time.time() - start
        sys.stdout.write('%5.2f seconds (%2d of %d downloaded)\n' %
                         (elapsed, len(url_map), len(URLS)))
        sys.stdout.flush()
# Script entry point: run the download benchmark.
if __name__ == '__main__':
    main()
|
mpi4pyREPO_NAMEmpi4pyPATH_START.@mpi4py_extracted@mpi4py-master@demo@futures@perf_crawl.py@.PATH_END.py
|
{
"filename": "PathVariable.py",
"repo_name": "rat-pac/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/Variables/PathVariable.py",
"type": "Python"
}
|
"""SCons.Variables.PathVariable
This file defines an option type for SCons implementing path settings.
To be used whenever a user-specified path override should be allowed.
Arguments to PathVariable are:
option-name = name of this option on the command line (e.g. "prefix")
option-help = help string for option
option-dflt = default value for this option
validator = [optional] validator for option value. Predefined
validators are:
PathAccept -- accepts any path setting; no validation
PathIsDir -- path must be an existing directory
PathIsDirCreate -- path must be a dir; will create
PathIsFile -- path must be a file
PathExists -- path must exist (any type) [default]
The validator is a function that is called and which
should return True or False to indicate if the path
is valid. The arguments to the validator function
are: (key, val, env). The key is the name of the
option, the val is the path specified for the option,
                 and the env is the env to which the Options have been
added.
Usage example:
Examples:
prefix=/usr/local
opts = Variables()
opts = Variables()
opts.Add(PathVariable('qtdir',
'where the root of Qt is installed',
qtdir, PathIsDir))
opts.Add(PathVariable('qt_includes',
'where the Qt includes are installed',
'$qtdir/includes', PathIsDirCreate))
opts.Add(PathVariable('qt_libraries',
'where the Qt library is installed',
'$qtdir/lib'))
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PathVariable.py 4043 2009/02/23 09:06:45 scons"
__all__ = ['PathVariable',]
import os
import os.path
import SCons.Errors
class _PathVariableClass:
    """Callable factory (exposed below as the PathVariable singleton) that
    produces option tuples for path-valued SCons Variables, together with
    the predefined path validators used with them."""
    def PathAccept(self, key, val, env):
        """Accepts any path, no checking done."""
        pass
    def PathIsDir(self, key, val, env):
        """Validator to check if Path is a directory."""
        if not os.path.isdir(val):
            if os.path.isfile(val):
                m = 'Directory path for option %s is a file: %s'
            else:
                m = 'Directory path for option %s does not exist: %s'
            raise SCons.Errors.UserError(m % (key, val))
    def PathIsDirCreate(self, key, val, env):
        """Validator to check if Path is a directory,
        creating it if it does not exist."""
        if os.path.isfile(val):
            m = 'Path for option %s is a file, not a directory: %s'
            raise SCons.Errors.UserError(m % (key, val))
        if not os.path.isdir(val):
            os.makedirs(val)
    def PathIsFile(self, key, val, env):
        """validator to check if Path is a file"""
        if not os.path.isfile(val):
            if os.path.isdir(val):
                m = 'File path for option %s is a directory: %s'
            else:
                m = 'File path for option %s does not exist: %s'
            raise SCons.Errors.UserError(m % (key, val))
    def PathExists(self, key, val, env):
        """validator to check if Path exists"""
        if not os.path.exists(val):
            m = 'Path for option %s does not exist: %s'
            raise SCons.Errors.UserError(m % (key, val))
    def __call__(self, key, help, default, validator=None):
        # NB: searchfunc is currently undocumented and unsupported
        """
        The input parameters describe a 'path list' option, thus they
        are returned with the correct converter and validator appended. The
        result is usable for input to opts.Add() .
        The 'default' option specifies the default path to use if the
        user does not specify an override with this option.
        validator is a validator, see this file for examples
        """
        if validator is None:
            validator = self.PathExists
        # NOTE(review): SCons.Util is not imported in this module as shown;
        # presumably it is made available via the SCons package at runtime
        # -- confirm before reusing this code standalone.
        if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):
            return (key, '%s ( /path/to/%s )' % (help, key[0]), default,
                    validator, None)
        else:
            return (key, '%s ( /path/to/%s )' % (help, key), default,
                    validator, None)
PathVariable = _PathVariableClass()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
rat-pacREPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@Variables@PathVariable.py@.PATH_END.py
|
{
"filename": "test_matching.py",
"repo_name": "LSSTDESC/BlendingToolKit",
"repo_path": "BlendingToolKit_extracted/BlendingToolKit-main/tests/test_matching.py",
"type": "Python"
}
|
from astropy.table import Table
from btk.match import PixelHungarianMatcher
def test_matching():
    """Hungarian pixel matching: only the pair within 1 pixel matches."""
    true_cat = Table()
    true_cat["x_peak"] = [12.0, 31.0]
    true_cat["y_peak"] = [10.0, 30.0]
    pred_cat = Table()
    pred_cat["x_peak"] = [34.0, 12.1, 20.1]
    pred_cat["y_peak"] = [33.0, 10.1, 22.0]
    matcher = PixelHungarianMatcher(pixel_max_sep=1)
    match = matcher([true_cat], [pred_cat])
    assert match.n_true == 2
    assert match.n_pred == 3
    assert match.tp == 1
    assert match.fp == 2
    assert match.true_matches == [[0]]
    assert match.pred_matches == [[1]]
|
LSSTDESCREPO_NAMEBlendingToolKitPATH_START.@BlendingToolKit_extracted@BlendingToolKit-main@tests@test_matching.py@.PATH_END.py
|
{
"filename": "pgf.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/users_explain/text/pgf.py",
"type": "Python"
}
|
r"""
.. redirect-from:: /tutorials/text/pgf
.. _pgf:
************************************************************
Text rendering with XeLaTeX/LuaLaTeX via the ``pgf`` backend
************************************************************
Using the ``pgf`` backend, Matplotlib can export figures as pgf drawing
commands that can be processed with pdflatex, xelatex or lualatex. XeLaTeX and
LuaLaTeX have full Unicode support and can use any font that is installed in
the operating system, making use of advanced typographic features of OpenType,
AAT and Graphite. Pgf pictures created by ``plt.savefig('figure.pgf')``
can be embedded as raw commands in LaTeX documents. Figures can also be
directly compiled and saved to PDF with ``plt.savefig('figure.pdf')`` by
switching the backend ::
matplotlib.use('pgf')
or by explicitly requesting the use of the ``pgf`` backend ::
plt.savefig('figure.pdf', backend='pgf')
or by registering it for handling pdf output ::
from matplotlib.backends.backend_pgf import FigureCanvasPgf
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
The last method allows you to keep using regular interactive backends and to
save xelatex, lualatex or pdflatex compiled PDF files from the graphical user
interface. Note that, in that case, the interactive display will still use the
standard interactive backends (e.g., QtAgg), and in particular use latex to
compile relevant text snippets.
Matplotlib's pgf support requires a recent LaTeX_ installation that includes
the TikZ/PGF packages (such as TeXLive_), preferably with XeLaTeX or LuaLaTeX
installed. If either pdftocairo or ghostscript is present on your system,
figures can optionally be saved to PNG images as well. The executables
for all applications must be located on your :envvar:`PATH`.
`.rcParams` that control the behavior of the pgf backend:
================= =====================================================
Parameter Documentation
================= =====================================================
pgf.preamble Lines to be included in the LaTeX preamble
pgf.rcfonts Setup fonts from rc params using the fontspec package
pgf.texsystem Either "xelatex" (default), "lualatex" or "pdflatex"
================= =====================================================
.. note::
TeX defines a set of special characters, such as::
# $ % & ~ _ ^ \ { }
Generally, these characters must be escaped correctly. For convenience,
some characters (_, ^, %) are automatically escaped outside of math
environments. Other characters are not escaped as they are commonly needed
in actual TeX expressions. However, one can configure TeX to treat them as
"normal" characters (known as "catcode 12" to TeX) via a custom preamble,
such as::
plt.rcParams["pgf.preamble"] = (
r"\AtBeginDocument{\catcode`\&=12\catcode`\#=12}")
.. _pgf-rcfonts:
Multi-Page PDF Files
====================
The pgf backend also supports multipage pdf files using
`~.backend_pgf.PdfPages`
.. code-block:: python
from matplotlib.backends.backend_pgf import PdfPages
import matplotlib.pyplot as plt
with PdfPages('multipage.pdf', metadata={'author': 'Me'}) as pdf:
fig1, ax1 = plt.subplots()
ax1.plot([1, 5, 3])
pdf.savefig(fig1)
fig2, ax2 = plt.subplots()
ax2.plot([1, 5, 3])
pdf.savefig(fig2)
.. redirect-from:: /gallery/userdemo/pgf_fonts
Font specification
==================
The fonts used for obtaining the size of text elements or when compiling
figures to PDF are usually defined in the `.rcParams`. You can also use the
LaTeX default Computer Modern fonts by clearing the lists for :rc:`font.serif`,
:rc:`font.sans-serif` or :rc:`font.monospace`. Please note that the glyph
coverage of these fonts is very limited. If you want to keep the Computer
Modern font face but require extended Unicode support, consider installing the
`Computer Modern Unicode`__ fonts *CMU Serif*, *CMU Sans Serif*, etc.
__ https://sourceforge.net/projects/cm-unicode/
When saving to ``.pgf``, the font configuration Matplotlib used for the
layout of the figure is included in the header of the text file.
.. code-block:: python
import matplotlib.pyplot as plt
plt.rcParams.update({
"font.family": "serif",
# Use LaTeX default serif font.
"font.serif": [],
# Use specific cursive fonts.
"font.cursive": ["Comic Neue", "Comic Sans MS"],
})
fig, ax = plt.subplots(figsize=(4.5, 2.5))
ax.plot(range(5))
ax.text(0.5, 3., "serif")
ax.text(0.5, 2., "monospace", family="monospace")
ax.text(2.5, 2., "sans-serif", family="DejaVu Sans") # Use specific sans font.
ax.text(2.5, 1., "comic", family="cursive")
ax.set_xlabel("µ is not $\\mu$")
.. redirect-from:: /gallery/userdemo/pgf_preamble_sgskip
.. _pgf-preamble:
Custom preamble
===============
Full customization is possible by adding your own commands to the preamble.
Use :rc:`pgf.preamble` if you want to configure the math fonts,
using ``unicode-math`` for example, or for loading additional packages. Also,
if you want to do the font configuration yourself instead of using the fonts
specified in the rc parameters, make sure to disable :rc:`pgf.rcfonts`.
.. code-block:: python
import matplotlib as mpl
mpl.use("pgf")
import matplotlib.pyplot as plt
plt.rcParams.update({
"font.family": "serif", # use serif/main font for text elements
"text.usetex": True, # use inline math for ticks
"pgf.rcfonts": False, # don't setup fonts from rc parameters
"pgf.preamble": "\n".join([
r"\usepackage{url}", # load additional packages
r"\usepackage{unicode-math}", # unicode math setup
r"\setmainfont{DejaVu Serif}", # serif font via preamble
])
})
fig, ax = plt.subplots(figsize=(4.5, 2.5))
ax.plot(range(5))
ax.set_xlabel("unicode text: я, ψ, €, ü")
ax.set_ylabel(r"\url{https://matplotlib.org}")
ax.legend(["unicode math: $λ=∑_i^∞ μ_i^2$"])
.. redirect-from:: /gallery/userdemo/pgf_texsystem
.. _pgf-texsystem:
Choosing the TeX system
=======================
The TeX system to be used by Matplotlib is chosen by :rc:`pgf.texsystem`.
Possible values are ``'xelatex'`` (default), ``'lualatex'`` and ``'pdflatex'``.
Please note that when selecting pdflatex, the fonts and Unicode handling must
be configured in the preamble.
.. code-block:: python
import matplotlib.pyplot as plt
plt.rcParams.update({
"pgf.texsystem": "pdflatex",
"pgf.preamble": "\n".join([
r"\usepackage[utf8x]{inputenc}",
r"\usepackage[T1]{fontenc}",
r"\usepackage{cmbright}",
]),
})
fig, ax = plt.subplots(figsize=(4.5, 2.5))
ax.plot(range(5))
ax.text(0.5, 3., "serif", family="serif")
ax.text(0.5, 2., "monospace", family="monospace")
ax.text(2.5, 2., "sans-serif", family="sans-serif")
ax.set_xlabel(r"µ is not $\mu$")
.. _pgf-troubleshooting:
Troubleshooting
===============
* On Windows, the :envvar:`PATH` environment variable may need to be modified
to include the directories containing the latex, dvipng and ghostscript
executables. See :ref:`environment-variables` and
:ref:`setting-windows-environment-variables` for details.
* Sometimes the font rendering in figures that are saved to png images is
very bad. This happens when the pdftocairo tool is not available and
ghostscript is used for the pdf to png conversion.
* Make sure what you are trying to do is possible in a LaTeX document,
that your LaTeX syntax is valid and that you are using raw strings
if necessary to avoid unintended escape sequences.
* :rc:`pgf.preamble` provides lots of flexibility, and lots of
  ways to cause problems. When experiencing problems, try to minimize or
  disable the custom preamble.
* Configuring a ``unicode-math`` environment can be a bit tricky. The
TeXLive distribution for example provides a set of math fonts which are
usually not installed system-wide. XeLaTeX, unlike LuaLaTeX, cannot find
these fonts by their name, which is why you might have to specify
``\setmathfont{xits-math.otf}`` instead of ``\setmathfont{XITS Math}`` or
alternatively make the fonts available to your OS. See this
`tex.stackexchange.com question`__ for more details.
__ https://tex.stackexchange.com/q/43642/
* If the font configuration used by Matplotlib differs from the font setting
  in your LaTeX document, the alignment of text elements in imported figures
may be off. Check the header of your ``.pgf`` file if you are unsure about
the fonts Matplotlib used for the layout.
* Vector images and hence ``.pgf`` files can become bloated if there are a lot
of objects in the graph. This can be the case for image processing or very
big scatter graphs. In an extreme case this can cause TeX to run out of
  memory: "TeX capacity exceeded, sorry". You can configure latex to increase
the amount of memory available to generate the ``.pdf`` image as discussed on
`tex.stackexchange.com <https://tex.stackexchange.com/q/7953/>`_.
Another way would be to "rasterize" parts of the graph causing problems
using either the ``rasterized=True`` keyword, or ``.set_rasterized(True)`` as
per :doc:`this example </gallery/misc/rasterization_demo>`.
* Various math fonts are compiled and rendered only if corresponding font
packages are loaded. Specifically, when using ``\mathbf{}`` on Greek letters,
the default computer modern font may not contain them, in which case the
letter is not rendered. In such scenarios, the ``lmodern`` package should be
loaded.
* If you still need help, please see :ref:`reporting-problems`
.. _LaTeX: http://www.tug.org
.. _TeXLive: http://www.tug.org/texlive/
"""
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@users_explain@text@pgf.py@.PATH_END.py
|
{
"filename": "_tickformatstopdefaults.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/isosurface/colorbar/_tickformatstopdefaults.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for ``isosurface.colorbar.tickformatstopdefaults``."""

    def __init__(
        self,
        plotly_name="tickformatstopdefaults",
        parent_name="isosurface.colorbar",
        **kwargs,
    ):
        # Pop the overridable defaults out of kwargs before delegating,
        # so explicit caller values win over the hard-coded ones.
        data_class_str = kwargs.pop("data_class_str", "Tickformatstop")
        data_docs = kwargs.pop(
            "data_docs",
            """
""",
        )
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@isosurface@colorbar@_tickformatstopdefaults.py@.PATH_END.py
|
{
"filename": "create_trajectories.py",
"repo_name": "nuc-astro/winnet",
"repo_path": "winnet_extracted/winnet-master/data/Example_data/Example_BigBang_many/create_trajectories.py",
"type": "Python"
}
|
# Import packages
import os, shutil
import numpy as np
import matplotlib.pyplot as plt

# Trajectories for standard Big Bang nucleosynthesis. We need:
#   - temperature evolution
#   - density evolution
#   - electron fraction at weak freeze-out
# Derived for a flat, isotropic and homogeneous universe with adiabatic
# expansion, following Winteler (2013). The Friedmann equations are:
#   (1): d^2R/dt^2 = -4pi//(3c^2) * (rho_eps + 3P) GR + 1/(3c^2) Lambda R
#   (2): (dR/dt)^2 = H(t)^2 = 8piG/(3c^2) rho_eps - kc^2/(R^2) + 1/(3c^2) Lambda
#   (3): d(rho_eps R^3)/dt + P dR^3/dt = 0
# (flat universe: k = 0 and Lambda = 0)
#
# At approximately kB*T_weak = 0.8 MeV [Winteler 2013, Fields 2006] the
# neutron-to-proton ratio freezes out. From the Maxwell-Boltzmann
# distribution and equilibrium of chemical potentials, n/p = exp(-Delta/kT).

# --- Constants (loop-invariant, hoisted out of the eta loop) ---
MN = 939.5654133                 # neutron mass [MeV / c**2]
MP = 938.2720813                 # proton mass  [MeV / c**2]
DELTA = MN - MP                  # mass difference [MeV / c**2]
KB_TWEAK = 0.8                   # weak freeze-out temperature kB*T [MeV]
M_U = 1.660540e-27 * 1e3         # nucleon weight [g]
PI = 3.14159265359
KB = 8.6173303e-2                # [MeV / GK]
HBARC = 197.3269718 * 1.e-13     # [MeV * cm]

# Neutron/proton fractions at freeze-out (from n + p = 1)
n_p_ratio = np.exp(-DELTA/KB_TWEAK)
neutron_freezout = n_p_ratio/(1. + n_p_ratio)
proton_freezout = 1. - neutron_freezout
# Electron fraction equals the proton fraction (only nucleons present)
ye_freezout = proton_freezout/(neutron_freezout+proton_freezout)

# First time after freeze-out (MeV -> GK conversion factor: 11.604519)
first_time = (13.36 * 1./(KB_TWEAK * 11.604519))**2.
# Time grid [s]
time = np.logspace(np.log10(first_time), 10, num=200)
# Adiabatic expansion with relativistic degrees of freedom g = 3.3626 [GK]
temperature = 13.336 * 1./(time**0.5)

# Photon-to-baryon ratios to scan (change for the Schramm & Turner 1998 plot)
etas = np.logspace(-12, -7, num=100)
for e in etas:
    eta = e
    # Density [g/ccm]: adiabatic expansion, ultra-relativistic particles
    # (see Winteler 2012)
    dens = 2.404 * eta/(PI**2.) * ((KB*temperature)/(HBARC))**3. * M_U
    # Ye column: the network only reads the first value, so keep it constant
    ye = [ye_freezout] * len(time)
    # Write one trajectory file per eta
    out = np.array([time, temperature, dens, ye]).T
    np.savetxt('bbn_'+str(e), out, header='time [s], T9 [GK], density [g/cm^3], Ye \n')
|
nuc-astroREPO_NAMEwinnetPATH_START.@winnet_extracted@winnet-master@data@Example_data@Example_BigBang_many@create_trajectories.py@.PATH_END.py
|
{
"filename": "get_speedups.py",
"repo_name": "manodeep/Corrfunc",
"repo_path": "Corrfunc_extracted/Corrfunc-master/paper/scripts/get_speedups.py",
"type": "Python"
}
|
#!/usr/bin/env python
from __future__ import print_function, division
import numpy as np
try:
import pandas as pd
except ImportError:
pd = None
try:
import cPickle as pickle
except ImportError:
import pickle
from Corrfunc.io import read_catalog
import multiprocessing
max_threads = multiprocessing.cpu_count()
def read_file(filename):
    """
    Reads in the file I created manually (by recompiling and adding timers)

    Not used any more but left for historical reasons (the first 'speedup'
    plots were generated with this function)

    Parameters
    ----------
    filename : str
        Whitespace-delimited file with columns (same_cell, N1, N2, time).

    Returns
    -------
    pandas.DataFrame if pandas is available, else a structured numpy array,
    with fields 'same_cell', 'N1', 'N2' and 'time'.
    """
    # BUG FIX: np.int / np.float were deprecated aliases for the builtin
    # int/float and were removed in NumPy 1.24 -- use explicit fixed-width
    # dtypes instead (behaviour is unchanged on 64-bit platforms).
    dtype = np.dtype([('same_cell', np.int32),
                      ('N1', np.int64),
                      ('N2', np.int64),
                      ('time', np.float64)
                      ])
    if pd is not None:
        # sep=r'\s+' replaces the deprecated delim_whitespace=True
        timings = pd.read_csv(filename, header=None,
                              engine="c",
                              dtype={'same_cell': np.int32,
                                     'N1': np.int64,
                                     'N2': np.int64,
                                     'time': np.float64},
                              index_col=None,
                              names=['same_cell', 'N1', 'N2', 'time'],
                              sep=r'\s+')
    else:
        timings = np.loadtxt(filename, dtype=dtype)
    return timings
class nf(float):
    """Float whose repr drops a trailing '.0' (used for contour labels)."""
    def __repr__(self):
        # One decimal place; strip the '.0' when the value is whole
        text = '%.1f' % float(self)
        return text[:-2] if text.endswith('.0') else text
def run_wp(boxsize, x, y, z, pimax, nthreads=max_threads, isa=None):
    """Run Corrfunc's wp on (x, y, z) with per-cell timers and return the timings."""
    from os.path import dirname, abspath, join as pjoin
    import Corrfunc
    from Corrfunc.theory import wp

    # The test bin file ships inside the Corrfunc package tree
    bins_file = pjoin(dirname(abspath(Corrfunc.__file__)),
                      "../theory/tests/", "bins")
    # c_cell_timer=True makes wp return (results, per-cell timings)
    _, cell_times = wp(boxsize, pimax, nthreads, bins_file,
                       x, y, z, c_cell_timer=True, isa=isa,
                       verbose=True)
    return cell_times
def main():
    """Benchmark driver: with no CLI argument, run wp cell timers for every
    instruction set and pickle the results; with a pickle filename argument,
    load it and render per-ISA speedup heat maps.
    """
    import sys
    if len(sys.argv) == 1:
        # --- Benchmark mode: time wp for each ISA, serial and threaded ---
        print("Running cell timers for wp")
        all_isa = ['avx512f', 'avx2', 'avx', 'sse42', 'fallback']
        x, y, z = read_catalog()
        # boxsize = 1100.0
        # pimax = 45.0
        # points = np.loadtxt('halos_emulator_1100box_Neff3_00.txt')
        # numpart = int(1.*len(points))
        # assert (points >= 0).all() and (points < 1100.).all()
        # dtype = points.dtype # float64
        # points = points.reshape(-1).view(dtype=[('x',dtype,3)])
        # subsample = np.random.choice(points, numpart, replace=False)
        # subsample = subsample.view(dtype=dtype).reshape(-1,3)
        # x, y, z = subsample.T
        boxsize = 420.0
        pimax = 40.0
        cell_timings = dict()
        serial_timings = dict()
        for isa in all_isa:
            cell_timings[isa] = dict()
            serial_timings[isa] = dict()
            # First run the serial (single threaded)
            timings = run_wp(boxsize, x, y, z, pimax,
                             nthreads=1, isa=isa)
            (serial_timings[isa])[1] = timings
            # then the one with all threads
            # max_threads is not required but being explicit
            timings = run_wp(boxsize, x, y, z, pimax,
                             nthreads=max_threads, isa=isa)
            (cell_timings[isa])[max_threads] = timings
        with open('wp_cell_timers_ozstar.pkl', 'wb') as outfile:
            pickle.dump([all_isa, cell_timings, serial_timings], outfile,
                        protocol=pickle.HIGHEST_PROTOCOL)
    else:
        # --- Plot mode: load a previously pickled benchmark and plot ---
        import matplotlib
        import matplotlib.pyplot as plt
        import matplotlib.colors as mcolors
        import matplotlib.cm as cm
        timings_file = sys.argv[1]
        print("Loading benchmarks from file = {0}".format(timings_file))
        with open(timings_file, 'rb') as pkl_file:
            all_isa, _, serial_timings = pickle.load(pkl_file)
        legend = ['AVX512F', 'AVX2', 'AVX', 'SSE4.2', 'Fallback']
        base_string = 'wp'
        all_speedup = []
        # Fallback (non-vectorized) serial timings are the speedup baseline
        base_timing = (serial_timings['fallback'])[1]['time_in_ns']
        N1_parts = (serial_timings['fallback'])[1]['N1']
        N2_parts = (serial_timings['fallback'])[1]['N2']
        gridsize = 40
        cb_range = [0.0, 100.0]
        contour_nlevels = 4
        xlimits = [0, 1000]
        ylimits = xlimits
        xlabel = 'Number of points in a cell'
        ylabel = xlabel
        cb_diff = (cb_range[1] - cb_range[0])
        '''
        positive_Ncolors = int((cb_range[1] - 1.0) / cb_diff * 256)
        negative_Ncolors = 256 - positive_Ncolors
        colors1 = cm.OrRd(np.linspace(0.0, 1.0, negative_Ncolors))
        colors2 = cm.viridis(np.linspace(0.0, 1.0, positive_Ncolors))
        # combine them and build a new colormap
        colors = np.vstack((colors1, colors2))
        mycmap = mcolors.LinearSegmentedColormap.from_list('my_colormap',
                                                           colors)
        '''
        mycmap = 'viridis'
        matplotlib.style.use('default')
        # Label levels with specially formatted floats
        if plt.rcParams["text.usetex"]:
            cntr_fmt = r'%r\%%'
        else:
            cntr_fmt = '%r%%'
        # Want fallback to appear first
        all_isa.reverse()
        legend.reverse()
        for ii, isa in enumerate(all_isa):
            # Skip the baseline itself (fallback vs fallback is trivially 1x)
            if ii == 0:
                continue
            this_timing = (serial_timings[isa])[1]['time_in_ns']
            # Only compare cells where both runs recorded a positive time
            ind = (np.where((this_timing > 0.0) & (base_timing > 0.0)))[0]
            speedup = base_timing[ind] / this_timing[ind]
            all_speedup.append(speedup)
            print("Min speedup = {0:0.2f}. Max = {1:0.2f} Median = {2:0.2f}".format(
                min(speedup), max(speedup), np.median(speedup)))
            bad = (np.where(speedup <= 1.0))[0]
            bad_timings_base = np.sum(base_timing[ind[bad]])
            bad_timings = np.sum(this_timing[ind[bad]])
            print("Cells with slowdown {3}({4:4.3f}%): Base takes - {0:8.3f} "
                  "sec while {1} takes {2:8.3f} seconds".format(
                      bad_timings_base/1e9,
                      legend[ii],
                      bad_timings/1e9,
                      len(bad),
                      100.0 * len(bad) / len(ind)))
            good = (np.where(speedup > 1.0))[0]
            good_timings_base = np.sum(base_timing[ind[good]])
            good_timings = np.sum(this_timing[ind[good]])
            print("Cells with speedup {3}({4:4.3f}%): Base takes - {0:8.3f} sec "
                  "while {1} takes {2:8.3f} seconds".format(
                      good_timings_base/1e9,
                      legend[ii],
                      good_timings/1e9,
                      len(good),
                      100.0 * len(good) / len(ind)))
            # Figure layout: central hexbin image plus marginal histograms
            fig = plt.figure(1, figsize=(8, 8))
            figsize = 0.6
            left = 0.1
            bottom = 0.1
            top_aspect = 0.15
            hist_area = [left, bottom + figsize, figsize, figsize * top_aspect]
            axhist = plt.axes(hist_area)
            axhist.autoscale(enable=True, axis="y")
            axhist.set_xlim(xlimits)
            plt.setp(axhist.get_xticklabels(), visible=False)
            axhist.axis('off')
            axhist.hist(N1_parts[ind], gridsize, range=xlimits,
                        color='0.5')
            hist_time_area = [left + figsize, bottom,
                              figsize*top_aspect, figsize]
            ax_time = plt.axes(hist_time_area)
            ax_time.autoscale(enable=True, axis="x")
            ax_time.set_ylim(ylimits)
            plt.setp(ax_time.get_yticklabels(), visible=False)
            plt.setp(ax_time.get_xticklabels(), visible=False)
            ax_time.axis('off')
            ax_time.hist(N1_parts[ind], gridsize, weights=this_timing[ind],
                         range=xlimits, orientation="horizontal",
                         color='0.5')
            im_area = [left, bottom, figsize, figsize]
            ax = plt.axes(im_area)
            ax.set_autoscale_on(False)
            ax.set_xlim(xlimits)
            ax.set_ylim(ylimits)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            xedges = np.linspace(xlimits[0], xlimits[1], gridsize)
            yedges = np.linspace(ylimits[0], ylimits[1], gridsize)
            # NOTE(review): the 'normed' keyword was removed from
            # np.histogram2d in modern NumPy (use density=); this call will
            # fail on NumPy >= 1.24 -- confirm the pinned NumPy version.
            cell_time, xedges, yedges = np.histogram2d(
                N1_parts, N2_parts, (xedges, yedges),
                weights=base_timing, normed=False)
            cell_time /= np.sum(cell_time)
            cell_time *= 100.0
            # Build cumulative-time map: each cell gets the cumulative
            # percentage of total time, accumulated in sorted order
            cell_time_1d = cell_time.flatten()
            sorted_ind = np.argsort(cell_time_1d)
            cum_sorted_time = np.cumsum(cell_time_1d[sorted_ind])
            correct_order_cum_time = np.empty_like(cum_sorted_time)
            for kk, ct in zip(sorted_ind, cum_sorted_time):
                correct_order_cum_time[kk] = ct
            correct_order_cum_time = correct_order_cum_time.reshape(
                cell_time.shape)
            extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
            xarr, yarr = np.meshgrid(xedges[0:-1], yedges[0:-1])
            contours = ax.contour(xarr, yarr,
                                  correct_order_cum_time, contour_nlevels,
                                  linewidths=3.0,
                                  extent=extent,
                                  cmap=cm.Greys)
            # Recast levels to new class
            # Reverse the levels to show that the contours represent
            # enclosed fraction of time spent
            contours.levels = [nf(val) for val in contours.levels[::-1]]
            ax.clabel(contours, contours.levels, fmt=cntr_fmt,
                      inline=True, fontsize=10)
            # Now plot the image for the speedup
            normalized_this_timing = this_timing/this_timing.sum()
            im = ax.hexbin(N1_parts[ind], N2_parts[ind],
                           #C=speedup[ind],
                           C=normalized_this_timing[ind],
                           vmin=cb_range[0], vmax=cb_range[1],
                           cmap=mycmap, gridsize=gridsize)
            plt.figtext(left + figsize - 0.03, bottom + figsize - 0.05,
                        '{0}'.format(legend[ii]), fontsize=16, ha='right')
            cbar_offset = 0.08
            cbar_width = 0.03
            cbar_ax = fig.add_axes([left + figsize + figsize*top_aspect +
                                    cbar_offset, bottom,
                                    cbar_width, figsize])
            cb = fig.colorbar(im, extend='both', format="%.1f",
                              ticks=np.linspace(cb_range[0], cb_range[1],
                                                cb_diff + 1.0),
                              cax=cbar_ax)
            cb.set_label('Speedup rel. to non-vectorized code')
            # Output filename prefix keyed off the machine name in the input
            if 'laptop' in timings_file:
                exts = 'laptop_'
            elif 'stampede' in timings_file:
                exts = 'stampede_'
            elif 'bender' in timings_file:
                exts = 'bender_'
            elif 'ozstar' in timings_file:
                exts = 'ozstar_'
            else:
                exts = ''
            plt.savefig('{1}_{2}Speedup_{0}.png'.format(legend[ii],
                                                        base_string,
                                                        exts),
                        dpi=400)
            plt.savefig('{1}_{2}Speedup_{0}.pdf'.format(legend[ii],
                                                        base_string,
                                                        exts),
                        dpi=400)
            fig.clear()
            ax.clear()
            axhist.clear()
            ax_time.clear()
            plt.close(fig)
        # NOTE(review): all_speedup is a list of unequal-length arrays; modern
        # NumPy requires dtype=object to save such ragged data -- confirm.
        np.savez('isa_and_speedups.npz',all_isa=all_isa, all_speedup=all_speedup)
if __name__ == '__main__':
main()
|
manodeepREPO_NAMECorrfuncPATH_START.@Corrfunc_extracted@Corrfunc-master@paper@scripts@get_speedups.py@.PATH_END.py
|
{
"filename": "OSystem.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/Obit/python/OSystem.py",
"type": "Python"
}
|
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2004-2019
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: bcotton@nrao.edu.
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
# Python shadow class to ObitSystem class
from __future__ import absolute_import
import Obit, _Obit
class OSystem(Obit.OSystem):
    """ Obit System Object class

    An Obit system is needed to access persistent forms of data objects.
    In particular, the ObitSystem keeps track of any scratch files created and deletes
    any left when the system is shutdown (OSystem object deleted)
    Currently Obit can access FITS files or AIPS (some classes) and the locations
    of data directories are specified at OSystem startup. All external files will
    be in one of the directories specified and indicated by the "disk" variable
    inside Python Obit software.
    The system is started by creating an OSystem,
    the constructor has the following arguments:
    pgmName       = A name for the program, used in creating scratch files
                    Available from python object as member pgmName
    pgmNumber     = A version number of the program (POPS number for AIPS)
                    Available from python object as member pgmNumber
    AIPSuser      = AIPS user number if accessing AIPS data structures
    numberAIPSdisk= Number of AIPS disk directories
                    if -1 use values of $DA01, $DA02...
    AIPSdir       = List of numberAIPSdisk directories for AIPS data areas
                    For default case, give a list with "Def"
    numberFITSdisk= Number of FITS disk directories
                    if -1 use values of $DA01, $DA02...
    FITSdir       = List of numberFITSdisk directories for FITS data areas
                    For default case, give a list with "Def"
    F_TRUE        = Value of boolean TRUE (needed for Fortran interface)
    F_FALSE       = Value of boolean FALSE (needed for Fortran interface)
    err           = Obit Error stack on which to report problems.
    An example defining one FITS directory and no AIPS directories:
    # Init Obit
    err=OErr.OErr()
    ObitSys=OSystem.OSystem ("OTFSub", 1, 103, 1, ["None"], 1, ["../FITSdata/"], 1, 0, err)
    """
    def __init__(self,pgmName, pgmNumber, AIPSuser,
                 numberAIPSdisk, AIPSdir, numberFITSdisk, FITSdir,
                 F_TRUE, F_FALSE, err):
        super(OSystem, self).__init__()
        # Start the underlying C Obit system; errors accumulate on err
        Obit.CreateOSystem(self.this, pgmName, pgmNumber, AIPSuser,
                           numberAIPSdisk, AIPSdir,
                           numberFITSdisk, FITSdir,
                           F_TRUE, F_FALSE, err.me)
        if err.isErr:
            # NOTE(review): printErrMsg is not defined or imported in this
            # module's visible scope - presumably provided by an error
            # module (OErr?); confirm before relying on this path.
            printErrMsg(err, "Error in Obit initialization")
        # save info visible to python
        self.pgmName = pgmName
        self.pgmNumber = pgmNumber
        # Remember who I am - can be only one
        OSystem.ObitSys = self
    def __del__(self, DeleteOSystem=_Obit.DeleteOSystem):
        # Better not shutdown?
        # DeleteOSystem is bound as a default arg so it survives interpreter
        # teardown when module globals may already be cleared
        if _Obit!=None and self!=None:
            DeleteOSystem(self.this)
    def __setattr__(self,name,value):
        # Store all attributes directly in the instance dict (bypasses the
        # SWIG property machinery of the Obit.OSystem base)
        self.__dict__[name] = value
    def __getattr__(self,name):
        # Only "me" is computed; fetch the C pointer from the Obit binding
        if name == "me" :
            return Obit.OSystem_Get_me(self.this)
        raise AttributeError(name)
    def __repr__(self):
        return "<C OSystem instance>"
def Shutdown(inObj=None):
    """
    Shut down Obit in all known IO related modules.

    * inObj = Python Obit System object, defaults to OSystem.ObitSys
    """
    ################################################################
    # Fall back to the module-wide Obit system unless a valid object is given
    target = inObj if isinstance(inObj, OSystem) else OSystem.ObitSys
    Obit.Shutdown(target.me)
    del target
    # end Shutdown
def PIsInit():
    """
    Tell whether the Obit system has been initialized.

    Returns True if Obit is initialized, else False.
    """
    ################################################################
    return Obit.SystemIsInit() != 0
    # end PIsInit
def PGetAIPSuser ():
    """
    Return the AIPS user number registered with the Obit system.

    Thin wrapper around the Obit C binding.
    """
    ################################################################
    return Obit.SystemGetAIPSuser()
    # end PGetAIPSuser
def PSetAIPSuser (user):
    """
    Set the AIPS user number in the Obit system.

    * user = AIPS user number
    """
    ################################################################
    Obit.SystemSetAIPSuser(user)
    # end PSetAIPSuser
def PGetPgmName ():
    """
    Return the program name as a character string.

    Thin wrapper around the Obit C binding.
    """
    ################################################################
    return Obit.SystemGetPgmName()
    # end PGetPgmName
def PToday ():
    """
    Return today's date as a string suitable for descriptors.

    Thin wrapper around the Obit C binding.
    """
    ################################################################
    return Obit.SystemToday()
    # end PToday
def PSetPgmName (pgmName):
    """
    Set the program name in the Obit system.

    * pgmName = new program name
    """
    ################################################################
    Obit.SystemSetPgmName(pgmName)
    # end PSetPgmName
def PGetPgmNumber ():
    """
    Return the program number (POPS number for AIPS).

    Thin wrapper around the Obit C binding.
    """
    ################################################################
    return Obit.SystemGetPgmNumber()
    # end PGetPgmNumber
def PSetPgmNumber (pgmNumber):
    """
    Set the program number in the Obit system.

    * pgmNumber = new program number
    """
    ################################################################
    Obit.SystemSetPgmNumber(pgmNumber)
    # end PSetPgmNumber
def PMemPrint ():
    """
    Print the contents of the Obit memory allocation table on stdout.

    Diagnostic aid; thin wrapper around the Obit C binding.
    """
    ################################################################
    #
    Obit.MemPrint()
    # end PMemPrint
def PAllowThreads (nThreads):
    """
    Set the maximum number of threads in an Obit thread pool.

    * nThreads = maximum number of threads in an Obit thread pool
    """
    ################################################################
    Obit.SystemAllowThreads(nThreads)
    # end PAllowThreads
def PGetNoThreads ():
    """
    Return the number of threads currently enabled in Obit.

    Thin wrapper around the Obit C binding.
    """
    ################################################################
    return Obit.SystemGetNoThreads()
    # end PGetNoThreads
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@Obit@python@OSystem.py@.PATH_END.py
|
{
"filename": "binomialFPEffectiveness-checkpoint.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/GKbaseline_gaiaRadCut/.ipynb_checkpoints/binomialFPEffectiveness-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
Reliability is given by
$$ R = \frac{N_{\mathrm{truePC}}}{N_{\mathrm{obsPC}}} = 1 - \frac{N_{\mathrm{obsFP}}}{N_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{F_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) $$
where $E = N_{\mathrm{obsFP}}/N_{\mathrm{trueFP}}$ is the false positive effectiveness, $F_{\mathrm{obsFP}} = N_{\mathrm{obsFP}}/N_{\mathrm{obsTCEs}}$ is the fraction of observed TCEs that are dispositioned as FP and $F_{\mathrm{obsPC}} = N_{\mathrm{obsPC}}/N_{\mathrm{obsTCEs}}$ is the fraction of TCEs dispositioned as PC.
We will separately measure $E$ and $F_{\mathrm{obsFP}}$ as binomial point processes with probabilities that depend on period and MES. Once we have $F_{\mathrm{obsFP}}$ then $F_{\mathrm{obsPC}} = 1 - F_{\mathrm{obsFP}}$, assuming that $N_{\mathrm{obsTCEs}} = N_{\mathrm{obsPC}} + N_{\mathrm{obsFP}}$.
We think of TCEs as consisting of two sets: those that are dispositioned as FP and those that are dispositioned as PC. We do this for both the observed TCEs, and for inverted/scrambled TCEs, where all TCEs are true false positives. Then we can think of the vetting process as drawing from the set of TCEs, with a probability $r$ of selecting either PCs or FPs. Then the probability distribution of selecting $c$ FPs from $n$ TCEs is given by the binomial distribution
$$P\{c\} = \left( \begin{array}{c} n \\ c \end{array} \right) r^c (1-r)^{n-c}.$$
To measure $E$ we use the inverted and scrambled data sets, where all detected TCEs are by definition FPs. We define $E$ as the probability of drawing FPs from inverted/scrambled TCEs, found via the Bayesian inference $p(E|n, c) \propto p(c|E, n) p(E)$, where
$$p(c|E, n) = \left( \begin{array}{c} n \\ c \end{array} \right) E^c (1-E)^{n-c}$$ and
$p(E)$ is a prior distribution of the probability $E$. By putting the data on a grid indexed by $i,j$, we can fit effectiveness as a function parameterized by a vector $\theta$, $E(\theta,\mathrm{period},\mathrm{MES})$, as $p(\theta)|n_{i,j}, c_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) \propto p(c_{i,j}|\theta, n_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) p(\theta)$, where $p(\theta)$ is some prior distribution of the parameters.
To measure $F_{\mathrm{obsFP}}$ we perform a similar inference using the set of observed TCEs, and inferring the probability of drawing c FPs from n observed TCEs. The inference in this case becomes $p(F_{\mathrm{obsFP}}|n, c) \propto p(c|F_{\mathrm{obsFP}}, n) p(F_{\mathrm{obsFP}})$, which we can parameterize in terms of a function similar to effectiveness.
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spec
import pandas as pd
from astropy.io import ascii
from astropy.table import Table, vstack
import sys
sys.path.insert(0, '..')
import dr25Models as funcModels
```
First we set the parameters space for our analysis.
```python
stellarType = "GK"
scoreCut = 0.0;
if False:
periodMin = 200;
periodMax = 400;
# rpMin = 0.75;
# rpMax = 2.0;
rpMin = 0;
rpMax = 100;
mesMin = 7;
mesMax = 15;
else:
periodMin = 50;
periodMax = 600;
rpMin = 0.5;
rpMax = 15;
mesMin = 7;
mesMax = 30;
```
```python
def drawHeatMap(dataArray, imageSize, x, y, nData=[], colorBarLabel="", textOn=True, forceInt=True):
    """Render a 2D array as an annotated grey-scale heat map.

    Parameters:
        dataArray: 2D array of cell values, indexed [x-bin, y-bin].
        imageSize: (width, height) figure size in inches.
        x, y: 2D arrays of lower-left bin-edge coordinates, same shape
            as dataArray.
        nData: optional 2D array of per-cell counts; cells with count 0
            are labeled "-". (Default [] is read-only here, so the
            mutable-default pitfall does not apply.)
        colorBarLabel: label text for the colorbar.
        textOn: if True, print each cell's value inside the cell.
        forceInt: if True, cast the printed cell values to int.
    """
    # Bin widths from adjacent grid-edge values
    dx = x[(1, 0)] - x[(0, 0)]
    dy = y[(0, 1)] - y[(0, 0)]
    extent = [x[(0, 0)], x[(-1, 0)] + dx, y[(0, 0)], y[(0, -1)] + dy]
    plt.figure(figsize=imageSize)
    ax = plt.gca()
    # imshow expects [row, col] = [y, x], so transpose
    da = np.transpose(dataArray)
    im = ax.imshow(da, extent=extent, origin='lower', cmap="Greys")
    ax.set_aspect(10)
    if len(nData) == 0:
        nData = np.ones(dataArray.shape)
    arrayShape = da.shape
    minda = np.min(da)
    maxda = np.max(da)
    daRange = maxda - minda
    for i in range(arrayShape[0]):
        for j in range(arrayShape[1]):
            # White text on dark cells, black text on light cells
            if da[i, j] > minda + daRange * 0.5:
                cstr = "w"
            else:
                cstr = "k"
            # Both branches of the original magnitude test used size 9
            fsz = 9
            if textOn:
                if nData[(j, i)] > 0:
                    if forceInt:
                        ax.text(x[(j, i)] + dx/2, y[(j, i)] + dy/2, da[i, j].astype("int"),
                                ha="center", va="center", color=cstr, fontsize=fsz)
                    else:
                        ax.text(x[(j, i)] + dx/2, y[(j, i)] + dy/2, da[i, j],
                                ha="center", va="center", color=cstr, fontsize=fsz)
                else:
                    ax.text(x[(j, i)] + dx/2, y[(j, i)] + dy/2, "-",
                            ha="center", va="center", color=cstr, fontsize=fsz)
    ax.tick_params(axis="both", labelsize=12)
    im_ratio = float(da.shape[0]) / da.shape[1]
    cbh = plt.colorbar(im, fraction=0.024*im_ratio, pad=0.02)
    # BUG FIX: the matplotlib Text property is spelled 'fontsize'; the
    # original 'fontSize' raises "Unknown property" on current matplotlib.
    cbh.ax.set_ylabel(colorBarLabel, fontsize=16)
    # ax.invert_yaxis();
```
Then we load our data, which consists of the stellar catalog, observed TCEs, inverted TCEs and scrambled TCEs. We convert the tables to Pandas for manipulation.
```python
# not currently used
# load skygroup maps for the FOV position dependence study
# obsSkygroupMap = ascii.read("../data/obsTceToSkygroup.txt");
```
```python
dataLoc = "../data/"
starlist = "../stellarCatalogs/dr25_stellar_berger2019_clean_GaiaRadCut_GK.txt"
kic = pd.read_csv(starlist)
invList = dataLoc + "kplr_dr25_inv_tces.txt"
scr1List = dataLoc + "kplr_dr25_scr1_tces.txt"
scr2List = dataLoc + "kplr_dr25_scr2_tces.txt"
scr3List = dataLoc + "kplr_dr25_scr3_tces.txt"
invTcesFull = ascii.read(invList);
scr1TcesFull = ascii.read(scr1List);
scr2TcesFull = ascii.read(scr2List);
scr3TcesFull = ascii.read(scr3List);
invTcesFullPd = invTcesFull.to_pandas();
scr1TcesFullPd = scr1TcesFull.to_pandas();
scr2TcesFullPd = scr2TcesFull.to_pandas();
scr3TcesFullPd = scr3TcesFull.to_pandas();
```
We have to remove the TCEs that are on the drop lists
```python
invDropList = ascii.read(dataLoc + "kplr_droplist_inv.txt");
scr1DropList = ascii.read(dataLoc + "kplr_droplist_scr1.txt");
scr2DropList = ascii.read(dataLoc + "kplr_droplist_scr2.txt");
scr3DropList = ascii.read(dataLoc + "kplr_droplist_scr3.txt");
invDropList = invDropList.to_pandas();
scr1DropList = scr1DropList.to_pandas();
scr2DropList = scr2DropList.to_pandas();
scr3DropList = scr3DropList.to_pandas();
invTcesPd = invTcesFullPd[~invTcesFullPd['TCE_ID'].isin(invDropList['TCE_ID'])];
scr1TcesPd = scr1TcesFullPd[~scr1TcesFullPd['TCE_ID'].isin(scr1DropList['TCE_ID'])];
scr2TcesPd = scr2TcesFullPd[~scr2TcesFullPd['TCE_ID'].isin(scr2DropList['TCE_ID'])];
scr3TcesPd = scr3TcesFullPd[~scr3TcesFullPd['TCE_ID'].isin(scr3DropList['TCE_ID'])];
print("length of invTcesFull = " + str(len(invTcesFullPd))
+ ", length of invTces = " + str(len(invTcesPd)))
print("length of scr1TcesFull = " + str(len(scr1TcesFullPd))
+ ", length of scr1Tces = " + str(len(scr1TcesPd)))
print("length of scr2TcesFull = " + str(len(scr2TcesFullPd))
+ ", length of scr2Tces = " + str(len(scr2TcesPd)))
print("length of scr3TcesFull = " + str(len(scr3TcesFullPd))
+ ", length of scr3Tces = " + str(len(scr3TcesPd)))
```
length of invTcesFull = 19531, length of invTces = 14952
length of scr1TcesFull = 24209, length of scr1Tces = 13778
length of scr2TcesFull = 24217, length of scr2Tces = 13817
length of scr3TcesFull = 19811, length of scr3Tces = 9240
Now convert back to astropy tables, and combine the scrambled TCEs into one list
```python
invTces = Table.from_pandas(invTcesPd)
scr1Tces = Table.from_pandas(scr1TcesPd)
scr2Tces = Table.from_pandas(scr2TcesPd)
scr3Tces = Table.from_pandas(scr3TcesPd)
# create a final set of scrambled TCEs
scrTces = vstack([scr1Tces, scr2Tces, scr3Tces])
# scrTces = scr1Tces
print("length of scrTces = " + str(len(scrTces)))
```
length of scrTces = 36835
Restrict to the chosen stellar population
```python
invTces = invTces[np.in1d(invTces['KIC'],kic.kepid)]
scrTces = scrTces[np.in1d(scrTces['KIC'],kic.kepid)]
print("length of invTces = " + str(len(invTces)))
print("length of scrTces = " + str(len(scrTces)))
```
length of invTces = 1200
length of scrTces = 4166
There are three sets of scrambled TCEs compared to one set of inverted TCEs, so we may subsample the scrambled TCEs by a factor of three.
```python
subSample = np.random.rand(len(scrTces))
# scrTcesSub = scrTces[subSample < 1./3]
scrTcesSub = scrTces
print("length of scrTcesSub = " + str(len(scrTcesSub)))
```
length of scrTcesSub = 4166
Create the final list of synthetic FPs by concatenating the inverted and a subset of the scrambled lists.
```python
# syntheticTrueFps = vstack([invTces,scrTcesSub])
syntheticTrueFps = vstack([invTces,invTces,invTces,scrTcesSub])
print("length of syntheticTrueFps = " + str(len(syntheticTrueFps)))
```
length of syntheticTrueFps = 7766
Restrict to the desired radius and period range
```python
```
```python
spIndex = np.where(np.all([
syntheticTrueFps['Rp']>rpMin,syntheticTrueFps['Rp']<rpMax,\
syntheticTrueFps['period']>periodMin,syntheticTrueFps['period']<periodMax], axis=0))
spSyntheticTrueFps = syntheticTrueFps[spIndex]
print("length of spSyntheticTrueFps = " + str(len(spSyntheticTrueFps)))
```
length of spSyntheticTrueFps = 6743
Separate out the dispositioned PC and FPs
```python
# spSyntheticPcs = spSyntheticTrueFps[(spSyntheticTrueFps['NTL']==0) & (spSyntheticTrueFps['SS']==0)
# & (spSyntheticTrueFps['CO']==0) & (spSyntheticTrueFps['Score']>=scoreCut)]
# spSyntheticFps = spSyntheticTrueFps[(spSyntheticTrueFps['NTL']==1) | (spSyntheticTrueFps['SS']==1)
# | (spSyntheticTrueFps['CO']==1) | (spSyntheticTrueFps['Score']<scoreCut)]
spSyntheticPcs = spSyntheticTrueFps[(spSyntheticTrueFps['Disp']=='PC') & (spSyntheticTrueFps['Score']>=scoreCut)]
spSyntheticFps = spSyntheticTrueFps[(spSyntheticTrueFps['Disp']=='FP') | (spSyntheticTrueFps['Score']<scoreCut)]
print("length of spSyntheticPcs = " + str(len(spSyntheticPcs)))
print("length of spSyntheticFps = " + str(len(spSyntheticFps)))
```
length of spSyntheticPcs = 82
length of spSyntheticFps = 6661
Let's see what this population looks like.
```python
plt.figure(figsize=(10,5));
# plt.subplot(2,2,1);
plt.plot(spSyntheticFps['period'], spSyntheticFps['MES'], ".",
spSyntheticPcs['period'], spSyntheticPcs['MES'], "*");
plt.ylim(mesMin,mesMax);
plt.xlim(periodMin,periodMax);
plt.legend(("inv/scr FPs", "inv/scr PC"));
plt.ylabel('MES');
plt.xlabel('Period');
```

Bin the data so we have n TCEs and c FPs in each bin.
```python
dPeriod = 20;
dMes = 1;
p0 = periodMin;
pEnd = periodMax;
m0 = mesMin;
mEnd = mesMax;
# make the period-mes grid
NPeriod = int((pEnd - p0)/dPeriod);
NMes = int((mEnd - m0)/dMes);
tceGrid = np.zeros((NPeriod,NMes));
cellPeriod = np.zeros((NPeriod,NMes));
cellMes = np.zeros((NPeriod,NMes));
pcGrid = np.zeros((NPeriod,NMes));
fpGrid = np.zeros((NPeriod,NMes));
# count how many points are in each cell
for p in range(NPeriod):
for m in range(NMes):
cellPeriod[(p,m)] = p0 + p*dPeriod;
cellMes[(p,m)] = m0 + m*dMes;
pointsInCell = np.where(
(spSyntheticTrueFps['period'] > cellPeriod[(p,m)])
& (spSyntheticTrueFps['period'] <= cellPeriod[(p,m)]+dPeriod)
& (spSyntheticTrueFps['MES'] > cellMes[(p,m)])
& (spSyntheticTrueFps['MES'] <= cellMes[(p,m)]+dMes));
tceGrid[(p,m)] = len(pointsInCell[0]);
pointsInCell = np.where(
(spSyntheticPcs['period'] > cellPeriod[(p,m)])
& (spSyntheticPcs['period'] <= cellPeriod[(p,m)]+dPeriod)
& (spSyntheticPcs['MES'] > cellMes[(p,m)])
& (spSyntheticPcs['MES'] <= cellMes[(p,m)]+dMes));
pcGrid[(p,m)] = len(pointsInCell[0]);
pointsInCell = np.where(
(spSyntheticFps['period'] > cellPeriod[(p,m)])
& (spSyntheticFps['period'] <= cellPeriod[(p,m)]+dPeriod)
& (spSyntheticFps['MES'] > cellMes[(p,m)])
& (spSyntheticFps['MES'] <= cellMes[(p,m)]+dMes));
fpGrid[(p,m)] = len(pointsInCell[0]);
drawHeatMap(tceGrid, (15,15), cellPeriod, cellMes, colorBarLabel="# of TCEs");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
plt.savefig("fpEffNTces.pdf",bbox_inches='tight')
plt.title("All Inverted/Scrambled TCEs");
drawHeatMap(pcGrid, (15,15), cellPeriod, cellMes);
plt.title("Inverted/Scrambled PCs");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
drawHeatMap(fpGrid, (15,15), cellPeriod, cellMes);
plt.title("Inverted/Scrambled FPs");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
```



Compute the PC and FC fractions in each cell to get a sense of what the fractions look like. These are not used in the inference.
```python
minTcePerCell = 0;
pcFrac = np.zeros(np.shape(tceGrid))
pcFrac[tceGrid>minTcePerCell] = pcGrid[tceGrid>minTcePerCell]/tceGrid[tceGrid>minTcePerCell];
drawHeatMap(np.round(100*pcFrac), (10,10), cellPeriod, cellMes);
plt.title("Inverted/Scrambled PC Fraction (%)");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
```

```python
fpFrac = np.zeros(np.shape(tceGrid))
fpFrac[tceGrid>minTcePerCell] = fpGrid[tceGrid>minTcePerCell]/tceGrid[tceGrid>minTcePerCell];
drawHeatMap(np.round(100*fpFrac), (15,15), cellPeriod, cellMes, colorBarLabel="FP Fraction (%)", nData = tceGrid);
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
plt.savefig("fpEffFPFrac.pdf",bbox_inches='tight')
plt.title("Inverted/Scrambled FP Fraction (%)");
```

Prepare the data for the call to emcee to do the Bayesian inference.
```python
```
```python
cFp = fpGrid.flatten();
nTce = tceGrid.flatten();
# convert to homogeneous coordinates on unit square [0,1]
cellX, cellY = funcModels.normalizeRange(cellPeriod, cellMes, [periodMin, periodMax], [mesMin, mesMax]);
gridShape = np.shape(cellX);
dx = 1./gridShape[0];
dy = 1./gridShape[1];
print("gridShape = " + str(gridShape) + ", dx = " + str(dx) + ", dy = " + str(dy))
cellXFlat = cellX.flatten();
cellYFlat = cellY.flatten();
cFp[nTce<minTcePerCell] = 0;
nTce[nTce<minTcePerCell] = 0;
tceData = [cellXFlat, cellYFlat, nTce, cFp];
```
gridShape = (27, 23), dx = 0.037037037037, dy = 0.0434782608696
We're ready to compute a Bayesian inference of the success probability $r$:
$$p(r|c, n) \propto p(c|r, n) p(r).$$
But we're computing $r$ as a function of period $p$, MES $m$, and parameters $\theta$, $r(\theta, p, m)$. So our inference becomes
$$p(\theta|c, n, p, m) \propto p(c|\theta, n, p, m) p(\theta).$$
Because each cell is independent, we linearize the array to a list of cells indexed by $k$. Then the likelihood for each cell is
$$p(c_k|\theta, n_k, p_k, m_k) = \left( \begin{array}{c_k} n_k \\ c_k \end{array} \right) r(\theta, p_k , m_k )^{c_k} (1-r(\theta, p_k , m_k ))^{n_k-c_k}$$
Because the $N$ cells are independent, the likelihood for the collection of cells is
$$p(c|\theta, n, p, m) \equiv p(c_1, \ldots, c_N|\theta, n_1, \ldots, n_N, p_1, \ldots, p_N, m_1, \ldots, m_N) = \prod_k \left( \begin{array}{c_k} n_k \\ c_k \end{array} \right) r(\theta, p_k , m_k )^{c_k} (1-r(\theta, p_k , m_k ))^{n_k-c_k}.$$
The log-likelihood is then
$$\log p(c|\theta, n, p, m) = \sum_k \log \left(\left( \begin{array}{c_k} n_k \\ c_k \end{array} \right) r(\theta, p_k , m_k )^{c_k} (1-r(\theta, p_k , m_k ))^{n_k-c_k} \right)$$
$$= \sum_k \left[ \log \left(\begin{array}{c_k} n_k \\ c_k \end{array} \right) + c_k \log \left(r(\theta, p_k , m_k ) \right) + \left( n_k-c_k \right) \log(1-r(\theta, p_k , m_k )) \right] $$
Define a nice collection of models to play with.
Define the likelihood and prior functions.
```python
def lnBinlike(theta, data, model):
    """Binomial log-likelihood of the observed FP counts under a rate model.

    theta : parameter vector for the rate model
    data  : (x, y, n, c) — flattened normalized cell coordinates, trial
            counts n, and success counts c
    model : model name string understood by funcModels.rateModel
    """
    x, y, n, c = data
    r = funcModels.rateModel(x,y,theta,model);
    # Guard both 0*log(0) terms.  The original code only masked c*log(r)
    # at c == 0; (n-c)*log(1-r) at c == n with r == 1 evaluated to
    # 0*(-inf) = nan (source of the "invalid value encountered in log"
    # RuntimeWarnings).  Both terms contribute exactly 0 in those cases.
    clnr = np.zeros(np.shape(r));
    cnz = c > 0
    clnr[cnz] = c[cnz]*np.log(r[cnz]);
    nclnr = np.zeros(np.shape(r));
    ncnz = (n - c) > 0
    nclnr[ncnz] = (n[ncnz]-c[ncnz])*np.log(1-r[ncnz]);
    lpl = np.sum(np.log(spec.comb(n,c)) + clnr + nclnr);
    return lpl
def lnBinprior(theta, data, model):
    """Prior support check for the rate-model parameters.

    Returns 1.0 when theta lies inside the model-specific box constraints
    AND the implied rate surface stays within [0, 1] over the whole data
    grid; returns -np.inf otherwise.  Raises ValueError for an unknown
    model name.

    NOTE(review): the in-support return value is 1.0, not 0.0, so despite
    the name this is not a log-prior; lnBinprob only tests finiteness and
    adds it, which shifts the log posterior by a constant — harmless for
    MCMC sampling but worth confirming for the evidence integrals.
    """
    x, y, n, c = data
    # print(theta)
    # Each branch: box constraints on theta, plus the requirement that the
    # model rate is a valid probability everywhere on the grid.
    if model == "constant":
        if 0.0 <= theta[0] <= 1:
            return 1.0
    elif model == "linearX":
        if -5000.0 < theta[0] < 5000.0 and 0.0 < theta[1] < 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "linearXY":
        if -5000.0 < theta[0] < 5000.0 and -5000.0 < theta[1] < 5000.0 \
                and 0.0 < theta[2] < 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "gaussian":
        if 0.45 < theta[0] < 1 and 0 <= theta[1] < 0.5 \
                and 1e-2 < theta[2] < 10 and 1e-2 < theta[3] < 10 and -1 <= theta[4] <= 1 and 0 <= theta[5] <= 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticX":
        if -1 < theta[0] < 2.0 \
                and 1e-4 < theta[1] < 1e4 and 0 < theta[2] < 1e6 and 0 < theta[3] < 1e6 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticY":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 100 and 0 < theta[2] <= 0.1 and 0.9 < theta[3] <= 1 \
                and 0 < theta[2] + theta[3] <= 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticY":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 100 and -180 < theta[2] <= 180 \
                and 0 < theta[3] <= 0.2 and 0.8 < theta[4] <= 1 \
                and 0 < theta[3] + theta[4] <= 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticY2":
        if -1 <= theta[0] <= 2 and 1e-4 < theta[1] < 100 \
                and 0.1 < theta[2] <= 1000 and -180 < theta[3] <= 180 \
                and 0 < theta[4] <= 0.2 and 0.8 < theta[5] <= 1 \
                and 0 < theta[4] + theta[5] <= 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticYXFixedLogisticY":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 100 and -180 < theta[2] <= 180 \
                and 0 < theta[3] <= 0.2 and 0.8 < theta[4] <= 1 \
                and 0 < theta[3] + theta[4] <= 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticYXLogisticY":
        if -1 <= theta[0] <= 2 and 1e-4 < theta[1] < 100 \
                and -1 <= theta[2] <= 2 and 1e-4 < theta[3] < 100 \
                and -180 < theta[4] <= 180 \
                and 0 < theta[5] <= 0.2 and 0.8 < theta[6] <= 1 \
                and 0 < theta[5] + theta[6] <= 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticY2":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 1e4 and 0.1 < theta[2] <= 1000 and 0 < theta[3] <= 0.1 and 0.9 < theta[4] <= 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticX0":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 1e4 and 0 < theta[2] < 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticY0":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 1e4 and 0 < theta[2] <= 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticY02":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 1e4 and 0.1 < theta[2] <= 1000 and 0 < theta[3] <= 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticX0xlogisticY0":
        if 0 <= theta[0] <= 1e3 and 0 <= theta[1] <= 1e3 \
                and 1e-4 < theta[2] < 1e4 and 1e-4 < theta[3] < 1e4 \
                and 0 < theta[4] < 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticX0xlogisticY0":
        if -1 <= theta[0] <= 2 and -1 <= theta[1] <= 2 \
                and 1e-4 < theta[2] < 1e4 and 1e-4 < theta[3] < 1e4 \
                and -3 < theta[4] < 3 and -180 < theta[5] < 180 \
                and 0 < theta[6] < 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticX0":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 100 and 0 < theta[2] <= 1 and -180 < theta[3] < 180 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticX02":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 100 and 0.1 < theta[2] <= 1000 and 0 < theta[3] <= 1 and -180 < theta[4] < 180 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticX0+gaussian":
        if -1 <= theta[0] <= 2 \
                and 1e-4 < theta[1] < 100 and 0 < theta[2] <= 1 and -180 < theta[3] < 180 \
                and 0.45 < theta[4] < 1 and 0 <= theta[5] < 0.5 \
                and 1e-2 < theta[6] < 2 and 1e-2 < theta[7] < 2 and -1 <= theta[8] <= 0 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "dualBrokenPowerLaw":
        # print(funcModels.rateModel(x, y, theta, model))
        if 0 <= theta[0] <= 1 and 0 <= theta[1] <= 1 \
                and -2 < theta[2] < 0 and -2 < theta[3] < 0 and 0 < theta[4] < 1e4 and 0 < theta[5] < 1e4 \
                and 0 < theta[6] < 1 \
                and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
                and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    else:
        raise ValueError('Bad model name');
    # print(theta)
    # Fell through every constraint check: theta is outside the prior.
    return -np.inf
def lnBinprob(theta, data, model):
    """Log posterior (up to a constant): prior support value plus the
    binomial log-likelihood; -inf outside the prior support."""
    priorVal = lnBinprior(theta, data, model)
    if np.isfinite(priorVal):
        return priorVal + lnBinlike(theta, data, model)
    return -np.inf
```
Fit the effectiveness. We perform the MCMC calculation of the log posterior. We use a manual three-part strategy to find the best seed point: first we run the MCMC from the initial values in the model definitions; this MCMC result is used to seed the second iteration, whose result in turn seeds the third and final iteration. We then initialize the MCMC walkers with a small Gaussian distribution around these starting points.
```python
# Choose the effectiveness model to fit and its MCMC seed point.  Seeds for
# previously fitted models are hard-coded from earlier runs; any other model
# falls back to the default initialization in funcModels.
model = "rotatedLogisticX0"
if model == "constant":
    initialPos = funcModels.initRateModel(model);
elif model == "logisticY0":
    initialPos = funcModels.initRateModel(model);
elif model == "logisticY":
    initialPos = [-2.63785585e-02, 20, 0.02, 0.98];
elif model == "rotatedLogisticY":
    # initialPos = [ 0.04944521, 23.31556305, -25.49590252, 0.04268818, 0.95325393];
    initialPos = [ 4.83412482e-02, 3.95216090e+01, -3.82447066e+01, 3.88681300e-02, 9.54596550e-01]
elif model == "rotatedLogisticY2":
    initialPos = [ 4.83412482e-02, 3.95216090e+01, 1, -3.82447066e+01, 3.88681300e-02, 9.54596550e-01]
elif model == "rotatedLogisticYXLogisticY":
    initialPos = [ 0.04944521, 23.31556305, -2.63785585e-02, 20, -25.49590252, 0.04268818, 0.95325393];
elif model == "logisticY02":
    initialPos = [-1.73396324e-01, 10, 2, 9.90011008e-01];
elif model == "gaussian":
    # initialPos = funcModels.initRateModel(model);
    initialPos = [ 0.62, 0.04, 1, 1, -0.01, 9.91039169e-01]
elif model == "dualBrokenPowerLaw":
    initialPos = [ 0.96811698, 0.01257312, -0.00171099, -1e-2, 0.73671063, 0.00555828, 0.99154711]
elif model == "rotatedLogisticX0":
    initialPos = [ 1.2, 9.7715652, 0.99773284, 106.91479715];
elif model == "rotatedLogisticX02":
    initialPos = [ 1.2, 9.7715652, 1.0, 0.99773284, 106.91479715];
elif model == "rotatedLogisticX0+gaussian":
    initialPos = funcModels.initRateModel(model);
elif model == "rotatedLogisticX0xlogisticY0":
    initialPos = [ 1.50565457e+00, -3.09826269e-01, 4.92718937e+03, 1.36096954e+01,
                  5.52126518e-02, -1.23504297e+01, 9.97222157e-01];
else:
    initialPos = funcModels.initRateModel(model);
print(initialPos)
```
[1.2, 9.7715652, 0.99773284, 106.91479715]
```python
import scipy.optimize as op
# Maximize the binomial likelihood (minimize its negative) from initialPos.
nll = lambda *args: -lnBinlike(*args)
result = op.minimize(nll, initialPos, args=(tceData, model))
maxLikelihoodResult = result["x"];
modelLabels = funcModels.getModelLabels(model)
for i in range(0,len(maxLikelihoodResult)):
    print("maximum Likelihood " + modelLabels[i] + ":={:.3f}".format(maxLikelihoodResult[i]))
# The optimizer is unconstrained; if it wandered outside the prior support,
# fall back to the seed point so the MCMC walkers start in-bounds.
if lnBinprior(maxLikelihoodResult, tceData, model) == -np.inf:
    maxLikelihoodResult = initialPos;
    print("violates prior, replacing maxLikelihoodResult result with initialPos")
x, y, n, c = tceData
r = funcModels.rateModel(x,y,maxLikelihoodResult,model);
print("maximum Likelihood rate min = {:.3f}".format(np.min(np.min(r))) + ", max = {:.3f}".format(np.max(np.max(r))))
```
maximum Likelihood $x_0$:=534.194
maximum Likelihood $k_x$:=23.485
maximum Likelihood $A$:=887.214
maximum Likelihood $\phi$:=108.305
violates prior, replacing maxLikelihoodResult result with initialPos
maximum Likelihood rate min = 0.676, max = 0.998
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:6: RuntimeWarning: invalid value encountered in log
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/numpy/core/fromnumeric.py:83: RuntimeWarning: invalid value encountered in reduce
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
```python
import emcee
# 100 walkers in a tight Gaussian ball around the max-likelihood point.
ndim, nwalkers = len(maxLikelihoodResult), 100
pos = [maxLikelihoodResult + 1e-3*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnBinprob, args=(tceData, model))
# sampler = emcee.EnsembleSampler(nwalkers, ndim, lnBinprob, args=(cellXFlat, cellYFlat, nTce, cFp, model))
sampler.run_mcmc(pos, 10000);
# Discard the first 5000 steps as burn-in; flatten walkers into one sample set.
samples = sampler.chain[:, 5000:, :].reshape((-1, ndim))
# Per-parameter (median, +1sigma, -1sigma) from the 16/50/84 percentiles.
dataResult = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                     zip(*np.percentile(samples, [16, 50, 84],
                                        axis=0)))
dataResult = list(dataResult)
np.save("binEffPosteriors_" + str(model) + ".npy", samples)
modelLabels = funcModels.getModelLabels(model)
for i in range(0,ndim):
    v = dataResult[i];
    print("MCMC " + modelLabels[i] + ":={:.3f}".format(v[0]) + "+{:.3f}".format(v[1]) + "-{:.3f}".format(v[2]))
    # print("true " + modelLabels[i] + ":={:.3f}".format(trueTheta[i]))
# print("compared to: rate={:.3f}".format(rate))
resultSize = np.shape(dataResult);
# fitTheta = posterior medians, used as the point estimate hereafter.
fitTheta = np.zeros(resultSize[0]);
for i in range(resultSize[0]):
    fitTheta[i] = dataResult[i][0]
print("fitTheta = " + str(fitTheta))
# Trace plots: one panel per parameter, all walkers overlaid.
plt.figure(figsize=(10,5))
for i in range(0,ndim):
    plt.subplot(ndim,1,i+1)
    plt.plot(np.transpose(sampler.chain[:, :, i]), color="k", alpha=0.1);
    plt.ylabel(modelLabels[i]);
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/emcee/ensemble.py:335: RuntimeWarning: invalid value encountered in subtract
lnpdiff = (self.dim - 1.) * np.log(zz) + newlnprob - lnprob0
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/emcee/ensemble.py:336: RuntimeWarning: invalid value encountered in greater
accept = (lnpdiff > np.log(self._random.rand(len(lnpdiff))))
MCMC $x_0$:=1.165+0.116-0.052
MCMC $k_x$:=23.423+10.755-9.816
MCMC $A$:=0.996+0.001-0.002
MCMC $\phi$:=99.123+7.214-3.272
fitTheta = [ 1.16471528 23.42329557 0.9961747 99.12256661]

```python
import corner
modelLabels = funcModels.getModelLabels(model)
# trueTheta = funcModels.initRateModel(model)
# fig = corner.corner(samples, labels=["$m$", "$b$", "$\ln\,f$"],
#                     truths=[m_true, b_true, np.log(f_true)])
# Corner plot of the posterior, with the medians (fitTheta) marked as truths.
fig = corner.corner(samples, labels = modelLabels, label_kwargs = {"fontsize": 32}, truths = fitTheta)
plt.savefig("fpEffPost.pdf",bbox_inches='tight')
```

We test the result by reconstructing the distribution of FPs and % of FPs using binomial-distributed random numbers, to see if they match the actual data.
```python
# Sanity check: redraw the observed FP counts from the fitted rate model
# and compare the result against the data heat maps.
fitGrid = np.zeros(np.shape(tceGrid));
for p in range(NPeriod):
    for m in range(NMes):
        # Evaluate the model at the cell center.  Fixed: the MES offset
        # previously used dx/2 (the period cell width) instead of dy/2
        # (compare the same evaluation in the nFits loop below).
        fitGrid[(p,m)] = np.random.binomial(tceGrid[(p,m)],
            funcModels.rateModel(cellX[(p,m)]+dx/2, cellY[(p,m)]+dy/2, fitTheta, model), 1);
drawHeatMap(fitGrid, (15,15), cellPeriod, cellMes);
plt.title('reconstructed FPs from the fit');
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
# Reconstructed per-cell FP fraction, only where cells have enough TCEs.
fitFrac = np.zeros(np.shape(tceGrid))
fitFrac[tceGrid>minTcePerCell] = fitGrid[tceGrid>minTcePerCell]/tceGrid[tceGrid>minTcePerCell];
drawHeatMap(np.round(100*fitFrac), (15,15), cellPeriod, cellMes);
plt.title('reconstructed FP rate from the fit');
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
```


```python
from ipywidgets import FloatProgress
from IPython.display import display
# Propagate posterior uncertainty: redraw the FP grid nFits times, each
# time with a parameter vector drawn uniformly from the MCMC samples.
nFits = 1000;
fitGrid = np.zeros([np.shape(tceGrid)[0],np.shape(tceGrid)[1],nFits]);
sidx = [0]*nFits
progress = FloatProgress(min=0, max=nFits)
display(progress)
for f in range(nFits):
    sidx[f] = int(np.random.uniform(high=samples.shape[0]-1));
    tTheta = samples[sidx[f],:]
    for p in range(NPeriod):
        for m in range(NMes):
            rm = funcModels.rateModel(cellX[(p,m)]+dx/2, cellY[(p,m)]+dy/2, tTheta, model)
            # Clamp: a sampled model can slightly exceed a valid probability.
            if rm > 1:
                rm = 1;
            fitGrid[(p,m,f)] = np.random.binomial(tceGrid[(p,m)], rm, 1);
    progress.value += 1
# Per-cell mean and standard deviation over the nFits realizations.
meanFit = np.mean(fitGrid, 2)
stdFit = np.std(fitGrid, 2)
```
FloatProgress(value=0.0, max=1000.0)
```python
drawHeatMap(meanFit, (15,15), cellPeriod, cellMes);
plt.title("Mean reconstructed Observed FPs from the fit");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
# Mean reconstructed FP fraction in sufficiently populated cells.
fitFracMean = np.zeros(np.shape(tceGrid))
fitFracMean[tceGrid>minTcePerCell] = meanFit[tceGrid>minTcePerCell]/tceGrid[tceGrid>minTcePerCell];
drawHeatMap(np.round(100*fitFracMean), (15,15), cellPeriod, cellMes, nData = tceGrid, colorBarLabel="Mean FP %");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
# savefig before plt.title so the saved PDF carries no title.
plt.savefig("fpEffMean.pdf",bbox_inches='tight')
plt.title("Mean reconstructed Observed FP rate from the fit");
# Standard deviation of the reconstructed FP fraction.
stdFrac = np.zeros(np.shape(tceGrid))
stdFrac[tceGrid>minTcePerCell] = stdFit[tceGrid>minTcePerCell]/tceGrid[tceGrid>minTcePerCell];
# NOTE(review): colorBarLabel still says "Mean FP %" on this std panel — confirm intended.
drawHeatMap(np.round(100*stdFrac), (15,15), cellPeriod, cellMes, nData = tceGrid, colorBarLabel="Mean FP %");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
plt.savefig("fpEffStd.pdf",bbox_inches='tight')
plt.title("Standard deviation of the reconstructed Observed FP rate from the fit");
```



```python
# Residual of the mean reconstruction from the data FP fraction, normalized
# by the per-cell standard deviation where it is nonzero.
fitDiff = fitFracMean - fpFrac
fitDiffNorm =np.zeros(fitDiff.shape)
fitDiffNorm[stdFit>0] = fitDiff[stdFit>0]/stdFit[stdFit>0];
drawHeatMap(np.round(100*fitDiff), (15,15), cellPeriod, cellMes, nData = tceGrid);
plt.title("Residual from mean");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
drawHeatMap(np.round(fitDiffNorm, 2), (15,15), cellPeriod, cellMes, nData = tceGrid,
            colorBarLabel="Mean Residual (in standard deviations)", forceInt = False);
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
# savefig before plt.title so the saved PDF carries no title.
plt.savefig("fpEffMeanResid.pdf",bbox_inches='tight')
plt.title("Residual from mean (in standard deviations)");
plt.figure(figsize=(15,5));
# Distribution and median of normalized residuals over populated cells.
plt.hist(fitDiffNorm.flatten()[nTce > 0], 100);
np.median(fitDiffNorm.flatten()[nTce > 0])
```
-0.0276851061867395



```python
```
```python
def binPdf(n, r, c):
    """Binomial pmf: probability of c successes in n trials at rate r."""
    nChooseC = spec.comb(n, c)
    successTerm = r**c
    failureTerm = (1 - r)**(n - c)
    return nChooseC * successTerm * failureTerm
def plot_norm_bin(n, r, scale):
    """Overlay a binomial pmf on the current axes: x-axis in units of the
    observed fraction minus r, y-axis renormalized to peak at `scale`."""
    xx = np.arange(0, n, 0.01)
    ix = np.arange(0, n)  # NOTE(review): ix is unused
    plt.plot(xx/n - r,scale*binPdf(n,r,xx)/max(binPdf(n,r,xx)));
# Residuals of the measured per-cell FP rate from the fitted model rate.
residuals = np.zeros(np.shape(tceGrid));
rateGrid = np.zeros(np.shape(tceGrid));
for p in range(NPeriod):
    for m in range(NMes):
        rateGrid[(p,m)] = funcModels.rateModel(cellX[(p,m)]+dx/2, cellY[(p,m)]+dy/2, fitTheta, model);
        if tceGrid[(p,m)] > 0:
            residuals[(p,m)] = fpGrid[(p,m)]/tceGrid[(p,m)] - rateGrid[(p,m)]
rateFlat = rateGrid.flatten();
residualsFlat = residuals.flatten();
# group residuals by rate
plt.figure(figsize=(15,5));
# Histogram of residuals in high-rate (> 0.9) populated cells, with
# binomial pmf overlays at the mean and 10/50/90th-percentile cell sizes.
histVals = plt.hist(residualsFlat[(rateFlat>0.9)&(nTce>0)], 100);
tcePctiles = np.percentile(nTce[(rateFlat>0.9)&(nTce>0)], [10, 50, 90], axis=0);
plot_norm_bin(np.mean(nTce[(rateFlat>0.9)&(nTce>0)]), np.median(rateFlat), np.max(histVals[0]))
plot_norm_bin(tcePctiles[0], np.median(rateFlat), np.max(histVals[0]))
plot_norm_bin(tcePctiles[1], np.median(rateFlat), np.max(histVals[0]))
plot_norm_bin(tcePctiles[2], np.median(rateFlat), np.max(histVals[0]))
plt.grid(linestyle = ':');
```

```python
np.median(rateFlat)
```
0.9961745222726369
```python
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# Fitted rate surface vs. measured per-cell rates, from three viewpoints.
fig = plt.figure(figsize=plt.figaspect(0.3));
ax = fig.add_subplot(1, 3, 1, projection='3d')
Z = funcModels.rateModel(cellX, cellY, fitTheta, model);
fpFracPlot = fpFrac
# NOTE(review): fpFracPlot is an alias, not a copy, so the next line also
# rewrites fpFrac itself (zeros -> 1) for every later cell — confirm
# intended; fpFrac.copy() would preserve the original array.
fpFrac[fpFrac == 0] = 1
surf = ax.plot_surface(cellPeriod, cellMes, Z, alpha = 0.5);
scat = ax.scatter(cellPeriod, cellMes, fpFrac, c='r', marker = '.');
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,0)
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, Z, alpha = 0.5);
scat = ax.scatter(cellPeriod, cellMes, fpFrac, c='r', marker = '.');
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,-90)
plt.title("Effectiveness");
ax = fig.add_subplot(1, 3, 3, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, Z, alpha = 0.5);
scat = ax.scatter(cellPeriod, cellMes, fpFrac, c='r', marker = '.');
plt.xlabel("period");
plt.ylabel("MES");
# Contour view of the same fitted surface.
fig, ax = plt.subplots(figsize=(5,5));
CS = ax.contour(cellPeriod, cellMes, Z);
ax.clabel(CS, inline=1, fontsize=10);
plt.xlabel("period");
plt.ylabel("MES");
```


```python
funcModels.normalizeRange(372., 8., [periodMin, periodMax], [mesMin, mesMax])
```
(0.5854545454545454, 0.043478260869565216)
```python
# Publication figure: fitted-rate contours over the measured cell rates,
# marker size proportional to the number of TCEs in the cell.
fig, ax = plt.subplots(figsize=(15,10));
Z = funcModels.rateModel(cellX, cellY, fitTheta, model);
CS = ax.contour(cellPeriod, cellMes, Z, colors='k');
ax.clabel(CS, inline=1, fontsize=18);
scf = ax.scatter(cellPeriod[tceGrid>0], cellMes[tceGrid>0], cmap="cividis", c=fpFrac[tceGrid>0], s=5*tceGrid[tceGrid>0], alpha = 0.5);
plt.xlabel("period", fontSize = 24);
plt.ylabel("MES", fontSize = 24);
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Measured Rate", fontSize = 24);
plt.tick_params(labelsize = 16)
# savefig before plt.title so the saved PDF carries no title.
plt.savefig("fpEffContours.pdf",bbox_inches='tight')
plt.title("Vetting Effectiveness. Size of marker = # of TCEs in cell");
```

```python
# Surface plots of (1-E)/E from three viewpoints, where E is the fitted rate.
fig = plt.figure(figsize=plt.figaspect(0.3));
Z0 = funcModels.rateModel(cellX, cellY, fitTheta, model);
Z = (1-Z0)/Z0;
ax = fig.add_subplot(1, 3, 1, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, Z, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,0)
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, Z, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,-90)
plt.title("(1-E)/E");
ax = fig.add_subplot(1, 3, 3, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, Z, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
```

Fit $F_{\mathrm{obsFP}}$.
```python
# AIC at the posterior-median parameters: 2k - 2 ln L.
aic = 2*len(fitTheta) - 2*lnBinlike(fitTheta, tceData, model)
aic
```
269.5446448460733
```python
# AIC at the (possibly prior-replaced) maximum-likelihood parameters.
maxLikelihoodAic = 2*len(maxLikelihoodResult) - 2*lnBinlike(maxLikelihoodResult, tceData, model)
maxLikelihoodAic
```
703.5299257662557
```python
# Log posterior and AIC evaluated for every MCMC sample.
l = np.zeros(np.shape(samples)[0])
aicDist = np.zeros(np.shape(samples)[0])
progress = FloatProgress(min=0, max=samples.shape[0])
display(progress)
for i in range(np.shape(samples)[0]):
    l[i] = lnBinprob(samples[i,:], tceData, model)
    aicDist[i] = 2*len(samples[i,:]) - 2*lnBinlike(samples[i,:], tceData, model)
    progress.value += 1
```
FloatProgress(value=0.0, max=500000.0)
```python
plt.hist(aicDist, 100);
```

```python
minAic = min(aicDist)
```
```python
plt.hist(np.exp(l - np.median(l)), 100);
```

```python
np.median(l)
```
-131.6966103899107
```python
from skmonaco import mcquad
# Integration bounds: the hyper-rectangle spanned by the posterior samples.
lbnds = np.empty([np.shape(samples)[1]])
ubnds = np.empty([np.shape(samples)[1]])
for i in range(len(lbnds)):
    lbnds[i] = np.min(samples[:,i])
    ubnds[i] = np.max(samples[:,i])
# Subtract the median log posterior inside exp() to keep values in range.
regularizationOffset = np.median(l)
def linBinProbInt(theta):
    """Un-normalized posterior density, rescaled for numerical stability."""
    return np.exp(lnBinprob(theta, tceData, model) - regularizationOffset)
# Monte Carlo integral of the posterior over the sampled parameter box.
BF, BFerror = mcquad(linBinProbInt, xl=lbnds,xu=ubnds, npoints=1e7,nprocs=8 )
# Fixed: format the string, then print it.  The original
# print("...").format(...) only parses usefully under the Python 2 print
# statement; under Python 3 it raises AttributeError on print's None return.
print("BF = {:.3e} +/- {:.3e}".format(BF, BFerror))
```
BF = 4.124e-02 +/- 7.955e-04
```python
import os.path
import pickle
# Load the persistent model-comparison table, or create it with one row per
# candidate effectiveness model if this is the first run.
fname = "fpEffectivenessTable.pkl"
if os.path.isfile(fname):
    modelComparisonTable = pd.read_pickle(fname)
else:
    modelComparisonTable = pd.DataFrame({"Model": ["rotatedLogisticX0", "rotatedLogisticX02", "constant",
                                          "dualBrokenPowerLaw", "gaussian", "rotatedLogisticX0xlogisticY0",
                                          "rotatedLogisticX0+gaussian", "rotatedLogisticY", "rotatedLogisticYXLogisticY",
                                          "logisticY", "rotatedLogisticYXFixedLogisticY"],
                               "medianMCMCAIC": [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                               "minMCMCAIC": [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                               "maxLikelihoodAIC": [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                               "MedianLogPost": [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                               "IntegralPost": [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                               "IntegralPostErr": [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                               "medianMCMCTheta": [[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],
                               "maxLikelihoodTheta": [[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],
                               "periodRange": [[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],
                               "mesRange": [[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]]},
                              columns=["Model","BayesFactor","BayesFactorError","AICRelativeProb",
                                       "medianMCMCAIC","minMCMCAIC","maxLikelihoodAIC","IntegralPost",
                                       "IntegralPostErr","MedianLogPost","medianMCMCTheta",
                                       "maxLikelihoodTheta","periodRange","mesRange"])
    # Pretty-print the integral columns (NOTE: this converts them to strings;
    # later cells reconvert with np.double).
    modelComparisonTable['IntegralPost'] = modelComparisonTable['IntegralPost'].map('{:,.3e}'.format)
    modelComparisonTable['IntegralPostErr'] = modelComparisonTable['IntegralPostErr'].map('{:,.3e}'.format)
```
```python
# Row index of the current model in the comparison table.
mctIndex = np.where(modelComparisonTable["Model"].isin([model]))[0][0]
print(mctIndex)
# Store this run's results.  Use .at (label-based scalar/cell access)
# instead of chained indexing (table[col][idx] = ...), which raised
# SettingWithCopyWarning and may silently write to a temporary copy.
modelComparisonTable.at[mctIndex, "medianMCMCAIC"] = aic
modelComparisonTable.at[mctIndex, "minMCMCAIC"] = minAic
modelComparisonTable.at[mctIndex, "maxLikelihoodAIC"] = maxLikelihoodAic
modelComparisonTable.at[mctIndex, "MedianLogPost"] = regularizationOffset
modelComparisonTable.at[mctIndex, "IntegralPost"] = BF
modelComparisonTable.at[mctIndex, "IntegralPostErr"] = BFerror
modelComparisonTable.at[mctIndex, "medianMCMCTheta"] = fitTheta
modelComparisonTable.at[mctIndex, "maxLikelihoodTheta"] = maxLikelihoodResult
modelComparisonTable.at[mctIndex, "periodRange"] = [periodMin, periodMax]
modelComparisonTable.at[mctIndex, "mesRange"] = [mesMin, mesMax]
```
0
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
This is separate from the ipykernel package so we can avoid doing imports until
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:4: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
after removing the cwd from sys.path.
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
"""
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:6: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:7: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
import sys
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:8: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:9: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
if __name__ == '__main__':
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:10: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
# Remove the CWD from sys.path while we load stuff.
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:11: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
# This is added back by InteractiveShellApp.init_path()
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:12: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
if sys.path[0] == '':
```python
modelToCompareIndex = 0
modelComparisonTable["AICRelativeProb"] = 0.
# Reference AIC: the medianMCMCAIC of the comparison model.
minAic = np.min(modelComparisonTable["medianMCMCAIC"][modelToCompareIndex])
# Akaike relative likelihood exp((AIC_ref - AIC_i)/2).  Written as a single
# .loc assignment per row to avoid the chained-indexing
# SettingWithCopyWarning raised by df[col].iloc[i] = ...
for i in range(len(modelComparisonTable)):
    modelComparisonTable.loc[modelComparisonTable.index[i], "AICRelativeProb"] = \
        np.exp((minAic - modelComparisonTable["medianMCMCAIC"].iloc[i])/2.)
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/pandas/core/indexing.py:189: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
self._setitem_with_indexer(indexer, value)
```python
modelComparisonTable["BayesFactor"] = 0.
# Bayes factor of each model relative to the comparison model, undoing the
# per-model regularization offsets applied before the MC integration.
c1 = modelComparisonTable["MedianLogPost"].iloc[modelToCompareIndex]
i1 = np.double(modelComparisonTable["IntegralPost"].iloc[modelToCompareIndex])
# .loc assignment avoids the chained-indexing SettingWithCopyWarning of
# df[col].iloc[i] = ...
for i in range(len(modelComparisonTable)):
    c2 = modelComparisonTable["MedianLogPost"].iloc[i]
    modelComparisonTable.loc[modelComparisonTable.index[i], "BayesFactor"] = \
        np.exp(c2 - c1)*np.double(modelComparisonTable["IntegralPost"].iloc[i])/i1
```
```python
modelComparisonTable["BayesFactorError"] = 0.
# Propagate the Monte Carlo integration errors into each Bayes factor:
# f = A/B  =>  sigma_f = f * sqrt((sA/A)^2 + (sB/B)^2).
B = np.double(modelComparisonTable["IntegralPost"].iloc[modelToCompareIndex])
sB = np.double(modelComparisonTable["IntegralPostErr"].iloc[modelToCompareIndex])
c1 = modelComparisonTable["MedianLogPost"].iloc[modelToCompareIndex]
for i in range(len(modelComparisonTable)):
    c2 = modelComparisonTable["MedianLogPost"].iloc[i]
    f = np.double(modelComparisonTable["BayesFactor"].iloc[i])
    A = np.exp(c2 - c1)*np.double(modelComparisonTable["IntegralPost"].iloc[i])
    sA = np.exp(c2 - c1)*np.double(modelComparisonTable["IntegralPostErr"].iloc[i])
    # .loc assignment avoids the chained-indexing SettingWithCopyWarning.
    modelComparisonTable.loc[modelComparisonTable.index[i], "BayesFactorError"] = \
        f*np.sqrt((sA/A)**2 + (sB/B)**2)
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:10: RuntimeWarning: invalid value encountered in double_scalars
# Remove the CWD from sys.path while we load stuff.
```python
modelComparisonTable
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Model</th>
<th>BayesFactor</th>
<th>BayesFactorError</th>
<th>AICRelativeProb</th>
<th>medianMCMCAIC</th>
<th>minMCMCAIC</th>
<th>maxLikelihoodAIC</th>
<th>IntegralPost</th>
<th>IntegralPostErr</th>
<th>MedianLogPost</th>
<th>medianMCMCTheta</th>
<th>maxLikelihoodTheta</th>
<th>periodRange</th>
<th>mesRange</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>rotatedLogisticX0</td>
<td>1.0</td>
<td>0.027278</td>
<td>1.000000e+00</td>
<td>269.544645</td>
<td>269.143733</td>
<td>703.529926</td>
<td>0.0412423</td>
<td>0.000795512</td>
<td>-131.69661</td>
<td>[1.1647152826183385, 23.42329556776239, 0.9961...</td>
<td>[1.2, 9.7715652, 0.99773284, 106.91479715]</td>
<td>[50, 600]</td>
<td>[7, 30]</td>
</tr>
<tr>
<th>1</th>
<td>rotatedLogisticX02</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>2</th>
<td>constant</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>3</th>
<td>dualBrokenPowerLaw</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>4</th>
<td>gaussian</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>5</th>
<td>rotatedLogisticX0xlogisticY0</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>6</th>
<td>rotatedLogisticX0+gaussian</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>7</th>
<td>rotatedLogisticY</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>8</th>
<td>rotatedLogisticYXLogisticY</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>9</th>
<td>logisticY</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>10</th>
<td>rotatedLogisticYXFixedLogisticY</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
</tbody>
</table>
</div>
```python
modelComparisonTable.to_pickle(fname)
```
```python
# Round-trip check: reload the pickle and display it.
tt = pd.read_pickle(fname)
tt
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Model</th>
<th>BayesFactor</th>
<th>BayesFactorError</th>
<th>AICRelativeProb</th>
<th>medianMCMCAIC</th>
<th>minMCMCAIC</th>
<th>maxLikelihoodAIC</th>
<th>IntegralPost</th>
<th>IntegralPostErr</th>
<th>MedianLogPost</th>
<th>medianMCMCTheta</th>
<th>maxLikelihoodTheta</th>
<th>periodRange</th>
<th>mesRange</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>rotatedLogisticX0</td>
<td>1.0</td>
<td>0.027278</td>
<td>1.000000e+00</td>
<td>269.544645</td>
<td>269.143733</td>
<td>703.529926</td>
<td>0.0412423</td>
<td>0.000795512</td>
<td>-131.69661</td>
<td>[1.1647152826183385, 23.42329556776239, 0.9961...</td>
<td>[1.2, 9.7715652, 0.99773284, 106.91479715]</td>
<td>[50, 600]</td>
<td>[7, 30]</td>
</tr>
<tr>
<th>1</th>
<td>rotatedLogisticX02</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>2</th>
<td>constant</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>3</th>
<td>dualBrokenPowerLaw</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>4</th>
<td>gaussian</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>5</th>
<td>rotatedLogisticX0xlogisticY0</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>6</th>
<td>rotatedLogisticX0+gaussian</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>7</th>
<td>rotatedLogisticY</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>8</th>
<td>rotatedLogisticYXLogisticY</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>9</th>
<td>logisticY</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>10</th>
<td>rotatedLogisticYXFixedLogisticY</td>
<td>0.0</td>
<td>NaN</td>
<td>3.395283e+58</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.00000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
</tbody>
</table>
</div>
```javascript
%%javascript
IPython.notebook.save_notebook()
```
<IPython.core.display.Javascript object>
```bash
%%bash -s "$model"
jupyter nbconvert --to html binomialFPEffectiveness.ipynb
mv binomialFPEffectiveness.html htmlArchive/binomialFPEffectiveness_$1.html
```
[NbConvertApp] Converting notebook binomialFPEffectiveness.ipynb to html
[NbConvertApp] Writing 1829166 bytes to binomialFPEffectiveness.html
```python
```
```python
plt.hist(spSyntheticPcs["Score"])
```
(array([ 6., 11., 12., 18., 10., 10., 2., 0., 10., 3.]),
array([0.051 , 0.1382, 0.2254, 0.3126, 0.3998, 0.487 , 0.5742, 0.6614,
0.7486, 0.8358, 0.923 ]),
<a list of 10 Patch objects>)

```python
np.max(spSyntheticPcs["Score"])
```
0.923
```python
```
|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@GKbaseline_gaiaRadCut@.ipynb_checkpoints@binomialFPEffectiveness-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "kepler.py",
"repo_name": "clemson-cal/sailfish",
"repo_path": "sailfish_extracted/sailfish-master/sailfish/physics/kepler.py",
"type": "Python"
}
|
"""
Code to solve the Kepler two-body problem, and its inverse.
"""
from typing import NamedTuple
from math import sin, cos, sqrt, atan2, pi, floor
"""
Newton's gravitational constant is G=1.0, so mass M really means G M.
"""
NEWTON_G = 1.0
class PointMass(NamedTuple):
    """
    The mass, 2D position, and 2D velocity of a point-like particle
    """

    mass: float
    position_x: float
    position_y: float
    velocity_x: float
    velocity_y: float

    @property
    def kinetic_energy(self) -> float:
        """
        The kinetic energy of a point mass
        """
        # Fixed: the body referred to an undefined name `p`; use `self`.
        vx = self.velocity_x
        vy = self.velocity_y
        return 0.5 * self.mass * (vx * vx + vy * vy)

    @property
    def angular_momentum(self) -> float:
        """
        The angular momentum of a point mass (z-component, about the origin)
        """
        # Fixed: the body referred to an undefined name `p`; use `self`.
        x = self.position_x
        y = self.position_y
        vx = self.velocity_x
        vy = self.velocity_y
        return self.mass * (x * vy - y * vx)

    def gravitational_potential(
        self, x: float, y: float, softening_length: float
    ) -> float:
        """
        Return the gravitational potential of a point mass, with softening.
        """
        dx = x - self.position_x
        dy = y - self.position_y
        r2 = dx * dx + dy * dy
        # Fixed: `.powi(2)` is Rust syntax, not Python.
        s2 = softening_length ** 2.0
        return -NEWTON_G * self.mass / sqrt(r2 + s2)

    def gravitational_acceleration(
        self, x: float, y: float, softening_length: float
    ) -> (float, float):
        """
        Return the gravitational acceleration due to a point mass, with softening.
        """
        dx = x - self.position_x
        dy = y - self.position_y
        r2 = dx * dx + dy * dy
        s2 = softening_length ** 2.0
        # Hoist the shared factor -G m / (r^2 + s^2)^(3/2) instead of
        # computing the 1.5 power twice.
        factor = -NEWTON_G * self.mass / (r2 + s2) ** 1.5
        return (factor * dx, factor * dy)

    def perturb(
        self, dm: float = 0.0, dpx: float = 0.0, dpy: float = 0.0
    ) -> "PointMass":
        """
        Perturb the mass and momentum of a point mass.

        Since the point mass maintains a velocity rather than momentum,
        the velocity is changed according to

        dv = (dp - v dm) / m

        (first order in dm; the division uses the unperturbed mass, as the
        formula above states).
        """
        # Fixed: the body used the undefined names `p` and bare `velocity_x`.
        return self._replace(
            mass=self.mass + dm,
            velocity_x=self.velocity_x + (dpx - self.velocity_x * dm) / self.mass,
            velocity_y=self.velocity_y + (dpy - self.velocity_y * dm) / self.mass,
        )
class OrbitalState(NamedTuple):
    """
    The primary and secondary point masses of a two-body system
    """

    primary: PointMass
    secondary: PointMass

    @property
    def total_mass(self) -> float:
        """
        The sum of the two point masses
        """
        # Fixed: the value was computed but never returned.
        return self[0].mass + self[1].mass

    @property
    def mass_ratio(self) -> float:
        """
        The system mass ratio, secondary / primary
        """
        # Fixed: the value was computed but never returned.
        return self[1].mass / self[0].mass

    @property
    def separation(self) -> float:
        """
        The orbital separation

        This will always be the semi-major axis if the eccentricity is zero.
        """
        x1 = self[0].position_x
        y1 = self[0].position_y
        x2 = self[1].position_x
        y2 = self[1].position_y
        return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)

    @property
    def total_energy(self) -> float:
        """
        The system total energy (kinetic plus gravitational potential)
        """
        # Fixed: `G` was undefined; use the module constant NEWTON_G.
        return (
            self.kinetic_energy
            - NEWTON_G * self[0].mass * self[1].mass / self.separation
        )

    @property
    def kinetic_energy(self) -> float:
        """
        The total kinetic energy of the system
        """
        return self[0].kinetic_energy + self[1].kinetic_energy

    @property
    def angular_momentum(self) -> float:
        """
        The total angular momentum of the system
        """
        return self[0].angular_momentum + self[1].angular_momentum

    def gravitational_potential(
        self, x: float, y: float, softening_length: float
    ) -> float:
        """
        Return the combined gravitational potential at a point, with softening.
        """
        p0 = self[0].gravitational_potential(x, y, softening_length)
        p1 = self[1].gravitational_potential(x, y, softening_length)
        return p0 + p1

    def gravitational_acceleration(
        self, x: float, y: float, softening_length: float
    ) -> (float, float):
        """
        Return the combined gravitational acceleration at a point, with softening.
        """
        a0 = self[0].gravitational_acceleration(x, y, softening_length)
        a1 = self[1].gravitational_acceleration(x, y, softening_length)
        return (a0[0] + a1[0], a0[1] + a1[1])

    def perturb(
        self, dm1: float, dm2: float, dpx1: float, dpx2: float, dpy1: float, dpy2: float
    ) -> "OrbitalState":
        """
        Returns a new orbital state vector if this one is perturbed by the
        given masses and momenta.

        - :code:`dm1` Mass added to the primary
        - :code:`dm2` Mass added to the secondary
        - :code:`dpx1` Impulse (x) added to the primary
        - :code:`dpx2` Impulse (x) added to the secondary
        - :code:`dpy1` Impulse (y) added to the primary
        - :code:`dpy2` Impulse (y) added to the secondary
        """
        # Fixed: PointMass has no `perturb_mass_and_momentum` method; the
        # method is named `perturb(dm, dpx, dpy)`.
        return OrbitalState(
            self[0].perturb(dm1, dpx1, dpy1),
            self[1].perturb(dm2, dpx2, dpy2),
        )

    def orbital_parameters(self, t: float) -> ("OrbitalElements", "OrbitalOrientation"):
        """
        Compute the inverse Kepler two-body problem.

        This function determines the orbital elements and orientation from the
        orbital state vector and an absolute time.

        :raises ValueError: if the orbit is unbound (total energy >= 0)
        """
        c1 = self[0]
        c2 = self[1]

        # component masses, total mass, and mass ratio
        m1 = c1.mass
        m2 = c2.mass
        m = m1 + m2
        q = m2 / m1

        # position and velocity of the CM frame
        x_cm = (c1.position_x * c1.mass + c2.position_x * c2.mass) / m
        y_cm = (c1.position_y * c1.mass + c2.position_y * c2.mass) / m
        vx_cm = (c1.velocity_x * c1.mass + c2.velocity_x * c2.mass) / m
        vy_cm = (c1.velocity_y * c1.mass + c2.velocity_y * c2.mass) / m

        # positions and velocities of the components in the CM frame
        x1 = c1.position_x - x_cm
        y1 = c1.position_y - y_cm
        x2 = c2.position_x - x_cm
        y2 = c2.position_y - y_cm
        r1 = sqrt(x1 * x1 + y1 * y1)
        r2 = sqrt(x2 * x2 + y2 * y2)
        vx1 = c1.velocity_x - vx_cm
        vy1 = c1.velocity_y - vy_cm
        vx2 = c2.velocity_x - vx_cm
        vy2 = c2.velocity_y - vy_cm
        # azimuthal velocity components (projection onto the tangential direction)
        vf1 = -vx1 * y1 / r1 + vy1 * x1 / r1
        vf2 = -vx2 * y2 / r2 + vy2 * x2 / r2
        v1 = sqrt(vx1 * vx1 + vy1 * vy1)

        # energy and angular momentum (t := kinetic energy, l := angular
        # momentum, h := total energy)
        t1 = 0.5 * m1 * (vx1 * vx1 + vy1 * vy1)
        t2 = 0.5 * m2 * (vx2 * vx2 + vy2 * vy2)
        l1 = m1 * r1 * vf1
        l2 = m2 * r2 * vf2
        r = r1 + r2
        l = l1 + l2
        # Fixed: `G` was undefined; use the module constant NEWTON_G.
        h = t1 + t2 - NEWTON_G * m1 * m2 / r

        if h >= 0.0:
            raise ValueError("the orbit is unbound")

        # semi-major, semi-minor axes eccentricity, apsides
        a = -0.5 * NEWTON_G * m1 * m2 / h
        b = sqrt(-0.5 * l * l / h * (m1 + m2) / (m1 * m2))
        e = sqrt(clamp_between_zero_and_one(1.0 - b * b / a / a))
        # Fixed: `G` was undefined here as well.
        omega = sqrt(NEWTON_G * m / a / a / a)

        # semi-major and semi-minor axes of the primary
        a1 = a * q / (1.0 + q)
        b1 = b * q / (1.0 + q)

        # cos of nu and f: phase angle and true anomaly
        if e == 0.0:
            cn = x1 / r1
        else:
            cn = (1.0 - r1 / a1) / e
        cf = a1 / r1 * (cn - e)

        # sin of nu and f
        if e == 0.0:
            sn = y1 / r1
        else:
            sn = (vx1 * x1 + vy1 * y1) / (e * v1 * r1) * sqrt(1.0 - e * e * cn * cn)
        sf = (b1 / r1) * sn

        # cos and sin of eccentric anomaly
        ck = (e + cf) / (1.0 + e * cf)
        sk = sqrt(1.0 - e * e) * sf / (1.0 + e * cf)

        # mean anomaly and tau (time of most recent periapse passage)
        k = atan2(sk, ck)
        n = k - e * sk
        tau = t - n / omega

        # cartesian components of semi-major axis, and the argument of periapse
        ax = (cn - e) * x1 + sn * sqrt(1.0 - e * e) * y1
        ay = (cn - e) * y1 - sn * sqrt(1.0 - e * e) * x1
        pomega = atan2(ay, ax)

        # final result
        elements = OrbitalElements(a, m, q, e)
        orientation = OrbitalOrientation(x_cm, y_cm, vx_cm, vy_cm, pomega, tau)
        return elements, orientation
class OrbitalOrientation(NamedTuple):
    """
    The position, velocity, and orientation of a two-body orbit
    """
    # Centre-of-mass position and velocity in the lab frame.
    cm_position_x: float
    cm_position_y: float
    cm_velocity_x: float
    cm_velocity_y: float
    # Argument of periapse (used as a rotation angle by
    # OrbitalElements.orbital_state_with_orientation) and the absolute time
    # of a periapse passage.
    periapse_argument: float
    periapse_time: float
class OrbitalElements(NamedTuple):
    """
    The orbital elements of a two-body system on a bound orbit
    """
    # Intrinsic (orientation-free) elements of the orbit.
    semimajor_axis: float
    total_mass: float
    mass_ratio: float
    eccentricity: float
    @property
    def omega(self) -> float:
        """
        The orbital angular frequency (Kepler's third law, with G = NEWTON_G)
        """
        m = self.total_mass
        a = self.semimajor_axis
        return sqrt(NEWTON_G * m / a / a / a)
    @property
    def period(self) -> float:
        """
        The orbital period
        """
        return 2.0 * pi / self.omega
    @property
    def angular_momentum(self) -> float:
        """
        The orbital angular momentum
        """
        a = self.semimajor_axis
        m = self.total_mass
        q = self.mass_ratio
        e = self.eccentricity
        # Component masses recovered from the total mass and mass ratio.
        m1 = m / (1.0 + q)
        m2 = m - m1
        # L = mu * sqrt(G M a (1 - e^2)), with reduced mass mu = m1 m2 / M.
        return m1 * m2 / m * sqrt(NEWTON_G * m * a * (1.0 - e * e))
    def orbital_state_from_eccentric_anomaly(
        self, eccentric_anomaly: float
    ) -> OrbitalState:
        """
        Compute the orbital state, given the eccentric anomaly.

        The returned state is in the orbit frame: centre of mass at the
        origin, periapse along the x axis.
        """
        a = self.semimajor_axis
        m = self.total_mass
        q = self.mass_ratio
        e = self.eccentricity
        w = self.omega
        m1 = m / (1.0 + q)
        m2 = m - m1
        ck = cos(eccentric_anomaly)
        sk = sin(eccentric_anomaly)
        # Primary's position on its ellipse; the secondary mirrors it scaled
        # by the mass ratio so the centre of mass stays at the origin.
        x1 = -a * q / (1.0 + q) * (e - ck)
        y1 = +a * q / (1.0 + q) * (sk) * sqrt(1.0 - e * e)
        x2 = -x1 / q
        y2 = -y1 / q
        vx1 = -a * q / (1.0 + q) * w / (1.0 - e * ck) * sk
        vy1 = +a * q / (1.0 + q) * w / (1.0 - e * ck) * ck * sqrt(1.0 - e * e)
        vx2 = -vx1 / q
        vy2 = -vy1 / q
        c1 = PointMass(m1, x1, y1, vx1, vy1)
        c2 = PointMass(m2, x2, y2, vx2, vy2)
        return OrbitalState(c1, c2)
    def eccentric_anomaly(self, time_since_periapse: float) -> float:
        """
        Compute the eccentric anomaly from the time since any periapse.

        Solves Kepler's equation E - e sin(E) = M numerically.
        """
        p = self.period
        # Reduce the elapsed time to one orbital period.
        t = time_since_periapse - self.period * floor(time_since_periapse / p)
        e = self.eccentricity
        n = self.omega * t  # n := mean anomaly M
        f = lambda k: k - e * sin(k) - n  # k := eccentric anomaly E
        g = lambda k: 1.0 - e * cos(k)
        # The mean anomaly is a good initial guess for small eccentricity.
        return solve_newton_rapheson(f, g, n)
    def orbital_state(self, time_since_periapse: float) -> OrbitalState:
        """
        Compute the orbital state vector from the time since any periapse.
        """
        E = self.eccentric_anomaly(time_since_periapse)
        return self.orbital_state_from_eccentric_anomaly(E)
    def orbital_state_with_orientation(
        self, absolute_time, orientation: OrbitalOrientation
    ) -> OrbitalState:
        """
        Compute the orbital state from an absolute time and orientation.

        The orbit-frame state is rotated through the periapse argument and
        translated by the centre-of-mass position and velocity.
        """
        t = absolute_time - orientation.periapse_time
        E = self.eccentric_anomaly(t)
        state = self.orbital_state_from_eccentric_anomaly(E)
        m1 = state[0].mass
        m2 = state[1].mass
        x1 = state[0].position_x
        x2 = state[1].position_x
        y1 = state[0].position_y
        y2 = state[1].position_y
        vx1 = state[0].velocity_x
        vx2 = state[1].velocity_x
        vy1 = state[0].velocity_y
        vy2 = state[1].velocity_y
        # Rotation by -periapse_argument, applied to both positions and
        # velocities, followed by the CM translation/boost.
        c = cos(-orientation.periapse_argument)
        s = sin(-orientation.periapse_argument)
        x1p = +x1 * c + y1 * s + orientation.cm_position_x
        y1p = -x1 * s + y1 * c + orientation.cm_position_y
        x2p = +x2 * c + y2 * s + orientation.cm_position_x
        y2p = -x2 * s + y2 * c + orientation.cm_position_y
        vx1p = +vx1 * c + vy1 * s + orientation.cm_velocity_x
        vy1p = -vx1 * s + vy1 * c + orientation.cm_velocity_y
        vx2p = +vx2 * c + vy2 * s + orientation.cm_velocity_x
        vy2p = -vx2 * s + vy2 * c + orientation.cm_velocity_y
        c1 = PointMass(m1, x1p, y1p, vx1p, vy1p)
        c2 = PointMass(m2, x2p, y2p, vx2p, vy2p)
        return OrbitalState(c1, c2)
def solve_newton_rapheson(f, g, x: float) -> float:
    """
    Solve f(x) = 0 by Newton-Raphson iteration.

    :param f: the function whose root is sought
    :param g: the derivative of f
    :param x: the initial guess
    :raises ValueError: if the residual has not converged after 11 updates
    """
    # Evaluate f once per iteration; the original evaluated it twice (once
    # in the loop condition, once in the update step).  The iteration budget
    # (11 updates before raising) matches the original behavior.
    for _ in range(11):
        fx = f(x)
        if abs(fx) <= 1e-15:
            return x
        x -= fx / g(x)
    raise ValueError("solve_newton_rapheson: no solution")
def clamp_between_zero_and_one(x: float) -> float:
    """Return x clipped to the closed interval [0, 1]."""
    # Keep the exact min(1, max(0, x)) evaluation order of the original
    # (it determines the result for non-finite inputs).
    lower_bounded = max(0.0, x)
    return min(1.0, lower_bounded)
|
clemson-calREPO_NAMEsailfishPATH_START.@sailfish_extracted@sailfish-master@sailfish@physics@kepler.py@.PATH_END.py
|
{
"filename": "medium_filter.py",
"repo_name": "dfm/python-bls",
"repo_path": "python-bls_extracted/python-bls-main/medium_filter.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
import scipy.ndimage
# NOTE: this module is tailored to DFM's injected light curves (LCs) and is
# not a general-purpose detrender.
def find_da_gap_bitch(lc, pidx):
    """Locate runs of consecutive NaN samples in a 1-D flux array.

    Returns a list of ``(length, indices)`` pairs, one per NaN run.

    NOTE: ``pidx`` is unused here (the column selection is commented out in
    the original); ``lc`` is treated directly as the flux array.  A NaN run
    that reaches the final sample is not reported, matching the original
    behavior (callers rely on every reported gap having a valid neighbour
    on both sides).
    """
    flux = lc
    runs = []
    current = []
    for index in range(len(flux) - 1):
        if not numpy.isnan(flux[index]):
            continue
        current.append(index)
        if not numpy.isnan(flux[index + 1]):
            # The sample after this one is finite, so the run ends here.
            runs.append(current)
            current = []
    return [(len(run), run) for run in runs]
def fill_da_gap_bitch(v1, v2, n):
    """Linearly interpolate ``n`` interior points between ``v1`` and ``v2``.

    v1 -- flux value just before the gap starts
    v2 -- flux value just after the gap ends
    n  -- number of missing points inside the gap
    """
    # Sample the straight line through (0, v1) and (1, v2) at n + 2 evenly
    # spaced abscissae, then drop the two endpoints (they are the known
    # bracketing samples).
    ramp = numpy.linspace(0.0, 1.0, n + 2)
    return ((v2 - v1) * ramp + v1)[1:-1]
def slice_da_bitch_up(lc, pidx, hsize=10):
    """Fill short NaN gaps in place and return chunk boundary indices.

    Gaps shorter than ``hsize`` samples are replaced -- MUTATING ``lc`` in
    place -- by a linear interpolation between the samples that bracket
    them; longer gaps become chunk boundaries.  The returned flat list has
    the form ``[start0, end0, start1, end1, ...]`` beginning at 0 and
    ending at ``len(lc)``, suitable for pairwise slicing.
    """
    flux = lc  # column selection via pidx is disabled; lc is already 1-D flux
    boundaries = [0]
    for length, indices in find_da_gap_bitch(lc, pidx):
        first = indices[0]
        last = indices[-1]
        if length < hsize:
            # Bridge the short gap with a straight line between its
            # finite neighbours.
            flux[numpy.array(indices)] = fill_da_gap_bitch(
                flux[first - 1], flux[last + 1], length)
        else:
            boundaries.append(first)
            boundaries.append(last)
    boundaries.append(len(lc))
    return boundaries
def detrend_using_mf(lc, flavor='SAP', size=14, mode='constant'):
    """Detrend a light curve by dividing it by a running median trend.

    Parameters
    ----------
    lc : array
        Flux samples (treated as 1-D; the column selection implied by
        ``flavor`` is currently disabled -- see note below).
    flavor : str
        'SAP' or 'PDC'; selects the flux column index (1 or 3) forwarded to
        ``slice_da_bitch_up``.
    size : int or 2-element sequence
        An int gives a plain centred median window of that width::

                          size
            <.......--------------........>

        A pair ``(sidesize, gapsize)`` builds a window with ``sidesize``
        live samples on each side of a ``gapsize``-wide hole::

                size[0]  size[1]  size[0]
            <...--------_______--------...>
    mode : str
        Boundary mode forwarded to ``scipy.ndimage.median_filter``.

    Returns
    -------
    numpy.ndarray
        A copy of ``lc`` with each contiguous chunk divided by its median
        trend.
    """
    pidx = {'SAP': 1, 'PDC': 3}[flavor]
    # NOTE(review): slice_da_bitch_up fills short NaN gaps in `lc` IN PLACE.
    lc_chunks = slice_da_bitch_up(lc, pidx)
    dtlc = numpy.array(lc)  # detrended output (a copy of the input)
    for i in range(0, len(lc_chunks), 2):
        chunk = lc[lc_chunks[i]:lc_chunks[i + 1]]
        try:
            # A sequence-valued `size` selects the holed-window variant;
            # indexing an int raises TypeError, which falls through to the
            # plain centred filter below (same dispatch as the original).
            sidesize = size[0]
            gapsize = size[1]
        except TypeError:
            # Fixed: scipy.ndimage.filters is a long-deprecated namespace
            # (removed from modern SciPy); call scipy.ndimage.median_filter.
            trend = scipy.ndimage.median_filter(
                chunk, size=size, mode=mode, cval=1.0)
        else:
            footprint = [True] * sidesize + [False] * gapsize + [True] * sidesize
            trend = scipy.ndimage.median_filter(
                chunk, footprint=footprint, mode=mode, cval=1.0)
        dtlc[lc_chunks[i]:lc_chunks[i + 1]] = chunk / trend
    return dtlc
if __name__ == '__main__':
    # Demo: detrend one Kepler long-cadence light curve and plot the SAP and
    # PDC results for both the plain and the holed median windows, each
    # vertically offset so the curves do not overlap.
    import matplotlib.pyplot as plt
    # NOTE(review): hard-coded absolute path to one user's data file; this
    # demo only runs on that machine.
    fn='/Users/angusr/angusr/data2/SAMSI/kplr000892203-2009131105131_llc.txt'
    lc = numpy.loadtxt(fn,delimiter=',')
    # NOTE(review): detrend_using_mf currently treats `lc` as a 1-D flux
    # array (its column selection is commented out), yet `lc` here is 2-D
    # from loadtxt and lc[:,0] is used as the time axis below -- confirm
    # which shape is intended.
    dt = detrend_using_mf(lc,size=15)
    dth = detrend_using_mf(lc,size=(10,10))
    plt.plot(lc[:,0],dt)
    plt.plot(lc[:,0],dth+0.005)
    dt = detrend_using_mf(lc,flavor='PDC',size=15)
    dth = detrend_using_mf(lc,flavor='PDC',size=(10,10))
    plt.plot(lc[:,0],dt+0.015)
    plt.plot(lc[:,0],dth+0.02)
    plt.show()
|
dfmREPO_NAMEpython-blsPATH_START.@python-bls_extracted@python-bls-main@medium_filter.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/waterfall/hoverlabel/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Font settings for waterfall-trace hover labels.

    Part of the plotly ``graph_objs`` hierarchy (path
    ``waterfall.hoverlabel.font``); follows the standard generated
    property/validator pattern of that package.
    """
    # class properties
    # --------------------
    _parent_path_str = "waterfall.hoverlabel"
    _path_str = "waterfall.hoverlabel.font"
    # The only keys accepted by this node; anything else is rejected (or
    # skipped when skip_invalid is set) by the base-class machinery.
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  color .

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]
    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  family .

        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["familysrc"]
    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  size .

        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["sizesrc"]
    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """
    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Font object

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.waterfall.hoverlabel.Font`
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Internal construction path: adopt the given parent and skip
        # validation entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.waterfall.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.hoverlabel.Font`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # For each property: an explicit keyword argument wins over the
        # same key inside `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@waterfall@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "error.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/fonttools/fontTools/voltLib/error.py",
"type": "Python"
}
|
class VoltLibError(Exception):
    """Error raised while processing a VOLT source.

    Carries an optional ``location`` triple ``(path, line, column)``; when
    present it is prepended to the message as ``path:line:column:``.
    """

    def __init__(self, message, location):
        super(VoltLibError, self).__init__(message)
        self.location = location

    def __str__(self):
        base = Exception.__str__(self)
        if not self.location:
            return base
        path, line, column = self.location
        return "%s:%d:%d: %s" % (path, line, column, base)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@fonttools@fontTools@voltLib@error.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Keck-DataReductionPipelines/KPF-Pipeline",
"repo_path": "KPF-Pipeline_extracted/KPF-Pipeline-master/kpfpipe/models/metadata/__init__.py",
"type": "Python"
}
|
Keck-DataReductionPipelinesREPO_NAMEKPF-PipelinePATH_START.@KPF-Pipeline_extracted@KPF-Pipeline-master@kpfpipe@models@metadata@__init__.py@.PATH_END.py
|
|
{
"filename": "_width.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/surface/contours/z/_width.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``surface.contours.z.width`` (min 1, max 16)."""

    def __init__(self, plotly_name="width", parent_name="surface.contours.z", **kwargs):
        # Pop the overridable defaults out of kwargs before delegating, so
        # an explicit caller value wins over the generated default.
        options = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "max": kwargs.pop("max", 16),
            "min": kwargs.pop("min", 1),
        }
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **options,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@surface@contours@z@_width.py@.PATH_END.py
|
{
"filename": "vis_header.py",
"repo_name": "OxfordSKA/OSKAR",
"repo_path": "OSKAR_extracted/OSKAR-master/python/oskar/vis_header.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2020, The University of Oxford
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the University of Oxford nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""Interfaces to the OSKAR visibility header."""
from __future__ import absolute_import, division, print_function
from oskar.binary import Binary
try:
from . import _vis_header_lib
except ImportError:
_vis_header_lib = None
# pylint: disable=useless-object-inheritance
class VisHeader(object):
    """This class provides a Python interface to an OSKAR visibility header.

    The :class:`oskar.VisHeader` class encapsulates the small amount of
    metadata used to describe a visibility data set generated by OSKAR, and
    it will be saved as part of any visibility data files written by the
    :class:`oskar.Interferometer` class.

    To load the header data from an OSKAR visibility binary file (note this
    is not a CASA Measurement Set, although it can be converted into one),
    use the :meth:`read() <oskar.VisHeader.read>` class method. This will
    return a tuple containing both the header data and also a handle to the
    binary file, which can be used to read subsequent :class:`oskar.VisBlock`
    data structures from the same file.

    The visibility data header for the current simulation can be returned
    using the method :meth:`oskar.Interferometer.vis_header` if required.

    Examples:
        To load the header from a file ``example.vis`` and print some values
        from it:

        >>> (header, handle) = oskar.VisHeader.read('example.vis')
        >>> print(header.num_blocks)
        3
        >>> print(header.num_times_total)
        24
        >>> print(header.num_channels_total)
        3
        >>> print(header.freq_start_hz)
        100000000.0
        >>> print(header.freq_inc_hz)
        20000000.0
        >>> print(header.phase_centre_ra_deg)
        20.0
        >>> print(header.phase_centre_dec_deg)
        -29.999999999999996
    """
    # The native oskar_VisHeader struct is held in an opaque PyCapsule
    # (self._capsule); every accessor below delegates to the C library
    # after checking the capsule has been populated.

    def __init__(self):
        """Constructs a handle to a visibility header."""
        if _vis_header_lib is None:
            raise RuntimeError("OSKAR library not found.")
        # The capsule is only populated later, via capsule_set()
        # (e.g. by read() or by the Interferometer class).
        self._capsule = None

    def capsule_ensure(self):
        """Ensures the C capsule exists."""
        if self._capsule is None:
            raise RuntimeError(
                "Call Interferometer.vis_header() for the visibility header.")

    def capsule_get(self):
        """Returns the C capsule wrapped by the class."""
        return self._capsule

    def capsule_set(self, new_capsule):
        """Sets the C capsule wrapped by the class.

        Args:
            new_capsule (capsule): The new capsule to set.
        """
        # Verify the capsule type before replacing the current one so a
        # wrong capsule cannot leave the object in a broken state.
        if _vis_header_lib.capsule_name(new_capsule) == 'oskar_VisHeader':
            del self._capsule
            self._capsule = new_capsule
        else:
            raise RuntimeError("Capsule is not of type oskar_VisHeader.")

    def get_amp_type(self):
        """Returns the OSKAR data type of the visibility amplitude array."""
        self.capsule_ensure()
        return _vis_header_lib.amp_type(self._capsule)

    def get_channel_bandwidth_hz(self):
        """Returns the width of each frequency channel, in Hz."""
        self.capsule_ensure()
        return _vis_header_lib.channel_bandwidth_hz(self._capsule)

    def get_coord_precision(self):
        """Returns the OSKAR data type of the baseline coordinate arrays."""
        self.capsule_ensure()
        return _vis_header_lib.coord_precision(self._capsule)

    def get_freq_inc_hz(self):
        """Returns the frequency channel increment, in Hz."""
        self.capsule_ensure()
        return _vis_header_lib.freq_inc_hz(self._capsule)

    def get_freq_start_hz(self):
        """Returns the frequency of the first channel, in Hz."""
        self.capsule_ensure()
        return _vis_header_lib.freq_start_hz(self._capsule)

    def get_max_channels_per_block(self):
        """Returns the maximum number of channels per visibility block."""
        self.capsule_ensure()
        return _vis_header_lib.max_channels_per_block(self._capsule)

    def get_max_times_per_block(self):
        """Returns the maximum number of time samples per visibility block."""
        self.capsule_ensure()
        return _vis_header_lib.max_times_per_block(self._capsule)

    def get_num_blocks(self):
        """Returns the expected number of visibility blocks for a run."""
        self.capsule_ensure()
        return _vis_header_lib.num_blocks(self._capsule)

    def get_num_channels_total(self):
        """Returns the total number of frequency channels."""
        self.capsule_ensure()
        return _vis_header_lib.num_channels_total(self._capsule)

    def get_num_stations(self):
        """Returns the number of stations."""
        self.capsule_ensure()
        return _vis_header_lib.num_stations(self._capsule)

    def get_num_tags_per_block(self):
        """Returns the number of binary data tags per visibility block."""
        self.capsule_ensure()
        return _vis_header_lib.num_tags_per_block(self._capsule)

    def get_num_times_total(self):
        """Returns the total number of time samples."""
        self.capsule_ensure()
        return _vis_header_lib.num_times_total(self._capsule)

    def get_phase_centre_ra_deg(self):
        """Returns the phase centre Right Ascension, in degrees."""
        self.capsule_ensure()
        return _vis_header_lib.phase_centre_ra_deg(self._capsule)

    def get_phase_centre_dec_deg(self):
        """Returns the phase centre Declination, in degrees."""
        self.capsule_ensure()
        return _vis_header_lib.phase_centre_dec_deg(self._capsule)

    def get_time_start_mjd_utc(self):
        """Returns the start time, as MJD(UTC)."""
        self.capsule_ensure()
        return _vis_header_lib.time_start_mjd_utc(self._capsule)

    def get_time_inc_sec(self):
        """Returns the time increment, in seconds."""
        self.capsule_ensure()
        return _vis_header_lib.time_inc_sec(self._capsule)

    def get_time_average_sec(self):
        """Returns the time averaging period, in seconds."""
        self.capsule_ensure()
        return _vis_header_lib.time_average_sec(self._capsule)

    # Properties: read-only views of the header fields (capsule is the
    # only writable one).
    amp_type = property(get_amp_type)
    capsule = property(capsule_get, capsule_set)
    channel_bandwidth_hz = property(get_channel_bandwidth_hz)
    coord_precision = property(get_coord_precision)
    freq_inc_hz = property(get_freq_inc_hz)
    freq_start_hz = property(get_freq_start_hz)
    max_channels_per_block = property(get_max_channels_per_block)
    max_times_per_block = property(get_max_times_per_block)
    num_blocks = property(get_num_blocks)
    num_channels_total = property(get_num_channels_total)
    num_stations = property(get_num_stations)
    num_tags_per_block = property(get_num_tags_per_block)
    num_times_total = property(get_num_times_total)
    phase_centre_ra_deg = property(get_phase_centre_ra_deg)
    phase_centre_dec_deg = property(get_phase_centre_dec_deg)
    time_start_mjd_utc = property(get_time_start_mjd_utc)
    time_inc_sec = property(get_time_inc_sec)
    time_average_sec = property(get_time_average_sec)

    @classmethod
    def read(cls, binary_file):
        """Reads a visibility header from an OSKAR binary file and returns it.

        Args:
            binary_file (str or oskar.Binary):
                Path or handle to an OSKAR binary file.

        Returns:
            tuple: A two-element tuple containing the visibility header and
            a handle to the OSKAR binary file, opened for reading.
        """
        if _vis_header_lib is None:
            raise RuntimeError("OSKAR library not found.")
        hdr = VisHeader()
        # If an open handle was supplied, read from it directly and hand
        # the same handle back to the caller.
        if isinstance(binary_file, Binary):
            hdr.capsule = _vis_header_lib.read_header(binary_file.capsule)
            return (hdr, binary_file)
        # Otherwise open the file for reading; the handle is returned so it
        # stays alive for subsequent VisBlock reads from the same file.
        fhan = Binary(binary_file, b'r')
        hdr.capsule = _vis_header_lib.read_header(fhan.capsule)
        return (hdr, fhan)
|
OxfordSKAREPO_NAMEOSKARPATH_START.@OSKAR_extracted@OSKAR-master@python@oskar@vis_header.py@.PATH_END.py
|
{
"filename": "_hoverinfo.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/heatmapgl/_hoverinfo.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
    def __init__(self, plotly_name="hoverinfo", parent_name="heatmapgl", **kwargs):
        # Validator defaults for the heatmapgl hoverinfo flag list;
        # any caller-supplied keyword takes precedence.
        defaults = {
            "array_ok": True,
            "edit_type": "none",
            "extras": ["all", "none", "skip"],
            "flags": ["x", "y", "z", "text", "name"],
        }
        for option, value in defaults.items():
            kwargs.setdefault(option, value)
        super(HoverinfoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@heatmapgl@_hoverinfo.py@.PATH_END.py
|
{
"filename": "CONTRIBUTING.md",
"repo_name": "RadioAstronomySoftwareGroup/pyuvdata",
"repo_path": "pyuvdata_extracted/pyuvdata-main/.github/CONTRIBUTING.md",
"type": "Markdown"
}
|
# Contributing to pyuvdata
Thank you for considering contributing to pyuvdata! It's our community of users and contributors that makes pyuvdata powerful and relevant.
pyuvdata is an open source project, driven by our community of contributors. There are many ways to contribute, including writing tutorials, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into pyuvdata itself. Following the guidelines and patterns suggested below helps us maintain a high quality code base and review your contributions faster.
Please note that we have a [code of conduct](../CODE_OF_CONDUCT.md); please follow it in all your interactions with the project.
## How to report a bug
First check the issues to see if your bug already exists. Feel free to comment on the existing issue to provide more context or just to note that it is affecting you as well. If your bug is not in the issue list, make a new issue.
When making an issue, try to provide as much context as possible including:
1. What version of python and pyuvdata are you using?
2. What operating system are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?
6. Any code to reproduce the bug (as minimal an example as possible)
If you're really inspired, you can make a pull request adding a test that fails because of the bug. This is likely to lead to the bug being fixed more quickly.
## How to suggest a feature or enhancement
First check the issues to see if your feature request already exists. Feel free to comment on the existing issue to provide more context or just to note that you would like to see the feature implemented as well. If your feature request is not in the issue list, make a new issue.
When making a feature request, try to provide as much context as possible. Feel free to include suggestions for implementations.
## Guidelines for contributing to the code
* Create issues for any major changes and enhancements that you wish to make. Discuss things transparently and get community feedback.
* Keep pull requests as small as possible. Ideally each pull request should implement ONE feature or bugfix. If you want to add or fix more than one thing, submit more than one pull request.
* Do not commit changes to files that are irrelevant to your feature or bugfix.
* Be aware that the pull request review process is not immediate, and is generally proportional to the size of the pull request.
* Be welcoming to newcomers and encourage diverse new contributors from all backgrounds. See our [Code of Conduct](../CODE_OF_CONDUCT.md).
### Your First Contribution
Contributing for the first time can seem daunting, but we value contributions from our user community and we will do our best to help you through the process. Here’s some advice to help make your work on pyuvdata more useful and rewarding.
* Use issue labels to guide you
  - Unsure where to begin contributing to pyuvdata? You can start by looking through issues labeled `good first issue` and `help wanted`.
* Pick a subject area that you care about, that you are familiar with, or that you want to learn about
- Some of the feature request issues involve file formats that the core developers may not be very familiar with. If you know about those formats, consider helping on those issues. If you're most interested, for example, in representations of beams for simulation, consider focusing on issues labeled with `beam`.
* Start small
- It’s easier to get feedback on a little issue or pull request than on a big one.
* If you’re going to take on a big change, make sure that your idea has support first
- This means getting someone else to confirm that a bug is real before you fix the issue, and ensuring that there’s consensus on a proposed feature before you work to implement it. Use the issue log to start conversations about major changes and enhancements.
* Be bold! Leave feedback!
- Sometimes it can be scary to make new issues or comment on existing issues or pull requests, but contributions from the wider community are what ensure that pyuvdata serves the whole community as well as possible.
* Be rigorous
- Our requirements on code style, testing and documentation are important. If you have questions about them or difficulty meeting them, please ask for help, we will do our best to support you. Your contributions will be reviewed and integrated much more quickly if your pull request meets the requirements.
If you are new to the GitHub or the pull request process you can start by taking a look at these tutorials:
http://makeapullrequest.com/ and http://www.firsttimersonly.com/. If you have more questions, feel free to ask for help, everyone is a beginner at first and all of us are still learning!
### Getting started
1. Create your own fork or branch of the code.
2. Do the changes in your fork or branch.
3. Follow the [Developer Installation](../README.md#developer-installation) instructions to ensure that you have all the required packages for testing your changes.
4. If you like the change and think the project could use it:
- If you're fixing a bug, include a new test that breaks as a result of the bug (if possible).
- Ensure that all your new code is covered by tests and that the existing tests pass. Tests can be run by running `pytest` in the top level `pyuvdata` directory. To run tests and automatically create a coverage report, run the script `test_coverage.sh` in the `scripts` directory. Coverage reports require the `pytest-cov` plug-in. Testing of `UVFlag` module requires the `pytest-cases` plug-in.
- Ensure that your code meets the Black style guidelines. You can check that your code will pass our linting tests by running `pre-commit run -a` in the top-level pyuvdata directory.
- Ensure that you fully document any new features via docstrings and in the [tutorial](../docs/tutorial.rst)
- You can see the full pull request checklist [here](PULL_REQUEST_TEMPLATE.md)
### Code review process
The core team looks at pull requests on a regular basis and tries to provide feedback as quickly as possible. Larger pull requests generally require more time for review and may require discussion among the core team.
# Community
In addition to conversations on github, we also communicate via Slack and on monthly telecons. We welcome contributors who want to join those discussions, [contact the RASG managers](mailto:rasgmanagers@gmail.com) if you'd like to be invited to join them.
|
RadioAstronomySoftwareGroupREPO_NAMEpyuvdataPATH_START.@pyuvdata_extracted@pyuvdata-main@.github@CONTRIBUTING.md@.PATH_END.py
|
{
"filename": "OErr.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/Obit/python/OErr.py",
"type": "Python"
}
|
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2004,2019
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: bcotton@nrao.edu.
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
# Python shadow class to ObitErr class
from __future__ import absolute_import
from __future__ import print_function
import Obit, InfoList, _Obit
class OErr(Obit.OErr):
    """
    Python ObitErr message and error stack

    This is an error stack class for obtaining tracebacks for error conditions.
    This is also the mechanism for passing informative messages.
    No messages, error or informative, are displayed until the contents
    of the stack are explicitly printed
    """
    def __init__(self) :
        super(OErr, self).__init__()
        # Create the underlying C ObitErr object attached to this shadow
        Obit.CreateOErr(self.this)
    def __del__(self, DeleteOErr=_Obit.DeleteOErr):
        # DeleteOErr is bound as a default argument so the C destructor
        # remains reachable even during interpreter shutdown, when module
        # globals may already have been cleared.
        if _Obit!=None:
            DeleteOErr(self.this)
    def __setattr__(self,name,value):
        # "me" is forwarded to the C object; everything else is a normal
        # Python attribute (stored directly to avoid recursing here).
        if name == "me" :
            Obit.OErr_Set_me(self.this,value)
            return
        self.__dict__[name] = value
    def __getattr__(self,name):
        # Virtual attributes backed by the C object / helper functions.
        if name == "me" :
            return Obit.OErr_Get_me(self.this)
        if name == "isErr" :
            return PIsErr(self)
        raise AttributeError(name)
    def __repr__(self):
        return "<C OErr instance>"
    def __str__(self):
        # Drain all queued messages from the C stack, one per line.
        messages = ''
        msg = Obit.OErrMsg(self.me)
        while msg:
            messages += '%s\n' % msg
            msg = Obit.OErrMsg(self.me)
            continue
        Obit.ObitErrClear (self.me); # Clear stack
        return messages
    def Clear(self):
        """ Clear Obit error stack """
        PClear(self)
        # end Clear
    def IsA (self):
        """
        Tells if input really a Python Obit OErr

        return True, False
        * self = Python OErr object
        """
        ################################################################
        # Allow derived types
        return Obit.ObitErrIsA(self.me)!=0
        # end IsA
# Error levels
# Severity codes passed as eCode to PLog; presumably these mirror the
# ObitErr severity enumeration in the underlying C library — confirm.
NoErr = 0        # no error, purely informational use
Info = 1         # informative message
Warn = 2         # warning, execution continues
Traceback = 3    # traceback entry added while unwinding
MildError = 4    # recoverable error
Error = 5        # error
StrongError = 6  # serious error
Fatal = 7        # fatal, execution cannot continue
def PIsErr(err):
    """
    Tells if an error condition exists

    Returns True if error condition exists, else False

    * err = Python Obit Error/message stack
    """
    ################################################################
    # Checks (leftover debug print removed; the TypeError carries the
    # diagnostic information)
    if not OErrIsA(err):
        raise TypeError("err MUST be a Python ObitErr")
    return Obit.isError(err.me) != 0
    # end PIsErr
def PInit(err, prtLv=0, taskLog=" "):
    """
    Initializes logging

    * err     = Python Obit Error/message stack to init
    * prtLv   = Message print level, 0=little, 5=LOTS
    * taskLog = Name of task log file, if given messages go here
      and NOT to the terminal (visible) output.
    """
    ################################################################
    # Validate input before touching the C layer
    if not OErrIsA(err):
        raise TypeError("err MUST be a Python ObitErr")
    # Pack the logging options into an InfoList and hand it to Obit
    info = InfoList.InfoList()
    for key, value in (("prtLv", prtLv), ("taskLog", taskLog)):
        info.set(key, value)
    Obit.ErrorInit(err.me, info.me)
    # end PInit
def PClear(err):
    """
    Clear Obit error stack

    Discards any queued messages and resets the error flag.

    * err = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not OErrIsA(err):
        raise TypeError("err MUST be a Python ObitErr")
    Obit.ObitErrClear(err.me)
    #end PClear
def PSet(err):
    """
    Set Obit error flag

    Marks the stack as being in an error state without adding a message.

    * err = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not OErrIsA(err):
        raise TypeError("err MUST be a Python ObitErr")
    Obit.SetError(err.me)
    #end PSet
def PLog(err, eCode, message):
    """
    Add message To Obit Error/message stack

    * err = Python Obit Error/message stack
    * eCode = error code defined above:
      NoErr, Info, Warn, Traceback,
      MildError, Error, StrongError, Fatal
    * message = message text to add to the stack
    """
    ################################################################
    # Checks
    if not OErrIsA(err):
        raise TypeError("err MUST be a Python ObitErr")
    Obit.LogError(err.me, eCode, message)
    #end PLog
def printErr(err):
    """
    Prints Obit error stack

    Messages are displayed and the stack is emptied by the C layer.

    * err = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not OErrIsA(err):
        raise TypeError("err MUST be a Python ObitErr")
    Obit.ObitErrLog(err.me)
    # end PrintErr
def printErrMsg(err, message="Error"):
    """
    Prints Obit error stack and throws runtime exception on error

    * err     = Python Obit Error/message stack
    * message = message string for exception
    """
    ################################################################
    # Checks
    if not OErrIsA(err):
        raise TypeError("err MUST be a Python ObitErr")
    # Capture the error state before printing: ObitErrLog empties the stack
    ierr = Obit.isError(err.me)
    Obit.ObitErrLog(err.me)
    if ierr:
        print(message)
        # Attach the message to the exception rather than raising a bare
        # Exception, so callers that log or display it see the context.
        raise Exception(message)
    # end printErrMsg
def OErrIsA (err):
    """
    Tells if object thinks it's a Python ObitErr

    return true, false (1,0)

    * err = input Python ObitErr stack
    """
    ################################################################
    # Checks: fast Python-side type test first, then ask the C library
    if not isinstance(err, OErr):
        return False
    #
    return Obit.ObitErrIsA(err.me)
    # end OErrIsA
def Bomb ():
    """
    Throws an exception to stop the debugger

    Deliberately triggers a failure in the C layer; intended only as a
    debugging aid to obtain a breakpoint/traceback.
    """
    ################################################################
    #
    Obit.Bomb()
    # end Bomb
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@Obit@python@OErr.py@.PATH_END.py
|
{
"filename": "kitti.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/torchvision/datasets/kitti.py",
"type": "Python"
}
|
import csv
import os
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
from PIL import Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class Kitti(VisionDataset):
    """`KITTI <http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark>`_ Dataset.

    It corresponds to the "left color images of object" dataset, for object detection.

    Args:
        root (str or ``pathlib.Path``): Root directory where images are downloaded to.
            Expects the following folder structure if download=False:

            .. code::

                <root>
                    └── Kitti
                        └─ raw
                            ├── training
                            |   ├── image_2
                            |   └── label_2
                            └── testing
                                └── image_2
        train (bool, optional): Use ``train`` split if true, else ``test`` split.
            Defaults to ``train``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample
            and its target as entry and returns a transformed version.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    # Download locations and on-disk layout constants.
    data_url = "https://s3.eu-central-1.amazonaws.com/avg-kitti/"
    resources = [
        "data_object_image_2.zip",
        "data_object_label_2.zip",
    ]
    image_dir_name = "image_2"
    labels_dir_name = "label_2"

    def __init__(
        self,
        root: Union[str, Path],
        train: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        transforms: Optional[Callable] = None,
        download: bool = False,
    ):
        super().__init__(
            root,
            transform=transform,
            target_transform=target_transform,
            transforms=transforms,
        )
        self.images = []
        self.targets = []
        self.train = train
        self._location = "training" if self.train else "testing"
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError("Dataset not found. You may use download=True to download it.")

        image_dir = os.path.join(self._raw_folder, self._location, self.image_dir_name)
        if self.train:
            labels_dir = os.path.join(self._raw_folder, self._location, self.labels_dir_name)
        # NOTE(review): os.listdir order is OS-dependent, so sample indices
        # are not stable across filesystems — confirm callers do not rely
        # on a fixed ordering.
        for img_file in os.listdir(image_dir):
            self.images.append(os.path.join(image_dir, img_file))
            if self.train:
                # Label file shares the image's basename with a .txt suffix.
                self.targets.append(os.path.join(labels_dir, f"{img_file.split('.')[0]}.txt"))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Get item at a given index.

        Args:
            index (int): Index
        Returns:
            tuple: (image, target), where
            target is a list of dictionaries with the following keys:

            - type: str
            - truncated: float
            - occluded: int
            - alpha: float
            - bbox: float[4]
            - dimensions: float[3]
            - locations: float[3]
            - rotation_y: float

        """
        # NOTE(review): Image.open is not wrapped in a context manager, so
        # the file handle is closed lazily by PIL/GC.
        image = Image.open(self.images[index])
        # The test split has no annotations; target stays None there.
        target = self._parse_target(index) if self.train else None
        if self.transforms:
            image, target = self.transforms(image, target)
        return image, target

    def _parse_target(self, index: int) -> List:
        # Parse one KITTI label file: one space-separated object per line,
        # columns as documented in __getitem__.
        target = []
        with open(self.targets[index]) as inp:
            content = csv.reader(inp, delimiter=" ")
            for line in content:
                target.append(
                    {
                        "type": line[0],
                        "truncated": float(line[1]),
                        "occluded": int(line[2]),
                        "alpha": float(line[3]),
                        "bbox": [float(x) for x in line[4:8]],
                        "dimensions": [float(x) for x in line[8:11]],
                        "location": [float(x) for x in line[11:14]],
                        "rotation_y": float(line[14]),
                    }
                )
        return target

    def __len__(self) -> int:
        return len(self.images)

    @property
    def _raw_folder(self) -> str:
        # <root>/Kitti/raw — the class name is part of the path on purpose.
        return os.path.join(self.root, self.__class__.__name__, "raw")

    def _check_exists(self) -> bool:
        """Check if the data directory exists."""
        folders = [self.image_dir_name]
        if self.train:
            folders.append(self.labels_dir_name)
        return all(os.path.isdir(os.path.join(self._raw_folder, self._location, fname)) for fname in folders)

    def download(self) -> None:
        """Download the KITTI data if it doesn't exist already."""

        if self._check_exists():
            return

        os.makedirs(self._raw_folder, exist_ok=True)

        # download files
        for fname in self.resources:
            download_and_extract_archive(
                url=f"{self.data_url}{fname}",
                download_root=self._raw_folder,
                filename=fname,
            )
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@torchvision@datasets@kitti.py@.PATH_END.py
|
{
"filename": "shot_sensitivity.py",
"repo_name": "HETDEX/hetdex_api",
"repo_path": "hetdex_api_extracted/hetdex_api-master/hetdex_api/flux_limits/shot_sensitivity.py",
"type": "Python"
}
|
"""
Unlike the sensitivity cubes, this module
generates flux limit estimates on the fly
from the Fiber class.
AUTHOR: Daniel Farrow
"""
import logging
from os.path import isfile, join
from tempfile import NamedTemporaryFile
from numpy import (arange, meshgrid, ones, array, sum, sqrt, square, newaxis,
repeat, loadtxt, where, argmin, invert, logical_not, isfinite,
isnan, newaxis, ceil, nansum, savetxt, transpose, mean, any)
from numpy.ma import MaskedArray
import astropy.units as u
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.convolution import convolve
from pyhetdex.coordinates.tangent_projection import TangentPlane
from pyhetdex.het.fplane import FPlane
from hetdex_api.survey import Survey
from hetdex_api.config import HDRconfig
from hetdex_api.extract import Extract
from hetdex_api.mask import create_gal_ellipse, amp_flag_from_fiberid, meteor_flag_from_coords, create_dummy_wcs
from hetdex_api.flux_limits.sensitivity_cube import WavelengthException, create_sensitivity_cube_from_astrom
from hetdex_api.flux_limits.flim_models import return_flux_limit_model
def bad_central_mask(weights, badmask, index, wmin=0.1):
    """
    Karl Gebhardt's false positive cut. If any fiber
    with a weight above wmin is flagged within the
    central three wave bins of a source remove it.
    Also remove sources where everything is flagged

    Parameters
    ----------
    weights : array
        the weights of the fibers
    badmask : 2d numpy array (nfibers x nwaves)
        True where data is bad
    index : int
        the wavelength index of the source
    wmin : float (optional)
        the minimum weight of fiber to
        consider when looking for flagged
        values

    Returns
    -------
    maskval : boolean
        False if bad, True if data is
        good
    """
    # Fibers contributing significantly at the source wavelength
    significant = weights[:, index] > wmin

    # Central three wavelength bins around the source. Kept identical to
    # the original expression, including its index == 0 edge behaviour.
    window = slice(index - 1, index + 2)

    # Bad if any significant fiber is flagged in the window, or if every
    # fiber (regardless of weight) is flagged there. Array methods are
    # used instead of any()/all() so the result does not depend on the
    # module-level "from numpy import any" shadowing the builtin.
    flagged_significant = bool(badmask[significant, window].any())
    all_flagged = bool(badmask[:, window].all())

    return not (flagged_significant or all_flagged)
class ShotSensitivity(object):
"""
Generate completeness estimates for a shot
on the fly, using the Extract class. This
is written to be as backward compatible as
possible with the user interface of
hdf5_sensitivity_cubes:SensitivityCubeHDF5Container
A lot of this is adapted from
`hetdex_tools/get_spec.py` `hetdex_api/extract.py`
and scripts written by Erin Mentuch Cooper.
The aperture correction stuff was taken
from 'Remedy' by Greg Zeimann:
grzeimann Remedy
Parameters
----------
datevshot : str
the name of the shot in the form
YYYYMMDDvSSS
release : str (Optional)
The name of the release e.g. hdr2.1
defaults to latest
flim_model : str
The flux limit model to convert
the noise to completeness, defaults
to the latest (which might not be
compatible with the release, see README)
rad : float
A radius in arcseconds to grab fibers
from when computing the flux limit,
default 3.5
ffsky : boolean
Full frame sky subtraction (default: False)
wavenpix : int
Number of wave pixels either side of the
pixel the source is on to add in
quadrature, or sets the size of tophat
convolution when producing data cubes as
2*wavenpix + 1 (default 3)
d25scale : float
Sets the multiplier for the galaxy masks
applied (default 1.5)
sclean_bad : bool
Replace bad data using the sclean
tool (see hetdex_api.extract:Extract)
verbose : bool
Print information about the flux limit model
to the screen
"""
    def __init__(self, datevshot, release=None, flim_model=None, rad=3.5,
                 ffsky=False, wavenpix=3, d25scale=1.5, verbose=False,
                 sclean_bad = True, log_level="WARNING"):
        """Set up extraction, masking and astrometry for one shot.

        Parameters are documented on the class docstring.
        """
        self.conf = HDRconfig()
        self.extractor = Extract()
        # shotid is the datevshot with the "v" removed, e.g. 20200101v015
        # -> 20200101015
        self.shotid = int(datevshot.replace("v", ""))
        self.date = datevshot[:8]
        self.rad = rad
        self.ffsky = ffsky
        self.wavenpix = wavenpix
        self.sclean_bad = sclean_bad

        logger = logging.getLogger(name="ShotSensitivity")
        logger.setLevel(log_level)

        if verbose:
            # NOTE(review): raising here makes the setLevel below
            # unreachable; if the intent was to warn and continue,
            # warnings.warn would be needed instead — confirm.
            raise DeprecationWarning("Using verbose is deprecated, set log_level instead")
            logger.setLevel("DEBUG")

        logger.info("shotid: {:d}".format(self.shotid))

        # Default to the latest data release if none is given
        if not release:
            self.release = self.conf.LATEST_HDR_NAME
        else:
            self.release = release

        logger.info("Data release: {:s}".format(self.release))
        self.survey = Survey(survey=self.release)

        # Set up flux limit model
        self.f50_from_noise, self.sinterp, interp_sigmas \
            = return_flux_limit_model(flim_model,
                                      cache_sim_interp=False,
                                      verbose=verbose)

        # Generate astrometry for this shot
        survey_sel = (self.survey.shotid == self.shotid)
        self.shot_pa = self.survey.pa[survey_sel][0]
        self.shot_ra = self.survey.ra[survey_sel][0]
        self.shot_dec = self.survey.dec[survey_sel][0]
        # Convert position angle to the tangent-plane rotation convention
        rot = 360.0 - (self.shot_pa + 90.)
        self.tp = TangentPlane(self.shot_ra, self.shot_dec, rot)

        # Set up masking
        logger.info("Using d25scale {:f}".format(d25scale))
        self.setup_mask(d25scale)

        # Set up spectral extraction; hdr1 used a differently-named
        # seeing FWHM column
        if release == "hdr1":
            fwhm = self.survey.fwhm_moffat[survey_sel][0]
        else:
            fwhm = self.survey.fwhm_virus[survey_sel][0]

        logger.info("Using Moffat PSF with FWHM {:f}".format(fwhm))
        self.moffat = self.extractor.moffat_psf(fwhm, 3.*rad, 0.25)
        self.extractor.load_shot(self.shotid, fibers=True, survey=self.release)

        # Set up the focal plane astrometry
        fplane_table = self.extractor.shoth5.root.Astrometry.fplane

        # Bit of a hack to avoid changing pyhetdex: write the focal plane
        # table to a temporary file in the format FPlane expects.
        with NamedTemporaryFile(mode='w') as tpf:
            for row in fplane_table.iterrows():
                tpf.write("{:03d} {:8.5f} {:8.5f} {:03d} {:03d} {:03d} {:8.5f} {:8.5f}\n".format(
                    row['ifuslot'], row['fpx'], row['fpy'], row['specid'],
                    row['specslot'], row['ifuid'], row['ifurot'], row['platesc']))

            tpf.seek(0)
            self.fplane = FPlane(tpf.name)
def setup_mask(self, d25scale):
"""
Setup the masking, to speed up checking
if sources are in the mask later. This
is run at initialisation, so you need
only run again to change `d25scale`
Parameters
----------
d25scale : float
Sets the multiplier for the galaxy masks
applied (default 1.5)
"""
logger = logging.getLogger(name="ShotSensitivity")
# see if this is a bad shot
#print("Bad shot from ", self.conf.badshot)
badshot = loadtxt(self.conf.badshot, dtype=int)
badtpshots = loadtxt(self.conf.lowtpshots, dtype=int)
if (self.shotid in badshot) or (self.shotid in badtpshots):
logger.warn("Shot is in bad. Making mask zero everywhere")
self.badshot = True
else:
self.badshot = False
# set up bad amps
logger.info("Bad amps from {:s}".format(self.conf.badamp))
self.bad_amps = Table.read(self.conf.badamp)
sel_shot = (self.bad_amps["shotid"] == self.shotid)
self.bad_amps = self.bad_amps[sel_shot]
# set up galaxy mask
logger.info("Galaxy mask from {:s}".format(self.conf.rc3cat))
galaxy_cat = Table.read(self.conf.rc3cat, format='ascii')
gal_coords = SkyCoord(galaxy_cat['Coords'], frame='icrs')
shot_coords = SkyCoord(ra=self.shot_ra, dec=self.shot_dec,
unit="deg")
sel_reg = where(shot_coords.separation(gal_coords) < 1.*u.deg)[0]
self.gal_regions = []
if len(sel_reg) > 0:
for idx in sel_reg:
self.gal_regions.append(create_gal_ellipse(galaxy_cat,
row_index=idx,
d25scale=d25scale))
# set up meteor mask
# check if there are any meteors in the shot:
logger.info("Meteors from {:s}".format(self.conf.meteor))
self.met_tab = Table.read(self.conf.meteor, format="ascii")
self.met_tab = self.met_tab[self.shotid == self.met_tab["shotid"]]
    def extract_ifu_sensitivity_cube(self, ifuslot, nx=31, ny=31,
                                     ifusize=62, generate_sigma_array=True,
                                     cache_sim_interp=True):
        """
        Extract the sensitivity cube
        from IFU `ifuslot`

        Parameters
        ----------
        ifuslot : string
            the IFU slot to extract
        nx, ny : int
            the dimensions in pixels
            of the cube (default 31,31)
        ifusize : float
            the length of the side of
            the cube in arcseconds,
            default is 62 arcseconds
        generate_sigma_array: bool
            this fills the 3D array
            of noise, this makes it quite
            slow to run, so if you want
            to just use the cube for
            the astrometry do not use
            this option (default: True)
        cache_sim_interp : bool
            cache the simulation interpolator
            to speed up execution (default: True)

        Returns
        -------
        scube : hetdex_api.flux_limits.sensitivity_cube:SensitivityCube
            the sensitivity cube
        """
        # Wavelength axis comes straight from the extractor
        waves = self.extractor.get_wave()
        wrange = [waves[0], waves[-1]]
        nz = len(waves)
        pa = self.shot_pa

        # IFU centre in focal-plane coordinates, converted to sky position
        # (note ifu.y is passed as the first argument — presumably the
        # focal-plane axis convention; confirm against pyhetdex)
        ifu = self.fplane.difus_ifuslot[ifuslot.replace("ifuslot_", "")]
        ra_ifu, dec_ifu = self.tp.xy2raDec(ifu.y, ifu.x)

        scube = create_sensitivity_cube_from_astrom(float(ra_ifu), float(dec_ifu),
                                                    pa, nx, ny, nz, ifusize,
                                                    wrange=wrange,
                                                    cache_sim_interp=cache_sim_interp)

        if generate_sigma_array:
            # Sky positions of every spatial pixel in the cube
            ix, iy = meshgrid(arange(0, nx, 1.0),
                              arange(0, ny, 1.0))
            all_ra, all_dec, junk = scube.wcs.all_pix2world(ix.ravel(), iy.ravel(),
                                                            [500.], 0)
            # wave=None returns noise for all wavelength bins at once;
            # direct_sigmas skips the noise -> 50% completeness conversion
            noises, norm, mask = self.get_f50(all_ra, all_dec, None, 1.0,
                                              direct_sigmas = True)
            # Reshape flat (ra, dec) x wave values into the (nz, ny, nx) cube
            sigmas = noises.ravel(order="F").reshape(nz, ny, nx)
            # get_f50's mask is True for good data; MaskedArray wants True
            # for masked (bad), hence the inversion, broadcast over nz
            mask = logical_not(mask.reshape(ny, nx))
            mask3d = repeat(mask[newaxis, :, :], sigmas.shape[0], axis=0)
            scube.sigmas = MaskedArray(sigmas, mask=mask3d, fill_value=999.0)

        return scube
def itercubes(self, **kwargs):
"""
Iterate over the cubes in this shot
Parameters
----------
See `extract_ifu_sensitivity_cube`
Yields
------
ifuslot : str
the IFU slot
scube : SensitivityCube
a sensitivity cube
object
"""
for ifuslot in self.fplane.ifuslots:
yield ifuslot, self.extract_ifu_sensitivity_cube(ifuslot,
**kwargs)
def get_f50(self, ra, dec, wave, sncut, direct_sigmas = False,
nmax = 5000, return_amp = False, linewidth=None):
"""
Return flux at 50% for the input positions
most of this is cut and paste from
`hetdex_tools/get_spec.py` and
the old sensitivity_cube:SensitivityCube
class.
This class splits the data up into sets of
up to `nmax` to save memory
Parameters
----------
ra, dec : array
right ascension and dec in degrees
wave : array
wavelength in Angstroms. If None,
then return flux limits for
all wave bins.
sncut : float
cut in detection significance
that defines this catalogue
direct_sigmas : bool
return the noise values directly
without passing them through
the noise to 50% completeness
flux (default = False)
nmax : int
maximum number of sources to
consider at once, otherwise split
up and loop.
return_amp : bool
if True return amplifier information
for the closest fiber to each source
(default = False)
linewidth : array
optionally pass the linewidth of
the source (in AA) to activate the linewidth
dependent part of the completeness
model (default = None).
Returns
-------
f50s : array
50% completeness. If outside
of cube return 999. If None
was passed for wave this is
a 2D array of ra, dec and
all wavelengths
mask : array
Only returned if `wave=None`. This
mask is True where the ra/dec
positions passed are in good
regions of data
amp : array
Only returned in `return_amp=True`,
it's an array of amplifier information
for the closest fiber to each source
"""
if type(wave) != type(None):
wave_passed = True
else:
wave_passed = False
try:
nsrc = len(ra)
if wave_passed:
# Trim stuff very far away
gal_coords = SkyCoord(ra=ra, dec=dec, unit="deg")
shot_coords = SkyCoord(ra=self.shot_ra, dec=self.shot_dec,
unit="deg")
sel = array(shot_coords.separation(gal_coords) < 2.0*u.deg)
ra_sel = array(ra)[sel]
dec_sel = array(dec)[sel]
wave_sel = array(wave)[sel]
nsel = len(ra_sel)
else:
# If not passing wave always loop
# over all ra/dec in range
ra_sel = ra
dec_sel = dec
wave_sel = None
nsel = len(ra)
nsplit = int(ceil(float(nsel)/float(nmax)))
except TypeError as e:
# If the user does not pass arrays
nsplit = 1
nsrc = 1
nsel = 1
sel = True
ra_sel = array([ra])
dec_sel = array([dec])
wave_sel = array([wave])
# Array to store output actually in the shot
f50s_sel = []
mask_sel = []
amp_sel = []
norm_sel = []
wave_rect = self.extractor.get_wave()
pixsize_aa = wave_rect[1] - wave_rect[0]
# This will give 999 once the noise is scaled suitably
badval = 999*1e17/pixsize_aa
# Arrays to store full output
f50s = badval*ones(nsrc)
mask = ones(nsrc)
norm = ones(nsrc)
amp = array(["notinshot00_0_multi_00_000_000_00_000"]*nsrc)
if nsel > 0:
for i in range(nsplit):
tra = ra_sel[i*nmax : (i+1)*nmax]
tdec = dec_sel[i*nmax : (i+1)*nmax]
if wave_passed:
twave = wave_sel[i*nmax : (i+1)*nmax]
if not self.badshot:
tf50s, tamp, tnorm = self._get_f50_worker(tra, tdec, twave, sncut,
direct_sigmas = direct_sigmas,
linewidth = linewidth)
else:
tamp = ["bad"]*len(tra)
tf50s = [badval]*len(tra)
tnorm = [1.0]*len(tra)
else:
# if bad shot then the mask is all set to zero
tf50s, tmask, tamp, tnorm = \
self._get_f50_worker(tra, tdec, None, sncut,
direct_sigmas = direct_sigmas,
linewidth = linewidth)
mask_sel.extend(tmask)
f50s_sel.extend(tf50s)
amp_sel.extend(tamp)
norm_sel.extend(tnorm)
if return_amp:
if wave_passed:
# copy to output
f50s[sel] = f50s_sel
amp[sel] = amp_sel
norm[sel] = norm_sel
return f50s, norm, amp
else:
return array(f50s_sel), array(norm_sel), array(mask_sel), array(amp_sel)
else:
if wave_passed:
f50s[sel] = f50s_sel
norm[sel] = norm_sel
return f50s, norm
else:
return array(f50s_sel), array(norm_sel), array(mask_sel)
    def _get_f50_worker(self, ra, dec, wave, sncut,
                        direct_sigmas = False, linewidth = None):
        """
        Return flux at 50% for the input positions
        most of this is cut and paste from
        `hetdex_tools/get_spec.py` and
        the old sensitivity_cube:SensitivityCube
        class.
        Parameters
        ----------
        ra, dec : array
            right ascension and dec in degrees
        wave : array
            wavelength in Angstroms. If None,
            then return flux limits for
            all wave bins.
        sncut : float
            cut in detection significance
            that defines this catalogue
        direct_sigmas : bool
            return the noise values directly
            without passing them through
            the noise to 50% completeness
            flux
        linewidth : array
            optionally pass the linewidth of
            the source (in AA) to activate the linewidth
            dependent part of the completeness
            model (default = None).
        Returns
        -------
        f50s : array
            50% completeness. If outside
            of cube return 999. If None
            was passed for wave this is
            a 2D array of ra, dec and
            all wavelengths
        norm_all : array
            the aperture corrections
        mask : array
            Only returned if `wave=None`. This
            mask is True where the ra/dec
            positions passed are in good
            regions of data
        amp : array
            Only returned in `return_amp=True`,
            it's an array of amplifier information
            for the closest fiber to each source
        """
        logger = logging.getLogger(name="ShotSensitivity")
        # Promote scalar inputs to length-1 arrays so the loop below works
        try:
            [x for x in ra]
        except TypeError:
            ra = array([ra])
            dec = array([dec])
            wave = array([wave])
        coords = SkyCoord(ra=ra, dec=dec, unit="deg")
        wave_rect = self.extractor.get_wave()
        pixsize_aa = wave_rect[1] - wave_rect[0]
        # This will give 999 once the noise is scaled suitably
        badval = 999*1e17/pixsize_aa
        # Size of window in wave elements
        filter_len = 2*self.wavenpix + 1
        if type(wave) != type(None):
            wave_passed = True
        else:
            wave_passed = False
        convolution_filter = ones(filter_len)
        # mask is only meaningful (and returned) in the wave=None mode
        mask = True*ones(len(coords), dtype=int)
        noise = []
        # one bulk call retrieves fiber data for every input position
        info_results = self.extractor.get_fiberinfo_for_coords(
            coords,
            radius=self.rad,
            ffsky=self.ffsky,
            return_fiber_info=True,
            fiber_lower_limit=2,
            verbose=False
        )
        id_, aseps, aifux, aifuy, axc, ayc, ara, adec, adata, aerror, afmask, afiberid, \
            amultiframe = info_results
        # PSF-weight cache; build_weights reuses I/fac across sources
        I = None
        fac = None
        norm_all = []
        amp = []
        nan_fib_mask = []
        for i, c in enumerate(coords):
            # select the fibers belonging to source i
            sel = (id_ == i)
            if type(wave) != type(None):
                logger.debug("Running on source {:f} {:f} {:f}".format(ra[i], dec[i], wave[i]))
            else:
                logger.debug("Running on position {:f} {:f}".format(ra[i], dec[i]))
            logger.debug("Found {:d} fibers".format(sum(sel)))
            if sum(sel) > 0:
                # fiber properties
                xc = axc[sel][0]
                yc = ayc[sel][0]
                ifux = aifux[sel]
                ifuy = aifuy[sel]
                data = adata[sel]
                error = aerror[sel]
                fmask = afmask[sel]
                fiberid = afiberid[sel]
                multiframe = amultiframe[sel]
                seps = aseps[sel]
                # Flag the zero elements as bad
                fmask[(abs(data) < 1e-30) | (abs(error) < 1e-30)] = False
                # record the amplifier of the closest fiber
                iclosest = argmin(seps)
                amp.append(fiberid[iclosest])
                if len(self.bad_amps) > 0:
                    amp_flag = amp_flag_from_fiberid(fiberid[iclosest],
                                                     self.bad_amps)
                else:
                    amp_flag = True
                # XXX Could be faster - reloads the file every run
                meteor_flag = meteor_flag_from_coords(c, self.shotid)
                if not (amp_flag and meteor_flag):
                    logger.debug("The data here are bad, position is masked")
                    if wave_passed:
                        noise.append(badval)
                        norm_all.append(1.0)
                        # value doesn't matter as in amp flag
                        nan_fib_mask.append(True)
                        continue
                    else:
                        # wave=None mode: mark bad but still compute noise
                        mask[i] = False
                weights, I, fac = self.extractor.build_weights(xc, yc, ifux, ifuy, self.moffat,
                                                               I=I, fac=fac, return_I_fac = True)
                # (See Greg Zeimann's Remedy code)
                # normalized in the fiber direction
                norm = sum(weights, axis=0)
                weights = weights/norm
                result = self.extractor.get_spectrum(data, error, fmask, weights,
                                                     remove_low_weights = False,
                                                     sclean_bad = self.sclean_bad,
                                                     return_scleaned_mask = True)
                spectrum_aper, spectrum_aper_error, scleaned, data, error, fiber_mask = [res for res in result]
                # Changed by EMC 2024-11-05 to allow for update of Extract April 10 commit 8ffdf34
                #spectrum_aper, spectrum_aper_error, scleaned = [res for res in result]
                if wave_passed:
                    # first wavelength bin at/above the requested wavelength
                    index = where(wave_rect >= wave[i])[0][0]
                    ilo = index - self.wavenpix
                    ihi = index + self.wavenpix + 1
                    # If lower index less than zero, truncate
                    if ilo < 0:
                        ilo = 0
                    if ihi < 0:
                        ihi = 0
                    # Output lots of information for very detailed debugging
                    if logger.getEffectiveLevel() == logging.DEBUG:
                        logger.debug("Table of fibers:")
                        logger.debug("# fiberid wave_index ifux ifuy weight noise mask")
                        for fibidx, fid in enumerate(fiberid):
                            for wi, (tw, tnoise, tmask) in enumerate(zip((weights*norm)[fibidx, ilo:ihi],
                                                                         error[fibidx, ilo:ihi], fmask[fibidx, ilo:ihi]),
                                                                     ilo):
                                logger.debug("{:s} {:d} {:f} {:f} {:f} {:f} {:s}".format(fid, wi, ifux[fibidx],
                                                                                         ifuy[fibidx], tw, tnoise,
                                                                                         str(tmask)))
                    # Mask source if bad values within the central 3 wavebins
                    nan_fib = bad_central_mask(weights*norm, logical_not(fmask),
                                               index)
                    nan_fib_mask.append(nan_fib)
                    # Account for NaN and masked spectral bins
                    bad = isnan(spectrum_aper_error[ilo:ihi])
                    goodfrac = 1.0 - sum(bad)/len(bad)
                    if all(isnan(spectrum_aper_error[ilo:ihi])):
                        sum_sq = badval
                    else:
                        # quadrature sum of errors, upweighted by the
                        # fraction of usable bins in the window
                        sum_sq = \
                            sqrt(nansum(square(spectrum_aper_error[ilo:ihi])/goodfrac))
                    norm_all.append(mean(norm[ilo:ihi]))
                    noise.append(sum_sq)
                else:
                    logger.debug("Convolving with window to get flux limits versus wave")
                    # Use astropy convolution so NaNs are ignored
                    convolved_variance = convolve(square(spectrum_aper_error),
                                                  convolution_filter,
                                                  normalize_kernel=False)
                    std = sqrt(convolved_variance)
                    # Also need to convolve aperture corrections to get
                    # a total apcor across the wavelength window
                    convolved_norm = convolve(norm,
                                              convolution_filter,
                                              normalize_kernel=True)
                    # To get mean account for the edges in
                    # the convolution
                    for iend in range(self.wavenpix):
                        edge_fac = filter_len/(filter_len + iend - self.wavenpix)
                        convolved_norm[iend] *= edge_fac
                        convolved_norm[-iend - 1] *= edge_fac
                    # Mask wavelengths with too many bad pixels
                    # equivalent to nan_fib in the wave != None mode
                    wunorm = weights*norm
                    for index in range(len(convolved_variance)):
                        if not bad_central_mask(wunorm, logical_not(fmask), index):
                            std[index] = badval
                    noise.append(std)
                    norm_all.append(convolved_norm)
            else:
                # no fibers found at this position: fill with sentinels
                if wave_passed:
                    noise.append(badval)
                    norm_all.append(1.0)
                    amp.append("000")
                    nan_fib_mask.append(True)
                else:
                    noise.append(badval*ones(len(wave_rect)))
                    norm_all.append(ones(len(wave_rect)))
                    amp.append("000")
                    mask[i] = False
        # Apply the galaxy mask
        gal_mask = ones(len(coords), dtype=int)
        for gal_region in self.gal_regions:
            dummy_wcs = create_dummy_wcs(gal_region.center,
                                         imsize=2*gal_region.height)
            # zero if near galaxy
            gal_mask = gal_mask & invert(gal_region.contains(coords, dummy_wcs))
        noise = array(noise)
        # scale the per-pixel noise by pixel size and 1e-17 flux units
        snoise = pixsize_aa*1e-17*noise
        if wave_passed:
            bad = (gal_mask < 0.5) | (snoise > 998) | isnan(snoise) | invert(nan_fib_mask)
            normnoise = snoise/norm_all
            if not direct_sigmas:
                normnoise = self.f50_from_noise(normnoise, wave, sncut,
                                                linewidth = linewidth)
            normnoise[bad] = 999.
            return normnoise, amp, norm_all
        else:
            mask[gal_mask < 0.5] = False
            if self.badshot:
                mask[:] = False
            bad = (snoise > 998) | logical_not(isfinite(snoise))
            normnoise = snoise/norm_all
            if not direct_sigmas:
                normnoise = self.f50_from_noise(normnoise, wave, sncut,
                                                linewidth = linewidth)
            normnoise[bad] = 999
            return normnoise, mask, amp, norm_all
def return_completeness(self, flux, ra, dec, lambda_, sncut, f50s=None,
linewidth = None):
"""
Return completeness at a 3D position as an array.
If for whatever reason the completeness is NaN, it's
replaced by 0.0. This is cut and paste from
sensitivity_cube:SensitivityCube
Parameters
----------
flux : array
fluxes of objects
ra, dec : array
right ascension and dec in degrees
lambda_ : array
wavelength in Angstrom
sncut : float
the detection significance (S/N) cut
applied to the data
f50s : array (optional)
optional array of precomputed
50% completeness fluxes. Otherwise
the method will compute them itself
from the ra/dec/linewidth (default:None)
linewidth : array (optional)
optionally pass the linewidth of
the source (in AA) to activate the linewidth
dependent part of the completeness
model (default = None). Only does
anything when you don't pass the f50s
(default: None)
Return
------
fracdet : array
fraction detected
Raises
------
WavelengthException :
Annoys user if they pass
wavelength outside of
VIRUS range
"""
logger = logging.getLogger(name="ShotSensitivity")
try:
if lambda_[0] < 3000.0 or lambda_[0] > 6000.0:
raise WavelengthException("""Odd wavelength value. Are you
sure it's in Angstrom?""")
except TypeError as e:
if lambda_ < 3000.0 or lambda_ > 6000.0:
raise WavelengthException("""Odd wavelength value. Are you
sure it's in Angstrom?""")
if type(f50s) == type(None):
f50s, norm = self.get_f50(ra, dec, lambda_, sncut, linewidth = linewidth)
try:
# to stop bad values breaking interpolation
bad = (f50s > 998)
f50s[bad] = 1e-16
fracdet = self.sinterp(flux, f50s, lambda_, sncut)
#print(min(flux), max(flux), min(f50s), max(f50s))
# check to see if we're passing multiple fluxes
# for one f50 value
if any(bad):
logger.debug("There are bad values here to mask")
if len(f50s) == 1:
logger.debug("Just one ra/dec/wave passed.")
fracdet[:] = 0.0
f50s[:] = 999.0
else:
fracdet[bad] = 0.0
f50s[bad] = 999.0
except IndexError as e:
print("Interpolation failed!")
print(min(flux), max(flux), min(f50s), max(f50s))
print(min(lambda_), max(lambda_))
raise e
try:
fracdet[isnan(fracdet)] = 0.0
except TypeError:
if isnan(fracdet):
fracdet = 0.0
return fracdet
    def close(self):
        """
        Close the Extractor object
        (especially if it has a Shot HDF
        file open)
        """
        # delegate to the extractor, which owns the open file handle
        self.extractor.close()
    def __enter__(self):
        """ Added to support using the `with` statement """
        # no extra setup needed on entry; resources are opened elsewhere
        return self
    def __exit__(self, type_, value, traceback):
        """ Support tidying up after using the `with` statement """
        # always close, whether or not an exception occurred in the block
        self.close()
|
HETDEXREPO_NAMEhetdex_apiPATH_START.@hetdex_api_extracted@hetdex_api-master@hetdex_api@flux_limits@shot_sensitivity.py@.PATH_END.py
|
{
"filename": "ssh_cluster.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/utility/parallel/ssh_cluster.py",
"type": "Python"
}
|
""" Make a cluster from nodes, where you can ssh to any node from the head node with no auth.
The input is a set of hosts and a set of commands to be executed on them.
Uses the parallel-SSH library https://pypi.org/project/parallel-ssh/
To see how it works, try the "busy" example
WARNING: there is a ~60 second lag between the remote program finishing and this code finding out.
"""
import sys, time, random
from pssh.clients import ParallelSSHClient
def run_commands_on_hosts(cmdlist, hosts):
    """Run every command in `cmdlist` across `hosts`, keeping at most one
    command in flight per host, until the list is drained.

    Parameters
    ----------
    cmdlist : list of str
        Shell commands to execute. Consumed destructively via `pop()`,
        i.e. from the END of the list, so dispatch order is the reverse
        of the list order.
    hosts : list of str
        Hostnames reachable by passwordless ssh from this node.
    """
    # one single-host client per node so each can be polled independently
    clients = [ParallelSSHClient([host]) for host in hosts]
    outputs = [None] * len(hosts)
    tstart = time.time()
    while True:
        nfinished = 0
        for i in range(len(clients)):
            client = clients[i]
            if client is None:
                # this slot is permanently done (no more work to hand out)
                nfinished += 1
                continue
            if client.finished():
                # print the outputs of the job that just completed
                if outputs[i]:
                    for host_output in outputs[i]:
                        print("%d: Host %d: exit code %s" % (
                            time.time()-tstart, i, host_output.exit_code))
                        for line in list(host_output.stdout):
                            print(' ' + line)
                # start another command if any remain, else retire the slot
                if len(cmdlist) > 0:
                    cmd = cmdlist.pop()
                    print('%d: Starting %s on host %d' % (
                        time.time()-tstart, cmd, i))
                    outputs[i] = clients[i].run_command(cmd)
                else:
                    clients[i] = None
        if nfinished == len(hosts):
            print('All finished in %d seconds' % int(time.time()-tstart))
            break
        else:
            # poll interval; see module docstring re: completion-detection lag
            time.sleep(5)
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@utility@parallel@ssh_cluster.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/benchmark/fortran/pm_sampleCov/setCov_vs_setCovMean/main.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""Benchmark post-processing: plot absolute runtimes and runtime ratios
(relative to the first timed column) from the CSV file ``main.out``."""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os

# the working directory name identifies this benchmark in output filenames
dirname = os.path.basename(os.getcwd())
fontsize = 14

df = pd.read_csv("main.out", delimiter = ",")
# colnames[0] is the x-axis variable; colnames[1:] are the timed routines
colnames = list(df.columns.values)


def _decorate(ax, ylabel, title):
    """Apply the log-log styling, labels, grid, and legend shared by both plots."""
    plt.xticks(fontsize = fontsize)
    plt.yticks(fontsize = fontsize)
    ax.set_xlabel(colnames[0], fontsize = fontsize)
    ax.set_ylabel(ylabel, fontsize = fontsize)
    ax.set_title(title, fontsize = fontsize)
    ax.set_xscale("log")
    ax.set_yscale("log")
    plt.minorticks_on()
    plt.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
    ax.tick_params(axis = "y", which = "minor")
    ax.tick_params(axis = "x", which = "minor")
    ax.legend(colnames[1:], fontsize = fontsize)
    plt.tight_layout()


####################################################################################################################################
#### Plot the runtimes.
####################################################################################################################################

# keep separate Figure/Axes handles (the original stored the Figure in a
# variable named `ax` and then clobbered it with the Axes)
fig = plt.figure(figsize = 1.25 * np.array([6.4,4.6]), dpi = 200)
ax = plt.subplot()
for colname in colnames[1:]:
    plt.plot( df[colnames[0]].values
            , df[colname].values
            , linewidth = 2
            )
_decorate(ax, "Runtime [ seconds ]", " vs. ".join(colnames[1:])+"\nLower is better.")
plt.savefig("benchmark." + dirname + ".runtime.png")

####################################################################################################################################
#### Plot the runtime ratios.
####################################################################################################################################

fig = plt.figure(figsize = 1.25 * np.array([6.4,4.6]), dpi = 200)
ax = plt.subplot()
# dashed reference line at ratio == 1 (same speed as the baseline column)
plt.plot( df[colnames[0]].values
        , np.ones(len(df[colnames[0]].values))
        , linestyle = "--"
        , linewidth = 2
        )
for colname in colnames[2:]:
    plt.plot( df[colnames[0]].values
            , df[colname].values / df[colnames[1]].values
            , linewidth = 2
            )
_decorate(ax,
          "Runtime compared to {}".format(colnames[1]),
          "Runtime Ratio Comparison. Lower means faster.\nLower than 1 means faster than {}().".format(colnames[1]))
plt.savefig("benchmark." + dirname + ".runtime.ratio.png")
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@benchmark@fortran@pm_sampleCov@setCov_vs_setCovMean@main.py@.PATH_END.py
|
{
"filename": "io.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/frontends/amrvac/io.py",
"type": "Python"
}
|
"""
AMRVAC-specific IO functions
"""
import os
import numpy as np
from more_itertools import always_iterable
from yt.geometry.selection_routines import GridSelector
from yt.utilities.io_handler import BaseIOHandler
from yt.utilities.on_demand_imports import _f90nml as f90nml
def read_amrvac_namelist(parfiles):
    """Read one or more parfiles, and return a unified f90nml.Namelist object.

    This replicates MPI-AMRVAC's patching logic: for every duplicated
    parameter the last file in line wins, except for
    `&filelist:base_filename`, which is accumulated by concatenation across
    all files. With a single file this is simply a wrapper of f90nml.read().

    Parameters
    ----------
    parfiles : str, os.Pathlike, byte, or an iterable returning those types
        A file path, or a list of file paths to MPI-AMRVAC configuration parfiles.

    Returns
    -------
    f90nml.Namelist
        A single namelist object. The class inherits from ordereddict.
    """
    paths = [os.path.expanduser(pf) for pf in always_iterable(parfiles)]
    namelists = [f90nml.read(path) for path in paths]

    # later files override earlier ones for every duplicated parameter ...
    merged = f90nml.Namelist()
    for nml in namelists:
        merged.patch(nml)

    # ... except base_filename, which is concatenated across all files
    if "filelist" in merged:
        merged["filelist"]["base_filename"] = "".join(
            nml.get("filelist", {}).get("base_filename", "") for nml in namelists
        )
    return merged
class AMRVACIOHandler(BaseIOHandler):
    # this frontend does not support particle data
    _particle_reader = False
    _dataset_type = "amrvac"
    def __init__(self, ds):
        BaseIOHandler.__init__(self, ds)
        self.ds = ds
        self.datfile = ds.parameter_filename
        header = self.ds.parameters
        # per-block on-disk shape: block_nx cells per dimension plus nw fields
        self.block_shape = np.append(header["block_nx"], header["nw"])
    def _read_particle_coords(self, chunks, ptf):
        """Not implemented yet."""
        # This needs to *yield* a series of tuples of (ptype, (x, y, z)).
        # chunks is a list of chunks, and ptf is a dict where the keys are
        # ptypes and the values are lists of fields.
        raise NotImplementedError
    def _read_particle_fields(self, chunks, ptf, selector):
        """Not implemented yet."""
        # This gets called after the arrays have been allocated.  It needs to
        # yield ((ptype, field), data) where data is the masked results of
        # reading ptype, field and applying the selector to the data read in.
        # Selector objects have a .select_points(x,y,z) that returns a mask, so
        # you need to do your masking here.
        raise NotImplementedError
    def _read_data(self, fid, grid, field):
        """Retrieve field data from a grid.
        Parameters
        ----------
        fid: file descriptor (open binary file with read access)
        grid : yt.frontends.amrvac.data_structures.AMRVACGrid
            The grid from which data is to be read.
        field : str
            A field name.
        Returns
        -------
        data : np.ndarray
            A 3D array of float64 type representing grid data.
        """
        ileaf = grid.id
        offset = grid._index.block_offsets[ileaf]
        # fields are stored contiguously per block, in w_names order
        field_idx = self.ds.parameters["w_names"].index(field)
        field_shape = self.block_shape[:-1]
        count = np.prod(field_shape)
        byte_size_field = count * 8  # size of a double
        # jump straight to this field within the block
        fid.seek(offset + byte_size_field * field_idx)
        data = np.fromfile(fid, "=f8", count=count)
        # reverse the shape then transpose to flip the axis order of the
        # flat on-disk layout into the expected orientation
        data.shape = field_shape[::-1]
        data = data.T
        # Always convert data to 3D, as grid.ActiveDimensions is always 3D
        while len(data.shape) < 3:
            data = data[..., np.newaxis]
        return data
    def _read_fluid_selection(self, chunks, selector, fields, size):
        """Retrieve field(s) data in a selected region of space.
        Parameters
        ----------
        chunks : generator
            A generator for multiple chunks, each of which contains a list of grids.
        selector : yt.geometry.selection_routines.SelectorObject
            A spatial region selector.
        fields : list
            A list of tuples (ftype, fname).
        size : np.int64
            The cumulative number of objs contained in all chunks.
        Returns
        -------
        data_dict : dict
            keys are the (ftype, fname) tuples, values are arrays that have been masked
            using whatever selector method is appropriate. Arrays have dtype float64.
        """
        # @Notes from Niels:
        # The chunks list has YTDataChunk objects containing the different grids.
        # The list of grids can be obtained by doing eg.
        # grids_list = chunks[0].objs or chunks[1].objs etc.
        # Every element in "grids_list" is then an AMRVACGrid object,
        # and has hence all attributes of a grid :
        # (Level, ActiveDimensions, LeftEdge, etc.)
        chunks = list(chunks)
        data_dict = {}  # <- return variable
        if isinstance(selector, GridSelector):
            # fast path: a single whole grid is requested, no masking needed
            if not len(chunks) == len(chunks[0].objs) == 1:
                raise RuntimeError
            grid = chunks[0].objs[0]
            with open(self.datfile, "rb") as fh:
                for ftype, fname in fields:
                    data_dict[ftype, fname] = self._read_data(fh, grid, fname)
        else:
            if size is None:
                size = sum(g.count(selector) for chunk in chunks for g in chunk.objs)
            # preallocate flat output buffers, one per requested field
            for field in fields:
                data_dict[field] = np.empty(size, dtype="float64")
            # nb_grids = sum(len(chunk.objs) for chunk in chunks)
            with open(self.datfile, "rb") as fh:
                ind = 0  # running write offset into the output buffers
                for chunk in chunks:
                    for grid in chunk.objs:
                        nd = 0
                        for field in fields:
                            ftype, fname = field
                            data = self._read_data(fh, grid, fname)
                            # select() copies masked cells into the buffer
                            # and returns how many were written
                            nd = grid.select(selector, data, data_dict[field], ind)
                        ind += nd
        return data_dict
    def _read_chunk_data(self, chunk, fields):
        """Not implemented yet."""
        # This reads the data from a single chunk without doing any selection,
        # and is only used for caching data that might be used by multiple
        # different selectors later. For instance, this can speed up ghost zone
        # computation.
        # it should be used by _read_fluid_selection instead of _read_data
        raise NotImplementedError
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@frontends@amrvac@io.py@.PATH_END.py
|
{
"filename": "processOSPREI.py",
"repo_name": "ckay314/OSPREI",
"repo_path": "OSPREI_extracted/OSPREI-master/processCode/processOSPREI.py",
"type": "Python"
}
|
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import sys
import os
from scipy.interpolate import CubicSpline
from scipy.stats import norm, pearsonr
from scipy import ndimage
import datetime
import matplotlib.dates as mdates
from matplotlib.ticker import FuncFormatter
import warnings
# NOTE(review): this silences *all* warnings process-wide, including ones
# that may signal real problems -- consider narrowing the filter
warnings.filterwarnings("ignore")
# `global` at module level is a no-op; kept for consistency with the rest
# of the codebase
global dtor
dtor = math.pi / 180.  # degrees -> radians conversion factor
# make label text size bigger
plt.rcParams.update({'font.size':14})
# Set up the path variable
# I like keeping all the code in a single folder called code
# but you do you (and update this to match whatever you do)
import OSPREI as OSP
mainpath = OSP.mainpath
sys.path.append(os.path.abspath(OSP.codepath)) #MTMYS
sys.path.append(os.path.abspath(OSP.propath)) #MTMYS
import setupPro as SP
import proMetrics as met
import proForeCAT as proFore
import proANTEATR as proANT
import proFIDO
# |----------------------------------------------------------|
# |------ Flags to control if plot all or a single one ------|
# |----------------------------------------------------------|
plotAll = True
# e.g. to plot a single figure, switch the flag above to False and change
# that figure's specific 'if plotAll' to 'if True'
doMovie = False # option to include enlilesque movie
# |------------------------------------------------|
# |------ Set tag for type of figures to save -----|
# |------------------------------------------------|
global figtag
figtag = 'png' # can only be 'png' or 'pdf'
# |-------------------------------------------------------|
# |------ Switch for giving the BF for each sat ----------|
# |------ its own special color in each fig  -------------|
# |-------------------------------------------------------|
colorEns = True
# Set up a list of nice colors to use to highlight best fit for each satellite
#BFcols = ['#882255', '#88CCEE', '#CC6677', '#44AA99', '#DDCC77', '#AA4499', '#117733', '#332288', ]
BFcols = ['#03135B', '#A50026', '#364B9A', '#E34D34', '#5385BC', '#F99858', '#83B8D7', '#FDD081', '#B7DDEB', '#F3Fd81']
# Color for metric combining all sats
comboCol = 'red'
def runproOSP(inputPassed='noFile', onlyIS=False):
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
# |------------------------------------- Basic setup -------------------------------------|
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
# |-----------------------------------------------------------|
# |------ Use OSPREI to pull in the simulation parameters ----|
# |-----------------------------------------------------------|
OSP.setupOSPREI(inputPassed=inputPassed)
# |---------------------------------------------|
# |------ Read in the results from OSPREI ------|
# |---------------------------------------------|
global nSat, nEns, moreSilence, hitsSat, nFails, DoY, dObj, satColors
try:
ResArr, nSat, hitsSat, nFails, DoY, dObj = SP.txt2obj()
nEns = len(ResArr.keys())
# Stop it from printing too much if have a large ensemble
if nEns > 10:
moreSilence = True
# Set up satellite colors
satColors = [BFcols[i] for i in range(nSat)]
satColors = np.append(satColors, comboCol)
except:
sys.exit('Error reading in OSPREI results. Exiting without plotting anything.')
# |------------------------------------------------------------|
# |------ Check if we have observations and pull in if so -----|
# |------------------------------------------------------------|
global ObsData, hasObs
hasObs = False
ObsData = [None]
satNames = [''] # default for single sat case with no data
satLoc0 = [[] for i in range(nSat)] # array of sat locations at start of simulation
satLocI = [[] for i in range(nSat)] # array of sat locations at its own time of impact (for each one)
satLocAllI = [[] for i in range(nSat)] # array of sat locations at impact for each one
# e.g. if have PSP, STA then would be [[PSP at PSP time, STA at PSP time], [PSP at STA time, STA at STA time]]
if (OSP.ObsDataFile is not None) or ('satPath' in OSP.input_values):
hasObs = True
#print (OSP.ObsDataFile)
if OSP.ObsDataFile == None:
hasObs = False
try:
ObsData, satNames, satLoc0, satLocI, satLocAllI, hasObs = SP.processObs(ResArr, nSat, hasObs=hasObs)
except:
print('Error in reading in observations for comparison. Proceeding without them.')
hasObs = False
ObsData = [None]
else:
hasObs = False
ObsData, satNames, satLoc0, satLocI, satLocAllI, hasObs = SP.processObs(ResArr, nSat, hasObs=hasObs)
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
# |----------------------------------- Ensemble Figures ----------------------------------|
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
# |------------------------------------------------------------|
# |------ Calculate metrics first if we have ensemble ---------|
# |------------------------------------------------------------|
# setupMetrics will pull out the best fit for each satellite which
# we can use to color those ensemble members in all the other figs
BFbySat, BFall = None, None # holders for ens best fits
satNamesL = np.append(satNames, ['All'])
metFail = False
try:
if hasObs:
allScores, BFbySat, BFall, friends = met.setupMetrics(ResArr, ObsData, nEns, nSat, hasObs, hitsSat, satNames, DoY, silent=False)
if nEns > 1:
comboBFs = np.append(BFbySat, BFall)
else:
comboBFs = [None]
# reset to Nones if don't want to plot colors
if not colorEns:
BFbySat, BFall = None, None
except:
print('Error in calculating metrics. Proceeding without score analysis')
metFail = True
# |------------------------------------------------------------|
# |--------- Make the ensemble score scatter plots ------------|
# |------------------------------------------------------------|
# These plots are pretty slow for larger ensembles
if plotAll and (nEns > 1) and not metFail:
for i in range(nSat):
# friends at -1 will plot values from combined score
# switch -1 to i (satID) to use friends for that satellite
try:
met.plotEnsScoreScatter(ResArr, allScores, nEns, satID=i, BFs=comboBFs, satNames=satNames, BFcols=satColors, friends=friends[-1])
except:
print('Error in making ensemble score scatter for ', satNames[i])
try:
# Convert allScores to oneScores
oneScores = np.sum(allScores, axis=0)
# Reflag the bad cases after summing messes up
oneScores[np.where(oneScores < 0)] == -9998
met.plotEnsScoreScatter(ResArr, oneScores, nEns, satID=-1, BFs=comboBFs, satNames=satNames, BFcols=satColors, friends=friends[-1])
except:
print('Error in making combined ensemble score scatter')
# |------------------------------------------------------------|
# |----- Make the ensemble input/output scatter plots ---------|
# |------------------------------------------------------------|
if plotAll and (nEns > 1):
for i in range(nSat):
# friends at -1 will plot values from combined score
# switch -1 to i (satID) to use friends for that satellite
try:
met.makeEnsplot(ResArr, nEns, critCorr=0.5, satID=i, satNames=satNamesL, BFs=comboBFs, BFcols=satColors, friends=friends[-1])
except:
print('Error in making ensemble input/output scatter plot')
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
# |--------------------------------- ForeCAT figure(s) -----------------------------------|
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
if OSP.doFC:
# |------------------------------------------------------------|
# |------------------------ CPA plot --------------------------|
# |------------------------------------------------------------|
if plotAll:
try:
proFore.makeCPAplot(ResArr, nEns, BFs=comboBFs, satCols=satColors, satNames=satNamesL)
except:
print('Error in making CPA plot')
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
# |------------------------------ Interplanetary figures ---------------------------------|
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
if OSP.doANT:
# |------------------------------------------------------------|
# |----------------- Make the J map-like plot -----------------|
# |------------------------------------------------------------|
if plotAll:
for i in range(nSat):
# This has some weird packaging in the call bc the code will do multi sat
# but not something used in OSPREI for now
# Get time at start of ANTEATR portion
try:
if not OSP.noDate:
datestr = OSP.input_values['date'] + 'T'+ OSP.input_values['time']
start = (datetime.datetime.strptime(datestr, "%Y%m%dT%H:%M" ))
else:
start = None
proANT.makeJmap([ResArr[0]], [satLoc0[i]], [start], satName=satNames[i])
except:
print('Error in making J map plot for satellite ', satNames[i])
# |------------------------------------------------------------|
# |------------ Make the profiles with sheath data ------------|
# |------------------------------------------------------------|
# The sheath profiles don't always look the best if it is a borderline case
# where some fraction of the cases stop forming a shock halfway through
if plotAll and OSP.doPUP:
try:
proANT.makePUPplot(ResArr, nEns, BFs=comboBFs, satCols=satColors, satNames=satNamesL)
except:
print('Error in making PUP histogram')
# |------------------------------------------------------------|
# |---------- Make the profiles without sheath data -----------|
# |------------------------------------------------------------|
if plotAll:
try:
proANT.makeDragless(ResArr, nEns, BFs=comboBFs, satCols=satColors, satNames=satNamesL)
except:
print('Error in making drag plot')
# |------------------------------------------------------------|
# |---------------- Make the ANTEATR histogram ----------------|
# |------------------------------------------------------------|
if plotAll and (nEns>1):
for i in range(nSat):
try:
proANT.makeAThisto(ResArr, dObj=dObj, DoY=DoY, satID=i, BFs=comboBFs, satCols=satColors, satNames=satNamesL)
except:
print('Error in making ANTEATR histogram for satellite ', satNamesL[i])
# |----------------------------------------------------------------|
# |------------ Make impact and /or other contours plot -----------|
# |----------------------------------------------------------------|
if plotAll and (nEns>1):
for i in range(nSat):
try:
# Option for the multi panel plot
#proANT.makeContours(ResArr, nEns, nFails, satID=3, satLocs=satLocI, satNames=satNames, allSats=True, satCols=satColors, calcwid=95, plotwid=50 )
# Option for just single panel with percentage of impacts
proANT.makePercMap(ResArr, nEns, nFails, satID=i, satLocs=satLocAllI[i], satNames=satNames, allSats=True, satCols=satColors, calcwid=95, plotwid=50 )
except:
print('Error in making impact contours')
# |------------------------------------------------------------|
# |------------- Make tne Enlil-like movie figures ------------|
# |------------------------------------------------------------|
if doMovie:
try:
# vel0 = 300 -> sets min of contours at 300 (defaults to 300)
# vel1 = 750 -> sets max of contours at 750 (defaults to nearest 50 over vCME)
proANT.enlilesque(ResArr, bonusTime=0, doSat=True, planes='both', satNames=satNames, satCols=satColors)
except:
print('Error in making Enlilesque frames')
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
# |--------------------------------- In Situ figures -------------------------------------|
# |---------------------------------------------------------------------------------------|
# |---------------------------------------------------------------------------------------|
if OSP.doFIDO:
# |------------------------------------------------------------|
# |------------- Make the in situ spaghetti plots -------------|
# |------------------------------------------------------------|
if plotAll:
for i in range(nSat):
if hitsSat[i]:
try:
proFIDO.makeISplot(ResArr, dObj, DoY, satID=i, SWpadF=12, SWpadB=12, BFs=comboBFs, satCols=satColors, satNames=satNamesL, hasObs=hasObs, ObsData=ObsData)
except:
print('Error in making in situ plot for satellite ', satNames[i])
# |------------------------------------------------------------|
# |------------- Make histograms with sheath data -------------|
# |------------------------------------------------------------|
if OSP.doPUP:
if plotAll and (nEns>1):
for i in range(nSat):
try:
proFIDO.makeallIShistos(ResArr, dObj, DoY, satID=i, satNames=satNamesL, BFs=comboBFs, satCols=satColors)
except:
print('Error in making FIDO histogram for satellite ', satNamesL[i])
# |------------------------------------------------------------|
# |------------ Make histograms without sheath data -----------|
# |------------------------------------------------------------|
else:
if plotAll and (nEns>1):
for i in range(nSat):
try:
proFIDO.makeFIDOhistos(ResArr, dObj, DoY, satID=i, satNames=satNamesL, BFs=comboBFs, satCols=satColors)
except:
print('Error in making FIDO histogram for satellite ', satNamesL[i])
# |------------------------------------------------------------|
# |------------------ Make heat map timeline ------------------|
# |------------------------------------------------------------|
if plotAll and (nEns>1):
for i in range(nSat):
if hitsSat[i]:
# Adding BFs is possible but does make it pretty messy
#proFIDO.makeAllprob(ResArr, dObj, DoY, satID=i, BFs=comboBFs, satCols=satColors, satNames=satNamesL, hasObs=hasObs, ObsData=ObsData)
# Typically better to run without BFs
try:
proFIDO.makeAllprob(ResArr, dObj, DoY, satID=i, hasObs=hasObs, ObsData=ObsData, satNames=satNames)
except:
print('Error in making FIDO heat map for satellite ', satNamesL[i])
if __name__ == '__main__':
runproOSP()
|
ckay314REPO_NAMEOSPREIPATH_START.@OSPREI_extracted@OSPREI-master@processCode@processOSPREI.py@.PATH_END.py
|
{
"filename": "test_sersic_highn_alias2.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/devel/external/test_sersic_highn/test_sersic_highn_alias2.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import os
import logging
import test_sersic_highn_basic

# Variant of the basic high-n Sersic test: reuses the basic configuration but
# overrides the gsparams alias_threshold (set to 2.5e-3 here).

# Start off with the basic config
config = test_sersic_highn_basic.config_basic
config['image']['gsparams']['alias_threshold'] = 2.5e-3

# Output filename; encodes NOBS (number of test objects) for traceability.
if not os.path.isdir("outputs"):
    os.mkdir("outputs")
outfile = os.path.join(
    "outputs", "sersic_highn_alias2_output_N"+str(test_sersic_highn_basic.NOBS)+".asc")

# Setup the logging
logging.basicConfig(level=test_sersic_highn_basic.LOGLEVEL)
logger = logging.getLogger("sersic_highn_alias2")

# Fixed seed so repeated runs are reproducible.
random_seed = 912424534
test_sersic_highn_basic.run_tests(
    random_seed, outfile, config=config, logger=logger,
    fail_value=test_sersic_highn_basic.FAIL_VALUE)
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@devel@external@test_sersic_highn@test_sersic_highn_alias2.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/isosurface/hoverlabel/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``isosurface.hoverlabel.font.shadow`` string property."""

    def __init__(
        self, plotly_name="shadow", parent_name="isosurface.hoverlabel.font", **kwargs
    ):
        # Pull the schema defaults out of kwargs first so explicit caller
        # overrides still win.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@isosurface@hoverlabel@font@_shadow.py@.PATH_END.py
|
{
"filename": "combine_match_results.py",
"repo_name": "samuelyeewl/specmatch-emp",
"repo_path": "specmatch-emp_extracted/specmatch-emp-master/tests/combine_match_results.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""Combine per-star SpecMatch match results into one global CSV table."""
import pandas as pd
import os
import sys
import re
from argparse import ArgumentParser

from specmatchemp import library

if __name__ == '__main__':
    # Argument parser
    psr = ArgumentParser(description="Combine match results")
    psr.add_argument('libpath', type=str, help="Path to library file")
    psr.add_argument('resdir', type=str, help="Path to results directory")
    psr.add_argument('-s', '--suffix', type=str, default="", help="Suffix to append to results files")
    args = psr.parse_args()

    if not os.path.isdir(args.resdir):
        print("Could not find folder at {0}".format(args.resdir))
        sys.exit(1)

    lib = library.read_hdf(args.libpath, wavlim='none')
    # get names of stars
    names = list(lib.library_params.cps_name)

    # global dataframe to store
    res_global = pd.DataFrame()
    for name in names:
        res_path = os.path.join(args.resdir, '{0}/{0}_match.csv'.format(name))
        # individual dataframe for each star
        res_star = pd.read_csv(res_path, index_col=0)
        # Look up the library index of the target star.  Escape the name so
        # regex metacharacters in CPS names (e.g. '+') cannot corrupt the
        # anchored exact-match pattern.
        pattern = '^' + re.escape(name) + '$'
        targ_idx = lib.library_params[lib.library_params.cps_name.str.contains(pattern)].index[0]
        res_star['targ_idx'] = targ_idx
        res_star['ref_idx'] = res_star.index
        # concatenate rows to global dataframe
        if res_global.empty:
            res_global = res_star
        else:
            res_global = pd.concat((res_global, res_star), ignore_index=True)

    # save global results
    res_global.reset_index(inplace=True)
    outpath_global = os.path.join(args.resdir, 'match_results.csv')
    res_global.to_csv(outpath_global)
|
samuelyeewlREPO_NAMEspecmatch-empPATH_START.@specmatch-emp_extracted@specmatch-emp-master@tests@combine_match_results.py@.PATH_END.py
|
{
"filename": "bandpass_calibration.py",
"repo_name": "RTIP/artip",
"repo_path": "artip_extracted/artip-master/casa_scripts/bandpass_calibration.py",
"type": "Python"
}
|
"""CASA script: solve bandpass calibration for a measurement set.

Intended to be run as ``casa -c bandpass_calibration.py <args>``; the tasks
``gaincal`` and ``bandpass`` are injected into the namespace by the casapy
runtime.
"""
import sys

# CASA puts its own options before '-c <script>'; our parameters start after.
script_parameters_start_index = sys.argv.index('-c') + 2
parameters = sys.argv[script_parameters_start_index:]
ms_dataset = parameters[0]                 # path to the measurement set
output_path = parameters[1]                # directory for calibration tables
field = parameters[2]                      # calibrator field id/name
refant = parameters[3]                     # reference antenna
solint = float(parameters[4])              # bandpass solution interval
phase_calib_minsnr = float(parameters[5])  # min SNR for the phase pre-cal
phase_calib_solint = float(parameters[6])  # phase pre-cal solution interval

bpphase_gcal = output_path + "/" + 'bpphase.gcal'
bandpass_table = output_path + "/" + 'bandpass.bcal'

sys.stdout.write("\n##### Started calculating bandpass gains on {0}#####\n".format(ms_dataset))
# Phase-only pre-calibration so phase drifts do not decorrelate the bandpass solve.
gaincal(vis=ms_dataset, caltable=bpphase_gcal, field=field, refant=refant, calmode='p', solint=phase_calib_solint, minsnr=phase_calib_minsnr)
sys.stdout.write("\n##### Finished calculating bandpass gains on {0}#####\n".format(ms_dataset))
# Use the standard Python literal True instead of the casapy-only alias `T`,
# which is undefined outside a CASA session. solnorm=True normalises the
# bandpass solution amplitudes.
bandpass(vis=ms_dataset, caltable=bandpass_table, field=field, refant=refant, solint=solint, solnorm=True,
         gaintable=[bpphase_gcal], combine='field')
|
RTIPREPO_NAMEartipPATH_START.@artip_extracted@artip-master@casa_scripts@bandpass_calibration.py@.PATH_END.py
|
{
"filename": "_z.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/surface/contours/_z.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Z(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "surface.contours"
_path_str = "surface.contours.z"
_valid_props = {
"color",
"end",
"highlight",
"highlightcolor",
"highlightwidth",
"project",
"show",
"size",
"start",
"usecolormap",
"width",
}
# color
# -----
@property
def color(self):
"""
Sets the color of the contour lines.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# end
# ---
@property
def end(self):
"""
Sets the end contour level value. Must be more than
`contours.start`
The 'end' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["end"]
@end.setter
def end(self, val):
self["end"] = val
# highlight
# ---------
@property
def highlight(self):
"""
Determines whether or not contour lines about the z dimension
are highlighted on hover.
The 'highlight' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["highlight"]
@highlight.setter
def highlight(self, val):
self["highlight"] = val
# highlightcolor
# --------------
@property
def highlightcolor(self):
"""
Sets the color of the highlighted contour lines.
The 'highlightcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["highlightcolor"]
@highlightcolor.setter
def highlightcolor(self, val):
self["highlightcolor"] = val
# highlightwidth
# --------------
@property
def highlightwidth(self):
"""
Sets the width of the highlighted contour lines.
The 'highlightwidth' property is a number and may be specified as:
- An int or float in the interval [1, 16]
Returns
-------
int|float
"""
return self["highlightwidth"]
@highlightwidth.setter
def highlightwidth(self, val):
self["highlightwidth"] = val
# project
# -------
@property
def project(self):
"""
The 'project' property is an instance of Project
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.contours.z.Project`
- A dict of string/value properties that will be passed
to the Project constructor
Supported dict properties:
x
Determines whether or not these contour lines
are projected on the x plane. If `highlight` is
set to True (the default), the projected lines
are shown on hover. If `show` is set to True,
the projected lines are shown in permanence.
y
Determines whether or not these contour lines
are projected on the y plane. If `highlight` is
set to True (the default), the projected lines
are shown on hover. If `show` is set to True,
the projected lines are shown in permanence.
z
Determines whether or not these contour lines
are projected on the z plane. If `highlight` is
set to True (the default), the projected lines
are shown on hover. If `show` is set to True,
the projected lines are shown in permanence.
Returns
-------
plotly.graph_objs.surface.contours.z.Project
"""
return self["project"]
@project.setter
def project(self, val):
self["project"] = val
# show
# ----
@property
def show(self):
"""
Determines whether or not contour lines about the z dimension
are drawn.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
# size
# ----
@property
def size(self):
"""
Sets the step between each contour level. Must be positive.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# start
# -----
@property
def start(self):
"""
Sets the starting contour level value. Must be less than
`contours.end`
The 'start' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["start"]
@start.setter
def start(self, val):
self["start"] = val
# usecolormap
# -----------
@property
def usecolormap(self):
"""
An alternate to "color". Determines whether or not the contour
lines are colored using the trace "colorscale".
The 'usecolormap' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["usecolormap"]
@usecolormap.setter
def usecolormap(self, val):
self["usecolormap"] = val
# width
# -----
@property
def width(self):
"""
Sets the width of the contour lines.
The 'width' property is a number and may be specified as:
- An int or float in the interval [1, 16]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the contour lines.
end
Sets the end contour level value. Must be more than
`contours.start`
highlight
Determines whether or not contour lines about the z
dimension are highlighted on hover.
highlightcolor
Sets the color of the highlighted contour lines.
highlightwidth
Sets the width of the highlighted contour lines.
project
:class:`plotly.graph_objects.surface.contours.z.Project
` instance or dict with compatible properties
show
Determines whether or not contour lines about the z
dimension are drawn.
size
Sets the step between each contour level. Must be
positive.
start
Sets the starting contour level value. Must be less
than `contours.end`
usecolormap
An alternate to "color". Determines whether or not the
contour lines are colored using the trace "colorscale".
width
Sets the width of the contour lines.
"""
def __init__(
self,
arg=None,
color=None,
end=None,
highlight=None,
highlightcolor=None,
highlightwidth=None,
project=None,
show=None,
size=None,
start=None,
usecolormap=None,
width=None,
**kwargs
):
"""
Construct a new Z object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.surface.contours.Z`
color
Sets the color of the contour lines.
end
Sets the end contour level value. Must be more than
`contours.start`
highlight
Determines whether or not contour lines about the z
dimension are highlighted on hover.
highlightcolor
Sets the color of the highlighted contour lines.
highlightwidth
Sets the width of the highlighted contour lines.
project
:class:`plotly.graph_objects.surface.contours.z.Project
` instance or dict with compatible properties
show
Determines whether or not contour lines about the z
dimension are drawn.
size
Sets the step between each contour level. Must be
positive.
start
Sets the starting contour level value. Must be less
than `contours.end`
usecolormap
An alternate to "color". Determines whether or not the
contour lines are colored using the trace "colorscale".
width
Sets the width of the contour lines.
Returns
-------
Z
"""
super(Z, self).__init__("z")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.surface.contours.Z
constructor must be a dict or
an instance of :class:`plotly.graph_objs.surface.contours.Z`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("end", None)
_v = end if end is not None else _v
if _v is not None:
self["end"] = _v
_v = arg.pop("highlight", None)
_v = highlight if highlight is not None else _v
if _v is not None:
self["highlight"] = _v
_v = arg.pop("highlightcolor", None)
_v = highlightcolor if highlightcolor is not None else _v
if _v is not None:
self["highlightcolor"] = _v
_v = arg.pop("highlightwidth", None)
_v = highlightwidth if highlightwidth is not None else _v
if _v is not None:
self["highlightwidth"] = _v
_v = arg.pop("project", None)
_v = project if project is not None else _v
if _v is not None:
self["project"] = _v
_v = arg.pop("show", None)
_v = show if show is not None else _v
if _v is not None:
self["show"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("start", None)
_v = start if start is not None else _v
if _v is not None:
self["start"] = _v
_v = arg.pop("usecolormap", None)
_v = usecolormap if usecolormap is not None else _v
if _v is not None:
self["usecolormap"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@surface@contours@_z.py@.PATH_END.py
|
{
"filename": "_scalegroup.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnelarea/_scalegroup.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ScalegroupValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``funnelarea.scalegroup`` string property."""

    def __init__(self, plotly_name="scalegroup", parent_name="funnelarea", **kwargs):
        # Default edit_type to the schema value unless the caller overrides it;
        # setdefault + **kwargs forwards the same keyword to the base class.
        kwargs.setdefault("edit_type", "calc")
        super(ScalegroupValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnelarea@_scalegroup.py@.PATH_END.py
|
{
"filename": "polynomial.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/lib/polynomial.py",
"type": "Python"
}
|
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
    """
    Warning category raised by `polyfit` when the Vandermonde matrix is
    rank deficient.

    See the `polyfit` documentation for more information, an example of
    this warning being issued, and a way to suppress it.

    """
    pass
def poly(seq_of_zeros):
    """
    Find the coefficients of a polynomial with the given sequence of roots.
    Returns the coefficients of the polynomial whose leading coefficient
    is one for the given sequence of zeros (multiple roots must be included
    in the sequence as many times as their multiplicity; see Examples).
    A square matrix (or array, which will be treated as a matrix) can also
    be given, in which case the coefficients of the characteristic polynomial
    of the matrix are returned.
    Parameters
    ----------
    seq_of_zeros : array_like, shape (N,) or (N, N)
        A sequence of polynomial roots, or a square array or matrix object.
    Returns
    -------
    c : ndarray
        1D array of polynomial coefficients from highest to lowest degree:
        ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
        where c[0] always equals 1.
    Raises
    ------
    ValueError
        If input is the wrong shape (the input must be a 1-D or square
        2-D array).
    See Also
    --------
    polyval : Compute polynomial values.
    roots : Return the roots of a polynomial.
    polyfit : Least squares polynomial fit.
    poly1d : A one-dimensional polynomial class.
    Notes
    -----
    Specifying the roots of a polynomial still leaves one degree of
    freedom, typically represented by an undetermined leading
    coefficient. [1]_ In the case of this function, that coefficient -
    the first one in the returned array - is always taken as one. (If
    for some reason you have one other point, the only automatic way
    presently to leverage that information is to use ``polyfit``.)
    The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
    matrix **A** is given by
        :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
    where **I** is the `n`-by-`n` identity matrix. [2]_
    References
    ----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
       Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
    .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
       Academic Press, pg. 182, 1980.
    Examples
    --------
    Given a sequence of a polynomial's zeros:
    >>> np.poly((0, 0, 0)) # Multiple root example
    array([1, 0, 0, 0])
    The line above represents z**3 + 0*z**2 + 0*z + 0.
    >>> np.poly((-1./2, 0, 1./2))
    array([ 1.  ,  0.  , -0.25,  0.  ])
    The line above represents z**3 - z/4
    >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
    array([ 1.        , -0.77086955,  0.08618131,  0.        ]) #random
    Given a square array object:
    >>> P = np.array([[0, 1./3], [-1./2, 0]])
    >>> np.poly(P)
    array([ 1.        ,  0.        ,  0.16666667])
    Or a square matrix object:
    >>> np.poly(np.matrix(P))
    array([ 1.        ,  0.        ,  0.16666667])
    Note how in all cases the leading coefficient is always 1.
    """
    seq_of_zeros = atleast_1d(seq_of_zeros)
    sh = seq_of_zeros.shape
    if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
        # Square 2-D input: treat as a matrix and use its eigenvalues as
        # the roots of the characteristic polynomial.
        seq_of_zeros = eigvals(seq_of_zeros)
    elif len(sh) == 1:
        dt = seq_of_zeros.dtype
        # Let object arrays slip through, e.g. for arbitrary precision
        if dt != object:
            seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
    else:
        raise ValueError("input must be 1d or non-empty square 2d array.")
    if len(seq_of_zeros) == 0:
        # Zero roots: the polynomial is the constant 1.
        return 1.0
    dt = seq_of_zeros.dtype
    a = ones((1,), dtype=dt)
    # Multiply out the factors (x - root_k) one at a time; polynomial
    # multiplication is convolution of the coefficient sequences.
    for k in range(len(seq_of_zeros)):
        a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
                        mode='full')
    if issubclass(a.dtype.type, NX.complexfloating):
        # if complex roots are all complex conjugates, the roots are real.
        roots = NX.asarray(seq_of_zeros, complex)
        if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
            a = a.real.copy()
    return a
def roots(p):
    """
    Return the roots of a polynomial with coefficients given in p.
    The values in the rank-1 array `p` are coefficients of a polynomial.
    If the length of `p` is n+1 then the polynomial is described by::
      p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
    Parameters
    ----------
    p : array_like
        Rank-1 array of polynomial coefficients.
    Returns
    -------
    out : ndarray
        An array containing the roots of the polynomial.
    Raises
    ------
    ValueError
        When `p` cannot be converted to a rank-1 array.
    See also
    --------
    poly : Find the coefficients of a polynomial with a given sequence
           of roots.
    polyval : Compute polynomial values.
    polyfit : Least squares polynomial fit.
    poly1d : A one-dimensional polynomial class.
    Notes
    -----
    The algorithm relies on computing the eigenvalues of the
    companion matrix [1]_.
    References
    ----------
    .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*.  Cambridge, UK:
        Cambridge University Press, 1999, pp. 146-7.
    Examples
    --------
    >>> coeff = [3.2, 2, 1]
    >>> np.roots(coeff)
    array([-0.3125+0.46351241j, -0.3125-0.46351241j])
    """
    # If input is scalar, this makes it an array
    p = atleast_1d(p)
    if p.ndim != 1:
        raise ValueError("Input must be a rank-1 array.")
    # find non-zero array entries
    non_zero = NX.nonzero(NX.ravel(p))[0]
    # Return an empty array if polynomial is all zeros
    if len(non_zero) == 0:
        return NX.array([])
    # find the number of trailing zeros -- this is the number of roots at 0.
    trailing_zeros = len(p) - non_zero[-1] - 1
    # strip leading and trailing zeros
    p = p[int(non_zero[0]):int(non_zero[-1])+1]
    # casting: if incoming array isn't floating point, make it floating point.
    if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
        p = p.astype(float)
    N = len(p)
    if N > 1:
        # build companion matrix and find its eigenvalues (the roots)
        # First row holds the normalized coefficients; the subdiagonal of
        # ones shifts the powers.
        A = diag(NX.ones((N-2,), p.dtype), -1)
        A[0,:] = -p[1:] / p[0]
        roots = eigvals(A)
    else:
        # Degree-0 polynomial after stripping: no (non-zero) roots.
        roots = NX.array([])
    # tack any zeros onto the back of the array
    roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
    return roots
def polyint(p, m=1, k=None):
    """
    Return an antiderivative (indefinite integral) of a polynomial.
    The returned order `m` antiderivative `P` of polynomial `p` satisfies
    :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
    integration constants `k`. The constants determine the low-order
    polynomial part
    .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
    of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
    Parameters
    ----------
    p : array_like or poly1d
        Polynomial to integrate.
        A sequence is interpreted as polynomial coefficients, see `poly1d`.
    m : int, optional
        Order of the antiderivative. (Default: 1)
    k : list of `m` scalars or scalar, optional
        Integration constants. They are given in the order of integration:
        those corresponding to highest-order terms come first.
        If ``None`` (default), all constants are assumed to be zero.
        If `m = 1`, a single scalar can be given instead of a list.
    Returns
    -------
    P : ndarray or poly1d
        Coefficients of the order-`m` antiderivative; a `poly1d` input
        yields a `poly1d` output.
    See Also
    --------
    polyder : derivative of a polynomial
    poly1d.integ : equivalent method
    Examples
    --------
    The defining property of the antiderivative:
    >>> p = np.poly1d([1,1,1])
    >>> P = np.polyint(p)
    >>> P
    poly1d([ 0.33333333,  0.5       ,  1.        ,  0.        ])
    >>> np.polyder(P) == p
    True
    The integration constants default to zero, but can be specified:
    >>> P = np.polyint(p, 3)
    >>> P(0)
    0.0
    >>> np.polyder(P)(0)
    0.0
    >>> np.polyder(P, 2)(0)
    0.0
    >>> P = np.polyint(p, 3, k=[6,5,3])
    >>> P
    poly1d([ 0.01666667,  0.04166667,  0.16666667,  3. ,  5. ,  3. ])
    Note that 3 = 6 / 2!, and that the constants are given in the order of
    integrations. Constant of the highest-order polynomial term comes first:
    >>> np.polyder(P, 2)(0)
    6.0
    >>> np.polyder(P, 1)(0)
    5.0
    >>> P(0)
    3.0
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of integral must be positive (see polyder)")
    if k is None:
        k = NX.zeros(m, float)
    k = atleast_1d(k)
    if len(k) == 1 and m > 1:
        # A single scalar constant is broadcast to all m integrations.
        k = k[0]*NX.ones(m, float)
    if len(k) < m:
        raise ValueError(
              "k must be a scalar or a rank-1 array of length 1 or >m.")
    truepoly = isinstance(p, poly1d)
    p = NX.asarray(p)
    if m == 0:
        # Zeroth antiderivative is the polynomial itself.
        if truepoly:
            return poly1d(p)
        return p
    else:
        # Integrate once (divide coefficient i by its new descending power,
        # append the constant k[0]) and recurse for the remaining orders.
        # Note: this must work also with object and integer arrays
        y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
        val = polyint(y, m - 1, k=k[1:])
        if truepoly:
            return poly1d(val)
        return val
def polyder(p, m=1):
    """
    Return the derivative of the specified order of a polynomial.

    Parameters
    ----------
    p : poly1d or sequence
        Polynomial to differentiate; a sequence is read as coefficients
        from highest to lowest degree (see `poly1d`).
    m : int, optional
        Order of differentiation (default: 1).

    Returns
    -------
    der : poly1d or ndarray
        Coefficients of the `m`-th derivative.  A `poly1d` input yields a
        `poly1d` output; otherwise an ndarray of coefficients is returned.

    Raises
    ------
    ValueError
        If `m` is negative.

    See Also
    --------
    polyint : Anti-derivative of a polynomial.
    poly1d : Class for one-dimensional polynomials.
    """
    order = int(m)
    if order < 0:
        raise ValueError("Order of derivative must be positive (see polyint)")
    wrap = isinstance(p, poly1d)
    result = NX.asarray(p)
    # Differentiate one order at a time: multiply each coefficient by its
    # (descending) power and drop the constant term.
    for _ in range(order):
        degree = len(result) - 1
        result = result[:-1] * NX.arange(degree, 0, -1)
    if wrap:
        result = poly1d(result)
    return result
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.
    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
    to points `(x, y)`. Returns a vector of coefficients `p` that minimises
    the squared error.
    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (M,), optional
        Weights to apply to the y-coordinates of the sample points. For
        gaussian uncertainties, use 1/sigma (not 1/sigma**2).
    cov : bool, optional
        Return the estimate and the covariance matrix of the estimate
        If full is True, then cov is not returned.
    Returns
    -------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
        Polynomial coefficients, highest power first.  If `y` was 2-D, the
        coefficients for `k`-th data set are in ``p[:,k]``.
    residuals, rank, singular_values, rcond
        Present only if `full` = True.  Residuals of the least-squares fit,
        the effective rank of the scaled Vandermonde coefficient matrix,
        its singular values, and the specified value of `rcond`. For more
        details, see `linalg.lstsq`.
    V : ndarray, shape (M,M) or (M,M,K)
        Present only if `full` = False and `cov`=True.  The covariance
        matrix of the polynomial coefficient estimates.  The diagonal of
        this matrix are the variance estimates for each coefficient.  If y
        is a 2-D array, then the covariance matrix for the `k`-th data set
        are in ``V[:,:,k]``
    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.
        The warnings can be turned off by
        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)
    See Also
    --------
    polyval : Compute polynomial values.
    linalg.lstsq : Computes a least-squares fit.
    scipy.interpolate.UnivariateSpline : Computes spline fits.
    Notes
    -----
    The solution minimizes the squared error
    .. math ::
        E = \\sum_{j=0}^k |p(x_j) - y_j|^2
    in the equations::
        x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
        x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
        ...
        x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
    The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
    `polyfit` issues a `RankWarning` when the least-squares fit is badly
    conditioned. This implies that the best fit is not well-defined due
    to numerical error. The results may be improved by lowering the polynomial
    degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
    can also be set to a value smaller than its default, but the resulting
    fit may be spurious: including contributions from the small singular
    values can add numerical noise to the result.
    Note that fitting polynomial coefficients is inherently badly conditioned
    when the degree of the polynomial is large or the interval of sample points
    is badly centered. The quality of the fit should always be checked in these
    cases. When polynomial fits are not satisfactory, splines may be a good
    alternative.
    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting
    .. [2] Wikipedia, "Polynomial interpolation",
           http://en.wikipedia.org/wiki/Polynomial_interpolation
    Examples
    --------
    >>> x = np.array([0.0, 1.0, 2.0, 3.0,  4.0,  5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254])
    It is convenient to use `poly1d` objects for dealing with polynomials:
    >>> p = np.poly1d(z)
    >>> p(0.5)
    0.6143849206349179
    >>> p(3.5)
    -0.34732142857143039
    >>> p(10)
    22.579365079365115
    High-order polynomials may oscillate wildly:
    >>> p30 = np.poly1d(np.polyfit(x, y, 30))
    /... RankWarning: Polyfit may be poorly conditioned...
    >>> p30(4)
    -0.80000000000000204
    >>> p30(5)
    -0.99999999999999445
    >>> p30(4.5)
    -0.10547061179440398
    Illustration:
    >>> import matplotlib.pyplot as plt
    >>> xp = np.linspace(-2, 6, 100)
    >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
    >>> plt.ylim(-2,2)
    (-2, 2)
    >>> plt.show()
    """
    order = int(deg) + 1
    # `+ 0.0` forces at least float dtype without copying float input.
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0
    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")
    # set rcond
    if rcond is None:
        rcond = len(x)*finfo(x.dtype).eps
    # set up least squares equation for powers of x
    lhs = vander(x, order)
    rhs = y
    # apply weighting
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w
    # scale lhs to improve condition number and solve
    # (each column of the Vandermonde matrix is normalized to unit 2-norm,
    # then the solution is un-scaled below).
    scale = NX.sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T  # broadcast scale coefficients
    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning, stacklevel=2)
    if full:
        return c, resids, rank, s, rcond
    elif cov:
        Vbase = inv(dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        # Some literature ignores the extra -2.0 factor in the denominator, but
        #  it is included here because the covariance of Multivariate Student-T
        #  (which is implied by a Bayesian uncertainty analysis) includes it.
        #  Plus, it gives a slightly more conservative estimate of uncertainty.
        if len(x) <= order + 2:
            raise ValueError("the number of data points must exceed order + 2 "
                             "for Bayesian estimate the covariance matrix")
        fac = resids / (len(x) - order - 2.0)
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:,:, NX.newaxis] * fac
    else:
        return c
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    With ``p`` of length N the value returned is
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``.

    Parameters
    ----------
    p : array_like or poly1d object
        1D array of polynomial coefficients from highest degree to the
        constant term (zeros included), or a poly1d instance.
    x : array_like or poly1d object
        A number, an array of numbers, or a poly1d at which to evaluate
        `p`.  A poly1d `x` yields the composition ``p(x(t))``.

    Returns
    -------
    values : ndarray or poly1d
        Same kind as `x`: array_like in, ndarray out; poly1d in,
        poly1d out (the composed polynomial).

    See Also
    --------
    poly1d : A polynomial class.

    Notes
    -----
    Evaluation uses Horner's scheme, so high-degree polynomials may
    still accumulate rounding error.
    """
    coeffs = NX.asarray(p)
    if isinstance(x, poly1d):
        # Composition: starting from scalar 0 keeps the accumulator a poly1d.
        acc = 0
    else:
        x = NX.asarray(x)
        acc = NX.zeros_like(x)
    # Horner's scheme: fold coefficients in from highest degree downwards.
    for coef in coeffs:
        acc = acc * x + coef
    return acc
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials: either poly1d objects or 1D sequences of
        coefficients from highest to lowest degree.

    Returns
    -------
    out : ndarray or poly1d object
        Coefficients of the sum, highest degree first.  If either input
        is a poly1d, so is the result.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval

    Examples
    --------
    >>> np.polyadd([1, 2], [9, 5, 4])
    array([9, 6, 6])
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    c1 = atleast_1d(a1)
    c2 = atleast_1d(a2)
    # Left-pad the shorter coefficient sequence with zeros so the
    # constant terms line up, then add element-wise.
    pad = len(c2) - len(c1)
    if pad > 0:
        c1 = NX.concatenate((NX.zeros(pad, c1.dtype), c1))
    elif pad < 0:
        c2 = NX.concatenate((NX.zeros(-pad, c2.dtype), c2))
    total = c1 + c2
    return poly1d(total) if wrap else total
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend: poly1d objects or 1D coefficient
        sequences from highest to lowest degree (zeros included).

    Returns
    -------
    out : ndarray or poly1d
        Coefficients of the difference polynomial.  If either input is a
        poly1d, so is the result.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    Examples
    --------
    .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    minuend = atleast_1d(a1)
    subtrahend = atleast_1d(a2)
    # Left-pad the shorter coefficient sequence with zeros so the
    # constant terms line up, then subtract element-wise.
    pad = len(subtrahend) - len(minuend)
    if pad > 0:
        minuend = NX.concatenate((NX.zeros(pad, minuend.dtype), minuend))
    elif pad < 0:
        subtrahend = NX.concatenate(
            (NX.zeros(-pad, subtrahend.dtype), subtrahend))
    result = minuend - subtrahend
    return poly1d(result) if wrap else result
def polymul(a1, a2):
    """
    Find the product of two polynomials.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials: poly1d objects or 1D coefficient sequences
        from highest to lowest degree.

    Returns
    -------
    out : ndarray or poly1d object
        Coefficients of the product, highest degree first.  If either
        input is a poly1d, so is the result.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
    convolve : Array convolution.  Same output as polymul, but has parameter
               for overlap mode.

    Examples
    --------
    >>> np.polymul([1, 2, 3], [9, 5, 1])
    array([ 9, 23, 38, 17,  3])
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    # Polynomial multiplication is convolution of coefficient sequences;
    # poly1d() normalizes each operand to a 1-D coefficient array first.
    product = NX.convolve(poly1d(a1), poly1d(a2))
    return poly1d(product) if wrap else product
def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.
    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.
    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.
    v : array_like or poly1d
        Divisor polynomial's coefficients.
    Returns
    -------
    q : ndarray
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray
        Coefficients, including those equal to zero, of the remainder.
    See Also
    --------
    poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
    polyval
    Notes
    -----
    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
    not equal `v.ndim`. In other words, all four possible combinations -
    ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
    ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
    Examples
    --------
    .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
    >>> x = np.array([3.0, 5.0, 2.0])
    >>> y = np.array([2.0, 1.0])
    >>> np.polydiv(x, y)
    (array([ 1.5 ,  1.75]), array([ 0.25]))
    """
    # Bug fix: the original tested `isinstance(u, poly1d)` twice, so a
    # poly1d divisor with a plain-array dividend did not produce poly1d
    # results.  Test both operands, matching polyadd/polysub/polymul.
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w has the common type
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.copy()
    # Classic long division: peel off the leading term of the running
    # remainder at each step.
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    # Strip (near-)zero leading coefficients from the remainder, keeping
    # at least one coefficient.
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r
# Matches "**<digits>" so exponents can be typeset as superscripts below.
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    # Pretty-printing helper for poly1d.__str__: rewrite "x**N" exponents
    # as superscripts by laying the text out on two display lines — the
    # powers on line1, the base expression on line2 — wrapping at `wrap`
    # columns.  The exact space padding aligns each power directly above
    # its variable, so it must not be altered.
    n = 0
    line1 = ''
    line2 = ''
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        partstr = astr[n:span[0]]
        n = span[1]
        # Candidate additions: power padded to part width and vice versa.
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        if ((len(line2) + len(toadd2) > wrap) or
            (len(line1) + len(toadd1) > wrap)):
            # Either display line would overflow: flush and start fresh.
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    # Append whatever trails the last exponent (e.g. the constant term).
    return output + astr[n:]
class poly1d(object):
    """
    A one-dimensional polynomial class.
    A convenience class, used to encapsulate "natural" operations on
    polynomials so that said operations may take on their customary
    form in code (see Examples).
    Parameters
    ----------
    c_or_r : array_like
        The polynomial's coefficients, in decreasing powers, or if
        the value of the second parameter is True, the polynomial's
        roots (values where the polynomial evaluates to 0).  For example,
        ``poly1d([1, 2, 3])`` returns an object that represents
        :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
        one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
    r : bool, optional
        If True, `c_or_r` specifies the polynomial's roots; the default
        is False.
    variable : str, optional
        Changes the variable used when printing `p` from `x` to `variable`
        (see Examples).
    Examples
    --------
    Construct the polynomial :math:`x^2 + 2x + 3`:
    >>> p = np.poly1d([1, 2, 3])
    >>> print(np.poly1d(p))
       2
    1 x + 2 x + 3
    Evaluate the polynomial at :math:`x = 0.5`:
    >>> p(0.5)
    4.25
    Find the roots:
    >>> p.r
    array([-1.+1.41421356j, -1.-1.41421356j])
    >>> p(p.r)
    array([ -4.44089210e-16+0.j,  -4.44089210e-16+0.j])
    These numbers in the previous line represent (0, 0) to machine precision
    Show the coefficients:
    >>> p.c
    array([1, 2, 3])
    Display the order (the leading zero-coefficients are removed):
    >>> p.order
    2
    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(i+1)]``):
    >>> p[1]
    2
    Polynomials can be added, subtracted, multiplied, and divided
    (returns quotient and remainder):
    >>> p * p
    poly1d([ 1,  4, 10, 12,  9])
    >>> (p**3 + 4) / p
    (poly1d([  1.,   4.,  10.,  12.,   9.]), poly1d([ 4.]))
    ``asarray(p)`` gives the coefficient array, so polynomials can be
    used in all functions that accept arrays:
    >>> p**2 # square of polynomial
    poly1d([ 1,  4, 10, 12,  9])
    >>> np.square(p) # square of individual coefficients
    array([1, 4, 9])
    The variable used in the string representation of `p` can be modified,
    using the `variable` parameter:
    >>> p = np.poly1d([1,2,3], variable='z')
    >>> print(p)
       2
    1 z + 2 z + 3
    Construct a polynomial from its roots:
    >>> np.poly1d([1, 2], True)
    poly1d([ 1, -3,  2])
    This is the same polynomial as obtained by:
    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
    poly1d([ 1, -3,  2])
    """
    # poly1d instances are mutable via __setitem__, so they are unhashable.
    __hash__ = None
    @property
    def coeffs(self):
        """ A copy of the polynomial coefficients """
        return self._coeffs.copy()
    @property
    def variable(self):
        """ The name of the polynomial variable """
        return self._variable
    # calculated attributes
    @property
    def order(self):
        """ The order or degree of the polynomial """
        return len(self._coeffs) - 1
    @property
    def roots(self):
        """ The roots of the polynomial, where self(x) == 0 """
        return roots(self._coeffs)
    # our internal _coeffs property need to be backed by __dict__['coeffs'] for
    # scipy to work correctly.
    @property
    def _coeffs(self):
        return self.__dict__['coeffs']
    @_coeffs.setter
    def _coeffs(self, coeffs):
        self.__dict__['coeffs'] = coeffs
    # alias attributes
    r = roots
    c = coef = coefficients = coeffs
    o = order
    def __init__(self, c_or_r, r=False, variable=None):
        # Copy-construction from another poly1d: share coefficients and
        # variable name, optionally overriding the variable.
        if isinstance(c_or_r, poly1d):
            self._variable = c_or_r._variable
            self._coeffs = c_or_r._coeffs
            if set(c_or_r.__dict__) - set(self.__dict__):
                msg = ("In the future extra properties will not be copied "
                       "across when constructing one poly1d from another")
                warnings.warn(msg, FutureWarning, stacklevel=2)
                self.__dict__.update(c_or_r.__dict__)
            if variable is not None:
                self._variable = variable
            return
        if r:
            # Interpret the input as roots and expand to coefficients.
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if c_or_r.ndim > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Drop leading zeros so `order` reflects the true degree; the
        # zero polynomial keeps a single 0.0 coefficient.
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            c_or_r = NX.array([0.])
        self._coeffs = c_or_r
        if variable is None:
            variable = 'x'
        self._variable = variable
    def __array__(self, t=None):
        # Array interface: expose the coefficient array (optionally cast),
        # so numpy functions operate on the coefficients.
        if t:
            return NX.asarray(self.coeffs, t)
        else:
            return NX.asarray(self.coeffs)
    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the leading "array(" and trailing ")" from the repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals
    def __len__(self):
        # len() is the degree, not the number of coefficients.
        return self.order
    def __str__(self):
        thestr = "0"
        var = self.variable
        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs)-1
        def fmt_float(q):
            # Compact float formatting: trim a redundant ".0000" tail.
            s = '%.4g' % q
            if s.endswith('.0000'):
                s = s[:-5]
            return s
        for k in range(len(coeffs)):
            if not iscomplex(coeffs[k]):
                coefstr = fmt_float(real(coeffs[k]))
            elif real(coeffs[k]) == 0:
                coefstr = '%sj' % fmt_float(imag(coeffs[k]))
            else:
                coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
                                          fmt_float(imag(coeffs[k])))
            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                # NOTE(review): the 'b' comparison looks like a leftover;
                # fmt_float never returns 'b' — confirm before removing.
                elif coefstr == 'b':
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)
            if k > 0:
                if newstr != '':
                    if newstr.startswith('-'):
                        thestr = "%s - %s" % (thestr, newstr[1:])
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            else:
                thestr = newstr
        # _raise_power lifts the "**N" exponents onto a superscript line.
        return _raise_power(thestr)
    def __call__(self, val):
        # Evaluate the polynomial at `val` (scalar, array, or poly1d).
        return polyval(self.coeffs, val)
    def __neg__(self):
        return poly1d(-self.coeffs)
    def __pos__(self):
        return self
    def __mul__(self, other):
        # Scalar multiply scales the coefficients; otherwise full
        # polynomial multiplication via polymul.
        if isscalar(other):
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __pow__(self, val):
        # Only non-negative integer powers make sense for polynomials.
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        res = [1]
        for _ in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)
    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))
    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))
    def __div__(self, other):
        # Scalar divide scales the coefficients; otherwise polynomial
        # division returning (quotient, remainder) via polydiv.
        if isscalar(other):
            return poly1d(self.coeffs/other)
        else:
            other = poly1d(other)
            return polydiv(self, other)
    __truediv__ = __div__
    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other/self.coeffs)
        else:
            other = poly1d(other)
            return polydiv(other, self)
    __rtruediv__ = __rdiv__
    def __eq__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        if self.coeffs.shape != other.coeffs.shape:
            return False
        return (self.coeffs == other.coeffs).all()
    def __ne__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        return not self.__eq__(other)
    def __getitem__(self, val):
        # Index by power: p[k] is the coefficient of x**k; out-of-range
        # powers read as 0.
        ind = self.order - val
        if val > self.order:
            return 0
        if val < 0:
            return 0
        return self.coeffs[ind]
    def __setitem__(self, key, val):
        # Assign the coefficient of x**key, growing the coefficient
        # array with leading zeros when key exceeds the current order.
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        if key > self.order:
            zr = NX.zeros(key-self.order, self.coeffs.dtype)
            self._coeffs = NX.concatenate((zr, self.coeffs))
            ind = 0
        self._coeffs[ind] = val
        return
    def __iter__(self):
        # Iterate over coefficients, highest degree first.
        return iter(self.coeffs)
    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial.
        Refer to `polyint` for full documentation.
        See Also
        --------
        polyint : equivalent function
        """
        return poly1d(polyint(self.coeffs, m=m, k=k))
    def deriv(self, m=1):
        """
        Return a derivative of this polynomial.
        Refer to `polyder` for full documentation.
        See Also
        --------
        polyder : equivalent function
        """
        return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
# Register an 'always' filter so every ill-conditioned polyfit call emits a
# RankWarning (instead of Python's default once-per-location behavior).
warnings.simplefilter('always', RankWarning)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@lib@polynomial.py@.PATH_END.py
|
{
"filename": "profiles.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/astro/darkmatter/profiles.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Dark matter profiles."""
import abc
import html
import numpy as np
import astropy.units as u
from gammapy.modeling import Parameter, Parameters
from gammapy.utils.integrate import trapz_loglog
__all__ = [
"BurkertProfile",
"DMProfile",
"EinastoProfile",
"IsothermalProfile",
"MooreProfile",
"NFWProfile",
"ZhaoProfile",
]
class DMProfile(abc.ABC):
    """DMProfile model base class."""

    LOCAL_DENSITY = 0.3 * u.GeV / (u.cm**3)
    """Local dark matter density as given in reference 2"""
    DISTANCE_GC = 8.33 * u.kpc
    """Distance to the Galactic Center as given in reference 2"""

    def __call__(self, radius):
        """Call evaluate method of derived classes."""
        kwargs = {par.name: par.quantity for par in self.parameters}
        return self.evaluate(radius, **kwargs)

    def _repr_html_(self):
        # Fall back to a plain-text <pre> rendering when the subclass does
        # not provide ``to_html``.
        try:
            return self.to_html()
        except AttributeError:
            return f"<pre>{html.escape(str(self))}</pre>"

    def scale_to_local_density(self):
        """Rescale ``rho_s`` so that the density evaluated at ``DISTANCE_GC``
        equals ``LOCAL_DENSITY``."""
        scale = (self.LOCAL_DENSITY / self(self.DISTANCE_GC)).to_value("")
        self.parameters["rho_s"].value *= scale

    def _eval_substitution(self, radius, separation, squared):
        """Density at given radius together with the substitution part."""
        exponent = 2 if squared else 1
        # Jacobian of the line-of-sight substitution:
        # r / sqrt(r^2 - (d_GC * sin(separation))^2)
        return (
            self(radius) ** exponent
            * radius
            / np.sqrt(radius**2 - (self.DISTANCE_GC * np.sin(separation)) ** 2)
        )

    def integral(self, rmin, rmax, separation, ndecade, squared=True):
        r"""Integrate dark matter profile numerically.

        .. math::
            F(r_{min}, r_{max}) = \int_{r_{min}}^{r_{max}}\rho(r)^\gamma dr \\
            \gamma = 2 \text{for annihilation} \\
            \gamma = 1 \text{for decay}

        Parameters
        ----------
        rmin, rmax : `~astropy.units.Quantity`
            Lower and upper bound of integration range.
        separation : `~numpy.ndarray`
            Separation angle in radians.
        ndecade : int
            Number of grid points per decade used for the integration.
        squared : bool, optional
            Square the profile before integration.
            Default is True.
        """
        integral = self.integrate_spectrum_separation(
            self._eval_substitution, rmin, rmax, separation, ndecade, squared
        )
        # Fixed typo: this local was previously spelled ``inegral_unit``.
        integral_unit = u.Unit("GeV2 cm-5") if squared else u.Unit("GeV cm-2")
        return integral.to(integral_unit)

    def integrate_spectrum_separation(
        self, func, xmin, xmax, separation, ndecade, squared=True
    ):
        """Integrate ``func`` on a log-spaced grid between ``xmin`` and ``xmax``.

        Parameters
        ----------
        func : callable
            Integrand, called as ``func(x, separation, squared)``.
        xmin, xmax : `~astropy.units.Quantity`
            Lower and upper bound of integration range.
        separation : `~numpy.ndarray`
            Separation angle in radians.
        ndecade : int
            Number of grid points per decade used for the integration.
        squared : bool
            Square the profile before integration.
            Default is True.
        """
        unit = xmin.unit
        xmin = xmin.value
        xmax = xmax.to_value(unit)
        logmin = np.log10(xmin)
        logmax = np.log10(xmax)
        n = np.int32((logmax - logmin) * ndecade)
        x = np.logspace(logmin, logmax, n) * unit
        y = func(x, separation, squared)
        val = trapz_loglog(y, x)
        return val.sum()
class ZhaoProfile(DMProfile):
    r"""Zhao Profile.

    This is taken from equation 1 from Zhao (1996). It is a generalization of the NFW profile. The volume density
    is parametrized with a double power-law. Scale radii smaller than the scale radius are described with a slope of
    :math:`-\gamma` and scale radii larger than the scale radius are described with a slope of :math:`-\beta`.
    :math:`\alpha` is a measure for the width of the transition region.

    .. math::
        \rho(r) = \rho_s \left(\frac{r_s}{r}\right)^\gamma \left(1 + \left(\frac{r}{r_s}\right)^\frac{1}{\alpha} \right)^{(\gamma - \beta) \alpha}

    Parameters
    ----------
    r_s : `~astropy.units.Quantity`
        Scale radius, :math:`r_s`.
    alpha : `~astropy.units.Quantity`
        :math:`\alpha`.
    beta: `~astropy.units.Quantity`
        :math:`\beta`.
    gamma : `~astropy.units.Quantity`
        :math:`\gamma`.
    rho_s : `~astropy.units.Quantity`
        Characteristic density, :math:`\rho_s`.

    References
    ----------
    * `1996MNRAS.278..488Z <https://ui.adsabs.harvard.edu/abs/1996MNRAS.278..488Z>`_
    * `2011JCAP...03..051C <https://ui.adsabs.harvard.edu/abs/2011JCAP...03..051C>`_
    """

    DEFAULT_SCALE_RADIUS = 24.42 * u.kpc
    DEFAULT_ALPHA = 1
    DEFAULT_BETA = 3
    DEFAULT_GAMMA = 1
    """
    (alpha, beta, gamma) = (1,3,1) is NFW profile.
    Default scale radius as given in reference 2 (same as for NFW profile)
    """

    def __init__(
        self, r_s=None, alpha=None, beta=None, gamma=None, rho_s=1 * u.Unit("GeV / cm3")
    ):
        # Fall back to the published defaults for any shape parameter
        # the caller did not supply.
        if r_s is None:
            r_s = self.DEFAULT_SCALE_RADIUS
        if alpha is None:
            alpha = self.DEFAULT_ALPHA
        if beta is None:
            beta = self.DEFAULT_BETA
        if gamma is None:
            gamma = self.DEFAULT_GAMMA
        self.parameters = Parameters(
            [
                Parameter("r_s", u.Quantity(r_s)),
                Parameter("rho_s", u.Quantity(rho_s)),
                Parameter("alpha", alpha),
                Parameter("beta", beta),
                Parameter("gamma", gamma),
            ]
        )

    @staticmethod
    def evaluate(radius, r_s, alpha, beta, gamma, rho_s):
        """Evaluate the profile."""
        x = radius / r_s
        inner_slope = x**gamma
        transition = (1 + x ** (1 / alpha)) ** ((beta - gamma) * alpha)
        return rho_s / (inner_slope * transition)
class NFWProfile(DMProfile):
    r"""NFW Profile.

    .. math::
        \rho(r) = \rho_s \frac{r_s}{r}\left(1 + \frac{r}{r_s}\right)^{-2}

    Parameters
    ----------
    r_s : `~astropy.units.Quantity`
        Scale radius, :math:`r_s`.
    rho_s : `~astropy.units.Quantity`
        Characteristic density, :math:`\rho_s`.

    References
    ----------
    * `1997ApJ...490..493 <https://ui.adsabs.harvard.edu/abs/1997ApJ...490..493N>`_
    * `2011JCAP...03..051C <https://ui.adsabs.harvard.edu/abs/2011JCAP...03..051C>`_
    """

    DEFAULT_SCALE_RADIUS = 24.42 * u.kpc
    """Default scale radius as given in reference 2"""

    def __init__(self, r_s=None, rho_s=1 * u.Unit("GeV / cm3")):
        if r_s is None:
            r_s = self.DEFAULT_SCALE_RADIUS
        self.parameters = Parameters(
            [Parameter("r_s", u.Quantity(r_s)), Parameter("rho_s", u.Quantity(rho_s))]
        )

    @staticmethod
    def evaluate(radius, r_s, rho_s):
        """Evaluate the profile."""
        x = radius / r_s
        return rho_s / (x * (1 + x) ** 2)
class EinastoProfile(DMProfile):
    r"""Einasto Profile.

    .. math::
        \rho(r) = \rho_s \exp{
            \left(-\frac{2}{\alpha}\left[
            \left(\frac{r}{r_s}\right)^{\alpha} - 1\right] \right)}

    Parameters
    ----------
    r_s : `~astropy.units.Quantity`
        Scale radius, :math:`r_s`.
    alpha : `~astropy.units.Quantity`
        :math:`\alpha`.
    rho_s : `~astropy.units.Quantity`
        Characteristic density, :math:`\rho_s`.

    References
    ----------
    * `1965TrAlm...5...87E <https://ui.adsabs.harvard.edu/abs/1965TrAlm...5...87E>`_
    * `2011JCAP...03..051C <https://ui.adsabs.harvard.edu/abs/2011JCAP...03..051C>`_
    """

    DEFAULT_SCALE_RADIUS = 28.44 * u.kpc
    """Default scale radius as given in reference 2"""
    DEFAULT_ALPHA = 0.17
    """Default alpha as given in reference 2"""

    def __init__(self, r_s=None, alpha=None, rho_s=1 * u.Unit("GeV / cm3")):
        alpha = self.DEFAULT_ALPHA if alpha is None else alpha
        r_s = self.DEFAULT_SCALE_RADIUS if r_s is None else r_s
        self.parameters = Parameters(
            [
                Parameter("r_s", u.Quantity(r_s)),
                Parameter("alpha", u.Quantity(alpha)),
                Parameter("rho_s", u.Quantity(rho_s)),
            ]
        )

    @staticmethod
    def evaluate(radius, r_s, alpha, rho_s):
        """Evaluate the profile."""
        rr = radius / r_s
        exponent = (2 / alpha) * (rr**alpha - 1)
        return rho_s * np.exp(-1 * exponent)
class IsothermalProfile(DMProfile):
    r"""Isothermal Profile.

    .. math:: \rho(r) = \frac{\rho_s}{1 + (r/r_s)^2}

    Parameters
    ----------
    r_s : `~astropy.units.Quantity`
        Scale radius, :math:`r_s`.
    rho_s : `~astropy.units.Quantity`
        Characteristic density, :math:`\rho_s`.

    References
    ----------
    * `1991MNRAS.249..523B <https://ui.adsabs.harvard.edu/abs/1991MNRAS.249..523B>`_
    * `2011JCAP...03..051C <https://ui.adsabs.harvard.edu/abs/2011JCAP...03..051C>`_
    """

    DEFAULT_SCALE_RADIUS = 4.38 * u.kpc
    """Default scale radius as given in reference 2"""

    def __init__(self, r_s=None, rho_s=1 * u.Unit("GeV / cm3")):
        if r_s is None:
            r_s = self.DEFAULT_SCALE_RADIUS
        self.parameters = Parameters(
            [Parameter("r_s", u.Quantity(r_s)), Parameter("rho_s", u.Quantity(rho_s))]
        )

    @staticmethod
    def evaluate(radius, r_s, rho_s):
        """Evaluate the profile."""
        x = radius / r_s
        return rho_s / (1 + x**2)
class BurkertProfile(DMProfile):
    r"""Burkert Profile.

    .. math:: \rho(r) = \frac{\rho_s}{(1 + r/r_s)(1 + (r/r_s)^2)}

    Parameters
    ----------
    r_s : `~astropy.units.Quantity`
        Scale radius, :math:`r_s`.
    rho_s : `~astropy.units.Quantity`
        Characteristic density, :math:`\rho_s`.

    References
    ----------
    * `1995ApJ...447L..25B <https://ui.adsabs.harvard.edu/abs/1995ApJ...447L..25B>`_
    * `2011JCAP...03..051C <https://ui.adsabs.harvard.edu/abs/2011JCAP...03..051C>`_
    """

    DEFAULT_SCALE_RADIUS = 12.67 * u.kpc
    """Default scale radius as given in reference 2"""

    def __init__(self, r_s=None, rho_s=1 * u.Unit("GeV / cm3")):
        if r_s is None:
            r_s = self.DEFAULT_SCALE_RADIUS
        self.parameters = Parameters(
            [Parameter("r_s", u.Quantity(r_s)), Parameter("rho_s", u.Quantity(rho_s))]
        )

    @staticmethod
    def evaluate(radius, r_s, rho_s):
        """Evaluate the profile."""
        x = radius / r_s
        return rho_s / ((1 + x) * (1 + x**2))
class MooreProfile(DMProfile):
    r"""Moore Profile.

    .. math::
        \rho(r) = \rho_s \left(\frac{r_s}{r}\right)^{1.16}
        \left(1 + \frac{r}{r_s} \right)^{-1.84}

    Parameters
    ----------
    r_s : `~astropy.units.Quantity`
        Scale radius, :math:`r_s`.
    rho_s : `~astropy.units.Quantity`
        Characteristic density, :math:`\rho_s`.

    References
    ----------
    * `2004MNRAS.353..624D <https://ui.adsabs.harvard.edu/abs/2004MNRAS.353..624D>`_
    * `2011JCAP...03..051C <https://ui.adsabs.harvard.edu/abs/2011JCAP...03..051C>`_
    """

    DEFAULT_SCALE_RADIUS = 30.28 * u.kpc
    """Default scale radius as given in reference 2"""

    def __init__(self, r_s=None, rho_s=1 * u.Unit("GeV / cm3")):
        if r_s is None:
            r_s = self.DEFAULT_SCALE_RADIUS
        self.parameters = Parameters(
            [Parameter("r_s", u.Quantity(r_s)), Parameter("rho_s", u.Quantity(rho_s))]
        )

    @staticmethod
    def evaluate(radius, r_s, rho_s):
        """Evaluate the profile."""
        x = radius / r_s
        x_inv = r_s / radius
        return rho_s * x_inv**1.16 * (1 + x) ** (-1.84)
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@astro@darkmatter@profiles.py@.PATH_END.py
|
{
"filename": "jobs.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/integrations/prefect-dbt/prefect_dbt/cloud/jobs.py",
"type": "Python"
}
|
"""Module containing tasks and flows for interacting with dbt Cloud jobs"""
import asyncio
import shlex
import time
from json import JSONDecodeError
from typing import Any, Awaitable, Callable, Dict, List, Optional, Union
from httpx import HTTPStatusError
from pydantic import Field
from typing_extensions import Literal
from prefect import flow, task
from prefect.blocks.abstract import JobBlock, JobRun
from prefect.context import FlowRunContext
from prefect.logging import get_run_logger
from prefect.utilities.asyncutils import sync_compatible
from prefect_dbt.cloud.credentials import DbtCloudCredentials
from prefect_dbt.cloud.exceptions import (
DbtCloudGetJobFailed,
DbtCloudGetRunArtifactFailed,
DbtCloudGetRunFailed,
DbtCloudJobRunCancelled,
DbtCloudJobRunFailed,
DbtCloudJobRunIncomplete,
DbtCloudJobRunTimedOut,
DbtCloudJobRunTriggerFailed,
DbtCloudListRunArtifactsFailed,
)
from prefect_dbt.cloud.models import TriggerJobRunOptions
from prefect_dbt.cloud.runs import (
DbtCloudJobRunStatus,
get_dbt_cloud_run_artifact,
get_dbt_cloud_run_info,
list_dbt_cloud_run_artifacts,
wait_for_dbt_cloud_job_run,
)
from prefect_dbt.cloud.utils import extract_user_message
EXE_COMMANDS = ("build", "run", "test", "seed", "snapshot")
@task(
    name="Get dbt Cloud job details",
    description="Retrieves details of a dbt Cloud job "
    "for the job with the given job_id.",
    retries=3,
    retry_delay_seconds=10,
)
async def get_dbt_cloud_job_info(
    dbt_cloud_credentials: DbtCloudCredentials,
    job_id: int,
    order_by: Optional[str] = None,
) -> Dict:
    """
    A task to retrieve information about a dbt Cloud job.

    Args:
        dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.
        job_id: The ID of the job to get.
        order_by: Field by which to order results; passed through to the
            dbt Cloud administrative API.

    Returns:
        The job data returned by the dbt Cloud administrative API.

    Example:
        Get status of a dbt Cloud job:
        ```python
        from prefect import flow
        from prefect_dbt.cloud import DbtCloudCredentials
        from prefect_dbt.cloud.jobs import get_dbt_cloud_job_info

        @flow
        def get_job_flow():
            credentials = DbtCloudCredentials(api_key="my_api_key", account_id=123456789)
            return get_dbt_cloud_job_info(
                dbt_cloud_credentials=credentials,
                job_id=42
            )

        get_job_flow()
        ```
    """  # noqa
    try:
        async with dbt_cloud_credentials.get_administrative_client() as client:
            response = await client.get_job(
                job_id=job_id,
                order_by=order_by,
            )
    except HTTPStatusError as ex:
        raise DbtCloudGetJobFailed(extract_user_message(ex)) from ex
    return response.json()["data"]
@task(
    name="Trigger dbt Cloud job run",
    description="Triggers a dbt Cloud job run for the job "
    "with the given job_id and optional overrides.",
    retries=3,
    retry_delay_seconds=10,
)
async def trigger_dbt_cloud_job_run(
    dbt_cloud_credentials: DbtCloudCredentials,
    job_id: int,
    options: Optional[TriggerJobRunOptions] = None,
) -> Dict:
    """
    A task to trigger a dbt Cloud job run.

    Args:
        dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.
        job_id: The ID of the job to trigger.
        options: An optional TriggerJobRunOptions instance to specify overrides
            for the triggered job run.

    Returns:
        The run data returned from the dbt Cloud administrative API.

    Examples:
        Trigger a dbt Cloud job run:
        ```python
        from prefect import flow
        from prefect_dbt.cloud import DbtCloudCredentials
        from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run

        @flow
        def trigger_dbt_cloud_job_run_flow():
            credentials = DbtCloudCredentials(api_key="my_api_key", account_id=123456789)
            trigger_dbt_cloud_job_run(dbt_cloud_credentials=credentials, job_id=1)

        trigger_dbt_cloud_job_run_flow()
        ```

        Trigger a dbt Cloud job run with overrides:
        ```python
        from prefect import flow
        from prefect_dbt.cloud import DbtCloudCredentials
        from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run
        from prefect_dbt.cloud.models import TriggerJobRunOptions

        @flow
        def trigger_dbt_cloud_job_run_flow():
            credentials = DbtCloudCredentials(api_key="my_api_key", account_id=123456789)
            trigger_dbt_cloud_job_run(
                dbt_cloud_credentials=credentials,
                job_id=1,
                options=TriggerJobRunOptions(
                    git_branch="staging",
                    schema_override="dbt_cloud_pr_123",
                    dbt_version_override="0.18.0",
                    target_name_override="staging",
                    timeout_seconds_override=3000,
                    generate_docs_override=True,
                    threads_override=8,
                    steps_override=[
                        "dbt seed",
                        "dbt run --fail-fast",
                        "dbt test --fail-fast",
                    ],
                ),
            )

        trigger_dbt_cloud_job_run_flow()
        ```
    """  # noqa
    logger = get_run_logger()
    logger.info(f"Triggering run for job with ID {job_id}")
    try:
        async with dbt_cloud_credentials.get_administrative_client() as client:
            response = await client.trigger_job_run(job_id=job_id, options=options)
    except HTTPStatusError as ex:
        raise DbtCloudJobRunTriggerFailed(extract_user_message(ex)) from ex
    run_data = response.json()["data"]
    # Only log the run URL when the response contains the fields needed
    # to build it.
    if "project_id" in run_data and "id" in run_data:
        logger.info(
            f"Run successfully triggered for job with ID {job_id}. "
            "You can view the status of this run at "
            f"https://{dbt_cloud_credentials.domain}/#/accounts/"
            f"{dbt_cloud_credentials.account_id}/projects/{run_data['project_id']}/"
            f"runs/{run_data['id']}/"
        )
    return run_data
@task(
    name="Get dbt Cloud job run ID",
    description="Extracts the run ID from a trigger job run API response",
)
def get_run_id(obj: Dict):
    """
    Task that extracts the run ID from a trigger job run API response.

    This task is mainly used to maintain dependency tracking between the
    `trigger_dbt_cloud_job_run` task and downstream tasks/flows that use the run ID.

    Args:
        obj: The JSON body from the trigger job run response.

    Returns:
        The run ID of the triggered job run.

    Raises:
        RuntimeError: If the response body does not contain an ``id`` field.

    Example:
        ```python
        from prefect import flow
        from prefect_dbt.cloud import DbtCloudCredentials
        from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run, get_run_id

        @flow
        def trigger_run_and_get_id():
            dbt_cloud_credentials=DbtCloudCredentials(
                api_key="my_api_key",
                account_id=123456789
            )

            triggered_run_data = trigger_dbt_cloud_job_run(
                dbt_cloud_credentials=dbt_cloud_credentials,
                job_id=job_id,
                options=trigger_job_run_options,
            )
            run_id = get_run_id(triggered_run_data)
            return run_id

        trigger_run_and_get_id()
        ```
    """
    # Named ``run_id`` to avoid shadowing the ``id`` builtin.
    run_id = obj.get("id")
    if run_id is None:
        raise RuntimeError("Unable to determine run ID for triggered job.")
    return run_id
@flow(
    name="Trigger dbt Cloud job run and wait for completion",
    description="Triggers a dbt Cloud job run and waits for the "
    "triggered run to complete.",
)
async def trigger_dbt_cloud_job_run_and_wait_for_completion(
    dbt_cloud_credentials: DbtCloudCredentials,
    job_id: int,
    trigger_job_run_options: Optional[TriggerJobRunOptions] = None,
    max_wait_seconds: int = 900,
    poll_frequency_seconds: int = 10,
    retry_filtered_models_attempts: int = 3,
) -> Dict:
    """
    Flow that triggers a job run and waits for the triggered run to complete.

    Args:
        dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.
        job_id: The ID of the job to trigger.
        trigger_job_run_options: An optional TriggerJobRunOptions instance to
            specify overrides for the triggered job run.
        max_wait_seconds: Maximum number of seconds to wait for job to complete
        poll_frequency_seconds: Number of seconds to wait in between checks for
            run completion.
        retry_filtered_models_attempts: Number of times to retry models selected by `retry_status_filters`.

    Raises:
        DbtCloudJobRunCancelled: The triggered dbt Cloud job run was cancelled.
        DbtCloudJobRunFailed: The triggered dbt Cloud job run failed.
        RuntimeError: The triggered dbt Cloud job run ended in an unexpected state.

    Returns:
        The run data returned by the dbt Cloud administrative API.

    Examples:
        Trigger a dbt Cloud job and wait for completion as a stand alone flow:
        ```python
        import asyncio
        from prefect_dbt.cloud import DbtCloudCredentials
        from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion

        asyncio.run(
            trigger_dbt_cloud_job_run_and_wait_for_completion(
                dbt_cloud_credentials=DbtCloudCredentials(
                    api_key="my_api_key",
                    account_id=123456789
                ),
                job_id=1
            )
        )
        ```

        Trigger a dbt Cloud job and wait for completion as a sub-flow:
        ```python
        from prefect import flow
        from prefect_dbt.cloud import DbtCloudCredentials
        from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion

        @flow
        def my_flow():
            ...
            run_result = trigger_dbt_cloud_job_run_and_wait_for_completion(
                dbt_cloud_credentials=DbtCloudCredentials(
                    api_key="my_api_key",
                    account_id=123456789
                ),
                job_id=1
            )
            ...

        my_flow()
        ```

        Trigger a dbt Cloud job with overrides:
        ```python
        import asyncio
        from prefect_dbt.cloud import DbtCloudCredentials
        from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion
        from prefect_dbt.cloud.models import TriggerJobRunOptions

        asyncio.run(
            trigger_dbt_cloud_job_run_and_wait_for_completion(
                dbt_cloud_credentials=DbtCloudCredentials(
                    api_key="my_api_key",
                    account_id=123456789
                ),
                job_id=1,
                trigger_job_run_options=TriggerJobRunOptions(
                    git_branch="staging",
                    schema_override="dbt_cloud_pr_123",
                    dbt_version_override="0.18.0",
                    target_name_override="staging",
                    timeout_seconds_override=3000,
                    generate_docs_override=True,
                    threads_override=8,
                    steps_override=[
                        "dbt seed",
                        "dbt run --fail-fast",
                        "dbt test --fail-fast",
                    ],
                ),
            )
        )
        ```
    """  # noqa
    logger = get_run_logger()

    triggered_run_data = await trigger_dbt_cloud_job_run(
        dbt_cloud_credentials=dbt_cloud_credentials,
        job_id=job_id,
        options=trigger_job_run_options,
    )
    run_id = triggered_run_data.get("id")
    if run_id is None:
        raise RuntimeError("Unable to determine run ID for triggered job.")

    final_run_status, run_data = await wait_for_dbt_cloud_job_run(
        run_id=run_id,
        dbt_cloud_credentials=dbt_cloud_credentials,
        max_wait_seconds=max_wait_seconds,
        poll_frequency_seconds=poll_frequency_seconds,
    )
    if final_run_status == DbtCloudJobRunStatus.SUCCESS:
        try:
            # Artifact listing is best-effort: a failure here should not
            # fail the flow after a successful run.
            artifact_paths = await list_dbt_cloud_run_artifacts(
                dbt_cloud_credentials=dbt_cloud_credentials,
                run_id=run_id,
            )
            run_data["artifact_paths"] = artifact_paths
        except DbtCloudListRunArtifactsFailed as ex:
            logger.warning(
                "Unable to retrieve artifacts for job run with ID %s. Reason: %s",
                run_id,
                ex,
            )
        logger.info(
            "dbt Cloud job run with ID %s completed successfully!",
            run_id,
        )
        return run_data
    elif final_run_status == DbtCloudJobRunStatus.CANCELLED:
        raise DbtCloudJobRunCancelled(
            f"Triggered job run with ID {run_id} was cancelled."
        )
    elif final_run_status == DbtCloudJobRunStatus.FAILED:
        while retry_filtered_models_attempts > 0:
            logger.info(
                f"Retrying job run with ID: {run_id} "
                f"{retry_filtered_models_attempts} more times"
            )
            try:
                retry_filtered_models_attempts -= 1
                run_data = await retry_dbt_cloud_job_run_subset_and_wait_for_completion(
                    dbt_cloud_credentials=dbt_cloud_credentials,
                    run_id=run_id,
                    trigger_job_run_options=trigger_job_run_options,
                    max_wait_seconds=max_wait_seconds,
                    poll_frequency_seconds=poll_frequency_seconds,
                )
                return run_data
            except Exception as ex:
                # Was a silent ``pass``; log so failed retries are visible.
                logger.warning(
                    "Retry attempt for job run with ID %s failed: %s", run_id, ex
                )
        else:
            raise DbtCloudJobRunFailed(f"Triggered job run with ID: {run_id} failed.")
    else:
        # Fixed message: was missing the space between "unexpected" and "status".
        raise RuntimeError(
            f"Triggered job run with ID: {run_id} ended with unexpected "
            f"status {final_run_status.value}."
        )
async def _build_trigger_job_run_options(
    dbt_cloud_credentials: DbtCloudCredentials,
    trigger_job_run_options: Optional[TriggerJobRunOptions],
    run_id: int,
    run_info: Dict[str, Any],
    job_info: Dict[str, Any],
) -> TriggerJobRunOptions:
    """
    Compiles a list of steps (commands) to retry, then either build trigger job
    run options from scratch if it does not exist, else overrides the existing.

    Args:
        dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.
        trigger_job_run_options: Options from the original trigger; may be None.
        run_id: The ID of the job run being retried.
        run_info: Run data including its ``run_steps``.
        job_info: Job data; ``generate_docs``/``generate_sources`` flags are read.

    Returns:
        A TriggerJobRunOptions whose ``steps_override`` re-runs only the
        unsuccessful steps, narrowed to unsuccessful nodes where possible.
    """
    generate_docs = job_info.get("generate_docs", False)
    generate_sources = job_info.get("generate_sources", False)
    steps_override = []
    for run_step in run_info["run_steps"]:
        status = run_step["status_humanized"].lower()
        # Skipping cloning, profile setup, and dbt deps - always the first three
        # steps in any run, and note, index starts at 1 instead of 0
        if run_step["index"] <= 3 or status == "success":
            continue
        # get dbt build from "Invoke dbt with `dbt build`"
        command = run_step["name"].partition("`")[2].partition("`")[0]
        # These steps will be re-run regardless if
        # generate_docs or generate_sources are enabled for a given job
        # so if we don't skip, it'll run twice
        freshness_in_command = (
            "dbt source snapshot-freshness" in command
            or "dbt source freshness" in command
        )
        if "dbt docs generate" in command and generate_docs:
            continue
        elif freshness_in_command and generate_sources:
            continue
        # find an executable command like `build` or `run`
        # search in a list so that there aren't false positives, like
        # `"run" in "dbt run-operation"`, which is True; we actually want
        # `"run" in ["dbt", "run-operation"]` which is False
        command_components = shlex.split(command)
        for exe_command in EXE_COMMANDS:
            if exe_command in command_components:
                break
        else:
            # for/else: no executable sub-command was found in this step
            exe_command = ""
        is_exe_command = exe_command in EXE_COMMANDS
        is_not_success = status in ("error", "skipped", "cancelled")
        is_skipped = status == "skipped"
        if (not is_exe_command and is_not_success) or (is_exe_command and is_skipped):
            # if no matches like `run-operation`, we will be rerunning entirely
            # or if it's one of the expected commands and is skipped
            steps_override.append(command)
        else:
            # errors and failures are when we need to inspect to figure
            # out the point of failure
            try:
                run_artifact_future = await get_dbt_cloud_run_artifact.with_options(
                    retries=0, retry_delay_seconds=0
                )(
                    dbt_cloud_credentials=dbt_cloud_credentials,
                    run_id=run_id,
                    path="run_results.json",
                    step=run_step["index"],
                )
                run_artifact = run_artifact_future
            except JSONDecodeError:
                # get the run results scoped to the step which had an error
                # an error here indicates that either:
                # 1) the fail-fast flag was set, in which case
                # the run_results.json file was never created; or
                # 2) there was a problem on dbt Cloud's side saving
                # this artifact
                steps_override.append(command)
            else:
                # we only need to find the individual nodes for those run commands
                run_results = run_artifact["results"]
                # select nodes that were not successful
                # note "fail" here instead of "cancelled" because
                # nodes do not have a cancelled state
                run_nodes = " ".join(
                    run_result["unique_id"].split(".")[2]
                    for run_result in run_results
                    if run_result["status"] in ("error", "skipped", "fail")
                )
                select_arg = None
                if "-s" in command_components:
                    select_arg = "-s"
                elif "--select" in command_components:
                    select_arg = "--select"
                # prevent duplicate --select/-s statements
                if select_arg is not None:
                    # dbt --fail-fast run, -s, bad_mod --vars '{"env": "prod"}' to:
                    # dbt --fail-fast run -s other_mod bad_mod --vars '{"env": "prod"}'
                    command_start, select_arg, command_end = command.partition(
                        select_arg
                    )
                    modified_command = (
                        f"{command_start} {select_arg} {run_nodes} {command_end}"
                    )
                else:
                    # dbt --fail-fast, build, --vars '{"env": "prod"}' to:
                    # dbt --fail-fast build --select bad_model --vars '{"env": "prod"}'
                    dbt_global_args, exe_command, exe_args = command.partition(
                        exe_command
                    )
                    modified_command = (
                        f"{dbt_global_args} {exe_command} -s {run_nodes} {exe_args}"
                    )
                steps_override.append(modified_command)
    if trigger_job_run_options is None:
        trigger_job_run_options_override = TriggerJobRunOptions(
            steps_override=steps_override
        )
    else:
        trigger_job_run_options_override = trigger_job_run_options.copy()
        trigger_job_run_options_override.steps_override = steps_override
    return trigger_job_run_options_override
@flow(
    name="Retry subset of dbt Cloud job run and wait for completion",
    description=(
        "Retries a subset of dbt Cloud job run, filtered by select statuses, "
        "and waits for the triggered retry to complete."
    ),
)
async def retry_dbt_cloud_job_run_subset_and_wait_for_completion(
    dbt_cloud_credentials: DbtCloudCredentials,
    run_id: int,
    trigger_job_run_options: Optional[TriggerJobRunOptions] = None,
    max_wait_seconds: int = 900,
    poll_frequency_seconds: int = 10,
) -> Dict:
    """
    Flow that retries a subset of dbt Cloud job run, filtered by select statuses,
    and waits for the triggered retry to complete.

    Args:
        dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.
        trigger_job_run_options: An optional TriggerJobRunOptions instance to
            specify overrides for the triggered job run.
        max_wait_seconds: Maximum number of seconds to wait for job to complete
        poll_frequency_seconds: Number of seconds to wait in between checks for
            run completion.
        run_id: The ID of the job run to retry.

    Raises:
        ValueError: If `trigger_job_run_options.steps_override` is set by the user.

    Returns:
        The run data returned by the dbt Cloud administrative API.

    Examples:
        Retry a subset of models in a dbt Cloud job run and wait for completion:
        ```python
        from prefect import flow
        from prefect_dbt.cloud import DbtCloudCredentials
        from prefect_dbt.cloud.jobs import retry_dbt_cloud_job_run_subset_and_wait_for_completion

        @flow
        def retry_dbt_cloud_job_run_subset_and_wait_for_completion_flow():
            credentials = DbtCloudCredentials.load("MY_BLOCK_NAME")
            retry_dbt_cloud_job_run_subset_and_wait_for_completion(
                dbt_cloud_credentials=credentials,
                run_id=88640123,
            )

        retry_dbt_cloud_job_run_subset_and_wait_for_completion_flow()
        ```
    """  # noqa
    if trigger_job_run_options and trigger_job_run_options.steps_override is not None:
        raise ValueError(
            "Do not set `steps_override` in `trigger_job_run_options` "
            "because this flow will automatically set it"
        )

    run_info = await get_dbt_cloud_run_info(
        dbt_cloud_credentials=dbt_cloud_credentials,
        run_id=run_id,
        include_related=["run_steps"],
    )
    job_id = run_info["job_id"]

    job_info = await get_dbt_cloud_job_info(
        dbt_cloud_credentials=dbt_cloud_credentials,
        job_id=job_id,
    )

    trigger_job_run_options_override = await _build_trigger_job_run_options(
        dbt_cloud_credentials=dbt_cloud_credentials,
        trigger_job_run_options=trigger_job_run_options,
        run_id=run_id,
        run_info=run_info,
        job_info=job_info,
    )

    # Re-create the task runner to circumvent
    # `RuntimeError: The task runner is already started!`
    flow_run_context = FlowRunContext.get()
    task_runner_type = type(flow_run_context.task_runner)
    run_data = await trigger_dbt_cloud_job_run_and_wait_for_completion.with_options(
        task_runner=task_runner_type()
    )(
        dbt_cloud_credentials=dbt_cloud_credentials,
        job_id=job_id,
        retry_filtered_models_attempts=0,  # avoid recursive retry loops
        trigger_job_run_options=trigger_job_run_options_override,
        max_wait_seconds=max_wait_seconds,
        poll_frequency_seconds=poll_frequency_seconds,
    )
    return run_data
class DbtCloudJobRun(JobRun): # NOT A BLOCK
"""
Class that holds the information and methods to interact
with the resulting run of a dbt Cloud job.
"""
    def __init__(self, run_id: int, dbt_cloud_job: "DbtCloudJob"):
        """
        Args:
            run_id: The ID of the dbt Cloud job run.
            dbt_cloud_job: The job block that triggered this run; supplies
                the credentials (and polling settings used elsewhere).
        """
        self.run_id = run_id
        self._dbt_cloud_job = dbt_cloud_job
        self._dbt_cloud_credentials = dbt_cloud_job.dbt_cloud_credentials
    @property
    def _log_prefix(self) -> str:
        # Common prefix used in log and exception messages for this run.
        return f"dbt Cloud job {self._dbt_cloud_job.job_id} run {self.run_id}."
    async def _wait_until_state(
        self,
        in_final_state_fn: Callable[[Any], bool],
        get_state_fn: Callable[[], Awaitable[Any]],
        log_state_fn: Optional[Callable] = None,
        timeout_seconds: int = 60,
        interval_seconds: int = 1,
    ):
        """
        Wait until the job run reaches a specific state.

        Args:
            in_final_state_fn: A function that accepts a run state
                and returns a boolean indicating whether the job run is
                in a final state. Must also accept ``None``, which is passed
                on the first loop iteration before any state is fetched.
            get_state_fn: An async function that returns
                the current state of the job run.
            log_state_fn: A callable that accepts a run
                state and makes it human readable.
                NOTE(review): it is applied unconditionally whenever the
                state changes and ``self.logger`` is set, so despite the
                ``None`` default callers must supply it — confirm.
            timeout_seconds: The maximum amount of time, in seconds, to wait
                for the job run to reach the final state.
            interval_seconds: The number of seconds to wait between checks of
                the job run's state.

        Raises:
            DbtCloudJobRunTimedOut: If the run does not reach a final state
                within ``timeout_seconds``.
        """
        start_time = time.time()
        # run_state is None on the first pass; in_final_state_fn must handle it.
        last_state = run_state = None
        while not in_final_state_fn(run_state):
            run_state = await get_state_fn()
            if run_state != last_state:
                if self.logger is not None:
                    self.logger.info(
                        "%s has new state: %s",
                        self._log_prefix,
                        log_state_fn(run_state),
                    )
                last_state = run_state
            elapsed_time_seconds = time.time() - start_time
            if elapsed_time_seconds > timeout_seconds:
                raise DbtCloudJobRunTimedOut(
                    f"Max wait time of {timeout_seconds} "
                    "seconds exceeded while waiting"
                )
            await asyncio.sleep(interval_seconds)
@sync_compatible
async def get_run(self) -> Dict[str, Any]:
"""
Makes a request to the dbt Cloud API to get the run data.
Returns:
The run data.
"""
try:
dbt_cloud_credentials = self._dbt_cloud_credentials
async with dbt_cloud_credentials.get_administrative_client() as client:
response = await client.get_run(self.run_id)
except HTTPStatusError as ex:
raise DbtCloudGetRunFailed(extract_user_message(ex)) from ex
run_data = response.json()["data"]
return run_data
@sync_compatible
async def get_status_code(self) -> int:
"""
Makes a request to the dbt Cloud API to get the run status.
Returns:
The run status code.
"""
run_data = await self.get_run()
run_status_code = run_data.get("status")
return run_status_code
    @sync_compatible
    async def wait_for_completion(self) -> None:
        """
        Waits for the job run to reach a terminal state.

        Polls the run status every ``interval_seconds`` (taken from the
        parent job block) until it is terminal; `_wait_until_state` raises
        `DbtCloudJobRunTimedOut` after ``timeout_seconds``.
        """
        await self._wait_until_state(
            in_final_state_fn=DbtCloudJobRunStatus.is_terminal_status_code,
            get_state_fn=self.get_status_code,
            log_state_fn=DbtCloudJobRunStatus,
            timeout_seconds=self._dbt_cloud_job.timeout_seconds,
            interval_seconds=self._dbt_cloud_job.interval_seconds,
        )
    @sync_compatible
    async def fetch_result(self, step: Optional[int] = None) -> Dict[str, Any]:
        """
        Gets the results from the job run. Since the results
        may not be ready, use wait_for_completion before calling this method.

        Args:
            step: The index of the step in the run to query for artifacts. The
                first step in the run has the index 1. If the step parameter is
                omitted, then this method will return the artifacts compiled
                for the last step in the run.

        Returns:
            The run data, with an ``artifact_paths`` key added on success.

        Raises:
            DbtCloudListRunArtifactsFailed: If listing the run artifacts fails.
            DbtCloudJobRunCancelled: If the run was cancelled.
            DbtCloudJobRunFailed: If the run failed.
            DbtCloudJobRunIncomplete: If the run has not reached a terminal
                state yet.
        """
        run_data = await self.get_run()
        run_status = DbtCloudJobRunStatus(run_data.get("status"))
        if run_status == DbtCloudJobRunStatus.SUCCESS:
            try:
                async with self._dbt_cloud_credentials.get_administrative_client() as client:  # noqa
                    response = await client.list_run_artifacts(
                        run_id=self.run_id, step=step
                    )
                run_data["artifact_paths"] = response.json()["data"]
                self.logger.info("%s completed successfully!", self._log_prefix)
            except HTTPStatusError as ex:
                raise DbtCloudListRunArtifactsFailed(extract_user_message(ex)) from ex
            return run_data
        elif run_status == DbtCloudJobRunStatus.CANCELLED:
            raise DbtCloudJobRunCancelled(f"{self._log_prefix} was cancelled.")
        elif run_status == DbtCloudJobRunStatus.FAILED:
            raise DbtCloudJobRunFailed(f"{self._log_prefix} has failed.")
        else:
            raise DbtCloudJobRunIncomplete(
                f"{self._log_prefix} is still running; "
                "use wait_for_completion() to wait until results are ready."
            )
@sync_compatible
async def get_run_artifacts(
self,
path: Literal["manifest.json", "catalog.json", "run_results.json"],
step: Optional[int] = None,
) -> Union[Dict[str, Any], str]:
"""
Get an artifact generated for a completed run.
Args:
path: The relative path to the run artifact.
step: The index of the step in the run to query for artifacts. The
first step in the run has the index 1. If the step parameter is
omitted, then this method will return the artifacts compiled
for the last step in the run.
Returns:
The contents of the requested manifest. Returns a `Dict` if the
requested artifact is a JSON file and a `str` otherwise.
"""
try:
dbt_cloud_credentials = self._dbt_cloud_credentials
async with dbt_cloud_credentials.get_administrative_client() as client:
response = await client.get_run_artifact(
run_id=self.run_id, path=path, step=step
)
except HTTPStatusError as ex:
raise DbtCloudGetRunArtifactFailed(extract_user_message(ex)) from ex
if path.endswith(".json"):
artifact_contents = response.json()
else:
artifact_contents = response.text
return artifact_contents
def _select_unsuccessful_commands(
self,
run_results: List[Dict[str, Any]],
command_components: List[str],
command: str,
exe_command: str,
) -> List[str]:
"""
Select nodes that were not successful and rebuild a command.
"""
# note "fail" here instead of "cancelled" because
# nodes do not have a cancelled state
run_nodes = " ".join(
run_result["unique_id"].split(".")[2]
for run_result in run_results
if run_result["status"] in ("error", "skipped", "fail")
)
select_arg = None
if "-s" in command_components:
select_arg = "-s"
elif "--select" in command_components:
select_arg = "--select"
# prevent duplicate --select/-s statements
if select_arg is not None:
# dbt --fail-fast run, -s, bad_mod --vars '{"env": "prod"}' to:
# dbt --fail-fast run -s other_mod bad_mod --vars '{"env": "prod"}'
command_start, select_arg, command_end = command.partition(select_arg)
modified_command = f"{command_start} {select_arg} {run_nodes} {command_end}" # noqa
else:
# dbt --fail-fast, build, --vars '{"env": "prod"}' to:
# dbt --fail-fast build --select bad_model --vars '{"env": "prod"}'
dbt_global_args, exe_command, exe_args = command.partition(exe_command)
modified_command = (
f"{dbt_global_args} {exe_command} -s {run_nodes} {exe_args}"
)
return modified_command
    async def _build_trigger_job_run_options(
        self,
        job: Dict[str, Any],
        run: Dict[str, Any],
    ) -> TriggerJobRunOptions:
        """
        Compiles a list of steps (commands) to retry, then either build trigger job
        run options from scratch if it does not exist, else overrides the existing.
        Args:
            job: The job data, as returned by the dbt Cloud API.
            run: The run data, as returned by ``get_run``.
        Returns:
            Trigger options whose ``steps_override`` contains only the commands
            (possibly narrowed to unsuccessful nodes) that need rerunning.
        """
        generate_docs = job.get("generate_docs", False)
        generate_sources = job.get("generate_sources", False)
        steps_override = []
        for run_step in run["run_steps"]:
            status = run_step["status_humanized"].lower()
            # Skipping cloning, profile setup, and dbt deps - always the first three
            # steps in any run, and note, index starts at 1 instead of 0
            if run_step["index"] <= 3 or status == "success":
                continue
            # get dbt build from "Invoke dbt with `dbt build`"
            command = run_step["name"].partition("`")[2].partition("`")[0]
            # These steps will be re-run regardless if
            # generate_docs or generate_sources are enabled for a given job
            # so if we don't skip, it'll run twice
            freshness_in_command = (
                "dbt source snapshot-freshness" in command
                or "dbt source freshness" in command
            )
            if "dbt docs generate" in command and generate_docs:
                continue
            elif freshness_in_command and generate_sources:
                continue
            # find an executable command like `build` or `run`
            # search in a list so that there aren't false positives, like
            # `"run" in "dbt run-operation"`, which is True; we actually want
            # `"run" in ["dbt", "run-operation"]` which is False
            command_components = shlex.split(command)
            # for/else: exe_command stays "" when no known subcommand is found
            for exe_command in EXE_COMMANDS:
                if exe_command in command_components:
                    break
            else:
                exe_command = ""
            is_exe_command = exe_command in EXE_COMMANDS
            is_not_success = status in ("error", "skipped", "cancelled")
            is_skipped = status == "skipped"
            if (not is_exe_command and is_not_success) or (
                is_exe_command and is_skipped
            ):
                # if no matches like `run-operation`, we will be rerunning entirely
                # or if it's one of the expected commands and is skipped
                steps_override.append(command)
            else:
                # errors and failures are when we need to inspect to figure
                # out the point of failure
                try:
                    # get the run results scoped to the step which had an error
                    run_artifact = await self.get_run_artifacts(
                        "run_results.json", run_step["index"]
                    )
                except JSONDecodeError:
                    # an error here indicates that either:
                    # 1) the fail-fast flag was set, in which case
                    #    the run_results.json file was never created; or
                    # 2) there was a problem on dbt Cloud's side saving
                    #    this artifact
                    steps_override.append(command)
                else:
                    # we only need to find the individual nodes
                    # for those run commands
                    run_results = run_artifact["results"]
                    modified_command = self._select_unsuccessful_commands(
                        run_results=run_results,
                        command_components=command_components,
                        command=command,
                        exe_command=exe_command,
                    )
                    steps_override.append(modified_command)
        if self._dbt_cloud_job.trigger_job_run_options is None:
            trigger_job_run_options_override = TriggerJobRunOptions(
                steps_override=steps_override
            )
        else:
            # Copy so the options stored on the job block are not mutated.
            trigger_job_run_options_override = (
                self._dbt_cloud_job.trigger_job_run_options.copy()
            )
            trigger_job_run_options_override.steps_override = steps_override
        return trigger_job_run_options_override
@sync_compatible
async def retry_failed_steps(self) -> "DbtCloudJobRun": # noqa: F821
"""
Retries steps that did not complete successfully in a run.
Returns:
A representation of the dbt Cloud job run.
"""
job = await self._dbt_cloud_job.get_job()
run = await self.get_run()
trigger_job_run_options_override = await self._build_trigger_job_run_options(
job=job, run=run
)
num_steps = len(trigger_job_run_options_override.steps_override)
if num_steps == 0:
self.logger.info(f"{self._log_prefix} does not have any steps to retry.")
else:
self.logger.info(f"{self._log_prefix} has {num_steps} steps to retry.")
run = await self._dbt_cloud_job.trigger(
trigger_job_run_options=trigger_job_run_options_override,
)
return run
class DbtCloudJob(JobBlock):
    """
    Block that holds the information and methods to interact with a dbt Cloud job.
    Attributes:
        dbt_cloud_credentials: The credentials to use to authenticate with dbt Cloud.
        job_id: The id of the dbt Cloud job.
        timeout_seconds: The number of seconds to wait for the job to complete.
        interval_seconds:
            The number of seconds to wait between polling for job completion.
        trigger_job_run_options: The options to use when triggering a job run.
    Examples:
        Load a configured dbt Cloud job block.
        ```python
        from prefect_dbt.cloud import DbtCloudJob
        dbt_cloud_job = DbtCloudJob.load("BLOCK_NAME")
        ```
        Triggers a dbt Cloud job, waits for completion, and fetches the results.
        ```python
        from prefect import flow
        from prefect_dbt.cloud import DbtCloudCredentials, DbtCloudJob
        @flow
        def dbt_cloud_job_flow():
            dbt_cloud_credentials = DbtCloudCredentials.load("dbt-token")
            dbt_cloud_job = DbtCloudJob.load(
                dbt_cloud_credentials=dbt_cloud_credentials,
                job_id=154217
            )
            dbt_cloud_job_run = dbt_cloud_job.trigger()
            dbt_cloud_job_run.wait_for_completion()
            dbt_cloud_job_run.fetch_result()
            return dbt_cloud_job_run
        dbt_cloud_job_flow()
        ```
    """
    # Block registration metadata (displayed in the Prefect UI).
    _block_type_name = "dbt Cloud Job"
    _logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250"  # noqa
    _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt"  # noqa
    dbt_cloud_credentials: DbtCloudCredentials = Field(
        default=...,
        description="The dbt Cloud credentials to use to authenticate with dbt Cloud.",
    )  # noqa: E501
    job_id: int = Field(
        default=..., description="The id of the dbt Cloud job.", title="Job ID"
    )
    timeout_seconds: int = Field(
        default=900,
        description="The number of seconds to wait for the job to complete.",
    )
    interval_seconds: int = Field(
        default=10,
        description="The number of seconds to wait between polling for job completion.",
    )
    trigger_job_run_options: TriggerJobRunOptions = Field(
        default_factory=TriggerJobRunOptions,
        description="The options to use when triggering a job run.",
    )
    @sync_compatible
    async def get_job(self, order_by: Optional[str] = None) -> Dict[str, Any]:
        """
        Retrieve information about a dbt Cloud job.
        Args:
            order_by: The field to order the results by.
        Returns:
            The job data.
        Raises:
            DbtCloudGetJobFailed: If the request to dbt Cloud fails.
        """
        try:
            async with self.dbt_cloud_credentials.get_administrative_client() as client:
                response = await client.get_job(
                    job_id=self.job_id,
                    order_by=order_by,
                )
        except HTTPStatusError as ex:
            raise DbtCloudGetJobFailed(extract_user_message(ex)) from ex
        return response.json()["data"]
    @sync_compatible
    async def trigger(
        self, trigger_job_run_options: Optional[TriggerJobRunOptions] = None
    ) -> DbtCloudJobRun:
        """
        Triggers a dbt Cloud job.
        Args:
            trigger_job_run_options: Options overriding the block's stored
                ``trigger_job_run_options`` for this run only.
        Returns:
            A representation of the dbt Cloud job run.
        Raises:
            DbtCloudJobRunTriggerFailed: If the trigger request fails.
        """
        try:
            # Fall back to the options configured on the block when none given.
            trigger_job_run_options = (
                trigger_job_run_options or self.trigger_job_run_options
            )
            async with self.dbt_cloud_credentials.get_administrative_client() as client:
                response = await client.trigger_job_run(
                    job_id=self.job_id, options=trigger_job_run_options
                )
        except HTTPStatusError as ex:
            raise DbtCloudJobRunTriggerFailed(extract_user_message(ex)) from ex
        run_data = response.json()["data"]
        run_id = run_data.get("id")
        run = DbtCloudJobRun(
            dbt_cloud_job=self,
            run_id=run_id,
        )
        # Log a direct link to the run in the dbt Cloud UI.
        self.logger.info(
            f"dbt Cloud job {self.job_id} run {run_id} successfully triggered. "
            f"You can view the status of this run at "
            f"https://{self.dbt_cloud_credentials.domain}/#/accounts/"
            f"{self.dbt_cloud_credentials.account_id}/projects/"
            f"{run_data['project_id']}/runs/{run_id}/"
        )
        return run
@flow
async def run_dbt_cloud_job(
    dbt_cloud_job: DbtCloudJob,
    targeted_retries: int = 3,
) -> Dict[str, Any]:
    """
    Flow that triggers and waits for a dbt Cloud job run, retrying a
    subset of failed nodes if necessary.
    Args:
        dbt_cloud_job: Block that holds the information and
            methods to interact with a dbt Cloud job.
        targeted_retries: The number of times to retry failed steps.
    Returns:
        The run data returned by ``fetch_result``.
    Raises:
        DbtCloudJobRunFailed: If the job run still fails after exhausting
            all targeted retries.
    Examples:
        ```python
        from prefect import flow
        from prefect_dbt.cloud import DbtCloudCredentials, DbtCloudJob
        from prefect_dbt.cloud.jobs import run_dbt_cloud_job
        @flow
        def run_dbt_cloud_job_flow():
            dbt_cloud_credentials = DbtCloudCredentials.load("dbt-token")
            dbt_cloud_job = DbtCloudJob(
                dbt_cloud_credentials=dbt_cloud_credentials, job_id=154217
            )
            return run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job)
        run_dbt_cloud_job_flow()
        ```
    """
    logger = get_run_logger()
    run = await task(dbt_cloud_job.trigger.aio)(dbt_cloud_job)
    # Remember the configured retry budget so the final error message is
    # accurate; previously the decremented counter (always 0) was reported.
    max_retries = targeted_retries
    while targeted_retries > 0:
        try:
            await task(run.wait_for_completion.aio)(run)
            result = await task(run.fetch_result.aio)(run)
            return result
        except DbtCloudJobRunFailed:
            logger.info(
                f"Retrying job run with ID: {run.run_id} "
                f"{targeted_retries} more times"
            )
            # Trigger a new run targeting only the unsuccessful steps.
            run = await task(run.retry_failed_steps.aio)(run)
            targeted_retries -= 1
    raise DbtCloudJobRunFailed(
        f"dbt Cloud job {run.run_id} failed after {max_retries} retries."
    )
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@integrations@prefect-dbt@prefect_dbt@cloud@jobs.py@.PATH_END.py
|
{
"filename": "simulations.py",
"repo_name": "smsharma/dark-photons-perturbations",
"repo_path": "dark-photons-perturbations_extracted/dark-photons-perturbations-master/grf/simulations.py",
"type": "Python"
}
|
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.misc import derivative
import astropy.units as u
import astropy.constants as c
from astropy.cosmology import FlatLambdaCDM, z_at_value
from tqdm import *
from grf.grf import TransitionProbabilities
from classy import Class
from nbodykit.filters import TopHat
from nbodykit.lab import LinearMesh
from grf.units import *
class GaussianRandomFieldBoxes:
    """Stack Gaussian-random-field simulation boxes along the line of sight
    and compute A' -> A transition probabilities through the resulting 1-D
    density field.
    """

    def __init__(self, z_fid=20, z_range=(15, 25), k_max=0.1, n_points=100, r_filter=2., eps=1e-6, omega=5.9e-6 * eV, z_dep_P=True, cosmo=None, A_s=2.105e-9, log_pk_interp=None, run=True):
        """
        :param z_fid: Fiducial redshift
        :param z_range: Range of redshifts to stack boxes over
        :param k_max: Maximum comoving scale in h/Mpc
        :param n_points: Number of points to simulate boxes with
        :param r_filter: Top-hat filter size relative to spacing size
        :param eps: Dark photon coupling \epsilon
        :param omega: Photon frequency in eV. By default = omega_21 ~ 5.9e-6 eV.
        :param z_dep_P: Whether power spectrum is varied by z
        :param cosmo: The cosmology specified as an astropy.cosmology.FlatLambdaCDM instance. Defaults to Planck18 cosmology.
        :param A_s: Scalar fluctuation amplitude (stored, not used in this class)
        :param log_pk_interp: Interpolator returning log10 P(k) as a function of (z, k)
        :param run: If True, immediately simulate boxes and compute transition probabilities
        """
        self.z_fid = z_fid
        # Copy to a list so the (now immutable) default is never shared or
        # mutated across instances; the default was previously a mutable list.
        self.z_range = list(z_range)
        self.k_max = k_max
        self.n_points = n_points
        self.r_filter = r_filter
        self.omega = omega
        self.eps = eps
        self.z_dep_P = z_dep_P
        self.A_s = A_s
        self.Y_p = 0.25  # Helium-to-hydrogen mass fraction, from 0901.0014
        # Set cosmology
        if cosmo is None:
            self.cosmo = self.get_Planck18_cosmology()
        else:
            self.cosmo = cosmo
        self.log_pk_interp = log_pk_interp
        if run:
            self.get_box_properties()
            self.simulate_grf()
            self.calculate_transition_prob()

    def get_Planck18_cosmology(self):
        """
        Stolen from https://github.com/abatten/fruitbat/blob/c074abff432c3b267d00fbb49781a0e0c6eeab75/fruitbat/cosmologies.py
        Planck 2018 paper VI Table 2 Final column (68% confidence interval)
        This is the Planck 2018 cosmology that will be added to Astropy when the
        paper is accepted.
        :return: astropy.cosmology.FlatLambdaCDM instance describing Planck18 cosmology
        """
        planck18_cosmology = {'Oc0': 0.2607,
                              'Ob0': 0.04897,
                              'Om0': 0.3111,
                              'H0': 67.66,
                              'n': 0.9665,
                              'sigma8': 0.8102,
                              'tau': 0.0561,
                              'z_reion': 7.82,
                              't0': 13.787,
                              'Tcmb0': 2.7255,
                              'Neff': 3.046,
                              'm_nu': [0., 0., 0.06],
                              'z_recomb': 1089.80,
                              'reference': "Planck 2018 results. VI. Cosmological Parameters, "
                              "A&A, submitted, Table 2 (TT, TE, EE + lowE + lensing + BAO)"
                              }
        Planck18 = FlatLambdaCDM(H0=planck18_cosmology['H0'],
                                 Om0=planck18_cosmology['Om0'],
                                 Tcmb0=planck18_cosmology['Tcmb0'],
                                 Neff=planck18_cosmology['Neff'],
                                 Ob0=planck18_cosmology['Ob0'], name="Planck18",
                                 m_nu=u.Quantity(planck18_cosmology['m_nu'], u.eV))
        return Planck18

    def get_box_properties(self):
        """ Assign properties to simulation boxes
        """
        # Total comoving distance between requested redshift range, in Mpc/h
        self.d_comoving_total = (self.cosmo.comoving_distance(self.z_range[1]) -
                                 self.cosmo.comoving_distance(self.z_range[0])).value / (1 / self.cosmo.h)
        # Number of points needed to get down to k_max resolution
        self.n_points_needed = self.k_max * self.d_comoving_total / (2 * np.pi)
        # Ratio of number of points needed and how many we're willing to run with
        self.ratio_n_points = self.n_points_needed / self.n_points
        # Number of boxes to simulate
        self.n_boxes = int(np.ceil(self.ratio_n_points))
        # Redshift bin edges and centers of boxes to simulate
        self.z_bins = np.linspace(
            self.z_range[0], self.z_range[1], self.n_boxes + 1)
        # If power spectrum a function of z grab those redshifts, otherwise only calculate at fiducial redshift
        if self.z_dep_P:
            self.z_center_ary = (self.z_bins[1:] + self.z_bins[:-1]) / 2.
        else:
            self.z_center_ary = np.array(self.n_boxes * [self.z_fid])
        # Comoving distance at redshift bin edges, size of each box and number of points to use to get resolution k_max
        self.d_comoving_bins = self.cosmo.comoving_distance(
            self.z_bins).value / (1 / self.cosmo.h)
        self.delta_d_comoving_ary = self.d_comoving_bins[1:] - \
            self.d_comoving_bins[:-1]
        self.n_points_ary = np.array(np.ceil(
            self.k_max * self.delta_d_comoving_ary / (2 * np.pi)), dtype=np.int32)

    def simulate_grf(self, seeds=None):
        """ Simulate Gaussian random field and get 1-D (1 + \delta) slices through boxes
        :param seeds: Optional per-box RNG seeds (list of length n_boxes)
        """
        self.one_plus_delta_1d = []  # 1-D slice through 1 + \delta
        self.z_ary = []  # Redshift array corresponding to 1 + \delta
        # Only show a progress bar when there are many boxes
        if self.n_boxes < 10:
            disable_tqdm = True
        else:
            disable_tqdm = False
        self.fields = []
        self.filter_sizes = []
        if seeds is None:
            seeds = [None] * self.n_boxes
        for z_center, i_b in tqdm_notebook(zip(self.z_center_ary, range(self.n_boxes)), total=len(self.z_center_ary), disable=disable_tqdm):
            if self.z_dep_P:
                # Interpolated baryon P(k) at the box's central redshift
                def pk_interp_b(k):
                    return 10 ** self.log_pk_interp(z_center, k)
            else:
                # Interpolated baryon P(k) at the midpoint of the z range
                def pk_interp_b(k):
                    return 10 ** self.log_pk_interp(np.mean(self.z_range), k)
            # Real-space spacing
            self.spacing_size = self.delta_d_comoving_ary[i_b] / \
                self.n_points_ary[i_b]
            mesh = LinearMesh(
                pk_interp_b, Nmesh=self.n_points_ary[i_b], BoxSize=self.delta_d_comoving_ary[i_b], seed=seeds[i_b])
            # Apply tophat filter if needed
            if self.r_filter == 0:
                mesh_smooth = mesh
                self.filter_size = 0.
            else:
                self.filter_size = self.r_filter * self.spacing_size
                mesh_smooth = mesh.apply(TopHat(self.filter_size)).paint()
            self.filter_sizes.append(self.filter_size)
            self.field = mesh_smooth.preview()
            self.fields.append(self.field)
            # Take a 1-D line of sight through the middle of the box
            self.one_plus_delta_1d += [
                list(self.field[0, int(np.round(self.n_points_ary[i_b] / 2)), :])]
            d_comov_ary = np.linspace(
                self.d_comoving_bins[i_b], self.d_comoving_bins[i_b + 1], self.n_points_ary[i_b])
            # Convert comoving distances along the slice back to redshifts
            self.z_ary += [[z_at_value(self.cosmo.comoving_distance,
                                       d / self.cosmo.h * u.Mpc, zmax=1e5) for d in d_comov_ary]]

    def calculate_transition_prob(self):
        """ Calculate A' -> A transition probabilities for the simulated slices
        """
        # Initialize transition probabilities class
        AO = AnisotropicOscillations()
        self.z_crossings_collect_temp = []
        self.P_collect_temp = []
        self.z_ary_new = []
        self.m_A_perturb_ary = []
        self.m_A_sq_ary = []
        # Loop over boxes and get crossings and transition probabilities
        for i_b in range(self.n_boxes):
            AO.get_m_A_perturb(z_fid=self.z_fid, omega=self.omega, z_ary=self.z_ary[i_b],
                               one_plus_delta_ary=np.nan_to_num(self.one_plus_delta_1d[i_b]))
            AO.get_crossings_and_derivatives()
            AO.get_P_ary(eps=self.eps, omega=self.omega)
            self.z_crossings_collect_temp += list(AO.z_crossings_ary)
            self.P_collect_temp += list(AO.P_ary)
            self.m_A_perturb_ary += list(AO.m_A_perturb_ary)
            self.m_A_sq_ary += list(AO.m_A_sq_ary)
            self.z_ary_new += list(AO.z_ary_new)
        self.P_homo = AO.P_homo
        self.m_A_fid = AO.m_A_fid

    def calculate_transition_prob_custom(self, one_plus_delta_1d):
        """ Calculate A' -> A transition probabilities for externally
        supplied (1 + \delta) slices rather than the simulated ones
        :param one_plus_delta_1d: list (per box) of 1-D (1 + \delta) arrays
        """
        # Initialize transition probabilities class
        AO = AnisotropicOscillations()
        self.z_crossings_collect_temp = []
        self.P_collect_temp = []
        self.z_ary_new = []
        self.m_A_perturb_ary = []
        self.m_A_sq_perturb_ary = []
        self.m_A_sq_ary = []
        # Loop over boxes and get crossings and transition probabilities
        for i_b in range(self.n_boxes):
            AO.get_m_A_perturb(z_fid=self.z_fid, omega=self.omega, z_ary=self.z_ary[i_b],
                               one_plus_delta_ary=np.nan_to_num(one_plus_delta_1d[i_b]))
            AO.get_crossings_and_derivatives()
            AO.get_P_ary(eps=self.eps, omega=self.omega)
            self.z_crossings_collect_temp += list(AO.z_crossings_ary)
            self.P_collect_temp += list(AO.P_ary)
            self.m_A_perturb_ary += list(AO.m_A_perturb_ary)
            self.m_A_sq_perturb_ary += list(AO.m_A_sq_perturb_ary)
            self.m_A_sq_ary += list(AO.m_A_sq_ary)
            self.z_ary_new += list(AO.z_ary_new)
        self.P_homo = AO.P_homo
        self.m_A_fid = AO.m_A_fid
class AnisotropicOscillations(TransitionProbabilities):
    """A' <-> A oscillations along a 1-D line of sight with density
    perturbations, built on top of `TransitionProbabilities`.
    """
    def __init__(self, cosmo=None, z_reio=7.82):
        # Delegate cosmology/reionization setup to the parent class.
        TransitionProbabilities.__init__(self, cosmo=cosmo, z_reio=z_reio)
    def get_m_A_perturb(self, z_fid, omega, z_ary, one_plus_delta_ary):
        """ Get perturbations in plasma mass
        :param z_fid: fiducial redshift defining the reference plasma mass
        :param omega: photon frequency
        :param z_ary: redshifts along the line of sight
        :param one_plus_delta_ary: (1 + delta) overdensities at `z_ary`
        """
        self.z_ary_new = z_ary
        self.one_plus_delta_ary = one_plus_delta_ary
        # Reference (homogeneous) plasma mass at the fiducial redshift
        self.m_A_fid = np.sqrt(self.m_A_sq(z_fid, omega))
        self.m_A_sq_ary = self.m_A_sq(np.array(self.z_ary_new), omega)
        self.m_A_ary = np.sqrt(self.m_A_sq_ary)
        # Perturbed mass: m_A scaled by sqrt(1 + delta); NaNs zeroed out
        self.m_A_perturb_ary = self.m_A_ary * \
            np.sqrt(np.nan_to_num(self.one_plus_delta_ary))
        self.m_A_sq_perturb_ary = self.m_A_sq_ary * \
            (np.nan_to_num(self.one_plus_delta_ary))
    def get_crossings_and_derivatives(self):
        """ Locate redshift of crossings and derivatives at crossings
        """
        # NOTE(review): dz is taken from the first two samples — assumes
        # z_ary_new is (approximately) uniformly spaced; confirm with caller.
        dz = self.z_ary_new[1] - self.z_ary_new[0]
        # Indices where the perturbed mass crosses the fiducial level
        zero_crossings_idxs = np.where(
            np.diff(np.sign(np.array(self.m_A_perturb_ary) - self.m_A_fid)))[0]
        self.z_crossings_ary = np.take(
            self.z_ary_new, np.array(zero_crossings_idxs))
        # Finite-difference d(log m_A^2)/dz evaluated at each crossing
        self.d_log_m_A_sq_dz_ary = (np.log(self.m_A_perturb_ary[zero_crossings_idxs + 1] ** 2) - np.log(
            self.m_A_perturb_ary[zero_crossings_idxs] ** 2)) / dz
        # Same, but for the homogeneous (unperturbed) mass profile
        zero_crossings_homo_idxs = np.where(
            np.diff(np.sign(np.array(self.m_A_ary) - self.m_A_fid)))[0]
        if not len(zero_crossings_homo_idxs) == 0:
            self.z_crossing_homo = self.z_ary_new[zero_crossings_homo_idxs[0]]
            self.d_log_m_A_sq_dz_homo = (
                np.log(self.m_A_ary[zero_crossings_homo_idxs + 1] ** 2) - np.log(self.m_A_ary[zero_crossings_homo_idxs] ** 2)) / dz
        else:
            # No homogeneous crossing inside this redshift range
            self.z_crossing_homo = self.d_log_m_A_sq_dz_homo = None
    def get_P_ary(self, eps, omega):
        """ Get transition probabilities
        :param eps: dark photon coupling
        :param omega: photon frequency
        """
        # Transition probabilities at each level crossing
        self.P_ary = np.pi * self.m_A_fid ** 2 * eps ** 2 / omega * \
            np.abs((self.d_log_m_A_sq_dz_ary *
                    self.dz_dt(self.z_crossings_ary)) ** -1)
        # If range corresponds to homogeneous crossing, get that probability
        self.P_homo = None
        if self.d_log_m_A_sq_dz_homo is not None:
            self.P_homo = np.pi * self.m_A_fid ** 2 * eps ** 2 / omega * \
                np.abs((self.d_log_m_A_sq_dz_homo *
                        self.dz_dt(self.z_crossing_homo)) ** -1)
|
smsharmaREPO_NAMEdark-photons-perturbationsPATH_START.@dark-photons-perturbations_extracted@dark-photons-perturbations-master@grf@simulations.py@.PATH_END.py
|
{
"filename": "test_base_standard.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/partners/openai/tests/integration_tests/embeddings/test_base_standard.py",
"type": "Python"
}
|
"""Standard LangChain interface tests"""
from typing import Type
from langchain_core.embeddings import Embeddings
from langchain_tests.integration_tests.embeddings import EmbeddingsIntegrationTests
from langchain_openai import OpenAIEmbeddings
class TestOpenAIStandard(EmbeddingsIntegrationTests):
    """Standard embeddings integration-test suite wired up for OpenAI."""

    @property
    def embeddings_class(self) -> Type[Embeddings]:
        # The embeddings implementation under test.
        return OpenAIEmbeddings

    @property
    def embedding_model_params(self) -> dict:
        # Small model with reduced dimensionality to keep the suite cheap.
        return dict(model="text-embedding-3-small", dimensions=128)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@partners@openai@tests@integration_tests@embeddings@test_base_standard.py@.PATH_END.py
|
{
"filename": "_selected.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/scattersmith/_selected.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
    """Styling applied to selected points of a scattersmith trace
    (auto-generated plotly compound object)."""

    # class properties
    # --------------------
    _parent_path_str = "scattersmith"
    _path_str = "scattersmith.selected"
    _valid_props = {"marker", "textfont"}
    # marker
    # ------
    @property
    def marker(self):
        """
        The 'marker' property is an instance of Marker
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scattersmith.selected.Marker`
          - A dict of string/value properties that will be passed
            to the Marker constructor
            Supported dict properties:
                color
                    Sets the marker color of selected points.
                opacity
                    Sets the marker opacity of selected points.
                size
                    Sets the marker size of selected points.
        Returns
        -------
        plotly.graph_objs.scattersmith.selected.Marker
        """
        return self["marker"]
    @marker.setter
    def marker(self, val):
        self["marker"] = val
    # textfont
    # --------
    @property
    def textfont(self):
        """
        The 'textfont' property is an instance of Textfont
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scattersmith.selected.Textfont`
          - A dict of string/value properties that will be passed
            to the Textfont constructor
            Supported dict properties:
                color
                    Sets the text font color of selected points.
        Returns
        -------
        plotly.graph_objs.scattersmith.selected.Textfont
        """
        return self["textfont"]
    @textfont.setter
    def textfont(self, val):
        self["textfont"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        marker
            :class:`plotly.graph_objects.scattersmith.selected.Mark
            er` instance or dict with compatible properties
        textfont
            :class:`plotly.graph_objects.scattersmith.selected.Text
            font` instance or dict with compatible properties
        """
    def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
        """
        Construct a new Selected object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scattersmith.Selected`
        marker
            :class:`plotly.graph_objects.scattersmith.selected.Mark
            er` instance or dict with compatible properties
        textfont
            :class:`plotly.graph_objects.scattersmith.selected.Text
            font` instance or dict with compatible properties
        Returns
        -------
        Selected
        """
        super(Selected, self).__init__("selected")
        # Internal construction path: reuse the parent's state directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow-copy so pops below don't mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattersmith.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.Selected`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("marker", None)
        _v = marker if marker is not None else _v
        if _v is not None:
            self["marker"] = _v
        _v = arg.pop("textfont", None)
        _v = textfont if textfont is not None else _v
        if _v is not None:
            self["textfont"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@scattersmith@_selected.py@.PATH_END.py
|
{
"filename": "test_patching.py",
"repo_name": "neuraloperator/neuraloperator",
"repo_path": "neuraloperator_extracted/neuraloperator-main/neuralop/training/tests/test_patching.py",
"type": "Python"
}
|
import torch
import pytest
from ..patching import MultigridPatching2D, make_patches
from neuralop.tests.test_utils import DummyModel
# Input shape params
batch_size = 16  # samples per synthetic batch
channels = 1  # single input channel
side_len = 128  # square spatial side length, in grid points
@pytest.mark.parametrize('levels', [1, 2, 3])
@pytest.mark.parametrize('padding_fraction', [0, 0.1, 0.2])
def test_make_patches(levels, padding_fraction):
    """make_patches should split a batch into (n_patches ** 2) padded tiles."""
    inputs = torch.randn(batch_size, channels, side_len, side_len)
    n_patches = 2 ** levels
    padding = int(round(side_len * padding_fraction))
    patched = make_patches(inputs, n_patches, padding)
    # Each patch is the tile size plus padding on both sides.
    expected_side = int((side_len // n_patches) + (2 * padding))
    expected_shape = (
        (n_patches ** 2) * batch_size, channels, expected_side, expected_side
    )
    assert patched.shape == expected_shape
@pytest.mark.parametrize('levels', [1, 2, 3])
@pytest.mark.parametrize('padding_fraction', [0, 0.1, 0.2])
@pytest.mark.parametrize('stitching', [True, False])
@pytest.mark.parametrize('evaluation', [True, False])
def test_full_mgp2d(levels, padding_fraction, stitching, evaluation):
    """End-to-end patch -> unpatch round trip through MultigridPatching2D."""
    model = DummyModel(16)
    patcher = MultigridPatching2D(model=model,
                                  levels=levels,
                                  padding_fraction=padding_fraction,
                                  stitching=stitching,  # cpu-only, single process
                                  use_distributed=False)
    input_shape = (batch_size, channels, side_len, side_len)
    x = torch.randn(*input_shape)
    y = torch.randn(*input_shape)
    patched_x, patched_y = patcher.patch(x,y)
    n_patches = 2 ** levels
    padding = int(round(side_len * padding_fraction))
    patched_padded_side_len = int((side_len // n_patches) + (2 * padding))
    # Patching appends one extra channel per multigrid level.
    assert patched_x.shape ==\
        ((n_patches ** 2) * batch_size, channels + levels, patched_padded_side_len, patched_padded_side_len)
    # mimic output after scattering x to model parallel region
    patched_out_shape = (patched_x.shape[0], 1, *patched_x.shape[2:])
    patched_out = torch.randn(patched_out_shape)
    # if padding is not applied, return without stitching
    # otherwise unpad and stitch
    if stitching or evaluation:
        unpatch_shape = input_shape
    else:
        unpadded_patch_size = int(side_len // n_patches)
        unpatch_shape = (patched_x.shape[0], channels, unpadded_patch_size, unpadded_patch_size)
    # test stitching here in cases where padding is applied
    unpatched_x, unpatched_y = patcher.unpatch(patched_out, patched_y, evaluation=evaluation)
    assert unpatched_x.shape == unpatch_shape
    assert unpatched_y.shape == unpatch_shape
|
neuraloperatorREPO_NAMEneuraloperatorPATH_START.@neuraloperator_extracted@neuraloperator-main@neuralop@training@tests@test_patching.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/selection/_line.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `layout.selection.line` compound property
    (auto-generated plotly validator)."""

    def __init__(self, plotly_name="line", parent_name="layout.selection", **kwargs):
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override the target data class / docs via kwargs.
            data_class_str=kwargs.pop("data_class_str", "Line"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the line color.
            dash
                Sets the dash style of lines. Set to a dash
                type string ("solid", "dot", "dash",
                "longdash", "dashdot", or "longdashdot") or a
                dash length list in px (eg "5px,10px,2px,2px").
            width
                Sets the line width (in px).
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@selection@_line.py@.PATH_END.py
|
{
"filename": "regrid.py",
"repo_name": "sherpa/sherpa",
"repo_path": "sherpa_extracted/sherpa-main/sherpa/models/regrid.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 - 2024
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Evaluate a model on a different grid to the requested one.
This is intended to support convolution-style models, where the
convolved model should be evaluated on a different grid to the
data - e.g. a larger grid, since the convolution will account
for signal outside the data range - and then be regridded to
match the desired grid.
"""
from abc import ABCMeta, abstractmethod
import itertools
import logging
import warnings
import numpy as np
from sherpa.utils._utils import rebin # type: ignore
from sherpa.utils.akima import akima
from sherpa.astro.utils import reshape_2d_arrays
from sherpa.utils.err import DataErr, ModelErr
# Module-level shortcut for emitting user-facing warnings.
warning = logging.getLogger(__name__).warning
# Threshold on the relative pixel-size ratio used when regridding
# (usage not shown in this chunk — see the rest of the module).
PIXEL_RATIO_THRESHOLD = 0.1
def _to_readable_array(x):
"""Convert x into a ndarray that can not be edited (or is None)."""
if x is None:
return None
x = np.asarray(x).copy()
if not np.iterable(x):
raise DataErr("notanarray")
if x.ndim != 1:
raise DataErr("not1darray")
x.setflags(write=False)
return x
class Axis(metaclass=ABCMeta):
    """Represent an axis of a N-D object."""
    # This is set when the data is set
    _is_ascending = None
    @property
    def is_empty(self):
        """Is the axis empty?
        An empty axis is either set to `None` or a zero-element
        sequence.
        """
        return self.size == 0
    @property
    @abstractmethod
    def is_integrated(self):
        """Is the axis integrated?"""
        pass
    @property
    def is_ascending(self):
        """Is the axis ascending?
        The axis is ascending if the elements in `lo` are sorted in
        ascending order. Only the first and last elements are
        checked, and it is assumed that the elements are sorted.
        Raises
        ------
        DataErr
            If the axis is empty.
        """
        if self.is_empty:
            raise DataErr("Axis is empty or has a size of 0")
        return self._is_ascending
    @property
    @abstractmethod
    def start(self):
        """The starting point (lowest value) of the data axis."""
        pass
    @property
    @abstractmethod
    def end(self):
        """The ending point of the data axis
        If the data axis is ascending the end boundary is the last
        element of the `hi` array when the axis is integrated,
        otherwise it's the last element of `lo`. Conversely, for
        descending axes, the last element is either the first element
        of the `hi` array or of the `lo` array, depending on whether
        the axis is integrated or not, respectively.
        """
        pass
    @property
    @abstractmethod
    def size(self):
        """The size of the axis."""
        pass
    def overlaps(self, other):
        """Check if this axis overlaps with another.
        Parameters
        ----------
        other : Axis instance
            The axis to compare to.
        Returns
        -------
        bool
            True if they overlap, False if not
        """
        # Could apply this check but this is not expected to be
        # called directly, so leave for now.
        #
        # if not isinstance(other, Axis):
        #     raise TypeError("other argument must be an axis")
        # Overlap length of [start, end] intervals; positive means overlap.
        num = max(0, min(self.end, other.end) - max(self.start, other.start))
        return bool(num != 0)
class PointAxis(Axis):
    """Represent a point (not integrated) axis of a N-D object.

    The length can not be changed once set.

    Parameters
    ----------
    x : array_like or None
        The axis values. If `None` or `[]` then the data axis is said
        to be empty. The values can be in ascending or descending
        order but this is not checked.
    """

    def __init__(self, x):
        self._x = _to_readable_array(x)
        # The ordering can only be determined for a non-empty axis.
        if self._x is not None and self._x.size > 0:
            self._is_ascending = self._x[-1] > self._x[0]

    @property
    def x(self):
        return self._x

    @property
    def is_integrated(self):
        return False

    @property
    def start(self):
        return self.x[0] if self.is_ascending else self.x[-1]

    @property
    def end(self):
        return self.x[-1] if self.is_ascending else self.x[0]

    @property
    def size(self):
        if self.x is None:
            return 0

        return self.x.size
class IntegratedAxis(Axis):
    """Represent an integrated axis of a N-D object.

    Parameters
    ----------
    lo : array_like or None
        The starting point of each bin. If `lo` is `None` or `[]`
        then the data axis is said to be empty. The axis can be in
        ascending or descending order.
    hi : array_like or None
        The ending point of each bin. The number of elements must
        match `lo` (either `None` or a sequence of the same size).
        Each element is expected to be larger than the corresponding
        element of the `lo` axis, even if the `lo` array is in
        descending order.

    Raises
    ------
    DataErr
        If only one of `lo` and `hi` is set, or their sizes differ.
    """
    # Defaults so the attributes exist even if __init__ raises early.
    _lo = None
    _hi = None
    def __init__(self, lo, hi):
        self._lo = _to_readable_array(lo)
        self._hi = _to_readable_array(hi)
        if self._lo is None:
            if self._hi is None:
                return
            raise DataErr("mismatchn", "lo", "hi", "None", len(self._hi))
        nlo = len(self._lo)
        if self._hi is None:
            raise DataErr("mismatchn", "lo", "hi", nlo, "None")
        nhi = len(self._hi)
        if nlo != nhi:
            raise DataErr("mismatchn", "lo", "hi", nlo, nhi)
        if nlo > 0:
            # Use the validated, read-only copy rather than the raw
            # input: this matches PointAxis and avoids re-indexing an
            # arbitrary user-supplied sequence.
            self._is_ascending = self._lo[-1] > self._lo[0]
    @property
    def lo(self):
        """The low edges of the bins (read-only ndarray, or None)."""
        return self._lo
    @property
    def hi(self):
        """The high edges of the bins (read-only ndarray, or None)."""
        return self._hi
    @property
    def is_integrated(self):
        return True
    @property
    def start(self):
        if self.is_ascending:
            return self.lo[0]
        return self.lo[-1]
    @property
    def end(self):
        if self.is_ascending:
            return self.hi[-1]
        return self.hi[0]
    @property
    def size(self):
        if self.lo is None:
            return 0
        return self.lo.size
class EvaluationSpace1D():
    """Class for 1D Evaluation Spaces.

    An Evaluation Space is a set of data axes representing the data
    space over which a model can be evaluated. A 1D Evaluation Space
    has a single axis.

    Parameters
    ----------
    x : array_like or None, optional
        The data array, or the low edges of the data bins when the
        dataset is "integrated".
    xhi : array_like or None, optional
        The high edges of the data bins for integrated datasets.
    """

    def __init__(self, x=None, xhi=None):
        """Create the single axis from the input arrays."""
        self.x_axis = PointAxis(x) if xhi is None else IntegratedAxis(x, xhi)

    @property
    def is_empty(self):
        """Is the space empty (the x axis has no elements)?"""
        return self.x_axis.is_empty

    @property
    def is_integrated(self):
        """Is the space integrated?"""
        return self.x_axis.is_integrated

    @property
    def is_ascending(self):
        """Is the space ascending?"""
        return self.x_axis.is_ascending

    @property
    def grid(self):
        """The grid representation of the space.

        Returns
        -------
        tuple
            A tuple representing the x axis. The tuple will contain
            two arrays if the dataset is integrated, one otherwise.
        """
        axis = self.x_axis
        if axis.is_integrated:
            return axis.lo, axis.hi

        # The is_integrated setting alone can not be relied on here -
        # per the original note an integrated axis can end up with
        # is_integrated unset in corner cases - so fall back on a type
        # check to pick the attribute that holds the data.
        if isinstance(axis, IntegratedAxis):
            return axis.lo,

        return axis.x,

    @property
    def midpoint_grid(self):
        """The mid-points of the space.

        For non-integrated spaces this returns the X axis.

        Returns
        -------
        array
            The average of the low and high edges of each bin for
            integrated axes, otherwise the x axis array itself.
        """
        axis = self.x_axis
        if axis.is_integrated:
            return (axis.lo + axis.hi) / 2

        return axis.x

    @property
    def start(self):
        """The start (lowest value) of the space."""
        return self.x_axis.start

    @property
    def end(self):
        """The end (highest value) of the space."""
        return self.x_axis.end

    def zeros_like(self):
        """Return a zero for each element of the space.

        Returns
        -------
        array
            A one-dimensional array.
        """
        return np.zeros(self.x_axis.size)

    def overlaps(self, other):
        """Check if this evaluation space overlaps with another.

        Parameters
        ----------
        other : EvaluationSpace1D
            The space to compare to.

        Returns
        -------
        bool
            True if they overlap, False if not.
        """
        return self.x_axis.overlaps(other.x_axis)

    def __contains__(self, other):
        """Are all elements of other within the range (start, end) of this space?

        Parameters
        ----------
        other : EvaluationSpace1D
            The space to compare to.

        Returns
        -------
        boolean
        """
        # This checks the range rather than individual elements, which
        # is a slight stretch of the usual "in" semantics, but it makes
        # comparisons more natural to express.
        return self.start <= other.start and self.end >= other.end
class EvaluationSpace2D():
    """Class for 2D Evaluation Spaces.

    An Evaluation Space is a set of data axes representing the data
    space over which a model can be evaluated. A 2D Evaluation Space
    has two axes, x and y.

    Parameters
    ----------
    x, y : array_like or None, optional
        The data array, or the low end of the x and y data bins if the
        dataset is "integrated". These are not required to be the same
        length.
    xhi, yhi : array_like or None, optional
        The high end of the x and y data bins for integrated datasets.
    """

    def __init__(self, x=None, y=None, xhi=None, yhi=None):
        # The input arrays may be redundant - flattened meshgrid
        # output, such as the arrays sent to a model's calc method -
        # so reduce them to their unique values before building the
        # axes. Note that no check is made that x and y (if set) have
        # the same length.
        xs, ys, xhis, yhis = self._clean_arrays(x, y, xhi, yhi)
        if xhis is None and yhis is None:
            self.x_axis = PointAxis(xs)
            self.y_axis = PointAxis(ys)
        else:
            self.x_axis = IntegratedAxis(xs, xhis)
            self.y_axis = IntegratedAxis(ys, yhis)

    def _clean_arrays(self, x, y, xhi, yhi):
        """Reduce each input to its unique values (keeping order)."""
        return self._clean(x), self._clean(y), self._clean(xhi), self._clean(yhi)

    @staticmethod
    def _clean(array):
        """Return the unique elements of array, keeping first-seen order."""
        if array is None:
            return None

        # np.unique sorts its output, so use the position of the first
        # occurrence of each value to restore the original ordering.
        uniq, first_pos = np.unique(array, return_index=True)
        return uniq[first_pos.argsort()]

    @property
    def is_empty(self):
        """Is the space empty (either axis has no elements)?"""
        return self.x_axis.is_empty or self.y_axis.is_empty

    @property
    def is_integrated(self):
        """Is the space integrated?"""
        if self.is_empty:
            return False

        return self.x_axis.is_integrated and self.y_axis.is_integrated

    @property
    def is_ascending(self):
        """Is the space ascending?

        Returns
        -------
        (xflag, yflag) : (bool, bool)
            True if the axis is ascending, False otherwise, for the
            x and y axes respectively.
        """
        return self.x_axis.is_ascending, self.y_axis.is_ascending

    @property
    def start(self):
        """The start (lowest value) of the space.

        Returns
        -------
        (xstart, ystart) : (number, number)
            The start of the x and y axis arrays, respectively.
        """
        return self.x_axis.start, self.y_axis.start

    @property
    def end(self):
        """The end (highest value) of the space.

        Returns
        -------
        (xend, yend) : (number, number)
            The end of the x and y axis arrays, respectively.
        """
        return self.x_axis.end, self.y_axis.end

    @property
    def shape(self):
        """The sizes of the x and y axes."""
        return self.x_axis.size, self.y_axis.size

    def overlaps(self, other):
        """Check if this evaluation space overlaps with another.

        Note that this is more stringent than the 1D case, as here the
        boundaries of the two spaces must coincide.

        Parameters
        ----------
        other : EvaluationSpace2D
            The space to compare to.

        Returns
        -------
        bool
            True if they overlap, False if not.
        """
        # Keep the same short-circuit ordering as a chained "and".
        if self.x_axis.start != other.x_axis.start:
            return False
        if self.y_axis.start != other.y_axis.start:
            return False
        if self.x_axis.end != other.x_axis.end:
            return False
        return bool(self.y_axis.end == other.y_axis.end)

    @property
    def grid(self):
        """The grid representation of the space.

        The x and y arrays in the grid are one-dimensional
        representations of the meshgrid obtained from the x and y axis
        arrays, as in `numpy.meshgrid(x, y)[0].ravel()`

        Returns
        -------
        tuple
            A two element (x, y) or 4 element (x, y, xhi, yhi) tuple.
        """
        if not self.x_axis.is_integrated:
            return reshape_2d_arrays(self.x_axis.x, self.y_axis.x)

        xlo, ylo = reshape_2d_arrays(self.x_axis.lo, self.y_axis.lo)
        xhi, yhi = reshape_2d_arrays(self.x_axis.hi, self.y_axis.hi)
        return xlo, ylo, xhi, yhi

    def zeros_like(self):
        """Return a zero for each element of the space.

        Returns
        -------
        array
            A one dimensional array.
        """
        return np.zeros(self.x_axis.size * self.y_axis.size)
class ModelDomainRegridder1D():
    """Allow 1D models to be evaluated on a different grid.

    This class is not used directly in a model expression;
    instead it creates an instance that is used to evaluate
    the model.

    Parameters
    ----------
    evaluation_space : object or None, optional
        The EvaluationSpace1D on which the model is evaluated. When
        not set, an empty space is used and calc just passes through
        to the model.
    name : str, optional
        The default name is 'regrid1d'.
    interp : callable, optional
        The interpolation function: it should accept three arrays: the
        output x values and the x, y values to interpolate, and return
        the interpolated y values. The default is to use
        `sherpa.utils.akima.akima`.

    Examples
    --------
    The "internal" model (gaussian plus constant) will be
    evaluated on the grid 0 to 10 (spacing of 0.5), and then
    linearly-interpolated onto the desired grid (1 to 8,
    spacing of 0.7). In this example there is no benefit to
    this approach - it is easier just to evaluate
    `internal_mdl` on the grid `x` - but it illustrates
    the approach.

    >>> from sherpa.models import Gauss1D, Const1D
    >>> internal_mdl = Gauss1D() + Const1D()
    >>> eval_space = EvaluationSpace1D(np.arange(0, 10, 0.5))
    >>> rmdl = ModelDomainRegridder1D(eval_space)
    >>> mdl = rmdl.apply_to(internal_mdl)
    >>> x = np.arange(1, 8, 0.7)
    >>> y = mdl(x)
    """
    def __init__(self, evaluation_space=None, name='regrid1d', **kwargs):
        self.name = name
        # When True, an integrated requested grid is converted by
        # rebinning (conserving the signal); otherwise the mid-points
        # of the bins are interpolated.
        self.integrate = True
        self.evaluation_space = evaluation_space if evaluation_space is not None else EvaluationSpace1D()
        # The method setter validates that this is callable.
        self.method = kwargs.get("interp", akima)
    @property
    def method(self):
        """Interpolate the data from the internal to requested grid.

        This is *only* used for point grids, as integrated grids use a
        simple rebinning scheme. The default is
        `sherpa.utils.akima.akima`. The callable should accept `(xout,
        xin, yin)` arguments and interpolate the `(xin, yin)` data
        onto the `xout` grid, returning the interpolated data.
        """
        return self._method
    @method.setter
    def method(self, method):
        if not callable(method):
            raise TypeError(f"method argument '{repr(method)}' is not callable")
        self._method = method
    @property
    def grid(self):
        """The grid of the associated evaluation space."""
        return self.evaluation_space.grid
    @grid.setter
    def grid(self, value):
        try: # value is an iterable (integrated models) to be unpacked
            self.evaluation_space = EvaluationSpace1D(*value)
        except TypeError: # value is a single array (non-integrated models)
            self.evaluation_space = EvaluationSpace1D(value)
    def apply_to(self, model):
        """Evaluate a model on a different grid."""
        from sherpa.models.model import RegridWrappedModel
        return RegridWrappedModel(model, self)
    def calc(self, pars, modelfunc, *args, **kwargs):
        """Evaluate and regrid a model

        Evaluate the model on the internal grid and then convert it
        onto the desired grid either preserving flux (rebinning) or
        via interpolation.

        Parameters
        ----------
        pars : sequence of numbers
            The parameter values of the model.
        modelfunc
            The model to evaluate (the calc attribute of the model)
        args
            The grid to interpolate the model onto. This must match the
            format of the grid attribute of the model - i.e.
            non-integrate (single array) or integrated (a pair of
            equal-sized arrays).
        kwargs
            Keyword arguments for the model.

        Notes
        -----
        If the requested grid (i.e. that defined by args) does not overlap
        the stored grid (the grid attribute) then all values are set to 0.
        However, if the grids partially overlap then there will be
        extrapolation (depending on the method).

        It is not clear yet whether the restriction on grid type (i.e.
        must match between the requested grid and the internal grid
        whether it is integrated or non-integrated) is too restrictive.
        """
        # An empty internal space means there is nothing to regrid.
        if self.evaluation_space.is_empty: # Simply pass through
            return modelfunc(pars, *args, **kwargs)
        requested_eval_space = self._make_and_validate_grid(args)
        return self._evaluate(requested_eval_space, pars, modelfunc, **kwargs)
    def _make_and_validate_grid(self, args_array):
        """Validate input grid and check whether it's point or integrated.

        Parameters
        ----------
        args_array : list
            The array or arguments passed to the `call` method

        Returns
        -------
        requested_eval_space : EvaluationSpace1D

        Raises
        ------
        ModelErr
            If no grid was given or it does not match the internal
            grid (integrated vs non-integrated).
        """
        nargs = len(args_array)
        if nargs == 0:
            raise ModelErr('nogrid')
        requested_eval_space = EvaluationSpace1D(*args_array)
        # Ensure the two grids match: integrated or non-integrated.
        if self.evaluation_space.is_integrated and not requested_eval_space.is_integrated:
            raise ModelErr('needsint')
        if requested_eval_space.is_integrated and not self.evaluation_space.is_integrated:
            raise ModelErr('needspoint')
        if self.evaluation_space.is_integrated and requested_eval_space.is_integrated:
            # The internal bins must not overlap and must have a
            # non-zero width (the lo[1:] < hi[:-1] comparison assumes
            # an ascending grid), otherwise the rebinning is invalid.
            lo = self.evaluation_space.grid[0]
            hi = self.evaluation_space.grid[1]
            if np.any(lo[1:] < hi[:-1]) or np.any(lo == hi):
                raise ModelErr('needsint')
        return requested_eval_space
    def eval_non_integrated(self, pars, modelfunc, data_grid, eval_grid,
                            **kwargs):
        """Interpolate the model.

        Parameters
        ----------
        pars : list of numbers
            The parameters for the model.
        modelfunc : callable
            The model to evaluate. It is called as
            `modelfunc(pars, x, **kwargs)`
        data_grid : sequence of numbers
            The grid on which to return the values.
        eval_grid : sequence of numbers
            The grid on which to evaluate the model.
        kwargs
            Any arguments to be sent to modelfunc.

        Returns
        -------
        model : ndarray
            The model values matching the data_grid bins.
        """
        # eval_grid is out of data_grid range, so there is nothing to
        # evaluate and everything is reported as zero.
        if eval_grid[-1] < data_grid[0] or eval_grid[0] > data_grid[-1]:
            return np.zeros(data_grid.size)
        #
        # join all elements of data_grid within
        # eval_grid to minimize interpolation (the model is evaluated
        # at the requested points themselves where possible)
        #
        indices = np.where((data_grid > eval_grid[0]) &
                           (data_grid < eval_grid[-1]))
        my_eval_grid = np.unique(np.append(eval_grid, data_grid[indices]))
        y_tmp = modelfunc(pars, my_eval_grid, **kwargs)
        y_interpolate = self.method(data_grid, my_eval_grid, y_tmp)
        if y_interpolate.size == data_grid.size and \
                eval_grid[0] < data_grid[0] and eval_grid[-1] > data_grid[-1]:
            # data space all within eval_grid
            return y_interpolate
        # find indices within data_grid: only the data points covered
        # by the evaluation grid get the interpolated values, the rest
        # stay at zero (i.e. no extrapolation outside eval_grid).
        indices = np.unique(data_grid.searchsorted(my_eval_grid))
        indices = indices[np.where(indices < data_grid.size)]
        y = np.zeros(data_grid.size)
        y[indices] = y_interpolate[indices]
        return y
    def eval_integrated(self, pars, modelfunc, data_grid, eval_grid,
                        **kwargs):
        """Rebin the model onto a grid with low and high bins.

        Parameters
        ----------
        pars : list of numbers
            The parameters for the model.
        modelfunc : callable
            The model to evaluate. It is called as
            `modelfunc(pars, lo, hi, **kwargs)`
        data_grid : (sequence of numbers, sequence of numbers)
            The grid on which to return the values, as low and
            high edges.
        eval_grid : (sequence of numbers, sequence of numbers)
            The grid on which to evaluate the model, as low and
            high edges.
        kwargs
            Any arguments to be sent to modelfunc.

        Returns
        -------
        model : ndarray
            The model values matching the data_grid bins.
        """
        # Evaluate on the internal grid and rebin (signal-conserving)
        # onto the requested bins.
        y = modelfunc(pars, eval_grid[0], eval_grid[1],
                      **kwargs)
        return rebin(y, eval_grid[0], eval_grid[1],
                     data_grid[0], data_grid[1])
    def _evaluate(self, data_space, pars, modelfunc, **kwargs):
        """Evaluate the model and then convert to the requested grid.

        The model is evaluated using the evaluation_space attribute and
        then converted to match the data_space argument. If the data_space
        is integrated and the integrate attribute is set then the conversion
        is done by rebinning the data (and so preserving the signal)
        otherwise the method attribute is used to interpolate the data.

        Parameters
        ----------
        data_space : EvaluationSpace1D instance
            The output grid for the model.
        pars : list of numbers
            The parameters for the model.
        modelfunc : callable
            The model to evaluate. It is called as
            modelfunc(pars, *args, **kwargs) where args is
            either one or two arguments.
        kwargs
            Any arguments to be sent to modelfunc.

        Notes
        -----
        This is based on sherpa.models.TableModel but is simplified as
        we do not provide a fold method.
        """
        # Not really sure I need this, but let's be safe
        kwargs['integrate'] = self.integrate
        eval_space = self.evaluation_space
        if data_space.is_integrated and self.integrate:
            return self.eval_integrated(pars, modelfunc,
                                        data_space.grid, eval_space.grid,
                                        **kwargs)
        # We either have integrate=False or a non-integrated bin is given.
        # In both cases fall back to interpolating the bin mid-points.
        return self.eval_non_integrated(pars, modelfunc,
                                        data_space.midpoint_grid,
                                        eval_space.midpoint_grid,
                                        **kwargs)
class ModelDomainRegridder2D():
    """Allow 2D models to be evaluated on a different grid.

    This class is not used directly in a model expression;
    instead it creates an instance that is used to evaluate
    the model.

    Parameters
    ----------
    evaluation_space : object or None, optional
        The EvaluationSpace2D on which the model is evaluated. When
        not set, an empty space is used and calc just passes through
        to the model.
    name : str, optional
        The default name is 'regrid2d'.

    Examples
    --------
    The "internal" model (gaussian plus constant) will be
    evaluated on the grid 0 to 10 (spacing of 0.5), and then
    linearly-interpolated onto the desired grid (1 to 8,
    spacing of 0.7). In this example there is no benefit to
    this approach - it is easier just to evaluate
    ``internal_mdl`` on the grid ``x, y`` - but it illustrates
    the approach.

    It is not obvious why this example appears to fail,
    but it is left in as documentation. See issue 840 at
    https://github.com/sherpa/sherpa/issues/840

    >>> from sherpa.models import Gauss2D, Const2D
    >>> internal_mdl = Gauss2D() + Const2D()
    >>> eval_space = EvaluationSpace2D(np.arange(0, 10, 0.5), np.arange(0, 10, 0.5))
    >>> rmdl = ModelDomainRegridder2D(eval_space)
    >>> mdl = rmdl.apply_to(internal_mdl)
    >>> x = np.arange(1, 8, 0.7)
    >>> y = np.arange(1, 8, 0.7)
    >>> x, y = reshape_2d_arrays(x, y)
    >>> z = mdl(x, y)  # doctest: +SHOW_WARNINGS
    UserWarning: requested space and evaluation space do not overlap, evaluating model to 0
    """

    def __init__(self, evaluation_space=None, name='regrid2d'):
        self.name = name
        if evaluation_space is None:
            evaluation_space = EvaluationSpace2D()
        self.evaluation_space = evaluation_space

    @property
    def grid(self):
        return self.evaluation_space.grid

    @grid.setter
    def grid(self, value):
        self.evaluation_space = EvaluationSpace2D(*value)

    def apply_to(self, model):
        """Evaluate a model on a different grid."""
        from sherpa.models.model import RegridWrappedModel
        return RegridWrappedModel(model, self)

    def calc(self, pars, modelfunc, *args, **kwargs):
        """Evaluate and regrid a model.

        Evaluate the model on the internal grid and then
        interpolate onto the desired grid.

        Parameters
        ----------
        pars : sequence of numbers
            The parameter values of the model.
        modelfunc
            The model to evaluate (the calc attribute of the model).
        args
            The grid to interpolate the model onto. This must match the
            format of the grid attribute of the model - i.e.
            non-integrate (x, y arrays) or integrated (xlo, ylo, xhi, yhi).
        kwargs
            Keyword arguments for the model.

        Notes
        -----
        If the requested grid (i.e. that defined by args) does not overlap
        the stored grid (the grid attribute) then all values are set to 0.
        However, if the grids partially overlap then there will be
        extrapolation (depending on the method).

        It is not clear yet whether the restriction on grid type (i.e.
        must match between the requested grid and the internal grid
        whether it is integrated or non-integrated) is too restrictive.
        """
        # An empty internal space means there is nothing to regrid.
        if self.evaluation_space.is_empty:
            return modelfunc(pars, *args, **kwargs)

        requested = self._make_and_validate_grid(args)
        return self._evaluate(requested, pars, modelfunc, **kwargs)

    def _make_and_validate_grid(self, args_array):
        """Validate input grid and check whether it's point or integrated.

        Parameters
        ----------
        args_array : list
            The array or arguments passed to the `call` method.

        Returns
        -------
        requested_eval_space : EvaluationSpace2D
        """
        if len(args_array) == 0:
            raise ModelErr('nogrid')

        requested = EvaluationSpace2D(*args_array)

        # The grids must agree on whether they are integrated or not.
        internal = self.evaluation_space
        if internal.is_integrated and not requested.is_integrated:
            raise ModelErr('needsint')
        if requested.is_integrated and not internal.is_integrated:
            raise ModelErr('needspoint')

        return requested

    def _evaluate(self, requested_space, pars, modelfunc, **kwargs):
        """Evaluate on the internal grid and rebin onto the requested one."""
        if not requested_space.overlaps(self.evaluation_space):
            warnings.warn("requested space and evaluation space do not overlap, evaluating model to 0")
            return requested_space.zeros_like()

        evaluated = modelfunc(pars, *self.grid, **kwargs)
        return rebin_2d(evaluated, self.evaluation_space, requested_space).ravel()
def rebin_2d(y, from_space, to_space):
    """Rebin the 2D data y from from_space onto to_space."""
    nx_to = to_space.x_axis.size
    ny_to = to_space.y_axis.size
    nx_from = from_space.x_axis.size
    ny_from = from_space.y_axis.size

    # A space may carry an explicit data-to-PSF pixel-size ratio (see
    # the attribute name); otherwise the scaling comes from the ratio
    # of the axis sizes.
    if hasattr(from_space, "data_2_psf_pixel_size_ratio"):
        ratio = from_space.data_2_psf_pixel_size_ratio
        scale_x = 1 / ratio[0]
        scale_y = 1 / ratio[1]
    else:
        scale_x = nx_from / nx_to
        scale_y = ny_from / ny_to

    scale = scale_x * scale_y
    if scale == 1:
        return y

    # Renormalize so that the block summation conserves the signal.
    scaled = y.reshape(nx_from, ny_from) / scale

    # Use the exact integer rebin when the scaling is close enough to
    # a whole number, otherwise the fractional (flux-conserving)
    # scheme.
    if (abs(scale_x - round(scale_x)) > PIXEL_RATIO_THRESHOLD
            or abs(scale_y - round(scale_y)) > PIXEL_RATIO_THRESHOLD):
        return rebin_no_int(scaled, dimensions=(nx_to, ny_to))

    return rebin_int(scaled, int(round(scale_x)), int(round(scale_y)))
def rebin_int(array, scale_x, scale_y):
    """Rebin array by an integer scale on both x and y.

    Parameters
    ----------
    array : array_like
        The array to be rebinned.
    scale_x : int
        The pixel ratio on the x axis.
    scale_y : int
        The pixel ratio on the y axis.

    Returns
    -------
    array_like
    """
    nx = np.shape(array)[0]
    ny = np.shape(array)[1]

    # Drop any leading rows/columns that do not fill a whole output
    # pixel, so the remaining shape is an exact multiple of the scale.
    trimmed = array[nx % scale_x:, ny % scale_y:]
    out_x = np.shape(trimmed)[0] // scale_x
    out_y = np.shape(trimmed)[1] // scale_y

    # Group each output pixel's contributing input pixels along the
    # second and fourth axes and sum them away.
    blocks = np.reshape(trimmed, (out_x, scale_x, out_y, scale_y))
    summed = np.sum(blocks, axis=3)
    return np.sum(summed, axis=1)
def rebin_no_int(array, dimensions=None, scale=None):
    """Rebin the array, conserving flux.

    Return the array ``array`` to the new ``dimensions`` conserving flux,
    so that the sum of the output matches the sum of ``array``.

    Parameters
    ----------
    array : ndarray
        The 2D data to rebin.
    dimensions : tuple of int, int, or float, optional
        The output shape. A scalar is applied to every axis.
    scale : number or sequence of numbers, optional
        The scaling applied to each axis size to derive the output
        shape (only used when dimensions is None).

    Returns
    -------
    ndarray

    Raises
    ------
    RuntimeError
        If neither dimensions nor scale is given, or the one given
        does not match the dimensionality of array.
    AssertionError
        If the totals of the input and result array don't agree, raise an error because computation may have gone wrong

    Notes
    -----
    This routine is based on the example at
    http://martynbristow.co.uk/wordpress/blog/rebinning-data/
    which was released as GPL v3 © Martyn Bristow 2015. It has been
    slightly modified for Sherpa.

    Examples
    --------
    >>> ar = np.array([
    ...    [0,1,2],
    ...    [1,2,3],
    ...    [2,3,4],
    ...    ])
    >>> rebin_no_int(ar, (2,2))
    array([[1.5, 4.5],
           [4.5, 7.5]])
    """
    if dimensions is not None:
        if isinstance(dimensions, float):
            dimensions = [int(dimensions)] * len(array.shape)
        elif isinstance(dimensions, int):
            dimensions = [dimensions] * len(array.shape)
        elif len(dimensions) != len(array.shape):
            raise RuntimeError("dimensions does not match the array shape")
    elif scale is not None:
        if isinstance(scale, (float, int)):
            scale = [scale] * len(array.shape)
        elif len(scale) != len(array.shape):
            raise RuntimeError("scale does not match the array shape")
        # Convert to a concrete list of sizes. The previous version
        # built a lazy map object here, which can not be indexed or
        # passed to np.zeros under Python 3, and never set dimensions
        # at all when scale was a sequence.
        dimensions = [int(round(dim * s)) for dim, s in zip(array.shape, scale)]
    else:
        raise RuntimeError('Incorrect parameters to rebin.\n\trebin(array, dimensions=(x,y))\n\trebin(array, scale=a')
    result = np.zeros(dimensions)
    for j, i in itertools.product(*map(range, array.shape)):
        # Map the input pixel (j, i) onto the output grid: (J, I) is
        # the output pixel holding its leading corner, and (J1, I1)
        # tells whether the trailing corner spills into a neighbour.
        (J, dj) = divmod(j * dimensions[0], array.shape[0])
        (I, di) = divmod(i * dimensions[1], array.shape[1])
        (J1, dj1) = divmod(j + 1, array.shape[0] / float(dimensions[0]))
        (I1, di1) = divmod(i + 1, array.shape[1] / float(dimensions[1]))
        # Moving to new bin
        # Is this a discrete bin?
        dx, dy = 0, 0
        if (I1 - I == 0) | ((I1 - I == 1) & (di1 == 0)):
            dx = 1
        else:
            dx = 1 - di1
        if (J1 - J == 0) | ((J1 - J == 1) & (dj1 == 0)):
            dy = 1
        else:
            dy = 1 - dj1
        # Prevent it from allocating outside the array
        I_ = min(dimensions[1] - 1, I + 1)
        J_ = min(dimensions[0] - 1, J + 1)
        # Split the input pixel's value between the (up to) four
        # output pixels it covers, weighted by the overlap fractions.
        result[J, I] += array[j, i] * dx * dy
        result[J_, I] += array[j, i] * (1 - dy) * dx
        result[J, I_] += array[j, i] * dy * (1 - dx)
        result[J_, I_] += array[j, i] * (1 - dx) * (1 - dy)
    # Sanity check that the total signal was conserved. NOTE: assert
    # is stripped when Python runs with -O; kept as-is because the
    # AssertionError behaviour is part of the documented interface.
    allowError = 0.001
    assert array.sum() == 0 or \
        (array.sum() < result.sum() * (1 + allowError)) and \
        (array.sum() > result.sum() * (1 - allowError))
    return result
|
sherpaREPO_NAMEsherpaPATH_START.@sherpa_extracted@sherpa-main@sherpa@models@regrid.py@.PATH_END.py
|
{
"filename": "lscsn.py",
"repo_name": "LCOGT/lcogtsnpipe",
"repo_path": "lcogtsnpipe_extracted/lcogtsnpipe-master/trunk/bin/lscsn.py",
"type": "Python"
}
|
#!/usr/bin/env python
description = ">> New automated sn measurement"
usage = "%prog image [options] "
import time
from numpy import array,log10
import numpy as np
import lsc
import glob
import os
import sys
import shutil
import string
import re
from astropy.io.fits import getheader
from astropy.io import fits
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser(usage=usage, description=description)
parser.add_option("-p", "--psf", dest="psf", default='',
type='str', help='psf file \t\t\t %default')
parser.add_option("-R", "--RA", dest="RA", default='', type='str', help='RA coordinate \t\t\t %default')
parser.add_option("-D", "--DEC", dest="DEC", default='', type='str',
help='DEC coordinate \t\t\t %default')
parser.add_option("--RA0", dest="RA0", default='', type='str',
help='RA coordinate where centering the box stamp \t\t\t %default')
parser.add_option("--DEC0", dest="DEC0", default='', type='str',
help='DEC coordinate where centering the box stamp \t\t\t %default')
parser.add_option("-n", "--iteration", dest="niter", default=3, type='int',
help=' number of iterations \t\t %default')
parser.add_option("-x", "--xorder", dest="xorder", default=2, type='int',
help=' order for backgorund in x \t\t %default')
parser.add_option("-y", "--yorder", dest="yorder", default=2, type='int',
help=' order for backgroun in y \t\t %default')
parser.add_option("-b", "--bkg", dest="bkg", default=4, type='float',
help=' backgroun radius dimension \t\t %default')
parser.add_option("-z", "--size", dest="size", default=7, type='float',
help=' half size of the stamp \t\t %default')
parser.add_option("-m", "--datamax", dest="datamax", type='float',
help=' data max for saturation \t\t %default')
parser.add_option("--datamin", dest="datamin", type='float',
help=' data min for saturation \t\t %default')
parser.add_option("-i", "--interactive", action="store_true", dest='interactive', default=False,
help='Interactive \t\t\t [%default]')
parser.add_option("-s", "--show", action="store_true", dest='show', default=False,
help='display images \t\t\t [%default]')
parser.add_option("-c", "--center", action="store_false", dest='recenter', default=True,
help='recenter \t\t\t [%default]')
parser.add_option("-r", "--redo", dest="redo", action="store_true",
default=False, help=' redo measurement \t\t\t [%default]')
option, args = parser.parse_args()
if len(args) < 1: sys.argv.append('--help')
option, args = parser.parse_args()
imglist = lsc.util.readlist(args[0])
if option.psf:
psflist = lsc.util.readlist(option.psf)
else:
psflist = ''
_ra = option.RA
_dec = option.DEC
_ra0 = option.RA0
_dec0 = option.DEC0
_size = option.size
_fb = option.bkg
_xord = option.xorder
_yord = option.yorder
_numiter = option.niter
_dmax = option.datamax
_dmin = option.datamin
_interactive = option.interactive
arterr = ''
_ra = string.split(_ra,',')
_dec = string.split(_dec,',')
if option.recenter == False:
_recenter = True
else:
_recenter = False
if option.show == False:
_show = False
else:
_show = True
if option.redo == False:
redo = False
else:
redo = True
if _interactive:
_show = True
##################### if the centering coordinates are not defined, use coordinate of the object
if _ra and not _ra0:
_ra0 = _ra[0]
if _dec and not _dec0:
_dec0 = _dec[0]
# ####################### SET IRAF PARAMETERS #######################
from pyraf import iraf
iraf.astcat(_doprint=0)
iraf.imcoords(_doprint=0)
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
from iraf import digiphot
from iraf import daophot
from iraf import ptools
zmag = 0
iraf.digiphot.daophot.photpars.zmag = zmag
warn = '##################################\n'
for imglong in imglist:
if imglong:
if imglong[-5:] == '.fits':
img = imglong[:-5]
else:
img = imglong
######### read header, find psf file #############################################
hdr = lsc.util.readhdr(imglong)
_instrument = lsc.util.readkey3(hdr, 'instrume')
filter = lsc.util.readkey3(hdr, 'filter')
print '########## ' + str(filter) + ' ##############'
################### added for difference images ###############
DM = 0
if 'CONVOL00' in hdr:
if hdr['CONVOL00'] == 'TEMPLATE':
_difexp = lsc.util.readkey3(hdr, 'exptime')
_targexp = lsc.util.readkey3(hdr, 'exptarg')
_tempexp = lsc.util.readkey3(hdr, 'exptemp')
print _difexp, _targexp, _tempexp
DM = 2.5*log10(_targexp)-2.5*log10(_difexp)
print '#### ',str(DM)
######################################################################
if not psflist:
psfimage = img + '.psf'
else:
psfimage = re.sub('.fits', '', psflist[0])
skip = 0
if os.path.exists(img + '.sn2.fits'):
hdr2 = lsc.util.readhdr(img + '.sn2.fits')
snmag1 = lsc.util.readkey3(hdr2, 'PSFMAG1')
if snmag1 and not redo:
skip = 1
else:
sys.exit('psf not computed')
print psfimage
if not os.path.exists(psfimage + '.fits'):
sys.exit('missing psf file')
if redo:
skip = 0
####################################################################################
####################### plot image, find fwhm #####################################
if skip == 0:
###### set database value to 9999 before calculating them again ##################
lsc.mysqldef.updatevalue('photlco', 'psfmag', 9999, string.split(img, '/')[-1] + '.fits')
lsc.mysqldef.updatevalue('photlco', 'psfdmag', 9999, string.split(img, '/')[-1] + '.fits')
lsc.mysqldef.updatevalue('photlco', 'psfx', 9999, string.split(img, '/')[-1] + '.fits')
lsc.mysqldef.updatevalue('photlco', 'psfy', 9999, string.split(img, '/')[-1] + '.fits')
lsc.mysqldef.updatevalue('photlco', 'apmag', 9999, string.split(img, '/')[-1] + '.fits')
iraf.set(stdimage='imt2048')
if _interactive:
_z1, _z2, goon = lsc.util.display_image(img + '.fits', 1, '', '', True, _xsize=1, _ysize=1)
elif _show:
_z1, _z2, goon = lsc.util.display_image(img + '.fits', 1, '', '', False)
# Read aperture correction that corresponds to PSF used
if 'diff' in imglong:
if 'optimal' in imglong: #aperture correction is meaningless for theoretical PyZOGY PSF
apco0 = 0
else: #HOTPANTS difference imaging
if 'CONVOL00' in hdr:
convolve_value = lsc.util.readkey3(hdr, 'CONVOL00')
if convolve_value == 'TEMPLATE': #Read template aperture correction if template PSF is used
try:
hostname, username, passwd, database = lsc.mysqldef.getconnection('lcogt2')
conn = lsc.mysqldef.dbConnect(hostname, username, passwd, database)
except ImportError as e:
print e
print 'try running one of these:'
print 'pip install mysql-python'
print 'conda install mysql-python'
except Exception as e:
print e
print '### warning: problem connecting to the database'
template_filename = hdr['TEMPLATE']
template_db_info = lsc.mysqldef.getfromdataraw(conn, 'photlco', 'filename', str(template_filename), '*')
template_filepath = template_db_info[0]['filepath']
hdr_apco = lsc.util.readhdr(os.path.join(template_filepath, template_filename.replace('.fits', '.sn2.fits')))
elif convolve_value == 'IMAGE': #Read image aperture correction if image PSF is used
hdr_apco = lsc.util.readhdr(os.path.join(os.path.dirname(imglong), hdr['TARGET'].replace('.fits', '.sn2.fits')))
apco0 = lsc.util.readkey3(hdr_apco, 'APCO')
else: #For all non-difference imaging, the sn2 file should correspond to the PSF used filetype 1, 2, and 4
apco0 = lsc.util.readkey3(hdr2, 'APCO')
if 'PIXSCALE' in hdr2:
pixelscale = lsc.util.readkey3(hdr2, 'PIXSCALE')
elif 'CCDSCALE' in hdr2:
pixelscale = lsc.util.readkey3(hdr2, 'CCDSCALE')
if 'fs' in _instrument or 'em' in _instrument:
if 'CCDXBIN' in hdr2:
fwhm0 = lsc.util.readkey3(hdr2, 'PSF_FWHM') / (pixelscale * lsc.util.readkey3(hdr2, 'CCDXBIN'))
elif 'CCDSUM' in hdr2:
fwhm0 = lsc.util.readkey3(hdr2, 'PSF_FWHM') / (
pixelscale * int(string.split(lsc.util.readkey3(hdr2, 'CCDSUM'))[0]))
else:
fwhm0 = lsc.util.readkey3(hdr2, 'PSF_FWHM') / (pixelscale)
else:
fwhm0 = lsc.util.readkey3(hdr2, 'PSF_FWHM') / pixelscale
if not apco0:
print '\n### warning: apco not found'
apco0 = 0
if not fwhm0:
print '\n### warning: fwhm not found'
fwhm0 = 6
##################################################################################
################## find coordinate for the box position #########
xx0, yy0 = '', ''
if skip == 0:
if not _ra0 and not _dec0:
try:
print 'no box coordinate from input, take coordinate from database'
_ra0, _dec0, _ = lsc.util.checksndb(img + '.fits')
except:
print 'no coordinate from database'
_ra0, _dec0 = '', ''
if not _ra[0] and not _dec[0]:
print 'no sn coordinate from input'
if _ra0:
print 'use coordinate from the box for the SN'
_ra= [_ra0]
else:
sys.exit('no box and sn coordinate ')
if _dec0:
_dec= [_dec0]
else:
sys.exit('no box and sn coordinate ')
if _ra and _dec:
os.system('rm -rf tmp.*')
lll=[]
for jj in range(0,len(_ra)):
lll.append(str(_ra[jj]) + ' ' + str(_dec[jj]))
#lll = [str(_ra) + ' ' + str(_dec)]
iraf.wcsctran('STDIN', 'tmp.pix', img + '[0]', Stdin=lll, inwcs='world', units='degrees degrees',
outwcs='logical', columns='1 2', formats='%10.1f %10.1f')
if _show:
iraf.tvmark(1, 'tmp.pix', mark="circle", number='yes', radii=10, nxoffse=5, nyoffse=5,
color=214, txsize=2)
xxsn, yysn = [], []
for kk in iraf.fields('tmp.pix', '1,2', Stdout=1):
if kk:
xxsn.append(string.split(kk)[0])
yysn.append(string.split(kk)[1])
xxsn = array(xxsn,float)
yysn = array(yysn,float)
print 'SN coordinate ',xxsn, yysn
if _ra0 and _dec0:
os.system('rm -rf tmp.*')
lll = [str(_ra0) + ' ' + str(_dec0)]
iraf.wcsctran('STDIN', 'tmp.pix', img + '[0]', Stdin=lll, inwcs='world', units='degrees degrees',
outwcs='logical', columns='1 2', formats='%10.1f %10.1f')
if _show:
iraf.tvmark(1, 'tmp.pix', mark="circle", number='yes', radii=10, nxoffse=5, nyoffse=5,
color=214, txsize=2)
xx0, yy0 = string.split(iraf.fields('tmp.pix', '1,2', Stdout=1)[2])
print 'box coordinate ',xx0, yy0
os.system('rm -rf tmp.*')
else:
xx0, yy0 = '', ''
if xx0 == '' and not _interactive:
sys.exit('coordinate of the object not found, reduction not possible')
if _interactive:
repeat = 'y'
while repeat == 'y':
print "_____________________________________________"
print " MARK SN REGION WITH - x -, EXIT - q -"
try:
iraf.imexamine(img + '.fits', 1, wcs='logical', logfile='tmp.log', keeplog=True)
xytargets = iraf.fields('tmp.log', '1,2', Stdout=1)
xx0 = xytargets[0].split()[0]
yy0 = xytargets[0].split()[1]
lsc.util.delete("tmplabel")
ff = open('tmplabel', 'w')
ff.write(str(xx0) + ' ' + str(yy0) + ' -1' + ' \n')
ff.close()
iraf.tvmark(1, 'tmplabel', autol='no', mark="cross", inter='no', label='no', txsize=4)
repeat = raw_input('### repeat selection ? [y/n] ? [n] ')
if not repeat:
repeat = 'n'
elif repeat == 'yes':
repeat = 'y'
elif repeat == 'YES':
repeat = 'y'
except:
#x = y = value = 0
print '### WARNING: SN REGION NOT SELECTED !!!'
repeat = raw_input('### repeat selection ? [y/n] ? [n] ')
if not repeat: repeat = 'n'
if repeat in ['Y', 'y', 'YES', 'yes', 'Yes']:
repeat = 'y'
else:
sys.exit('error: no coordinate for box position selected')
#################################################################################
###################################### chose size of the box and cut it ###############
if skip == 0:
if not _interactive:
if not fwhm0:
sys.exit('error: fwhm not defined ')
if not xx0:
sys.exit('error: no coordinate for box position selected')
if _size:
size = _size
else:
size = 7
lsc.util.delete("original.fits")
lsc.util.imcopy(imglong, 'original.fits', (float(xx0), float(yy0)), 2*size*fwhm0)
else:
repeat = 'n'
while repeat in ['n','no','NO','N']:
size = raw_input('Size of the cut frame (fwhm) [' + str(_size) + '] ? ')
if not size:
size = _size
else:
size = int(size)
lsc.util.delete("original.fits")
lsc.util.imcopy(imglong, 'original.fits', (float(xx0), float(yy0)), 2*size*fwhm0)
iraf.set(stdimage='imt512')
_tmp1, _tmp2, goon = lsc.util.display_image('original.fits', 1, '', '', False, _xsize=.5,
_ysize=.5)
repeat = raw_input('### ok ? [y/n] ? [y] ')
if not repeat:
repeat = 'y'
x1 = float(xx0) - size * fwhm0
y1 = float(yy0) - size * fwhm0
xxsn -= x1
yysn -= y1
####################################### plot with right cuts ##########################
z11, z22='',''
if skip == 0:
if _show:
_z11, _z22, good = lsc.util.display_image('original.fits', 1, '', '', False, _xsize=.5, _ysize=.5)
z11, z22 = _z11, _z22
if _interactive:
answ = 'y'
answ = raw_input(">>>>> Cuts OK [y/n] [y]?")
if not answ:
answ = 'y'
elif answ == 'no':
answ = 'n'
while answ == 'n':
z11 = raw_input('>>> z1 = ? [' + str(_z11) + '] ? ')
z22 = raw_input('>>> z2 = ? [' + str(_z22) + '] ? ')
if not z11:
z11 = _z11
else:
z11 = float(z11)
if not z22:
z22 = _z22
else:
z22 = float(z22)
_z11, _z22, goon = lsc.util.display_image('original.fits', 1, z11, z22, False)
answ = raw_input(">>>>> Cuts OK [y/n] [y]?")
if not answ:
answ = 'y'
elif answ == 'no':
answ = 'n'
z11 = float(_z11)
z22 = float(_z22)
###############################################################################################3
######################################## write SN coordinates in coo file ########################
if skip == 0:
if not _interactive:
#_dimension = string.split((string.split(iraf.imheader('original', Stdout=1)[0], ']')[0]), '[')[1]
#aa, bb = string.split(_dimension, ',')
#aa, bb = float(aa) / 2, float(bb) / 2
vector=[]
for ii in range(0,len(xxsn)):
vector.append(str(xxsn[ii]) + ' ' + str(yysn[ii]) + ' 1')
ff = open('tmplabel', 'w')
for i in vector:
ff.write(i + ' \n')
ff.close()
if _show:
iraf.tvmark(1, 'tmplabel', autol='no', mark="circle", radii=10, inter='no', label='no',
number='yes', pointsize=20, txsize=2, color=204)
os.system('cp tmplabel ' + img + '.sn.coo')
else:
answ0 = 'n'
while answ0 == 'n':
_tmp1, _tmp2, goon = lsc.util.display_image('original.fits', 1, z11, z22, False)
print " ", str(z11), str(z22)
print "__________________________________________________"
print "IDENTIFY SN AND CO-STARS(S) WITH - x -, EXIT - q -"
print "__________________________________________________"
print " 1 1 'ID. SN AND CO-STAR(S) WITH -x- EXIT -q-'"
lsc.util.delete("tmplabel")
vector = iraf.imexamine('original.fits', 1, wcs='logical', xformat='', yformat='',
use_display='no', Stdout=1)
if string.count(vector[0], 'z1') == 1: vector = vector[1:]
ff = open('tmplabel', 'w')
for i in vector:
ff.write(i + ' \n')
ff.close()
if _show:
iraf.tvmark(1, 'tmplabel', autol='no', mark="circle", radii=10, inter='no',
label='no', number='yes', pointsize=20, txsize=2, color=204)
os.system('cp tmplabel ' + img + '.sn.coo')
answ0 = raw_input(">>>>> SN AND CO-STARS(S) IDENTIFICATIONS OK [y/n] [y]?")
if not answ0:
answ0 = 'y'
elif answ0 == 'no':
answ0 = 'n'
if skip == 0:
############################ BACKGROUND FIT ###############################
print ' ************ background fit **********************'
answ0 = 'n'
while answ0 == 'n':
lsc.util.delete("sky.fits,bg.fits,bgs.fits,sn.fits,residual.fits")
if _show: lsc.util.display_image('original.fits', 1, z11, z22, False, _xsize=.5, _ysize=.5)
nax = int(getheader('original.fits')['NAXIS1'])
nay = int(getheader('original.fits')['NAXIS2'])
if len(vector) == 1:
xb, yb, value = string.split(vector[0])
checkleng0 = 'yes'
while checkleng0 == 'yes':
if not _interactive:
leng0 = _fb
else:
leng0 = raw_input('>>> length of square for background in units of FWHM [3] ? ')
if not leng0:
leng0 = 3
try:
float(leng0)
checkleng0 = 'no'
except:
print 'WARNING: the FWHM should be a number !!!!'
checkleng0 == 'yes'
if _show:
iraf.tvmark(1, img + ".sn.coo", auto='no', mark="rectangle",
length=int(fwhm0 * float(leng0)), inter='no', color=204)
xb1 = int(float(xb) - fwhm0 * float(leng0) / 2)
xb2 = int(float(xb) + fwhm0 * float(leng0) / 2)
yb1 = int(float(yb) - fwhm0 * float(leng0) / 2)
yb2 = int(float(yb) + fwhm0 * float(leng0) / 2)
sec = "1 " + str(xb1) + " 1 " + str(nay) + '\n'
sec = sec + str(xb2) + ' ' + str(nax) + " 1 " + str(nay) + '\n'
sec = sec + str(xb1) + ' ' + str(xb2) + " 1 " + str(yb1) + '\n'
sec = sec + str(xb1) + ' ' + str(xb2) + ' ' + str(yb2) + ' ' + str(nay) + '\n'
ff = open('sec', 'w')
ff.write(sec)
ff.close()
inp = "bg.fits[" + str(xb1) + ":" + str(xb2) + "," + str(yb1) + ":" + str(yb2) + "]"
out = "sky.fits[" + str(xb1) + ":" + str(xb2) + "," + str(yb1) + ":" + str(yb2) + "]"
checkorder = 'yes'
while checkorder == 'yes':
if not _interactive or _xord and _yord:
if _xord and _yord:
xbgord0 = _xord
ybgord0 = _yord
else:
xbgord0 = _xbgord0
ybgord0 = _ybgord0
else:
xbgord0 = raw_input('>>> Order of function in x for bg fit [' + str(_xbgord0) + '] ? ')
if not xbgord0:
xbgord0 = _xbgord0
else:
_xbgord0 = xbgord0
ybgord0 = raw_input(
'>>> Order of function in y for bg fit ? [' + str(_ybgord0) + '] ? ')
if not ybgord0:
ybgord0 = _ybgord0
else:
_ybgord0 = ybgord0
try:
float(xbgord0)
float(ybgord0)
checkorder = 'no'
except:
print 'WARNING: value not valid !!'
checimsurfitkorder = 'yes'
iraf.imsurfit("original", "bg", xorder=xbgord0, yorder=ybgord0, regions="section",
section="sec")
else:
if not _interactive:
print 'select'
xb1 = int(min(xxsn) - fwhm0*_fb)
xb2 = int(max(xxsn) + fwhm0*_fb)
yb1 = int(min(yysn) - fwhm0*_fb)
yb2 = int(max(yysn) + fwhm0*_fb)
else:
lsc.util.delete("tmplabel")
lsc.util.delete("tmptbl")
ff = open('tmplabel', 'w')
ff.write('')
ff.close()
print ">>> Mark corners of bg-region with >b<, exit >q<"
iraf.tvmark(1, "tmplabel", autol='no', mark="none", inter='no', label='yes', txsize=2,
color=204)
iraf.tvmark(1, "", logfile="tmptbl", autol='yes', mark="cross", inter='yes', color=204)
ff = open('tmptbl', 'r')
ss = ff.readlines()
ff.close()
xb1 = int(float(string.split(ss[-2])[0]))
yb1 = int(float(string.split(ss[-2])[1]))
xb2 = int(float(string.split(ss[-1])[0]))
yb2 = int(float(string.split(ss[-1])[1]))
sec = "1 " + str(xb1) + " 1 " + str(nay) + '\n'
sec = sec + str(xb2) + ' ' + str(nax) + " 1 " + str(nay) + '\n'
sec = sec + str(xb1) + ' ' + str(xb2) + " 1 " + str(yb1) + '\n'
sec = sec + str(xb1) + ' ' + str(xb2) + ' ' + str(yb2) + ' ' + str(nay) + '\n'
ff = open('sec', 'w')
ff.write(sec)
ff.close()
inp = "bg.fits[" + str(xb1) + ":" + str(xb2) + "," + str(yb1) + ":" + str(yb2) + "]"
out = "sky.fits[" + str(xb1) + ":" + str(xb2) + "," + str(yb1) + ":" + str(yb2) + "]"
checkorder = 'yes'
while checkorder == 'yes':
if not _interactive or _xord and _yord:
if _xord and _yord:
xbgord0 = _xord
ybgord0 = _yord
else:
xbgord0 = _xbgord0
ybgord0 = _ybgord0
else:
xbgord0 = raw_input('>>> Order of function in x for bg fit [' + str(_xbgord0) + '] ? ')
if not xbgord0:
xbgord0 = _xbgord0
else:
_xbgord0 = xbgord0
ybgord0 = raw_input('>>> Order of function in y for bg fit [' + str(_ybgord0) + '] ? ')
if not ybgord0:
ybgord0 = _ybgord0
else:
_ybgord0 = ybgord0
try:
float(xbgord0)
float(ybgord0)
checkorder = 'no'
except:
print 'WARNING: value not valid !!'
checkorder = 'yes'
iraf.imsurfit("original", "bg", xorder=xbgord0, yorder=ybgord0, regions="sections",
sections="sec")
midpt = np.mean(fits.getdata('bg.fits'))
iraf.imcopy("original.fits", "sky.fits")
iraf.imcopy(inp, "bgs.fits")
iraf.imcopy("bgs.fits", out)
iraf.imarith("original.fits", "-", "sky.fits", "sn.fits")
iraf.imarith("sn.fits", "+", midpt, "sn.fits")
if _show or _interactive:
_tmp1, _tmp2, goon = lsc.util.display_image('original.fits', 1, z11, z22, False, _xcen=.25,
_ycen=.25, _xsize=.3, _ysize=.3)
s1 = 1
s2 = -int(fwhm0)
lsc.util.delete("tmptbl")
ff = open('tmptbl', 'w')
ff.write(str(s1) + ' ' + str(s2) + " ORIGINAL")
ff.close()
if _show:
iraf.tvmark(1, "tmptbl", autol='no', mark="none", inter='no', label='yes', txsize=2)
_tmp1, _tmp2, goon = lsc.util.display_image('sky.fits', 1, z11, z22, False, _xcen=.25,
_ycen=.75, _xsize=.3, _ysize=.3, _erase='no')
lsc.util.delete("tmptbl")
ff = open('tmptbl', 'w')
ff.write(str(s1) + ' ' + str(s2) + " BACKGROUND_FIT")
ff.close()
if _show:
iraf.tvmark(1, "tmptbl", autol='no', mark="none", inter='no', label='yes', txsize=2)
_tmp1, _tmp2, goon = lsc.util.display_image('sn.fits', 1, z11, z22, False, _xcen=.75, _ycen=.25,
_xsize=.3, _ysize=.3, _erase='no')
lsc.util.delete("tmptbl")
ff = open('tmptbl', 'w')
ff.write(str(s1) + ' ' + str(s2) + " STARS")
ff.close()
if _show: iraf.tvmark(1, "tmptbl", autol='no', mark="none", inter='no', label='yes', txsize=2)
if not _interactive:
answ0 = 'y'
else:
answ0 = raw_input(">>> Background fit OK [y/n] [y] ?")
if not answ0:
answ0 = 'y'
elif answ0 == 'no':
answ0 = 'n'
#################################### FITSN ###################################
print img, psfimage, 'xxxxx'
if _dmax is None:
_dmax = lsc.util.readkey3(hdr, 'datamax')
if _dmin is None and 'optimal' in img:
_dmin = 'INDEF'
elif _dmin is None:
_dmin = lsc.util.readkey3(hdr, 'datamin')
apori1, apori2, apori3, apmag1, apmag2, apmag3, dapmag1, dapmag2, dapmag3, fitmag, truemag, magerr, centx, centy = \
lsc.lscsnoopy.fitsn(img, psfimage, img + '.sn.coo', _recenter, fwhm0, 'original', 'sn',
'residual', _show, _interactive, _dmax, _dmin, z11, z22, midpt, _size, apco0)
################# Iterate Beckground ###################################
if _interactive:
if not _numiter:
answ0 = raw_input(">>> Iterate on background [y/n] [y] ?")
if not answ0: answ0 = 'y'
elif _numiter >= 1:
answ0 = 'y'
else:
answ0 = 'n'
else:
if _numiter >= 1:
answ0 = 'y'
time.sleep(1) #to prevent iraf warning
else:
answ0 = 'n'
_count = 0
while answ0 == 'y':
_count = _count + 1
print '######'
print '###### iteration number ' + str(_count)
print '######'
lsc.util.delete("sn.fits,residual.fits,snfit.fits,tmp.fits")
checkorder = 'yes'
while checkorder == 'yes':
if not _interactive or _xord and _yord:
if _xord and _yord:
xbgord0 = _xord
ybgord0 = _yord
else:
xbgord0 = _xbgord0
ybgord0 = _ybgord0
else:
xbgord0 = raw_input('>>> Order of function in x for bg fit [' + str(_xbgord0) + '] ? ')
if not xbgord0:
xbgord0 = _xbgord0
else:
_xbgord0 = xbgord0
ybgord0 = raw_input('>>> Order of function in x for bg fit [' + str(_ybgord0) + '] ? ')
if not ybgord0:
ybgord0 = _ybgord0
else:
_ybgord0 = ybgord0
try:
float(xbgord0)
float(ybgord0)
checkorder = 'no'
except:
print 'WARNING: value not valid !!'
checkorder = 'yes'
iraf.imsurfit("skyfit", "tmp", regions="all", xorder=xbgord0, yorder=ybgord0)
midpt = np.mean(fits.getdata("tmp.fits"))
iraf.imarith("original", "-", "tmp", "sn", calctype="r", pixtype="r")
iraf.imarith("sn.fits", "+", midpt, "sn.fits")
lsc.util.delete("skyfit.fits")
apori1, apori2, apori3, apmag1, apmag2, apmag3, dapmag1, dapmag2, dapmag3, fitmag, truemag, magerr, centx, centy = \
lsc.lscsnoopy.fitsn( img, psfimage, img + '.sn.coo', _recenter, fwhm0, 'original', 'sn',
'residual', _show, _interactive, _dmax, _dmin, z11, z22, midpt, _size, apco0)
print _numiter, _count
if _interactive:
if not _numiter:
answ0 = raw_input(">>> Iterate on background [y/n] [y] ?")
if not answ0: answ0 = 'y'
elif _count >= _numiter:
answ0 = 'n'
else:
answ0 = 'y'
else:
if _count >= _numiter:
answ0 = 'n'
else:
answ0 = 'y'
print "***************************************************************************"
print "#id x_ori y_ori x y ap_ori ap_bgsub fit_mag err_art err_fit"
for i in range(len(fitmag)):
print "SN", i, str(centx[i] + x1 - 1), str(centy[i] + y1 - 1), str(centx[i]), str(
centy[i]), " ", str(apori3[i]), " ", str(apmag3[i]), " ", str(truemag[i]), " ", str(
arterr), " ", str(magerr[i])
print "**************************************************************************"
########## AGGIUSTAMENTO MANUALE ###############
newmag = list(array(truemag))
if not _interactive:
answ0 = 'n'
else:
answ0 = raw_input(">>> Not yet happy ? Do you want to adjust manually stellar peak ? [y/n] [n] ")
if not answ0:
answ0 = 'n'
elif answ0 == 'yes':
answ0 = 'y'
dmag0 = 0
while answ0 == 'y':
checkdm = 'yes'
while checkdm == 'yes':
if len(truemag) > 1: print "!!!! WARNING: all components scaled accordingly !!!!"
_dmag0 = raw_input(">>> D(mag) adjustment (positive=fainter) [" + str(dmag0) + "]")
if _dmag0: dmag0 = _dmag0
try:
float(dmag0)
checkdm = 'no'
except:
checkdm = 'yes'
apori1, apori2, apori3, apmag1, apmag2, apmag3, fitmag, truemag, magerr, centx, centy, newmag = \
lsc.lscsnoopy.manusn(img, psfimage, dmag0, apori1, apori2, apori3, apmag1, apmag2, apmag3,
fitmag, truemag, magerr, centx, centy, z11, z22, midpt, _size, fwhm0,
x1, y1, arterr)
try:
dmag0 = newmag[0] - truemag[0]
except:
dmag0 = newmag[0]
answ0 = raw_input(">>> again ? [y/n] [y] ")
if not answ0:
answ0 = 'y'
elif answ0 == 'yes':
answ0 = 'y'
truemag = list(array(newmag))
#
#### ESPERIMENTO DI STELLE ARTIFICIALI AL VOLO ############################
#
if not _interactive:
answ0 = 'n'
else:
answ0 = raw_input(">>> Errors estimate (through artificial star experiment ?) [y/n] [y] ")
if not answ0:
answ0 = 'y'
elif answ0 == 'yes':
answ0 = 'y'
if answ0 == 'y':
leng0 = 4
try:
_arterr2, _arterr = lsc.lscsnoopy.errore(img, 'artlist.coo', size, truemag, fwhm0, leng0, False,
False, _numiter, z11, z22, midpt, nax, nay, xbgord0,
ybgord0, _recenter, apco0, _dmax, _dmin)
except:
print '\n### warningstamp size too small: artificail error = 0 '
_arterr2, _arterr = 0.0, 0.0
if _interactive:
arterr = raw_input("arterr ? [%6.6s] " % (str(_arterr)))
if not arterr: arterr = _arterr
else:
arterr = _arterr
else:
arterr = 0.0
####################### CHIUDI TUTTO ###################################
#
#fine:
if DM:
print 'different image with template PSF: apply DM : '+str(DM)
print "***************************************************************************"
print "#id x_ori y_ori x y ap_ori ap_bgsub fit_mag err_art err_fit"
print "# id ap_original ap_bgsub fit_mag err_art err_fit" #, >> nome0//".ec"
print "# SN_FIT " #, >> nome0//".ec"
print "# id ap_ori ap-bg fit_mag" #, >> nome0//".ec"
for i in range(len(fitmag)):
print "SN", i, str(centx[i] + x1 - 1), str(centy[i] + y1 - 1), str(centx[i]), str(
centy[i]), " ", str(apori3[i]), " ", str(apmag3[i]), " ", str(truemag[i]), " ", str(
arterr), " ", str(magerr[i])
if truemag[i] == 'INDEF':
truemag[i], arterr, magerr[i] = 9999, 0.0, 0.0
if apmag3[i] == 'INDEF':
apmag3[i] = 9999
headers = {'PSFX' + str(i + 1): (str(centx[i] + x1 - 1), 'x pos psf mag'),
'PSFY' + str(i + 1): (str(centy[i] + y1 - 1), 'y pos psf mag'),
'PSFMAG' + str(i + 1): (str(float(truemag[i]) - DM), 'psf magnitude'),
'PSFDMAG' + str(i + 1): (str(max(arterr, magerr[i])), 'psf mag error'),
'APMAG' + str(i + 1): (str(apmag3[i]), 'ap mag after bgsub'),
'DAPMAG' + str(i + 1): (str(dapmag3[i]), 'ap mag error')}
lsc.util.updateheader(img + '.sn2.fits', 0, headers)
lsc.util.delete("apori")
lsc.util.delete("sec")
lsc.util.delete("skyfit.fits")
lsc.util.delete("sn.fits")
lsc.util.delete("artsn.fits")
lsc.util.delete("artsky.fits,artbgs.fits")
lsc.util.delete("bg.fits,bgs.fits")
lsc.util.delete("tmp*")
lsc.util.delete(img + ".sn.*")
os.system('mv original.fits ' + img + '.og.fits')
os.system('mv residual.fits ' + img + '.rs.fits')
try:
lsc.mysqldef.updatevalue('photlco', 'psfmag', truemag[0] - DM, string.split(img, '/')[-1] + '.fits')
lsc.mysqldef.updatevalue('photlco', 'psfdmag', max(arterr, magerr[0]),
string.split(img, '/')[-1] + '.fits')
lsc.mysqldef.updatevalue('photlco', 'psfx', centx[0] + x1 - 1, string.split(img, '/')[-1] + '.fits')
lsc.mysqldef.updatevalue('photlco', 'psfy', centy[0] + y1 - 1, string.split(img, '/')[-1] + '.fits')
lsc.mysqldef.updatevalue('photlco', 'apmag', apmag3[0], string.split(img, '/')[-1] + '.fits')
lsc.mysqldef.updatevalue('photlco', 'dapmag', dapmag3[0], string.split(img, '/')[-1] + '.fits')
# Update the aperture correction to reflect what is actually applied to the data
# (difference imaging can change the sn2 file leading to a different aperture correction)
lsc.mysqldef.updatevalue('photlco', 'apercorr', apco0, string.split(img, '/')[-1] + '.fits')
except:
print 'module mysqldef not found'
else:
print 'already done'
else:
print '####\n#### WARNING: empty space in the list !!\n####'
|
LCOGTREPO_NAMElcogtsnpipePATH_START.@lcogtsnpipe_extracted@lcogtsnpipe-master@trunk@bin@lscsn.py@.PATH_END.py
|
{
"filename": "cris.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/ace/cris.py",
"type": "Python"
}
|
from .load import load
from pytplot import options
from pyspedas.utilities.datasets import find_datasets
# This routine was originally in ace/__init__.py, until being moved to its own file.
# Please refer to __init__.py if you need to see the revision history before it was moved.
def cris(trange=None,
         datatype='h2',
         prefix='',
         suffix='',
         get_support_data=False,
         varformat=None,
         varnames=None,
         downloadonly=False,
         notplot=False,
         no_update=False,
         time_clip=False,
         force_download=False):
    """
    Load data from the ACE Cosmic Ray Isotope Spectrometer (CRIS)

    Parameters
    ----------
    trange : list of str
        time range of interest [starttime, endtime] with the format
        'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
        ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
        Default: ['2018-11-5', '2018-11-6']
    datatype: str
        Data type; Valid options::
            h2: (default) 1-Hour Level 2 Data
            h3: Daily-averaged Level 2 Data
        Default: 'h2'
    prefix: str
        The tplot variable names will be given this prefix. By default,
        no prefix is added.
        Default: ''
    suffix: str
        The tplot variable names will be given this suffix. By default,
        no suffix is added.
        Default: ''
    get_support_data: bool
        Data with an attribute "VAR_TYPE" with a value of "support_data"
        will be loaded into tplot. By default, only loads in data with a
        "VAR_TYPE" attribute of "data".
        Default: False
    varformat: str
        The file variable formats to load into tplot. Wildcard character
        "*" is accepted. By default, all variables are loaded in.
        Default: None
    varnames: list of str
        List of variable names to load (if not specified,
        all data variables are loaded)
        Default: []
    downloadonly: bool
        Set this flag to download the CDF files, but not load them into
        tplot variables
        Default: False
    notplot: bool
        Return the data in hash tables instead of creating tplot variables
        Default: False
    no_update: bool
        If set, only load data from your local cache
        Default: False
    time_clip: bool
        Time clip the variables to exactly the range specified in the trange keyword
        Default: False
    force_download: bool
        Download file even if local version is more recent than server version
        Default: False

    Returns
    ----------
    list of str
        List of the tplot variables created.

    Examples
    ----------
    >>> import pyspedas
    >>> from pytplot import tplot
    >>> cris_vars = pyspedas.projects.ace.cris(trange=['2018-11-5', '2018-11-6'])
    >>> tplot(['flux_B', 'flux_C', 'flux_N', 'flux_O', 'flux_F', 'flux_Ne'])
    """
    # Resolve the documented list defaults here rather than in the signature:
    # mutable default arguments are shared across calls and could be mutated
    # downstream, silently changing the default for subsequent callers.
    if trange is None:
        trange = ['2018-11-5', '2018-11-6']
    if varnames is None:
        varnames = []
    return load(trange=trange, instrument='cris', datatype=datatype, prefix=prefix, suffix=suffix, get_support_data=get_support_data,
                varformat=varformat, downloadonly=downloadonly, notplot=notplot, no_update=no_update, varnames=varnames,
                time_clip=time_clip, force_download=force_download)
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@ace@cris.py@.PATH_END.py
|
{
"filename": "adclass.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/gemini_instruments/gpi/adclass.py",
"type": "Python"
}
|
from astrodata import astro_data_tag, astro_data_descriptor, TagSet
from ..gemini import AstroDataGemini
class AstroDataGpi(AstroDataGemini):
    """AstroData class for data taken with the Gemini Planet Imager (GPI).

    Provides GPI-specific tags and descriptors, and remaps generic
    descriptor names to GPI FITS keywords via the private keyword dict.
    """
    # Maps generic descriptor names to the GPI-specific FITS keywords.
    # (The double-underscore name is deliberate: each AstroData subclass
    # contributes its own name-mangled keyword dictionary.)
    __keyword_dict = dict(array_section='DATASEC',
                          detector_section='DATASEC',
                          exposure_time='ITIME',
                          filter='IFSFILT',
                          focal_plane_mask='OCCULTER',
                          pupil_mask='APODIZER')
    @classmethod
    def read(cls, source):
        """
        Read a GPI dataset, renaming the ('DQ', 3) extension to ('SCI', 2)
        on the fly so the bad-pixel mask is associated correctly.
        """
        def gpi_parser(hdu):
            # Only the mislabelled BPM extension is touched; the rename is
            # recorded in the header comment for traceability.
            if hdu.header.get('EXTNAME') == 'DQ' and hdu.header.get('EXTVER') == 3:
                hdu.header['EXTNAME'] = ('SCI', 'BPM renamed by AstroData')
                hdu.header['EXTVER'] = (int(2), 'BPM renamed by AstroData')
        return super().read(source, extname_parser=gpi_parser)
    @staticmethod
    def _matches_data(source):
        # Claim any dataset whose primary header reports INSTRUME = GPI.
        return source[0].header.get('INSTRUME', '').upper() == 'GPI'
    @astro_data_tag
    def _tag_instrument(self):
        # All matching data carries the instrument tag.
        return TagSet({'GPI'}, ())
    @astro_data_tag
    def _tag_disperser(self):
        # Wollaston prism -> polarimetry; dispersing prism -> IFU
        # spectroscopy. Any other DISPERSR value yields no tag (None).
        disp = self.phu.get('DISPERSR', '')
        if disp.startswith('DISP_WOLLASTON'):
            return TagSet({'POL'}, ())
        elif disp.startswith('DISP_PRISM'):
            return TagSet({'SPECT', 'IFU'}, ())
    @astro_data_descriptor
    def dec(self):
        """
        Returns the Declination of the center of the field in degrees.
        It coincides with the position of the target, so that is used since
        the WCS in GPI data is completely bogus. For code re-used, use
        target_dec() if you really want the position of the target rather
        than the center of the field.

        Returns
        -------
        float
            declination in degrees
        """
        return self.target_dec(offset=True, icrs=True)
    @astro_data_descriptor
    def exposure_time(self):
        """
        Returns the exposure time in seconds.

        Returns
        -------
        float
            Exposure time, or None if the keyword is missing/negative.
        """
        # The ITIME keyword is in the extension HDUs!
        exposure_time = self.hdr.get('ITIME', -1)[0]
        if exposure_time < 0:
            return None
        if 'PREPARED' in self.tags:
            return exposure_time
        else:
            # Raw data: per-coadd ITIME is scaled by the number of coadds.
            return exposure_time * self.coadds()
    @astro_data_descriptor
    def filter_name(self, stripID=False, pretty=False):
        """
        Returns the name of the filter(s) used. The component ID can be
        removed with either 'stripID' or 'pretty'. If 'pretty' is True,
        'IFSFILT' is stripped from the start of the name.

        Parameters
        ----------
        stripID : bool
            If True, removes the component ID and returns only the name of
            the filter.
        pretty : bool
            Strips the component ID and the ``IFSFILT_`` prefix.

        Returns
        -------
        str
            The name of the filter combination with or without the component ID.
        """
        filter_name = self._may_remove_component('IFSFILT', stripID, pretty)
        if pretty:
            filter_name = filter_name.replace('IFSFILT_', '')
        return filter_name
    @astro_data_descriptor
    def ra(self):
        """
        Returns the Right Ascension of the center of the field in degrees.
        It coincides with the position of the target, so that is used since
        the WCS in GPI data is completely bogus. For code re-used, use
        target_ra() if you really want the position of the target rather
        than the center of the field.

        Returns
        -------
        float
            right ascension in degrees
        """
        return self.target_ra(offset=True, icrs=True)
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@gemini_instruments@gpi@adclass.py@.PATH_END.py
|
{
"filename": "CustomHotRegion_Accreting.py",
"repo_name": "xpsi-group/xpsi",
"repo_path": "xpsi_extracted/xpsi-main/examples/examples_modeling_tutorial/modules/CustomHotRegion_Accreting.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 16:46:36 2024
@author: bas
"""
import xpsi
from xpsi import Parameter
import numpy as np
class CustomHotRegion_Accreting(xpsi.HotRegion):
    """Custom implementation of HotRegion. Accreting Atmosphere model by
    Anna Bobrikova. The parameters are ordered I(E < mu < tau < tbb < te).
    E is energy.
    mu is cos of zenith angle.
    tau is the optical depth of the comptom slab.
    tbb is the black body temperature.
    te is temperature of the electron gas.
    """
    # Geometric parameters required by HotRegion plus the three atmosphere
    # parameters (tbb, te, tau) added by this subclass.
    required_names = ['super_colatitude',
                      'super_radius',
                      'phase_shift',
                      'super_tbb',
                      'super_te',
                      'super_tau']
    # Optional omit/cede geometry, and cede-region atmosphere parameters
    # (the latter are only instantiated when ``cede=True``).
    optional_names = ['omit_colatitude',
                      'omit_radius',
                      'omit_azimuth',
                      'cede_colatitude',
                      'cede_radius',
                      'cede_azimuth',
                      'cede_tbb',
                      'cede_te',
                      'cede_tau']
    def __init__(self,
                 bounds,
                 values,
                 symmetry = 'azimuthal_invariance',
                 interpolator = 'split',
                 omit = False,
                 cede = False,
                 concentric = False,
                 sqrt_num_cells = 32,
                 min_sqrt_num_cells = 10,
                 max_sqrt_num_cells = 80,
                 num_rays = 200,
                 num_leaves = 64,
                 num_phases = None,
                 phases = None,
                 do_fast = False,
                 fast_sqrt_num_cells = 16,
                 fast_min_sqrt_num_cells = 4,
                 fast_max_sqrt_num_cells = 16,
                 fast_num_rays = 100,
                 fast_num_leaves = 32,
                 fast_num_phases = None,
                 fast_phases = None,
                 is_antiphased = False,
                 custom = None,
                 image_order_limit = None,
                 **kwargs
                 ):
        """Build the custom atmosphere Parameters and delegate to HotRegion.

        Constructs tbb/te/tau Parameter instances for the superseding
        region (and for the ceding region when ``cede=True``) and passes
        them to ``xpsi.HotRegion`` as the ``custom`` parameter list.

        NOTE(review): any ``custom`` argument supplied by the caller is
        overwritten below with the locally built list -- confirm this is
        intentional.
        """
        doc = """
        tbb
        """
        # Blackbody seed-photon temperature, dimensionless: Tbb(keV)/511 keV.
        super_tbb = Parameter('super_tbb',
                              strict_bounds = (0.001, 0.003), #tbb = Tbb(keV)/511keV
                              bounds = bounds.get('super_tbb', None),
                              doc = doc,
                              symbol = r'tbb',
                              value = values.get('super_tbb', None))
        doc = """
        te
        """
        # Electron-gas temperature, dimensionless: Te(keV)*1000/511 keV.
        super_te = Parameter('super_te',
                             strict_bounds = (40., 200.), #te = Te(keV)*1000/511keV
                             bounds = bounds.get('super_te', None),
                             doc = doc,
                             symbol = r'te',
                             value = values.get('super_te', None))
        doc = """
        tau
        """
        # Optical depth of the Comptonizing slab (dimensionless).
        super_tau = Parameter('super_tau',
                              strict_bounds = (0.5, 3.5),
                              bounds = bounds.get('super_tau', None),
                              doc = doc,
                              symbol = r'tau',
                              value = values.get('super_tau', None))
        custom = [super_tbb, super_te, super_tau]
        if cede:
            # Mirror the three atmosphere parameters for the ceding region.
            doc = """
            cede_tbb
            """
            cede_tbb = Parameter('cede_tbb',
                                 strict_bounds = (0.001, 0.003),
                                 bounds = bounds.get('cede_tbb', None),
                                 doc = doc,
                                 symbol = r'cede_tbb',
                                 value = values.get('cede_tbb', None))
            doc = """
            cede_te
            """
            cede_te = Parameter('cede_te',
                                strict_bounds = (40., 200.),
                                bounds = bounds.get('cede_te', None),
                                doc = doc,
                                symbol = r'cede_te',
                                value = values.get('cede_te', None))
            doc = """
            cede_tau
            """
            cede_tau = Parameter('cede_tau',
                                 strict_bounds = (0.5, 3.5),
                                 bounds = bounds.get('cede_tau', None),
                                 doc = doc,
                                 symbol = r'cede_tau',
                                 value = values.get('cede_tau', None))
            custom += [cede_tbb,cede_te,cede_tau]
        # All remaining configuration is handled by the base class.
        super(CustomHotRegion_Accreting, self).__init__(
                bounds,
                values,
                symmetry = symmetry,
                interpolator = interpolator,
                omit = omit,
                cede = cede,
                concentric = concentric,
                sqrt_num_cells = sqrt_num_cells,
                min_sqrt_num_cells = min_sqrt_num_cells,
                max_sqrt_num_cells = max_sqrt_num_cells,
                num_rays = num_rays,
                num_leaves = num_leaves,
                num_phases = num_phases,
                phases = phases,
                do_fast = do_fast,
                fast_sqrt_num_cells = fast_sqrt_num_cells,
                fast_min_sqrt_num_cells = fast_min_sqrt_num_cells,
                fast_max_sqrt_num_cells = fast_max_sqrt_num_cells,
                fast_num_rays = fast_num_rays,
                fast_num_leaves = fast_num_leaves,
                fast_num_phases = fast_num_phases,
                fast_phases = fast_phases,
                is_antiphased = is_antiphased,
                custom = custom,
                image_order_limit = image_order_limit,
                **kwargs
                )
    def _HotRegion__compute_cellParamVecs(self):
        """Override of the name-mangled ``HotRegion.__compute_cellParamVecs``.

        Fills the per-cell parameter vectors with (te, tbb, tau) -- the
        layout expected by the accreting-atmosphere interpolator -- for the
        superseding region and, if present, the ceding region.
        """
        # A cell radiates iff it has non-zero projected area.
        self._super_radiates = np.greater(self._super_cellArea, 0.0).astype(np.int32)
        self._super_cellParamVecs = np.ones((self._super_radiates.shape[0],
                                             self._super_radiates.shape[1],
                                             3),
                                            dtype=np.double)
        # Parameter-vector layout: index 0 = te, 1 = tbb, 2 = tau.
        self._super_cellParamVecs[...,0] *= self['super_te']
        self._super_cellParamVecs[...,1] *= self['super_tbb']
        self._super_cellParamVecs[...,2] *= self['super_tau']
        try:
            self._cede_radiates = np.greater(self._cede_cellArea, 0.0).astype(np.int32)
        except AttributeError:
            # No ceding region configured; nothing more to do.
            pass
        else:
            self._cede_cellParamVecs = np.ones((self._cede_radiates.shape[0],
                                                self._cede_radiates.shape[1],
                                                3), dtype=np.double)
            self._cede_cellParamVecs[...,0] *= self['cede_te']
            self._cede_cellParamVecs[...,1] *= self['cede_tbb']
            self._cede_cellParamVecs[...,2] *= self['cede_tau']
|
xpsi-groupREPO_NAMExpsiPATH_START.@xpsi_extracted@xpsi-main@examples@examples_modeling_tutorial@modules@CustomHotRegion_Accreting.py@.PATH_END.py
|
{
"filename": "version.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/version.py",
"type": "Python"
}
|
# THIS FILE IS GENERATED FROM SCIPY SETUP.PY
short_version = '1.2.3'
full_version = '1.2.3'
git_revision = 'bc29b5bf5f8a1f5370407a4e33005277a53ef40b'
release = True

# Released builds report the short form; development builds expose the
# full version string instead.
version = short_version if release else full_version
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@version.py@.PATH_END.py
|
{
"filename": "_colorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/waterfall/hoverlabel/font/_colorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``colorsrc`` property of ``waterfall.hoverlabel.font``."""

    def __init__(
        self, plotly_name="colorsrc", parent_name="waterfall.hoverlabel.font", **kwargs
    ):
        # Edit type defaults to "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@waterfall@hoverlabel@font@_colorsrc.py@.PATH_END.py
|
{
"filename": "_legend.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/ohlc/_legend.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the ``legend`` property of ``ohlc`` traces."""

    def __init__(self, plotly_name="legend", parent_name="ohlc", **kwargs):
        # Pull validator options out of kwargs, falling back to the defaults.
        dflt = kwargs.pop("dflt", "legend")
        edit_type = kwargs.pop("edit_type", "style")
        super(LegendValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dflt=dflt,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@ohlc@_legend.py@.PATH_END.py
|
{
"filename": "test_foregrounds.py",
"repo_name": "HERA-Team/hera_sim",
"repo_path": "hera_sim_extracted/hera_sim-main/hera_sim/tests/test_foregrounds.py",
"type": "Python"
}
|
import pytest
import numpy as np
from astropy import units
from contextlib import ExitStack as does_not_raise
from uvtools.utils import FFT, fourier_freqs
from hera_sim import DATA_PATH, foregrounds
from hera_sim.interpolators import Beam, Tsky
@pytest.fixture(scope="function")
def omega_p():
    """Beam-area interpolator built from the bundled H1C beam polynomial file."""
    return Beam(DATA_PATH / "HERA_H1C_BEAM_POLY.npy")
@pytest.fixture(scope="function")
def freqs():
    """100 evenly spaced frequencies covering [0.1, 0.2), endpoint excluded."""
    return np.linspace(0.1, 0.2, num=100, endpoint=False)
@pytest.fixture(scope="function")
def lsts():
    """1000 LSTs uniformly spanning [0, 2*pi] radians (endpoint included)."""
    return np.linspace(0, 2 * np.pi, num=1000)
@pytest.fixture(scope="function")
def Tsky_mdl():
    """Sky-temperature interpolator built from the bundled HERA Tsky data."""
    return Tsky(DATA_PATH / "HERA_Tsky_Reformatted.npz")
@pytest.mark.parametrize("model", ["pntsrc", "diffuse"])
def test_foreground_shape(freqs, lsts, Tsky_mdl, omega_p, model):
    """Simulated foreground waterfalls have shape (n_lsts, n_freqs)."""
    zero_bl = [0, 0, 0]
    if model == "diffuse":
        vis = foregrounds.diffuse_foreground(
            lsts=lsts, freqs=freqs, bl_vec=zero_bl, Tsky_mdl=Tsky_mdl, omega_p=omega_p
        )
    else:
        vis = foregrounds.pntsrc_foreground(lsts=lsts, freqs=freqs, bl_vec=zero_bl)
    assert vis.shape == (lsts.size, freqs.size)
@pytest.mark.parametrize("model", ["pntsrc", "diffuse"])
def test_foreground_autos_are_real(freqs, lsts, Tsky_mdl, omega_p, model):
    """Auto-correlations (zero baseline) must have no imaginary component."""
    auto_bl = [0, 0, 0]
    if model == "diffuse":
        vis = foregrounds.diffuse_foreground(
            lsts=lsts, freqs=freqs, bl_vec=auto_bl, Tsky_mdl=Tsky_mdl, omega_p=omega_p
        )
    else:
        vis = foregrounds.pntsrc_foreground(lsts=lsts, freqs=freqs, bl_vec=auto_bl)
    assert np.allclose(vis.imag, 0)
@pytest.mark.parametrize("orientation", ["east", "west", "north"])
@pytest.mark.parametrize(
    "model, expectation",
    [("pntsrc", pytest.raises(AssertionError)), ("diffuse", does_not_raise())],
)
def test_foreground_orientation(
    freqs, lsts, Tsky_mdl, omega_p, model, orientation, expectation
):
    """Check that the sign of the peak fringe rate matches baseline orientation.

    Expected signs: positive for an eastward baseline, negative for westward,
    ~zero for a purely north-south baseline. Point-source foregrounds are
    expected to fail this (AssertionError), except for the eastward case —
    see the in-line hack below.
    """
    baselines = {"east": [100, 0, 0], "west": [-100, 0, 0], "north": [0, 100, 0]}
    bl_vec = baselines[orientation]
    fringe_filter_kwargs = {"fringe_filter_type": "gauss", "fr_width": 1e-5}
    kwargs = dict(freqs=freqs, lsts=lsts, bl_vec=bl_vec)
    # TODO: Update this to avoid repeated code?
    # Rebind `model` from a label string to the actual simulation callable.
    if model == "diffuse":
        model = foregrounds.diffuse_foreground
        kwargs.update(
            dict(
                Tsky_mdl=Tsky_mdl,
                omega_p=omega_p,
                fringe_filter_kwargs=fringe_filter_kwargs,
            )
        )
    elif model == "pntsrc":
        # FIXME: point source foregrounds do not behave correctly in fringe-rate.
        model = foregrounds.pntsrc_foreground
        # Hack to make the tests pass for the eastward-orientation. This should
        # be extra motivation for really digging into how we're simulating point
        # source visibilities, since it doesn't consistently give the wrong
        # fringe-rate behavior.
        if orientation == "east":
            expectation = does_not_raise()
    vis = model(**kwargs)
    # Transform time -> fringe rate, then find the peak fringe rate per channel.
    vis_fft = FFT(vis, axis=0, taper="blackmanharris")
    fringe_rates = fourier_freqs(lsts * units.day.to("s") * units.rad.to("cycle"))
    max_frs = fringe_rates[np.argmax(np.abs(vis_fft), axis=0)]
    # Reduce peak fringe rates to their signs; for "north" the peak should sit
    # near zero fringe rate, so skip normalization to avoid dividing by ~0.
    divisor = 1 if orientation == "north" else np.abs(max_frs)
    fr_signs = max_frs / divisor
    expected_sign = {"east": 1, "west": -1, "north": 0}[orientation]
    with expectation:
        assert np.allclose(fr_signs, expected_sign, atol=1e-4)
@pytest.mark.parametrize("model", ["pntsrc", "diffuse"])
@pytest.mark.xfail
def test_foreground_conjugation(freqs, lsts, Tsky_mdl, omega_p, model):
    """Reversing the baseline should conjugate the visibility: V_ij = V*_ji."""
    bl_vec = np.array([100.0, 0, 0])
    kwargs = dict(freqs=freqs, lsts=lsts, bl_vec=bl_vec)
    # Rebind `model` from a label string to the actual simulation callable.
    if model == "diffuse":
        model = foregrounds.diffuse_foreground
        kwargs.update(
            Tsky_mdl=Tsky_mdl,
            omega_p=omega_p,
            delay_filter_kwargs={"delay_filter_type": "tophat"},
            fringe_filter_kwargs={"fringe_filter_type": "tophat"},
        )
    elif model == "pntsrc":
        model = foregrounds.pntsrc_foreground
    conj_kwargs = {**kwargs, "bl_vec": -bl_vec}
    # Seed both simulations identically so they differ only by baseline sign.
    vis = model(**kwargs, rng=np.random.default_rng(0))
    conj_vis = model(**conj_kwargs, rng=np.random.default_rng(0))
    assert np.allclose(vis, conj_vis.conj())  # Assert V_ij = V*_ji
def test_diffuse_foreground_exception_no_tsky_mdl(freqs, lsts, omega_p):
    """Omitting the sky-temperature model raises an informative ValueError."""
    with pytest.raises(ValueError) as err:
        foregrounds.diffuse_foreground(
            freqs=freqs, lsts=lsts, bl_vec=[0, 0, 0], Tsky_mdl=None, omega_p=omega_p
        )
    assert "sky temperature model must be" in err.value.args[0]
def test_diffuse_foreground_exception_no_omega_p(freqs, lsts, Tsky_mdl):
    """Omitting the beam-area model raises an informative ValueError."""
    with pytest.raises(ValueError) as err:
        foregrounds.diffuse_foreground(
            freqs=freqs, lsts=lsts, bl_vec=[0, 0, 0], Tsky_mdl=Tsky_mdl, omega_p=None
        )
    assert "beam area array or interpolation object" in err.value.args[0]
|
HERA-TeamREPO_NAMEhera_simPATH_START.@hera_sim_extracted@hera_sim-main@hera_sim@tests@test_foregrounds.py@.PATH_END.py
|
{
"filename": "ticket.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/router/auth/ticket.py",
"type": "Python"
}
|
#####################################################################################
#
# Copyright (c) typedef int GmbH
# SPDX-License-Identifier: EUPL-1.2
#
#####################################################################################
from typing import Union, Dict, Any
from txaio import make_logger, as_future
from autobahn.wamp.types import Accept, Deny, HelloDetails, Challenge, TransportDetails
from crossbar.router.auth.pending import PendingAuth
from crossbar.interfaces import IRealmContainer, IPendingAuth
__all__ = ('PendingAuthTicket', )
class PendingAuthTicket(PendingAuth):
    """
    Pending authentication information for WAMP-Ticket authentication.
    """
    log = make_logger()
    AUTHMETHOD = 'ticket'
    def __init__(self, pending_session_id: int, transport_details: TransportDetails, realm_container: IRealmContainer,
                 config: Dict[str, Any]):
        super(PendingAuthTicket, self).__init__(
            pending_session_id,
            transport_details,
            realm_container,
            config,
        )
        # The secret/ticket the authenticating principal will need to provide (filled only in static mode).
        self._signature = None
    def hello(self, realm: str, details: HelloDetails) -> Union[Accept, Deny, Challenge]:
        """
        Process the client's HELLO for WAMP-Ticket.

        Returns either a Challenge (the client must now send its ticket),
        a Deny, or — for the "dynamic"/"function" authenticator types — a
        deferred that resolves to one of those once the authenticator is
        initialized.
        """
        # remember the realm the client requested to join (if any)
        self._realm = realm
        # remember the authid the client wants to identify as (if any)
        self._authid = details.authid
        # use static principal database from configuration
        if self._config['type'] == 'static':
            self._authprovider = 'static'
            if self._authid in self._config.get('principals', {}):
                principal = self._config['principals'][self._authid]
                # NOTE(review): this writes into the configured principal dict
                # in place — verify that sharing authextra across sessions via
                # the config object is intended.
                principal['extra'] = details.authextra
                error = self._assign_principal(principal)
                if error:
                    return error
                # now set signature as expected for WAMP-Ticket
                self._signature = principal['ticket']
                return Challenge(self._authmethod)
            else:
                return Deny(message='no principal with authid "{}" exists'.format(self._authid))
        # use configured procedure to dynamically get a ticket for the principal
        elif self._config['type'] == 'dynamic':
            self._authprovider = 'dynamic'
            init_d = as_future(self._init_dynamic_authenticator)
            # Once the authenticator is set up (or failed), either propagate
            # the error or issue the ticket challenge.
            def init(_error):
                if _error:
                    return _error
                self._session_details['authmethod'] = self._authmethod  # from AUTHMETHOD, via base
                self._session_details['authextra'] = details.authextra
                return Challenge(self._authmethod)
            init_d.addBoth(init)
            return init_d
        elif self._config['type'] == 'function':
            self._authprovider = 'function'
            init_d = as_future(self._init_function_authenticator)
            def init(_error):
                if _error:
                    return _error
                self._session_details['authmethod'] = self._authmethod  # from AUTHMETHOD, via base
                self._session_details['authextra'] = details.authextra
                return Challenge(self._authmethod)
            init_d.addBoth(init)
            return init_d
        else:
            # should not arrive here, as config errors should be caught earlier
            return Deny(message='invalid authentication configuration (authentication type "{}" is unknown)'.format(
                self._config['type']))
    def authenticate(self, signature: str) -> Union[Accept, Deny]:
        """
        Verify the ticket the client sent in response to the challenge.

        Returns Accept/Deny for the "static" provider, or a deferred
        resolving to Accept/Deny for the "dynamic"/"function" providers.
        """
        # Shared success path for dynamic/function authenticators: normalize
        # the returned principal and accept the session.
        def on_authenticate_ok(principal):
            # backwards compatibility: dynamic ticket authenticator
            # was expected to return a role directly
            if isinstance(principal, str):
                principal = {'role': principal}
            error = self._assign_principal(principal)
            if error:
                return error
            return self._accept()
        def on_authenticate_error(err):
            return self._marshal_dynamic_authenticator_error(err)
        # WAMP-Ticket "static"
        if self._authprovider == 'static':
            # when doing WAMP-Ticket from static configuration, the ticket we
            # expect was previously stored in self._signature
            if signature == self._signature:
                # ticket was valid: accept the client
                self.log.debug("WAMP-Ticket: ticket was valid!")
                return self._accept()
            else:
                # ticket was invalid: deny client
                self.log.debug(
                    'WAMP-Ticket (static): expected ticket "{expected}"" ({expected_type}), but got "{sig}" ({sig_type})',
                    expected=self._signature,
                    expected_type=type(self._signature),
                    sig=signature,
                    sig_type=type(signature),
                )
                return Deny(message="ticket in static WAMP-Ticket authentication is invalid")
        # WAMP-Ticket "dynamic"
        elif self._authprovider == 'dynamic':
            self._session_details['ticket'] = signature
            assert self._authenticator_session
            d = self._authenticator_session.call(self._authenticator, self._realm, self._authid, self._session_details)
            d.addCallbacks(on_authenticate_ok, on_authenticate_error)
            return d
        # WAMP-Ticket "function"
        elif self._authprovider == 'function':
            self._session_details['ticket'] = signature
            auth_d = as_future(self._authenticator, self._realm, self._authid, self._session_details)
            auth_d.addCallbacks(on_authenticate_ok, on_authenticate_error)
            return auth_d
        else:
            # should not arrive here, as config errors should be caught earlier
            return Deny(message='invalid authentication configuration (authentication type "{}" is unknown)'.format(
                self._config['type']))
# Register PendingAuthTicket as a (virtual) implementation of IPendingAuth.
IPendingAuth.register(PendingAuthTicket)
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@router@auth@ticket.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.