metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "xla_ops_grad.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/compiler/jit/ops/xla_ops_grad.py",
"type": "Python"
}
|
"""Gradients for XLA ops."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflow.python.framework import ops
@ops.RegisterGradient("XlaClusterOutput")
def _XlaClusterOutputGrad(_, grad):
  """Rejects gradient computation through an XlaClusterOutput op.

  Args:
    _: The forward op (unused).
    grad: The incoming gradient (unused).

  Raises:
    RuntimeError: always; differentiating a graph built inside
      xla.compile() is prohibited, so gradient computation must be moved
      inside the compiled function.
  """
  del grad  # unused
  # Fix: the original implicit string concatenation was missing the space
  # between "degradation." and "Please", producing a garbled message.
  raise RuntimeError("Gradient computation of graph in xla.compile() is "
                     "prohibited because it can cause performance degradation. "
                     "Please move gradient computation inside xla.compile().")
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@compiler@jit@ops@xla_ops_grad.py@.PATH_END.py
|
{
"filename": "kde.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/stats/kde.py",
"type": "Python"
}
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = ["gaussian_kde"] # noqa: F822
def __dir__():
    """Expose only the public (deprecated) names of this shim module."""
    return __all__
def __getattr__(name):
    """Resolve attribute access through SciPy's deprecation machinery.

    Delegates to ``_sub_module_deprecation``, which presumably returns the
    object from the private ``scipy.stats._kde`` module while emitting a
    deprecation warning — behavior for unknown names is delegated to that
    helper.
    """
    return _sub_module_deprecation(sub_package="stats", module="kde",
                                   private_modules=["_kde"], all=__all__,
                                   attribute=name)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@stats@kde.py@.PATH_END.py
|
{
"filename": "functions.py",
"repo_name": "saberyoung/haffet",
"repo_path": "haffet_extracted/haffet-master/sdapy/models/risepl/functions.py",
"type": "Python"
}
|
def powerlaw_post_baseline(times, t_0=0, amplitude=25, alpha_r=2):
    """Evaluate a rising power law ``amplitude * (times - t_0) ** alpha_r``.

    Works elementwise when ``times`` supports array arithmetic.
    """
    elapsed = times - t_0
    return amplitude * elapsed ** alpha_r
def powerlaw_full(times, t_0=0, amplitude=25, alpha_r=2, c=0):
    """Piecewise rise model (cf. Miller et al.,
    https://ui.adsabs.harvard.edu/abs/2020ApJ...902...47M/abstract):
    a constant background ``c`` before the explosion time ``t_0`` and a
    power law plus background afterwards.

    Returns a plain list with one value per entry of ``times``.
    """
    return [
        c if t < t_0 else amplitude * (t - t_0) ** alpha_r + c
        for t in times
    ]
|
saberyoungREPO_NAMEhaffetPATH_START.@haffet_extracted@haffet-master@sdapy@models@risepl@functions.py@.PATH_END.py
|
{
"filename": "test_psf_runners.py",
"repo_name": "esheldon/ngmix",
"repo_path": "ngmix_extracted/ngmix-master/ngmix/tests/test_psf_runners.py",
"type": "Python"
}
|
import pytest
import numpy as np
import ngmix
from ngmix.runners import PSFRunner
from ngmix.guessers import GMixPSFGuesser, SimplePSFGuesser, CoellipPSFGuesser
from ngmix.em import EMFitter
from ngmix.admom import AdmomFitter
from ngmix.fitting import CoellipFitter, Fitter
from ._sims import get_ngauss_obs, get_psf_obs, get_model_obs
@pytest.mark.parametrize('guess_from_moms', [False, True])
@pytest.mark.parametrize('ngauss', [1, 2, 3, 4, 5])
def test_em_psf_runner_smoke(ngauss, guess_from_moms):
    """Smoke-test a PSFRunner driving the EM fitter."""
    rand = np.random.RandomState(8821)
    sim = get_psf_obs(rng=rand)
    observation = sim['obs']

    guesser = GMixPSFGuesser(
        rng=rand, ngauss=ngauss, guess_from_moms=guess_from_moms,
    )
    # this psf fit needs a tighter tolerance than the default
    fitter = EMFitter(tol=1.0e-5)

    runner = PSFRunner(fitter=fitter, guesser=guesser, ntry=2)
    result = runner.go(obs=observation)
    assert result['flags'] == 0
@pytest.mark.parametrize('with_psf_obs', [False, True])
@pytest.mark.parametrize('guess_from_moms', [False, True])
def test_em_psf_runner(with_psf_obs, guess_from_moms):
    """Run a PSFRunner with the EM fitter and verify the reconstruction.

    When with_psf_obs is set, the observation is an ordinary obs carrying
    its own psf obs; the runner is expected to fit the psf obs rather than
    the main obs.
    """
    rand = np.random.RandomState(8821)
    if with_psf_obs:
        sim = get_ngauss_obs(rng=rand, ngauss=1, noise=0.0, with_psf=True)
    else:
        sim = get_psf_obs(rng=rand)
    observation = sim['obs']

    guesser = GMixPSFGuesser(
        rng=rand, ngauss=3, guess_from_moms=guess_from_moms,
    )
    # this psf fit needs a tighter tolerance than the default
    fitter = EMFitter(tol=1.0e-5)

    runner = PSFRunner(fitter=fitter, guesser=guesser, ntry=2)
    result = runner.go(obs=observation)
    assert result['flags'] == 0

    # compare the fitted model image to the data, allowing for noise
    fit_image = result.make_image()
    target = observation.psf.image if with_psf_obs else observation.image
    tolerance = 0.001 / observation.jacobian.scale**2
    assert np.abs(fit_image - target).max() < tolerance
@pytest.mark.parametrize('guess_from_moms', [False, True])
@pytest.mark.parametrize('model', ["gauss", "turb"])
def test_simple_psf_runner_smoke(model, guess_from_moms):
    """Smoke-test a PSFRunner driving the simple Fitter."""
    rand = np.random.RandomState(3893)

    # a complex psf keeps the fit from becoming too degenerate
    sim = get_psf_obs(model=model, rng=rand)

    guesser = SimplePSFGuesser(rng=rand, guess_from_moms=guess_from_moms)
    fitter = Fitter(model=model)

    runner = PSFRunner(fitter=fitter, guesser=guesser, ntry=2)
    result = runner.go(obs=sim['obs'])
    assert result['flags'] == 0
@pytest.mark.parametrize('guess_from_moms', [False, True])
@pytest.mark.parametrize('model', ["gauss", "turb"])
def test_simple_psf_runner(model, guess_from_moms):
    """Run a PSFRunner with the simple Fitter and verify the reconstruction."""
    rand = np.random.RandomState(3893)

    # a complex psf keeps the fit from becoming too degenerate
    sim = get_psf_obs(model=model, rng=rand)

    guesser = SimplePSFGuesser(rng=rand, guess_from_moms=guess_from_moms)
    fitter = Fitter(model=model)

    runner = PSFRunner(fitter=fitter, guesser=guesser, ntry=2)
    result = runner.go(obs=sim['obs'])
    assert result['flags'] == 0

    # compare the fitted model image to the data, allowing for noise
    fit_image = result.make_image()
    observation = sim['obs']
    tolerance = 0.001 / observation.jacobian.scale**2
    assert np.abs(fit_image - observation.image).max() < tolerance
@pytest.mark.parametrize('guess_from_moms', [False, True])
@pytest.mark.parametrize('ngauss', [1, 2, 3, 4, 5])
def test_coellip_psf_runner_smoke(ngauss, guess_from_moms):
    """Smoke-test a PSFRunner driving the coelliptical fitter."""
    rand = np.random.RandomState(9321)

    # build a composite psf from three components so the high-ngauss fit
    # does not become too degenerate
    sim = get_psf_obs(rng=rand)
    sim2 = get_psf_obs(T=1.0, rng=rand)
    sim3 = get_psf_obs(T=2.0, rng=rand)
    combined_im = (
        sim['obs'].image + sim2['obs'].image + sim3['obs'].image
    )
    observation = sim['obs']
    observation.image = combined_im

    guesser = CoellipPSFGuesser(
        rng=rand, ngauss=ngauss, guess_from_moms=guess_from_moms,
    )
    fitter = CoellipFitter(ngauss=ngauss)

    runner = PSFRunner(fitter=fitter, guesser=guesser, ntry=2)
    result = runner.go(obs=observation)
    assert result['flags'] == 0
@pytest.mark.parametrize('with_psf_obs', [False, True])
@pytest.mark.parametrize('guess_from_moms', [False, True])
def test_coellip_psf_runner(with_psf_obs, guess_from_moms):
    """Run a PSFRunner with the coelliptical fitter and verify the result."""
    rand = np.random.RandomState(21)
    if with_psf_obs:
        sim = get_ngauss_obs(rng=rand, ngauss=1, noise=0.0, with_psf=True)
    else:
        sim = get_psf_obs(rng=rand)
    observation = sim['obs']

    ncomponents = 3
    guesser = CoellipPSFGuesser(
        rng=rand, ngauss=ncomponents, guess_from_moms=guess_from_moms,
    )
    # this psf fit needs extra retries to converge reliably
    fitter = CoellipFitter(ngauss=ncomponents)

    runner = PSFRunner(fitter=fitter, guesser=guesser, ntry=4)
    result = runner.go(obs=observation)
    assert result['flags'] == 0

    # compare the fitted model image to the data, allowing for noise
    fit_image = result.make_image()
    target = observation.psf.image if with_psf_obs else observation.image
    tolerance = 0.001 / observation.jacobian.scale**2
    assert np.abs(fit_image - target).max() < tolerance
@pytest.mark.parametrize('guess_from_moms', [False, True])
@pytest.mark.parametrize('ngauss', [1, 2, 3, 4, 5])
def test_admom_psf_runner_smoke(ngauss, guess_from_moms):
    """Smoke-test a PSFRunner driving the adaptive-moments fitter.

    Note the guesser always uses a single gaussian regardless of the
    ``ngauss`` parametrization, matching the original test.
    """
    rand = np.random.RandomState(5661)
    sim = get_psf_obs(rng=rand, model='gauss')
    observation = sim['obs']

    guesser = GMixPSFGuesser(
        rng=rand, ngauss=1, guess_from_moms=guess_from_moms,
    )
    fitter = AdmomFitter()

    runner = PSFRunner(fitter=fitter, guesser=guesser, ntry=2)
    result = runner.go(obs=observation)
    assert result['flags'] == 0
@pytest.mark.parametrize('with_psf_obs', [False, True])
@pytest.mark.parametrize('guess_from_moms', [False, True])
def test_admom_psf_runner(with_psf_obs, guess_from_moms):
    """Run a PSFRunner with the adaptive-moments fitter and verify the result.

    When with_psf_obs is set, the observation is an ordinary obs carrying
    its own psf obs; the runner is expected to fit the psf obs rather than
    the main obs.
    """
    rand = np.random.RandomState(8821)
    if with_psf_obs:
        sim = get_ngauss_obs(
            rng=rand, ngauss=1, noise=0.0, with_psf=True, psf_model='gauss',
        )
    else:
        sim = get_psf_obs(rng=rand, model='gauss')
    observation = sim['obs']

    guesser = GMixPSFGuesser(
        rng=rand, ngauss=1, guess_from_moms=guess_from_moms,
    )
    fitter = AdmomFitter()

    runner = PSFRunner(fitter=fitter, guesser=guesser, ntry=2)
    result = runner.go(obs=observation)
    assert result['flags'] == 0

    # compare the fitted model image to the data, allowing for noise
    fit_image = result.make_image()
    target = observation.psf.image if with_psf_obs else observation.image
    tolerance = 0.001 / observation.jacobian.scale**2
    assert np.abs(fit_image - target).max() < tolerance
@pytest.mark.parametrize('nband', [None, 3])
@pytest.mark.parametrize('nepoch', [None, 1, 3])
@pytest.mark.parametrize('set_result', [True, False])
def test_gaussmom_psf_runner(nband, nepoch, set_result):
    """Run a PSFRunner with GaussMom over scalar, list, and list-of-list obs."""
    rand = np.random.RandomState(8821)
    sim = get_model_obs(
        rng=rand, model='gauss', noise=0.1, nband=nband, nepoch=nepoch,
    )
    observation = sim['obs']

    fitter = ngmix.gaussmom.GaussMom(fwhm=1.2)
    runner = PSFRunner(fitter=fitter, set_result=set_result)
    result = runner.go(obs=observation)

    if nband is not None:
        # multi-band: nested lists of results/observations
        assert isinstance(result, list)
        assert isinstance(result[0], list)
        for obslist in observation:
            for single_obs in obslist:
                assert ('result' in single_obs.psf.meta) == set_result
    elif nepoch is not None:
        # single band, multiple epochs: a flat list
        assert isinstance(result, list)
        for single_obs in observation:
            assert ('result' in single_obs.psf.meta) == set_result
    else:
        # scalar observation: a single mapping-like result
        assert hasattr(result, 'keys')
        assert ('result' in observation.psf.meta) == set_result
|
esheldonREPO_NAMEngmixPATH_START.@ngmix_extracted@ngmix-master@ngmix@tests@test_psf_runners.py@.PATH_END.py
|
{
"filename": "j1407.py",
"repo_name": "mkenworthy/exorings",
"repo_path": "exorings_extracted/exorings-master/j1407.py",
"type": "Python"
}
|
''' custom routines for reading in J1407 data'''
from astropy.io import ascii
import numpy as np
from astropy.io import fits
def j1407_photom():
    """Read in J1407 SuperWASP photometry with errors.

    Returns:
        (time, flux, fluxe, camidx): observation times, detrended fluxes
        and flux errors, and the integer camera index parsed from the
        first three characters of each IMAGEID.
    """
    dfile = '1SWASP+J140747.93-394542.6-detrend.fits'
    fitcol = "SINVP_DETREND_010"
    polycol = "POLY1FIT"
    # Fix: the original opened the FITS file and never closed it. Use a
    # context manager, and materialize every returned column inside the
    # `with` block so nothing references (possibly memory-mapped) HDU data
    # after the file is closed.
    with fits.open(dfile) as hdu1:
        tgtdata = hdu1[1].data
        time = np.array(tgtdata['TIMEMHJD'])
        flux = tgtdata['FLUX2_DECORR']/tgtdata[polycol]/tgtdata[fitcol]
        fluxe = tgtdata['FLUX2_ERR_DECORR']/tgtdata[polycol]/tgtdata[fitcol]
        camidx = np.r_[[int(i[:3]) for i in tgtdata['IMAGEID']]]
    return (time, flux, fluxe, camidx)
def j1407_photom_binned(fin, phot_tmin, phot_tmax):
    """Read in binned J1407 photometry and clip it to an HJD window."""
    print ('reading in j1407 photometry from %s' % fin)
    binned = ascii.read(fin)

    print ('restricting photometry to HJD range %.1f to %.1f' % (phot_tmin, phot_tmax))
    keep = (binned['time'] > phot_tmin) * (binned['time'] < phot_tmax)
    time = binned['time'][keep]
    flux = binned['flux'][keep]
    flux_err = binned['flux_rms'][keep]

    print ('number of photometric points in J1407 light curve: %d' % time.size)
    return(time, flux, flux_err)
def j1407_gradients(fin):
    """Read in light-curve gradient measurements and normalise them."""
    print ('reading in gradients of light curve from %s' % fin)
    table = ascii.read(fin)
    # shift the relative times onto HJD
    times = table['col1'] + 54222.
    magnitudes = np.abs(table['col2'])
    normalised = magnitudes/np.max(magnitudes)
    return(times, magnitudes, normalised)
|
mkenworthyREPO_NAMEexoringsPATH_START.@exorings_extracted@exorings-master@j1407.py@.PATH_END.py
|
{
"filename": "_lightposition.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/cone/_lightposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
# NOTE: auto-generated plotly validator; the data_docs string below is
# user-facing runtime help text and is intentionally left untouched.
class LightpositionValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `cone.lightposition` compound property."""

    def __init__(self, plotly_name="lightposition", parent_name="cone", **kwargs):
        # Defaults may be overridden by the caller through kwargs.
        super(LightpositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Lightposition"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            x
                Numeric vector, representing the X coordinate
                for each vertex.
            y
                Numeric vector, representing the Y coordinate
                for each vertex.
            z
                Numeric vector, representing the Z coordinate
                for each vertex.
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@cone@_lightposition.py@.PATH_END.py
|
{
"filename": "indexing.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/state/indexing.py",
"type": "Python"
}
|
# Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains shared logic and abstractions for Pallas indexing ops."""
from __future__ import annotations
import dataclasses
from typing import Any, Union
from jax._src import core
from jax._src import tree_util
from jax._src.typing import Array
from jax._src.util import merge_lists
from jax._src.util import partition_list
import numpy as np
@tree_util.register_pytree_node_class
@dataclasses.dataclass
class Slice:
  """A slice with a start index and a size.

  Both start index and size can either be static, i.e. known at tracing
  and compilation time, or dynamic.
  """

  start: int | Array
  size: int | Array
  stride: int = 1

  def __post_init__(self):
    if self.stride < 1:
      raise ValueError("`stride` must be >= 1.")

  @property
  def is_dynamic_start(self):
    # A start that is not a valid static dimension value (e.g. a traced
    # Array) is treated as dynamic.
    return not core.is_dim(self.start)

  @property
  def is_dynamic_size(self):
    return not core.is_dim(self.size)

  def tree_flatten(self):
    # If `start` is statically known, we treat it as static information.
    # Dynamic values go into the children (`xs`) and static values into the
    # aux data; `None` placeholders keep the two tuples positionally
    # aligned so tree_unflatten can zip them back together.
    xs = ()
    data = ()
    xs += (self.start,) if self.is_dynamic_start else (None,)
    data += (None,) if self.is_dynamic_start else (self.start,)
    xs += (self.size,) if self.is_dynamic_size else (None,)
    data += (None,) if self.is_dynamic_size else (self.size,)
    data += (self.stride,)
    return xs, data

  @classmethod
  def tree_unflatten(cls, aux_data, children) -> Slice:
    # Pick each field from children if it was dynamic, else from aux data.
    start, size = (
        a if a is not None else b for a, b in zip(children, aux_data[:2])
    )
    return cls(start, size, aux_data[2])

  @classmethod
  def from_slice(cls, slc: slice, size: int) -> Slice:
    """Builds a `Slice` from a builtin `slice` over a dimension of `size`."""
    start, step, size = core.canonicalize_slice(slc, size)
    if step < 1:
      raise ValueError(f"slice must have a step >= 1 (found: {step})")
    return cls(start, size, step)
def dslice(
    start: int | Array | None,
    size: int | Array | None = None,
    stride: int | None = None,
) -> slice | Slice:
  """Constructs a ``Slice`` from a start index and a size.

  The semantics of ``dslice`` mirror those of the builtin ``slice`` type:

  * ``dslice(None)`` is ``:``
  * ``dslice(j)`` is ``:j``
  * ``dslice(i, j)`` is ``i:i+j``
  * ``dslice(i, j, stride)`` is ``i:i+j:stride``
  """
  if start is None:
    return slice(None)
  stride = 1 if stride is None else stride
  if not isinstance(stride, int):
    raise ValueError("Non-static stride in `dslice`")
  if size is not None:
    return Slice(start, size, stride)
  # One-argument form: `start` is actually the size, so it must be static.
  if not isinstance(start, int):
    raise ValueError("Non-static `dslice`")
  return Slice(0, start, stride)
ds = dslice  # Handy alias

# Type aliases for the values accepted as a per-dimension indexer.
IntIndexer = Union[int, Array]
DimIndexer = Union[IntIndexer, Slice]
def unpack_ndindexer(indexer: NDIndexer) -> tuple[tuple[bool, ...],
                                                  tuple[Slice, ...],
                                                  tuple[IntIndexer, ...]]:
  """Splits an NDIndexer into (integer-indexing mask, slices, int indexers)."""
  int_mask = [not isinstance(idx, Slice) for idx in indexer.indices]
  slice_idxs, int_idxs = partition_list(int_mask, indexer.indices)
  return tuple(int_mask), tuple(slice_idxs), tuple(int_idxs)  # type: ignore
def _maybe_concretize(x: Any):
  """Concretizes `x` when possible (delegating to core.to_concrete_value)."""
  # This is roughly the same logic as core.concrete_or_error, but we avoid
  # calling that because constructing the ConcretizationTypeError can be
  # expensive as the size of the tracing context (i.e. the jaxpr) grows.
  return core.to_concrete_value(x)
@tree_util.register_pytree_node_class
@dataclasses.dataclass
class NDIndexer:
  """A multi-dimensional indexer: one `DimIndexer` per dimension of a Ref."""

  # Per-dimension indexers: each is either a `Slice` or an integer/array.
  indices: tuple[DimIndexer, ...]
  # Shape of the object being indexed.
  shape: tuple[int, ...]
  # Broadcast shape of all integer (advanced) indexers.
  int_indexer_shape: tuple[int, ...]
  # Off by default to avoid doing validation during pytree operations.
  validate: bool = False

  def __post_init__(self):
    if not self.validate:
      return
    if len(self.indices) != len(self.shape):
      raise ValueError(
          f"`indices` must be the same length as `Ref` shape.: {self}."
      )
    # We validate integer indexing shapes here
    for idx, s in zip(self.indices, self.shape):
      if isinstance(idx, Slice):
        start = idx.start
        # NOTE(review): a concrete value of 0 is falsy, so `if value := ...`
        # skips validation when the concretized value is 0 — benign for
        # these bounds checks since 0 is always in range, but an explicit
        # `is not None` comparison would be clearer.
        if value := _maybe_concretize(start):
          if value >= s:
            raise ValueError(f"Out of bound slice: start={value}, dim={s}.")
          if size := _maybe_concretize(idx.size):
            # Last touched element must also be in bounds.
            if value + (size - 1) * idx.stride >= s:
              raise ValueError(
                  f"Out of bound slice: start={value}, size={size},"
                  f" stride={idx.stride}, dim={s}."
              )
        continue
      # The shape of indexer integers should be broadcastable up to the
      # int_indexer_shape of the whole NDIndexer
      if not np.shape(idx):
        if (value := _maybe_concretize(idx)) and value >= s:
          raise ValueError(f"Out of bound indexer: idx={value}, dim={s}.")
        # For ()-shaped indexers, we can broadcast no problem.
        continue
      # If we don't have a ()-shaped indexer, the rank must match
      # int_indexer_shape
      if np.ndim(idx) != len(self.int_indexer_shape):
        raise ValueError(
            f"Indexer must have rank {np.ndim(idx)}: {idx=} vs."
            f" {self.int_indexer_shape=}"
        )
      # Here we check that the shapes broadcast.
      try:
        np.broadcast_shapes(np.shape(idx), self.int_indexer_shape)
      except ValueError as e:
        raise ValueError(
            f"Could not broadcast integer indexer: {idx=} vs."
            f" {self.int_indexer_shape=}"
        ) from e

  @property
  def is_dynamic_size(self):
    # True when any slice has a size not known statically.
    return any(isinstance(i, Slice) and i.is_dynamic_size for i in self.indices)

  def tree_flatten(self):
    # The indices (which may contain tracers) are the dynamic children;
    # the shapes are static aux data. `validate` is deliberately dropped so
    # pytree round-trips skip validation.
    flat_idx, idx_tree = tree_util.tree_flatten(self.indices)
    return flat_idx, (idx_tree, self.shape, self.int_indexer_shape)

  @classmethod
  def tree_unflatten(cls, data, flat_idx):
    idx_tree, shape, int_indexer_shape = data
    indices = tree_util.tree_unflatten(idx_tree, flat_idx)
    return cls(tuple(indices), shape, int_indexer_shape)

  @classmethod
  def from_indices_shape(cls, indices, shape) -> NDIndexer:
    """Builds a validated NDIndexer from user-style indices and a shape.

    Handles scalar indices, a single ellipsis, short index tuples (padded
    with full slices), promotion of builtin `slice`s to `Slice`, and
    broadcasting of integer (advanced) indexers to a common shape.
    """
    if not isinstance(indices, tuple):
      # TODO(slebedev): Consider requiring `indices` to be a Sequence.
      indices = (indices,)

    indices = list(indices)
    if num_ellipsis := sum(idx is ... for idx in indices):
      if num_ellipsis > 1:
        raise ValueError("Only one ellipsis is supported.")
      # Expand ... so that `indices` has the same length as `shape`.
      ip = indices.index(...)
      indices[ip:ip+1] = [slice(None)] * (len(shape) - len(indices) + 1)
    if len(indices) > len(shape):
      indices = tuple(indices)
      raise ValueError("`indices` must not be longer than `shape`: "
                       f"{indices=}, {shape=}")
    elif len(indices) < len(shape):
      # Pad `indices` to have the same length as `shape`.
      indices.extend([slice(None)] * (len(shape) - len(indices)))

    # Promote all builtin `slice`s to `Slice`.
    indices = tuple(
        Slice.from_slice(i, s) if isinstance(i, slice) else i
        for i, s in zip(indices, shape))
    is_int_indexing = [not isinstance(i, Slice) for i in indices]
    if any(is_int_indexing):
      other_indexers, int_indexers = partition_list(is_int_indexing, indices)
      indexer_shapes = tuple(core.get_aval(i).shape for i in int_indexers)
      try:
        int_indexer_shape = np.broadcast_shapes(*indexer_shapes)
      except ValueError as e:
        # Raise a nicer error than the NumPy one.
        raise ValueError(
            f"Cannot broadcast shapes for indexing: {indexer_shapes}") from e

      # Here we use the `broadcast_to` primitive instead of composing lax
      # primitives together because it is easier to lower in targets like
      # Triton/Mosaic.
      #
      # The local import avoids a circular dependency between primitives
      # and this module.
      from jax._src.state import primitives as sp  # pytype: disable=import-error
      int_indexers = [
          sp.broadcast_to(i, int_indexer_shape) for i in int_indexers
      ]
      indices = tuple(merge_lists(is_int_indexing, other_indexers, int_indexers))
    else:
      int_indexer_shape = ()
    return cls(indices, shape, int_indexer_shape, validate=True)

  def get_indexer_shape(self) -> tuple[int | Array, ...]:
    """Shape of the result of applying this indexer."""
    _, slice_indexers, _ = unpack_ndindexer(self)
    slice_shape = [s.size for s in slice_indexers]
    # In NDIndexers, the int_indexer_shape is *always* at the front of the
    # result.
    return (*self.int_indexer_shape, *slice_shape)

  def transform_shape(self, shape: None | tuple[int | Array, ...]) -> None | tuple[int | Array, ...]:
    del shape  # Unused
    return self.get_indexer_shape()

  def transform_dtype(self, dtype):
    # Indexing never changes the element dtype.
    return dtype
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@state@indexing.py@.PATH_END.py
|
{
"filename": "_opacitysrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/choroplethmapbox/marker/_opacitysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
# NOTE: auto-generated plotly validator (Python 2 tree — keep the explicit
# two-argument super() form).
class OpacitysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `choroplethmapbox.marker.opacitysrc` property."""

    def __init__(
        self, plotly_name="opacitysrc", parent_name="choroplethmapbox.marker", **kwargs
    ):
        # Defaults may be overridden by the caller through kwargs.
        super(OpacitysrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@choroplethmapbox@marker@_opacitysrc.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/bar/hoverlabel/font/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `bar.hoverlabel.font.style` enumerated property."""

    def __init__(
        self, plotly_name="style", parent_name="bar.hoverlabel.font", **kwargs
    ):
        # Pop overridable defaults out of kwargs before delegating, in the
        # same order the generated code evaluated them.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        values = kwargs.pop("values", ["normal", "italic"])
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@bar@hoverlabel@font@_style.py@.PATH_END.py
|
{
"filename": "_thickness.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter/error_x/_thickness.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ThicknessValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `scatter.error_x.thickness` number property."""

    def __init__(
        self, plotly_name="thickness", parent_name="scatter.error_x", **kwargs
    ):
        # Pop overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "style")
        minimum = kwargs.pop("min", 0)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter@error_x@_thickness.py@.PATH_END.py
|
{
"filename": "test_enh2xyz.py",
"repo_name": "JLBLine/WODEN",
"repo_path": "WODEN_extracted/WODEN-master/cmake_testing/wodenpy/array_layout/test_enh2xyz.py",
"type": "Python"
}
|
from sys import path
import os
import unittest
import numpy as np
from wodenpy.array_layout import create_array_layout
# degrees-to-radians conversion factor
D2R = np.pi / 180.0

##Vehicle for running tests
class Test(unittest.TestCase):
    """Test vehicle for the array-layout coordinate conversion."""

    def test_enh2xyz(self):
        """Checks `create_array_layout.enh2xyz`, which converts local
        east/north/height coordinates into local X,Y,Z. Latitudes of 0 and
        -30 degrees have simple closed-form expected values."""
        ##simple ramp arrays for the local coordinates
        east = np.arange(0, 100, 10)
        north = np.arange(0, 100, 10)
        height = np.arange(0, 100, 10)

        ##at latitude 0.0, e,n,h simply map onto Y,Z,X respectively
        X, Y, Z = create_array_layout.enh2xyz(east, north, height,
                                              latitude=0.0)
        self.assertTrue(np.array_equal(height, X))
        self.assertTrue(np.array_equal(east, Y))
        self.assertTrue(np.array_equal(north, Z))

        ##at -30 deg the mapping involves simple sin/cos factors
        X, Y, Z = create_array_layout.enh2xyz(east, north, height,
                                              latitude=-30*D2R)
        self.assertTrue(np.allclose(0.5*north + (np.sqrt(3)/2)*height, X,
                                    atol=1e-15))
        self.assertTrue(np.array_equal(east, Y))
        self.assertTrue(np.allclose((np.sqrt(3)/2)*north + -0.5*height, Z,
                                    atol=1e-15))
##Run the test suite when executed as a script
if __name__ == '__main__':
    unittest.main()
|
JLBLineREPO_NAMEWODENPATH_START.@WODEN_extracted@WODEN-master@cmake_testing@wodenpy@array_layout@test_enh2xyz.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "saberyoung/haffet",
"repo_path": "haffet_extracted/haffet-master/sdapy/models/polynomial/__init__.py",
"type": "Python"
}
|
"""Initilization modules."""
import os
path = os.path.dirname(os.path.abspath(__file__))
__all__ = []
for py in [
f[:-3] for f in os.listdir(path)
if f.endswith('.py') and f != '__init__.py'
]:
__all__.append(py)
from .functions import *
|
saberyoungREPO_NAMEhaffetPATH_START.@haffet_extracted@haffet-master@sdapy@models@polynomial@__init__.py@.PATH_END.py
|
{
"filename": "light_curve_collection.py",
"repo_name": "golmschenk/generalized-photometric-neural-network-experiments",
"repo_path": "generalized-photometric-neural-network-experiments_extracted/generalized-photometric-neural-network-experiments-main/generalized_photometric_neural_network_experiments/dataset/flare/light_curve_collection.py",
"type": "Python"
}
|
"""
Code to represent a collection of light curves.
"""
import shutil
import socket
import re
from filelock import FileLock
from typing import Union, Iterable, Optional, List
import numpy as np
import pandas as pd
from pathlib import Path
from generalized_photometric_neural_network_experiments.dataset.flare.generate_flare_frequency_distribution_synthetic_injectables import \
injectable_flare_frequency_distribution_metadata_path, InjectableFlareFrequencyDistributionFileColumn, \
InjectableFlareFrequencyDistributionMetadataColumn, injectable_flare_frequency_distributions_directory
from generalized_photometric_neural_network_experiments.dataset.flare.names_and_paths import metadata_csv_path, \
MetadataColumnName, light_curve_directory
from ramjet.data_interface.tess_data_interface import TessDataInterface
from ramjet.photometric_database.light_curve_collection import LightCurveCollection
class FlareExperimentLightCurveCollection(LightCurveCollection):
    """
    A class to represent a collection of light curves related to the flare experiment.
    """
    # Shared interface for loading TESS data products.
    tess_data_interface = TessDataInterface()

    def __init__(self, is_flaring: Optional[bool] = None, splits: Optional[List[int]] = None):
        """
        :param is_flaring: If set, restrict the collection to flaring (True) or
                           non-flaring (False) targets; None keeps both.
        :param splits: The dataset splits to include; None means all splits.
        """
        super().__init__()
        self.metadata_data_frame = pd.read_csv(metadata_csv_path)
        self.is_flaring: Optional[bool] = is_flaring
        self.splits: Optional[List[int]] = splits

    def load_times_and_fluxes_from_path(self, path: Path) -> (np.ndarray, np.ndarray):
        """
        Loads the times and fluxes from a given light curve path.

        :param path: The path to the light curve file.
        :return: The times and the fluxes of the light curve.
        """
        # Relocate the file to fast node-local storage first, when available.
        path = self.move_path_to_nvme(path)
        fluxes, times = self.tess_data_interface.load_fluxes_and_times_from_fits_file(path)
        return times, fluxes

    def load_label_from_path(self, path: Path) -> Union[np.ndarray]:
        """
        Loads the label of an example from a corresponding path.

        The label is the (slope, intercept) pair of the flare frequency
        distribution fit; the values may be NaN for non-flaring targets
        (see `get_paths_for_label_existence_and_splits`).

        :param path: The path to load the label for.
        :return: The label.
        """
        tic_id, sector = self.tess_data_interface.get_tic_id_and_sector_from_file_path(path)
        metadata_row = self.get_metadata_row_for_tic_id_and_sector(tic_id, sector)
        slope = metadata_row[MetadataColumnName.FLARE_FREQUENCY_DISTRIBUTION_SLOPE]
        intercept = metadata_row[MetadataColumnName.FLARE_FREQUENCY_DISTRIBUTION_INTERCEPT]
        return np.array([slope, intercept], dtype=np.float32)

    def get_metadata_row_for_tic_id_and_sector(self, tic_id: int, sector: int) -> pd.Series:
        """
        Gets the metadata row for a given TIC ID and sector.

        :param tic_id: The TIC ID to lookup.
        :param sector: The sector to lookup.
        :return: The metadata row.
        """
        # Takes the first match; raises IndexError when no row matches.
        metadata_row = self.metadata_data_frame[
            (self.metadata_data_frame[MetadataColumnName.TIC_ID] == tic_id) &
            (self.metadata_data_frame[MetadataColumnName.SECTOR] == sector)
        ].iloc[0]
        return metadata_row

    def get_paths(self) -> Iterable[Path]:
        """
        Gets the paths for the light curves in the collection.

        :return: An iterable of the light curve paths.
        """
        return list(self.get_paths_for_label_existence_and_splits(self.is_flaring, self.splits))

    def get_paths_for_label_existence_and_splits(self, is_flaring: Optional[bool] = None,
                                                 splits: Optional[List[int]] = None) -> Iterable[Path]:
        """
        Gets the paths for a given label and splits.

        :param is_flaring: If set, keep only targets whose flare frequency
                           distribution fit exists (True) or is absent (False).
        :param splits: If set, keep only targets whose split is in this list.
        :return: An iterable of the light curve paths.
        """
        paths = []
        for fits_path in light_curve_directory.glob('*.fits'):
            tic_id, sector = self.tess_data_interface.get_tic_id_and_sector_from_file_path(fits_path)
            metadata_row = self.get_metadata_row_for_tic_id_and_sector(tic_id, sector)
            if is_flaring is not None:
                # A target is considered flaring when its fit exists; slope
                # and intercept presence must agree for a consistent row.
                slope_exists_for_row = pd.notna(
                    metadata_row[MetadataColumnName.FLARE_FREQUENCY_DISTRIBUTION_SLOPE])
                intercept_exists_for_row = pd.notna(
                    metadata_row[MetadataColumnName.FLARE_FREQUENCY_DISTRIBUTION_INTERCEPT])
                assert slope_exists_for_row == intercept_exists_for_row
                if is_flaring and not slope_exists_for_row:
                    continue
                if not is_flaring and slope_exists_for_row:
                    continue
            if splits is not None:
                if metadata_row[MetadataColumnName.SPLIT] not in splits:
                    continue
            paths.append(fits_path)
        return paths

    def load_auxiliary_information_for_path(self, path: Path) -> np.ndarray:
        """
        Loads auxiliary information for the given path.

        :param path: The path to the light curve file.
        :return: The auxiliary information.
        """
        tic_id, sector = self.tess_data_interface.get_tic_id_and_sector_from_file_path(path)
        metadata_row = self.get_metadata_row_for_tic_id_and_sector(tic_id, sector)
        luminosity = metadata_row[MetadataColumnName.LUMINOSITY__LOG_10_SOLAR_UNITS]
        return np.array([luminosity], dtype=np.float32)

    def move_path_to_nvme(self, path: Path) -> Path:
        """
        Copies the file to node-local scratch storage when running on a GPU
        cluster node (hostname like ``gpu123``) and returns the local path;
        otherwise returns the original path unchanged.

        :param path: The path of the file to (possibly) relocate.
        :return: The path from which to read the file.
        """
        match = re.match(r"gpu\d{3}", socket.gethostname())
        if match is not None:
            nvme_path = Path("/lscratch/golmsche").joinpath(path)
            if not nvme_path.exists():
                nvme_path.parent.mkdir(exist_ok=True, parents=True)
                # A file lock plus copy-then-rename guarantees concurrent
                # processes never read a partially copied file.
                nvme_lock_path = nvme_path.parent.joinpath(nvme_path.name + '.lock')
                lock = FileLock(str(nvme_lock_path))
                with lock.acquire():
                    # Re-check after acquiring: another process may have
                    # finished the copy while we waited on the lock.
                    if not nvme_path.exists():
                        nvme_tmp_path = nvme_path.parent.joinpath(nvme_path.name + '.tmp')
                        shutil.copy(path, nvme_tmp_path)
                        nvme_tmp_path.rename(nvme_path)
            return nvme_path
        else:
            return path
class FlareExperimentUpsideDownLightCurveCollection(FlareExperimentLightCurveCollection):
    """A flare-experiment collection that serves vertically flipped light curves."""

    def load_times_and_fluxes_from_path(self, path: Path) -> (np.ndarray, np.ndarray):
        """
        Loads the times and fluxes from a given light curve path.

        :param path: The path to the light curve file.
        :return: The times and the fluxes of the light curve.
        """
        local_path = self.move_path_to_nvme(path)
        fluxes, times = self.tess_data_interface.load_fluxes_and_times_from_fits_file(local_path)
        # Flip the light curve, then shift so the minimum flux is zero.
        flipped_fluxes = -fluxes
        offset_fluxes = flipped_fluxes - np.min(flipped_fluxes)
        return times, offset_fluxes

    def load_label_from_path(self, path: Path) -> Union[np.ndarray]:
        """
        Loads the label of an example from a corresponding path.

        :param path: The path to load the label for.
        :return: The label (a NaN placeholder pair for this collection).
        """
        return np.array([np.nan, np.nan], dtype=np.float32)
class InjectableFfdLightCurveCollection(LightCurveCollection):
    """
    Collection of synthetic flare light curves (relative amplitudes over time)
    intended for injection, labeled by the slope and intercept of the flare
    frequency distribution (FFD) recorded in the collection's metadata CSV.
    """
    def __init__(self, splits: Optional[List[int]] = None):
        super().__init__()
        # Metadata table mapping each light curve file name to its FFD
        # slope/intercept and dataset split.
        self.metadata_data_frame = pd.read_csv(injectable_flare_frequency_distribution_metadata_path)
        # Which dataset splits this collection draws from (None means all).
        self.splits: Optional[List[int]] = splits
    def load_times_and_magnifications_from_path(self, path: Path) -> (np.ndarray, np.ndarray):
        """
        Loads the times and relative magnifications from a given light curve path.

        :param path: The path to the light curve (Feather) file.
        :return: The times (days) and the relative amplitudes.
        """
        path = self.move_path_to_nvme(path)
        light_curve_data_frame = pd.read_feather(path)
        magnifications = light_curve_data_frame[
            InjectableFlareFrequencyDistributionFileColumn.RELATIVE_AMPLITUDE].values
        times = light_curve_data_frame[InjectableFlareFrequencyDistributionFileColumn.TIME__DAYS].values
        return times, magnifications
    def load_label_from_path(self, path: Path) -> np.ndarray:
        """
        Loads the label of an example from a corresponding path.

        :param path: The path to load the label for.
        :return: The label (FFD slope and intercept) as a float32 array.
        """
        metadata_row = self.metadata_data_frame[
            self.metadata_data_frame[InjectableFlareFrequencyDistributionMetadataColumn.FILE_NAME] == path.name].iloc[0]
        slope = metadata_row[InjectableFlareFrequencyDistributionMetadataColumn.SLOPE]
        intercept = metadata_row[InjectableFlareFrequencyDistributionMetadataColumn.INTERCEPT]
        return np.array([slope, intercept], dtype=np.float32)
    def get_paths(self) -> Iterable[Path]:
        """
        Gets the paths for the light curves in the collection.

        :return: An iterable of the light curve paths.
        """
        return list(self.get_paths_for_label_existence_and_splits(self.splits))
    def get_paths_for_label_existence_and_splits(self, splits: Optional[List[int]] = None) -> Iterable[Path]:
        """
        Gets the paths for the given dataset splits (all paths when None).

        :param splits: The dataset splits to include.
        :return: An iterable of the light curve paths.
        """
        paths = []
        for path in injectable_flare_frequency_distributions_directory.glob('*.feather'):
            metadata_row = self.metadata_data_frame[
                self.metadata_data_frame[InjectableFlareFrequencyDistributionMetadataColumn.FILE_NAME] ==
                str(path.name)].iloc[0]
            if splits is not None:
                # NOTE(review): this indexes the injectable metadata frame with
                # MetadataColumnName.SPLIT, while the rest of this class uses the
                # InjectableFlareFrequencyDistributionMetadataColumn enum for the
                # same frame — confirm the split column name is shared.
                if metadata_row[MetadataColumnName.SPLIT] not in splits:
                    continue
            paths.append(path)
        return paths
    def move_path_to_nvme(self, path: Path) -> Path:
        """
        Copies the file to the node-local NVMe scratch disk on GPU cluster
        nodes and returns the scratch path; otherwise returns the original
        path. (Duplicate of FlareExperimentLightCurveCollection's method.)
        """
        match = re.match(r"gpu\d{3}", socket.gethostname())
        if match is not None:
            nvme_path = Path("/lscratch/golmsche").joinpath(path)
            if not nvme_path.exists():
                nvme_path.parent.mkdir(exist_ok=True, parents=True)
                # Lock so concurrent workers do not race on the same copy.
                nvme_lock_path = nvme_path.parent.joinpath(nvme_path.name + '.lock')
                lock = FileLock(str(nvme_lock_path))
                with lock.acquire():
                    if not nvme_path.exists():
                        # Copy to a temp name, then rename atomically.
                        nvme_tmp_path = nvme_path.parent.joinpath(nvme_path.name + '.tmp')
                        shutil.copy(path, nvme_tmp_path)
                        nvme_tmp_path.rename(nvme_path)
            return nvme_path
        else:
            return path
|
LONG_NAME_79.py
|
{
"filename": "README.md",
"repo_name": "mbejger/polgraw-allsky",
"repo_path": "polgraw-allsky_extracted/polgraw-allsky-master/search/network/src-openmp/lib/README.md",
"type": "Markdown"
}
|
This directory contains external libraries we use for vectorization:
SLEEF by Naoki Shibata: http://shibatch.sourceforge.net/
Yeppp! : http://www.yeppp.info/
|
mbejgerREPO_NAMEpolgraw-allskyPATH_START.@polgraw-allsky_extracted@polgraw-allsky-master@search@network@src-openmp@lib@README.md@.PATH_END.py
|
{
"filename": "asp_system_utils.py",
"repo_name": "NeoGeographyToolkit/StereoPipeline",
"repo_path": "StereoPipeline_extracted/StereoPipeline-master/src/asp/Python/asp_system_utils.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
"""
General system related utilities
"""
from __future__ import print_function
import sys, os, re, shutil, subprocess, string, time, errno, multiprocessing, signal
import os.path as P
import asp_string_utils, asp_cmd_utils
# Restore LD_LIBRARY_PATH from ASP_LIBRARY_PATH if a previous invocation
# stashed it there (see runInGnuParallel below, which temporarily clears
# LD_LIBRARY_PATH so GNU Parallel does not pick up ASP's bundled libraries).
if 'ASP_LIBRARY_PATH' in os.environ:
    os.environ['LD_LIBRARY_PATH'] = os.environ['ASP_LIBRARY_PATH']
def die(msg, code=-1):
    '''Print msg to stderr and terminate the process with the given exit code.'''
    sys.stderr.write(str(msg) + '\n')
    sys.exit(code)
def verify_python_version_is_supported():
    '''Verifies that a supported version of Python (>= 3.0) is being used;
    exits the process otherwise.'''
    if sys.version_info >= (3, 0, 0):
        return
    print('\nERROR: Must use Python version >= 3.0.')
    sys.exit(1)
# Print the version of the ASP programs
def print_version_and_exit():
    '''Report the ASP version by running "stereo_parse --version", then exit.'''
    parser_path = libexec_path("stereo_parse")  # resolve the full tool path
    os.system(parser_path + " --version")
    sys.exit(0)
def get_prog_version(prog):
    '''Get the version of a command line program.

    Runs "prog --version" and returns the first token that looks like a
    version number. Raises Exception if the program cannot be run or no
    version can be parsed from its output.
    '''
    try:
        p = subprocess.Popen([prog, "--version"], stdout=subprocess.PIPE,
                             universal_newlines=True)
        out, err = p.communicate()
    except Exception:
        raise Exception("Could not find: " + prog)
    if p.returncode != 0 and ('stereo_parse' not in prog):
        # Our own stereo_parse returns 1 even if the version check
        # succeeded. Too much work would be needed to fix that, so
        # just ignore the return status in that case.
        raise Exception("Checking " + prog + " version caused errors")
    # Skip warning lines (GNU Parallel sometimes prints one first), then take
    # the first whitespace-delimited token starting with a digit.
    for line in out.split("\n"):
        if re.match(r"^.*?warning", line, re.IGNORECASE):
            continue
        # This covers a version with no dots and a version like 3.0.1-alpha.
        # This is a fragile code.
        m = re.match(r"^.*? (\d[^\s]+)", line)
        if m:
            return m.group(1)
    raise Exception("Could not find version in: " + out)
def get_num_cpus():
    """Return the number of CPUs on the current machine."""
    # Python >= 3 is enforced elsewhere (verify_python_version_is_supported),
    # so multiprocessing.cpu_count() is always available; the old fallback of
    # assuming 8 CPUs for Python < 2.6 (and the shadowing local "import sys")
    # were obsolete.
    return multiprocessing.cpu_count()
def checkIfToolExists(toolName):
    """Return True if the utility toolName is on the PATH; raise Exception
    otherwise."""
    # Look for the tool using the 'which' command
    cmd = ['which', toolName]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         universal_newlines=True)
    translateOut, err = p.communicate()
    # csh-style 'which' prints "no <tool> in (...)" on failure; most other
    # implementations print nothing and return a nonzero exit code, which the
    # original text-only check missed (reporting missing tools as present).
    failString = 'no ' + toolName + ' in ('
    if p.returncode != 0 or translateOut.find(failString) >= 0:
        raise Exception('Missing required executable "' + toolName + \
                        '", please add it to your PATH.')
    return True
# Find if a program is in the path.
# Some ASP tools like qi2txt can be only in libexec, hence the option lookInLibexec.
# TODO: This logic needs serious cleanup, but while making sure that nothing breaks.
def which(program, lookInLibexec = False):
    '''Return the full path to program, searched on the PATH (and, when
    lookInLibexec is True, also in the libexec directory next to each PATH
    entry). Raise Exception if it cannot be found.'''
    if not lookInLibexec:
        checkIfToolExists(program)
    def is_exe(fpath):
        # Must be a regular file with the execute bit set.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    fpath, fname = os.path.split(program)
    if fpath:
        # An explicit path was given; accept it only if it is executable.
        if is_exe(program):
            return program
    else:
        paths = [p.strip('"') for p in os.environ["PATH"].split(os.pathsep)]
        for path in paths:
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
        if lookInLibexec:
            # Look in the 'libexec' sibling of each PATH entry.
            for path in paths:
                exe_file = os.path.join(os.path.dirname(path), 'libexec', program)
                if is_exe(exe_file):
                    return exe_file
    # The unreachable "return None" that used to follow this raise was removed.
    raise Exception('Missing required executable "' + program + '", please add it to your PATH.')
def getNumNodesInList(nodesListPath):
    """Get the number of distinct computing (e.g. Pleiades) nodes listed in a
    file. Each line is expected to start with a node name; repeated names are
    counted once. Returns 1 when no list is given (local machine).

    Raises Exception when the list contains no nodes; calls die() if the file
    cannot be read.
    """
    if nodesListPath is None:
        return 1 # local machine
    # Count the number of nodes without repetition
    # (need this for Pleiades).
    nodes = {}
    try:
        # 'with' guarantees the file handle is closed (the original leaked it).
        with open(nodesListPath, "r") as fileHandle:
            for line in fileHandle:
                if re.match(r'^\s*$', line): continue # skip empty lines
                matches = re.match(r'^\s*([^\s]*)', line)
                if matches:
                    nodes[matches.group(1)] = 1
    except Exception as e: # Fail on exception
        die(e)
    # Compute the count once, after the loop (was recomputed per line).
    num_nodes = len(nodes)
    if num_nodes == 0:
        raise Exception('The list of computing nodes is empty')
    return num_nodes
def check_parallel_version():
    """Verify that the installed GNU Parallel is at least version 20170722;
    exits via die() otherwise."""
    # This error will never be reached for users of our packaged final
    # product as that one bundles 'parallel' with it.
    ver = get_prog_version('parallel')
    # GNU Parallel versions are YYYYMMDD date strings, so comparing them
    # lexicographically (both sides 8 digits) orders them correctly.
    if ver < '20170722':
        die("Expecting a version of GNU parallel >= 20170722.")
# This is a bugfix for 3D CRS specified via --t_srs, and for arguments
# having spaces.
def escape_token(token):
    """Return token wrapped in single quotes when it contains double quotes,
    spaces, or tabs and is not already quoted; otherwise return it unchanged."""
    needs_quoting = ('"' in token) or (' ' in token) or ('\t' in token)
    single_quoted = token.startswith('\'') and token.endswith('\'')
    double_quoted = token.startswith('"') and token.endswith('"')
    if needs_quoting and not (single_quoted or double_quoted):
        return '\'' + token + '\''
    return token
def runInGnuParallel(numParallelProcesses, argumentFilePath, args,
                     nodeListPath=None, verbose=False):
    """Use GNU Parallel to spread task across multiple computers and processes.

    :param numParallelProcesses: Jobs per node (None lets GNU Parallel default
        to one job per CPU core).
    :param argumentFilePath: File whose lines provide the per-job arguments.
    :param args: Extra arguments appended to the 'parallel' command line.
    :param nodeListPath: Optional file listing remote nodes to run on.
    :param verbose: If True, echo the command before running it.
    :return: The return code of the 'parallel' invocation.
    """
    # Make sure GNU parallel is installed
    if not checkIfToolExists('parallel'):
        raise Exception('Need GNU Parallel to distribute the jobs.')
    # Ensure our 'parallel' is not out of date
    check_parallel_version()
    # Use GNU parallel with given number of processes. Let output be
    # interspersed, read input series from file. Start in the same directory on
    # remote machines. Ensure that vital env variables are copied over.
    # We do not use the GNU Parallel --workdir option, as this tool cannot
    # handle spaces in the path. Instead, each caller of this function must
    # handle the workdir itself. See for example the mapproject program.
    cmd = ['parallel', '--will-cite', '-u',
           '--env', 'PATH', '--env', 'PYTHONPATH', '--env', 'ISISROOT',
           '--env', 'ASP_LIBRARY_PATH',
           '--env', 'ISISDATA', '-a', argumentFilePath]
    # Add number of processes if specified (default is one job per CPU core)
    if numParallelProcesses is not None:
        cmd += ['-P', str(numParallelProcesses)]
    # Add list of nodes as argument to the parallel tool if it is available
    if nodeListPath is not None:
        cmd += ['--sshloginfile', nodeListPath]
    # Append any additional arguments to parallel
    cmd += args
    if verbose: # Echo the command line call we are about to make
        print(" ".join(cmd))
    # This is a bugfix for RHEL 8. The 'parallel' program fails to start with
    # ASP's libs, so temporarily hide them: stash LD_LIBRARY_PATH in
    # ASP_LIBRARY_PATH (restored below and at module import time).
    if 'LD_LIBRARY_PATH' in os.environ:
        os.environ['ASP_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
        os.environ['LD_LIBRARY_PATH'] = ''
    # Handle spaces and quotes
    for i in range(len(cmd)):
        cmd[i] = escape_token(cmd[i])
    returnCode = subprocess.call(cmd)
    # Undo the above
    if 'ASP_LIBRARY_PATH' in os.environ:
        os.environ['LD_LIBRARY_PATH'] = os.environ['ASP_LIBRARY_PATH']
    return returnCode
# When user-exposed ASP executables are installed, they are in
# 'bin'. Otherwise, in dev mode, they are in the same dir as __file__.
# We prefer absolute paths below, in case some intermediate directories
# do not exist.
def bin_path(prog, **kw):
    """Return the absolute path where the user-facing executable prog is
    expected: ../bin first, then ../Tools, then alongside this file (dev
    mode). Falls back to the last candidate even if it does not exist."""
    base_dir = kw.get('path', P.dirname(P.abspath(__file__)))
    candidates = [P.join(base_dir, '..', 'bin', prog),
                  P.join(base_dir, '..', 'Tools', prog),
                  P.join(base_dir, prog)]
    for candidate in candidates:
        full_path = os.path.abspath(candidate)
        if P.isfile(full_path):
            return full_path
    return os.path.abspath(candidates[-1])
# When hidden ASP executables are installed, they are in
# 'libexec'. Otherwise, in dev mode, they are in the same dir as
# __file__. If no luck at all, search in 'bin'.
def libexec_path(prog, **kw):
    """Return the absolute path to the (normally hidden) executable prog.

    Search order: ../libexec, ../Tools, alongside this file (dev mode),
    ../bin, and finally the user's PATH via which() — which raises if the
    tool cannot be found anywhere."""
    currpath = kw.get('path', P.dirname(P.abspath(__file__)))
    libexecpath = os.path.abspath(P.join(currpath, '..', 'libexec', prog))
    if not P.isfile(libexecpath):
        libexecpath = os.path.abspath(P.join(currpath, '..', 'Tools', prog))
    if not P.isfile(libexecpath):
        libexecpath = os.path.abspath(P.join(currpath, prog))
    if not P.isfile(libexecpath):
        libexecpath = os.path.abspath(P.join(currpath, '..', 'bin', prog))
    if not P.isfile(libexecpath):
        # Could not find prog in 'libexec' or 'bin' either. We will
        # come here only for executables like gdalinfo that will be
        # packaged in the release, but are not yet in dev mode. Just
        # print a warning and hope this tool is somewhere in user's
        # path.
        print("Could not find: " + libexecpath)
        libexecpath = which(prog)
        if libexecpath is None:
            raise Exception('Could not find: ' + prog)
        print("Using instead: " + libexecpath)
    return libexecpath
# mkdir without throw
def mkdir_p(path):
    """Create path (and any missing parents), silently ignoring OSError —
    in particular the common case where the directory already exists."""
    try:
        os.makedirs(path)
    except OSError:
        # Deliberately best-effort: existing directories (or other OS-level
        # failures) are ignored rather than raised.
        pass
# Execute a given command stored in the libexec directory and parse
# its output. The output format is expected to be lines of
# comma-separated values. The first value on each line becomes the
# output variable name, the other values are read into the array of
# variable values.
def run_and_parse_output(cmd, args, sep, verbose, return_full_lines = False, **kw):
    """Run a libexec tool and parse its separated-value output.

    :param cmd: Name of the tool (resolved via libexec_path).
    :param args: List of arguments passed to the tool.
    :param sep: Separator splitting each output line into key and values.
    :param verbose: If True, echo the command and its output.
    :param return_full_lines: If True, return whole lines keyed by an
        integer line index instead of splitting them on sep.
    :return: Dict mapping the first field (or line index) to the remaining
        fields (or the full line).
    :raises Exception: If the tool cannot be started or exits nonzero.
    """
    libexecpath = libexec_path(cmd)
    call = [libexecpath]
    call.extend(args)
    if verbose:
        print (asp_string_utils.argListToString(call))
    try:
        p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True)
    except OSError as e:
        raise Exception('%s: %s' % (libexecpath, e))
    (stdout, stderr) = p.communicate()
    p.wait()
    if p.returncode != 0:
        # Surface whatever the tool printed before failing.
        if stdout is not None:
            stdout = stdout.rstrip()
            print(stdout)
        if stderr is not None:
            stderr = stderr.rstrip()
            print(stderr)
        raise Exception('Failed executing: ' + " ".join(call))
    data = {}
    if verbose:
        if stdout is not None: print(stdout)
        if stderr is not None: print(stderr)
    count = 0
    for line in stdout.split('\n'):
        # Do not usually print warnings as they are verbose, and this function
        # is invoked often. The main processes (such as stereo_pprc) will print
        # the warnings.
        if verbose and re.match(r"^Warning", line):
            print(line)
        if return_full_lines:
            data[count] = line # return the entire line
            count += 1
        else:
            if sep in line:
                keywords = line.split(sep)
                for index, item in enumerate(keywords):
                    # Strip whitespace from ends
                    keywords[index] = item.strip(' \t\n\r')
                data[keywords[0]] = keywords[1:]
    return data
def run_with_return_code(cmd, verbose=False):
    # TODO: Wipe this and use instead executeCommand.
    '''Run a command (a list of tokens) and return its exit code.

    Returns 1 when the process could not even be started (e.g. the
    executable does not exist).'''
    if verbose:
        print (asp_string_utils.argListToString(cmd))
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True)
    except OSError as e:
        # Bugfix: previously execution fell through to p.communicate() with
        # 'p' unbound, raising UnboundLocalError instead of reporting failure.
        print('Error: %s: %s' % ( asp_string_utils.argListToString(cmd), e))
        return 1
    (stdout, stderr) = p.communicate()
    p.wait()
    if p.returncode != 0 and verbose:
        if stdout is not None: print(stdout)
        if stderr is not None: print(stderr)
        print ('Failed executing: ' + " ".join(cmd))
    return p.returncode
def is_valid_image(filename):
    """Return True if filename exists and gdalinfo can parse it as an image."""
    if not os.path.exists(filename):
        return False
    # gdalinfo exits with code 0 only for readable images.
    return run_with_return_code(['gdalinfo', filename], False) == 0
# For timeout
def timeout_alarm_handler(signum, frame):
    """SIGALRM handler: abort the in-progress command by raising."""
    raise Exception("Timeout reached!")
# TODO: Improve this function a bit
def executeCommand(cmd,
                   outputPath=None,      # If given, throw if the file is not created. Don't run if it already exists.
                   suppressOutput=False, # If true, don't print anything!
                   redo=False,           # If true, run even if outputPath already exists.
                   noThrow=False,        # If true, don't throw if output is missing
                   numAttempts = 1,      # How many attempts to use
                   sleepTime = 60,       # How much to sleep between attempts
                   timeout = -1,         # After how long to timeout in seconds
                   realTimeOutput = False # If to print what the command is doing in real time
                   ):
    '''Executes a command with multiple options.

    Returns a tuple (stdout, stderr, status). The command may be retried,
    skipped when its output file already exists, and limited by a timeout.
    The timeout is enforced via SIGALRM, so it only works on the main
    thread of POSIX systems.
    '''
    # Initialize outputs
    out = ""
    status = 0
    err = ""
    if cmd == '': # An empty task
        return (out, err, status)
    # Convert the input to list format if needed
    if asp_string_utils.isString(cmd):
        cmd = asp_string_utils.stringToArgList(cmd)
    for attempt in range(numAttempts):
        # Run the command if conditions are met
        if redo or (outputPath is None) or (not os.path.exists(outputPath)):
            if not suppressOutput:
                print (asp_string_utils.argListToString(cmd))
            if timeout > 0:
                print("Will enforce timeout of " + str(timeout) + " seconds.")
                signal.signal(signal.SIGALRM, timeout_alarm_handler)
                signal.alarm(timeout)
            try:
                p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                     universal_newlines=True)
                if realTimeOutput and (not suppressOutput):
                    # Print in real time what is going on, and return nothing in out and err
                    while True:
                        curr_out = p.stdout.readline()
                        # TODO(oalexan1): It appears that quering p.stderr makes this not
                        # work in real time, so may need to turn off the line below.
                        # This will however break code which depends on things kept in
                        # the error string.
                        curr_err = p.stderr.readline()
                        out += curr_out
                        err += curr_err
                        if curr_out == '' and p.poll() is not None:
                            break
                        if curr_out:
                            print(curr_out.strip())
                    status = p.poll()
                else:
                    # Collect the output and error
                    out, err = p.communicate()
                    status = p.returncode
                if timeout > 0:
                    signal.alarm(0) # reset the alarm
            except Exception as e:
                # The alarm handler raises here on timeout; any other failure
                # (e.g. the command not being found) lands here too.
                out = ""
                err = ('Error: %s: %s' % (asp_string_utils.argListToString(cmd), e))
                if timeout > 0:
                    # this module is generally not available, so this use is very niche
                    import psutil
                    def kill_proc_tree(pid, including_parent=True):
                        # Kill the child processes first, then (optionally) the parent.
                        parent = psutil.Process(pid)
                        for child in parent.children(recursive=True):
                            print("Killing: " + str(child))
                            child.kill()
                        if including_parent:
                            print("Killing: " + str(parent))
                            parent.kill()
                    # NOTE(review): 'pid' below is assigned but unused; the tree
                    # kill uses p.pid directly.
                    pid = psutil.Process(p.pid)
                    try:
                        kill_proc_tree(p.pid)
                    except:
                        pass
                status = 1
                if not noThrow:
                    raise Exception(err)
            if out is None: out = ""
            if err is None: err = ""
            if (not suppressOutput) and (not realTimeOutput):
                print (out + '\n' + err)
            if status == 0:
                break
            if numAttempts <= 1:
                break
            if attempt < numAttempts - 1:
                # Failed, and more attempts remain: report and sleep before retrying.
                print("attempt: " + str(attempt))
                print("ran: " + asp_string_utils.argListToString(cmd) )
                print("out = " + out)
                print("err = " + err)
                print("status = " + str(status))
                print("Will sleep for " + str(sleepTime) + " seconds")
                time.sleep(sleepTime)
        else: # Output file already exists, don't re-run
            out = ""
            err = ""
            status = 0
    # Optionally check that the output file was created
    if outputPath and (not os.path.exists(outputPath)) and (not noThrow):
        raise asp_cmd_utils.CmdRunException('Failed to create output file: ' + outputPath)
    return (out, err, status)
# A very simple wrapper around subprocess
def generic_run(cmd, verbose):
    """Run cmd (a list of tokens), raising an Exception on any failure."""
    printable = asp_string_utils.argListToString(cmd)
    if verbose:
        print(printable)
    try:
        return_code = subprocess.call(cmd)
    except OSError as e:
        raise Exception('%s: %s' % (printable, e))
    if return_code != 0:
        raise Exception('Failed to run: ' + printable)
|
NeoGeographyToolkitREPO_NAMEStereoPipelinePATH_START.@StereoPipeline_extracted@StereoPipeline-master@src@asp@Python@asp_system_utils.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "florpi/sunbird",
"repo_path": "sunbird_extracted/sunbird-main/sunbird/utils.py",
"type": "Python"
}
|
# taken from https://github.com/cosmodesi/desilike/blob/main/desilike/utils.py
import numpy as np
import logging
import sys
import os
import time
import traceback
def setup_logging(level=logging.INFO, stream=sys.stdout, filename=None, filemode='w', **kwargs):
    """
    Set up logging.

    Parameters
    ----------
    level : string, int, default=logging.INFO
        Logging level ('info', 'debug', 'warning', or a logging constant).

    stream : _io.TextIOWrapper, default=sys.stdout
        Where to stream.

    filename : string, default=None
        If not ``None`` stream to file name.

    filemode : string, default='w'
        Mode to open file, only used if filename is not ``None``.

    kwargs : dict
        Other arguments for :func:`logging.basicConfig`.
    """
    # Cannot provide stream and filename kwargs at the same time to logging.basicConfig, so handle different cases
    # Thanks to https://stackoverflow.com/questions/30861524/logging-basicconfig-not-creating-log-file-when-i-run-in-pycharm
    if isinstance(level, str):
        level = {'info': logging.INFO, 'debug': logging.DEBUG, 'warning': logging.WARNING}[level.lower()]
    # Remove any previously installed handlers so repeated setup_logging
    # calls do not duplicate output.
    for handler in logging.root.handlers:
        logging.root.removeHandler(handler)
    t0 = time.time()
    class MyFormatter(logging.Formatter):
        # Prefix every record with the elapsed time since setup_logging ran.
        def format(self, record):
            self._style._fmt = '[%09.2f] ' % (time.time() - t0) + ' %(asctime)s %(name)-28s %(levelname)-8s %(message)s'
            return super(MyFormatter, self).format(record)
    fmt = MyFormatter(datefmt='%m-%d %H:%M ')
    if filename is not None:
        mkdir(os.path.dirname(filename))
        handler = logging.FileHandler(filename, mode=filemode)
    else:
        handler = logging.StreamHandler(stream=stream)
    handler.setFormatter(fmt)
    logging.basicConfig(level=level, handlers=[handler], **kwargs)
    # Route uncaught exceptions through the logger as well.
    sys.excepthook = exception_handler
def exception_handler(exc_type, exc_value, exc_traceback):
    """Print an uncaught exception with a logger.

    Installed as ``sys.excepthook`` by :func:`setup_logging`."""
    # Do not print traceback if the exception has been handled and logged
    _logger_name = 'Exception'
    log = logging.getLogger(_logger_name)
    line = '=' * 100
    # log.critical(line[len(_logger_name) + 5:] + '\n' + ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)) + line)
    log.critical('\n' + line + '\n' + ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)) + line)
    if exc_type is KeyboardInterrupt:
        log.critical('Interrupted by the user.')
    else:
        # Typo fix: was "An error occured."
        log.critical('An error occurred.')
def mkdir(dirname):
    """Create ``dirname`` (and missing parents), silently ignoring
    :class:`OSError` — e.g. when the directory already exists."""
    try:
        os.makedirs(dirname)  # may race under MPI; errors are ignored
    except OSError:
        pass
|
florpiREPO_NAMEsunbirdPATH_START.@sunbird_extracted@sunbird-main@sunbird@utils.py@.PATH_END.py
|
{
"filename": "KuzminDiskPotential.py",
"repo_name": "jobovy/galpy",
"repo_path": "galpy_extracted/galpy-main/galpy/potential/KuzminDiskPotential.py",
"type": "Python"
}
|
###############################################################################
# KuzminDiskPotential.py: class that implements Kuzmin disk potential
#
# - amp
# Phi(R, z)= ---------------------------
# \sqrt{R^2 + (a + |z|)^2}
###############################################################################
import numpy
from ..util import conversion
from .Potential import Potential
class KuzminDiskPotential(Potential):
    """Class that implements the Kuzmin Disk potential

    .. math::

        \\Phi(R,z) = -\\frac{\\mathrm{amp}}{\\sqrt{R^2 + (a + |z|)^2}}

    with :math:`\\mathrm{amp} = GM` the total mass.
    """

    def __init__(self, amp=1.0, a=1.0, normalize=False, ro=None, vo=None):
        """
        Initialize a Kuzmin disk Potential.

        Parameters
        ----------
        amp : float or Quantity, optional
            Amplitude to be applied to the potential, the total mass. Can be a Quantity with units of mass or Gxmass.
        a : float or Quantity, optional
            Scale length.
        normalize : bool or float, optional
            If True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
        ro : float, optional
            Distance scale for translation into internal units (default from configuration file).
        vo : float, optional
            Velocity scale for translation into internal units (default from configuration file).

        Notes
        -----
        - 2016-05-09 - Written - Aladdin
        """
        Potential.__init__(self, amp=amp, ro=ro, vo=vo, amp_units="mass")
        a = conversion.parse_length(a, ro=self._ro)
        self._a = a ## a must be greater or equal to 0.
        # normalize may be True or a positive number (but True, not 1-as-bool).
        if normalize or (
            isinstance(normalize, (int, float)) and not isinstance(normalize, bool)
        ):
            self.normalize(normalize)
        # C implementations of this potential (and its dx/dv derivatives) exist.
        self.hasC = True
        self.hasC_dxdv = True
        return None

    def _evaluate(self, R, z, phi=0.0, t=0.0):
        """Potential at (R, z); phi and t are ignored (axisymmetric, static)."""
        return -(self._denom(R, z) ** -0.5)

    def _Rforce(self, R, z, phi=0.0, t=0.0):
        """Radial force (-dPhi/dR) at (R, z)."""
        return -(self._denom(R, z) ** -1.5) * R

    def _zforce(self, R, z, phi=0.0, t=0.0):
        """Vertical force (-dPhi/dz) at (R, z); odd in z via sign(z)."""
        return -numpy.sign(z) * self._denom(R, z) ** -1.5 * (self._a + numpy.fabs(z))

    def _R2deriv(self, R, z, phi=0.0, t=0.0):
        """Second radial derivative d^2 Phi / dR^2."""
        return self._denom(R, z) ** -1.5 - 3.0 * R**2 * self._denom(R, z) ** -2.5

    def _z2deriv(self, R, z, phi=0.0, t=0.0):
        """Second vertical derivative d^2 Phi / dz^2."""
        a = self._a
        return (
            self._denom(R, z) ** -1.5
            - 3.0 * (a + numpy.fabs(z)) ** 2.0 * self._denom(R, z) ** -2.5
        )

    def _Rzderiv(self, R, z, phi=0.0, t=0.0):
        """Mixed derivative d^2 Phi / (dR dz)."""
        return (
            -3
            * numpy.sign(z)
            * R
            * (self._a + numpy.fabs(z))
            * self._denom(R, z) ** -2.5
        )

    def _surfdens(self, R, z, phi=0.0, t=0.0):
        """Surface density at radius R (per unit amp)."""
        return self._a * (R**2 + self._a**2) ** -1.5 / 2.0 / numpy.pi

    def _mass(self, R, z=None, t=0.0):
        """Mass enclosed within cylindrical radius R (per unit amp)."""
        return 1.0 - self._a / numpy.sqrt(R**2.0 + self._a**2.0)

    def _denom(self, R, z):
        """Common denominator term R^2 + (a + |z|)^2."""
        return R**2.0 + (self._a + numpy.fabs(z)) ** 2.0
|
jobovyREPO_NAMEgalpyPATH_START.@galpy_extracted@galpy-main@galpy@potential@KuzminDiskPotential.py@.PATH_END.py
|
{
"filename": "train.md",
"repo_name": "ML4GW/aframe",
"repo_path": "aframe_extracted/aframe-main/docs/projects/train.md",
"type": "Markdown"
}
|
Train
=====
Training Aframe networks using [PyTorch Lightning](https://lightning.ai/docs/pytorch/stable/) and hyper-parameter tuning using [Ray Tune](https://docs.ray.io/en/latest/tune/index.html)
## Environment
The `train` project environment is managed by `poetry`.
In the root of the `train` project, run
```bash
apptainer build $AFRAME_CONTAINER_ROOT/train.sif apptainer.def
```
to build the `train` container.
This project can also be installed locally via
```
poetry install
```
## Scripts
The train project consists of two main executables
`train` - launch a single training job
`train.tune` - launch distributed hyper-parameter tuning using Ray
### Train
The training script takes advantage of [LightningCLI](https://lightning.ai/docs/pytorch/stable/cli/lightning_cli.html#lightning-cli) allowing for modularity and flexibility. One single training script supports
- Time domain and Frequency domain data representations
- Supervised and Semi-supervised optimization schemes
all by changing a configuration file. This is achieved by using a class hierarchy of [`DataModules`](https://lightning.ai/docs/pytorch/stable/data/datamodule.html) and [`LightningModules`](https://lightning.ai/docs/pytorch/stable/common/lightning_module.html) where core functionality
common to all use-cases is abstracted into base classes.
To see a list of arguments one can locally run
```bash
poetry run python -m train --help
```
or inside the container
```bash
apptainer run $AFRAME_CONTAINER_ROOT/train.sif python -m train --help
```
This list is quite exhaustive. It is suggested that you start from the default [configuration file](./config.yaml).
#### Example: Training Aframe
> **Note** It is assumed you have generated a training dataset via the [data project example](../data/README.md#example-generating-training-data)
The following will launch a training run using GPU 0
```bash
mkdir ~/aframe/results
APPTAINERENV_CUDA_VISIBLE_DEVICES=0 apptainer run --nv $AFRAME_CONTAINER_ROOT/train.sif \
python -m train \
--config /opt/aframe/projects/train/config.yaml \
--data.ifos=[H1,L1] \
--data.data_dir ~/aframe/data/train \
--trainer.logger=WandbLogger \
--trainer.logger.project=aframe \
--trainer.logger.name=my-first-run \
--trainer.logger.save_dir=~/aframe/results/my-first-run
```
This will infer most of your training arguments from the YAML config that got put into the container at build time. If you want to change this config, or if you change any code and you want to see those changes reflected inside the container, you can "bind" your local version of the [root](../../) `Aframe` repository into the container by including `apptainer run --bind .:/opt/aframe` at the beginning of the above command.
You can even train using multiple GPUS for free! Just specify a list of comma-separated GPU indices to `APPTAINERENV_CUDA_VISIBLE_DEVICES`.
##### Weights & Biases (WandB)
`Aframe` uses [WandB](https://docs.wandb.ai/?_gl=1*csft4n*_ga*Njk1NDUzNjcyLjE3MTI4NDYyNTA.*_ga_JH1SJHJQXJ*MTcxMzI4NzY0NC4yOC4xLjE3MTMyODc2NDUuNTkuMC4w) for experiment tracking. WandB already has built-in integration with lightning.
You can assign various attributes to your W&B logger
- name: name the run will be assigned
- group: group to which the run will be assigned. This is useful for runs that are part of the same experiment but execute in different scripts, e.g. a hyperparameter sweep or maybe separate train, inference, and evaluation scripts
- tags: comma-separated list of tags to give your run. Makes it easy to filter in the dashboard e.g. for autoencoder runs
- project: the workspace consisting of multiple related experiments that your run is a part of, e.g. aframe
- entity: the group managing the experiments your run is associated, e.g. ml4gw. If left blank, the project and run will be associated with your personal account
> **_Note_** All the attributes above can also be configured via [environment variables](https://docs.wandb.ai/guides/track/environment-variables#optional-environment-variables)
Once your run is started, you can go to [wandb.ai](https://wandb.ai) and track your loss and validation score. If you don't want to track your run with W&B, just remove the first three `--trainer` arguments above. This will save your training metrics to a local CSV in the `save_dir`.
### Tune
In addition, the train project consists of a tuning script for performing a distributed hyper-parameter search with Ray Tune.
It is recommended that multiple GPU's are available for an efficient search.
A local tune job can be launched with
```
APPTAINERENV_CUDA_VISIBLE_DEVICES=<IDs of GPUs to tune on> apptainer run --nv $AFRAME_CONTAINER_ROOT/train.sif \
python -m train.tune \
--config /opt/aframe/projects/train/config.yaml
--data.ifos=[H1,L1]
--data.data_dir ~/aframe/data/train
--trainer.logger=WandbLogger
--trainer.logger.project=aframe
--trainer.logger.save_dir=~/aframe/results/my-first-tune \
--tune.name my-first-tune \
--tune.storage_dir ~/aframe/results/my-first-tune \
--tune.temp_dir ~/aframe/results/my-first-tune/ray \
--tune.num_samples 8 \
--tune.cpus_per_gpu 6 \
--tune.gpus_per_worker 1 \
--tune.num_workers 4
```
This will launch 8 hyperparameter search jobs that will execute on 4 workers using the Asynchronous Successive Halving Algorithm (ASHA).
All the runs will be given the same **group** ID in W&B, and will be assigned random names in that group.
**NOTE: for some reason, right now this will launch one job at a time that takes all available GPUs. This needs sorting out**
If you already have a ray cluster running somewhere, you can distribute your jobs over that cluster by simply adding the `--tune.endpoint <ip address of ray cluster>:10001` command line argument.
Similarly, to see a list of arguments one can locally run
```bash
poetry run python -m train.tune --help
```
or inside the container
```bash
apptainer run $AFRAME_CONTAINER_ROOT/train.sif python -m train.tune --help
```
|
ML4GWREPO_NAMEaframePATH_START.@aframe_extracted@aframe-main@docs@projects@train.md@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/layout/polar/radialaxis/title/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
    """
    Font settings for the title of a polar subplot's radial axis.

    Property values are stored and validated through the plotly
    compound-object item protocol (``self["name"]``) provided by the
    base class.
    """

    # Location of this object inside the figure hierarchy.
    _parent_path_str = "layout.polar.radialaxis.title"
    _path_str = "layout.polar.radialaxis.title.font"

    # Complete set of property names this object accepts.
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }

    @property
    def color(self):
        """
        Sets the font color.

        The 'color' property is a color and may be specified as a hex
        string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or hsv/hsva
        string, or any named CSS color.

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The browser can only apply a font available on the
        system it runs on; provide multiple comma-separated families to
        express a preference order. The Chart Studio Cloud image server
        only has a limited font set installed: "Arial", "Balto",
        "Courier New", "Droid Sans", "Droid Serif", "Droid Sans Mono",
        "Gravitas One", "Old Standard TT", "Open Sans", "Overpass",
        "PT Sans Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as a
        non-empty string.

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    @property
    def lineposition(self):
        """
        Sets the kind of decoration line(s) with text, such as an
        "under", "over" or "through" as well as combinations e.g.
        "under+over", etc.

        The 'lineposition' property is a flaglist: any combination of
        ['under', 'over', 'through'] joined with '+' characters
        (e.g. 'under+over'), OR exactly one of ['none'].

        Returns
        -------
        Any
        """
        return self["lineposition"]

    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val

    @property
    def shadow(self):
        """
        Sets the shape and color of the shadow behind text. "auto"
        places minimal shadow and applies contrast text font color. See
        https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
        for additional options.

        The 'shadow' property is a string (numbers are converted to
        strings).

        Returns
        -------
        str
        """
        return self["shadow"]

    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val

    @property
    def size(self):
        """
        The 'size' property is a number in the interval [1, inf].

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def style(self):
        """
        Sets whether a font should be styled with a normal or italic
        face from its family.

        The 'style' property is an enumeration: one of
        ['normal', 'italic'].

        Returns
        -------
        Any
        """
        return self["style"]

    @style.setter
    def style(self, val):
        self["style"] = val

    @property
    def textcase(self):
        """
        Sets capitalization of text. It can be used to make text appear
        in all-uppercase or all-lowercase, or with each word
        capitalized.

        The 'textcase' property is an enumeration: one of
        ['normal', 'word caps', 'upper', 'lower'].

        Returns
        -------
        Any
        """
        return self["textcase"]

    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val

    @property
    def variant(self):
        """
        Sets the variant of the font.

        The 'variant' property is an enumeration: one of
        ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps',
        'petite-caps', 'unicase'].

        Returns
        -------
        Any
        """
        return self["variant"]

    @variant.setter
    def variant(self, val):
        self["variant"] = val

    @property
    def weight(self):
        """
        Sets the weight (or boldness) of the font.

        The 'weight' property is an integer in the interval [1, 1000],
        OR exactly one of ['normal', 'bold'].

        Returns
        -------
        int
        """
        return self["weight"]

    @weight.setter
    def weight(self, val):
        self["weight"] = val

    @property
    def _prop_descriptions(self):
        # NOTE: this text is consumed verbatim by the figure help
        # machinery; keep it in sync with the property docstrings.
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Font object.

        Sets this axis' title font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.layout.polar.r
            adialaxis.title.Font`
        color
            Font color.
        family
            HTML font family (comma-separated preference list).
        lineposition
            Decoration line(s) for the text ("under", "over",
            "through", combinations joined with "+", or "none").
        shadow
            Shape and color of the shadow behind text.
        size
            Font size, a number >= 1.
        style
            'normal' or 'italic'.
        textcase
            Capitalization: 'normal', 'word caps', 'upper', 'lower'.
        variant
            Font variant (e.g. 'normal', 'small-caps', ...).
        weight
            Font weight: integer in [1, 1000] or 'normal'/'bold'.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        # Support being constructed as a child of an existing parent
        # object (internal plotly mechanism).
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.polar.radialaxis.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.polar.radialaxis.title.Font`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the data dict. Explicit keyword arguments take
        # precedence over the corresponding entries in ``arg``; every
        # known key is popped from ``arg`` either way.
        for prop_name, prop_val in (
            ("color", color),
            ("family", family),
            ("lineposition", lineposition),
            ("shadow", shadow),
            ("size", size),
            ("style", style),
            ("textcase", textcase),
            ("variant", variant),
            ("weight", weight),
        ):
            candidate = arg.pop(prop_name, None)
            if prop_val is not None:
                candidate = prop_val
            if candidate is not None:
                self[prop_name] = candidate

        # Forward leftover ``arg`` entries plus unknown kwargs to the
        # base-class handler.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@layout@polar@radialaxis@title@_font.py@.PATH_END.py
|
{
"filename": "_make.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/attrs/py2/attr/_make.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, print_function
import copy
import inspect
import linecache
import sys
import threading
import uuid
import warnings
from operator import itemgetter
from . import _config, setters
from ._compat import (
PY2,
PYPY,
isclass,
iteritems,
metadata_proxy,
new_class,
ordered_dict,
set_closure_cell,
)
from .exceptions import (
DefaultAlreadySetError,
FrozenInstanceError,
NotAnAttrsClassError,
PythonTooOldError,
UnannotatedAttributeError,
)
if not PY2:
import typing
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
# Name templates for generated converter/factory helpers injected into
# generated __init__ globals.
_init_converter_pat = "__attr_converter_%s"
_init_factory_pat = "__attr_factory_{}"
# Source-line template for one property of a generated tuple subclass
# (see _make_attr_tuple_class).
_tuple_property_pat = (
    " {attr_name} = _attrs_property(_attrs_itemgetter({index}))"
)
# String prefixes that identify a typing.ClassVar annotation without
# having to evaluate it (see _is_class_var).
_classvar_prefixes = (
    "typing.ClassVar",
    "t.ClassVar",
    "ClassVar",
    "typing_extensions.ClassVar",
)
# we don't use a double-underscore prefix because that triggers
# name mangling when trying to create a slot for the field
# (when slots=True)
_hash_cache_field = "_attrs_cached_hash"

# Shared immutable mapping used as the default (empty) attribute metadata.
_empty_metadata_singleton = metadata_proxy({})

# Unique object for unequivocal getattr() defaults.
_sentinel = object()
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
``_Nothing`` is a singleton. There is only ever one of it.
.. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False.
"""
_singleton = None
def __new__(cls):
if _Nothing._singleton is None:
_Nothing._singleton = super(_Nothing, cls).__new__(cls)
return _Nothing._singleton
def __repr__(self):
return "NOTHING"
def __bool__(self):
return False
def __len__(self):
return 0 # __bool__ for Python 2
# The canonical sentinel instance; always compare against it with ``is``.
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
class _CacheHashWrapper(int):
    """
    An integer subclass that pickles / copies as None

    This is used for non-slots classes with ``cache_hash=True``, to avoid
    serializing a potentially (even likely) invalid hash value. Since ``None``
    is the default value for uncalculated hashes, whenever this is copied,
    the copy's value for the hash should automatically reset.

    See GH #613 for more details.
    """

    if PY2:
        # For some reason `type(None)` isn't callable in Python 2, but we don't
        # actually need a constructor for None objects, we just need any
        # available function that returns None.
        def __reduce__(self, _none_constructor=getattr, _args=(0, "", None)):
            # getattr(0, "", None) evaluates to None, so unpickling/copying
            # yields None instead of the cached hash value.
            return _none_constructor, _args

    else:

        def __reduce__(self, _none_constructor=type(None), _args=()):
            # type(None)() evaluates to None, so unpickling/copying yields
            # None instead of the cached hash value.
            return _none_constructor, _args
def attrib(
    default=NOTHING,
    validator=None,
    repr=True,
    cmp=None,
    hash=None,
    init=True,
    metadata=None,
    type=None,
    converter=None,
    factory=None,
    kw_only=False,
    eq=None,
    order=None,
    on_setattr=None,
):
    """
    Create a new attribute on a class.

    .. warning::

        Does *not* do anything unless the class is also decorated with
        `attr.s`!

    :param default: Value used by ``attrs``-generated ``__init__`` methods
        when the attribute isn't passed while instantiating, or when it is
        excluded with ``init=False``.  An instance of `Factory` is called
        to build a fresh value (useful for mutable defaults such as lists
        or dicts).  If no default is set (or it is `attr.NOTHING`), a value
        *must* be supplied at instantiation time or a `TypeError` is
        raised.  May also be set via decorator notation.
    :type default: Any value
    :param callable factory: Syntactic sugar for
        ``default=attr.Factory(factory)``.
    :param validator: A `callable` invoked by ``attrs``-generated
        ``__init__`` methods with the initialized instance, the
        `Attribute`, and the passed value.  The return value is ignored;
        the validator must raise to reject a value.  A `list` of callables
        means all of them must pass.  Validators can be globally toggled
        with `get_run_validators` and may also be set via decorator
        notation.
    :type validator: `callable` or a `list` of `callable`\\ s.
    :param repr: Include this attribute in the generated ``__repr__``.
        ``True``/``False`` include/omit it; a ``callable`` formats the
        value instead of the built-in ``repr()`` (its result is used
        verbatim).
    :type repr: a `bool` or a `callable` to use a custom function.
    :param eq: If ``True`` (default), include this attribute in the
        generated ``__eq__`` and ``__ne__`` methods.  A ``callable``
        transforms the value before comparison.
    :type eq: a `bool` or a `callable`.
    :param order: If ``True`` (default), include this attribute in the
        generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__``
        methods.  A ``callable`` transforms the value before ordering.
    :type order: a `bool` or a `callable`.
    :param cmp: Shortcut that sets both *eq* and *order* to the same
        value.  Must not be mixed with *eq* or *order*.
    :type cmp: a `bool` or a `callable`.
    :param Optional[bool] hash: Include this attribute in the generated
        ``__hash__`` method.  ``None`` (default) mirrors *eq*, which is
        the correct behavior per the Python spec; other values are
        *discouraged*.
    :param bool init: Include this attribute in the generated
        ``__init__``.  May be ``False`` together with a default/factory,
        in which case the attribute is unconditionally initialized from
        it.
    :param callable converter: Called by ``attrs``-generated ``__init__``
        methods with the passed-in value; its return value becomes the
        attribute's value.  Conversion happens *before* validation.
    :param metadata: An arbitrary mapping, to be used by third-party
        components.  See `extending_metadata`.
    :param type: The attribute's type.  On Python 3.6+ prefer a variable
        annotation (PEP 526).  Stored on ``Attribute.type`` either way;
        ``attrs`` itself does nothing with it.
    :param kw_only: Make this attribute keyword-only (Python 3+) in the
        generated ``__init__`` (ignored when ``init=False``).
    :param on_setattr: Overrides the *on_setattr* setting from `attr.s`
        for this attribute; `attr.setters.NO_OP` disables all ``setattr``
        hooks for it.
    :type on_setattr: `callable`, or a list of callables, or `None`, or
        `attr.setters.NO_OP`

    .. versionadded:: 15.2.0 *convert*
    .. versionadded:: 16.3.0 *metadata*
    .. versionchanged:: 17.1.0 *validator* can be a ``list``;
       *hash* is ``None`` and therefore mirrors *eq* by default.
    .. versionadded:: 17.3.0 *type*
    .. versionadded:: 17.4.0 *converter* (replacing the deprecated
       *convert*).
    .. versionadded:: 18.1.0 ``factory=f`` as sugar for
       ``default=attr.Factory(f)``.
    .. versionadded:: 18.2.0 *kw_only*
    .. versionchanged:: 19.2.0 *convert* removed; *repr* also accepts a
       custom callable.
    .. versionadded:: 19.2.0 *eq* and *order*
    .. versionadded:: 20.1.0 *on_setattr*
    .. versionchanged:: 20.3.0 *kw_only* backported to Python 2
    .. versionchanged:: 21.1.0 *eq*, *order*, and *cmp* also accept a
       custom callable; *cmp* undeprecated
    """
    eq, eq_key, order, order_key = _determine_attrib_eq_order(
        cmp, eq, order, True
    )

    # Identity checks on purpose: truthy non-bools (e.g. 1) must be
    # rejected even though they compare equal to True.
    if not (hash is None or hash is True or hash is False):
        raise TypeError(
            "Invalid value for hash. Must be True, False, or None."
        )

    if factory is not None:
        if default is not NOTHING:
            raise ValueError(
                "The `default` and `factory` arguments are mutually "
                "exclusive."
            )
        if not callable(factory):
            raise ValueError("The `factory` argument must be a callable.")
        default = Factory(factory)

    metadata = {} if metadata is None else metadata

    # Syntactic sugar: wrap lists/tuples of hooks into a single callable.
    if isinstance(on_setattr, (list, tuple)):
        on_setattr = setters.pipe(*on_setattr)
    if validator and isinstance(validator, (list, tuple)):
        validator = and_(*validator)
    if converter and isinstance(converter, (list, tuple)):
        converter = pipe(*converter)

    return _CountingAttr(
        default=default,
        validator=validator,
        repr=repr,
        cmp=None,
        hash=hash,
        init=init,
        converter=converter,
        metadata=metadata,
        type=type,
        kw_only=kw_only,
        eq=eq,
        eq_key=eq_key,
        order=order,
        order_key=order_key,
        on_setattr=on_setattr,
    )
def _compile_and_eval(script, globs, locs=None, filename=""):
"""
"Exec" the script with the given global (globs) and local (locs) variables.
"""
bytecode = compile(script, filename, "exec")
eval(bytecode, globs, locs)
def _make_method(name, script, filename, globs=None):
    """
    Compile *script*, execute it, and return the function object named
    *name* that it defines.
    """
    if globs is None:
        globs = {}
    locs = {}
    _compile_and_eval(script, globs, locs, filename)

    # Register a fake linecache entry so debuggers like pdb can step
    # through the generated source.
    linecache.cache[filename] = (
        len(script),
        None,
        script.splitlines(True),
        filename,
    )

    return locs[name]
def _make_attr_tuple_class(cls_name, attr_names):
    """
    Create a tuple subclass to hold `Attribute`s for an `attrs` class.

    The subclass is a bare tuple with one named property per entry in
    *attr_names*::

        class MyClassAttributes(tuple):
            __slots__ = ()
            x = property(itemgetter(0))
    """
    attr_class_name = "{}Attributes".format(cls_name)
    source_lines = [
        "class {}(tuple):".format(attr_class_name),
        " __slots__ = ()",
    ]
    if attr_names:
        source_lines.extend(
            _tuple_property_pat.format(index=i, attr_name=name)
            for i, name in enumerate(attr_names)
        )
    else:
        source_lines.append(" pass")

    globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
    _compile_and_eval("\n".join(source_lines), globs)
    return globs[attr_class_name]
# Tuple class for extracted attributes from a class definition.
# `base_attrs` is a subset of `attrs`.
# Instances are produced by _transform_attrs below.
_Attributes = _make_attr_tuple_class(
    "_Attributes",
    [
        # all attributes to build dunder methods for
        "attrs",
        # attributes that have been inherited
        "base_attrs",
        # map inherited attributes to their originating classes
        "base_attrs_map",
    ],
)
def _is_class_var(annot):
    """
    Check whether *annot* is a typing.ClassVar.

    The string comparison hack is used to avoid evaluating all string
    annotations which would put attrs-based classes at a performance
    disadvantage compared to plain old classes.
    """
    annot = str(annot)

    # The annotation itself may be a quoted forward-reference string.
    is_quoted = annot.startswith(("'", '"')) and annot.endswith(("'", '"'))
    if is_quoted:
        annot = annot[1:-1]

    return annot.startswith(_classvar_prefixes)
def _has_own_attribute(cls, attrib_name):
    """
    Check whether *cls* defines *attrib_name* (and doesn't just inherit it).

    Requires Python 3.
    """
    value = getattr(cls, attrib_name, _sentinel)
    if value is _sentinel:
        return False

    # If any ancestor exposes the very same object, it is inherited.
    return all(
        getattr(base_cls, attrib_name, None) is not value
        for base_cls in cls.__mro__[1:]
    )
def _get_annotations(cls):
    """
    Return the annotations *cls* itself defines (``{}`` if it only
    inherits them).
    """
    if not _has_own_attribute(cls, "__annotations__"):
        return {}
    return cls.__annotations__
def _counter_getter(e):
"""
Key function for sorting to avoid re-creating a lambda for every class.
"""
return e[1].counter
def _collect_base_attrs(cls, taken_attr_names):
"""
Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
"""
base_attrs = []
base_attr_map = {} # A dictionary of base attrs to their classes.
# Traverse the MRO and collect attributes.
for base_cls in reversed(cls.__mro__[1:-1]):
for a in getattr(base_cls, "__attrs_attrs__", []):
if a.inherited or a.name in taken_attr_names:
continue
a = a.evolve(inherited=True)
base_attrs.append(a)
base_attr_map[a.name] = base_cls
# For each name, only keep the freshest definition i.e. the furthest at the
# back. base_attr_map is fine because it gets overwritten with every new
# instance.
filtered = []
seen = set()
for a in reversed(base_attrs):
if a.name in seen:
continue
filtered.insert(0, a)
seen.add(a.name)
return filtered, base_attr_map
def _collect_base_attrs_broken(cls, taken_attr_names):
"""
Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
N.B. *taken_attr_names* will be mutated.
Adhere to the old incorrect behavior.
Notably it collects from the front and considers inherited attributes which
leads to the buggy behavior reported in #428.
"""
base_attrs = []
base_attr_map = {} # A dictionary of base attrs to their classes.
# Traverse the MRO and collect attributes.
for base_cls in cls.__mro__[1:-1]:
for a in getattr(base_cls, "__attrs_attrs__", []):
if a.name in taken_attr_names:
continue
a = a.evolve(inherited=True)
taken_attr_names.add(a.name)
base_attrs.append(a)
base_attr_map[a.name] = base_cls
return base_attrs, base_attr_map
def _transform_attrs(
    cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
):
    """
    Transform all `_CountingAttr`s on a class into `Attribute`s.

    If *these* is passed, use that and don't look for them on the class.

    If *collect_by_mro* is True, collect them in the correct MRO order,
    otherwise use the old -- incorrect -- order.  See #428.

    Return an `_Attributes`.
    """
    cd = cls.__dict__
    anns = _get_annotations(cls)

    if these is not None:
        # Explicit attribute dict passed via `attr.s(these=...)`.
        ca_list = [(name, ca) for name, ca in iteritems(these)]

        if not isinstance(these, ordered_dict):
            # Plain dicts may not preserve order; fall back to the
            # creation counter of each _CountingAttr.
            ca_list.sort(key=_counter_getter)
    elif auto_attribs is True:
        # Annotation-driven collection: every annotated, non-ClassVar
        # name becomes an attribute; bare attr.ib()s without an
        # annotation are an error.
        ca_names = {
            name
            for name, attr in cd.items()
            if isinstance(attr, _CountingAttr)
        }
        ca_list = []
        annot_names = set()
        for attr_name, type in anns.items():
            if _is_class_var(type):
                continue
            annot_names.add(attr_name)
            a = cd.get(attr_name, NOTHING)

            if not isinstance(a, _CountingAttr):
                # A plain class-level value becomes the default.
                if a is NOTHING:
                    a = attrib()
                else:
                    a = attrib(default=a)
            ca_list.append((attr_name, a))

        unannotated = ca_names - annot_names
        if len(unannotated) > 0:
            raise UnannotatedAttributeError(
                "The following `attr.ib`s lack a type annotation: "
                + ", ".join(
                    sorted(unannotated, key=lambda n: cd.get(n).counter)
                )
                + "."
            )
    else:
        # Classic collection: every _CountingAttr on the class, ordered
        # by creation counter.
        ca_list = sorted(
            (
                (name, attr)
                for name, attr in cd.items()
                if isinstance(attr, _CountingAttr)
            ),
            key=lambda e: e[1].counter,
        )

    own_attrs = [
        Attribute.from_counting_attr(
            name=attr_name, ca=ca, type=anns.get(attr_name)
        )
        for attr_name, ca in ca_list
    ]

    if collect_by_mro:
        base_attrs, base_attr_map = _collect_base_attrs(
            cls, {a.name for a in own_attrs}
        )
    else:
        base_attrs, base_attr_map = _collect_base_attrs_broken(
            cls, {a.name for a in own_attrs}
        )

    attr_names = [a.name for a in base_attrs + own_attrs]

    AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)

    if kw_only:
        own_attrs = [a.evolve(kw_only=True) for a in own_attrs]
        base_attrs = [a.evolve(kw_only=True) for a in base_attrs]

    attrs = AttrsClass(base_attrs + own_attrs)

    # Mandatory vs non-mandatory attr order only matters when they are part of
    # the __init__ signature and when they aren't kw_only (which are moved to
    # the end and can be mandatory or non-mandatory in any order, as they will
    # be specified as keyword args anyway). Check the order of those attrs:
    had_default = False
    for a in (a for a in attrs if a.init is not False and a.kw_only is False):
        if had_default is True and a.default is NOTHING:
            raise ValueError(
                "No mandatory attributes allowed after an attribute with a "
                "default value or factory. Attribute in question: %r" % (a,)
            )

        if had_default is False and a.default is not NOTHING:
            had_default = True

    if field_transformer is not None:
        attrs = field_transformer(cls, attrs)
    return _Attributes((attrs, base_attrs, base_attr_map))
if PYPY:

    def _frozen_setattrs(self, name, value):
        """
        Attached to frozen classes as __setattr__.
        """
        # NOTE(review): on PyPy the exception machinery appears to need
        # __cause__/__context__ to remain settable on frozen exception
        # classes, hence this carve-out -- confirm against PyPy docs.
        if isinstance(self, BaseException) and name in (
            "__cause__",
            "__context__",
        ):
            BaseException.__setattr__(self, name, value)
            return

        raise FrozenInstanceError()

else:

    def _frozen_setattrs(self, name, value):
        """
        Attached to frozen classes as __setattr__.
        """
        raise FrozenInstanceError()


def _frozen_delattrs(self, name):
    """
    Attached to frozen classes as __delattr__.
    """
    raise FrozenInstanceError()
class _ClassBuilder(object):
"""
Iteratively build *one* class.
"""
__slots__ = (
"_attr_names",
"_attrs",
"_base_attr_map",
"_base_names",
"_cache_hash",
"_cls",
"_cls_dict",
"_delete_attribs",
"_frozen",
"_has_pre_init",
"_has_post_init",
"_is_exc",
"_on_setattr",
"_slots",
"_weakref_slot",
"_has_own_setattr",
"_has_custom_setattr",
)
def __init__(
self,
cls,
these,
slots,
frozen,
weakref_slot,
getstate_setstate,
auto_attribs,
kw_only,
cache_hash,
is_exc,
collect_by_mro,
on_setattr,
has_custom_setattr,
field_transformer,
):
attrs, base_attrs, base_map = _transform_attrs(
cls,
these,
auto_attribs,
kw_only,
collect_by_mro,
field_transformer,
)
self._cls = cls
self._cls_dict = dict(cls.__dict__) if slots else {}
self._attrs = attrs
self._base_names = set(a.name for a in base_attrs)
self._base_attr_map = base_map
self._attr_names = tuple(a.name for a in attrs)
self._slots = slots
self._frozen = frozen
self._weakref_slot = weakref_slot
self._cache_hash = cache_hash
self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False))
self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
self._delete_attribs = not bool(these)
self._is_exc = is_exc
self._on_setattr = on_setattr
self._has_custom_setattr = has_custom_setattr
self._has_own_setattr = False
self._cls_dict["__attrs_attrs__"] = self._attrs
if frozen:
self._cls_dict["__setattr__"] = _frozen_setattrs
self._cls_dict["__delattr__"] = _frozen_delattrs
self._has_own_setattr = True
if getstate_setstate:
(
self._cls_dict["__getstate__"],
self._cls_dict["__setstate__"],
) = self._make_getstate_setstate()
def __repr__(self):
return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
def build_class(self):
"""
Finalize class based on the accumulated configuration.
Builder cannot be used after calling this method.
"""
if self._slots is True:
return self._create_slots_class()
else:
return self._patch_original_class()
def _patch_original_class(self):
"""
Apply accumulated methods and return the class.
"""
cls = self._cls
base_names = self._base_names
# Clean class of attribute definitions (`attr.ib()`s).
if self._delete_attribs:
for name in self._attr_names:
if (
name not in base_names
and getattr(cls, name, _sentinel) is not _sentinel
):
try:
delattr(cls, name)
except AttributeError:
# This can happen if a base class defines a class
# variable and we want to set an attribute with the
# same name by using only a type annotation.
pass
# Attach our dunder methods.
for name, value in self._cls_dict.items():
setattr(cls, name, value)
# If we've inherited an attrs __setattr__ and don't write our own,
# reset it to object's.
if not self._has_own_setattr and getattr(
cls, "__attrs_own_setattr__", False
):
cls.__attrs_own_setattr__ = False
if not self._has_custom_setattr:
cls.__setattr__ = object.__setattr__
return cls
    def _create_slots_class(self):
        """
        Build and return a new class with a `__slots__` attribute.
        """
        # Start from the collected class dict, minus the attribute
        # placeholders and the __dict__/__weakref__ entries a slotted
        # class must not define.
        cd = {
            k: v
            for k, v in iteritems(self._cls_dict)
            if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
        }

        # If our class doesn't have its own implementation of __setattr__
        # (either from the user or by us), check the bases, if one of them has
        # an attrs-made __setattr__, that needs to be reset. We don't walk the
        # MRO because we only care about our immediate base classes.
        # XXX: This can be confused by subclassing a slotted attrs class with
        # XXX: a non-attrs class and subclass the resulting class with an attrs
        # XXX: class. See `test_slotted_confused` for details. For now that's
        # XXX: OK with us.
        if not self._has_own_setattr:
            cd["__attrs_own_setattr__"] = False
            if not self._has_custom_setattr:
                for base_cls in self._cls.__bases__:
                    if base_cls.__dict__.get("__attrs_own_setattr__", False):
                        cd["__setattr__"] = object.__setattr__
                        break

        # Traverse the MRO to collect existing slots
        # and check for an existing __weakref__.
        existing_slots = dict()
        weakref_inherited = False
        for base_cls in self._cls.__mro__[1:-1]:
            if base_cls.__dict__.get("__weakref__", None) is not None:
                weakref_inherited = True
            existing_slots.update(
                {
                    name: getattr(base_cls, name)
                    for name in getattr(base_cls, "__slots__", [])
                }
            )

        base_names = set(self._base_names)

        names = self._attr_names
        # Add a __weakref__ slot only when nothing in the hierarchy
        # provides one already.
        if (
            self._weakref_slot
            and "__weakref__" not in getattr(self._cls, "__slots__", ())
            and "__weakref__" not in names
            and not weakref_inherited
        ):
            names += ("__weakref__",)

        # We only add the names of attributes that aren't inherited.
        # Setting __slots__ to inherited attributes wastes memory.
        slot_names = [name for name in names if name not in base_names]
        # There are slots for attributes from current class
        # that are defined in parent classes.
        # As their descriptors may be overriden by a child class,
        # we collect them here and update the class dict
        reused_slots = {
            slot: slot_descriptor
            for slot, slot_descriptor in iteritems(existing_slots)
            if slot in slot_names
        }
        slot_names = [name for name in slot_names if name not in reused_slots]
        cd.update(reused_slots)
        if self._cache_hash:
            slot_names.append(_hash_cache_field)
        cd["__slots__"] = tuple(slot_names)

        qualname = getattr(self._cls, "__qualname__", None)
        if qualname is not None:
            cd["__qualname__"] = qualname

        # Create new class based on old class and our methods.
        cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)

        # The following is a fix for
        # https://github.com/python-attrs/attrs/issues/102. On Python 3,
        # if a method mentions `__class__` or uses the no-arg super(), the
        # compiler will bake a reference to the class in the method itself
        # as `method.__closure__`. Since we replace the class with a
        # clone, we rewrite these references so it keeps working.
        for item in cls.__dict__.values():
            if isinstance(item, (classmethod, staticmethod)):
                # Class- and staticmethods hide their functions inside.
                # These might need to be rewritten as well.
                closure_cells = getattr(item.__func__, "__closure__", None)
            elif isinstance(item, property):
                # Workaround for property `super()` shortcut (PY3-only).
                # There is no universal way for other descriptors.
                closure_cells = getattr(item.fget, "__closure__", None)
            else:
                closure_cells = getattr(item, "__closure__", None)

            if not closure_cells:  # Catch None or the empty list.
                continue
            for cell in closure_cells:
                try:
                    match = cell.cell_contents is self._cls
                except ValueError:  # ValueError: Cell is empty
                    pass
                else:
                    if match:
                        set_closure_cell(cell, cls)

        return cls
def add_repr(self, ns):
self._cls_dict["__repr__"] = self._add_method_dunders(
_make_repr(self._attrs, ns=ns)
)
return self
def add_str(self):
repr = self._cls_dict.get("__repr__")
if repr is None:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
def __str__(self):
return self.__repr__()
self._cls_dict["__str__"] = self._add_method_dunders(__str__)
return self
def _make_getstate_setstate(self):
"""
Create custom __setstate__ and __getstate__ methods.
"""
# __weakref__ is not writable.
state_attr_names = tuple(
an for an in self._attr_names if an != "__weakref__"
)
def slots_getstate(self):
"""
Automatically created by attrs.
"""
return tuple(getattr(self, name) for name in state_attr_names)
hash_caching_enabled = self._cache_hash
def slots_setstate(self, state):
"""
Automatically created by attrs.
"""
__bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(state_attr_names, state):
__bound_setattr(name, value)
# The hash code cache is not included when the object is
# serialized, but it still needs to be initialized to None to
# indicate that the first call to __hash__ should be a cache
# miss.
if hash_caching_enabled:
__bound_setattr(_hash_cache_field, None)
return slots_getstate, slots_setstate
def make_unhashable(self):
self._cls_dict["__hash__"] = None
return self
def add_hash(self):
self._cls_dict["__hash__"] = self._add_method_dunders(
_make_hash(
self._cls,
self._attrs,
frozen=self._frozen,
cache_hash=self._cache_hash,
)
)
return self
def add_init(self):
self._cls_dict["__init__"] = self._add_method_dunders(
_make_init(
self._cls,
self._attrs,
self._has_pre_init,
self._has_post_init,
self._frozen,
self._slots,
self._cache_hash,
self._base_attr_map,
self._is_exc,
self._on_setattr is not None
and self._on_setattr is not setters.NO_OP,
attrs_init=False,
)
)
return self
def add_attrs_init(self):
self._cls_dict["__attrs_init__"] = self._add_method_dunders(
_make_init(
self._cls,
self._attrs,
self._has_pre_init,
self._has_post_init,
self._frozen,
self._slots,
self._cache_hash,
self._base_attr_map,
self._is_exc,
self._on_setattr is not None
and self._on_setattr is not setters.NO_OP,
attrs_init=True,
)
)
return self
def add_eq(self):
cd = self._cls_dict
cd["__eq__"] = self._add_method_dunders(
_make_eq(self._cls, self._attrs)
)
cd["__ne__"] = self._add_method_dunders(_make_ne())
return self
def add_order(self):
cd = self._cls_dict
cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
self._add_method_dunders(meth)
for meth in _make_order(self._cls, self._attrs)
)
return self
def add_setattr(self):
if self._frozen:
return self
sa_attrs = {}
for a in self._attrs:
on_setattr = a.on_setattr or self._on_setattr
if on_setattr and on_setattr is not setters.NO_OP:
sa_attrs[a.name] = a, on_setattr
if not sa_attrs:
return self
if self._has_custom_setattr:
# We need to write a __setattr__ but there already is one!
raise ValueError(
"Can't combine custom __setattr__ with on_setattr hooks."
)
# docstring comes from _add_method_dunders
def __setattr__(self, name, val):
try:
a, hook = sa_attrs[name]
except KeyError:
nval = val
else:
nval = hook(self, a, val)
_obj_setattr(self, name, nval)
self._cls_dict["__attrs_own_setattr__"] = True
self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
self._has_own_setattr = True
return self
def _add_method_dunders(self, method):
"""
Add __module__ and __qualname__ to a *method* if possible.
"""
try:
method.__module__ = self._cls.__module__
except AttributeError:
pass
try:
method.__qualname__ = ".".join(
(self._cls.__qualname__, method.__name__)
)
except AttributeError:
pass
try:
method.__doc__ = "Method generated by attrs for class %s." % (
self._cls.__qualname__,
)
except AttributeError:
pass
return method
# Shared deprecation message for the legacy *cmp* argument; module-level so
# the class- and attribute-level helpers can reference the same text.
_CMP_DEPRECATION = (
    "The usage of `cmp` is deprecated and will be removed on or after "
    "2021-06-01. Please use `eq` and `order` instead."
)
def _determine_attrs_eq_order(cmp, eq, order, default_eq):
"""
Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
values of eq and order. If *eq* is None, set it to *default_eq*.
"""
if cmp is not None and any((eq is not None, order is not None)):
raise ValueError("Don't mix `cmp` with `eq' and `order`.")
# cmp takes precedence due to bw-compatibility.
if cmp is not None:
return cmp, cmp
# If left None, equality is set to the specified default and ordering
# mirrors equality.
if eq is None:
eq = default_eq
if order is None:
order = eq
if eq is False and order is True:
raise ValueError("`order` can only be True if `eq` is True too.")
return eq, order
def _determine_attrib_eq_order(cmp, eq, order, default_eq):
"""
Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
values of eq and order. If *eq* is None, set it to *default_eq*.
"""
if cmp is not None and any((eq is not None, order is not None)):
raise ValueError("Don't mix `cmp` with `eq' and `order`.")
def decide_callable_or_boolean(value):
"""
Decide whether a key function is used.
"""
if callable(value):
value, key = True, value
else:
key = None
return value, key
# cmp takes precedence due to bw-compatibility.
if cmp is not None:
cmp, cmp_key = decide_callable_or_boolean(cmp)
return cmp, cmp_key, cmp, cmp_key
# If left None, equality is set to the specified default and ordering
# mirrors equality.
if eq is None:
eq, eq_key = default_eq, None
else:
eq, eq_key = decide_callable_or_boolean(eq)
if order is None:
order, order_key = eq, eq_key
else:
order, order_key = decide_callable_or_boolean(order)
if eq is False and order is True:
raise ValueError("`order` can only be True if `eq` is True too.")
return eq, eq_key, order, order_key
def _determine_whether_to_implement(
cls, flag, auto_detect, dunders, default=True
):
"""
Check whether we should implement a set of methods for *cls*.
*flag* is the argument passed into @attr.s like 'init', *auto_detect* the
same as passed into @attr.s and *dunders* is a tuple of attribute names
whose presence signal that the user has implemented it themselves.
Return *default* if no reason for either for or against is found.
auto_detect must be False on Python 2.
"""
if flag is True or flag is False:
return flag
if flag is None and auto_detect is False:
return default
# Logically, flag is None and auto_detect is True here.
for dunder in dunders:
if _has_own_attribute(cls, dunder):
return False
return default
def attrs(
    maybe_cls=None,
    these=None,
    repr_ns=None,
    repr=None,
    cmp=None,
    hash=None,
    init=None,
    slots=False,
    frozen=False,
    weakref_slot=True,
    str=False,
    auto_attribs=False,
    kw_only=False,
    cache_hash=False,
    auto_exc=False,
    eq=None,
    order=None,
    auto_detect=False,
    collect_by_mro=False,
    getstate_setstate=None,
    on_setattr=None,
    field_transformer=None,
):
    r"""
    A class decorator that adds `dunder
    <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
    specified attributes using `attr.ib` or the *these* argument.

    :param these: A dictionary of name to `attr.ib` mappings. This is
        useful to avoid the definition of your attributes within the class body
        because you can't (e.g. if you want to add ``__repr__`` methods to
        Django models) or don't want to.

        If *these* is not ``None``, ``attrs`` will *not* search the class body
        for attributes and will *not* remove any attributes from it.

        If *these* is an ordered dict (`dict` on Python 3.6+,
        `collections.OrderedDict` otherwise), the order is deduced from
        the order of the attributes inside *these*. Otherwise the order
        of the definition of the attributes is used.
    :type these: `dict` of `str` to `attr.ib`

    :param str repr_ns: When using nested classes, there's no way in Python 2
        to automatically detect that. Therefore it's possible to set the
        namespace explicitly for a more meaningful ``repr`` output.
    :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
        *order*, and *hash* arguments explicitly, assume they are set to
        ``True`` **unless any** of the involved methods for one of the
        arguments is implemented in the *current* class (i.e. it is *not*
        inherited from some base class).

        So for example by implementing ``__eq__`` on a class yourself,
        ``attrs`` will deduce ``eq=False`` and will create *neither*
        ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
        ``__ne__`` by default, so it *should* be enough to only implement
        ``__eq__`` in most cases).

        .. warning::

           If you prevent ``attrs`` from creating the ordering methods for you
           (``order=False``, e.g. by implementing ``__le__``), it becomes
           *your* responsibility to make sure its ordering is sound. The best
           way is to use the `functools.total_ordering` decorator.

        Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
        *cmp*, or *hash* overrides whatever *auto_detect* would determine.

        *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises
        a `PythonTooOldError`.

    :param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
    :param bool str: Create a ``__str__`` method that is identical to
        ``__repr__``. This is usually not necessary except for
        `Exception`\ s.
    :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
        and ``__ne__`` methods that check two instances for equality.

        They compare the instances as if they were tuples of their ``attrs``
        attributes if and only if the types of both classes are *identical*!
    :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
        ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
        allow instances to be ordered. If ``None`` (default) mirror value of
        *eq*.
    :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
        and *order* to the same value. Must not be mixed with *eq* or *order*.
    :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method
        is generated according how *eq* and *frozen* are set.

        1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
        2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
           None, marking it unhashable (which it is).
        3. If *eq* is False, ``__hash__`` will be left untouched meaning the
           ``__hash__`` method of the base class will be used (if base class is
           ``object``, this means it will fall back to id-based hashing.).

        Although not recommended, you can decide for yourself and force
        ``attrs`` to create one (e.g. if the class is immutable even though you
        didn't freeze it programmatically) by passing ``True`` or not. Both of
        these cases are rather special and should be used carefully.

        See our documentation on `hashing`, Python's documentation on
        `object.__hash__`, and the `GitHub issue that led to the default \
        behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
        details.
    :param bool init: Create a ``__init__`` method that initializes the
        ``attrs`` attributes. Leading underscores are stripped for the argument
        name. If a ``__attrs_pre_init__`` method exists on the class, it will
        be called before the class is initialized. If a ``__attrs_post_init__``
        method exists on the class, it will be called after the class is fully
        initialized.

        If ``init`` is ``False``, an ``__attrs_init__`` method will be
        injected instead. This allows you to define a custom ``__init__``
        method that can do pre-init work such as ``super().__init__()``,
        and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
    :param bool slots: Create a `slotted class <slotted classes>` that's more
        memory-efficient. Slotted classes are generally superior to the default
        dict classes, but have some gotchas you should know about, so we
        encourage you to read the `glossary entry <slotted classes>`.
    :param bool frozen: Make instances immutable after initialization. If
        someone attempts to modify a frozen instance,
        `attr.exceptions.FrozenInstanceError` is raised.

        .. note::

            1. This is achieved by installing a custom ``__setattr__`` method
               on your class, so you can't implement your own.

            2. True immutability is impossible in Python.

            3. This *does* have a minor a runtime performance `impact
               <how-frozen>` when initializing new instances. In other words:
               ``__init__`` is slightly slower with ``frozen=True``.

            4. If a class is frozen, you cannot modify ``self`` in
               ``__attrs_post_init__`` or a self-written ``__init__``. You can
               circumvent that limitation by using
               ``object.__setattr__(self, "attribute_name", value)``.

            5. Subclasses of a frozen class are frozen too.

    :param bool weakref_slot: Make instances weak-referenceable. This has no
        effect unless ``slots`` is also enabled.
    :param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated
        attributes (Python 3.6 and later only) from the class body.

        In this case, you **must** annotate every field. If ``attrs``
        encounters a field that is set to an `attr.ib` but lacks a type
        annotation, an `attr.exceptions.UnannotatedAttributeError` is
        raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
        want to set a type.

        If you assign a value to those attributes (e.g. ``x: int = 42``), that
        value becomes the default value like if it were passed using
        ``attr.ib(default=42)``. Passing an instance of `Factory` also
        works as expected in most cases (see warning below).

        Attributes annotated as `typing.ClassVar`, and attributes that are
        neither annotated nor set to an `attr.ib` are **ignored**.

        .. warning::
           For features that use the attribute name to create decorators (e.g.
           `validators <validators>`), you still *must* assign `attr.ib` to
           them. Otherwise Python will either not find the name or try to use
           the default value to call e.g. ``validator`` on it.

           These errors can be quite confusing and probably the most common bug
           report on our bug tracker.

        .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
    :param bool kw_only: Make all attributes keyword-only (Python 3+)
        in the generated ``__init__`` (if ``init`` is ``False``, this
        parameter is ignored).
    :param bool cache_hash: Ensure that the object's hash code is computed
        only once and stored on the object. If this is set to ``True``,
        hashing must be either explicitly or implicitly enabled for this
        class. If the hash code is cached, avoid any reassignments of
        fields involved in hash code computation or mutations of the objects
        those fields point to after object creation. If such changes occur,
        the behavior of the object's hash code is undefined.
    :param bool auto_exc: If the class subclasses `BaseException`
        (which implicitly includes any subclass of any exception), the
        following happens to behave like a well-behaved Python exceptions
        class:

        - the values for *eq*, *order*, and *hash* are ignored and the
          instances compare and hash by the instance's ids (N.B. ``attrs`` will
          *not* remove existing implementations of ``__hash__`` or the equality
          methods. It just won't add own ones.),
        - all attributes that are either passed into ``__init__`` or have a
          default value are additionally available as a tuple in the ``args``
          attribute,
        - the value of *str* is ignored leaving ``__str__`` to base classes.
    :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs``
        collects attributes from base classes. The default behavior is
        incorrect in certain cases of multiple inheritance. It should be on by
        default but is kept off for backward-compatibility.

        See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for
        more details.

    :param Optional[bool] getstate_setstate:
        .. note::
            This is usually only interesting for slotted classes and you should
            probably just set *auto_detect* to `True`.

        If `True`, ``__getstate__`` and
        ``__setstate__`` are generated and attached to the class. This is
        necessary for slotted classes to be pickleable. If left `None`, it's
        `True` by default for slotted classes and ``False`` for dict classes.

        If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
        and **either** ``__getstate__`` or ``__setstate__`` is detected directly
        on the class (i.e. not inherited), it is set to `False` (this is usually
        what you want).

    :param on_setattr: A callable that is run whenever the user attempts to set
        an attribute (either by assignment like ``i.x = 42`` or by using
        `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
        as validators: the instance, the attribute that is being modified, and
        the new value.

        If no exception is raised, the attribute is set to the return value of
        the callable.

        If a list of callables is passed, they're automatically wrapped in an
        `attr.setters.pipe`.

    :param Optional[callable] field_transformer:
        A function that is called with the original class object and all
        fields right before ``attrs`` finalizes the class. You can use
        this, e.g., to automatically add converters or validators to
        fields based on their types. See `transform-fields` for more details.

    .. versionadded:: 16.0.0 *slots*
    .. versionadded:: 16.1.0 *frozen*
    .. versionadded:: 16.3.0 *str*
    .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
    .. versionchanged:: 17.1.0
       *hash* supports ``None`` as value which is also the default now.
    .. versionadded:: 17.3.0 *auto_attribs*
    .. versionchanged:: 18.1.0
       If *these* is passed, no attributes are deleted from the class body.
    .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
    .. versionadded:: 18.2.0 *weakref_slot*
    .. deprecated:: 18.2.0
       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
       `DeprecationWarning` if the classes compared are subclasses of
       each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
       to each other.
    .. versionchanged:: 19.2.0
       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
       subclasses comparable anymore.
    .. versionadded:: 18.2.0 *kw_only*
    .. versionadded:: 18.2.0 *cache_hash*
    .. versionadded:: 19.1.0 *auto_exc*
    .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
    .. versionadded:: 19.2.0 *eq* and *order*
    .. versionadded:: 20.1.0 *auto_detect*
    .. versionadded:: 20.1.0 *collect_by_mro*
    .. versionadded:: 20.1.0 *getstate_setstate*
    .. versionadded:: 20.1.0 *on_setattr*
    .. versionadded:: 20.3.0 *field_transformer*
    .. versionchanged:: 21.1.0
       ``init=False`` injects ``__attrs_init__``
    .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
    .. versionchanged:: 21.1.0 *cmp* undeprecated
    """
    if auto_detect and PY2:
        raise PythonTooOldError(
            "auto_detect only works on Python 3 and later."
        )

    eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
    hash_ = hash  # work around the lack of nonlocal

    if isinstance(on_setattr, (list, tuple)):
        on_setattr = setters.pipe(*on_setattr)

    def wrap(cls):
        # Python 2 old-style classes have no __class__ attribute and are
        # not supported.
        if getattr(cls, "__class__", None) is None:
            raise TypeError("attrs only works with new-style classes.")

        is_frozen = frozen or _has_frozen_base_class(cls)
        is_exc = auto_exc is True and issubclass(cls, BaseException)
        has_own_setattr = auto_detect and _has_own_attribute(
            cls, "__setattr__"
        )

        if has_own_setattr and is_frozen:
            raise ValueError("Can't freeze a class with a custom __setattr__.")

        builder = _ClassBuilder(
            cls,
            these,
            slots,
            is_frozen,
            weakref_slot,
            _determine_whether_to_implement(
                cls,
                getstate_setstate,
                auto_detect,
                ("__getstate__", "__setstate__"),
                default=slots,
            ),
            auto_attribs,
            kw_only,
            cache_hash,
            is_exc,
            collect_by_mro,
            on_setattr,
            has_own_setattr,
            field_transformer,
        )
        if _determine_whether_to_implement(
            cls, repr, auto_detect, ("__repr__",)
        ):
            builder.add_repr(repr_ns)
        if str is True:
            builder.add_str()

        eq = _determine_whether_to_implement(
            cls, eq_, auto_detect, ("__eq__", "__ne__")
        )
        if not is_exc and eq is True:
            builder.add_eq()
        if not is_exc and _determine_whether_to_implement(
            cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
        ):
            builder.add_order()

        builder.add_setattr()

        # Resolve the effective *hash* value: a user-implemented __hash__
        # disables generation only when *hash* was left as None.
        if (
            hash_ is None
            and auto_detect is True
            and _has_own_attribute(cls, "__hash__")
        ):
            hash = False
        else:
            hash = hash_
        if hash is not True and hash is not False and hash is not None:
            # Can't use `hash in` because 1 == True for example.
            raise TypeError(
                "Invalid value for hash. Must be True, False, or None."
            )
        elif hash is False or (hash is None and eq is False) or is_exc:
            # Don't do anything. Should fall back to __object__'s __hash__
            # which is by id.
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash. To use hash caching,"
                    " hashing must be either explicitly or implicitly "
                    "enabled."
                )
        elif hash is True or (
            hash is None and eq is True and is_frozen is True
        ):
            # Build a __hash__ if told so, or if it's safe.
            builder.add_hash()
        else:
            # Raise TypeError on attempts to hash.
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash. To use hash caching,"
                    " hashing must be either explicitly or implicitly "
                    "enabled."
                )
            builder.make_unhashable()

        if _determine_whether_to_implement(
            cls, init, auto_detect, ("__init__",)
        ):
            builder.add_init()
        else:
            builder.add_attrs_init()
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash. To use hash caching,"
                    " init must be True."
                )

        return builder.build_class()

    # maybe_cls's type depends on the usage of the decorator. It's a class
    # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
    if maybe_cls is None:
        return wrap
    else:
        return wrap(maybe_cls)
# The alias must be created right after the decorator is defined, before any
# function below shadows the name with an *attrs* parameter.
_attrs = attrs
"""
Internal alias so we can use it in functions that take an argument called
*attrs*.
"""
if PY2:

    def _has_frozen_base_class(cls):
        """
        Check whether *cls* has a frozen ancestor by looking at its
        __setattr__.
        """
        # On Python 2, cls.__setattr__ is an unbound method, so compare the
        # underlying function's module and name instead of its identity.
        return (
            getattr(cls.__setattr__, "__module__", None)
            == _frozen_setattrs.__module__
            and cls.__setattr__.__name__ == _frozen_setattrs.__name__
        )

else:

    def _has_frozen_base_class(cls):
        """
        Check whether *cls* has a frozen ancestor by looking at its
        __setattr__.
        """
        # On Python 3 the inherited function object can be compared directly.
        return cls.__setattr__ == _frozen_setattrs
def _generate_unique_filename(cls, func_name):
"""
Create a "filename" suitable for a function being generated.
"""
unique_id = uuid.uuid4()
extra = ""
count = 1
while True:
unique_filename = "<attrs generated {0} {1}.{2}{3}>".format(
func_name,
cls.__module__,
getattr(cls, "__qualname__", cls.__name__),
extra,
)
# To handle concurrency we essentially "reserve" our spot in
# the linecache with a dummy line. The caller can then
# set this value correctly.
cache_line = (1, None, (str(unique_id),), unique_filename)
if (
linecache.cache.setdefault(unique_filename, cache_line)
== cache_line
):
return unique_filename
# Looks like this spot is taken. Try again.
count += 1
extra = "-{0}".format(count)
def _make_hash(cls, attrs, frozen, cache_hash):
    """
    Build and return a generated ``__hash__`` for *cls*.

    Only attributes with ``hash=True``, or ``hash=None`` while participating
    in equality, feed the hash. When *cache_hash* is set, the computed value
    is stored on the instance; when additionally *frozen* is set, the cache
    field is written via ``object.__setattr__``.
    """
    attrs = tuple(
        a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
    )

    tab = "        "

    unique_filename = _generate_unique_filename(cls, "hash")
    type_hash = hash(unique_filename)

    hash_def = "def __hash__(self"
    hash_func = "hash(("
    closing_braces = "))"
    if not cache_hash:
        hash_def += "):"
    else:
        if not PY2:
            # Keep the cache-wrapper argument keyword-only on Python 3 so it
            # cannot be passed positionally by accident.
            hash_def += ", *"

        hash_def += (
            ", _cache_wrapper="
            + "__import__('attr._make')._make._CacheHashWrapper):"
        )
        hash_func = "_cache_wrapper(" + hash_func
        closing_braces += ")"

    method_lines = [hash_def]

    def append_hash_computation_lines(prefix, indent):
        """
        Generate the code for actually computing the hash code.
        Below this will either be returned directly or used to compute
        a value which is then cached, depending on the value of cache_hash
        """
        method_lines.extend(
            [
                indent + prefix + hash_func,
                indent + "        %d," % (type_hash,),
            ]
        )

        for a in attrs:
            method_lines.append(indent + "        self.%s," % a.name)

        method_lines.append(indent + "    " + closing_braces)

    if cache_hash:
        method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
        if frozen:
            append_hash_computation_lines(
                "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
            )
            method_lines.append(tab * 2 + ")")  # close __setattr__
        else:
            append_hash_computation_lines(
                "self.%s = " % _hash_cache_field, tab * 2
            )
        method_lines.append(tab + "return self.%s" % _hash_cache_field)
    else:
        append_hash_computation_lines("return ", tab)

    script = "\n".join(method_lines)
    return _make_method("__hash__", script, unique_filename)
def _add_hash(cls, attrs):
    """
    Add a hash method to *cls*.
    """
    hash_method = _make_hash(cls, attrs, frozen=False, cache_hash=False)
    cls.__hash__ = hash_method
    return cls
def _make_ne():
"""
Create __ne__ method.
"""
def __ne__(self, other):
"""
Check equality and either forward a NotImplemented or
return the result negated.
"""
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
return not result
return __ne__
def _make_eq(cls, attrs):
    """
    Create __eq__ method for *cls* with *attrs*.
    """
    # Only attributes that take part in equality comparisons.
    attrs = [a for a in attrs if a.eq]

    unique_filename = _generate_unique_filename(cls, "eq")
    lines = [
        "def __eq__(self, other):",
        "    if other.__class__ is not self.__class__:",
        "        return NotImplemented",
    ]

    # We can't just do a big self.x = other.x and... clause due to
    # irregularities like nan == nan is false but (nan,) == (nan,) is true.
    globs = {}
    if attrs:
        lines.append("    return (")
        others = ["    ) == ("]
        for a in attrs:
            if a.eq_key:
                cmp_name = "_%s_key" % (a.name,)
                # Add the key function to the global namespace
                # of the evaluated function.
                globs[cmp_name] = a.eq_key
                lines.append(
                    "        %s(self.%s),"
                    % (
                        cmp_name,
                        a.name,
                    )
                )
                others.append(
                    "        %s(other.%s),"
                    % (
                        cmp_name,
                        a.name,
                    )
                )
            else:
                lines.append("        self.%s," % (a.name,))
                others.append("        other.%s," % (a.name,))

        lines += others + ["    )"]
    else:
        # No comparable attributes: instances of the same class are equal.
        lines.append("    return True")

    script = "\n".join(lines)

    return _make_method("__eq__", script, unique_filename, globs)
def _make_order(cls, attrs):
"""
Create ordering methods for *cls* with *attrs*.
"""
attrs = [a for a in attrs if a.order]
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return tuple(
key(value) if key else value
for value, key in (
(getattr(obj, a.name), a.order_key) for a in attrs
)
)
def __lt__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) < attrs_to_tuple(other)
return NotImplemented
def __le__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) <= attrs_to_tuple(other)
return NotImplemented
def __gt__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) > attrs_to_tuple(other)
return NotImplemented
def __ge__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) >= attrs_to_tuple(other)
return NotImplemented
return __lt__, __le__, __gt__, __ge__
def _add_eq(cls, attrs=None):
    """
    Add equality methods to *cls* with *attrs*.
    """
    selected = cls.__attrs_attrs__ if attrs is None else attrs

    cls.__eq__ = _make_eq(cls, selected)
    cls.__ne__ = _make_ne()

    return cls
# Thread-local set of the ids of objects currently being repr'd; used to
# short-circuit self-referential structures with "...".
_already_repring = threading.local()


def _make_repr(attrs, ns):
    """
    Make a repr method that includes relevant *attrs*, adding *ns* to the full
    name.
    """
    # Pair each included attribute name with its formatter: the builtin
    # repr when a.repr is True, otherwise the user-supplied callable.
    names_and_formatters = tuple(
        (a.name, repr if a.repr is True else a.repr)
        for a in attrs
        if a.repr is not False
    )

    def __repr__(self):
        """
        Automatically created by attrs.
        """
        try:
            working_set = _already_repring.working_set
        except AttributeError:
            working_set = set()
            _already_repring.working_set = working_set

        if id(self) in working_set:
            return "..."

        real_cls = self.__class__
        if ns is None:
            qualname = getattr(real_cls, "__qualname__", None)
            if qualname is not None:
                class_name = qualname.rsplit(">.", 1)[-1]
            else:
                class_name = real_cls.__name__
        else:
            class_name = ns + "." + real_cls.__name__

        # Since 'self' remains on the stack (i.e.: strongly referenced) for the
        # duration of this call, it's safe to depend on id(...) stability, and
        # not need to track the instance and therefore worry about properties
        # like weakref- or hash-ability.
        working_set.add(id(self))
        try:
            parts = [class_name, "("]
            for index, (name, formatter) in enumerate(names_and_formatters):
                if index:
                    parts.append(", ")
                parts.append(name)
                parts.append("=")
                parts.append(formatter(getattr(self, name, NOTHING)))
            return "".join(parts) + ")"
        finally:
            working_set.remove(id(self))

    return __repr__
def _add_repr(cls, ns=None, attrs=None):
    """
    Add a repr method to *cls*.
    """
    selected = cls.__attrs_attrs__ if attrs is None else attrs
    cls.__repr__ = _make_repr(selected, ns)
    return cls
def fields(cls):
    """
    Return the tuple of ``attrs`` attributes for a class.

    The tuple also allows accessing the fields by their names (see below for
    examples).

    :param type cls: Class to introspect.

    :raise TypeError: If *cls* is not a class.
    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.

    :rtype: tuple (with name accessors) of `attr.Attribute`

    .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
       by name.
    """
    if isclass(cls):
        attrib_tuple = getattr(cls, "__attrs_attrs__", None)
        if attrib_tuple is not None:
            return attrib_tuple
        raise NotAnAttrsClassError(
            "{cls!r} is not an attrs-decorated class.".format(cls=cls)
        )
    raise TypeError("Passed object must be a class.")
def fields_dict(cls):
    """
    Return an ordered dictionary of ``attrs`` attributes for a class, whose
    keys are the attribute names.

    :param type cls: Class to introspect.

    :raise TypeError: If *cls* is not a class.
    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.

    :rtype: an ordered dict where keys are attribute names and values are
        `attr.Attribute`\\ s. This will be a `dict` if it's
        naturally ordered like on Python 3.6+ or an
        :class:`~collections.OrderedDict` otherwise.

    .. versionadded:: 18.1.0
    """
    if isclass(cls):
        attrib_tuple = getattr(cls, "__attrs_attrs__", None)
        if attrib_tuple is not None:
            return ordered_dict((a.name, a) for a in attrib_tuple)
        raise NotAnAttrsClassError(
            "{cls!r} is not an attrs-decorated class.".format(cls=cls)
        )
    raise TypeError("Passed object must be a class.")
def validate(inst):
    """
    Validate all attributes on *inst* that have a validator.

    Leaves all exceptions through.

    :param inst: Instance of a class with ``attrs`` attributes.
    """
    # Global kill switch set via attr.set_run_validators(False).
    if _config._run_validators is False:
        return

    for attribute in fields(inst.__class__):
        validator = attribute.validator
        if validator is not None:
            validator(inst, attribute, getattr(inst, attribute.name))
def _is_slot_cls(cls):
return "__slots__" in cls.__dict__
def _is_slot_attr(a_name, base_attr_map):
"""
Check if the attribute name comes from a slot class.
"""
return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
def _make_init(
    cls,
    attrs,
    pre_init,
    post_init,
    frozen,
    slots,
    cache_hash,
    base_attr_map,
    is_exc,
    has_global_on_setattr,
    attrs_init,
):
    """
    Build ``__init__`` (or ``__attrs_init__`` when *attrs_init* is true) for
    *cls* by generating source for *attrs* and compiling it into a method.
    """
    if frozen and has_global_on_setattr:
        raise ValueError("Frozen classes can't use on_setattr.")

    # Frozen or hash-caching classes must assign through object.__setattr__.
    needs_cached_setattr = cache_hash or frozen
    filtered_attrs = []
    attr_dict = {}
    for a in attrs:
        # Attributes excluded from __init__ with no default need no code.
        if not a.init and a.default is NOTHING:
            continue

        filtered_attrs.append(a)
        attr_dict[a.name] = a

        if a.on_setattr is not None:
            if frozen is True:
                raise ValueError("Frozen classes can't use on_setattr.")

            needs_cached_setattr = True
        elif (
            has_global_on_setattr and a.on_setattr is not setters.NO_OP
        ) or _is_slot_attr(a.name, base_attr_map):
            # A per-attribute hook (or an attribute inherited from a slotted
            # base) also forces assignments through the cached raw setattr.
            needs_cached_setattr = True

    unique_filename = _generate_unique_filename(cls, "init")
    script, globs, annotations = _attrs_to_init_script(
        filtered_attrs,
        frozen,
        slots,
        pre_init,
        post_init,
        cache_hash,
        base_attr_map,
        is_exc,
        needs_cached_setattr,
        has_global_on_setattr,
        attrs_init,
    )
    if cls.__module__ in sys.modules:
        # This makes typing.get_type_hints(CLS.__init__) resolve string types.
        globs.update(sys.modules[cls.__module__].__dict__)

    globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})

    if needs_cached_setattr:
        # Save the lookup overhead in __init__ if we need to circumvent
        # setattr hooks.
        globs["_cached_setattr"] = _obj_setattr

    init = _make_method(
        "__attrs_init__" if attrs_init else "__init__",
        script,
        unique_filename,
        globs,
    )
    init.__annotations__ = annotations

    return init
def _setattr(attr_name, value_var, has_on_setattr):
"""
Use the cached object.setattr to set *attr_name* to *value_var*.
"""
return "_setattr('%s', %s)" % (attr_name, value_var)
def _setattr_with_converter(attr_name, value_var, has_on_setattr):
    """
    Emit source that pipes *value_var* through the attribute's converter and
    assigns the result via the cached ``object.__setattr__``.
    """
    converter_call = "%s(%s)" % (_init_converter_pat % (attr_name,), value_var)
    return "_setattr('%s', %s)" % (attr_name, converter_call)
def _assign(attr_name, value, has_on_setattr):
"""
Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise
relegate to _setattr.
"""
if has_on_setattr:
return _setattr(attr_name, value, True)
return "self.%s = %s" % (attr_name, value)
def _assign_with_converter(attr_name, value_var, has_on_setattr):
    """
    Emit ``self.<name> = <converter>(<value>)`` source, deferring to
    ``_setattr_with_converter`` when an on_setattr hook applies.
    """
    if has_on_setattr:
        return _setattr_with_converter(attr_name, value_var, True)
    converted = "%s(%s)" % (_init_converter_pat % (attr_name,), value_var)
    return "self.%s = %s" % (attr_name, converted)
if PY2:
    # Python 2 has no keyword-only syntax; emulate it by collecting
    # **_kw_only and popping each expected name out by hand.

    def _unpack_kw_only_py2(attr_name, default=None):
        """
        Unpack *attr_name* from _kw_only dict.
        """
        # *default* arrives as source text; None means "required argument".
        if default is not None:
            arg_default = ", %s" % default
        else:
            arg_default = ""
        return "%s = _kw_only.pop('%s'%s)" % (
            attr_name,
            attr_name,
            arg_default,
        )

    def _unpack_kw_only_lines_py2(kw_only_args):
        """
        Unpack all *kw_only_args* from _kw_only dict and handle errors.

        Given a list of strings "{attr_name}" and "{attr_name}={default}"
        generates list of lines of code that pop attrs from _kw_only dict and
        raise TypeError similar to builtin if required attr is missing or
        extra key is passed.

        >>> print("\n".join(_unpack_kw_only_lines_py2(["a", "b=42"])))
        try:
            a = _kw_only.pop('a')
            b = _kw_only.pop('b', 42)
        except KeyError as _key_error:
            raise TypeError(
                ...
        if _kw_only:
            raise TypeError(
                ...
        """
        lines = ["try:"]
        lines.extend(
            "    " + _unpack_kw_only_py2(*arg.split("="))
            for arg in kw_only_args
        )
        lines += """\
except KeyError as _key_error:
    raise TypeError(
        '__init__() missing required keyword-only argument: %s' % _key_error
    )
if _kw_only:
    raise TypeError(
        '__init__() got an unexpected keyword argument %r'
        % next(iter(_kw_only))
    )
""".split(
            "\n"
        )
        return lines
def _attrs_to_init_script(
    attrs,
    frozen,
    slots,
    pre_init,
    post_init,
    cache_hash,
    base_attr_map,
    is_exc,
    needs_cached_setattr,
    has_global_on_setattr,
    attrs_init,
):
    """
    Return a script of an initializer for *attrs* and a dict of globals.

    The globals are expected by the generated script.

    If *frozen* is True, we cannot set the attributes directly so we use
    a cached ``object.__setattr__``.
    """
    lines = []
    if pre_init:
        lines.append("self.__attrs_pre_init__()")

    if needs_cached_setattr:
        lines.append(
            # Circumvent the __setattr__ descriptor to save one lookup per
            # assignment.
            # Note _setattr will be used again below if cache_hash is True
            "_setattr = _cached_setattr.__get__(self, self.__class__)"
        )

    if frozen is True:
        if slots is True:
            fmt_setter = _setattr
            fmt_setter_with_converter = _setattr_with_converter
        else:
            # Dict frozen classes assign directly to __dict__.
            # But only if the attribute doesn't come from an ancestor slot
            # class.
            # Note _inst_dict will be used again below if cache_hash is True
            lines.append("_inst_dict = self.__dict__")

            def fmt_setter(attr_name, value_var, has_on_setattr):
                if _is_slot_attr(attr_name, base_attr_map):
                    return _setattr(attr_name, value_var, has_on_setattr)

                return "_inst_dict['%s'] = %s" % (attr_name, value_var)

            def fmt_setter_with_converter(
                attr_name, value_var, has_on_setattr
            ):
                if has_on_setattr or _is_slot_attr(attr_name, base_attr_map):
                    return _setattr_with_converter(
                        attr_name, value_var, has_on_setattr
                    )

                return "_inst_dict['%s'] = %s(%s)" % (
                    attr_name,
                    _init_converter_pat % (attr_name,),
                    value_var,
                )

    else:
        # Not frozen.
        fmt_setter = _assign
        fmt_setter_with_converter = _assign_with_converter

    args = []
    kw_only_args = []
    attrs_to_validate = []

    # This is a dictionary of names to validator and converter callables.
    # Injecting this into __init__ globals lets us avoid lookups.
    names_for_globals = {}
    annotations = {"return": None}

    for a in attrs:
        if a.validator:
            attrs_to_validate.append(a)

        attr_name = a.name
        has_on_setattr = a.on_setattr is not None or (
            a.on_setattr is not setters.NO_OP and has_global_on_setattr
        )
        # Private attributes get a public argument name (leading _ stripped).
        arg_name = a.name.lstrip("_")

        has_factory = isinstance(a.default, Factory)
        if has_factory and a.default.takes_self:
            maybe_self = "self"
        else:
            maybe_self = ""

        if a.init is False:
            # No __init__ argument: attribute is filled in unconditionally.
            if has_factory:
                init_factory_name = _init_factory_pat.format(a.name)
                if a.converter is not None:
                    lines.append(
                        fmt_setter_with_converter(
                            attr_name,
                            init_factory_name + "(%s)" % (maybe_self,),
                            has_on_setattr,
                        )
                    )
                    conv_name = _init_converter_pat % (a.name,)
                    names_for_globals[conv_name] = a.converter
                else:
                    lines.append(
                        fmt_setter(
                            attr_name,
                            init_factory_name + "(%s)" % (maybe_self,),
                            has_on_setattr,
                        )
                    )
                names_for_globals[init_factory_name] = a.default.factory
            else:
                if a.converter is not None:
                    lines.append(
                        fmt_setter_with_converter(
                            attr_name,
                            "attr_dict['%s'].default" % (attr_name,),
                            has_on_setattr,
                        )
                    )
                    conv_name = _init_converter_pat % (a.name,)
                    names_for_globals[conv_name] = a.converter
                else:
                    lines.append(
                        fmt_setter(
                            attr_name,
                            "attr_dict['%s'].default" % (attr_name,),
                            has_on_setattr,
                        )
                    )
        elif a.default is not NOTHING and not has_factory:
            # Plain (non-factory) default baked into the signature.
            arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name)
            if a.kw_only:
                kw_only_args.append(arg)
            else:
                args.append(arg)

            if a.converter is not None:
                lines.append(
                    fmt_setter_with_converter(
                        attr_name, arg_name, has_on_setattr
                    )
                )
                names_for_globals[
                    _init_converter_pat % (a.name,)
                ] = a.converter
            else:
                lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))

        elif has_factory:
            # NOTHING sentinel default: call the factory only when omitted.
            arg = "%s=NOTHING" % (arg_name,)
            if a.kw_only:
                kw_only_args.append(arg)
            else:
                args.append(arg)
            lines.append("if %s is not NOTHING:" % (arg_name,))

            init_factory_name = _init_factory_pat.format(a.name)
            if a.converter is not None:
                lines.append(
                    "    "
                    + fmt_setter_with_converter(
                        attr_name, arg_name, has_on_setattr
                    )
                )
                lines.append("else:")
                lines.append(
                    "    "
                    + fmt_setter_with_converter(
                        attr_name,
                        init_factory_name + "(" + maybe_self + ")",
                        has_on_setattr,
                    )
                )
                names_for_globals[
                    _init_converter_pat % (a.name,)
                ] = a.converter
            else:
                lines.append(
                    "    " + fmt_setter(attr_name, arg_name, has_on_setattr)
                )
                lines.append("else:")
                lines.append(
                    "    "
                    + fmt_setter(
                        attr_name,
                        init_factory_name + "(" + maybe_self + ")",
                        has_on_setattr,
                    )
                )
            names_for_globals[init_factory_name] = a.default.factory
        else:
            # Mandatory argument: no default at all.
            if a.kw_only:
                kw_only_args.append(arg_name)
            else:
                args.append(arg_name)

            if a.converter is not None:
                lines.append(
                    fmt_setter_with_converter(
                        attr_name, arg_name, has_on_setattr
                    )
                )
                names_for_globals[
                    _init_converter_pat % (a.name,)
                ] = a.converter
            else:
                lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))

        if a.init is True:
            if a.type is not None and a.converter is None:
                annotations[arg_name] = a.type
            elif a.converter is not None and not PY2:
                # Try to get the type from the converter.
                sig = None
                try:
                    sig = inspect.signature(a.converter)
                except (ValueError, TypeError):  # inspect failed
                    pass
                if sig:
                    sig_params = list(sig.parameters.values())
                    if (
                        sig_params
                        and sig_params[0].annotation
                        is not inspect.Parameter.empty
                    ):
                        annotations[arg_name] = sig_params[0].annotation

    if attrs_to_validate:  # we can skip this if there are no validators.
        names_for_globals["_config"] = _config
        lines.append("if _config._run_validators is True:")
        for a in attrs_to_validate:
            val_name = "__attr_validator_" + a.name
            attr_name = "__attr_" + a.name
            lines.append(
                "    %s(self, %s, self.%s)" % (val_name, attr_name, a.name)
            )
            names_for_globals[val_name] = a.validator
            names_for_globals[attr_name] = a

    if post_init:
        lines.append("self.__attrs_post_init__()")

    # because this is set only after __attrs_post_init is called, a crash
    # will result if post-init tries to access the hash code.  This seemed
    # preferable to setting this beforehand, in which case alteration to
    # field values during post-init combined with post-init accessing the
    # hash code would result in silent bugs.
    if cache_hash:
        if frozen:
            if slots:
                # if frozen and slots, then _setattr defined above
                init_hash_cache = "_setattr('%s', %s)"
            else:
                # if frozen and not slots, then _inst_dict defined above
                init_hash_cache = "_inst_dict['%s'] = %s"
        else:
            init_hash_cache = "self.%s = %s"
        lines.append(init_hash_cache % (_hash_cache_field, "None"))

    # For exceptions we rely on BaseException.__init__ for proper
    # initialization.
    if is_exc:
        vals = ",".join("self." + a.name for a in attrs if a.init)

        lines.append("BaseException.__init__(self, %s)" % (vals,))

    args = ", ".join(args)
    if kw_only_args:
        if PY2:
            lines = _unpack_kw_only_lines_py2(kw_only_args) + lines

            args += "%s**_kw_only" % (", " if args else "",)  # leading comma
        else:
            args += "%s*, %s" % (
                ", " if args else "",  # leading comma
                ", ".join(kw_only_args),  # kw_only args
            )
    return (
        """\
def {init_name}(self, {args}):
    {lines}
""".format(
            init_name=("__attrs_init__" if attrs_init else "__init__"),
            args=args,
            lines="\n    ".join(lines) if lines else "pass",
        ),
        names_for_globals,
        annotations,
    )
class Attribute(object):
    """
    *Read-only* representation of an attribute.

    Instances of this class are frequently used for introspection purposes
    like:

    - `fields` returns a tuple of them.
    - Validators get them passed as the first argument.
    - The *field transformer* hook receives a list of them.

    :attribute name: The name of the attribute.
    :attribute inherited: Whether or not that attribute has been inherited from
        a base class.

    Plus *all* arguments of `attr.ib` (except for ``factory``
    which is only syntactic sugar for ``default=Factory(...)``.

    .. versionadded:: 20.1.0 *inherited*
    .. versionadded:: 20.1.0 *on_setattr*
    .. versionchanged:: 20.2.0 *inherited* is not taken into account for
        equality checks and hashing anymore.
    .. versionadded:: 21.1.0 *eq_key* and *order_key*

    For the full version history of the fields, see `attr.ib`.
    """

    __slots__ = (
        "name",
        "default",
        "validator",
        "repr",
        "eq",
        "eq_key",
        "order",
        "order_key",
        "hash",
        "init",
        "metadata",
        "type",
        "converter",
        "kw_only",
        "inherited",
        "on_setattr",
    )

    def __init__(
        self,
        name,
        default,
        validator,
        repr,
        cmp,  # XXX: unused, remove along with other cmp code.
        hash,
        init,
        inherited,
        metadata=None,
        type=None,
        converter=None,
        kw_only=False,
        eq=None,
        eq_key=None,
        order=None,
        order_key=None,
        on_setattr=None,
    ):
        # Reconcile the deprecated *cmp* argument with eq/order (and their
        # *_key variants).
        eq, eq_key, order, order_key = _determine_attrib_eq_order(
            cmp, eq_key or eq, order_key or order, True
        )

        # Cache this descriptor here to speed things up later.
        bound_setattr = _obj_setattr.__get__(self, Attribute)

        # Despite the big red warning, people *do* instantiate `Attribute`
        # themselves.
        bound_setattr("name", name)
        bound_setattr("default", default)
        bound_setattr("validator", validator)
        bound_setattr("repr", repr)
        bound_setattr("eq", eq)
        bound_setattr("eq_key", eq_key)
        bound_setattr("order", order)
        bound_setattr("order_key", order_key)
        bound_setattr("hash", hash)
        bound_setattr("init", init)
        bound_setattr("converter", converter)
        bound_setattr(
            "metadata",
            (
                # Wrap metadata in a read-only proxy; share one singleton for
                # the (common) empty case.
                metadata_proxy(metadata)
                if metadata
                else _empty_metadata_singleton
            ),
        )
        bound_setattr("type", type)
        bound_setattr("kw_only", kw_only)
        bound_setattr("inherited", inherited)
        bound_setattr("on_setattr", on_setattr)

    def __setattr__(self, name, value):
        # Instances are immutable; every ordinary assignment is an error.
        raise FrozenInstanceError()

    @classmethod
    def from_counting_attr(cls, name, ca, type=None):
        # type holds the annotated value. deal with conflicts:
        if type is None:
            type = ca.type
        elif ca.type is not None:
            raise ValueError(
                "Type annotation and type argument cannot both be present"
            )
        inst_dict = {
            k: getattr(ca, k)
            for k in Attribute.__slots__
            if k
            not in (
                "name",
                "validator",
                "default",
                "type",
                "inherited",
            )  # exclude methods and deprecated alias
        }
        return cls(
            name=name,
            validator=ca._validator,
            default=ca._default,
            type=type,
            cmp=None,
            inherited=False,
            **inst_dict
        )

    @property
    def cmp(self):
        """
        Simulate the presence of a cmp attribute and warn.
        """
        warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2)

        return self.eq and self.order

    # Don't use attr.evolve since fields(Attribute) doesn't work
    def evolve(self, **changes):
        """
        Copy *self* and apply *changes*.

        This works similarly to `attr.evolve` but that function does not work
        with ``Attribute``.

        It is mainly meant to be used for `transform-fields`.

        .. versionadded:: 20.3.0
        """
        new = copy.copy(self)

        new._setattrs(changes.items())

        return new

    # Don't use _add_pickle since fields(Attribute) doesn't work
    def __getstate__(self):
        """
        Play nice with pickle.
        """
        # metadata is a proxy object; materialize it into a plain dict.
        return tuple(
            getattr(self, name) if name != "metadata" else dict(self.metadata)
            for name in self.__slots__
        )

    def __setstate__(self, state):
        """
        Play nice with pickle.
        """
        self._setattrs(zip(self.__slots__, state))

    def _setattrs(self, name_values_pairs):
        # Bypass the frozen __setattr__ via the cached object.__setattr__.
        bound_setattr = _obj_setattr.__get__(self, Attribute)
        for name, value in name_values_pairs:
            if name != "metadata":
                bound_setattr(name, value)
            else:
                bound_setattr(
                    name,
                    metadata_proxy(value)
                    if value
                    else _empty_metadata_singleton,
                )
# Bootstrap: hand-build Attribute instances describing Attribute's own slots
# so repr/eq/hash can be attached to Attribute itself below.
_a = [
    Attribute(
        name=name,
        default=NOTHING,
        validator=None,
        repr=True,
        cmp=None,
        eq=True,
        order=False,
        hash=(name != "metadata"),
        init=True,
        inherited=False,
    )
    for name in Attribute.__slots__
]
# "inherited" is deliberately excluded from eq and hash (see class docstring).
Attribute = _add_hash(
    _add_eq(
        _add_repr(Attribute, attrs=_a),
        attrs=[a for a in _a if a.name != "inherited"],
    ),
    attrs=[a for a in _a if a.hash and a.name != "inherited"],
)
class _CountingAttr(object):
    """
    Intermediate representation of attributes that uses a counter to preserve
    the order in which the attributes have been defined.

    *Internal* data structure of the attrs library.  Running into is most
    likely the result of a bug like a forgotten `@attr.s` decorator.
    """

    __slots__ = (
        "counter",
        "_default",
        "repr",
        "eq",
        "eq_key",
        "order",
        "order_key",
        "hash",
        "init",
        "metadata",
        "_validator",
        "converter",
        "type",
        "kw_only",
        "on_setattr",
    )
    # Hand-built field descriptions so instances of this bookkeeping class
    # themselves get repr/eq attached below.
    __attrs_attrs__ = tuple(
        Attribute(
            name=name,
            default=NOTHING,
            validator=None,
            repr=True,
            cmp=None,
            hash=True,
            init=True,
            kw_only=False,
            eq=True,
            eq_key=None,
            order=False,
            order_key=None,
            inherited=False,
            on_setattr=None,
        )
        for name in (
            "counter",
            "_default",
            "repr",
            "eq",
            "order",
            "hash",
            "init",
            "on_setattr",
        )
    ) + (
        # metadata participates in eq but not hash.
        Attribute(
            name="metadata",
            default=None,
            validator=None,
            repr=True,
            cmp=None,
            hash=False,
            init=True,
            kw_only=False,
            eq=True,
            eq_key=None,
            order=False,
            order_key=None,
            inherited=False,
            on_setattr=None,
        ),
    )
    # Class-wide monotonically increasing counter; preserves definition order
    # across all attr.ib() calls in a process.
    cls_counter = 0

    def __init__(
        self,
        default,
        validator,
        repr,
        cmp,
        hash,
        init,
        converter,
        metadata,
        type,
        kw_only,
        eq,
        eq_key,
        order,
        order_key,
        on_setattr,
    ):
        _CountingAttr.cls_counter += 1
        self.counter = _CountingAttr.cls_counter
        self._default = default
        self._validator = validator
        self.converter = converter
        self.repr = repr
        self.eq = eq
        self.eq_key = eq_key
        self.order = order
        self.order_key = order_key
        self.hash = hash
        self.init = init
        self.metadata = metadata
        self.type = type
        self.kw_only = kw_only
        self.on_setattr = on_setattr

    def validator(self, meth):
        """
        Decorator that adds *meth* to the list of validators.

        Returns *meth* unchanged.

        .. versionadded:: 17.1.0
        """
        if self._validator is None:
            self._validator = meth
        else:
            # Chain with any previously registered validator(s).
            self._validator = and_(self._validator, meth)
        return meth

    def default(self, meth):
        """
        Decorator that allows to set the default for an attribute.

        Returns *meth* unchanged.

        :raises DefaultAlreadySetError: If default has been set before.

        .. versionadded:: 17.1.0
        """
        if self._default is not NOTHING:
            raise DefaultAlreadySetError()
        # The decorated method becomes a takes_self factory.
        self._default = Factory(meth, takes_self=True)
        return meth


_CountingAttr = _add_eq(_add_repr(_CountingAttr))
class Factory(object):
    """
    Stores a factory callable.

    When used as the default value of an `attr.ib`, the wrapped callable is
    invoked to produce a fresh default for every new instance.

    :param callable factory: A callable that takes either none or exactly one
        mandatory positional argument depending on *takes_self*.
    :param bool takes_self: Pass the partially initialized instance that is
        being initialized as a positional argument.

    .. versionadded:: 17.1.0 *takes_self*
    """

    __slots__ = ("factory", "takes_self")

    def __init__(self, factory, takes_self=False):
        """
        `Factory` is part of the default machinery so if we want a default
        value here, we have to implement it ourselves.
        """
        self.factory = factory
        self.takes_self = takes_self

    def __getstate__(self):
        """
        Play nice with pickle.
        """
        return tuple(getattr(self, slot) for slot in self.__slots__)

    def __setstate__(self, state):
        """
        Play nice with pickle.
        """
        for slot, slot_value in zip(self.__slots__, state):
            setattr(self, slot, slot_value)
# Bootstrap Factory the same way as Attribute above: hand-built Attribute
# instances for its two slots, then attach repr/eq/hash.
_f = [
    Attribute(
        name=name,
        default=NOTHING,
        validator=None,
        repr=True,
        cmp=None,
        eq=True,
        order=False,
        hash=True,
        init=True,
        inherited=False,
    )
    for name in Factory.__slots__
]

Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f)
def make_class(name, attrs, bases=(object,), **attributes_arguments):
    """
    A quick way to create a new class called *name* with *attrs*.

    :param str name: The name for the new class.

    :param attrs: A list of names or a dictionary of mappings of names to
        attributes.

        If *attrs* is a list or an ordered dict (`dict` on Python 3.6+,
        `collections.OrderedDict` otherwise), the order is deduced from
        the order of the names or attributes inside *attrs*.  Otherwise the
        order of the definition of the attributes is used.
    :type attrs: `list` or `dict`

    :param tuple bases: Classes that the new class will subclass.

    :param attributes_arguments: Passed unmodified to `attr.s`.

    :return: A new class with *attrs*.
    :rtype: type

    .. versionadded:: 17.1.0 *bases*
    .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
    """
    if isinstance(attrs, dict):
        cls_dict = attrs
    elif isinstance(attrs, (list, tuple)):
        # Bare names become plain attr.ib()s.
        cls_dict = dict((a, attrib()) for a in attrs)
    else:
        raise TypeError("attrs argument must be a dict or a list.")

    # Lifecycle hooks must live on the class body, not in the attrs mapping.
    pre_init = cls_dict.pop("__attrs_pre_init__", None)
    post_init = cls_dict.pop("__attrs_post_init__", None)
    user_init = cls_dict.pop("__init__", None)

    body = {}
    if pre_init is not None:
        body["__attrs_pre_init__"] = pre_init
    if post_init is not None:
        body["__attrs_post_init__"] = post_init
    if user_init is not None:
        body["__init__"] = user_init

    type_ = new_class(name, bases, {}, lambda ns: ns.update(body))

    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the class is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        type_.__module__ = sys._getframe(1).f_globals.get(
            "__name__", "__main__"
        )
    except (AttributeError, ValueError):
        pass

    # We do it here for proper warnings with meaningful stacklevel.
    cmp = attributes_arguments.pop("cmp", None)
    (
        attributes_arguments["eq"],
        attributes_arguments["order"],
    ) = _determine_attrs_eq_order(
        cmp,
        attributes_arguments.get("eq"),
        attributes_arguments.get("order"),
        True,
    )

    return _attrs(these=cls_dict, **attributes_arguments)(type_)
# These are required by within this module so we define them here and merely
# import into .validators / .converters.
@attrs(slots=True, hash=True)
class _AndValidator(object):
    """
    Compose many validators to a single one.
    """

    # Tuple of wrapped validator callables, run in definition order.
    _validators = attrib()

    def __call__(self, inst, attr, value):
        # Delegate to every wrapped validator; the first failure propagates.
        for v in self._validators:
            v(inst, attr, value)
def and_(*validators):
    """
    A validator that composes multiple validators into one.

    When called on a value, it runs all wrapped validators.

    Nested ``and_`` compositions are flattened so the result wraps a single
    flat tuple of callables.

    :param callables validators: Arbitrary number of validators.

    .. versionadded:: 17.1.0
    """
    flattened = []
    for validator in validators:
        if isinstance(validator, _AndValidator):
            flattened.extend(validator._validators)
        else:
            flattened.append(validator)
    return _AndValidator(tuple(flattened))
def pipe(*converters):
    """
    A converter that composes multiple converters into one.

    When called on a value, it runs all wrapped converters, returning the
    *last* value.

    Type annotations will be inferred from the wrapped converters', if
    they have any.

    :param callables converters: Arbitrary number of converters.

    .. versionadded:: 20.1.0
    """

    def pipe_converter(val):
        # Thread the value through every converter, left to right.
        for converter in converters:
            val = converter(val)

        return val

    if not PY2:
        if not converters:
            # If the converter list is empty, pipe_converter is the identity.
            A = typing.TypeVar("A")
            pipe_converter.__annotations__ = {"val": A, "return": A}
        else:
            # Get parameter type from the first converter in the chain.
            sig = None
            try:
                sig = inspect.signature(converters[0])
            except (ValueError, TypeError):  # inspect failed
                pass
            if sig:
                params = list(sig.parameters.values())
                if (
                    params
                    and params[0].annotation is not inspect.Parameter.empty
                ):
                    pipe_converter.__annotations__["val"] = params[
                        0
                    ].annotation
            # Get return type from the last converter in the chain.
            sig = None
            try:
                sig = inspect.signature(converters[-1])
            except (ValueError, TypeError):  # inspect failed
                pass
            if sig and sig.return_annotation is not inspect.Signature().empty:
                pipe_converter.__annotations__[
                    "return"
                ] = sig.return_annotation

    return pipe_converter
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@attrs@py2@attr@_make.py@.PATH_END.py
|
{
"filename": "__version__.py",
"repo_name": "ryanhausen/fitsmap",
"repo_path": "fitsmap_extracted/fitsmap-master/fitsmap/__version__.py",
"type": "Python"
}
|
"""0.11.1"""
|
ryanhausenREPO_NAMEfitsmapPATH_START.@fitsmap_extracted@fitsmap-master@fitsmap@__version__.py@.PATH_END.py
|
{
"filename": "select_samples.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/Sandbox/imaging/select_samples.py",
"type": "Python"
}
|
import fitsio
from astropy.table import Table,join,vstack
import numpy as np
import glob
import os
import sys
from desitarget import cuts
from desitarget import targetmask
import astropy.io.fits as fits
# Legacy Surveys DR9 sweep catalogs for the south / north imaging footprints
# (NERSC filesystem paths).
dirsweeps = '/global/project/projectdirs/cosmo/data/legacysurvey/dr9/south/sweep/9.0/'
dirsweepn = '/global/project/projectdirs/cosmo/data/legacysurvey/dr9/north/sweep/9.0/'
# Root of the DR9m 0.44.0 main-survey resolved target catalogs.
targroot = '/project/projectdirs/desi/target/catalogs/dr9m/0.44.0/targets/main/resolve/'
# NOTE: globbing happens at import time; empty lists mean the NERSC paths
# above are not mounted in the current environment.
sfs = glob.glob(dirsweeps+'sweep*')
sfn = glob.glob(dirsweepn+'sweep*')
# Destination directory for all outputs written by this script.
outdir = '/project/projectdirs/desi/users/ajross/dr9/'
def gather_targets(type, fo='targetDR9m44.fits', prog='dark'):
    """
    Concatenate every healpix-split DR9m target file for one target class.

    Keeps only the columns listed in *keys* and only rows whose DESI_TARGET
    bitmask matches *type*, then writes the stacked catalog to
    ``outdir + type + fo``.

    :param type: target class name understood by desitarget.targetmask
        (e.g. 'ELG', 'LRG', 'QSO').  The name shadows the builtin
        deliberately to preserve the existing call signature.
    :param fo: suffix of the output filename.
    :param prog: observing program subdirectory under targroot
        ('dark' or 'bright').
    """
    print(targroot+prog)
    fns = glob.glob(targroot+prog+'/*.fits')
    ncat = len(fns)
    print('data is split into '+str(ncat)+' healpix files')
    keys = ['RA', 'DEC', 'BRICKID', 'BRICKNAME','MORPHTYPE','DCHISQ','FLUX_G', 'FLUX_R', 'FLUX_Z','MW_TRANSMISSION_G', 'MW_TRANSMISSION_R', 'MW_TRANSMISSION_Z','FLUX_IVAR_G', 'FLUX_IVAR_R', 'FLUX_IVAR_Z','NOBS_G', 'NOBS_R', 'NOBS_Z','PSFDEPTH_G', 'PSFDEPTH_R', 'PSFDEPTH_Z', 'GALDEPTH_G', 'GALDEPTH_R',\
    'GALDEPTH_Z','FIBERFLUX_G', 'FIBERFLUX_R', 'FIBERFLUX_Z', 'FIBERTOTFLUX_G', 'FIBERTOTFLUX_R', 'FIBERTOTFLUX_Z',\
    'MASKBITS', 'EBV', 'PHOTSYS','TARGETID','DESI_TARGET']
    # Verify the requested columns exist in the target files; an explicit
    # membership test replaces the previous bare try/except.
    f = fitsio.read(fns[0])
    for key in keys:
        if key not in f.dtype.names:
            print(key+' not in target file!')
    bs = targetmask.desi_mask[type]
    outf = outdir+type+fo
    print('file will be written to '+outf)
    # Accumulate per-healpix chunks and stack once at the end; repeated
    # np.hstack on a growing array is O(n^2) in total copies.
    data = fitsio.read(fns[0], columns=keys)
    data = data[(data['DESI_TARGET'] & bs) > 0]
    chunks = [data]
    nrows = len(data)
    for i in range(1, ncat):
        print(i)
        datan = fitsio.read(fns[i], columns=keys)
        datan = datan[(datan['DESI_TARGET'] & bs) > 0]
        chunks.append(datan)
        nrows += len(datan)
        print(nrows)
    data = np.hstack(chunks)
    fitsio.write(outf, data, clobber=True)
    print('wrote to '+outf)
    del data
def starsel_sweep(f, gfluxmin):
    """
    Boolean mask of sweep rows that are PSF-morphology ("star-like") sources
    brighter than *gfluxmin* in extinction-corrected g-band flux.
    """
    is_psf = f['TYPE'] == 'PSF '
    gflux_corrected = f['FLUX_G'] / f['MW_TRANSMISSION_G']
    return is_psf & (gflux_corrected > gfluxmin)
def typesel(f, type, south=True, ebvfac=1., Rv=3.1):
    """
    Return a boolean selection mask applying the DESI LRG or ELG color cuts
    to sweep array *f*.

    :param f: sweep rows with FLUX_*, MW_TRANSMISSION_*, FIBERFLUX_Z and
        EBV columns.
    :param type: 'LRG' or 'ELG'; any other value leaves *w* unassigned and
        raises NameError at the return.
    :param south: passed through to the desitarget color-cut functions
        (cuts differ between imaging hemispheres).
    :param ebvfac: multiplicative rescaling of the extinction coefficients.
    :param Rv: total-to-selective extinction ratio; 3.1 with ebvfac=1 uses
        the precomputed MW_TRANSMISSION columns directly.
    """
    if ebvfac == 1. and Rv == 3.1:
        # Standard extinction: de-redden with the catalog transmissions.
        gflux = f['FLUX_G']/f['MW_TRANSMISSION_G']
        rflux = f['FLUX_R']/f['MW_TRANSMISSION_R']
        zflux = f['FLUX_Z']/f['MW_TRANSMISSION_Z']
        w1flux = f['FLUX_W1']/f['MW_TRANSMISSION_W1']
        zfiberflux = f['FIBERFLUX_Z']/f['MW_TRANSMISSION_Z']
    else:
        # Fiducial R_V=3.1 per-band extinction coefficients, rescaled by
        # ebvfac (values presumably from the Schlafly 2011 tables cited
        # below — TODO confirm against the published table).
        Rg = 3.214*ebvfac
        Rr = 2.165*ebvfac
        Rz = 1.211*ebvfac
        Rw1 = 0.184*ebvfac
        if Rv < 3.1:
            #linear interpolation from Schlafly 2011 table
            Rg = (3.739-3.273)*(3.1-Rv)*ebvfac+Rg
            Rr = (2.113-2.176)*(3.1-Rv)*ebvfac+Rr
            Rz = (1.175-1.217)*(3.1-Rv)*ebvfac+Rz
            Rw1 = (-.1)*(Rv-3.1)*ebvfac+Rw1
        if Rv > 3.1:
            #linear interpolation from Schlafly 2011 table
            Rg = (3.006-3.273)*(Rv-3.1)*ebvfac+Rg
            Rr = (2.205-2.176)*(Rv-3.1)*ebvfac+Rr
            Rz = (1.236-1.217)*(Rv-3.1)*ebvfac+Rz
            Rw1 = (-.05)*(Rv-3.1)*ebvfac+Rw1
        print('ebvfac,Rv,Rg,Rr,Rz,Rw1')
        print(ebvfac,Rv,Rg,Rr,Rz,Rw1)
        # Convert the coefficients into per-band transmissions and de-redden.
        wtg = 10**(-0.4*Rg*f['EBV'])
        wtr = 10**(-0.4*Rr*f['EBV'])
        wtz = 10**(-0.4*Rz*f['EBV'])
        wtw = 10**(-0.4*Rw1*f['EBV'])
        gflux = f['FLUX_G']/wtg
        rflux = f['FLUX_R']/wtr
        zflux = f['FLUX_Z']/wtz
        w1flux = f['FLUX_W1']/wtw
        zfiberflux = f['FIBERFLUX_Z']/wtz
    if type == 'LRG':
        w = cuts.isLRG_colors(gflux, rflux, zflux, w1flux,zfiberflux, south=south)
    if type == 'ELG':
        w = cuts.isELG_colors(gflux, rflux, zflux, w1flux,zfiberflux, south=south)
    return w
def putstar_me(gfluxmin, south=True):
    """
    Select PSF ("star") sources brighter than *gfluxmin* from every sweep
    file of one hemisphere, keep a reduced column set, and write the
    concatenated result to a FITS file under outdir/mysweeps/.
    """
    if south == True:
        fls = sfs
    else:
        fls = sfn
    # First file establishes the reduced dtype for the accumulated array.
    f = fitsio.read(fls[0])
    w0 = starsel_sweep(f, gfluxmin)
    fw = f[w0]
    #dt = fw.dtype
    dt = []
    cols = ['BRICKID','OBJID','RA','DEC','DCHISQ','EBV','FLUX_G','FLUX_R','FLUX_Z','MW_TRANSMISSION_G','MW_TRANSMISSION_R','MW_TRANSMISSION_Z',\
    'PSFDEPTH_G','PSFDEPTH_R','PSFDEPTH_Z','GAIA_PHOT_G_MEAN_MAG','GAIA_ASTROMETRIC_EXCESS_NOISE','MASKBITS']
    for colname in cols:
        dt.append((colname, fw.dtype[colname]))
    new = np.empty(len(fw), dtype=dt)
    for colname in cols:
        new[colname][...] = fw[colname][...]
    tm = new
    #for i in range(1,10):
    for i in range(1, len(fls)):
        #try:
        f = fitsio.read(fls[i])
        w = starsel_sweep(f, gfluxmin)
        fw = f[w]
        new = np.empty(len(fw), dtype=dt)
        for colname in cols:
            new[colname][...] = fw[colname][...]
        tmn = np.concatenate((tm, new), axis=0)
        print(i, len(tmn))
        tm = tmn
        #except:
        #    print(i)
    NS = 'south'
    if south != True:
        NS = 'north'
    # Find a filename that does not exist yet by appending 'n' characters;
    # fitsio.read succeeding means the candidate is already taken.
    s = 0
    es = ''
    while s == 0:
        outf = outdir+'mysweeps/stars_gfluxg'+str(gfluxmin)+'_'+NS+es+'.fits'
        try:
            fitsio.read(outf)
            es += 'n'
            print(es)
            if len(es) > 10:
                return 'es too long, probably a bug'
        except:
            # Read failed -> file absent (or unreadable); use this name.
            s = 1
    #tm = Table(tm,names=fi.dtype.names)
    fits = fitsio.FITS(outf, 'rw')
    fits.write(tm, names=cols, overwrite=True)
def puttype(type, south=True, ebvfac=1., Rv=3.1):
    """
    Run typesel() over every sweep file of one hemisphere and write the
    concatenated selection (keeping all sweep columns) to a FITS file
    under outdir/mysweeps/.
    """
    if south == True:
        fls = sfs
    else:
        fls = sfn
    # First file establishes the dtype for the accumulated array.
    f = fitsio.read(fls[0])
    w0 = typesel(f, type, south=south, ebvfac=ebvfac, Rv=Rv)
    fw = f[w0]
    dt = fw.dtype
    new = np.empty(len(fw), dtype=dt)
    #cols = ['BRICKID','OBJID','RA','DEC','DCHISQ','EBV','FLUX_G','FLUX_R','FLUX_Z','MW_TRANSMISSION_G','MW_TRANSMISSION_R','MW_TRANSMISSION_Z',\
    #'PSFDEPTH_G','PSFDEPTH_R','PSFDEPTH_Z','GAIA_PHOT_G_MEAN_MAG','GAIA_ASTROMETRIC_EXCESS_NOISE','MASKBITS']
    #for colname in fw.dtype.names:
    cols = fw.dtype.names
    for colname in cols:
        new[colname][...] = fw[colname][...]
    tm = new
    for i in range(1, len(fls)):
        #try:
        f = fitsio.read(fls[i])
        w = typesel(f, type, south=south, ebvfac=ebvfac, Rv=Rv)
        fw = f[w]
        new = np.empty(len(fw), dtype=dt)
        for colname in cols:
            new[colname][...] = fw[colname][...]
        tmn = np.concatenate((tm, new), axis=0)
        print(i, len(tmn))
        tm = tmn
        #except:
        #    print(i)
    NS = 'south'
    if south != True:
        NS = 'north'
    s = 0
    # Output name encodes the extinction parameters used for the selection.
    es = 'ebvfac'+str(ebvfac)+'Rv'+str(Rv)
    outf = outdir+'mysweeps/'+type+'dr8_'+NS+es+'.fits'
    # while s == 0:
    #
    #     try:
    #         fitsio.read(outf)
    #         es += 'n'
    #         print(es)
    #         if len(es) > 10:
    #             return 'es too long, probably a bug'
    #     except:
    #         s = 1
    #tm = Table(tm,names=fi.dtype.names)
    fits = fitsio.FITS(outf, 'rw')
    fits.write(tm, names=dt.names, overwrite=True)
if __name__ == '__main__':
    # Build concatenated target catalogs for the main dark-time programs.
    #get target catalogs for the main dark time programs
    gather_targets('ELG')
    gather_targets('LRG')
    gather_targets('QSO')
    #fluxlim = float(str(sys.argv[1]))
    #putstar(fluxlim)
    #this was meant to test dependence with changes to extinction parameters explicitly
    #ebf = 1
    #rv = 2.6
    #puttype('LRG',ebvfac=ebf,Rv=rv)
    #print('LRG south done')
    #puttype('LRG',south=False,ebvfac=ebf,Rv=rv)
    #print('LRG north done')
    #puttype('ELG',ebvfac=ebf,Rv=rv)
    #print('ELG south done')
    #puttype('ELG',south=False,ebvfac=ebf,Rv=rv)
    #print('ELG north done')
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@Sandbox@imaging@select_samples.py@.PATH_END.py
|
{
"filename": "test_cco_buf_inter.py",
"repo_name": "mpi4py/mpi4py",
"repo_path": "mpi4py_extracted/mpi4py-master/test/test_cco_buf_inter.py",
"type": "Python"
}
|
from mpi4py import MPI
import mpiunittest as unittest
import arrayimpl
def skip_op(typecode, op):
    """Return True when the (typecode, op) reduction pair is unsupported."""
    # Boolean buffers are never reduced in these tests.
    if typecode in '?':
        return True
    # MAX/MIN are undefined for complex (F, D) and long-double (G) types.
    return typecode in 'FDG' and op in (MPI.MAX, MPI.MIN)
def maxvalue(a):
    """Return the largest value safely representable by *a*'s element type."""
    # array.array exposes .typecode; numpy arrays expose .dtype.char instead.
    typecode = getattr(a, 'typecode', None)
    if typecode is None:
        typecode = a.dtype.char
    if typecode == 'f':
        return 1e30
    if typecode == 'd':
        return 1e300
    # Integer types: stay one sign-bit's worth below the overflow limit.
    return 2 ** (a.itemsize * 7) - 1
@unittest.skipMPI('openmpi(<1.6.0)')
@unittest.skipMPI('msmpi', MPI.COMM_WORLD.Get_size() >= 3)
@unittest.skipMPI('MPICH1')
@unittest.skipIf(MPI.ROOT == MPI.PROC_NULL, 'mpi-root')
@unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2')
class BaseTestCCOBufInter:
    """Buffer-based collective communication tests on an intercommunicator.

    setUp splits BASECOMM into two halves (COLOR 0/1) and joins them with
    Create_intercomm; each test then exercises one collective across the
    two groups, checking the received buffer contents element by element.
    """

    BASECOMM = MPI.COMM_NULL
    INTRACOMM = MPI.COMM_NULL
    INTERCOMM = MPI.COMM_NULL

    def setUp(self):
        size = self.BASECOMM.Get_size()
        rank = self.BASECOMM.Get_rank()
        # Lower half of BASECOMM is group 0, upper half is group 1; the
        # remote leader is always rank 0 of the other group in BASECOMM.
        if rank < size // 2:
            self.COLOR = 0
            self.LOCAL_LEADER = 0
            self.REMOTE_LEADER = size // 2
        else:
            self.COLOR = 1
            self.LOCAL_LEADER = 0
            self.REMOTE_LEADER = 0
        self.INTRACOMM = self.BASECOMM.Split(self.COLOR, key=0)
        Create_intercomm = MPI.Intracomm.Create_intercomm
        self.INTERCOMM = Create_intercomm(
            self.INTRACOMM,
            self.LOCAL_LEADER,
            self.BASECOMM,
            self.REMOTE_LEADER,
        )

    def tearDown(self):
        self.INTRACOMM.Free()
        self.INTERCOMM.Free()

    def testBarrier(self):
        self.INTERCOMM.Barrier()

    def testBcast(self):
        comm = self.INTERCOMM
        rank = comm.Get_rank()
        size = comm.Get_size()
        rsize = comm.Get_remote_size()
        for array, typecode in arrayimpl.loop():
            for color in (0, 1):
                if self.COLOR == color:
                    # Broadcasting group: exactly one rank is MPI.ROOT,
                    # the rest pass MPI.PROC_NULL.
                    for root in range(size):
                        if root == rank:
                            buf = array(root, typecode, root+color)
                            comm.Bcast(buf.as_mpi(), root=MPI.ROOT)
                        else:
                            comm.Bcast(None, root=MPI.PROC_NULL)
                else:
                    # Receiving group: every rank receives from each
                    # remote root in turn.
                    for root in range(rsize):
                        buf = array(-1, typecode, root+color)
                        comm.Bcast(buf.as_mpi(), root=root)
                        check = arrayimpl.scalar(root)
                        for value in buf:
                            self.assertEqual(value, check)

    def testGather(self):
        comm = self.INTERCOMM
        rank = comm.Get_rank()
        size = comm.Get_size()
        rsize = comm.Get_remote_size()
        for array, typecode in arrayimpl.loop():
            for color in (0, 1):
                if self.COLOR == color:
                    for root in range(size):
                        if root == rank:
                            rbuf = array(-1, typecode, (rsize, root+color))
                            comm.Gather(None, rbuf.as_mpi(), root=MPI.ROOT)
                            check = arrayimpl.scalar(root)
                            for value in rbuf.flat:
                                self.assertEqual(value, check)
                        else:
                            comm.Gather(None, None, root=MPI.PROC_NULL)
                else:
                    for root in range(rsize):
                        sbuf = array(root, typecode, root+color)
                        comm.Gather(sbuf.as_mpi(), None, root=root)

    def testScatter(self):
        comm = self.INTERCOMM
        rank = comm.Get_rank()
        size = comm.Get_size()
        rsize = comm.Get_remote_size()
        for array, typecode in arrayimpl.loop():
            for color in (0, 1):
                if self.COLOR == color:
                    for root in range(size):
                        if root == rank:
                            sbuf = array(root, typecode, (rsize, root+color))
                            comm.Scatter(sbuf.as_mpi(), None, root=MPI.ROOT)
                        else:
                            comm.Scatter(None, None, root=MPI.PROC_NULL)
                else:
                    for root in range(rsize):
                        rbuf = array(root, typecode, root+color)
                        comm.Scatter(None, rbuf.as_mpi(), root=root)
                        check = arrayimpl.scalar(root)
                        for value in rbuf:
                            self.assertEqual(value, check)

    def testAllgather(self):
        comm = self.INTERCOMM
        rank = comm.Get_rank()
        size = comm.Get_size()
        rsize = comm.Get_remote_size()
        for array, typecode in arrayimpl.loop():
            for color in (0, 1):
                # Both groups send and receive; only buffer shapes differ
                # between the two sides.
                if self.COLOR == color:
                    for n in range(size):
                        sbuf = array( n, typecode, color)
                        rbuf = array(-1, typecode, (rsize, n+color))
                        comm.Allgather(sbuf.as_mpi(), rbuf.as_mpi())
                        check = arrayimpl.scalar(n)
                        for value in rbuf.flat:
                            self.assertEqual(value, check)
                else:
                    for n in range(rsize):
                        sbuf = array( n, typecode, n+color)
                        rbuf = array(-1, typecode, (rsize, color))
                        comm.Allgather(sbuf.as_mpi(), rbuf.as_mpi())
                        check = arrayimpl.scalar(n)
                        for value in rbuf.flat:
                            self.assertEqual(value, check)

    def testAlltoall(self):
        comm = self.INTERCOMM
        rank = comm.Get_rank()
        size = comm.Get_size()
        rsize = comm.Get_remote_size()
        for array, typecode in arrayimpl.loop():
            for color in (0, 1):
                if self.COLOR == color:
                    for n in range(size):
                        sbuf = array( n, typecode, (rsize, (n+1)*color))
                        rbuf = array(-1, typecode, (rsize, n+3*color))
                        comm.Alltoall(sbuf.as_mpi(), rbuf.as_mpi())
                        check = arrayimpl.scalar(n)
                        for value in rbuf.flat:
                            self.assertEqual(value, check)
                else:
                    for n in range(rsize):
                        sbuf = array( n, typecode, (rsize, n+3*color))
                        rbuf = array(-1, typecode, (rsize, (n+1)*color))
                        comm.Alltoall(sbuf.as_mpi(), rbuf.as_mpi())
                        check = arrayimpl.scalar(n)
                        for value in rbuf.flat:
                            self.assertEqual(value, check)

    @unittest.skipMPI('mvapich', MPI.COMM_WORLD.Get_size() > 2)
    def testReduce(self):
        comm = self.INTERCOMM
        rank = comm.Get_rank()
        lsize = comm.Get_size()
        rsize = comm.Get_remote_size()
        for array, typecode in arrayimpl.loop():
            for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
                if skip_op(typecode, op): continue
                for color in (0, 1):
                    if self.COLOR == color:
                        for root in range(lsize):
                            if root == rank:
                                rbuf = array(-1, typecode, rsize)
                                comm.Reduce(
                                    None, rbuf.as_mpi(),
                                    op=op, root=MPI.ROOT,
                                )
                                # Skip exactness checks once the expected
                                # result could overflow/lose precision.
                                max_val = maxvalue(rbuf)
                                for i, value in enumerate(rbuf):
                                    if op == MPI.SUM:
                                        if (i * rsize) < max_val:
                                            self.assertAlmostEqual(value, i*rsize)
                                    elif op == MPI.PROD:
                                        if (i ** rsize) < max_val:
                                            self.assertAlmostEqual(value, i**rsize)
                                    elif op == MPI.MAX:
                                        self.assertEqual(value, i)
                                    elif op == MPI.MIN:
                                        self.assertEqual(value, i)
                            else:
                                comm.Reduce(
                                    None, None,
                                    op=op, root=MPI.PROC_NULL,
                                )
                    else:
                        for root in range(rsize):
                            sbuf = array(range(lsize), typecode)
                            comm.Reduce(
                                sbuf.as_mpi(), None,
                                op=op, root=root,
                            )

    def testAllreduce(self):
        comm = self.INTERCOMM
        rank = comm.Get_rank()
        size = comm.Get_size()
        rsize = comm.Get_remote_size()
        for array, typecode in arrayimpl.loop():
            for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
                if skip_op(typecode, op): continue
                sbuf = array(range(5), typecode)
                rbuf = array([-1] * 5, typecode)
                comm.Allreduce(sbuf.as_mpi(), rbuf.as_mpi(), op)
                max_val = maxvalue(rbuf)
                for i, value in enumerate(rbuf):
                    if op == MPI.SUM:
                        if (i * rsize) < max_val:
                            self.assertAlmostEqual(value, i*rsize)
                    elif op == MPI.PROD:
                        if (i ** rsize) < max_val:
                            self.assertAlmostEqual(value, i**rsize)
                    elif op == MPI.MAX:
                        self.assertEqual(value, i)
                    elif op == MPI.MIN:
                        self.assertEqual(value, i)
class TestCCOBufInter(BaseTestCCOBufInter, unittest.TestCase):
    # Run the base suite directly on COMM_WORLD.
    BASECOMM = MPI.COMM_WORLD
class TestCCOBufInterDup(TestCCOBufInter):
    # Same suite, but on a Dup'ed communicator, so the collectives are
    # also exercised on duplicated (non-world) communicators.

    def setUp(self):
        self.BASECOMM = self.BASECOMM.Dup()
        super().setUp()

    def tearDown(self):
        self.BASECOMM.Free()
        super().tearDown()
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
mpi4pyREPO_NAMEmpi4pyPATH_START.@mpi4py_extracted@mpi4py-master@test@test_cco_buf_inter.py@.PATH_END.py
|
{
"filename": "_activeselection.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/_activeselection.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Activeselection(_BaseLayoutHierarchyType):
    """Layout property object for ``layout.activeselection``.

    NOTE: this module follows plotly's auto-generated graph-object
    pattern; property values are stored/validated through the
    ``_BaseLayoutHierarchyType`` item protocol (``self[...]``).
    """

    # class properties
    # --------------------
    _parent_path_str = "layout"
    _path_str = "layout.activeselection"
    _valid_props = {"fillcolor", "opacity"}

    # fillcolor
    # ---------
    @property
    def fillcolor(self):
        """
        Sets the color filling the active selection' interior.

        The 'fillcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["fillcolor"]

    @fillcolor.setter
    def fillcolor(self, val):
        self["fillcolor"] = val

    # opacity
    # -------
    @property
    def opacity(self):
        """
        Sets the opacity of the active selection.

        The 'opacity' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        fillcolor
            Sets the color filling the active selection' interior.
        opacity
            Sets the opacity of the active selection.
        """

    def __init__(self, arg=None, fillcolor=None, opacity=None, **kwargs):
        """
        Construct a new Activeselection object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Activeselection`
        fillcolor
            Sets the color filling the active selection' interior.
        opacity
            Sets the opacity of the active selection.

        Returns
        -------
        Activeselection
        """
        super(Activeselection, self).__init__("activeselection")

        # Internal construction path: parent supplied directly, skip
        # validation entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.Activeselection
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Activeselection`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in arg.
        _v = arg.pop("fillcolor", None)
        _v = fillcolor if fillcolor is not None else _v
        if _v is not None:
            self["fillcolor"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@_activeselection.py@.PATH_END.py
|
{
"filename": "python-reference_utils_get_confusion_matrix.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/concepts/python-reference_utils_get_confusion_matrix.md",
"type": "Markdown"
}
|
# get_confusion_matrix
{% include [utils-get_confusion_matrix__desc](../_includes/work_src/reusage-python/get_confusion_matrix__desc.md) %}
## {{ dl--invoke-format }} {#method-call-format}
```python
get_confusion_matrix(model, data, thread_count)
```
## {{ dl--parameters }} {#parameters-list}
### model
#### Description
The trained model.
**Possible types**
{{ python-type__catboostCatBoost }}
**Default value**
{{ python--required }}
### data
#### Description
A set of samples to build the confusion matrix with.
**Possible types**
{{ python-type--pool }}
**Default value**
{{ python--required }}
### thread_count
#### Description
The number of threads to use.
**Possible types**
{{ python-type--int }}
**Default value**
-1 (the number of threads is set to the number of CPU cores)
## {{ dl--output-format }} {#output-data-format}
confusion matrix : array, shape = [n_classes, n_classes]
## {{ dl--example }} {#examples}
#### Multiclassification
```python
from catboost import Pool, CatBoostClassifier
from catboost.utils import get_confusion_matrix
train_data = [[1, 1924, 44],
[1, 1932, 37],
[0, 1980, 37],
[1, 2012, 204]]
train_label = ["France", "USA", "USA", "UK"]
train_dataset = Pool(data=train_data,
label=train_label)
model = CatBoostClassifier(loss_function='MultiClass',
iterations=100,
verbose=False)
model.fit(train_dataset)
cm = get_confusion_matrix(model, Pool(train_data, train_label))
print(cm)
```
Output:
```bash
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 2.]]
```
#### Binary classification
```python
from catboost import Pool, CatBoostClassifier
from catboost.utils import get_confusion_matrix
train_data = [[1, 1924, 44],
[1, 1932, 37],
[0, 1980, 37],
[1, 2012, 204]]
train_label = [0, 1, 1, 0]
train_dataset = Pool(data=train_data,
label=train_label)
model = CatBoostClassifier(loss_function='Logloss',
iterations=100,
verbose=False)
model.fit(train_dataset)
cm = get_confusion_matrix(model, Pool(train_data, train_label))
print(cm)
```
Output:
```bash
[[2. 0.]
[0. 2.]]
```
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@concepts@python-reference_utils_get_confusion_matrix.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "lsst-ts/ts_wep",
"repo_path": "ts_wep_extracted/ts_wep-main/python/lsst/ts/wep/deblend/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from .deblendAdapt import *
from .deblendDefault import *
from .deblendDonutFactory import *
from .nelderMeadModify import *
|
lsst-tsREPO_NAMEts_wepPATH_START.@ts_wep_extracted@ts_wep-main@python@lsst@ts@wep@deblend@__init__.py@.PATH_END.py
|
{
"filename": "test_sky_stacking.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/f2/tests/longslit/test_sky_stacking.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""
Tests for sky stacking and subtraction for F2.
"""
import pytest
import astrodata
from astrodata.testing import download_from_archive
import gemini_instruments
from geminidr.f2.primitives_f2_longslit import F2Longslit
# ---- Fixtures ---------------------------------------------------------------
@pytest.fixture
def f2_abba():
    """ABBA nod sequence of F2 longslit frames, downloaded from the archive."""
    return [astrodata.open(download_from_archive(f)) for f in
            ('S20200301S0071.fits', 'S20200301S0072.fits',
             'S20200301S0073.fits', 'S20200301S0074.fits')]
# ---- Tests ------------------------------------------------------------------
@pytest.mark.dragons_remote_data
@pytest.mark.f2ls
def test_associate_sky_abba(f2_abba):
    """ABBA: each A frame gets both B frames as skies, and vice versa."""
    p = F2Longslit(f2_abba)
    p.prepare()
    p.separateSky()
    p.associateSky()

    a1, b1, b2, a2 = p.showList()

    a_frames = {'S20200301S0071_skyAssociated.fits',
                'S20200301S0074_skyAssociated.fits'}
    b_frames = {'S20200301S0072_skyAssociated.fits',
                'S20200301S0073_skyAssociated.fits'}

    # check that the A frames get the B frames as skies, and vice versa
    for ad in (a1, a2):
        assert set(ad.SKYTABLE['SKYNAME']) == b_frames
    for ad in (b1, b2):
        assert set(ad.SKYTABLE['SKYNAME']) == a_frames
@pytest.mark.dragons_remote_data
@pytest.mark.f2ls
def test_associate_sky_pass_skies(f2_abba):
    """Explicitly passed skies end up in the 'sky' stream unchanged."""
    in_sky_names = set([ad.filename for ad in f2_abba[1:3]])

    p = F2Longslit(f2_abba)
    # Don't run separate sky to simulate resuming work with known skies.
    p.associateSky(sky=f2_abba[1:3])

    out_sky_names = set([ad.phu['ORIGNAME'] for ad in p.streams['sky']])

    assert in_sky_names == out_sky_names
@pytest.mark.dragons_remote_data
@pytest.mark.f2ls
def test_associate_sky_use_all(f2_abba):
    """With use_all=True every frame gets all other frames as skies."""
    in_sky_names = set([ad.filename for ad in f2_abba])

    p = F2Longslit(f2_abba)
    p.prepare()
    p.separateSky()
    p.associateSky(distance=0, use_all=True)

    for ad in p.showList():
        skies = set([s.replace('_skyAssociated', '')
                     for s in ad.SKYTABLE['SKYNAME']])

        # Check that each AD has all the other frames as skies, but not itself.
        assert skies == in_sky_names - set([ad.phu['ORIGNAME']])
@pytest.mark.dragons_remote_data
@pytest.mark.f2ls
def test_associate_sky_exclude_all(f2_abba):
    """A minimum distance larger than the nod offset excludes all skies."""
    p = F2Longslit(f2_abba)
    p.prepare()
    p.separateSky()
    # Offset is 40" so this will exclude skies if 'use_all' is False.
    p.associateSky(distance=50)
@pytest.mark.dragons_remote_data
@pytest.mark.f2ls
def test_associate_sky_quasi_abcde():
    """Irregular nod pattern: check the expected sky sets frame by frame."""
    files = ['S20210515S0196.fits', 'S20210515S0197.fits',
             'S20210515S0201.fits', 'S20210515S0202.fits',
             'S20210515S0203.fits', 'S20210515S0206.fits',
             'S20210515S0208.fits']

    data = [astrodata.open(download_from_archive(f)) for f in files]

    p = F2Longslit(data)
    p.prepare()
    p.separateSky()
    p.associateSky()

    # First frame associates with the following three offset positions.
    assert set(p.showList()[0].SKYTABLE['SKYNAME']) == set([
                                    'S20210515S0197_skyAssociated.fits',
                                    'S20210515S0201_skyAssociated.fits',
                                    'S20210515S0202_skyAssociated.fits'])

    for ad in p.showList()[1:-1]:
        assert set(ad.SKYTABLE['SKYNAME']) == set([
                                    'S20210515S0196_skyAssociated.fits',
                                    'S20210515S0208_skyAssociated.fits'])

    assert set(p.showList()[-1].SKYTABLE['SKYNAME']) == set([
                                    'S20210515S0202_skyAssociated.fits',
                                    'S20210515S0203_skyAssociated.fits',
                                    'S20210515S0206_skyAssociated.fits'])
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@f2@tests@longslit@test_sky_stacking.py@.PATH_END.py
|
{
"filename": "widget_upload.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipywidgets/py3/ipywidgets/widgets/widget_upload.py",
"type": "Python"
}
|
# Copyright(c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""FileUpload class.
Represents a file upload button.
"""
import datetime as dt
from traitlets import (
observe, default, Unicode, Dict, Int, Bool, Bytes, CaselessStrEnum
)
from .widget_description import DescriptionWidget
from .valuewidget import ValueWidget
from .widget_core import CoreWidget
from .widget_button import ButtonStyle
from .widget import register, widget_serialization
from .trait_types import InstanceDict, TypedTuple
from traitlets import Bunch
def _deserialize_single_file(js):
    """Convert one file entry from its JSON wire form into a Bunch."""
    record = Bunch()
    # Copy the scalar attributes straight through.
    for key in ('name', 'type', 'size', 'content'):
        record[key] = js[key]
    # The frontend sends a JS timestamp (milliseconds since the epoch);
    # expose it as a timezone-aware UTC datetime.
    millis = js['last_modified']
    record['last_modified'] = dt.datetime.fromtimestamp(
        millis / 1000, tz=dt.timezone.utc)
    return record
def _deserialize_value(js, _):
    """Deserialize the full upload payload (a list of file entries)."""
    return list(map(_deserialize_single_file, js))
def _serialize_single_file(uploaded_file):
js = {}
for attribute in ['name', 'type', 'size', 'content']:
js[attribute] = uploaded_file[attribute]
js['last_modified'] = int(uploaded_file['last_modified'].timestamp() * 1000)
return js
def _serialize_value(value, _):
    """Serialize the full upload value (a list of file records)."""
    return [_serialize_single_file(record) for record in value]
# Serializer pair attached to the ``value`` trait: translates between the
# kernel-side list of file records and the frontend JSON payload.
_value_serialization = {
    'from_json': _deserialize_value,
    'to_json': _serialize_value
}
@register
class FileUpload(DescriptionWidget, ValueWidget, CoreWidget):
    """File upload widget

    This creates a file upload input that allows the user to select
    one or more files to upload. The file metadata and content
    can be retrieved in the kernel.

    Examples
    --------

    >>> import ipywidgets as widgets
    >>> uploader = widgets.FileUpload()

    # After displaying `uploader` and uploading a file:

    >>> uploader.value
    [
      {
        'name': 'example.txt',
        'type': 'text/plain',
        'size': 36,
        'last_modified': datetime.datetime(2020, 1, 9, 15, 58, 43, 321000, tzinfo=datetime.timezone.utc),
        'content': <memory at 0x10c1b37c8>
      }
    ]
    >>> uploader.value[0].content.tobytes()
    b'This is the content of example.txt.\n'

    Parameters
    ----------

    accept: str, optional
        Which file types to accept, e.g. '.doc,.docx'. For a full
        description of how to specify this, see
        https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file#attr-accept
        Defaults to accepting all file types.

    multiple: bool, optional
        Whether to accept multiple files at the same time. Defaults to False.

    disabled: bool, optional
        Whether user interaction is enabled.

    icon: str, optional
        The icon to use for the button displayed on the screen.
        Can be any Font-awesome icon without the fa- prefix.
        Defaults to 'upload'. If missing, no icon is shown.

    description: str, optional
        The text to show on the label. Defaults to 'Upload'.

    button_style: str, optional
        One of 'primary', 'success', 'info', 'warning', 'danger' or ''.

    style: widgets.widget_button.ButtonStyle, optional
        Style configuration for the button.

    value: Tuple[Dict], optional
        The value of the last uploaded file or set of files. See the
        documentation for details of how to use this to retrieve file
        content and metadata:
        https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#File-Upload

    error: str, optional
        Whether the last upload triggered an error.
    """

    # Model/view names used by the frontend to locate the JS counterpart.
    _model_name = Unicode('FileUploadModel').tag(sync=True)
    _view_name = Unicode('FileUploadView').tag(sync=True)

    accept = Unicode(help='File types to accept, empty string for all').tag(sync=True)
    multiple = Bool(help='If True, allow for multiple files upload').tag(sync=True)
    disabled = Bool(help='Enable or disable button').tag(sync=True)
    icon = Unicode('upload', help="Font-awesome icon name, without the 'fa-' prefix.").tag(sync=True)
    button_style = CaselessStrEnum(
        values=['primary', 'success', 'info', 'warning', 'danger', ''], default_value='',
        help='Use a predefined styling for the button.').tag(sync=True)
    style = InstanceDict(ButtonStyle).tag(sync=True, **widget_serialization)
    error = Unicode(help='Error message').tag(sync=True)
    # NOTE(review): value updates originate in the frontend; presumably
    # echo_update=False keeps the kernel from echoing the (potentially
    # large) file payload back to the browser — confirm against traitlets
    # /ipywidgets sync semantics.
    value = TypedTuple(Dict(), help='The file upload value').tag(
        sync=True, echo_update=False, **_value_serialization)

    @default('description')
    def _default_description(self):
        return 'Upload'
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipywidgets@py3@ipywidgets@widgets@widget_upload.py@.PATH_END.py
|
{
"filename": "(5.1) delta Scuti SNR.ipynb",
"repo_name": "danhey/maelstrom",
"repo_path": "maelstrom_extracted/maelstrom-master/paper/notebooks/(5.1) delta Scuti SNR.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%run setup.py
```
```python
df = pd.read_csv('../../../../Dropbox (Sydney Uni)/Shared/pulsator_fraction/all_stars_with_gaia_mathur_green.csv')
puls = df[df['pulsating']==1]
```
Let's count the number of TESS CVZ stars with 2 min data
```python
files = []
import glob
for sector in list(range(1,14)):
files.extend(glob.glob('/Volumes/silo2/dhey3294/TESS/sector_' + str(sector) + '/tess*.fits'))
tics = [a.split('_')[1].split('/')[-1].split('-')[2].lstrip('0') for a in files]
unique_tics = np.unique(tics)
```
```python
df = pd.read_csv('../data/MAST_Crossmatch_TIC.csv', skiprows=4)
cut = (df['Teff'] > 6500) & (df['Teff'] < 10000)
unique_tics = df[cut]['target_name'].values
```
And now search for delta Scuti stars
```python
from scipy.stats import skew
def dsct_search(unique_tic):
    """Search one TIC target for delta Scuti-like pulsation.

    Stitches together all available 2-min sector light curves for the
    target, computes the amplitude spectrum, and flags the star when the
    20-100 c/d power distribution is strongly skewed (isolated peaks).

    Returns (tic, skewness, max/median power) when flagged, else None;
    also saves a diagnostic plot for flagged stars.
    """
    try:
        unique_tic = str(unique_tic)
        indices = [i for i, x in enumerate(tics) if x == unique_tic]
        lc = lk.TessLightCurveFile(files[indices[0]]).PDCSAP_FLUX.normalize()
        for index in indices[1:]:
            lc = lc.append(lk.TessLightCurveFile(files[index]).PDCSAP_FLUX.normalize())
    # Best-effort skip of unreadable/missing targets, but do NOT use a
    # bare ``except:`` -- that would also swallow KeyboardInterrupt and
    # SystemExit, making long batch runs impossible to interrupt.
    except Exception:
        return None
    lc = lc.remove_nans()
    pg = lc.to_periodogram(normalization='amplitude')
    # Skewness of the amplitude spectrum in the classical dSct frequency
    # range; a few strong peaks give a highly skewed distribution.
    skewer = skew(pg.power.value[(pg.frequency.value > 20) & (pg.frequency.value < 100)])
    if skewer > 5:
        fig, axes = plt.subplots(3, 1, figsize=[8, 13])
        lc.plot(ax=axes[0])
        axes[1].plot(pg.frequency.value, pg.power.value, linewidth=0.7)
        axes[2].plot(pg.frequency.value, pg.power.value, linewidth=0.7)
        axes[2].set_xlim(0, 100)
        plt.savefig('dSct search/' + unique_tic + '.png', bbox_inches='tight')
        plt.clf()
        plt.close(fig)
        return unique_tic, skewer, pg.power.value.max() / np.median(pg.power.value)
    else:
        return None
```
```python
from tqdm import tqdm
dscts = []
for unique_tic in tqdm(unique_tics):
dscts.append(dsct_search(unique_tic))
```
100%|██████████| 1651/1651 [00:00<00:00, 273610.02it/s]
```python
res = np.loadtxt('res.txt')
snrs = np.array(np.array(res)[:,2], dtype=float)
plt.hist(snrs[snrs<2000], bins=15);
plt.xlim(0,1750)
```
(0, 1750)

```python
import matplotlib
matplotlib.rcParams["font.size"] = 7
```
```python
import seaborn as sns
sns.set_style('white')
plt.figure(figsize=mnras_size(240.))
_, bins, _ = plt.hist(np.log(snrs), bins=18, alpha=0.4, density=True, color=red, label=r'TESS');
plt.hist(np.log(puls['snrmed']), bins=bins, alpha=0.4, color=blue, density=True, label=r'Kepler')
plt.xlabel('log SNR')
plt.ylabel('Probability density')
plt.legend()
plt.axvline(np.median(np.log(snrs)), c=red, linewidth=0.7, linestyle='dashed')
plt.axvline(np.median(np.log(puls['snrmed'])), c=blue, linewidth=0.7, linestyle='dashed')
plt.savefig(overleaf_path + 'dsct_tess_kepler_comparison.pdf', dpi=300, bbox_inches='tight', pad_inches=0)
```

```python
```
|
danheyREPO_NAMEmaelstromPATH_START.@maelstrom_extracted@maelstrom-master@paper@notebooks@(5.1) delta Scuti SNR.ipynb@.PATH_END.py
|
{
"filename": "TestNLFitters.py",
"repo_name": "dokester/BayesicFitting",
"repo_path": "BayesicFitting_extracted/BayesicFitting-master/BayesicFitting/test/TestNLFitters.py",
"type": "Python"
}
|
# run with : python3 -m unittest TestNLFitters
import unittest
import os
import numpy as numpy
from numpy.testing import assert_array_almost_equal as assertAAE
from astropy import units
import math
import matplotlib.pyplot as plt
from BayesicFitting import *
from StdTests import stdFittertest
__author__ = "Do Kester"
__year__ = 2017
__license__ = "GPL3"
__version__ = "0.9"
__maintainer__ = "Do"
__status__ = "Development"
# *
# * This file is part of the BayesicFitting package.
# *
# * BayesicFitting is free software: you can redistribute it and/or modify
# * it under the terms of the GNU Lesser General Public License as
# * published by the Free Software Foundation, either version 3 of
# * the License, or ( at your option ) any later version.
# *
# * BayesicFitting is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU Lesser General Public License for more details.
# *
# * The GPL3 license can be found at <http://www.gnu.org/licenses/>.
# *
# * A JAVA version of this code was part of the Herschel Common
# * Science System (HCSS), also under GPL3.
# *
# * 2004 - 2014 Do Kester, SRON (Java code)
# * 2016 - 2017 Do Kester
class TestNLFitters( unittest.TestCase ):
    """
    Test harness for Fitter class.

    Each test runs the shared ``stdFittertest`` driver against one of the
    (mostly scipy-backed) nonlinear fitters, with plotting enabled via
    the DOPLOT environment variable.

    Author:      Do Kester

    """
    def __init__( self, testname ):
        super( ).__init__( testname )
        # Only produce plots when explicitly requested in the environment.
        self.doplot = ( "DOPLOT" in os.environ and os.environ["DOPLOT"] == "1" )

    ################################################################################
    def testAmoebaFitter( self ):
        # NOTE(review): the first options dict is immediately overwritten;
        # presumably the annealing run was disabled on purpose -- confirm.
        options = {"temp": 10.0, "maxiter": 10000 }
        options = {"maxiter": 10000}
        stdFittertest( AmoebaFitter, 201, plot=self.doplot, options=options )

    def testAmoebaFitter2( self ):
        options = {"temp": 10.0, "maxiter": 10000, "verbose": 1 }
        stdFittertest( AmoebaFitter, 201, errdis='gauss', plot=self.doplot, options=options )

    def testAmoebaFitter3( self ):
        options = {"temp": 10.0, "maxiter": 10000 }
        stdFittertest( AmoebaFitter, 201, errdis='cauchy', plot=self.doplot, options=options )

    def testAmoebaFitter4( self ):
        # NOTE(review): first assignment is dead here too (overwritten).
        options = {"maxiter": 10000 }
        options = {"temp": 10.0, "maxiter": 10000 }
        stdFittertest( AmoebaFitter, 201, errdis='Exponential', plot=self.doplot, options=options )

    def testNelderMeadFitter1( self ):
        stdFittertest( NelderMeadFitter, 201, plot=self.doplot, options={"maxiter": 3000} )

    def testNelderMeadFitter2( self ):
        # Valid spelling of an unsupported error distribution -> AttributeError.
        self.assertRaises( AttributeError, stdFittertest, NelderMeadFitter, 201,
                           errdis='Uniform', options={'maxiter':100} )

    def testNelderMeadFitter3( self ):
        # Misspelled error distribution name -> ValueError.
        self.assertRaises( ValueError, stdFittertest, NelderMeadFitter, 201,
                           errdis='Unifom', options={'maxiter':100} )

    def testPowellFitter( self ):
        stdFittertest( PowellFitter, 201, plot=self.doplot )

    def testConjugateGradientFitter( self ):
        stdFittertest( ConjugateGradientFitter, 201, plot=self.doplot )

    def testBfgsFitter( self ):
        stdFittertest( BfgsFitter, 201, plot=self.doplot )

    def testNewtonCgFitter( self ):
        stdFittertest( NewtonCgFitter, 201, plot=self.doplot )

    def testLbfgsbFitter( self ):
        stdFittertest( LbfgsbFitter, 201, plot=self.doplot )

    def testTncFitter( self ):
        stdFittertest( TncFitter, 201, plot=self.doplot )

    def testCobylaFitter( self ):
        stdFittertest( CobylaFitter, 201, plot=self.doplot )

    def testSlsqpFitter( self ):
        stdFittertest( SlsqpFitter, 201, plot=self.doplot )

    def testDoglegFitter( self ):
        stdFittertest( DoglegFitter, 201, plot=self.doplot )

    def testTrustNcgFitter( self ):
        stdFittertest( TrustNcgFitter, 201, plot=self.doplot )

    @classmethod
    def suite( cls ):
        # NOTE(review): passing ``TestNLFitters.__class__`` (i.e. ``type``)
        # here looks wrong -- unittest.TestCase has no ``suite`` attribute;
        # verify whether this legacy hook is ever called.
        return unittest.TestCase.suite( TestNLFitters.__class__ )
|
dokesterREPO_NAMEBayesicFittingPATH_START.@BayesicFitting_extracted@BayesicFitting-master@BayesicFitting@test@TestNLFitters.py@.PATH_END.py
|
{
"filename": "pipes.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/pipes.py",
"type": "Python"
}
|
"""Conversion pipeline templates.
The problem:
------------
Suppose you have some data that you want to convert to another format,
such as from GIF image format to PPM image format. Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similar for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similar for its output.
The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.
The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.
Directions:
-----------
To create a template:
t = Template()
To add a conversion step to a template:
t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.
To add a conversion step at the beginning:
t.prepend(command, kind)
To convert a file to another file using a template:
sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.
To open a file for reading or writing through a conversion pipeline:
fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().
To create a new template object initialized to a given one:
t2 = t.clone()
""" # '
import re
import os
import tempfile
import warnings
# we import the quote function rather than the module for backward compat
# (quote used to be an undocumented but used function in pipes)
from shlex import quote
warnings._deprecated(__name__, remove=(3, 13))
__all__ = ["Template"]
# Conversion step kinds: a two-character code in which the first character
# describes how the step reads its input and the second how it writes its
# output ('f' = needs a real file, '-' = uses stdin/stdout, '.' = no stream).
FILEIN_FILEOUT = 'ff'                # Must read & write real files
STDIN_FILEOUT  = '-f'                # Must write a real file
FILEIN_STDOUT  = 'f-'                # Must read a real file
STDIN_STDOUT   = '--'                # Normal pipeline element
SOURCE         = '.-'                # Must be first, writes stdout
SINK           = '-.'                # Must be last, reads stdin

# Every legal kind code; used for validation in Template.append/prepend.
stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
             SOURCE, SINK]
class Template:
    """Class representing a pipeline template.

    A template is an ordered list of (command, kind) conversion steps that
    can be compiled into a /bin/sh pipeline via ``makepipeline``.
    """

    def __init__(self):
        """Template() returns a fresh pipeline template."""
        self.debugging = 0
        self.reset()

    def __repr__(self):
        """t.__repr__() implements repr(t)."""
        return '<Template instance, steps=%r>' % (self.steps,)

    def reset(self):
        """t.reset() restores a pipeline template to its initial state."""
        self.steps = []

    def clone(self):
        """t.clone() returns a new pipeline template with identical
        initial state as the current one."""
        t = Template()
        t.steps = self.steps[:]
        t.debugging = self.debugging
        return t

    def debug(self, flag):
        """t.debug(flag) turns debugging on or off."""
        self.debugging = flag

    def append(self, cmd, kind):
        """t.append(cmd, kind) adds a new step at the end."""
        if not isinstance(cmd, str):
            raise TypeError('Template.append: cmd must be a string')
        if kind not in stepkinds:
            raise ValueError('Template.append: bad kind %r' % (kind,))
        if kind == SOURCE:
            raise ValueError('Template.append: SOURCE can only be prepended')
        if self.steps and self.steps[-1][1] == SINK:
            raise ValueError('Template.append: already ends with SINK')
        # file-based steps must reference the $IN/$OUT placeholders
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError('Template.append: missing $IN in cmd')
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError('Template.append: missing $OUT in cmd')
        self.steps.append((cmd, kind))

    def prepend(self, cmd, kind):
        """t.prepend(cmd, kind) adds a new step at the front."""
        if not isinstance(cmd, str):
            raise TypeError('Template.prepend: cmd must be a string')
        if kind not in stepkinds:
            raise ValueError('Template.prepend: bad kind %r' % (kind,))
        if kind == SINK:
            raise ValueError('Template.prepend: SINK can only be appended')
        if self.steps and self.steps[0][1] == SOURCE:
            raise ValueError('Template.prepend: already begins with SOURCE')
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError('Template.prepend: missing $IN in cmd')
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError('Template.prepend: missing $OUT in cmd')
        self.steps.insert(0, (cmd, kind))

    def open(self, file, rw):
        """t.open(file, rw) returns a pipe or file object open for
        reading or writing; the file is the other end of the pipeline."""
        if rw == 'r':
            return self.open_r(file)
        if rw == 'w':
            return self.open_w(file)
        raise ValueError('Template.open: rw must be \'r\' or \'w\', not %r'
                         % (rw,))

    def open_r(self, file):
        """t.open_r(file) and t.open_w(file) implement
        t.open(file, 'r') and t.open(file, 'w') respectively."""
        if not self.steps:
            return open(file, 'r')
        if self.steps[-1][1] == SINK:
            # fix: error message previously read "ends width SINK"
            raise ValueError('Template.open_r: pipeline ends with SINK')
        cmd = self.makepipeline(file, '')
        return os.popen(cmd, 'r')

    def open_w(self, file):
        """t.open_w(file) opens the pipeline for writing to `file`."""
        if not self.steps:
            return open(file, 'w')
        if self.steps[0][1] == SOURCE:
            raise ValueError('Template.open_w: pipeline begins with SOURCE')
        cmd = self.makepipeline('', file)
        return os.popen(cmd, 'w')

    def copy(self, infile, outfile):
        """t.copy(infile, outfile) runs the pipeline from infile to
        outfile; returns the exit status of the shell pipeline."""
        return os.system(self.makepipeline(infile, outfile))

    def makepipeline(self, infile, outfile):
        """Return the shell command string implementing this template
        from infile to outfile; prefixes `set -x;` when debugging."""
        cmd = makepipeline(infile, self.steps, outfile)
        if self.debugging:
            print(cmd)
            cmd = 'set -x; ' + cmd
        return cmd
def makepipeline(infile, steps, outfile):
    """Build the /bin/sh command string realizing a pipeline.

    ``infile``/``outfile`` may be '' meaning standard input/output.
    ``steps`` is the list of (command, kind) pairs stored by Template.
    Temporary files are created for file-to-file stage boundaries and a
    shell ``trap`` is emitted to remove them on exit or signal.

    (The original shadowed the builtins ``list`` and ``file``; locals
    renamed accordingly.)
    """
    # Build a list with for each command:
    # [input filename or '', command string, kind, output filename or '']
    steplist = []
    for cmd, kind in steps:
        steplist.append(['', cmd, kind, ''])
    #
    # Make sure there is at least one step
    #
    if not steplist:
        steplist.append(['', 'cat', '--', ''])
    #
    # Take care of the input and output ends
    #
    [cmd, kind] = steplist[0][1:3]
    if kind[0] == 'f' and not infile:
        steplist.insert(0, ['', 'cat', '--', ''])
    steplist[0][0] = infile
    #
    [cmd, kind] = steplist[-1][1:3]
    if kind[1] == 'f' and not outfile:
        steplist.append(['', 'cat', '--', ''])
    steplist[-1][-1] = outfile
    #
    # Invent temporary files to connect stages that need files
    #
    garbage = []
    for i in range(1, len(steplist)):
        lkind = steplist[i-1][2]
        rkind = steplist[i][2]
        if lkind[1] == 'f' or rkind[0] == 'f':
            (fd, temp) = tempfile.mkstemp()
            os.close(fd)
            garbage.append(temp)
            steplist[i-1][-1] = steplist[i][0] = temp
    #
    # Decorate each command with $IN/$OUT assignments and redirections
    #
    for item in steplist:
        [inf, cmd, kind, outf] = item
        if kind[1] == 'f':
            cmd = 'OUT=' + quote(outf) + '; ' + cmd
        if kind[0] == 'f':
            cmd = 'IN=' + quote(inf) + '; ' + cmd
        if kind[0] == '-' and inf:
            cmd = cmd + ' <' + quote(inf)
        if kind[1] == '-' and outf:
            cmd = cmd + ' >' + quote(outf)
        item[1] = cmd
    #
    # Join the commands with pipes, or newlines for file-connected stages
    #
    cmdlist = steplist[0][1]
    for item in steplist[1:]:
        [cmd, kind] = item[1:3]
        if item[0] == '':
            if 'f' in kind:
                # group so the $IN/$OUT assignment stays local to the stage
                cmd = '{ ' + cmd + '; }'
            cmdlist = cmdlist + ' |\n' + cmd
        else:
            cmdlist = cmdlist + '\n' + cmd
    #
    # Arrange to remove the temporary files on exit or on signal
    #
    if garbage:
        rmcmd = 'rm -f'
        for tempname in garbage:
            rmcmd = rmcmd + ' ' + quote(tempname)
        trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
        cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
    #
    return cmdlist
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@pipes.py@.PATH_END.py
|
{
"filename": "results.py",
"repo_name": "sdss/marvin",
"repo_path": "marvin_extracted/marvin-main/python/marvin/tools/results.py",
"type": "Python"
}
|
#!/usr/bin/env python
# encoding: utf-8
# Licensed under a 3-clause BSD license.
#
from __future__ import print_function
import copy
import datetime
import json
import os
import warnings
from collections import namedtuple
from functools import wraps
from operator import add
import numpy as np
import six
from astropy.table import Table, hstack, vstack
from fuzzywuzzy import process
import marvin.utils.plot.scatter
from marvin import config, log
from marvin.api.api import Interaction
from marvin.core import marvin_pickle
from marvin.core.exceptions import (MarvinBreadCrumb, MarvinError, MarvinUserWarning)
from marvin.tools.cube import Cube
from marvin.tools.maps import Maps
from marvin.tools.modelcube import ModelCube
from marvin.tools.rss import RSS
from marvin.utils.datamodel.query import datamodel
from marvin.utils.datamodel.query.base import ParameterGroup
from marvin.utils.general import (downloadList, get_images_by_list, map_bins_to_column, temp_setattr,
turn_off_ion)
# Prefer the C-accelerated pickle on Python 2; Python 3's ``pickle`` already
# uses the C accelerator when available.
# fix: the bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
# only an ImportError should trigger the fallback.
try:
    import cPickle as pickle
except ImportError:
    import pickle

# pandas is an optional dependency; degrade gracefully when it is missing
# (``pd`` is checked for truthiness before use, e.g. in toJson).
try:
    import pandas as pd
except ImportError:
    pd = None
    warnings.warn('Could not import pandas.', MarvinUserWarning)

__all__ = ['Results', 'ResultSet']

# module-level breadcrumb dropper used for usage tracking
breadcrumb = MarvinBreadCrumb()
def local_mode_only(fxn):
    '''Decorator raising MarvinError when the wrapped method runs in remote mode.'''
    @wraps(fxn)
    def wrapper(self, *args, **kwargs):
        # only 'remote' mode is blocked; 'local' and 'auto' pass through
        if self.mode != 'remote':
            return fxn(self, *args, **kwargs)
        raise MarvinError('{0} not available in remote mode'.format(fxn.__name__))
    return wrapper
def remote_mode_only(fxn):
    '''Decorator raising MarvinError when the wrapped method runs in local mode.'''
    @wraps(fxn)
    def wrapper(self, *args, **kwargs):
        # only 'local' mode is blocked; 'remote' and 'auto' pass through
        if self.mode != 'local':
            return fxn(self, *args, **kwargs)
        raise MarvinError('{0} not available in local mode'.format(fxn.__name__))
    return wrapper
class ColumnGroup(ParameterGroup):
    ''' Subclass of ParameterGroup used for Results columns. '''

    def __repr__(self):
        ''' Repr listing the group name, size, and one parameter per line. '''
        body = list.__repr__(self).replace('>,', '>,\n')
        return ('<ParameterGroup name={0.name}, n_parameters={1}>\n '
                '{2}'.format(self, len(self), body))

    def __str__(self):
        ''' String form matches the repr. '''
        return repr(self)
def marvintuple(name, params=None, **kwargs):
    ''' Class factory producing a Marvin ResultRow namedtuple class.

    Builds a `collections.namedtuple` subclass whose instances are single
    result rows.  Rows carry a reference to their parent Results object and
    support ``+`` to merge two rows for the same plateifu without
    duplicating fields.

    Parameters:
        name (str):
            The name of the class. Required.
        params (str|list):
            Field names for the namedtuple; either a list of names or a
            comma-separated string.

    Returns:
        a new namedtuple class

    Example:
        >>> mt = marvintuple('Row', ['mangaid', 'plateifu'])
        >>> row = mt('1-209232', '8485-1901')
        >>> row = mt(mangaid='1-209232', plateifu='8485-1901')
    '''
    # accept a comma-separated string of field names as well as a list
    if params and isinstance(params, six.string_types):
        params = params.split(',') if ',' in params else [params]
        params = [field.strip() for field in params]

    # 'results' is consumed here; everything else is forwarded to namedtuple
    results = kwargs.pop('results', None)

    row_class = namedtuple(name, params, **kwargs)

    def new_add(self, other):
        ''' Overloaded add to combine tuples without duplicates '''
        if self.release:
            assert self.release == other.release, 'Cannot add result rows from different releases'
        if self._search_filter:
            assert self._search_filter == other._search_filter, ('Cannot add result rows generated '
                                                                 'using different search filters')
        assert hasattr(self, 'plateifu') and hasattr(other, 'plateifu'), ("All rows must have a "
                                                                          "plateifu column to be able to add")
        assert self.plateifu == other.plateifu, 'The plateifus must be the same to add these rows'

        # merge the two rows' fields; values from `other` win on collision
        merged = self._asdict()
        merged.update(other._asdict())
        combined_cls = marvintuple(self.__class__.__name__, tuple(merged.keys()),
                                   results=self._results)
        return combined_cls(**merged)

    # attach the merge operator and the Results bookkeeping attributes
    row_class.__add__ = new_add
    row_class._results = results
    row_class.release = results.release if results else None
    row_class._search_filter = results.search_filter if results else None
    return row_class
class ResultSet(list):
    ''' A Set of Results

    A list object representing one page of query results. Each row of the
    list is a ResultRow object (a custom Marvin namedtuple). ResultSets can
    be extended column-wise or row-wise by adding them together.

    Parameters:
        _objects (list):
            A list of objects. Required.
        count (int):
            The count of objects in the current list
        total (int):
            The total count of objects in the full results
        index (int):
            The index of the current set within the total set.
        columns (list):
            A list of columns accompanying this set
        results (Results):
            The Marvin Results object this set is a part of
    '''
    def __init__(self, _objects, **kwargs):
        list.__init__(self, _objects)
        self._results = kwargs.get('results', None)
        self.columns = kwargs.get('columns', None)
        self.count = kwargs.get('count', None)
        self.total = kwargs.get('total', None)
        self._populate_from_results()
        # number of pages the full result set spans at this page size
        self.pages = int(np.ceil(self.total / float(self.count))) if self.count else 0
        self.index = kwargs.get('index') if kwargs.get('index') else 0
        self.end_index = self.index + self.count
        # fix: guard against an empty set (count == 0), which previously
        # raised ZeroDivisionError here
        self.current_page = (int(self.index) + self.count) / self.count if self.count else 0

    def __repr__(self):
        old = list.__repr__(self)
        return ('<ResultSet(set={0.current_page}/{0.pages}, index={0.index}:{0.end_index}, '
                'count_in_set={0.count}, total={0.total})>\n{1}'.format(self, old.replace('),', '),\n')))

    def __getitem__(self, value):
        # str -> column extraction or row search; int -> single row;
        # slice -> new ResultSet window; ndarray -> boolean/fancy indexing
        if isinstance(value, six.string_types):
            value = str(value)
            if value in self.columns:
                colname = self.columns[value].remote
                rows = [row.__getattribute__(colname) for row in self]
            else:
                rows = [row for row in self if value in row]
            if rows:
                return rows[0] if len(rows) == 1 else rows
            else:
                raise ValueError('{0} not found in the list'.format(value))
        elif isinstance(value, int):
            return list.__getitem__(self, value)
        elif isinstance(value, slice):
            newset = list.__getitem__(self, value)
            # fix: open-ended slices (e.g. rs[:5]) have value.start == None,
            # which previously crashed in int(); treat missing start as 0
            start = value.start if value.start is not None else 0
            return ResultSet(newset, index=int(start), count=len(newset), total=self.total,
                             columns=self.columns, results=self._results)
        elif isinstance(value, np.ndarray):
            return np.array(self)[value]

    def __getslice__(self, start, stop):
        # Python 2 slice protocol support; rewraps the list slice as a ResultSet
        newset = list.__getslice__(self, start, stop)
        return ResultSet(newset, index=start, count=len(newset), total=self.total,
                         columns=self.columns, results=self._results)

    def __add__(self, other):
        newresults = self._results
        if not isinstance(other, ResultSet):
            raise MarvinUserWarning('Can only add ResultSets together')

        if self.index == other.index:
            # column-wise add: merge the column lists and zip the rows together
            newcols = self.columns.full + [col.full for col in other.columns
                                           if col.full not in self.columns.full]
            parent = self._results.datamodel if self._results else None
            newcols = ColumnGroup('Columns', newcols, parent=parent)
            newresults.columns = newcols
            # fix: materialize the map; on Python 3 ``len(map(...))`` below
            # raised TypeError (map is lazy); list() is a no-op change on py2
            new_set = list(map(add, self, other))
        else:
            # row-wise add
            # warn if the subsets are not consecutive
            if abs(self.index - other.index) > self.count:
                warnings.warn('You are combining non-consectuive sets! '
                              'The indexing and ordering will be messed up')
            # copy the sets, ordered by their starting index
            new_set = copy.copy(self) if self.index < other.index else copy.copy(other)
            set_b = copy.copy(other) if self.index < other.index else copy.copy(self)
            # filter out any rows that already exist in the set
            rows = [row for row in set_b if row not in new_set]
            # extend the set
            new_set.extend(rows)
            newcols = self.columns

        self.count = len(new_set)
        self.index = min(self.index, other.index)
        return ResultSet(new_set, count=self.count, total=self.total, index=self.index,
                         columns=newcols, results=newresults)

    def __radd__(self, other):
        return self.__add__(other)

    def _populate_from_results(self):
        ''' Populate columns/count/total from the parent Results object '''
        if self._results:
            self.columns = self._results.columns if not self.columns else self.columns
            self.choices = self.columns.list_params('remote')
            self.count = self._results.count if not self.count else self.count
            self.total = self._results.totalcount if not self.total else self.total
        else:
            # no parent Results: fall back to the length of this list
            self.count = self.count if self.count else len(self)
            self.total = self.total if self.total else len(self)

    def to_dict(self, name=None, format_type='listdict'):
        ''' Convert the ResultSet into a dictionary

        Converts the set of results into a list of dictionaries. Optionally
        accepts a column name keyword to extract only that column.

        Parameters:
            name (str):
                Name of the column you wish to extract. Default is None.
            format_type (str):
                The format of the output dictionary. Either 'listdict'
                (a list of dictionaries) or 'dictlist' (a dictionary of lists).

        Returns:
            The output converted into dictionary format.
        '''
        keys = self.columns.list_params('remote')
        if format_type == 'listdict':
            if name:
                output = [{k: res.__getattribute__(k) for k in [name]} for res in self]
            else:
                output = [{k: res.__getattribute__(k) for k in keys} for res in self]
        elif format_type == 'dictlist':
            if name:
                output = {k: [res._asdict()[k] for res in self] for k in [name]}
            else:
                output = {k: [res._asdict()[k] for res in self] for k in keys}
        else:
            raise MarvinError('Cannot output dictionaries. Check your input format_type.')
        return output

    def to_list(self):
        ''' Converts to a standard Python list object '''
        return list(self)

    def sort(self, name=None, reverse=False):
        ''' Sort the results

        In-place sorting of the result set. This is the standard list sorting
        mechanism. When no name is specified, does standard list sorting
        with no key.

        Parameters:
            name (str):
                Column name to sort on. Default is None.
            reverse (bool):
                If True, sorts in reverse (descending) order.

        Returns:
            None (sorts in place, like list.sort)
        '''
        if name:
            colname = self.columns[name].remote
            return list.sort(self, key=lambda row: row.__getattribute__(colname), reverse=reverse)
        else:
            return list.sort(self)
class Results(object):
''' A class to handle results from queries on the MaNGA dataset
Parameters:
results (list):
List of results satisfying the input Query
query (object / str):
The query used to produce these results. In local mode, the query is an
SQLalchemy object that can be used to redo the query, or extract subsets
of results from the query. In remote more, the query is a literal string
representation of the SQL query.
return_type (str):
The MarvinTools object to convert the results into. If initially set, the results
are automatically converted into the specified Marvin Tool Object on initialization
objects (list):
The list of Marvin Tools objects created by returntype
count (int):
The number of objects in the returned query results
totalcount (int):
The total number of objects in the full query results
mode ({'auto', 'local', 'remote'}):
The load mode to use. See :doc:`Mode decision tree</mode_decision>`.
chunk (int):
For paginated results, the number of results to return. Defaults to 10.
start (int):
For paginated results, the starting index value of the results. Defaults to 0.
end (int):
For paginated results, the ending index value of the results. Defaults to start+chunk.
Attributes:
count (int): The count of objects in your current page of results
totalcount (int): The total number of results in the query
query_time (datetime): A datetime TimeDelta representation of the query runtime
Returns:
results: An object representing the Results entity
Example:
>>> f = 'nsa.z < 0.012 and ifu.name = 19*'
>>> q = Query(search_filter=f)
>>> r = q.run()
>>> print(r)
>>> Results(results=[(u'4-3602', u'1902', -9999.0), (u'4-3862', u'1902', -9999.0), (u'4-3293', u'1901', -9999.0), (u'4-3988', u'1901', -9999.0), (u'4-4602', u'1901', -9999.0)],
>>> query=<sqlalchemy.orm.query.Query object at 0x115217090>,
>>> count=64,
>>> mode=local)
'''
    def __init__(self, results=None, mode=None, data_origin=None, release=None, count=None,
                 totalcount=None, runtime=None, response_time=None, chunk=None, start=None,
                 end=None, queryobj=None, query=None, search_filter=None, return_params=None,
                 return_type=None, limit=None, params=None, **kwargs):
        ''' Initialize a Results object; see the class docstring for parameters. '''
        # basic parameters
        self.results = results
        self.mode = mode if mode else config.mode
        self.data_origin = data_origin
        self.objects = None
        # input query parameters -- prefer values carried on the Query object,
        # falling back to the explicitly passed keywords
        self._queryobj = queryobj
        self._params = self._queryobj.params if self._queryobj else params
        self.release = self._queryobj.release if self._queryobj else release
        self.query = self._queryobj.query if self._queryobj else query
        self.return_type = self._queryobj.return_type if self._queryobj else return_type
        self.search_filter = self._queryobj.search_filter if self._queryobj else search_filter
        self.return_params = self._queryobj.return_params if self._queryobj else return_params
        self.limit = self._queryobj.limit if self._queryobj else limit
        # stat parameters
        self.datamodel = datamodel[self.release]
        self.count = count if count else len(self.results)
        self.totalcount = totalcount if totalcount else self.count
        # runtime may arrive as a dict of timedelta kwargs (remote mode);
        # _getRunTime normalizes it to a datetime.timedelta
        self._runtime = runtime
        self.query_time = self._getRunTime() if self._runtime is not None else None
        self.response_time = response_time
        # ordering parameters (populated by _set_page / sort)
        self.chunk = chunk
        self.start = start
        self.end = end
        self.sortcol = None
        self.order = None
        # drop breadcrumb
        breadcrumb.drop(message='Initializing MarvinResults {0}'.format(self.__class__),
                        category=self.__class__)
        # Convert results to MarvinTuple
        if self.count > 0 and self.results:
            self._set_page()
            self._create_result_set(index=self.start)
        # Auto convert to Marvin Object
        if self.return_type:
            self.convertToTool(self.return_type)
def __add__(self, other):
assert isinstance(other, Results) is True, 'Can only add Marvin Results together'
assert self.release == other.release, 'Cannot add Marvin Results from different releases'
assert self.search_filter == other.search_filter, 'Cannot add Marvin Results with different search filters'
results = self.results + other.results
return_params = self.return_params + [p for p in other.return_params if p not in self.return_params]
params = self._params + [p for p in other._params if p not in self._params]
return Results(results=results, params=params, return_params=return_params, limit=self.limit,
search_filter=self.search_filter, count=len(results), totalcount=self.totalcount,
release=self.release, mode=self.mode)
def __radd__(self, other):
return self.__add__(other)
def __repr__(self):
return ('Marvin Results(query={0}, totalcount={1}, count={2}, mode={3})'.format(self.search_filter, self.totalcount, self.count, self.mode))
def showQuery(self):
''' Displays the literal SQL query used to generate the Results objects
Returns:
querystring (str):
A string representation of the SQL query
'''
# check unicode or str
isstr = isinstance(self.query, six.string_types)
# return the string query or compile the real query
if isstr:
return self.query
else:
return str(self.query.statement.compile(compile_kwargs={'literal_binds': True}))
def _getRunTime(self):
''' Sets the query runtime as a datetime timedelta object '''
if isinstance(self._runtime, dict):
return datetime.timedelta(**self._runtime)
else:
return self._runtime
def download(self, images=False, limit=None):
''' Download results via sdss_access
Uses sdss_access to download the query results via rsync.
Downloads them to the local sas. The data type downloaded
is indicated by the returntype parameter
i.e. $SAS_BASE_DIR/mangawork/manga/spectro/redux/...
Parameters:
images (bool):
Set to only download the images of the query results
limit (int):
A limit of the number of results to download
Returns:
NA: na
Example:
>>> r = q.run()
>>> r.returntype = 'cube'
>>> r.download()
'''
plateifu = self.getListOf('plateifu')
if images:
tmp = get_images_by_list(plateifu, releas=self.release, download=True)
else:
downloadList(plateifu, dltype=self.return_type, limit=limit)
def sort(self, name, order='asc'):
''' Sort the set of results by column name
Sorts the results (in place) by a given parameter / column name. Sets
the results to the new sorted results.
Parameters:
name (str):
The column name to sort on
order ({'asc', 'desc'}):
To sort in ascending or descending order. Default is asc.
Example:
>>> r = q.run()
>>> r.getColumns()
>>> [u'mangaid', u'name', u'nsa.z']
>>> r.results
>>> [(u'4-3988', u'1901', -9999.0),
>>> (u'4-3862', u'1902', -9999.0),
>>> (u'4-3293', u'1901', -9999.0),
>>> (u'4-3602', u'1902', -9999.0),
>>> (u'4-4602', u'1901', -9999.0)]
>>> # Sort the results by mangaid
>>> r.sort('mangaid')
>>> [(u'4-3293', u'1901', -9999.0),
>>> (u'4-3602', u'1902', -9999.0),
>>> (u'4-3862', u'1902', -9999.0),
>>> (u'4-3988', u'1901', -9999.0),
>>> (u'4-4602', u'1901', -9999.0)]
>>> # Sort the results by IFU name in descending order
>>> r.sort('ifu.name', order='desc')
>>> [(u'4-3602', u'1902', -9999.0),
>>> (u'4-3862', u'1902', -9999.0),
>>> (u'4-3293', u'1901', -9999.0),
>>> (u'4-3988', u'1901', -9999.0),
>>> (u'4-4602', u'1901', -9999.0)]
'''
remotename = self._check_column(name, 'remote')
self.sortcol = remotename
self.order = order
if self.mode == 'local':
reverse = True if order == 'desc' else False
self.getAll()
self.results.sort(remotename, reverse=reverse)
self.results = self.results[0:self.limit]
elif self.mode == 'remote':
# Fail if no route map initialized
if not config.urlmap:
raise MarvinError('No URL Map found. Cannot make remote call')
# Get the query route
url = config.urlmap['api']['querycubes']['url']
params = {'searchfilter': self.search_filter, 'returnparams': self.return_params,
'sort': remotename, 'order': order, 'limit': self.limit}
self._interaction(url, params, create_set=True, calltype='Sort')
return self.results
def toTable(self):
''' Output the results as an Astropy Table
Uses the Python Astropy package
Parameters:
None
Returns:
tableres:
Astropy Table
Example:
>>> r = q.run()
>>> r.toTable()
>>> <Table length=5>
>>> mangaid name nsa.z
>>> unicode6 unicode4 float64
>>> -------- -------- ------------
>>> 4-3602 1902 -9999.0
>>> 4-3862 1902 -9999.0
>>> 4-3293 1901 -9999.0
>>> 4-3988 1901 -9999.0
>>> 4-4602 1901 -9999.0
'''
try:
tabres = Table(rows=self.results, names=self.columns.full)
except ValueError as e:
raise MarvinError('Could not make astropy Table from results: {0}'.format(e))
return tabres
def merge_tables(self, tables, direction='vert', **kwargs):
''' Merges a list of Astropy tables of results together
Combines two Astropy tables using either the Astropy
vstack or hstack method. vstack refers to vertical stacking of table rows.
hstack refers to horizonal stacking of table columns. hstack assumes the rows in each
table refer to the same object. Buyer beware: stacking tables without proper understanding
of your rows and columns may results in deleterious results.
merge_tables also accepts all keyword arguments that Astropy vstack and hstack method do.
See `vstack <http://docs.astropy.org/en/stable/table/operations.html#stack-vertically>`_
See `hstack <http://docs.astropy.org/en/stable/table/operations.html#stack-horizontally>`_
Parameters:
tables (list):
A list of Astropy Table objects. Required.
direction (str):
The direction of the table stacking, either vertical ('vert') or horizontal ('hor').
Default is 'vert'. Direction string can be fuzzy.
Returns:
A new Astropy table that is the stacked combination of all input tables
Example:
>>> # query 1
>>> q, r = doQuery(search_filter='nsa.z < 0.1', returnparams=['g_r', 'cube.ra', 'cube.dec'])
>>> # query 2
>>> q2, r2 = doQuery(search_filter='nsa.z < 0.1')
>>>
>>> # convert to tables
>>> table_1 = r.toTable()
>>> table_2 = r2.toTable()
>>> tables = [table_1, table_2]
>>>
>>> # vertical (row) stacking
>>> r.merge_tables(tables, direction='vert')
>>> # horizontal (column) stacking
>>> r.merge_tables(tables, direction='hor')
'''
choices = ['vertical', 'horizontal']
stackdir, score = process.extractOne(direction, choices)
if stackdir == 'vertical':
return vstack(tables, **kwargs)
elif stackdir == 'horizontal':
return hstack(tables, **kwargs)
def toFits(self, filename='myresults.fits', overwrite=False):
''' Output the results as a FITS file
Writes a new FITS file from search results using
the astropy Table.write()
Parameters:
filename (str):
Name of FITS file to output
overwrite (bool):
Set to True to overwrite an existing file
'''
myext = os.path.splitext(filename)[1]
if not myext:
filename = filename + '.fits'
table = self.toTable()
table.write(filename, format='fits', overwrite=overwrite)
print('Writing new FITS file {0}'.format(filename))
def toCSV(self, filename='myresults.csv', overwrite=False):
''' Output the results as a CSV file
Writes a new CSV file from search results using
the astropy Table.write()
Parameters:
filename (str):
Name of CSV file to output
overwrite (bool):
Set to True to overwrite an existing file
'''
myext = os.path.splitext(filename)[1]
if not myext:
filename = filename + '.csv'
table = self.toTable()
table.write(filename, format='csv', overwrite=overwrite)
print('Writing new CSV file {0}'.format(filename))
def toDF(self):
'''Call toDataFrame().
'''
return self.toDataFrame()
def toDataFrame(self):
'''Output the results as an pandas dataframe.
Uses the pandas package.
Parameters:
None
Returns:
dfres:
pandas dataframe
Example:
>>> r = q.run()
>>> r.toDataFrame()
mangaid plate name nsa_mstar z
0 1-22286 7992 12704 1.702470e+11 0.099954
1 1-22301 7992 6101 9.369260e+10 0.105153
2 1-22414 7992 6103 7.489660e+10 0.092272
3 1-22942 7992 12705 8.470360e+10 0.104958
4 1-22948 7992 9102 1.023530e+11 0.119399
'''
res = self.results.to_list() if self.results else []
try:
dfres = pd.DataFrame(res)
except (ValueError, NameError) as e:
raise MarvinError('Could not make pandas dataframe from results: {0}'.format(e))
return dfres
    def _create_result_set(self, index=None, rows=None):
        ''' Creates a Marvin ResultSet

        Parameters:
            index (int):
                The starting index of the result subset
            rows (list|ResultSet):
                A list of rows containing the value data to input into the ResultSet

        Returns:
            creates a marvin ResultSet and sets it as the results attribute
        '''
        # grab the columns from the results
        self.columns = self.getColumns()
        ntnames = self.columns.list_params('remote')
        # dynamically create a new ResultRow Class
        rows = rows if rows else self.results
        # remote responses deliver rows as dicts; local rows are tuples
        row_is_dict = isinstance(rows[0], dict)
        if not isinstance(rows, ResultSet):
            nt = marvintuple('ResultRow', ntnames, results=self)
            if row_is_dict:
                results = [nt(**r) for r in rows]
            else:
                results = [nt(*r) for r in rows]
        else:
            # already a ResultSet; reuse the rows as-is
            results = rows
        self.count = len(results)
        # Build the ResultSet
        self.results = ResultSet(results, count=self.count, total=self.totalcount, index=index, results=self)
def _set_page(self):
''' Set the page of the data '''
if self.start and self.end:
self.chunk = (self.end - self.start)
else:
self.chunk = self.chunk if self.chunk else self.limit if self.limit else 100
self.start = 0
self.end = self.start + self.chunk
self.pages = int(np.ceil(self.totalcount / float(self.count))) if self.count else 0
self.index = self.start
self.current_page = (int(self.index) + self.count) / self.count
    def save(self, path=None, overwrite=False):
        ''' Save the results as a pickle object

        Parameters:
            path (str):
                Filepath and name of the pickled object
            overwrite (bool):
                Set this to overwrite an existing pickled file

        Returns:
            path (str):
                The filepath and name of the pickled object
        '''
        # set the filename and path
        sf = self.search_filter.replace(' ', '') if self.search_filter else 'anon'
        # set the path
        if not path:
            path = os.path.expanduser('~/marvin_results_{0}.mpf'.format(sf))
        # check for file extension
        if not os.path.splitext(path)[1]:
            path = os.path.join(path + '.mpf')
        path = os.path.realpath(path)
        if os.path.isdir(path):
            raise MarvinError('path must be a full route, including the filename.')
        if os.path.exists(path) and not overwrite:
            warnings.warn('file already exists. Not overwriting.', MarvinUserWarning)
            return
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        # convert results into a dict (the ResultSet itself is not picklable)
        dict_results = self.results.to_dict()
        # set bad pickled attributes to None
        attrs = ['results', 'datamodel', 'columns', '_queryobj']
        vals = [dict_results, None, None, None]
        # a live SQLAlchemy query object cannot be pickled either; blank it out
        isnotstr = not isinstance(self.query, six.string_types)
        if isnotstr:
            attrs += ['query']
            vals += [None]
        # pickle the results
        try:
            # temporarily swap the unpicklable attributes out while dumping;
            # they are restored on context exit (see restore() for the rebuild)
            with temp_setattr(self, attrs, vals):
                pickle.dump(self, open(path, 'wb'), protocol=-1)
        except Exception as ee:
            # remove the partial file so a failed save leaves nothing behind
            if os.path.exists(path):
                os.remove(path)
            raise MarvinError('Error found while pickling: {0}'.format(str(ee)))
        return path
    @classmethod
    def restore(cls, path, delete=False):
        ''' Restore a pickled Results object

        Parameters:
            path (str):
                The filename and path to the pickled object
            delete (bool):
                Turn this on to delete the pickled file upon restore

        Returns:
            Results (instance):
                The instantiated Marvin Results class
        '''
        obj = marvin_pickle.restore(path, delete=delete)
        # these attributes were blanked out before pickling (see save());
        # rebuild the datamodel, the ResultSet, and the column group
        obj.datamodel = datamodel[obj.release]
        obj._create_result_set()
        obj.getColumns()
        return obj
def toJson(self, orient: str = 'records', pure: bool = None) -> str:
''' Output the results as a JSON object
Uses Python panda package to convert the results to a JSON object. The default
orientation is a list "records". Valid orientations are ('split', 'records',
'index', 'columns', 'values', 'table'). If pandas is not installed or the "pure"
option is set, will use the json package to convert the results to JSON representation.
Parameters:
orient (str):
The pandas orientation to use when converting to JSON. Default is 'records'.
pure (bool):
Set this to True to use the json library for conversion instead of the pandas package
Returns:
str:
The results as a JSON string
'''
# if no pandas or pure is true, then use json dumps
if not pd or pure:
try:
jsonres = json.dumps(self.results)
except TypeError as e:
raise MarvinError('Results not JSON-ifiable. Check the format of results: {0}'.format(e))
else:
jsonres = self.toDF().to_json(orient=orient)
return jsonres
def getColumns(self):
''' Get the columns of the returned reults
Returns a ParameterGroup containing the columns from the
returned results. Each row of the ParameterGroup is a
QueryParameter.
Returns:
columns (list):
A list of column names from the results
Example:
>>> r = q.run()
>>> cols = r.getColumns()
>>> print(cols)
>>> [u'mangaid', u'name', u'nsa.z']
'''
try:
self.columns = ColumnGroup('Columns', self._params, parent=self.datamodel)
except Exception as e:
raise MarvinError('Could not create query columns: {0}'.format(e))
return self.columns
def _interaction(self, url, params, calltype='', create_set=None, **kwargs):
    ''' Perform a remote Interaction call

    Parameters:
        url (str):
            The url of the request
        params (dict):
            A dictionary of parameters (get or post) to send with the request
        calltype (str):
            The method call sending the request; used in error messages and
            to select streaming behaviour
        create_set (bool):
            If True, sets the response output as the new results and creates a
            new named tuple set
        kwargs:
            index (int): starting index forwarded to _create_result_set

    Returns:
        output:
            The output data from the request (None when create_set is True)

    Raises:
        MarvinError: Raises on any HTTP Request error
    '''
    # the remote API expects returnparams as a comma-separated string,
    # not a Python list
    if 'returnparams' in params:
        return_params = params.get('returnparams', None)
        if return_params and isinstance(return_params, list):
            params['returnparams'] = ','.join(return_params)

    # add the release just in case
    params.update({'release': self.release})

    # only the getAll call streams its (potentially huge) response
    datastream = calltype == 'getAll'

    # send the request
    try:
        ii = Interaction(route=url, params=params, stream=True, datastream=datastream)
    except MarvinError as e:
        raise MarvinError('API Query {0} call failed: {1}'.format(calltype, e))
    else:
        # unpack the result rows and timing metadata from the response
        remotes = self._queryobj._get_remote_parameters(ii)
        output = remotes['results']
        self.response_time = remotes['response_time']
        self._runtime = remotes['runtime']
        self.query_time = self._getRunTime()
        index = kwargs.get('index', None)
        if create_set:
            # replace the current result set in place
            self._create_result_set(index=index, rows=output)
        else:
            return output
def _check_column(self, name, name_type):
''' Check if a name exists as a column '''
try:
name_in_col = name in self.columns
except KeyError as e:
raise MarvinError('Column {0} not found in results: {1}'.format(name, e))
else:
assert name_type in ['full', 'remote', 'name', 'short', 'display'], \
'name_type must be one of "full, remote, name, short, display"'
return self.columns[str(name)].__getattribute__(name_type)
def getListOf(self, name=None, to_json=False, to_ndarray=False, return_all=None):
    ''' Extract a list of a single parameter from results

    Parameters:
        name (str):
            Name of the parameter to return.  Required.
        to_json (bool):
            True/False boolean to convert the output into a JSON format
        to_ndarray (bool):
            True/False boolean to convert the output into a Numpy array
        return_all (bool):
            if True, returns the entire result set for that column

    Returns:
        output (list):
            A list of results for one parameter

    Example:
        >>> r = q.run()
        >>> r.getListOf('mangaid')
        >>> [u'4-3988', u'4-3862', u'4-3293', u'4-3602', u'4-4602']

    Raises:
        AssertionError:
            Raised when no name is specified.
    '''
    assert name, 'Must specify a column name'

    # check column name and get full name
    fullname = self._check_column(name, 'full')

    # deal with the output
    if return_all:
        # grab the entire column from the remote server
        url = config.urlmap['api']['getcolumn']['url'].format(colname=fullname)
        params = {'searchfilter': self.search_filter, 'format_type': 'list',
                  'return_all': True, 'returnparams': self.return_params}
        output = self._interaction(url, params, calltype='getList')
    else:
        # only deal with the current local page
        # NOTE(review): assumes self.results.count is the row count of the
        # current ResultSet -- confirm against the ResultSet implementation
        output = self.results[name] if self.results.count > 1 else [self.results[name]]

    if to_json:
        output = json.dumps(output) if output else None

    if to_ndarray:
        output = np.array(output) if output else None

    return output
def getDictOf(self, name=None, format_type='listdict', to_json=False, return_all=None):
    ''' Get a dictionary of specified parameters

    Parameters:
        name (str):
            Name of the parameter name to return.  If not specified,
            it returns all parameters.
        format_type ({'listdict', 'dictlist'}):
            The format of the results. Listdict is a list of dictionaries.
            Dictlist is a dictionary of lists. Default is listdict.
        to_json (bool):
            True/False boolean to convert the output into a JSON format
        return_all (bool):
            if True, returns the entire result set for that column

    Returns:
        output (list, dict):
            Can be either a list of dictionaries, or a dictionary of lists

    Example:
        >>> # get some results
        >>> r = q.run()
        >>> # Get a list of dictionaries
        >>> r.getDictOf(format_type='listdict')
        >>> [{'cube.mangaid': u'4-3988', 'ifu.name': u'1901', 'nsa.z': -9999.0},
        >>>  {'cube.mangaid': u'4-3862', 'ifu.name': u'1902', 'nsa.z': -9999.0}]
        >>> # Get a dictionary of lists
        >>> r.getDictOf(format_type='dictlist')
        >>> {'cube.mangaid': [u'4-3988', u'4-3862'],
        >>>  'ifu.name': [u'1901', u'1902'],
        >>>  'nsa.z': [-9999.0, -9999.0]}
        >>> # Get a dictionary of only one parameter
        >>> r.getDictOf('mangaid')
        >>> [{'cube.mangaid': u'4-3988'},
        >>>  {'cube.mangaid': u'4-3862'}]
    '''
    # resolve the remote and full variants of the column name (if given)
    remotename = self._check_column(name, 'remote') if name else None
    fullname = self._check_column(name, 'full') if name else None

    # FIX: the original computed self.results.to_dict() unconditionally and
    # then discarded/recomputed it in both branches; compute it only where
    # it is actually used
    if return_all:
        # grab all rows, or all of a specific column, from the remote server
        params = {'searchfilter': self.search_filter, 'return_all': True,
                  'format_type': format_type, 'returnparams': self.return_params}
        url = config.urlmap['api']['getcolumn']['url'].format(colname=fullname)
        output = self._interaction(url, params, calltype='getDict')
    else:
        # only convert the current local page of results
        output = self.results.to_dict(name=remotename, format_type=format_type)

    if to_json:
        output = json.dumps(output) if output else None

    return output
def loop(self, chunk=None):
    ''' Loop over the full set of results

    Repeatedly extends the current result set (in chunks, via extendSet)
    until the local count reaches the total number of results.

    Parameters:
        chunk (int):
            The number of objects to retrieve per iteration

    Example:
        >>> # get some results from a query
        >>> r = q.run()
        >>> # start a loop, grabbing in chunks of 400
        >>> r.loop(chunk=400)
    '''
    # keep paging until everything has been fetched locally
    while self.totalcount > self.count:
        self.extendSet(chunk=chunk)
def extendSet(self, chunk=None, start=None):
    ''' Extend the Result set with the next page

    Extends the current ResultSet with the next page of results
    or a specified page.  Calls either getNext or getSubset.

    Parameters:
        chunk (int):
            The number of objects to return
        start (int):
            The starting index of your subset extraction

    Returns:
        None; updates self.results and self.count in place

    Example:
        >>> # run a query
        >>> r = q.run()
        >>> # extend the current result set with the next page
        >>> r.extendSet()

    See Also:
        getNext, getSubset
    '''
    # shallow-copy the current page before it is replaced by the fetch
    current = copy.copy(self.results)

    # an explicit start index means a targeted subset; otherwise just
    # advance to the next page
    if start is not None:
        fetched = self.getSubset(start, limit=chunk)
    else:
        fetched = self.getNext(chunk=chunk)

    combined = current + fetched
    self.count = len(combined)
    self.results = combined
def getNext(self, chunk=None):
    ''' Retrieve the next chunk of results

    Returns the next chunk of results from the query,
    from start to end in units of chunk.  Used with getPrevious
    to paginate through a long list of results.

    Parameters:
        chunk (int):
            The number of objects to return

    Returns:
        results (list):
            A list of query results

    Example:
        >>> r = q.run()
        >>> r.getNext(5)
        >>> Retrieving next 5, from 35 to 40
        >>> [(u'4-4231', u'1902', -9999.0),
        >>>  (u'4-14340', u'1901', -9999.0),
        >>>  (u'4-14510', u'1902', -9999.0),
        >>>  (u'4-13634', u'1901', -9999.0),
        >>>  (u'4-13538', u'1902', -9999.0)]

    See Also:
        getAll, getPrevious, getSubset
    '''
    # negative chunks make no sense; fall back to the current chunk size
    if chunk and chunk < 0:
        warnings.warn('Chunk cannot be negative. Setting to {0}'.format(self.chunk), MarvinUserWarning)
        chunk = self.chunk

    newstart = self.end
    self.chunk = chunk if chunk else self.chunk
    newend = newstart + self.chunk

    # This handles cases when the number of results is < total
    if self.totalcount == self.count:
        warnings.warn('You have all the results. Cannot go forward', MarvinUserWarning)
        return self.results

    # This handles the end edge case: clamp to the total result count
    if newend > self.totalcount:
        warnings.warn('You have reached the end.', MarvinUserWarning)
        newend = self.totalcount
        newstart = self.end

    # This grabs the next chunk
    log.info('Retrieving next {0}, from {1} to {2}'.format(self.chunk, newstart, newend))
    if self.mode == 'local':
        self.results = self.query.slice(newstart, newend).all()
        if self.results:
            self._create_result_set(index=newstart)
    elif self.mode == 'remote':
        # Fail if no route map initialized
        if not config.urlmap:
            raise MarvinError('No URL Map found. Cannot make remote call')
        # Get the query route
        url = config.urlmap['api']['getsubset']['url']
        # FIX: send the resolved chunk size (self.chunk) as the limit;
        # the raw ``chunk`` argument may be None, which getSubset never
        # sends -- keep the two remote calls consistent
        params = {'searchfilter': self.search_filter, 'returnparams': self.return_params,
                  'start': newstart, 'end': newend, 'limit': self.chunk,
                  'sort': self.sortcol, 'order': self.order}
        self._interaction(url, params, calltype='getNext', create_set=True,
                          index=newstart)

    self.start = newstart
    self.end = newend
    self.count = len(self.results)

    # optionally convert the new page into Marvin Tool objects
    if self.return_type:
        self.convertToTool(self.return_type)

    return self.results
def getPrevious(self, chunk=None):
    ''' Retrieve the previous chunk of results.

    Returns a previous chunk of results from the query,
    from start to end in units of chunk.  Used with getNext
    to paginate through a long list of results.

    Parameters:
        chunk (int):
            The number of objects to return

    Returns:
        results (list):
            A list of query results

    Example:
        >>> r = q.run()
        >>> r.getPrevious(5)
        >>> Retrieving previous 5, from 30 to 35
        >>> [(u'4-3988', u'1901', -9999.0),
        >>>  (u'4-3862', u'1902', -9999.0),
        >>>  (u'4-3293', u'1901', -9999.0),
        >>>  (u'4-3602', u'1902', -9999.0),
        >>>  (u'4-4602', u'1901', -9999.0)]

    See Also:
        getNext, getAll, getSubset
    '''
    # negative chunks make no sense; fall back to the current chunk size
    if chunk and chunk < 0:
        warnings.warn('Chunk cannot be negative. Setting to {0}'.format(self.chunk), MarvinUserWarning)
        chunk = self.chunk

    newend = self.start
    self.chunk = chunk if chunk else self.chunk
    newstart = newend - self.chunk

    # This handles cases when the number of results is < total
    if self.totalcount == self.count:
        warnings.warn('You have all the results. Cannot go back', MarvinUserWarning)
        return self.results

    # This handles the start edge case: clamp to the first row
    if newstart < 0:
        warnings.warn('You have reached the beginning.', MarvinUserWarning)
        newstart = 0
        newend = self.start

    # This grabs the previous chunk
    log.info('Retrieving previous {0}, from {1} to {2}'.format(self.chunk, newstart, newend))
    if self.mode == 'local':
        self.results = self.query.slice(newstart, newend).all()
        if self.results:
            self._create_result_set(index=newstart)
    elif self.mode == 'remote':
        # Fail if no route map initialized
        if not config.urlmap:
            raise MarvinError('No URL Map found. Cannot make remote call')
        # Get the query route
        url = config.urlmap['api']['getsubset']['url']
        # FIX: send the resolved chunk size (self.chunk) as the limit;
        # the raw ``chunk`` argument may be None, which getSubset never
        # sends -- keep the two remote calls consistent
        params = {'searchfilter': self.search_filter, 'returnparams': self.return_params,
                  'start': newstart, 'end': newend, 'limit': self.chunk,
                  'sort': self.sortcol, 'order': self.order}
        self._interaction(url, params, calltype='getPrevious', create_set=True,
                          index=newstart)

    self.start = newstart
    self.end = newend
    self.count = len(self.results)

    # optionally convert the new page into Marvin Tool objects
    if self.return_type:
        self.convertToTool(self.return_type)

    return self.results
def getSubset(self, start, limit=None):
    ''' Extracts a subset of results

    Parameters:
        start (int):
            The starting index of your subset extraction
        limit (int):
            The limiting number of results to return.  Defaults to the
            current chunk size when not given.

    Returns:
        results (list):
            A list of query results

    Example:
        >>> r = q.run()
        >>> r.getSubset(0, 10)
        >>> [(u'14-12', u'1901', -9999.0),
        >>>  (u'14-13', u'1902', -9999.0),
        >>>  (u'27-134', u'1901', -9999.0),
        >>>  (u'27-100', u'1902', -9999.0),
        >>>  (u'27-762', u'1901', -9999.0),
        >>>  (u'27-759', u'1902', -9999.0),
        >>>  (u'27-827', u'1901', -9999.0),
        >>>  (u'27-828', u'1902', -9999.0),
        >>>  (u'27-1170', u'1901', -9999.0),
        >>>  (u'27-1167', u'1902', -9999.0)]

    See Also:
        getNext, getPrevious, getAll
    '''
    # default to the current chunk size; reject negative limits
    if not limit:
        limit = self.chunk

    if limit < 0:
        warnings.warn('Limit cannot be negative. Setting to {0}'.format(self.chunk), MarvinUserWarning)
        limit = self.chunk

    # clamp a negative start to 0 and compute the exclusive end index
    start = 0 if int(start) < 0 else int(start)
    end = start + int(limit)

    self.start = start
    self.end = end
    self.chunk = limit

    if self.mode == 'local':
        # slice the local SQLAlchemy query directly
        self.results = self.query.slice(start, end).all()
        if self.results:
            self._create_result_set(index=start)
    elif self.mode == 'remote':
        # Fail if no route map initialized
        if not config.urlmap:
            raise MarvinError('No URL Map found. Cannot make remote call')
        # Get the query route
        url = config.urlmap['api']['getsubset']['url']
        params = {'searchfilter': self.search_filter, 'returnparams': self.return_params,
                  'start': start, 'end': end, 'limit': limit,
                  'sort': self.sortcol, 'order': self.order}
        self._interaction(url, params, calltype='getSubset', create_set=True, index=start)

    self.count = len(self.results)

    # optionally convert the new page into Marvin Tool objects
    if self.return_type:
        self.convertToTool(self.return_type)

    return self.results
def getAll(self, force=False):
    ''' Retrieve all of the results of a query

    Attempts to return all the results of a query.  The efficiency of this
    method depends heavily on how many rows and columns you wish to return.
    A cutoff limit is applied for results with more than 500,000 rows or
    results with more than 25 columns.

    Parameters:
        force (bool):
            If True, force attempt to download everything

    Returns:
        The full list of query results.

    See Also:
        getNext, getPrevious, getSubset, loop
    '''
    # guard against accidentally pulling an enormous result set
    if (self.totalcount > 500000 or len(self.columns) > 25) and not force:
        raise MarvinUserWarning("Cannot retrieve all results. The total number of requested "
                                "rows or columns is too high. Please use the getNext, getPrevious, "
                                "getSubset, or loop methods to retrieve pages.")

    if self.mode == 'local':
        # NOTE(review): from_self() re-wraps the query as a subquery --
        # confirm this is still supported by the installed SQLAlchemy
        self.results = self.query.from_self().all()
        self._create_result_set()
    elif self.mode == 'remote':
        # Get the query route
        url = config.urlmap['api']['querycubes']['url']
        params = {'searchfilter': self.search_filter, 'return_all': True,
                  'returnparams': self.return_params, 'limit': self.limit,
                  'sort': self.sortcol, 'order': self.order}
        # streamed request; create_set replaces self.results in place
        self._interaction(url, params, calltype='getAll', create_set=True)

    self.count = self.totalcount
    print('Returned all {0} results'.format(self.totalcount))
def convertToTool(self, tooltype, mode='auto', limit=None):
    ''' Converts the list of results into Marvin Tool objects

    Creates a list of Marvin Tool objects from a set of query results.
    The new list is stored in the Results.objects property.
    If the Query.returntype parameter is specified, then the Results object
    will automatically convert the results to the desired Tool on initialization.

    Parameters:
        tooltype (str):
            The requested Marvin Tool object that the results are converted into.
            Overrides the returntype parameter.  If not set, defaults
            to the returntype parameter.  One of 'cube', 'spaxel', 'maps',
            'rss', 'modelcube'.
        limit (int):
            Limit the number of results you convert to Marvin tools.  Useful
            for extremely large result sets.  Default is None.
        mode (str):
            The mode to use when attempting to convert to Tool.  Default mode
            is to use the mode internal to Results. (most often remote mode)

    Example:
        >>> # Get the results from some query
        >>> r = q.run()
        >>> r.results
        >>> [NamedTuple(mangaid=u'14-12', name=u'1901', nsa.z=-9999.0),
        >>>  NamedTuple(mangaid=u'14-13', name=u'1902', nsa.z=-9999.0),
        >>>  NamedTuple(mangaid=u'27-134', name=u'1901', nsa.z=-9999.0),
        >>>  NamedTuple(mangaid=u'27-100', name=u'1902', nsa.z=-9999.0),
        >>>  NamedTuple(mangaid=u'27-762', name=u'1901', nsa.z=-9999.0)]
        >>> # convert results to Marvin Cube tools
        >>> r.convertToTool('cube')
        >>> r.objects
        >>> [<Marvin Cube (plateifu='7444-1901', mode='remote', data_origin='api')>,
        >>>  <Marvin Cube (plateifu='7444-1902', mode='remote', data_origin='api')>,
        >>>  <Marvin Cube (plateifu='7995-1901', mode='remote', data_origin='api')>,
        >>>  <Marvin Cube (plateifu='7995-1902', mode='remote', data_origin='api')>,
        >>>  <Marvin Cube (plateifu='8000-1901', mode='remote', data_origin='api')>]
    '''
    # set the desired tool type
    toollist = ['cube', 'spaxel', 'maps', 'rss', 'modelcube']
    tooltype = tooltype if tooltype else self.return_type
    assert tooltype in toollist, 'Returned tool type must be one of {0}'.format(toollist)

    # get the parameter list to check against
    paramlist = self.columns.full

    print('Converting results to Marvin {0} objects'.format(tooltype.title()))
    if tooltype == 'cube':
        # one Cube per result row, identified by plateifu
        self.objects = [self._get_object(Cube, plateifu=res.plateifu, mode=mode) for res in self.results[0:limit]]
    elif tooltype == 'maps':
        # pass along bintype/template only when those columns were returned
        isbin = 'bintype.name' in paramlist
        istemp = 'template.name' in paramlist
        self.objects = []
        for res in self.results[0:limit]:
            mapkwargs = {'mode': mode, 'plateifu': res.plateifu}
            if isbin:
                binval = res.bintype_name
                mapkwargs['bintype'] = binval
            if istemp:
                tempval = res.template_name
                mapkwargs['template_kin'] = tempval
            self.objects.append(self._get_object(Maps, **mapkwargs))
    elif tooltype == 'spaxel':
        assert 'spaxelprop.x' in paramlist and 'spaxelprop.y' in paramlist, \
            'Parameters must include spaxelprop.x and y in order to convert to Marvin Spaxel.'
        self.objects = []
        # group spaxel coordinates by unique plateifu so each Cube is
        # instantiated only once
        tab = self.toTable()
        uniq_plateifus = list(set(self.getListOf('plateifu')))
        for plateifu in uniq_plateifus:
            c = self._get_object(Cube, plateifu=plateifu, mode=mode)
            univals = tab['cube.plateifu'] == plateifu
            x = tab[univals]['spaxelprop.x'].tolist()
            y = tab[univals]['spaxelprop.y'].tolist()
            spaxels = c[y, x]
            self.objects.extend(spaxels)
    elif tooltype == 'rss':
        # one RSS per result row, identified by plateifu
        self.objects = [self._get_object(RSS, plateifu=res.plateifu, mode=mode) for res in self.results[0:limit]]
    elif tooltype == 'modelcube':
        isbin = 'bintype.name' in paramlist
        istemp = 'template.name' in paramlist
        self.objects = []
        # ModelCubes only exist for MPL-5 and later releases
        assert self.release != 'MPL-4', "ModelCubes require a release of MPL-5 and up"
        for res in self.results[0:limit]:
            mapkwargs = {'mode': mode, 'plateifu': res.plateifu}
            if isbin:
                binval = res.bintype_name
                mapkwargs['bintype'] = binval
            if istemp:
                tempval = res.template_name
                mapkwargs['template_kin'] = tempval
            self.objects.append(self._get_object(ModelCube, **mapkwargs))
@staticmethod
def _get_object(obj, **kwargs):
    ''' Instantiate a Marvin object, or return an error message string

    To preserve the lengths of self.results and self.objects, a failed
    instantiation is replaced by a descriptive error message instead of
    raising.

    Parameters:
        obj (object):
            A Marvin Class object
        kwargs:
            Any set of parameters to instantiate a Marvin object

    Returns:
        The Marvin instance or an error message if it failed
    '''
    try:
        return obj(**kwargs)
    except MarvinError as e:
        # substitute a message so the objects list stays aligned with results
        plateifu = kwargs.get('plateifu', '')
        return 'Error creating {0} for {1}: {2}'.format(obj.__name__, plateifu, e)
def plot(self, x_name, y_name, **kwargs):
    ''' Make a scatter plot from two columns of results

    Creates a Matplotlib scatter plot from Results columns.
    Accepts as input two string column names.  Will extract the total
    entire column (if not already available) and plot them.  Creates
    a scatter plot with (optionally) adjoining 1-d histograms for each column.

    See :meth:`marvin.utils.plot.scatter.plot` and
    :meth:`marvin.utils.plot.scatter.hist` for details.

    Parameters:
        x_name (str):
            The name of the x-column of data. Required
        y_name (str):
            The name of the y-column of data. Required
        return_plateifus (bool):
            If True, includes the plateifus in each histogram bin in the
            histogram output. Default is True.
        return_figure (bool):
            Set to False to not return the Figure and Axis object. Defaults to True.
        show_plot (bool):
            Set to False to not show the interactive plot
        **kwargs (dict):
            Any other keyword argument that will be passed to Marvin's
            scatter and hist plotting methods

    Returns:
        The figure, axes, and histogram data from the plotting function

    Example:
        >>> # do a query and get the results
        >>> q = Query(search_filter='nsa.z < 0.1', returnparams=['nsa.elpetro_ba', 'g_r'])
        >>> r = q.run()
        >>> # plot the total columns of Redshift vs g-r magnitude
        >>> fig, axes, hist_data = r.plot('nsa.z', 'g_r')
    '''
    assert all([x_name, y_name]), 'Must provide both an x and y column'

    # pop off the options this method consumes itself; the rest flow
    # through to the underlying scatter/hist routines
    return_plateifus = kwargs.pop('return_plateifus', True)
    with_hist = kwargs.get('with_hist', True)
    show_plot = kwargs.pop('show_plot', True)
    return_figure = kwargs.get('return_figure', True)

    # get the named columns (also used as axis labels)
    x_col = self.columns[x_name]
    y_col = self.columns[y_name]

    # get the values of the two columns; fetch the full remote column
    # when only a partial page is loaded locally
    if self.count != self.totalcount:
        x_data = self.getListOf(x_name, return_all=True)
        y_data = self.getListOf(y_name, return_all=True)
    else:
        x_data = self.results[x_name]
        y_data = self.results[y_name]

    # suppress the interactive plot window when show_plot is False
    with turn_off_ion(show_plot=show_plot):
        output = marvin.utils.plot.scatter.plot(x_data, y_data, xlabel=x_col, ylabel=y_col, **kwargs)

    # computes a list of plateifus in each bin
    if return_plateifus and with_hist:
        plateifus = self.getListOf('plateifu', return_all=True)
        hdata = output[2] if return_figure else output
        if 'xhist' in hdata:
            hdata['xhist']['bins_plateifu'] = map_bins_to_column(plateifus, hdata['xhist']['indices'])
        if 'yhist' in hdata:
            hdata['yhist']['bins_plateifu'] = map_bins_to_column(plateifus, hdata['yhist']['indices'])
        output = output[0:2] + (hdata,) if return_figure else hdata

    return output
def hist(self, name, **kwargs):
    ''' Make a histogram for a given column of the results

    Creates a Matplotlib histogram from a Results Column.
    Accepts as input a string column name.  Will extract the total
    entire column (if not already available) and plot it.

    See :meth:`marvin.utils.plot.scatter.hist` for details.

    Parameters:
        name (str):
            The name of the column of data. Required
        return_plateifus (bool):
            If True, includes the plateifus in each histogram bin in the
            histogram output. Default is True.
        return_figure (bool):
            Set to False to not return the Figure and Axis object. Defaults to True.
        show_plot (bool):
            Set to False to not show the interactive plot
        **kwargs (dict):
            Any other keyword argument that will be passed to Marvin's
            hist plotting methods

    Returns:
        The histogram data, figure, and axes from the plotting function

    Example:
        >>> # do a query and get the results
        >>> q = Query(search_filter='nsa.z < 0.1', returnparams=['nsa.elpetro_ba', 'g_r'])
        >>> r = q.run()
        >>> # plot a histogram of the redshift column
        >>> hist_data, fig, axes = r.hist('nsa.z')
    '''
    # pop off the options this method consumes itself
    return_plateifus = kwargs.pop('return_plateifus', True)
    show_plot = kwargs.pop('show_plot', True)
    return_figure = kwargs.get('return_figure', True)

    # get the named column
    col = self.columns[name]

    # get the column values; fetch the full remote column when only a
    # partial page is loaded locally
    if self.count != self.totalcount:
        data = self.getListOf(name, return_all=True)
    else:
        data = self.results[name]

    # xhist, fig, ax_hist_x = output
    with turn_off_ion(show_plot=show_plot):
        output = marvin.utils.plot.scatter.hist(data, **kwargs)

    # attach the list of plateifus falling in each histogram bin
    if return_plateifus:
        plateifus = self.getListOf('plateifu', return_all=True)
        hdata = output[0] if return_figure else output
        hdata['bins_plateifu'] = map_bins_to_column(plateifus, hdata['indices'])
        output = (hdata,) + output[1:] if return_figure else hdata

    return output
|
sdssREPO_NAMEmarvinPATH_START.@marvin_extracted@marvin-main@python@marvin@tools@results.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/title/font/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``histogram2dcontour.colorbar.title.font.size``."""

    def __init__(
        self,
        plotly_name="size",
        parent_name="histogram2dcontour.colorbar.title.font",
        **kwargs,
    ):
        # defaults apply only when the caller has not overridden them
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 1)
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram2dcontour@colorbar@title@font@_size.py@.PATH_END.py
|
{
"filename": "_include.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/xaxis/autorangeoptions/_include.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class IncludeValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for ``layout.xaxis.autorangeoptions.include``."""

    def __init__(
        self,
        plotly_name="include",
        parent_name="layout.xaxis.autorangeoptions",
        **kwargs,
    ):
        # defaults apply only when the caller has not overridden them
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("implied_edits", {})
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@xaxis@autorangeoptions@_include.py@.PATH_END.py
|
{
"filename": "design_description.md",
"repo_name": "MazinLab/MKIDGen3",
"repo_path": "MKIDGen3_extracted/MKIDGen3-main/docs/design_description.md",
"type": "Markdown"
}
|
# MKIDGEN3 Software Design Description
## Overview
The MKIDGEN3 software facilitates using multiple hardware subsystems (RFSoC board, IF board) to set up and read out an MKID array.
The core functionality revolves around capturing data at various points in the FPGA DSP pipeline.
Captures are facilitated by `capture requests`. The `Director` program runs on the client machine and facilitates
generating capture requests to fulfill array setup steps. Once all calibration settings have been established, it facilitates
a standing capture request to record photons. The `GUI` can be clicked to call director functions and facilitate
array setup steps in a graphical way.
## Key Components
### Capture Requests
Capture requests can target three possible locations on the FPGA:
1. Setup / Engineering
   1. ADC Capture
   2. IQ Capture (Post-DDC, Pre-optimal filter)
   3. Phase Capture (Post-optimal filter)
2. Postage stamp (post optimal filter IQ timestreams for 8 pixels)
3. Photon capture
All three locations can run captures concurrently but the setup / engineering capture only supports
one sub-location at a time. Every capture request is tracked via a `capture_id` which is a hash consisting
of all the settings related to the capture. Identical capture settings will produce the same capture ID.
The capture ID(s) can be used by a subscriber machine to filter capture requests published by the FRS.
*TODO: Consider including the capture location / type in the zmq header such that it can be used by a subscriber
to filter. Jenny thinks this is a good idea.* A capture request may fail and/or abort at any time. Upon
completion (including failure) a single null data byte will be published.
### FPGA Redis Server
The FPGA Redis Server runs on the ARM core and is responsible for two main tasks:
1. Keep updated information on the current status of any capture requests (queued, in-progress, completed)
*Will every single capture request be recorded in the Redis Server? What counts as "current"?
Jenny thinks this needs to be fleshed out / discussed a little more.*
2. Keep updated information of the status of the FPGA programming including the DAC table settings,
optimal filter taps, bin2res settings, etc.
*Is there a separate utility for tracking setup steps / what's potentially out of date or is that
also a function of this server? Who is responsible for storing MKID feedline/array information like
resonator frequencies and powers? However these are stored needs to be convenient to hand off to different
programs, view, associate with data, etc.*
*TODO: Where/how are logging messages stored or published?*
### Feedline Readout Server
The Feedline Readout Server (FRS) facilitates programming and interacting with the FPGA.
The FRS accepts capture requests on the capture request port and processes them in a loose priority order, executing requests as they are
compatible with those previously received and running.
Anyone is able to subscribe to published data and they are able to filter by capture ID (and capture type if we implement that).
Only the computer that generated the capture request necessarily knows the capture ID(s) for the
requests they submitted.
## Array Setup Steps
1. Run sweeps (power and freq.) to find res and drive power
2. Process
3. Rerun 1&2 with fixed freq to finalize optimal drive power
4. Run IQ sweeps to find loop centers
5. Process
6. capture Optimal filter phase data
7. Process
8. capture phase data for thresholding
9. Process
10. ready to observe
## Definitions
- active feedlines: a feedline which is intended to be used for observing and has the requisite calibration/setup information
- observing: recording photon data on all active feedlines
- capture request: contains an id which is the hash of the capture settings
## Main Programs, and their objects:
- Feedline Readout Server (FRS)
- FeedlineReadoutServer
- FeedlineHardware
- TapThread
- Readout Director
- PhotonDataAggregator
- Feedline Redis Server
- Contains all calibration data for one feedline
- dac table, IF settings, res frequencies, optimal filters, etc.
- Global Redis Server
- contains all calibration data for all feedlines
- can be edited to change individual calibration settings manually, updated settings only get applied when observing is started
Recovery Procedure:
- If a feedline goes down mid observing there are two choices:
- Restart feedline with exact same settings, observing continues uninterrupted, photon capture IDs are the same
- Recalibrate one or more feedlines: observing stops
## Usage Scenarios
Full array setup sans-gui
1. Start global redis server
2. start FL redis servers (optional)
3. Start all FRSs
4. Create some sort of PowerSweepEngine
- needs board to use (pull from redis by default or by explicit list)
- needs the power sweep settings
- needs processing method and config settings
5. Tell engine to go
6. Engine generates and submits capture jobs harvesting and storing the data
- handles hiccups with resume ability
- stores what settings it used into redis: state:last_powersweep:...
7. once all data is recieved it processes the data per its config and stores the result
- in redis: state:last_powersweep:result:...
- in a file at a location specified by the current configuration
8. Create some sort of rotateloopsengine
## Redis command, control, status schema
state:fl#:....
config:fl#...
|
MazinLabREPO_NAMEMKIDGen3PATH_START.@MKIDGen3_extracted@MKIDGen3-main@docs@design_description.md@.PATH_END.py
|
{
"filename": "_fgopacity.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/icicle/marker/pattern/_fgopacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FgopacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``icicle.marker.pattern.fgopacity`` (range 0..1)."""

    def __init__(
        self, plotly_name="fgopacity", parent_name="icicle.marker.pattern", **kwargs
    ):
        # defaults apply only when the caller has not overridden them
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("max", 1)
        kwargs.setdefault("min", 0)
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@icicle@marker@pattern@_fgopacity.py@.PATH_END.py
|
{
"filename": "stage_2.py",
"repo_name": "jdhenshaw/scousepy",
"repo_path": "scousepy_extracted/scousepy-master/scousepy/stage_2.py",
"type": "Python"
}
|
# Licensed under an MIT open source license - see LICENSE
import numpy as np
def generate_saa_list(scouseobject):
    """
    Return a list containing all spectral averaging areas flagged for fitting.

    Parameters
    ----------
    scouseobject : Instance of the scousepy class

    Returns
    -------
    list
        Each entry is ``[saa.index, i]`` where ``i`` indexes the
        corresponding wsaa entry.
    """
    # walk every SAA dictionary (one per wsaa size) and keep only the
    # relevant SAAs that are marked to be fit
    return [
        [saa.index, wsaa_index]
        for wsaa_index in range(len(scouseobject.wsaa))
        for saa in scouseobject.saa_dict[wsaa_index].values()
        if saa.to_be_fit
    ]
|
jdhenshawREPO_NAMEscousepyPATH_START.@scousepy_extracted@scousepy-master@scousepy@stage_2.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/bar/outsidetextfont/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``bar.outsidetextfont.shadow``."""

    def __init__(
        self, plotly_name="shadow", parent_name="bar.outsidetextfont", **kwargs
    ):
        # defaults apply only when the caller has not overridden them
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "calc")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@bar@outsidetextfont@_shadow.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/LensModel/__init__.py",
"type": "Python"
}
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@LensModel@__init__.py@.PATH_END.py
|
|
{
"filename": "batlib.py",
"repo_name": "parsotat/batanalysis",
"repo_path": "batanalysis_extracted/batanalysis-main/batanalysis/batlib.py",
"type": "Python"
}
|
"""
This file holds various functions that users can call to interface with bat observation objects
"""
import astropy as ap
from astropy.io import fits
from astropy.time import Time
import numpy as np
import shutil
import matplotlib.pyplot as plt
import os
import warnings
from pathlib import Path
import requests
from astroquery.heasarc import Heasarc
import swifttools.swift_too as swtoo
import datetime
import dpath
from concurrent.futures import ThreadPoolExecutor
import functools
import swiftbat.swutil as sbu
# for python>3.6
# for python>3.6
# heasoftpy is only available when HEASoft is installed; import failures are
# reported but not fatal so the rest of the module can still be imported.
try:
    import heasoftpy as hsp
except ModuleNotFoundError as err:
    # Error handling
    print(err)

# Remember the original PFILES environment variable as seen at import time.
_orig_pdir = os.getenv("PFILES")
def dirtest(directory, clean_dir=True):
    """
    Tests if a directory exists and either creates the directory or removes
    it and then re-creates it.

    :param directory: String of the directory that should be created or
        deleted and re-created
    :param clean_dir: Boolean to denote if the directory should be deleted
        and recreated
    :return: None
    """
    directory = Path(directory)

    if not directory.exists():
        # nothing there yet: simply create it (and any missing parents)
        directory.mkdir(parents=True)
    elif clean_dir:
        # wipe the existing tree and start from a clean slate
        shutil.rmtree(directory)
        directory.mkdir(parents=True)
def curdir():
    """
    Get the current working directory, with a trailing slash appended.

    Is legacy, since moving to use the pathlib module.

    :return: String of the current working directory ending in "/"
    """
    return os.getcwd() + "/"
def datadir(new=None, mkdir=False, makepersistent=False, tdrss=False) -> Path:
    """Return the data directory (optionally changing and creating it)

    Args:
        new (Path|str, optional): Use this as the data directory
        mkdir (bool, optional): Create the directory (and its parents) if necessary
        makepersistent (bool, optional): If set, stores the name in ~/.swift/swift_datadir_name and uses it as new
            default
        tdrss (bool, optional): subdirectory storing tdrss data types

    Returns:
        Path: the data directory (or its ``tdrss`` subdirectory when requested)
    """
    # Module-level cache of the chosen directory, shared across calls.
    global _datadir

    # File holding the persistent data-directory name across Python sessions.
    datadirnamefile = Path("~/.swift/swift_datadir_name").expanduser()

    if new is not None:
        new = Path(new).expanduser().resolve()
        if mkdir:
            new.mkdir(parents=True, exist_ok=True)
            # pre-create the standard data subdirectories
            new.joinpath("tdrss").mkdir(exist_ok=True)
            new.joinpath("trend").mkdir(exist_ok=True)
        if makepersistent:
            persistfile = datadirnamefile
            persistfile.parent.mkdir(exist_ok=True)  # make ~/.swift if necessary
            # NOTE(review): the file handle is not explicitly closed; CPython
            # closes it on garbage collection — consider a `with` block.
            persistfile.open("wt").write(str(new))
        _datadir = new

    if not globals().get("_datadir", False):
        # Not previously initialized
        try:
            _datadir = Path(datadirnamefile.open().read())
            if not _datadir.exists():
                raise RuntimeError(
                    f'Persistent data directory "{_datadir}" does not exist'
                )
        except FileNotFoundError:
            # No persistent directory exists. Use cwd
            _datadir = Path.cwd()
            warnings.warn(f"Saving data in current directory {_datadir}")

    assert isinstance(_datadir, Path)
    if tdrss:
        return _datadir.joinpath("tdrss")
    return _datadir
def create_custom_catalog(
    src_name_list,
    src_ra_list,
    src_dec_list,
    src_glon_list,
    src_glat_list,
    catalog_name="custom_catalog.cat",
    catalog_dir=None,
    catnum_init=32767,
):
    """
    This creates a catalog file for a number of sources that the user is interested in. Merges the created catalog with
    a past BAT survey catalog which includes typical bright sources observed by BAT. This allows the sources to be
    appropriately cleaned.

    :param src_name_list: List of the names of the sources that should be added to the catalog
    :param src_ra_list: List of the RA of the sources, in the same order as src_name_list
    :param src_dec_list: List of the Dec of the sources, in the same order as src_name_list
    :param src_glon_list: List of the galactic longitude of the sources, in the same order as src_name_list
    :param src_glat_list: List of the galactic latitude of the sources, in the same order as src_name_list
    :param catalog_name: String of the name of the resulting catalog that is produced
    :param catalog_dir: String (or None) of the directory where the catalog should be saved
    :param catnum_init: Int that denotes the initial catalog number to be assigned to the sources of interest, should
        not overlap with any cat_num values of other BAT survey sourced (this parameter should be ignored except for
        very few scenarios)
    :return: Path object pointing to the new catalog file
    """
    # Add check to make sure that input is not tuple
    if (
        type(src_name_list) is tuple
        or type(src_ra_list) is tuple
        or type(src_dec_list) is tuple
        or type(src_glon_list) is tuple
        or type(src_glat_list) is tuple
    ):
        raise ValueError(
            "The inputs cannot be tuples, either single values or lists are accepted."
        )

    # make the inputs lists if they are not
    if type(src_name_list) is not list:
        src_name_list = [src_name_list]
    if type(src_ra_list) is not list:
        src_ra_list = [src_ra_list]
    if type(src_dec_list) is not list:
        src_dec_list = [src_dec_list]
    if type(src_glon_list) is not list:
        src_glon_list = [src_glon_list]
    if type(src_glat_list) is not list:
        src_glat_list = [src_glat_list]

    # make sure that the source names are ascii strings (FITS string columns)
    src_name_list = [i.encode("ascii") for i in src_name_list]

    # set default for catalog name and location
    catalog_name = Path(catalog_name)
    if catalog_dir is None:
        catalog_dir = Path.cwd()
    else:
        catalog_dir = Path(catalog_dir)

    # temporary catalog holding only the user's sources; it is merged with the
    # packaged survey catalog below and then removed
    prev_name = catalog_name.stem
    cat = catalog_dir.joinpath(prev_name + "_prev.cat")
    final_cat = catalog_dir.joinpath(catalog_name)

    # create the columns of file
    c1 = fits.Column(
        name="CATNUM",
        array=np.array(list(range(catnum_init - len(src_name_list), catnum_init))),
        format="I",
    )  # 2 byte integer
    c2 = fits.Column(name="NAME", array=np.array(src_name_list), format="30A")
    c3 = fits.Column(
        name="RA_OBJ", array=np.array(src_ra_list), format="D", unit="deg", disp="F9.5"
    )
    c4 = fits.Column(
        name="DEC_OBJ",
        array=np.array(src_dec_list),
        format="D",
        unit="deg",
        disp="F9.5",
    )
    c5 = fits.Column(
        name="GLON_OBJ",
        array=np.array(src_glon_list),
        format="D",
        unit="deg",
        disp="F9.5",
    )
    c6 = fits.Column(
        name="GLAT_OBJ",
        array=np.array(src_glat_list),
        format="D",
        unit="deg",
        disp="F9.5",
    )
    c7 = fits.Column(
        name="ALWAYS_CLEAN", array=np.array([0] * len(src_name_list)), format="1L"
    )  # 1 byte logical

    cols = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7])
    hdu = fits.BinTableHDU.from_columns(cols)
    hdu.writeto(str(cat))

    # directory holding this module, where the packaged survey catalog lives
    module_dir = Path(__file__).parent
    hsp.ftmerge(
        infile="%s %s"
        % (str(module_dir.joinpath("data").joinpath("survey6b_2.cat")), str(cat)),
        outfile=str(final_cat),
    )

    # remove the temporary single-source catalog; Path.unlink is portable and,
    # unlike os.system("rm ..."), safe for paths containing spaces
    cat.unlink()

    return final_cat
def combine_survey_lc(survey_obsid_list, output_dir=None, clean_dir=True):
    """
    Concatenates a set of *.cat files to produce a fits file containing data over the duration of times specified in the
    BatSurvey objects. This runs for the catalog that was passed to the constructor methods of the BatSurvey objects

    :param survey_obsid_list: List of BatSurvey objects
    :param output_dir: None or a string/Path denoting where the combined light curve files should be written; when
        None the directory "total_lc" next to the first observation's result directory is used
    :param clean_dir: Boolean set to True by default. Denotes if the whole directory that holds all the compiled light curve
        data for the passed survey observations should be deleted and recreated if the directory exists.
    :return: Returns a string with the directory of the combined light curve files
    """
    if type(survey_obsid_list) is not list:
        survey_obsid_list = [survey_obsid_list]

    # get the main directory where we should create the total_lc directory
    if output_dir is None:
        output_dir = survey_obsid_list[0].result_dir.parent.joinpath(
            "total_lc"
        )  # os.path.join(main_dir, "total_lc")
    else:
        output_dir = Path(output_dir).expanduser().resolve()

    # if the directory doesn't exist, create it otherwise overwrite it
    dirtest(output_dir, clean_dir=clean_dir)

    # make the local pfile dir if it doesn't exist and set this value
    _local_pfile_dir = output_dir.joinpath(".local_pfile")
    _local_pfile_dir.mkdir(parents=True, exist_ok=True)
    # heasoftpy moved local_pfiles between namespaces across versions; try both
    try:
        hsp.local_pfiles(pfiles_dir=str(_local_pfile_dir))
    except AttributeError:
        hsp.utils.local_pfiles(par_dir=str(_local_pfile_dir))

    # merge every pointing's flux catalog into per-source .cat files keyed by NAME
    ret = []
    for obs in survey_obsid_list:
        for i in obs.pointing_flux_files:
            dictionary = dict(
                keycolumn="NAME",
                infile=str(i),
                outfile=str(output_dir.joinpath("%s.cat")),
            )
            # there is a bug in the heasoftpy code so try to explicitly call it for now
            ret.append(hsp.batsurvey_catmux(**dictionary))

    return output_dir
def read_lc_data(filename, energy_band_index=None, T0=0):
    """
    Reads in a fits file that contains rate information, at different energy bands, at a number of METs

    :param filename: String of the name of the fits file
    :param energy_band_index: int or None to denote which energy band the user wants to choose
        The bands, in order of the index that they would be accessed are: 14-20 keV, 20-24 keV, 24-35 keV, 35-50 keV,
        50-75 keV, 75-100 keV, 100-150 keV, 150-195 keV
    :param T0: float that represents a critial time that observations should be measured in time with respect to
    :return: lists of the time, time bin half-width, rate, rate_error, and the SNR of the measurement in time
    """
    time = []
    time_err = []
    rate = []
    rate_err = []
    snr = []

    filename = str(filename)
    # context manager guarantees the FITS file is closed even if parsing fails
    with fits.open(filename) as lc_fits:
        lc_fits_data = lc_fits[1].data

        time_array = lc_fits_data.field("TIME")
        timestop_array = lc_fits_data.field("TIME_STOP")
        # exposure_array = lc_fits_data.field("EXPOSURE") this isn't needed
        rate_array = lc_fits_data.field("RATE")
        rate_err_array = lc_fits_data.field("RATE_ERR")
        bkg_var_array = lc_fits_data.field("BKG_VAR")
        snr_array = lc_fits_data.field("VECTSNR")

        for i in range(len(lc_fits_data)):
            # bin midpoint and half-width, in MET relative to T0
            time_start = time_array[i] - T0
            time_stop = timestop_array[i] - T0
            time.append((time_start + time_stop) / 2.0)
            time_err.append((time_stop - time_start) / 2.0)

            if energy_band_index is not None:
                # single user-selected energy band (1-indexed by the caller)
                rate.append(rate_array[i][energy_band_index - 1])
                rate_err.append(rate_err_array[i][energy_band_index - 1])
                snr.append(snr_array[i][energy_band_index - 1])
            else:
                if len(rate_array[i]) > 8:
                    # a total-band column already exists beyond the 8 survey
                    # bands; use it directly
                    rate.append(rate_array[i][-1])
                    rate_err.append(rate_err_array[i][-1])
                    snr.append(snr_array[i][-1])
                else:
                    # sum the 8 survey bands; errors and background variance
                    # combine in quadrature
                    rate_tot = 0.0
                    rate_err_2_tot = 0.0
                    bkg_var_2_tot = 0.0
                    for j in range(len(rate_array[i])):
                        rate_tot += rate_array[i][j]
                        rate_err_2_tot += rate_err_array[i][j] ** 2
                        bkg_var_2_tot += bkg_var_array[i][j] ** 2
                    rate.append(rate_tot)
                    rate_err.append(np.sqrt(rate_err_2_tot))
                    snr.append(rate_tot / np.sqrt(bkg_var_2_tot))

    return time, time_err, rate, rate_err, snr
def calc_response(phafilename):
    """
    This function generates the response matrix for a given pha file by calling batdrmgen
    (this is a HEASOFT function).

    :param phafilename: String/Path (or list of them) that denotes the location and name of the PHA file that the
        user would like to calculate the response matrix for.
    :return: Heasoftpy "Result" object obtained from calling heasoftpy batdrmgen. The "Result" object is the entire
        output, which helps to debug in case of an error.
    """
    if type(phafilename) is not list:
        phafilename = [phafilename]

    # when passing in tht whole filename, the paths mess up the connection between the response file and the pha file
    # since there seems to be some character limit to this header value. Therefore, we need to cd to the directory
    # that the PHA file lives in and create the .rsp file and then cd back to the original location.

    # make sure that all elements are paths
    phafilename = [Path(i) for i in phafilename]

    # we are passing in a whole filepath or
    # we are already located in the PHA directory and are mabe calculating the upperlimit bkg spectrum
    _local_pfile_dir = (
        phafilename[0].resolve().parents[1].joinpath(".local_pfile")
    )
    _local_pfile_dir.mkdir(parents=True, exist_ok=True)
    # heasoftpy moved local_pfiles between namespaces across versions; try both
    try:
        hsp.local_pfiles(pfiles_dir=str(_local_pfile_dir))
    except AttributeError:
        hsp.utils.local_pfiles(par_dir=str(_local_pfile_dir))

    # Check if the phafilename is a string and if it has an extension .pha. If NOT then exit
    for filename in phafilename:
        if ".pha" not in filename.name:
            raise ValueError(
                "The file name %s needs to be a string and must have an extension of .pha ."
                % (str(filename))
            )

        # get the cwd
        current_dir = Path.cwd()

        # get the directory that we have to cd to and the name of the file
        pha_dir = filename.parent
        pha_file = filename.name

        # cd to that dir
        if str(pha_dir) != str(current_dir):
            os.chdir(pha_dir)

        # Split the filename by extension, so as to remove the .pha and replace it with .rsp
        # this is necessary since sources can have '.' in name
        out = (
            filename.stem + ".rsp"
        )

        # create drm
        output = hsp.batdrmgen(
            infile=pha_file, outfile=out, chatter=2, clobber="YES", hkfile="NONE"
        )

        # cd back
        if str(pha_dir) != str(current_dir):
            os.chdir(current_dir)

    # NOTE(review): when a list of files is passed, only the Result for the
    # last file is returned — confirm this is the intended contract.
    return output
def fit_spectrum(
    phafilename,
    surveyobservation,
    plotting=True,
    generic_model=None,
    setPars=None,
    use_cstat=True,
    fit_iterations=1000,
    verbose=True,
):
    """
    Fits a spectrum that is loaded in from a BAT pha file. The header of the PHA file must have the associated
    response information.

    User has to pass a phafilename and the BatSurvey object "surveyobservation" (mandatory).
    The user should have already run the "batsurvey" command and created the surveyobservation object.

    The user can specify their own spectral model that is XSPEC compatible.
    To learn about how to specify a spectral model in pyXspec the user can
    look at the following link: https://heasarc.gsfc.nasa.gov/xanadu/xspec/python/html/index.html

    For e.g, to specify a model one has to do the following:
    model=xsp.Model(generic_model,setPars={1:45,2:"123,-1"}) Here -1 stands for "frozen".
    User has to specify cflux in their model. This is mandatory because we use this same function (and hence the user specified model)
    to test any detection in the BAT energy band.

    If no model is specified by the user, then by default the spectrum is fit with the following Xspec model:
    cflux*(powerlaw): with Cflux E_min=14 keV (Frozen), E_max=195 keV (Frozen), flux=-12 (initial value),
    powerlaw Gamma=2 Free, and norm=frozen. Powerlaw norm kept frozen.

    :param phafilename: String that denotes the location and name of the PHA file.
    :param surveyobservation: Object denoting the batsurvey observation object which contains all the
        necessary information related to this observation.
    :param plotting: Boolean statement, if the user wants to plot the spectrum.
    :param generic_model: String with XSPEC compatible model, which must include cflux.
    :param setPars: Dictionary to set the parameter values of the model specified above.
    :param use_cstat: Boolean to use cstat in case of low counts (Poisson statistics), otherwise use chi squared stats.
    :param fit_iterations: Number of fit iterations to be carried out by XSPEC.
        Since BAT data has just 8 energy channels, a default of 100 is enough.
        But the user can specify any value that may be needed.
    :param verbose: Boolean to show every output during the fitting process.
        Set to True by default, that'll help the user to identify any issues with the fits.
    :return: None
    """
    # pyXspec is an optional dependency; fail loudly if it is missing
    try:
        import xspec as xsp
    except ModuleNotFoundError as err:
        # Error handling
        print(err)
        raise ModuleNotFoundError(
            "The pyXspec package needs to installed to fit spectra with this function."
        )

    # In the next few steps we will get into the directory where the PHA files and rsp files are located
    # Do the fitting and then get out to our current directory: current_dir
    # get the cwd.
    phafilename = Path(phafilename)
    current_dir = Path.cwd()

    # Check if the phafilename is a string and if it has an extension .pha. If NOT then exit
    if ".pha" not in phafilename.name:
        raise ValueError(
            "The file name %s needs to be a string and must have an extension of .pha ."
            % (str(phafilename))
        )

    # get the directory that we have to cd to and the name of the file
    pha_dir = phafilename.parent
    pha_file = phafilename.name

    # The old statement: pointing_id=pha_file.split(".")[0].split("_")[-1] didnt work if source_id has period in it
    pointing_id = phafilename.stem.split("_")[-1]

    if len(pha_file.split("_survey")) > 1:
        # we've got a pha for a normal survey catalog
        source_id = pha_file.split("_survey")[
            0
        ]  # This is the source name compatible with the catalog
    else:
        # we've got a mosaic survey result
        source_id = pha_file.split("_mosaic")[0]

    # cd to that dir
    if str(pha_dir) != str(current_dir):
        os.chdir(pha_dir)

    # clear any previously loaded spectra before loading this one
    xsp.AllData -= "*"
    s = xsp.Spectrum(
        pha_file
    )

    # Define model
    if (
        generic_model is not None
    ):  # User provides a string of model, and a Dictionary for the initial values
        if type(generic_model) is str:
            if "cflux" in generic_model:
                # The user must provide the cflux, or else we will not be able to predict of there is a statistical
                # detection (in the next function).
                try:
                    model = xsp.Model(
                        generic_model, setPars=setPars
                    )  # Set the initial value for the fitting using the Model object attribute
                except Exception as e:
                    print(e)
                    raise ValueError("The model needs to be specified correctly")
            else:
                raise ValueError(
                    "The model needs cflux in order to calulate error on the flux in 14-195 keV"
                )
        # NOTE(review): a non-string generic_model silently leaves ``model``
        # undefined and triggers a NameError below — confirm intended.
    else:
        # If User does not pass any model
        model = xsp.Model("cflux*po")
        p1 = model(1)  # cflux Emin = 14 keV
        p2 = model(2)  # cflux Emax = 195 keV
        p3 = model(3)  # cflux lg10Flux
        p4 = model(4)  # Photon index Gamma
        p5 = model(5)  # Powerlaw norm
        # Setting the values and freezing them.
        p1.values = 14  # already frozen
        p2.values = 195  # already frozen
        p4.values = 2
        p4.frozen = False
        p5.values = 0.001
        p5.frozen = True

    # model_components=model.componentNames #This is a list of the model components
    # Check if the model is XSPEC compatible : Done Listing down the model parameters in a dictionary: parm1: Value,
    # param2: Value.... If no initial values given , default XSPEC values to be used. We will manipulate these param
    # values to "set a value" or "freeze/thaw" a value, set a range for these viable values. We can call the best fit
    # param values, after fit.

    # Fitting the data with this model
    if use_cstat:
        xsp.Fit.statMethod = "cstat"
    else:
        xsp.Fit.statMethod = "chi"

    # Stop fit at nIterations and do not query.
    xsp.Fit.query = "no"
    xsp.Fit.nIterations = fit_iterations
    xsp.Fit.renorm()

    # try to do the fitting if it doesn't work fill in np.nan values for things
    try:
        xsp.Fit.perform()
        if verbose:
            xsp.AllModels.show()
            xsp.Fit.show()

        # Get coordinates from XSPEC plot to use in matplotlib:
        xsp.Plot.device = "/null"
        xsp.Plot("data")
        chans = xsp.Plot.x()
        rates = xsp.Plot.y()
        xerr = xsp.Plot.xErr()
        yerr = xsp.Plot.yErr()
        folded = xsp.Plot.model()

        # Plot using Matplotlib:
        f, ax = plt.subplots()
        ax.errorbar(x=chans, xerr=xerr, y=rates, yerr=yerr, fmt="ro")
        ax.plot(chans, folded, "k-")
        ax.set_xlabel("Energy (keV)")
        ax.set_ylabel("counts/cm^2/sec/keV")
        ax.set_xscale("log")
        ax.set_yscale("log")
        f.savefig(
            phafilename.parent.joinpath(phafilename.stem + ".pdf")
        )
        if plotting:
            plt.show()

        # Capturing the Flux and its error. saved to the model object, can be obtained by calling model(1).error,
        # model(2).error
        model_params = dict()
        for i in range(1, model.nParameters + 1):
            # 90% confidence interval: delta fit-statistic of 2.706
            xsp.Fit.error("2.706 %d" % (i))

            # get the name of the parameter
            par_name = model(i).name
            model_params[par_name] = dict(
                val=model(i).values[0],
                lolim=model(i).error[0],
                hilim=model(i).error[1],
                errflag=model(i).error[-1],
            )
        surveyobservation.set_pointing_info(
            pointing_id, "model_params", model_params, source_id=source_id
        )
    except Exception as Error_with_Xspec_fitting:
        # this is probably that XSPEC cannot fit because of negative counts
        if verbose:
            print(Error_with_Xspec_fitting)

        # need to fill in nan values for all the model params and 'TTTTTTTTT' for the error flag
        model_params = dict()
        for i in range(1, model.nParameters + 1):
            # get the name of the parameter
            par_name = model(i).name
            model_params[par_name] = dict(
                val=np.nan, lolim=np.nan, hilim=np.nan, errflag="TTTTTTTTT"
            )
        surveyobservation.set_pointing_info(
            pointing_id, "model_params", model_params, source_id=source_id
        )

    # Incorporating the model names, parameters, errors into the BatSurvey object.
    xsp.Xset.save(phafilename.stem + ".xcm")
    xspec_savefile = phafilename.parent.joinpath(
        phafilename.stem + ".xcm"
    )
    surveyobservation.set_pointing_info(
        pointing_id, "xspec_model", xspec_savefile, source_id=source_id
    )

    # cd back
    if str(pha_dir) != str(current_dir):
        os.chdir(current_dir)

    return None
def calculate_detection(
    surveyobservation,
    source_id,
    pl_index=2,
    nsigma=3,
    bkg_nsigma=5,
    plot_fit=False,
    verbose=True,
):
    """
    This function uses the fitting function and statistically checks if there is any significant detection (at a specfied confidence).
    If there is no detection, then the function re-calculates the PHA with a bkg_nsigma times the background to calculate the
    upper limit on the flux, at a certain confidence level (given by the user specified bkg_nsigma).

    We deal with two cases:

    (1) Non-detection: Checking if nsigma error on The 14-195 keV flux is consistent with the equation (measured flux - nsigma*error)<=0,
    then return: upper limit=True
    and then recalculate the PHA +response again.... with count rate= bkg_nsigma*BKG_VAR

    (2) Detection: If (measured flux - nsigma*error)>=0 then return: "detection has been measured"

    This operates on the entire batsurvey object (corresponding to a batobservation id),
    and we want to see if there is a detection for 'any number of pointings for a given source' in that batobservation id.
    Note that it operates ONLY on one source.
    For different sources one can specify separate detection threshold ('sigma') for different sources.
    Thus we have kept this function to operate only ONE source at a time.

    :param surveyobservation: Object denoting the batsurvey observation object which contains all the necessary
        information related to this observation.
    :param source_id: String denoting the source name exactly as that in the phafilename.
    :param pl_index: Float (default 2) denoting the power law photon index that will be used to obtain a flux upper
        limit
    :param nsigma: Integer, denoting the number fo sigma the user needs to justify a detection
    :param bkg_nsigma: Integer, denoting the number of sigma the user needs to calculate flux upper limit in case
        of a non detection.
    :param plot_fit: Boolean to determine if the fit should be plotted or not
    :param verbose: Boolean to show every output during the fitting process. Set to True by default, that'll help the
        user to identify any issues with the fits.
    :return: In case of a non-detection a flux upper limit is returned.
    """
    # pyXspec is an optional dependency; fail loudly if it is missing
    try:
        import xspec as xsp
    except ModuleNotFoundError as err:
        # Error handling
        print(err)
        raise ModuleNotFoundError(
            "The pyXspec package needs to installed to determine if a source has been detected with this function."
        )

    current_dir = Path.cwd()

    # get the directory that we have to cd to and the name of the file
    pha_dir = surveyobservation.get_pha_filenames(id_list=[source_id])[0].parent
    pointing_ids = (
        surveyobservation.get_pointing_ids()
    )  # This is a list of pointing_ids in this bat survey observation

    # cd to that dir
    if str(pha_dir) != str(current_dir):
        os.chdir(pha_dir)

    # NOTE(review): this list is returned but never appended to anywhere in
    # the function body — confirm whether upper limits should be collected.
    flux_upperlim = []

    # By specifying the source_id, we now have the specific PHA filename list corresponding to the
    # pointing_id_list for this given bat survey observation.
    phafilename_list = surveyobservation.get_pha_filenames(
        id_list=[source_id], pointing_id_list=pointing_ids
    )

    for i in range(len(phafilename_list)):  # Loop over all phafilename_list,
        pha_dir = phafilename_list[i].parent
        pha_file = phafilename_list[i].name

        # The old statement: pointing_id=pha_file.split(".")[0].split("_")[-1] didnt work if source_id has period in it
        pointing_id = phafilename_list[i].stem.split("_")[-1]

        # Within the pointing dictionar we have the "key" called "Xspec_model" which has the parameters, values and
        # errors.
        error_issues = False  # preset this here
        try:
            pointing_dict = surveyobservation.get_pointing_info(
                pointing_id, source_id=source_id
            )
            model = pointing_dict["model_params"]["lg10Flux"]
            flux = model["val"]  # ".cflux.lg10Flux.values[0] #Value
            fluxerr_lolim = model["lolim"]  # .cflux.lg10Flux.error[0] #Error
            fluxerr_uplim = model["hilim"]  # .cflux.lg10Flux.error[1]

            # symmetrize the (log-space) confidence interval in linear flux
            avg_flux_err = 0.5 * (
                ((10**fluxerr_uplim) - (10**flux))
                + ((10**flux) - (10**fluxerr_lolim))
            )
            print(
                "The condition here is",
                10 ** (flux),
                [10**fluxerr_lolim, 10**fluxerr_uplim],
                nsigma,
                avg_flux_err,
                ((10**flux) - nsigma * avg_flux_err),
            )

            # check the errors for any issues:
            if "T" in model["errflag"]:
                error_issues = True
        except ValueError:
            # the fitting was not successful and the dictionary was not created but want to enter the upper limit if
            # statement
            fluxerr_lolim = 0
            flux = 1
            nsigma = 1
            avg_flux_err = 1

        # Non-detection: the flux is consistent with zero at nsigma, the fit
        # failed, or the XSPEC error run reported problems
        if (
            fluxerr_lolim == 0
            or (((10**flux) - nsigma * avg_flux_err) <= 0)
            or np.isnan(flux)
            or error_issues
        ):
            print("No detection, just upperlimits for the spectrum:", pha_file)

            # Here redo the PHA calculation with 5*BKG_VAR
            surveyobservation.calculate_pha(
                calc_upper_lim=True,
                bkg_nsigma=bkg_nsigma,
                id_list=source_id,
                single_pointing=pointing_id,
            )

            # can also do surveyobservation.get_pha_filenames(id_list=source_id,pointing_id_list=pointing_id,
            # getupperlim=True) to get the created upperlimit file. Will do this because it is more robust
            # bkgnsigma_upper_limit_pha_file= pha_file.split(".")[0]+'_bkgnsigma_%d'%(bkg_nsigma) + '_upperlim.pha'
            bkgnsigma_upper_limit_pha_file = surveyobservation.get_pha_filenames(
                id_list=source_id, pointing_id_list=pointing_id, getupperlim=True
            )[0].name

            try:
                calc_response(bkgnsigma_upper_limit_pha_file)
            except:
                # This is a MosaicBatSurvey object which already has the default associated response file
                pass

            # fit a frozen power law to the bkg_nsigma spectrum to convert the
            # scaled background rate into a flux upper limit
            xsp.AllData -= "*"
            s = xsp.Spectrum(bkgnsigma_upper_limit_pha_file)
            xsp.Fit.statMethod = "cstat"

            model = xsp.Model("po")
            # p1 = m1(1) # cflux Emin = 15 keV
            # p2 = m1(2) # cflux Emax = 150 keV
            # p3 = m1(3) # cflux lg10Flux
            p4 = model(1)  # Photon index Gamma
            p5 = model(2)  # Powerlaw norm
            # p1.values = 15 # already frozen
            # p2.values = 150 # already frozen
            p4.frozen = True
            p4.values = pl_index
            p5.values = 0.001
            p5.frozen = False

            if verbose:
                print("******************************************************")
                print(
                    f"Fitting the {bkg_nsigma} times bkg of the spectrum {bkgnsigma_upper_limit_pha_file}"
                )
            xsp.Fit.nIterations = 100
            xsp.Fit.perform()
            if plot_fit:
                xsp.AllModels.show()
                xsp.Fit.show()
            xsp.AllModels.calcFlux("14.0 195.0")

            if verbose:
                print("******************************************************")
                print("******************************************************")
                print("******************************************************")
                print(s.flux)

            # Capturing the simple model. saved to the model object, can be obtained by calling model(1).error,
            # model(2).error
            model_params = dict()
            for j in range(1, model.nParameters + 1):
                # get the name of the parameter
                par_name = model(j).name
                model_params[par_name] = dict(
                    val=model(j).values[0],
                    lolim=model(j).error[0],
                    hilim=model(j).error[1],
                    errflag="TTTTTTTTT",
                )
            surveyobservation.set_pointing_info(
                pointing_id, "model_params", model_params, source_id=source_id
            )
            surveyobservation.set_pointing_info(
                pointing_id,
                "nsigma_lg10flux_upperlim",
                np.log10(s.flux[0]),
                source_id=source_id,
            )
        else:  # Detection
            if verbose:
                print("A detection has been measured at the %d sigma level" % (nsigma))

    # cd back
    if str(pha_dir) != str(current_dir):
        os.chdir(current_dir)

    return flux_upperlim  # This is a list for all the Valid non-detection pointings
def print_parameters(
    obs_list,
    source_id,
    values=["met_time", "utc_time", "exposure"],
    energy_range=[14, 195],
    latex_table=False,
    savetable=False,
    save_file="output.txt",
    overwrite=True,
    add_obs_id=True,
):
    """
    Convenience function to plot various survey data pieces of information in a formatted file/table

    :param obs_list: A list of BatSurvey objects
    :param source_id: A string with the name of the source of interest.
    :param values: A list of strings containing information that the user would like to be printed out. The strings
        correspond to the keys in the pointing_info dictionaries of each BatSurvey object and the columns will be put
        in this order.
    :param energy_range: a list or array of the minimum energy range that should be considered and the maximum energy
        range that should be considered. By default, this is 14-195 keV
    :param latex_table: Boolean to denote if the output should be formatted as a latex table
    :param savetable: Boolean to denote if the user wants to save the table to a file
    :param save_file: string that specified the location and name of the file that contains the saved table
    :param overwrite: Boolean that says to overwrite the output file if it already exists
    :param add_obs_id: Boolean to denote if the observation and pointing IDs should be added to the value list automatically
    :return: None
    """
    save_file = Path(save_file)

    if save_file.exists() and overwrite:
        save_file.unlink()

    if type(obs_list) is not list:
        obs_list = [obs_list]

    if add_obs_id:
        # make sure that the values list has obs_id and pointing_id in it
        if "pointing_id" not in values:
            values.insert(0, "pointing_id")
        if "obs_id" not in values:
            values.insert(0, "obs_id")

    # get all the data that we need
    all_data = concatenate_data(obs_list, source_id, values, energy_range=energy_range)[
        source_id
    ]

    if savetable and save_file is not None:
        # open the file to write the output to
        f = open(str(save_file), "w")

    # header row: one centered, tab-separated column title per requested value
    outstr = " "  # Obs ID \t Pointing ID\t"
    for i in values:
        outstr += f"{i: ^31}\t"  # "\t%s"%(i)

    if not savetable:
        print(outstr)
    else:
        f.writelines([str(outstr), "\n"])

    # column width; one char narrower for latex to leave room for separators
    if latex_table:
        nchar = 29
    else:
        nchar = 30

    # accumulate every data row into one string; written/printed at the end
    outstr = ""
    for i in range(len(all_data[list(all_data.keys())[0]])):
        for key in values:
            val = all_data[key]
            if "id" in key:
                # if we have just one observation ID then we still want to print the obs_id for the first entry in list
                # if we dont then we need to make sure that the printed value is not the same as the one prior
                if i == 0 or val[i] != val[i - 1]:
                    print_val = val[i]
                else:
                    print_val = ""
            else:
                print_val = val[i]

            # see if there are errrors associated with the key
            if key + "_lolim" in all_data.keys():
                # get the errors
                lolim = all_data[key + "_lolim"][i]
                hilim = all_data[key + "_hilim"][i]

                if not np.isnan(lolim) and not np.isnan(hilim):
                    # value with asymmetric errors; render scientific notation
                    # as mantissa^{+err}_{-err} x 10^{exp}
                    middle_str = ""
                    if len(str(val[i]).split("e")) > 1:
                        base = int(str(val[i]).split("e")[-1])
                        if latex_table:
                            middle_str += "$"
                        middle_str += f"{val[i] / 10 ** base:-.3}^{{{hilim / 10 ** base:+.2}}}_{{{-1 * lolim / 10 ** base:+.2}}}"
                        if latex_table:
                            middle_str += f" \\times "
                        else:
                            middle_str += f" x "
                        middle_str += f"10^{{{base:+}}}"
                        if latex_table:
                            middle_str += "$"
                        print_val = middle_str
                    else:
                        print_val = ""
                        if latex_table:
                            print_val += "$"
                        print_val += (
                            f"{val[i]:-.3}" + f"^{{{hilim :+.2}}}_{{{-1 * lolim :+.2}}}"
                        )
                        if latex_table:
                            print_val += "$"
                    outstr += f"{print_val: ^{nchar}}" + "\t"
                else:
                    # errors are NaN: print the bare value (scientific
                    # notation reformatted when present)
                    middle_str = ""
                    if len(str(val[i]).split("e")) > 1:
                        base = int(str(val[i]).split("e")[-1])
                        if latex_table:
                            middle_str += "$"
                        middle_str += f"{val[i] / 10 ** base:-.3}"
                        if latex_table:
                            middle_str += f" \\times "
                        else:
                            middle_str += f" x "
                        middle_str += f"10^{{{base:+}}}"
                        if latex_table:
                            middle_str += "$"
                        print_val = middle_str
                    outstr += f"{print_val: ^{nchar}}\t"
            else:
                # no associated errors for this key: print the value as-is
                outstr += f"{print_val: ^{nchar}}\t"

            if latex_table:
                outstr += " & "
        if latex_table:
            # drop the trailing column separator and end the latex row
            outstr = outstr[:-2]
            outstr += " \\\\"
        outstr += "\n"

    if savetable and save_file is not None:
        f.writelines([str(outstr), "\n"])
        f.close()
    else:
        print(outstr)

    # NOTE(review): this second close duplicates the one above (harmless, as
    # closing a closed file is a no-op) — confirm it can be removed.
    if savetable and save_file is not None:
        f.close()
def download_swiftdata(
    observations,
    reload=False,
    fetch=True,
    jobs=10,
    bat=True,
    auxil=True,
    log=False,
    uvot=False,
    xrt=False,
    tdrss=True,
    save_dir=None,
    **kwargs,
) -> dict:
    """
    Download Swift data from HEASARC or quicklook sites to a local mirror directory.

    If the data already exists in the mirror, it is not reloaded unless it is from
    a quicklook site, or if reload is set.
    Data for observations can be selected by instrument or by filename match.
    Observations are specified as a list of OBSIDs, or a table with an 'OBSID' field.
    Match is a string or list of strings that match the filenames using unix globbing rules.
    e.g. `match=['*brtms*', '*sao.*']` will match both the BAT 64 ms rates and the
    instrument auxiliary orbit information file (if bat=True and auxil=True are set) for
    each observation.

    The result is returned in a dict indexed by OBSID. The 'data' element of an OBSID's
    dict entry is a `swifttools.swift_too.Swift_Data` table including attributes for
    the .url and .localpath of each file.

    :param observations: OBSIDs to download
    :param reload: load even if the data is already in the save_dir
    :param fetch: Download the data if it is not locally cached (defaults to True)
    :param jobs: number of simultaneous download jobs. (Set to 1 to execute unthreaded.)
    :param bat: load the bat data
    :param auxil: load the auxil data
    :param log: load the log data (mostly diagnostic, defaults to false)
    :param uvot: load the uvot data (high volume, defaults to false)
    :param xrt: load the xrt data (high volume, defaults to false)
    :param tdrss: load the tdrss data (necessary for triggered BAT event data, defaults to True)
    :param save_dir: The output directory where the observation ID directories will be saved
    (From swifttools.swift_too.Data )
    :param match: pattern (or list) to match (defaults to all)
    :param kwargs: passed to swifttools.swift_too.Data
    :return: dict{obsid: {obsoutdir:..., success:..., loaded:..., [, datafiles:swtoo.Data][, ]}
    """
    # for GRBs do eg. object_name='GRB220715B', mission="swiftmastr"
    # table = heasarc.query_object(object_name, mission=mission, sortvar="START_TIME")
    # The first entry in the table should be the TTE data observation ID, from when the GRB was triggered, want to
    # download the 0 segment. (Can download others for different survey analyses etc)
    # Can also query mission="swifttdrss" and get the GRB target ID and just download the obs_id=str(Target ID)+'000'
    if save_dir is None:
        save_dir = datadir()
    save_dir = Path(save_dir).resolve()
    if np.isscalar(observations) or isinstance(observations, ap.table.row.Row):
        observations = [observations]
    obsids = []
    for entry in observations:
        # Normalize each entry to an 11-character OBSID string, probing the
        # supported input forms in turn (EAFP with narrow exceptions).
        try:  # swiftmastr observation table row / mapping with an OBSID column
            entry = entry["OBSID"]
        except (KeyError, IndexError, TypeError):
            # not subscriptable by 'OBSID' (e.g. already a str or int)
            pass
        try:  # swifttools.ObsQuery result
            entry = entry.obsid  # f"{entry.targetid:08d}{entry.seg:03d}"
        except AttributeError:
            pass
        if isinstance(entry, int):
            entry = f"{entry:011d}"
        if not isinstance(entry, str):
            raise RuntimeError(f"Can't convert {entry} to OBSID string")
        obsids.append(entry)
    # Remove duplicate obsids, but otherwise keep in order.
    obsids = list({o: None for o in obsids}.keys())
    # timestamp taken before any download starts; used to detect stale
    # quicklook files in _download_single_observation
    nowts = datetime.datetime.now().timestamp()
    kwargs["fetch"] = fetch
    download_partialfunc = functools.partial(
        _download_single_observation,
        reload=reload,
        bat=bat,
        auxil=auxil,
        log=log,
        uvot=uvot,
        xrt=xrt,
        tdrss=tdrss,
        save_dir=save_dir,
        nowts=nowts,
        **kwargs,
    )
    if jobs == 1:
        # unthreaded path: simpler tracebacks for debugging
        results = {}
        for obsid in obsids:
            result = download_partialfunc(obsid)
            results[obsid] = result
    else:
        with ThreadPoolExecutor(max_workers=jobs) as executor:
            results = {
                result["obsid"]: result
                for result in executor.map(download_partialfunc, obsids)
            }
    return results
def _download_single_observation(
    obsid, *, reload, bat, auxil, log, uvot, xrt, tdrss, save_dir, nowts, **kwargs
):
    """Helper function--not for general use
    Downloads files for a single OBSID, given parameters from download_swiftdata()
    after encapsulation as a partial function for threading.

    Args:
        obsid (str): Observation ID to download
        (remaining arguments are as in download_swiftdata())

    Raises:
        RuntimeError: If missing local directory. Other exceptions are presented as warnings and
            by setting the 'success' flag to False.

    Returns:
        dict: with keys 'obsid', 'success', 'obsoutdir', 'quicklook', and, on a
            successful download, 'data', 'datafiles', 'outdir', 'downloaded'.
    """
    obsoutdir = save_dir.joinpath(obsid)
    # hidden marker file recording that this directory holds quicklook data
    quicklookfile = obsoutdir.joinpath(".quicklook")
    result = dict(obsid=obsid, success=True, obsoutdir=obsoutdir, quicklook=False)
    try:
        # quicklook data may have been updated upstream, so always re-download it
        clobber = reload or quicklookfile.exists()
        data = swtoo.Swift_Data(
            obsid=obsid,
            clobber=clobber,
            bat=bat,
            log=log,
            auxil=auxil,
            uvot=uvot,
            xrt=xrt,
            tdrss=tdrss,
            outdir=str(save_dir),
            **kwargs,
        )
        result["data"] = data
        if data.status.status != "Accepted":
            raise RuntimeError(" ".join(data.status.warnings + data.status.errors))
        if data.quicklook:  # Mark the directory as quicklook
            quicklookfile.open("w").close()
            result["quicklook"] = True
        elif quicklookfile.exists():
            # This directory just transitioned from quicklook to archival version
            oldqlookdir = save_dir.joinpath("old_quicklook", obsid)
            oldqlookdir.mkdir(exist_ok=True, parents=True)
            for stalefile in obsoutdir.glob("**/*"):
                # Any file older than the time before the data was downloaded
                # is a quicklook leftover; move it aside (dotfiles excluded)
                if (
                    stalefile.is_file()
                    and stalefile.stat().st_mtime < nowts
                    and not stalefile.name.startswith(".")
                ):
                    stalefile.replace(oldqlookdir.joinpath(stalefile.name))
            quicklookfile.unlink()
        result.update(
            datafiles=data,
            quicklook=data.quicklook,
            outdir=Path(data.outdir),
            success=True,
            downloaded=True,
        )
        if not Path(data.outdir).is_dir():
            raise RuntimeError(f"Data directory {data.outdir} missing")
    except Exception as e:
        # deliberately broad: any failure is reported as a warning and flagged
        # in the result so one bad OBSID does not abort a threaded batch
        warnings.warn(f"Did not download {obsid} {e}")
        result["success"] = False
    return result
def test_remote_URL(url, timeout=10):
    """
    Check whether a remote URL exists and is reachable.

    :param url: string URL to probe with an HTTP HEAD request
    :param timeout: seconds to wait for the server before giving up
        (new parameter, defaults to 10 for backward compatibility)
    :return: True if the server answered with a non-error (< 400) status code
    """
    # HEAD avoids downloading the body; without a timeout, requests.head()
    # can block indefinitely on an unresponsive host.
    return requests.head(url, timeout=timeout).status_code < 400
def from_heasarc(object_name=None, tablename="swiftmastr", **kwargs):
    """
    Query a HEASARC catalog for an object, silencing astropy warnings.

    :param object_name: name of the object to look up (e.g. 'GRB220715B')
    :param tablename: HEASARC mission table to query (defaults to 'swiftmastr')
    :param kwargs: passed through to Heasarc.query_object
    :return: the astropy table returned by the query
    """
    query_service = Heasarc()
    with warnings.catch_warnings():
        # astroquery tends to emit benign AstropyWarnings; suppress them here
        warnings.simplefilter("ignore", ap.utils.exceptions.AstropyWarning)
        return query_service.query_object(
            object_name=object_name, mission=tablename, **kwargs
        )
def find_trigger_data():
    """Placeholder for trigger-data discovery; not yet implemented."""
    raise NotImplementedError
def met2mjd(met_time):
    """
    Convert a Swift MET time to MJD, including the Swift clock correction.

    The fast path uses the swiftbat code base; if that is unavailable (or fails
    at runtime) the slower heasoftpy swifttime task is used instead.

    :param met_time: a number that is the Swift MET time that will be converted
    :return: a MJD date that includes the Swift time clock correction
    """
    try:
        mjd = sbu.met2mjd(met_time, correct=True)
    except (ModuleNotFoundError, RuntimeError):
        # fall back to heasoftpy's swifttime task, requesting MJD output
        task_output = hsp.swifttime(
            intime=str(met_time),
            insystem="MET",
            informat="s",
            outsystem="UTC",
            outformat="m",
        )  # output in MJD
        mjd = float(task_output.params["outtime"])
    return Time(mjd, format="mjd", scale="utc").value
def met2utc(met_time, mjd_time=None):
    """
    Convert a Swift MET time to UTC, via MJD.

    The MET value is first converted to MJD (see :func:`met2mjd`) and then to
    UTC. A precomputed MJD time can be supplied to skip the first step.

    :param met_time: a number that is the Swift MET time that will be converted
    :param mjd_time: optional precomputed MJD time; when None it is derived
        from met_time
    :return: a numpy datetime64 object of the MET time with the Swift clock
        correction applied
    """
    if mjd_time is None:
        mjd_time = met2mjd(met_time)
    return Time(mjd_time, format="mjd", scale="utc").datetime64
def save_progress(obs_list):
    """
    Convenience function to save progress for a list of BatSurvey observations.

    :param obs_list: a single BatSurvey/MosaicBatSurvey object, or a list of them
    :return: None
    """
    # accept a bare observation as well as a list; isinstance (rather than
    # `type(...) is list`) also accepts list subclasses
    if not isinstance(obs_list, list):
        obs_list = [obs_list]
    for obs in obs_list:
        obs.save()
def set_pdir(pdir):
    """
    Sets the custom pfile directory for calling a heasoftpy function. This ensures
    that functions can be called in parallel. Deprecated since heasoftpy v1.2.

    :param pdir: None, Path, or string to the custom pfiles directory. A value of
        None will force heasoftpy to create a custom pfiles directory in /tmp, as
        is specified in their documentation.
    :return: None
    """
    # heasoftpy expects either None or a plain string path
    pdir = None if pdir is None else str(pdir)
    try:
        hsp.local_pfiles(pfiles_dir=pdir)
    except AttributeError:
        # older heasoftpy releases expose the helper under hsp.utils
        hsp.utils.local_pfiles(par_dir=pdir)
def reset_pdir():
    """
    Resets the pfiles environment variable to what it originally was. This is depreciated since heasoftpy v1.2.

    :return: None
    """
    # _orig_pdir is a module-level snapshot of PFILES taken at import time,
    # before any set_pdir() call modified it
    os.environ["PFILES"] = _orig_pdir
def concatenate_data(
    bat_observation, source_ids, keys, energy_range=[14, 195], chronological_order=True
):
    """
    This convenience function collects the data that was requested by the user as passed into the keys variable. The
    data is returned in the form of a dictionary with the same keys and numpy arrays of all the concatenated data. If
    the user asks for parameters with errors associated with them these errors will be automatically included. For
    example if the user wants rates information then the function will automatically include a dictionary key to
    hold the rates error information as well

    :param bat_observation: a list of BatObservation objects including BatSurvey and MosaicBatSurvey objects that the
        user wants to extract the relevant data from.
    :param source_ids: The sources that the user would like to collect data for
    :param keys: a string or list of strings
    :param energy_range: a list or array of the minimum energy range that should be considered and the maximum energy
        range that should be considered
    :param chronological_order: Boolean to denote if the outputs should be sorted chronologically or kept in the same
        order as the BATSurvey objects that were passed in
    :return: dict with the keys specified by the user and numpy lists as the concatenated values for each key
    """
    # make sure that the keys are a list
    if type(keys) is not list:
        # it is a single string:
        keys = [keys]
    # see if the user has the rates included in here
    if "rate" in keys:
        # see if the rates_err is already included. If not add it.
        if "rate_err" not in keys:
            keys.append("rate_err")
    if type(source_ids) is not list:
        # it is a single string:
        source_ids = [source_ids]
    # create a dict, keyed by source, of dicts keyed by the requested keys;
    # each leaf starts as an empty list that values get appended to below
    concat_data = dict().fromkeys(source_ids)
    for i in concat_data.keys():
        concat_data[i] = dict().fromkeys(keys)
        for j in concat_data[i].keys():
            concat_data[i][j] = []
    # determine the energy range that may be of interest. This can be none for total E range or one of the basic 8
    # channel energies or a range that spans more than one energy range of the 8 channels.
    if np.isclose([14, 195], energy_range).sum() == 2:
        e_range_idx = [-1]  # this is just the last index of the arrays for counts, etc
    else:
        # get the index
        obs_min_erange_idx = bat_observation[0].emin.index(np.min(energy_range))
        obs_max_erange_idx = bat_observation[0].emax.index(np.max(energy_range))
        e_range_idx = np.arange(obs_min_erange_idx, obs_max_erange_idx + 1)
    if chronological_order:
        # sort the obs ids by time of 1st pointing id
        all_met = [
            i.pointing_info[i.pointing_ids[0]]["met_time"] for i in bat_observation
        ]
        sorted_obs_idx = np.argsort(all_met)
    else:
        sorted_obs_idx = np.arange(len(bat_observation))
    # iterate over observation IDs
    for idx in sorted_obs_idx:
        obs = bat_observation[idx]
        try:
            # have obs id for normal survey object
            observation_id = obs.obs_id
        except AttributeError:
            # dont have obs_id for mosaic survey object
            observation_id = "mosaic"
        if chronological_order:
            # sort the pointing IDs too
            sorted_pointing_ids = np.sort(obs.pointing_ids)
        else:
            sorted_pointing_ids = obs.pointing_ids
        # iterate over pointings
        for pointings in sorted_pointing_ids:
            # iterate over sources
            for source in concat_data.keys():
                # see if the source exists in the observation
                if source in obs.get_pointing_info(pointings).keys():
                    # iterate over the keys of interest
                    for user_key in keys:
                        save_val = np.nan
                        # see if the user wants observation ID or pointing ID
                        if "obs" in user_key:
                            save_val = observation_id
                            concat_data[source][user_key].append(save_val)
                            save_val = (
                                np.inf
                            )  # set to a crazy number so we don't get errors with np.isnan for a string
                        if "pointing" in user_key:
                            save_val = pointings
                            concat_data[source][user_key].append(save_val)
                            save_val = (
                                np.inf
                            )  # set to a crazy number so we don't get errors with np.isnan for a string
                        # search in all of the per-pointing and per-source info
                        # dicts for the requested key
                        continue_search = True
                        for dictionary in [
                            obs.get_pointing_info(pointings),
                            obs.get_pointing_info(pointings, source_id=source),
                        ]:
                            # only look here when the key was not already
                            # handled above, is not a model-fit quantity, and
                            # no value has been found yet
                            if (
                                continue_search
                                and np.isnan(save_val)
                                and len(
                                    dpath.search(
                                        obs.get_pointing_info(
                                            pointings, source_id=source
                                        )["model_params"],
                                        user_key,
                                    )
                                )
                                == 0
                                and ("flux" not in user_key.lower())
                                and ("index" not in user_key.lower())
                            ):
                                try:
                                    # if this is a rate/rate_err/snr need to calcualate these quantities based on the
                                    # returned array
                                    if "rate" in user_key or "snr" in user_key:
                                        rate, rate_err, snr = obs.get_count_rate(
                                            e_range_idx, pointings, source
                                        )
                                        if "rate_err" in user_key:
                                            save_val = rate_err
                                        elif "rate" in user_key:
                                            save_val = rate
                                        elif "snr" in user_key:
                                            save_val = snr
                                    else:
                                        save_val = dpath.get(dictionary, user_key)
                                except KeyError:
                                    # if the key doest exist don't do anything but add np.nan
                                    save_val = np.nan
                                    # this key for rate, rate_err, SNR doesn't exist probably because the source wasn't
                                    # detected so don't enter the outer if statement again which will keep saving
                                    # np.nan
                                    if "rate" in user_key or "snr" in user_key:
                                        continue_search = False
                                # save the value to the appropriate list under the appropriate key
                                concat_data[source][user_key].append(save_val)
                        # see if the values are for the model fit
                        if (
                            continue_search
                            and np.sum(np.isnan(save_val)) > 0
                            and "model_params"
                            in obs.get_pointing_info(pointings, source_id=source).keys()
                        ):
                            # can have obs.get_pointing_info(pointings, source_id=source)["model_params"]
                            # but it can be None if the source isn't detected
                            # if obs.get_pointing_info(pointings, source_id=source)["model_params"] is not None:
                            # have to modify the name of the flux related quantity here
                            if "flux" in user_key.lower():
                                real_user_key = "lg10Flux"
                            else:
                                real_user_key = user_key
                            # try to access the dictionary key
                            try:
                                save_val = dpath.get(
                                    obs.get_pointing_info(pointings, source_id=source)[
                                        "model_params"
                                    ],
                                    real_user_key,
                                )
                            except KeyError:
                                # if the key doest exist don't do anything but add np.nan
                                save_val = np.nan
                                # if the value that we want is flux but we only have an upper limit then we have to get
                                # the nsigma_lg10flux_upperlim value
                                if real_user_key == "lg10Flux":
                                    real_user_key = "nsigma_lg10flux_upperlim"
                                    # see if there is a nsigma_lg10flux_upperlim
                                    try:
                                        save_val = dpath.get(
                                            obs.get_pointing_info(
                                                pointings, source_id=source
                                            ),
                                            real_user_key,
                                        )
                                    except KeyError:
                                        # if the key doest exist don't do anything but add np.nan
                                        save_val = np.nan
                            # need to calculate the error on the value
                            # first do the case of flux upper limit
                            if real_user_key == "nsigma_lg10flux_upperlim":
                                # stored as log10(flux) -- convert to linear flux
                                save_value = 10**save_val
                                # there is no upper/lower error since we have an upper limit
                                error = np.ones(2) * np.nan
                                is_upper_lim = True
                            else:
                                is_upper_lim = False
                                if real_user_key == "lg10Flux":
                                    save_value = 10 ** save_val["val"]
                                    error = np.array(
                                        [
                                            10 ** save_val["lolim"],
                                            10 ** save_val["hilim"],
                                        ]
                                    )
                                    error = np.abs(save_value - error)
                                else:
                                    try:
                                        save_value = save_val["val"]
                                        error = np.array(
                                            [save_val["lolim"], save_val["hilim"]]
                                        )
                                        # 'T' in errflag marks an untrustworthy
                                        # fit error -- presumably an XSPEC-style
                                        # flag; TODO confirm upstream convention
                                        if "T" in save_val["errflag"]:
                                            error = np.ones(2) * np.nan
                                        else:
                                            error = np.abs(save_value - error)
                                    except TypeError:
                                        # this is the last resort for catching any keys that aren't found in the dict
                                        # so we may have save_val be = np.nan and we will get TypeError trying to
                                        # call it as a dict
                                        save_value = np.nan
                                        error = np.ones(2) * np.nan
                            # save the value to the appropriate list under the appropriate key
                            concat_data[source][user_key].append(save_value)
                            # save the errors as well. We may need to create the dictionary key for the error/upperlimit
                            user_key_lolim = user_key + "_lolim"
                            user_key_hilim = user_key + "_hilim"
                            user_key_upperlim = user_key + "_upperlim"
                            try:
                                concat_data[source][user_key_lolim].append(error[0])
                                concat_data[source][user_key_hilim].append(error[1])
                                concat_data[source][user_key_upperlim].append(
                                    is_upper_lim
                                )
                            except KeyError:
                                # first time we see this key: create the lists
                                concat_data[source][user_key_lolim] = []
                                concat_data[source][user_key_hilim] = []
                                concat_data[source][user_key_upperlim] = []
                                concat_data[source][user_key_lolim].append(error[0])
                                concat_data[source][user_key_hilim].append(error[1])
                                concat_data[source][user_key_upperlim].append(
                                    is_upper_lim
                                )
    # turn things into numpy array for easier handling
    for src_key in concat_data.keys():
        for key, val in concat_data[src_key].items():
            concat_data[src_key][key] = np.array(val)
    return concat_data
def make_fake_tdrss_message(
    obs_id, trig_time, trig_stop, ra_obj, dec_obj, obs_dir=None
):
    """
    This function creates a fake TDRSS message file that specifies a few important pieces of information which can be
    used in the BAT TTE data processing pipeline.

    :param obs_id: observation ID string; the trigger ID written to the header is
        derived from it (characters 1:-3 of the directory stem)
    :param trig_time: trigger time in Swift MET seconds (written to TRIGTIME)
    :param trig_stop: trigger stop time in Swift MET seconds (written to TRIGSTOP)
    :param ra_obj: [deg] BAT RA location of the GRB or object
    :param dec_obj: [deg] BAT Dec location of the GRB or object
    :param obs_dir: optional directory containing the observation ID directory;
        passed through to BatObservation
    :return: The path object to the location of the created tdrss message file
    """
    from .batobservation import BatObservation

    # see if the observation directory exists
    obs = BatObservation(obs_id, obs_dir=obs_dir)
    # see if the tdrss directory exists. If not, then create it
    # create the tdrss message filename
    tdrss_dir = obs.obs_dir.joinpath("tdrss")
    tdrss_dir.mkdir(parents=True, exist_ok=True)
    tdrss_file = tdrss_dir.joinpath(f"sw{obs.obs_dir.stem}msbce_test.fits.gz")
    # get the trigger id from the observation id
    trig_id = obs.obs_dir.stem[1:-3]
    # build the FITS primary header carrying the trigger metadata the
    # pipeline reads back
    hdr = fits.Header()
    hdr["CREATOR"] = ("BatAnalysis", " Program that created FITS file")
    hdr["OBS_ID"] = (obs_id, "Observation ID")
    hdr["TARG_ID"] = (trig_id, "Target ID")
    hdr["TRIGGER"] = (trig_id, "Trigger Number")
    hdr["TRIGTIME"] = (trig_time, "[s] MET TRIGger Time")
    hdr["DATETRIG"] = (f"{met2utc(trig_time):.23}", "Corrected UTC date of the trigger")
    hdr["BRA_OBJ"] = (ra_obj, "[deg] BAT RA location of GRB or Object")
    hdr["BDEC_OBJ"] = (dec_obj, "[deg] BAT DEC location of GRB or Object")
    hdr["TRIGSTOP"] = (trig_stop, "[s] Trigger MET STOP time")
    hdr["BACKSTRT"] = (0.0, "[s] BACKground STaRT time")
    hdr["BACKSTOP"] = (0.0, "[s] BACKground STOP time")
    hdr["IMAGETRG"] = ("T", "Image Trigger occured?")
    tdrss_header = fits.PrimaryHDU(header=hdr)
    tdrss_header.writeto(tdrss_file)
    return tdrss_file
|
parsotatREPO_NAMEbatanalysisPATH_START.@batanalysis_extracted@batanalysis-main@batanalysis@batlib.py@.PATH_END.py
|
{
"filename": "publish.py",
"repo_name": "dfm/exopop",
"repo_path": "exopop_extracted/exopop-main/document/publish.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Bundle ms.tex and its figures into a flat, date-stamped tarball for submission."""

from __future__ import division, print_function

import os
import glob
import shutil
from datetime import date
from subprocess import check_call

datestamp = date.today().strftime("%m%d")
TMPDIR = "fore" + datestamp
outfn = TMPDIR + ".tar"

# read the manuscript source; close the handle promptly
with open("ms.tex", "r") as f:
    tex = f.read()

try:
    os.makedirs(TMPDIR)
except os.error:
    # directory already exists -- fine
    pass


def rename(fn):
    """Map 'figures/sub/plot.pdf' -> ('sub-plot.pdf', original path)."""
    a, b = os.path.split(fn)
    return os.path.split(a)[1] + "-" + b, fn


# itertools.imap was removed in Python 3; a generator expression works on both.
for a, b in (rename(fn) for fn in glob.glob("figures/*/*.pdf")
             + glob.glob("figures/*.pdf")):
    shutil.copyfile(b, os.path.join(TMPDIR, a))
    # rewrite figure references in the TeX source to the flattened names
    tex = tex.replace(b, a)

shutil.copyfile("vc.tex", os.path.join(TMPDIR, "vc.tex"))
with open(os.path.join(TMPDIR, "ms.tex"), "w") as f:
    f.write(tex)

check_call(" ".join(["cd", TMPDIR+";",
                     "tar", "-cf", os.path.join("..", outfn), "*"]),
           shell=True)
shutil.rmtree(TMPDIR)
print("Wrote file: '{0}'".format(outfn))
dfmREPO_NAMEexopopPATH_START.@exopop_extracted@exopop-main@document@publish.py@.PATH_END.py
|
{
"filename": "check.py",
"repo_name": "ajeldorado/falco-python",
"repo_path": "falco-python_extracted/falco-python-master/falco/check.py",
"type": "Python"
}
|
"""
Module to hold input-checking functions to minimize repetition
"""
import numbers
import numpy as np
class CheckException(Exception):
    """Raised when a check function is itself called incorrectly."""
    pass
# String check support: types accepted wherever a "string" is expected
string_types = (str, bytes)
# Int check support: accept native ints and numpy integer scalars alike
int_types = (int, np.integer)
def _checkname(vname):
    """Verify that *vname* is usable as a printable string label."""
    if not isinstance(vname, string_types):
        raise CheckException('vname must be a string when fed to check '
                             'functions')
def _checkexc(vexc):
    """Verify that *vexc* is an exception class that can be raised."""
    if not isinstance(vexc, type):  # pre-check it is class-like
        raise CheckException('vexc must be a Exception, or an object '
                             'descended from one when fed to check functions')
    if not issubclass(vexc, Exception):
        raise CheckException('vexc must be a Exception, or an object '
                             'descended from one when fed to check functions')
def centering(var):
    """
    Check whether an object is in the values ['pixel', 'interpixel'].

    Parameters
    ----------
    var
        Variable to check

    Returns
    -------
    var
        Same value as input
    """
    _VALID_CENTERING = ['pixel', 'interpixel']
    _CENTERING_ERR = f'Invalid centering specification. Options: {_VALID_CENTERING}'

    if not isinstance(var, str):
        raise TypeError("'centering' value must be a string'")
    if var not in _VALID_CENTERING:
        raise ValueError(_CENTERING_ERR)
    return var
def is_dict(var, vname):
    """
    Check whether an object is a dictionary.

    Parameters
    ----------
    var: dict
        variable to check
    vname: str
        string to output in case of error for debugging

    Returns
    -------
    var
        Same value as input
    """
    _checkname(vname)

    if not isinstance(var, dict):
        # note leading space: original message read "<vname>must be..."
        raise TypeError(vname + ' must be a dictionary')
    return var
def is_bool(var, vname):
    """
    Check whether an object is a boolean.

    Parameters
    ----------
    var : bool
        variable to check
    vname : str
        string to output in case of error for debugging

    Returns
    -------
    var
        Same value as input
    """
    _checkname(vname)

    if not isinstance(var, bool):
        # note leading space: original message read "<vname>must be..."
        raise TypeError(vname + ' must be a bool')
    return var
def real_positive_scalar(var, vname, vexc):
    """
    Verify that *var* is a real, strictly positive scalar and return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    if not isinstance(var, numbers.Number):
        raise vexc(f'{vname} must be scalar')
    if not np.isrealobj(var):
        raise vexc(f'{vname} must be real')
    if var <= 0:
        raise vexc(f'{vname} must be positive')
    return var
def real_array(var, vname, vexc):
    """
    Verify that *var* is (castable to) a nonempty real numpy array; return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    arr = np.asarray(var)
    if arr.ndim == 0:
        raise vexc(vname + ' must have length > 0')
    if not np.isrealobj(arr):
        raise vexc(vname + ' must be a real array')
    # only bool/int/uint/float kinds count as real; 'c' (complex) and
    # non-numeric dtype kinds are rejected
    if arr.dtype.kind not in 'biuf':
        raise vexc(vname + ' must be a real numeric type to be real')
    return arr
def oneD_array(var, vname, vexc):
    """
    Verify that *var* is (castable to) a 1-D real or complex array; return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    arr = np.asarray(var)
    if arr.ndim != 1:
        raise vexc(f'{vname} must be a 1D array')
    if not (np.isrealobj(arr) or np.iscomplexobj(arr)):
        raise vexc(f'{vname} must be a real or complex 1D array')
    return arr
def twoD_array(var, vname, vexc):
    """
    Verify that *var* is (castable to) a 2-D real or complex array; return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    arr = np.asarray(var)
    if arr.ndim != 2:
        raise vexc(f'{vname} must be a 2D array')
    if not (np.isrealobj(arr) or np.iscomplexobj(arr)):
        raise vexc(f'{vname} must be a real or complex 2D array')
    return arr
def twoD_square_array(var, vname, vexc):
    """
    Verify that *var* is a square 2-D real or complex array_like; return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    arr = np.asarray(var)
    if arr.ndim != 2:
        raise vexc(f'{vname} must be a 2D array')
    # is 2-D; now require equal dimensions
    if arr.shape[0] != arr.shape[1]:
        raise vexc(f'{vname} must be a square 2D array')
    if not (np.isrealobj(arr) or np.iscomplexobj(arr)):
        raise vexc(f'{vname} must be a real or complex square 2D array')
    return arr
def threeD_array(var, vname, vexc):
    """
    Verify that *var* is (castable to) a 3-D real or complex array; return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    arr = np.asarray(var)
    if arr.ndim != 3:
        raise vexc(f'{vname} must be a 3D array')
    if not (np.isrealobj(arr) or np.iscomplexobj(arr)):
        raise vexc(f'{vname} must be a real or complex 3D array')
    return arr
def real_scalar(var, vname, vexc):
    """
    Verify that *var* is a real scalar and return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    if not isinstance(var, numbers.Number):
        raise vexc(f'{vname} must be scalar')
    if not np.isrealobj(var):
        raise vexc(f'{vname} must be real')
    return var
def real_nonnegative_scalar(var, vname, vexc):
    """
    Verify that *var* is a real scalar with var >= 0 and return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    if not isinstance(var, numbers.Number):
        raise vexc(f'{vname} must be scalar')
    if not np.isrealobj(var):
        raise vexc(f'{vname} must be real')
    if var < 0:
        raise vexc(f'{vname} must be nonnegative')
    return var
def positive_scalar_integer(var, vname, vexc):
    """
    Verify that *var* is a strictly positive scalar integer and return it.

    NOTE(review): bool is a subclass of int, so True passes this check --
    presumably acceptable to callers; confirm before tightening.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    if not isinstance(var, numbers.Number):
        raise vexc(f'{vname} must be scalar')
    if not isinstance(var, int_types):
        raise vexc(f'{vname} must be integer')
    if var <= 0:
        raise vexc(f'{vname} must be positive')
    return var
def nonnegative_scalar_integer(var, vname, vexc):
    """
    Verify that *var* is a scalar integer with var >= 0 and return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    if not isinstance(var, numbers.Number):
        raise vexc(f'{vname} must be scalar')
    if not isinstance(var, int_types):
        raise vexc(f'{vname} must be integer')
    if var < 0:
        raise vexc(f'{vname} must be nonnegative')
    return var
def scalar_integer(var, vname, vexc):
    """
    Verify that *var* is a scalar integer of any sign and return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    if not isinstance(var, numbers.Number):
        raise vexc(f'{vname} must be scalar')
    if not isinstance(var, int_types):
        raise vexc(f'{vname} must be integer')
    return var
def string(var, vname, vexc):
    """
    Verify that *var* is a string (str or bytes) and return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    if not isinstance(var, string_types):
        raise vexc(f'{vname} must be a string')
    return var
def boolean(var, vname, vexc):
    """
    Verify that *var* is a bool and return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    if not isinstance(var, bool):
        raise vexc(f'{vname} must be a bool')
    return var
def dictionary(var, vname, vexc):
    """
    Verify that *var* is a dict and return it.

    Arguments:
     var: variable to check
     vname: string to output in case of error for debugging
     vexc: Exception to raise in case of error for debugging

    Returns:
     returns var
    """
    _checkname(vname)
    _checkexc(vexc)

    if not isinstance(var, dict):
        raise vexc(f'{vname} must be a dict')
    return var
|
ajeldoradoREPO_NAMEfalco-pythonPATH_START.@falco-python_extracted@falco-python-master@falco@check.py@.PATH_END.py
|
{
"filename": "test_ogle_remote.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/ogle/tests/test_ogle_remote.py",
"type": "Python"
}
|
import pytest
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.utils.exceptions import AstropyDeprecationWarning
from .. import Ogle
@pytest.mark.remote_data
def test_ogle_single():
    """A single Galactic coordinate should yield exactly one result row."""
    target = SkyCoord(0, 3, unit=(u.degree, u.degree), frame='galactic')
    result = Ogle.query_region(coord=target)
    assert len(result) == 1
@pytest.mark.remote_data
def test_ogle_list():
    """Querying the same coordinate three times returns three identical rows."""
    target = SkyCoord(0, 3, unit=(u.degree, u.degree), frame='galactic')
    result = Ogle.query_region(coord=[target, target, target])
    assert len(result) == 3
    assert result['RA[hr]'][0] == result['RA[hr]'][1] == result['RA[hr]'][2]
@pytest.mark.remote_data
def test_ogle_list_values():
    """Raw coordinate lists are deprecated input but must still return rows."""
    raw_coords = [[0, 0, 0], [3, 3, 3]]
    with pytest.warns(AstropyDeprecationWarning):
        result = Ogle.query_region(coord=raw_coords)
    assert len(result) == 3
    assert result['RA[hr]'][0] == result['RA[hr]'][1] == result['RA[hr]'][2]
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@ogle@tests@test_ogle_remote.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/cone/colorbar/tickfont/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``cone.colorbar.tickfont.size`` property."""

    def __init__(
        self, plotly_name="size", parent_name="cone.colorbar.tickfont", **kwargs
    ):
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 1),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@cone@colorbar@tickfont@_size.py@.PATH_END.py
|
{
"filename": "_weight.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/smith/realaxis/tickfont/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for ``layout.smith.realaxis.tickfont.weight``."""

    def __init__(
        self,
        plotly_name="weight",
        parent_name="layout.smith.realaxis.tickfont",
        **kwargs,
    ):
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            extras=kwargs.pop("extras", ["normal", "bold"]),
            max=kwargs.pop("max", 1000),
            min=kwargs.pop("min", 1),
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@smith@realaxis@tickfont@_weight.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/integrations/prefect-gcp/prefect_gcp/models/__init__.py",
"type": "Python"
}
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@integrations@prefect-gcp@prefect_gcp@models@__init__.py@.PATH_END.py
|
|
{
"filename": "lbfgsb.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/optimize/lbfgsb.py",
"type": "Python"
}
|
"""
Functions
---------
.. autosummary::
:toctree: generated/
fmin_l_bfgs_b
"""
## License for the Python wrapper
## ==============================
## Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca>
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, asarray, float64, int32, zeros
from . import _lbfgsb
from .optimize import (approx_fprime, MemoizeJac, OptimizeResult,
_check_unknown_options, wrap_function,
_approx_fprime_helper)
from scipy.sparse.linalg import LinearOperator
__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
                  approx_grad=0,
                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
                  epsilon=1e-8,
                  iprint=-1, maxfun=15000, maxiter=15000, disp=None,
                  callback=None, maxls=20):
    """
    Minimize a function func using the L-BFGS-B algorithm.

    Parameters
    ----------
    func : callable f(x,*args)
        Function to minimise.
    x0 : ndarray
        Initial guess.
    fprime : callable fprime(x,*args), optional
        The gradient of `func`. If None, then `func` returns the function
        value and the gradient (``f, g = func(x, *args)``), unless
        `approx_grad` is True in which case `func` returns only ``f``.
    args : sequence, optional
        Arguments to pass to `func` and `fprime`.
    approx_grad : bool, optional
        Whether to approximate the gradient numerically (in which case
        `func` returns only the function value).
    bounds : list, optional
        ``(min, max)`` pairs for each element in ``x``, defining
        the bounds on that parameter. Use None or +-inf for one of ``min`` or
        ``max`` when there is no bound in that direction.
    m : int, optional
        The maximum number of variable metric corrections
        used to define the limited memory matrix. (The limited memory BFGS
        method does not store the full hessian but uses this many terms in an
        approximation to it.)
    factr : float, optional
        The iteration stops when
        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
        where ``eps`` is the machine precision, which is automatically
        generated by the code. Typical values for `factr` are: 1e12 for
        low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
        high accuracy. See Notes for relationship to `ftol`, which is exposed
        (instead of `factr`) by the `scipy.optimize.minimize` interface to
        L-BFGS-B.
    pgtol : float, optional
        The iteration will stop when
        ``max{|proj g_i | i = 1, ..., n} <= pgtol``
        where ``pg_i`` is the i-th component of the projected gradient.
    epsilon : float, optional
        Step size used when `approx_grad` is True, for numerically
        calculating the gradient.
    iprint : int, optional
        Controls the frequency of output. ``iprint < 0`` means no output;
        ``iprint = 0`` print only one line at the last iteration;
        ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint
        iterations; larger values print progressively more detail.
    disp : int, optional
        If zero, then no output. If a positive number, then this over-rides
        `iprint` (i.e., `iprint` gets the value of `disp`).
    maxfun : int, optional
        Maximum number of function evaluations.
    maxiter : int, optional
        Maximum number of iterations.
    callback : callable, optional
        Called after each iteration, as ``callback(xk)``, where ``xk`` is the
        current parameter vector.
    maxls : int, optional
        Maximum number of line search steps (per iteration). Default is 20.

    Returns
    -------
    x : array_like
        Estimated position of the minimum.
    f : float
        Value of `func` at the minimum.
    d : dict
        Information dictionary.

        * d['warnflag'] is
          - 0 if converged,
          - 1 if too many function evaluations or too many iterations,
          - 2 if stopped for another reason, given in d['task']
        * d['grad'] is the gradient at the minimum (should be 0 ish)
        * d['funcalls'] is the number of function calls made.
        * d['nit'] is the number of iterations.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'L-BFGS-B' `method` in particular. That interface
        exposes `ftol` instead of `factr`, where
        ``ftol = factr * numpy.finfo(float).eps``.

    Notes
    -----
    The underlying Fortran code is L-BFGS-B 3.0 (released April 25, 2011),
    written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal; see the references
    cited in the SciPy documentation for this routine.
    """
    # Reconcile the three calling conventions into a (fun, jac) pair for
    # _minimize_lbfgsb: numerical gradient, combined value+gradient callable,
    # or separate gradient callable.
    if approx_grad:
        fun, jac = func, None
    elif fprime is not None:
        fun, jac = func, fprime
    else:
        # func returns (f, g); memoize so each is available separately.
        fun = MemoizeJac(func)
        jac = fun.derivative

    # Translate the classic option names to the minimize()-style options.
    opts = {'disp': iprint if disp is None else disp,
            'iprint': iprint,
            'maxcor': m,
            'ftol': factr * np.finfo(float).eps,
            'gtol': pgtol,
            'eps': epsilon,
            'maxfun': maxfun,
            'maxiter': maxiter,
            'callback': callback,
            'maxls': maxls}

    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
                           **opts)

    info = {'grad': res['jac'],
            'task': res['message'],
            'funcalls': res['nfev'],
            'nit': res['nit'],
            'warnflag': res['status']}
    return res['x'], res['fun'], info
def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
                     iprint=-1, callback=None, maxls=20, **unknown_options):
    """
    Minimize a scalar function of one or more variables using the L-BFGS-B
    algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxcor : int
        The maximum number of variable metric corrections used to
        define the limited memory matrix. (The limited memory BFGS
        method does not store the full hessian but uses this many terms
        in an approximation to it.)
    ftol : float
        The iteration stops when ``(f^k -
        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
    gtol : float
        The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
        <= gtol`` where ``pg_i`` is the i-th component of the
        projected gradient.
    eps : float
        Step size used for numerical approximation of the jacobian.
    maxfun : int
        Maximum number of function evaluations.
    maxiter : int
        Maximum number of iterations.
    maxls : int, optional
        Maximum number of line search steps (per iteration). Default is 20.

    Notes
    -----
    The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
    but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
    relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
    I.e., `factr` multiplies the default machine floating-point precision to
    arrive at `ftol`.
    """
    _check_unknown_options(unknown_options)
    # Translate minimize()-style names back to the classic fmin_l_bfgs_b
    # names used by the Fortran interface below.
    m = maxcor
    epsilon = eps
    pgtol = gtol
    factr = ftol / np.finfo(float).eps
    x0 = asarray(x0).ravel()
    n, = x0.shape
    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    # unbounded variables must use None, not +-inf, for optimizer to work properly
    bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds]
    if disp is not None:
        if disp == 0:
            iprint = -1
        else:
            iprint = disp
    # Wrap fun so each evaluation is counted in n_function_evals[0].
    n_function_evals, fun = wrap_function(fun, ())
    if jac is None:
        def func_and_grad(x):
            # No analytic gradient: approximate it by finite differences,
            # reusing f as the base point value.
            f = fun(x, *args)
            g = _approx_fprime_helper(x, fun, epsilon, args=args, f0=f)
            return f, g
    else:
        def func_and_grad(x):
            f = fun(x, *args)
            g = jac(x, *args)
            return f, g
    # Encode bounds the way the Fortran routine expects: nbd[i] is the bound
    # type (0 none, 1 lower, 2 both, 3 upper); low_bnd/upper_bnd hold values.
    nbd = zeros(n, int32)
    low_bnd = zeros(n, float64)
    upper_bnd = zeros(n, float64)
    bounds_map = {(None, None): 0,
                  (1, None): 1,
                  (1, 1): 2,
                  (None, 1): 3}
    for i in range(0, n):
        l, u = bounds[i]
        if l is not None:
            low_bnd[i] = l
            l = 1
        if u is not None:
            upper_bnd[i] = u
            u = 1
        nbd[i] = bounds_map[l, u]
    if not maxls > 0:
        raise ValueError('maxls must be positive.')
    # Workspace arrays required by the Fortran driver; sizes come from the
    # L-BFGS-B documentation for setulb.
    x = array(x0, float64)
    f = array(0.0, float64)
    g = zeros((n,), float64)
    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
    iwa = zeros(3*n, int32)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros(4, int32)
    isave = zeros(44, int32)
    dsave = zeros(29, float64)
    task[:] = 'START'
    n_iterations = 0
    # Reverse-communication loop: setulb reports via `task` whether it needs
    # a new f/g evaluation ('FG'), finished an iteration ('NEW_X'), or
    # stopped (anything else). All arrays are updated in place.
    while 1:
        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave, maxls)
        task_str = task.tostring()
        if task_str.startswith(b'FG'):
            # The minimization routine wants f and g at the current x.
            # Note that interruptions due to maxfun are postponed
            # until the completion of the current minimization iteration.
            # Overwrite f and g:
            f, g = func_and_grad(x)
        elif task_str.startswith(b'NEW_X'):
            # new iteration; enforce the iteration/evaluation budgets by
            # writing a STOP request into the task buffer.
            if n_iterations > maxiter:
                task[:] = 'STOP: TOTAL NO. of ITERATIONS EXCEEDS LIMIT'
            elif n_function_evals[0] > maxfun:
                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
                           'EXCEEDS LIMIT')
            else:
                n_iterations += 1
                if callback is not None:
                    callback(x)
        else:
            break
    # Map the final Fortran task string onto the documented warnflag codes.
    task_str = task.tostring().strip(b'\x00').strip()
    if task_str.startswith(b'CONV'):
        warnflag = 0
    elif n_function_evals[0] > maxfun:
        warnflag = 1
    elif n_iterations > maxiter:
        warnflag = 1
    else:
        warnflag = 2
    # These two portions of the workspace are described in the mainlb
    # subroutine in lbfgsb.f. See line 363.
    s = wa[0: m*n].reshape(m, n)
    y = wa[m*n: 2*m*n].reshape(m, n)
    # See lbfgsb.f line 160 for this portion of the workspace.
    # isave(31) = the total number of BFGS updates prior the current iteration;
    n_bfgs_updates = isave[30]
    n_corrs = min(n_bfgs_updates, maxcor)
    hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
    return OptimizeResult(fun=f, jac=g, nfev=n_function_evals[0],
                          nit=n_iterations, status=warnflag, message=task_str,
                          x=x, success=(warnflag == 0), hess_inv=hess_inv)
class LbfgsInvHessProduct(LinearOperator):
    """Linear operator for the L-BFGS approximate inverse Hessian.

    Computes products of a vector with the limited-memory approximation of
    the inverse Hessian accumulated during an L-BFGS-B optimization, using
    the two-loop recursion of Nocedal [1]_.

    Implements the ``scipy.sparse.linalg.LinearOperator`` interface.

    Parameters
    ----------
    sk : array_like, shape=(n_corr, n)
        Array of `n_corr` most recent updates to the solution vector.
        (See [1]).
    yk : array_like, shape=(n_corr, n)
        Array of `n_corr` most recent updates to the gradient. (See [1]).

    References
    ----------
    .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
       storage." Mathematics of computation 35.151 (1980): 773-782.
    """

    def __init__(self, sk, yk):
        """Construct the operator."""
        if sk.shape != yk.shape or sk.ndim != 2:
            raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
        n_corrs, n = sk.shape
        super(LbfgsInvHessProduct, self).__init__(
            dtype=np.float64, shape=(n, n))
        self.sk = sk
        self.yk = yk
        self.n_corrs = n_corrs
        # rho[i] = 1 / (s_i . y_i), the curvature scaling of each correction.
        self.rho = 1 / np.einsum('ij,ij->i', sk, yk)

    def _matvec(self, x):
        """Efficient matrix-vector multiply with the BFGS matrices.

        This calculation is described in Section (4) of [1].

        Parameters
        ----------
        x : ndarray
            An array with shape (n,) or (n,1).

        Returns
        -------
        y : ndarray
            The matrix-vector product
        """
        sk, yk, rho = self.sk, self.yk, self.rho
        vec = np.array(x, dtype=self.dtype, copy=True)
        if vec.ndim == 2 and vec.shape[1] == 1:
            vec = vec.reshape(-1)

        coeffs = np.zeros(self.n_corrs)
        # First recursion pass: newest correction to oldest.
        for idx in reversed(range(self.n_corrs)):
            coeffs[idx] = rho[idx] * np.dot(sk[idx], vec)
            vec = vec - coeffs[idx] * yk[idx]
        # Second recursion pass: oldest correction to newest.
        for idx in range(self.n_corrs):
            beta = rho[idx] * np.dot(yk[idx], vec)
            vec = vec + sk[idx] * (coeffs[idx] - beta)
        return vec

    def todense(self):
        """Return a dense array representation of this operator.

        Returns
        -------
        arr : ndarray, shape=(n, n)
            An array with the same shape and containing
            the same data represented by this `LinearOperator`.
        """
        identity = np.eye(*self.shape, dtype=self.dtype)
        dense = identity
        # Apply the BFGS update H <- A1 H A2 + rho s s^T once per correction.
        for idx in range(self.n_corrs):
            rho_i = self.rho[idx]
            s_i, y_i = self.sk[idx], self.yk[idx]
            left = identity - rho_i * np.outer(s_i, y_i)
            right = identity - rho_i * np.outer(y_i, s_i)
            dense = np.dot(left, np.dot(dense, right)) + rho_i * np.outer(s_i, s_i)
        return dense
if __name__ == '__main__':
    # Smoke-test driver: minimize a scaled extended-Rosenbrock-style
    # function with alternating bounds, exercising all four calling
    # conventions of fmin_l_bfgs_b.
    def func(x):
        # f(x) = (x0 - 1)^2 + 4 * sum_i (x_i - x_{i-1}^2)^2
        f = 0.25 * (x[0] - 1) ** 2
        for i in range(1, x.shape[0]):
            f += (x[i] - x[i-1] ** 2) ** 2
        f *= 4
        return f
    def grad(x):
        # Analytic gradient of func above.
        g = zeros(x.shape, float64)
        t1 = x[1] - x[0] ** 2
        g[0] = 2 * (x[0] - 1) - 16 * x[0] * t1
        for i in range(1, g.shape[0] - 1):
            t2 = t1
            t1 = x[i + 1] - x[i] ** 2
            g[i] = 8 * t2 - 16*x[i] * t1
        g[-1] = 8 * t1
        return g
    def func_and_grad(x):
        # Combined (f, g) form, used when fprime is None and approx_grad=0.
        return func(x), grad(x)
    class Problem(object):
        # Bound-method variant of the combined objective.
        def fun(self, x):
            return func_and_grad(x)
    factr = 1e7
    pgtol = 1e-5
    n = 25
    m = 10
    # Alternate lower-bounded-at-1 and symmetric bounds across variables.
    bounds = [(None, None)] * n
    for i in range(0, n, 2):
        bounds[i] = (1.0, 100)
    for i in range(1, n, 2):
        bounds[i] = (-100, 100)
    x0 = zeros((n,), float64)
    x0[:] = 3
    # 1) separate func and fprime
    x, f, d = fmin_l_bfgs_b(func, x0, fprime=grad, m=m,
                            factr=factr, pgtol=pgtol)
    print(x)
    print(f)
    print(d)
    # 2) numerical gradient
    x, f, d = fmin_l_bfgs_b(func, x0, approx_grad=1,
                            m=m, factr=factr, pgtol=pgtol)
    print(x)
    print(f)
    print(d)
    # 3) combined (f, g) callable
    x, f, d = fmin_l_bfgs_b(func_and_grad, x0, approx_grad=0,
                            m=m, factr=factr, pgtol=pgtol)
    print(x)
    print(f)
    print(d)
    # 4) bound method as the combined callable
    p = Problem()
    x, f, d = fmin_l_bfgs_b(p.fun, x0, approx_grad=0,
                            m=m, factr=factr, pgtol=pgtol)
    print(x)
    print(f)
    print(d)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@optimize@lbfgsb.py@.PATH_END.py
|
{
"filename": "basic_plot.py",
"repo_name": "pyspeckit/pyspeckit",
"repo_path": "pyspeckit_extracted/pyspeckit-master/docs/basic_plot.py",
"type": "Python"
}
|
import numpy as np
from astropy import units as u
import pyspeckit
# Build a synthetic Gaussian emission line on a velocity axis.
# FIX: np.linspace's third argument (num) must be an integer; the previous
# float literal (100.) raises TypeError on numpy >= 1.18.
xaxis = np.linspace(-50, 150, 100) * u.km/u.s
sigma = 10. * u.km/u.s
center = 50. * u.km/u.s
synth_data = np.exp(-(xaxis-center)**2/(sigma**2 * 2.))
# Add noise
stddev = 0.1
noise = np.random.randn(xaxis.size)*stddev
error = stddev*np.ones_like(synth_data)
data = noise+synth_data
# this will give a "blank header" warning, which is fine
sp = pyspeckit.Spectrum(data=data, error=error, xarr=xaxis,
                        unit=u.erg/u.s/u.cm**2/u.AA)
sp.plotter()
sp.plotter.savefig('basic_plot_example.png')
# Fit with automatic guesses
sp.specfit(fittype='gaussian')
# (this will produce a plot overlay showing the fit curve and values)
sp.plotter.savefig('basic_plot_example_withfit.png')
# Redo the overlay with no annotation
# remove both the legend and the model overlay
sp.specfit.clear()
# then re-plot the model without an annotation (legend)
sp.specfit.plot_fit(annotate=False)
sp.plotter.savefig('basic_plot_example_withfit_no_annotation.png')
# overlay another spectrum
# We use the 'synthetic' spectrum with no noise, then shift it by 10 km/s
sp2 = pyspeckit.Spectrum(data=synth_data, error=None, xarr=xaxis+10*u.km/u.s,
                         unit=u.erg/u.s/u.cm**2/u.AA)
# again, remove the overlaid model fit
sp.specfit.clear()
# to overplot, you need to tell the plotter which matplotlib axis to use and
# tell it not to clear the plot first
sp2.plotter(axis=sp.plotter.axis,
            clear=False,
            color='g')
# sp2.plotter and sp.plotter can both be used here (they refer to the same axis
# and figure now)
sp.plotter.savefig('basic_plot_example_with_second_spectrum_overlaid_in_green.png')
# the plot window will follow the last plotted spectrum's limits by default;
# that can be overridden with the xmin/xmax keywords
sp2.plotter(axis=sp.plotter.axis,
            xmin=-100, xmax=200,
            ymin=-0.5, ymax=1.5,
            clear=False,
            color='g')
sp.plotter.savefig('basic_plot_example_with_second_spectrum_overlaid_in_green_wider_limits.png')
# you can also offset the spectra and set different
# this time, we need to clear the axis first, then do a fresh overlay
# fresh plot
sp.plotter(clear=True)
# overlay, shifted down by 0.2 in y and with a wider linewidth
sp2.plotter(axis=sp.plotter.axis,
            offset=-0.2,
            clear=False,
            color='r',
            linewidth=2,
            alpha=0.5,
            )
# you can also modify the axis properties directly
sp.plotter.axis.set_ylim(-0.25, 1.1)
sp2.plotter.savefig('basic_plot_example_with_second_spectrum_offset_overlaid_in_red.png')
|
pyspeckitREPO_NAMEpyspeckitPATH_START.@pyspeckit_extracted@pyspeckit-master@docs@basic_plot.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "djones1040/PythonPhot",
"repo_path": "PythonPhot_extracted/PythonPhot-master/PythonPhot/tests/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This packages contains affiliated package tests.
"""
|
djones1040REPO_NAMEPythonPhotPATH_START.@PythonPhot_extracted@PythonPhot-master@PythonPhot@tests@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "zivmaaya/NeuralCMS",
"repo_path": "NeuralCMS_extracted/NeuralCMS-main/README.md",
"type": "Markdown"
}
|
# NeuralCMS
NeuralCMS is a machine-learning model to compute the gravitational moments and mass of Jupiter given seven chosen parameters setting its interior model. The model is trained on over a million interior model solutions computed with the accurate but computationally demanding concentric Maclaurin spheroid method (CMS; Hubbard 2013 DOI:[10.1088/0004-637X/768/1/43](https://ui.adsabs.harvard.edu/link_gateway/2013ApJ...768...43H/doi:10.1088/0004-637X/768/1/43)).
NeuralCMS receives the following interior features as input: protosolar helium abundance (setting the overall planetary abundance) $Y_{\rm proto}$, temperature at 1 bar $T_{\rm 1 bar}$, atmospheric heavy-material (anything heavier than helium) abundance $Z_1$, transition pressure between the inner and the outer envelopes $P_{12}$, dilute core extent $m_{\rm dilute}$, dilute core maximum heavy-material abundance $Z_{\rm dilute}$, and compact core normalized radius $r_{\rm core}$, and computes the lower even-degree gravity moments and the mass.
Here, we share the trained models presented in Ziv et al. 2024, accepted for publication in A&A (DOI:[10.1051/0004-6361/202450223](https://doi.org/10.1051/0004-6361/202450223)), together with a Python notebook to load the models, compute a single interior model, and perform a grid search for interior models consistent with the gravity moments and mass measured by NASA's Juno mission.
## Installation using pip
This project uses PyTorch, which requires Python 3.8 or higher.
### The required packages:
- python>=3.8
- torch
- numpy
- tqdm
- itertools
- jupyter
Install the requirements:
```
pip install -r requirements.txt
```
## Getting started
Start working with the NeuralCMS in `NeuralCMS_notebook.ipynb`.
## Acknowledgements
Our numerical interior model (CMS), on whose results NeuralCMS was trained, is based on the model from https://github.com/nmovshov/CMS-planet.
|
zivmaayaREPO_NAMENeuralCMSPATH_START.@NeuralCMS_extracted@NeuralCMS-main@README.md@.PATH_END.py
|
{
"filename": "fused_attention_stablehlo.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/cudnn/fused_attention_stablehlo.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import functools
import json
import math
import jax
from jax import dtypes
from jax._src import core
from jax._src import dispatch
from jax._src.custom_partitioning import custom_partitioning
from jax._src.interpreters import batching
from jax._src.lib import cuda_versions
from jax._src import xla_bridge
from jax.interpreters import mlir
from jax.interpreters import xla
from jax.interpreters.mlir import hlo
from jax.interpreters.mlir import ir
import jax.numpy as jnp
from jax.sharding import NamedSharding, PartitionSpec
Array = jnp.ndarray
class AttentionLayout(enum.Enum):
  # Physical layout of the Q/K/V tensors: B=batch, T=query sequence,
  # N=num heads, H=head dim (see the shape unpacking in check_layout).
  BTNH = 0
  BNTH = 1
class MaskType(enum.Enum):
  # Attention-mask variants; serialized into the cuDNN backend config by
  # convert_mask_type_to_string below.
  NO_MASK = 0
  PADDING = 1
  CAUSAL = 2
  PADDING_CAUSAL = 3
  ALIBI = 4
def convert_mask_type_to_string(mask_type: MaskType) -> str:
  """Serialize a MaskType to the string used in the cuDNN backend config."""
  names = {
      MaskType.NO_MASK: "NO_MASK",
      MaskType.PADDING: "PADDING",
      MaskType.CAUSAL: "CAUSAL",
      MaskType.PADDING_CAUSAL: "PADDING_CAUSAL",
      MaskType.ALIBI: "ALIBI",
  }
  if mask_type not in names:
    raise ValueError(f"Unexpected mask type: {mask_type}")
  return names[mask_type]
def has_padding(mask_type: MaskType) -> bool:
  """True if the mask type involves padding (with or without causality)."""
  return mask_type in (MaskType.PADDING, MaskType.PADDING_CAUSAL)
def should_export_dbias(bias_shape, query_shape, layout) -> bool:
  """True if a bias gradient should be exported: bias batch is 1 and its
  head count matches the query's head count."""
  bias_batch, bias_heads, _, _ = bias_shape
  if layout == AttentionLayout.BNTH.value:
    _, query_heads, _, _ = query_shape
  else:
    _, _, query_heads, _ = query_shape
  return bias_batch == 1 and bias_heads == query_heads
def get_large_negative_number(dtype):
  """Return a large negative scalar of `dtype` for masking attention logits."""
  # temp WAR as cuDNN has a bug for subtraction between two large negative value
  if dtype == jnp.float16:
    magnitude = -2 << 14
  elif dtype == jnp.bfloat16:
    magnitude = -2 << 40
  else:
    raise ValueError("Unsupported dtype for inputs.")
  return jnp.asarray(magnitude, dtype=dtype)
def _normalize_layout(layout: str) -> AttentionLayout:
  """Map a user layout string (S or T for the sequence dim) to the enum."""
  canonical = layout.upper()
  if canonical not in ("BSNH", "BNSH", "BTNH", "BNTH"):
    raise ValueError(f"Unsupported qkv_layout: {layout}")
  return AttentionLayout[canonical.replace("S", "T")]
def element_type_to_backend_config_type_mapping(dtype):
  """Map an MLIR element type to the backend-config dtype string.

  Raises KeyError for unsupported element types.
  """
  supported_types = {
      ir.BF16Type.get(): "BF16",
      ir.F16Type.get(): "F16",
  }
  return supported_types[dtype]
def default_layouts(*shapes):
  """Return row-major (minor-to-major) layouts, one per input shape."""
  layouts = []
  for shape in shapes:
    layouts.append(range(len(shape) - 1, -1, -1))
  return layouts
def create_dot_product_attention_backend_config(batch,
                                                num_heads,
                                                seq_q,
                                                seq_kv,
                                                dtype,
                                                fmha_scale,
                                                seed,
                                                dropout_rate,
                                                mask_type,
                                                layout,
                                                sliding_window_length,
                                                is_bwd):
  """Build the JSON backend_config string for the cuDNN fMHA custom call.

  Serializes the attention configuration (scale, dropout, mask type,
  sliding window, intermediate tensor shape) plus the dot dimension
  numbers for either the forward or the backward GEMMs, depending on
  `is_bwd`.
  """
  # Q, K, V: query, key, value in shape of BT(S)NH or BNT(S)H
  # P: BMM1 output in shape of BNTS
  # O: BMM2 output in the same shape with Q
  # BMM1: Q @ K -> P
  # BMM2: P @ V -> O
  # BMM1Grad1: dP @ Q -> dK
  # BMM1Grad2: dP @ K -> dQ
  # BMM2Grad1: P @ dO -> dV
  # BMM2Grad2: dO @ V -> dP
  # A window length of 0 means "no sliding window" to the backend.
  if sliding_window_length is None:
    sliding_window_length = 0
  cudnn_fmha_backend_config = {
    "algorithm": {
      "algo_id": "0",
      "math_type": "TENSOR_OP_MATH",
      "tuning_knobs": {"17": "1", "24": "0"},
      "is_cudnn_frontend": True,
      "workspace_size": "0",
    },
    "fmha_scale": fmha_scale,
    "dropout_rate": dropout_rate,
    # Shape/layout of the BNTS intermediate (BMM1 output).
    "intermediate_tensor_shape": {
      "element_type": element_type_to_backend_config_type_mapping(dtype),
      "dimensions": [str(batch), str(num_heads), str(seq_q), str(seq_kv)],
      "tuple_shapes": [],
      "layout": {
        "dim_level_types": [],
        "dim_unique": [],
        "dim_ordered": [],
        "minor_to_major": ["3", "2", "1", "0"],
        "tiles": [],
        "element_size_in_bits": "0",
        "memory_space": "0",
        "index_primitive_type": "PRIMITIVE_TYPE_INVALID",
        "pointer_primitive_type": "PRIMITIVE_TYPE_INVALID",
        "dynamic_shape_metadata_prefix_bytes": "0",
      },
      "is_dynamic_dimension": [False, False, False, False],
    },
    "seed": seed,
    "is_flash_attention": True,
    "mask_type": convert_mask_type_to_string(mask_type),
    "sliding_window_length": sliding_window_length,
  }
  # We define the contracting and batch dims in the format of
  # ((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims,
  # rhs_batch_dims)).
  if layout == AttentionLayout.BNTH.value:
    dims = [
        ((3, 3), ((0, 1), (0, 1))), # BMM1: BNTH,BNSH->BNTS
        ((3, 2), ((0, 1), (0, 1))), # BMM2: BNTS,BNSH->BNTH
        ((2, 2), ((0, 1), (0, 1))), # BMM1_grad_1: BNTS,BNTH->BNSH
        ((3, 2), ((0, 1), (0, 1))), # BMM1_grad_2: BNTS,BNSH->BNTH
        ((2, 2), ((0, 1), (0, 1))), # BMM2_grad_1: BNTS,BNTH->BNSH
        ((3, 3), ((0, 1), (0, 1))), # BMM2_grad_2: BNTH,BNSH->BNTS
    ]
  else:
    dims = [
        ((3, 3), ((0, 2), (0, 2))), # BMM1: BTNH,BSNH->BNTS
        ((3, 1), ((0, 1), (0, 2))), # BMM2: BNTS,BSNH->BTNH
        ((2, 1), ((0, 1), (0, 2))), # BMM1_grad_1: BNTS,BTNH->BSNH
        ((3, 1), ((0, 1), (0, 2))), # BMM1_grad_2: BNTS,BSNH->BTNH
        ((2, 1), ((0, 1), (0, 2))), # BMM2_grad_1: BNTS,BTNH->BSNH
        ((3, 3), ((0, 2), (0, 2))), # BMM2_grad_2: BTNH,BSNH->BNTS
    ]
  keys = [
      "bmm1_dot_dimension_numbers",
      "bmm2_dot_dimension_numbers",
      "bmm1_grad_gemm1_dot_dimension_numbers",
      "bmm1_grad_gemm2_dot_dimension_numbers",
      "bmm2_grad_gemm1_dot_dimension_numbers",
      "bmm2_grad_gemm2_dot_dimension_numbers",
  ]
  fwd_dot_number = {}
  bwd_dot_number = {}
  # The first two entries describe the forward GEMMs, the rest the backward.
  for idx, (key, ((lc, rc), (lb, rb))) in enumerate(zip(keys, dims)):
    dims_to_write = fwd_dot_number if idx < 2 else bwd_dot_number
    dims_to_write[key] = {
        "lhs_contracting_dimensions": [str(lc)],
        "rhs_contracting_dimensions": [str(rc)],
        "lhs_batch_dimensions": [str(i) for i in lb],
        "rhs_batch_dimensions": [str(i) for i in rb],
    }
  if is_bwd:
    cudnn_fmha_backend_config = {**cudnn_fmha_backend_config, **bwd_dot_number}
  else:
    cudnn_fmha_backend_config = {**cudnn_fmha_backend_config, **fwd_dot_number}
  backend_config = {
    "operation_queue_id":"0",
    "wait_on_operation_queues":[],
    "cudnn_fmha_backend_config": cudnn_fmha_backend_config
  }
  backend_config = json.dumps(backend_config)
  return backend_config
# mapping from (is_bwd, has_dropout, has_bias) to custom call name
_custom_name_maps = {
    # fMHA forward call targets.
    (False, False, False): "__cudnn$fmhaSoftmax",
    (False, False, True): "__cudnn$fmhaScaleBiasSoftmax",
    (False, True, False): "__cudnn$fmhaSoftmaxDropout",
    (False, True, True): "__cudnn$fmhaScaleBiasSoftmaxDropout",
    # fMHA backward call targets.
    (True, False, False): "__cudnn$fmhaSoftmaxBackward",
    (True, False, True): "__cudnn$fmhaScaleBiasSoftmaxBackward",
    (True, True, False): "__cudnn$fmhaSoftmaxDropoutBackward",
    (True, True, True): "__cudnn$fmhaScaleBiasSoftmaxDropoutBackward",
}
def get_custom_call_name(has_bias, has_dropout, is_bwd):
  """Look up the cuDNN fMHA custom-call target for this configuration."""
  return _custom_name_maps[is_bwd, has_dropout, has_bias]
def check_layout(query, key, value, bias, q_seqlen, kv_seqlen, layout):
  """Validate ranks, dtypes and shapes of the attention inputs.

  Raises ValueError/NotImplementedError describing the first violated
  constraint; returns None if everything is consistent.
  """
  def check_eq(a, b, c, msg):
    # FIX: the message previously printed `b` twice and never showed `c`.
    if not (a == b == c):
      raise ValueError(f"{msg} must be same, got {a}, {b}, {c}")

  q_rank, k_rank, v_rank = len(query.shape), len(key.shape), len(value.shape)
  if q_rank != 4:
    raise ValueError(f"Q must have a rank of 4, got {q_rank}")
  check_eq(q_rank, k_rank, v_rank, "QKV rank")

  q_dtype, k_dtype, v_dtype = query.dtype, key.dtype, value.dtype
  if q_dtype not in [jnp.bfloat16, jnp.float16]:
    raise NotImplementedError(f"Q must be fp16 or bf16, got {q_dtype}")
  check_eq(q_dtype, k_dtype, v_dtype, "QKV dtype")

  # Unpack per the declared physical layout (see AttentionLayout).
  if layout == AttentionLayout.BNTH:
    qB, qN, qT, qH = query.shape
    kB, kN, kS, kH = key.shape
    vB, vN, vS, vH = value.shape
  else:
    assert layout == AttentionLayout.BTNH
    qB, qT, qN, qH = query.shape
    kB, kS, kN, kH = key.shape
    vB, vS, vN, vH = value.shape

  check_eq(qB, kB, vB, "QKV batch")
  check_eq(qH, kH, vH, "QKV dim_per_head")
  if kN != vN:
    raise ValueError(f"KV must have same number of heads, got {kN} vs {vN}")
  if kS != vS:
    raise ValueError(f"KV must have same seq length, got {kS} vs {vS}")

  # check bias/q_seqlen/kv_seqlen
  if bias is not None:
    _, _, bT, bS = bias.shape
    if bT != qT or bS != vS:
      raise ValueError(
        f"Bias must have same seq length as QKV, got {bT} and {bS}")

  if q_seqlen is not None:
    q_seq_dtype = q_seqlen.dtype
    q_seq_rank = len(q_seqlen.shape)
    if q_seq_dtype != jnp.int32:
      raise ValueError(f"q_seqlen must have int32 datatype, got {q_seq_dtype}")
    if q_seq_rank != 1:
      raise ValueError(f"q_seqlen must have a rank of 1, got {q_seq_rank}")
    q_seq_b = q_seqlen.shape[0]
    if q_seq_b != qB:
      raise ValueError(f"q_seqlen must have same batch as Q, got {q_seq_b}")

  if kv_seqlen is not None:
    kv_seq_dtype = kv_seqlen.dtype
    kv_seq_rank = len(kv_seqlen.shape)
    if kv_seq_dtype != jnp.int32:
      raise ValueError(
        f"kv_seqlen must have int32 datatype, got {kv_seq_dtype}")
    if kv_seq_rank != 1:
      raise ValueError(f"kv_seq_rank must have a rank of 1, got {kv_seq_rank}")
    kv_seq_b = kv_seqlen.shape[0]
    if kv_seq_b != qB:
      raise ValueError(f"kv_seqlen must have same batch as Q, got {kv_seq_b}")
def check_is_flash_attention(
    query, key, layout: int, cudnn_version, has_bias, is_training):
  """Raise if this attention pattern cannot run as flash attention."""
  if layout == AttentionLayout.BNTH.value:
    _, _, q_seq, head_dim = query.shape
    _, _, kv_seq, _ = key.shape
  else:
    _, q_seq, _, head_dim = query.shape
    _, kv_seq, _, _ = key.shape

  # Head dim must be a multiple of 8 and at most 128; additionally, for
  # training with bias the sequence lengths must be divisible by 2.
  bad_head_dim = head_dim > 128 or head_dim % 8 != 0
  bad_bias_seqlen = (
      is_training and has_bias and (q_seq % 2 != 0 or kv_seq % 2 != 0))
  if bad_head_dim or bad_bias_seqlen:
    raise NotImplementedError(
      f"Unsupported sequence length Q {q_seq}, KV {kv_seq} and head dim {head_dim}.")

  # check if minimum cudnn version requirement is satisfied
  if cudnn_version < 8904:
    raise RuntimeError(
      "JAX requires cuDNN >= 8.9.4 to use flash cross attention.")
def check_cudnn_version():
  """Return the installed cuDNN version; raise RuntimeError if absent."""
  # check if cuDNN is installed
  if cuda_versions is None:
    raise RuntimeError("cuDNN is not detected.")
  return cuda_versions.cudnn_get_version()
def check_compute_capability(capability):
  """Return True if the first local GPU meets `capability` (e.g. "8.0").

  Returns False when the active backend is not CUDA.
  """
  # Idiom fix: `x not in y` instead of `not x in y` (PEP 8).
  if 'cuda' not in xla_bridge.get_backend().platform_version:
    return False
  d, *_ = jax.local_devices(backend="gpu")
  # Compare as integer tuples so e.g. (9, 0) >= (8, 6) works correctly.
  target = tuple(int(x) for x in capability.split("."))
  current = tuple(int(x) for x in d.compute_capability.split("."))
  return current >= target
def _dot_product_attention_fwd(
    query, key, value, bias, q_seqlen, kv_seqlen, scale, seed,
    dropout_rate, variadic_args, mask_type, layout,
    sliding_window_length, cudnn_version):
  """Inference-only forward pass; returns just the attention output."""
  # Reject unsupported patterns before binding the primitive.
  check_is_flash_attention(
      query, key, layout, cudnn_version, bias is not None, False)
  results = _dot_product_attention_fwd_p_wrapper.bind(
      query, key, value, bias, q_seqlen, kv_seqlen, scale=scale,
      seed=seed, dropout_rate=dropout_rate, variadic_args=variadic_args,
      mask_type=mask_type, layout=layout,
      sliding_window_length=sliding_window_length, is_training=False)
  # Only one result (the output) in inference mode.
  return results[0]
def _dot_product_attention_fwd_rule(
    query, key, value, bias, q_seqlen, kv_seqlen, scale, seed,
    dropout_rate, variadic_args, mask_type, layout,
    sliding_window_length, cudnn_version):
  """custom_vjp forward rule: runs the kernel and stashes residuals."""
  # Reject unsupported patterns for the training configuration.
  check_is_flash_attention(
      query, key, layout, cudnn_version, bias is not None, True)
  out, activation = _dot_product_attention_fwd_p_wrapper.bind(
      query, key, value, bias, q_seqlen, kv_seqlen, scale=scale,
      seed=seed, dropout_rate=dropout_rate, variadic_args=variadic_args,
      mask_type=mask_type, layout=layout,
      sliding_window_length=sliding_window_length, is_training=True)
  # Residuals needed by the backward kernel: all inputs plus the softmax
  # statistics and the forward output.
  residuals = (query, key, value, bias, q_seqlen, kv_seqlen,
               activation, out)
  return out, residuals
def _dot_product_attention_bwd_rule(
    scale, seed, dropout_rate, variadic_args, mask_type, layout,
    sliding_window_length, is_training, res, grad_output):
  """custom_vjp backward rule: gradients for the six array inputs."""
  (query, key, value, bias, q_seqlen, kv_seqlen, activation,
   fwd_output) = res
  grads = _dot_product_attention_bwd_p_wrapper.bind(
      query, key, value, bias, q_seqlen, kv_seqlen, activation,
      fwd_output, grad_output, scale=scale, seed=seed,
      dropout_rate=dropout_rate, variadic_args=variadic_args,
      mask_type=mask_type, layout=layout,
      sliding_window_length=sliding_window_length
  )
  # Pad with None so exactly six cotangents come back (dbias is optional;
  # seqlens never get gradients).
  padding = (None,) * (6 - len(grads))
  return tuple(grads) + padding
def _dot_product_attention_fwd_impl(
    query, key, value, bias, q_seqlen, kv_seqlen, scale, seed,
    dropout_rate, variadic_args, mask_type, layout,
    sliding_window_length, is_training):
  """Wrapper impl: forward everything to the underlying fwd primitive."""
  # args: {Q, K, V, mask*, bias*}
  params = dict(
      scale=scale, seed=seed, dropout_rate=dropout_rate,
      variadic_args=variadic_args, mask_type=mask_type, layout=layout,
      sliding_window_length=sliding_window_length, is_training=is_training)
  return _dot_product_attention_fwd_p.bind(
      query, key, value, bias, q_seqlen, kv_seqlen, **params)
def _dot_product_attention_bwd_impl(
    query, key, value, bias, q_seqlen, kv_seqlen, activation, fwd_output,
    grad_output, scale, seed, dropout_rate, variadic_args, mask_type, layout,
    sliding_window_length):
  """Wrapper impl: forward everything to the underlying bwd primitive."""
  params = dict(
      scale=scale, seed=seed, dropout_rate=dropout_rate,
      variadic_args=variadic_args, mask_type=mask_type, layout=layout,
      sliding_window_length=sliding_window_length)
  return _dot_product_attention_bwd_p.bind(
      query, key, value, bias, q_seqlen, kv_seqlen, activation,
      fwd_output, grad_output, **params)
def _dot_product_attention_fwd_abstract(
    query, key, value, bias, q_seqlen, kv_seqlen, *, scale, seed,
    dropout_rate, variadic_args, mask_type, layout,
    sliding_window_length, is_training):
  """Abstract eval for the fwd primitive.

  Returns the output aval (same shape/dtype as query) plus, in training
  mode, the softmax-statistics aval consumed by the backward pass.
  """
  query_dtype = dtypes.canonicalize_dtype(query.dtype)
  if layout == AttentionLayout.BNTH.value:
    B, N, T, _ = query.shape
    _, _, S, _ = key.shape
  else:
    B, T, N, _ = query.shape
    _, S, _, _ = key.shape
  output_shape = query.shape
  # Softmax stats are per (batch, head, query-position) and stored in f32
  # regardless of the input dtype.
  softmax_stat_shape = (B, N, T)

  if is_training:
    return (
        core.ShapedArray(output_shape, query_dtype),  # output
        core.ShapedArray(softmax_stat_shape, jnp.float32),  # softmax_stat
    )
  else:
    return (
        core.ShapedArray(output_shape, query_dtype),  # output
    )
def _dot_product_attention_bwd_abstract(
query, key, value, bias, q_seqlen, kv_seqlen, activation, fwd_output,
grad_output, *, scale, seed, dropout_rate, variadic_args, mask_type,
layout, sliding_window_length):
query_dtype = dtypes.canonicalize_dtype(query.dtype)
key_dtype = dtypes.canonicalize_dtype(key.dtype)
value_dtype = dtypes.canonicalize_dtype(value.dtype)
_, has_dbias = variadic_args
if has_dbias:
# cuDNN supports bias for this case
bias_dtype = dtypes.canonicalize_dtype(bias.dtype)
return (
core.ShapedArray(
query.shape, query_dtype
), # grad query
core.ShapedArray(
key.shape, key_dtype
), # grad key
core.ShapedArray(
value.shape, value_dtype
), # grad value
core.ShapedArray(
bias.shape, bias_dtype
), # grad bias
)
else:
return (
core.ShapedArray(
query.shape, query_dtype
), # grad query
core.ShapedArray(
key.shape, key_dtype
), # grad key
core.ShapedArray(
value.shape, value_dtype
), # grad value
)
def _dot_product_attention_fwd_cuda_lowering(
    ctx, query, key, value, bias, q_seqlen, kv_seqlen, scale, seed,
    dropout_rate, variadic_args, mask_type, layout,
    sliding_window_length, is_training):
  """Lower the fwd primitive to the cuDNN fused-attention custom call."""
  query_type = ir.RankedTensorType(query.type)
  query_shape = query_type.shape
  key_type = ir.RankedTensorType(key.type)
  key_shape = key_type.shape

  if layout == AttentionLayout.BNTH.value:
    B, N, T, H = query_shape
    _, _, S, _ = key_shape
    output_layout = (3, 2, 1, 0)
    output_transpose_perm = mlir.dense_int_array((0, 1, 2, 3))
  else:
    B, T, N, H = query_shape
    _, S, _, _ = key_shape
    # The kernel produces BNTH even for BTNH inputs; record the minor-to-major
    # layout and the permutation that transposes the result back to BTNH.
    output_layout = (3, 1, 2, 0)
    output_transpose_perm = mlir.dense_int_array((0, 2, 1, 3))

  output_shape = (B, N, T, H)
  softmax_stat_shape = (B, N, T)
  workspace_shape = (0,)
  workspace_type = ir.IntegerType.get_unsigned(8)

  backend_config = create_dot_product_attention_backend_config(
      B, N, T, S, query_type.element_type, scale, seed, dropout_rate,
      mask_type, layout, sliding_window_length, is_bwd=False,
  )
  # Operand order expected by the custom call:
  # {Q, K, V, bias*, q_seqlen*, kv_seqlen*}; results are
  # {output, activation*, workspace} (starred entries are conditional).
  has_dropout = dropout_rate > 0
  has_bias, _ = variadic_args
  operands = [query, key, value]
  if has_bias:
    operands.append(bias)
  if has_padding(mask_type):
    operands.append(q_seqlen)
    operands.append(kv_seqlen)
  custom_call_name = get_custom_call_name(has_bias, has_dropout, False)
  # create output types and layouts
  if is_training:
    result_types = [
        ir.RankedTensorType.get(output_shape, query_type.element_type),
        # Softmax statistics are always f32.
        ir.RankedTensorType.get(softmax_stat_shape, ir.F32Type.get()),
        ir.RankedTensorType.get(workspace_shape, workspace_type),
    ]
    result_layouts = [output_layout] + default_layouts(softmax_stat_shape, workspace_shape)
  else:
    result_types = [
        ir.RankedTensorType.get(output_shape, query_type.element_type),
        ir.RankedTensorType.get(workspace_shape, workspace_type)
    ]
    result_layouts = [output_layout] + default_layouts(workspace_shape)
  # create custom call here
  out = mlir.custom_call(
      custom_call_name,
      result_types=result_types,
      operands=operands,
      backend_config=backend_config,
      operand_layouts=default_layouts(
          *[ir.RankedTensorType(operand.type).shape for operand in operands]),
      result_layouts=result_layouts,
  )
  # drop workspace memory
  # output should be (B, T, N, H) instead of (B, N, T, H)
  if is_training:
    return [hlo.transpose(out.results[0], output_transpose_perm), out.results[1]]
  else:
    return [hlo.transpose(out.results[0], output_transpose_perm)]
def _dot_product_attention_bwd_cuda_lowering(
    ctx, query, key, value, bias, q_seqlen, kv_seqlen, activation,
    fwd_output, grad_output, scale, seed, dropout_rate, variadic_args,
    mask_type, layout, sliding_window_length):
  """Lower the bwd primitive to the cuDNN fused-attention-backward call."""
  query_type = ir.RankedTensorType(query.type)
  query_shape = query_type.shape
  key_type = ir.RankedTensorType(key.type)
  key_shape = key_type.shape
  value_type = ir.RankedTensorType(value.type)

  if layout == AttentionLayout.BNTH.value:
    B, q_N, T, H = query_shape
    _, k_N, S, _ = key_shape
    grad_layout = (3, 2, 1, 0)
    grad_transpose_perm = mlir.dense_int_array((0, 1, 2, 3))
  else:
    B, T, q_N, H = query_shape
    _, S, k_N, _ = key_shape
    # Gradients come out of the kernel in BNTH; this permutation maps them
    # back to the BTNH input layout.
    grad_layout = (3, 1, 2, 0)
    grad_transpose_perm = mlir.dense_int_array((0, 2, 1, 3))

  workspace_shape = (0,)
  workspace_type = ir.IntegerType.get_unsigned(8)

  grad_query_shape = (B, q_N, T, H)
  grad_key_shape = (B, k_N, S, H)
  grad_value_shape = (B, k_N, S, H)
  backend_config = create_dot_product_attention_backend_config(
      B, q_N, T, S, query_type.element_type, scale, seed, dropout_rate,
      mask_type, layout, sliding_window_length, is_bwd=True,
  )
  # Operand order expected by the custom call:
  # {Q, K, V, activation, dO, bias*, O, q_seqlen*, kv_seqlen*}; results are
  # {dQ, dK, dV, dbias*, workspace}.
  has_dropout = dropout_rate > 0
  has_bias, has_dbias = variadic_args
  # create operands
  operands = [query, key, value, activation, grad_output]
  if has_bias:
    # flash attention requires bias in the bwd for remat
    operands.append(bias)
  operands.append(fwd_output)
  if has_padding(mask_type):
    operands.append(q_seqlen)
    operands.append(kv_seqlen)
  # get custom call name
  custom_call_name = get_custom_call_name(has_bias, has_dropout, True)
  # create output types and layouts
  # grad_query, grad_key, grad_value
  result_types = [
      ir.RankedTensorType.get(grad_query_shape, query_type.element_type),
      ir.RankedTensorType.get(grad_key_shape, key_type.element_type),
      ir.RankedTensorType.get(grad_value_shape, value_type.element_type),
  ]
  result_layouts = [grad_layout, grad_layout, grad_layout]
  # NOTE(review): bias is always a tensor operand here (callers pass a dummy
  # zeros(0) array when absent), so taking bias.type unconditionally is safe.
  bias_type = ir.RankedTensorType(bias.type)
  bias_shape = bias_type.shape
  if has_dbias:
    # cuDNN supports bias for this case
    result_types.append(
        ir.RankedTensorType.get(bias_shape, bias_type.element_type))
    result_layouts = result_layouts + default_layouts(bias_shape)
  # workspace
  result_types.append(ir.RankedTensorType.get(workspace_shape, workspace_type))
  result_layouts = result_layouts + default_layouts(workspace_shape)
  out = mlir.custom_call(
      custom_call_name,
      result_types=result_types,
      operands=operands,
      backend_config=backend_config,
      operand_layouts=default_layouts(
          *[ir.RankedTensorType(operand.type).shape for operand in operands]),
      result_layouts=result_layouts,
  )
  dqkv = (hlo.transpose(out.results[0], grad_transpose_perm),
          hlo.transpose(out.results[1], grad_transpose_perm),
          hlo.transpose(out.results[2], grad_transpose_perm))
  # Only keep dQ, dK, dV and dBias here
  if has_dbias:
    return dqkv + (out.results[3],)
  else:
    return dqkv
# batcher
def _check_valid_batch_dims(bdims):
for dim in bdims:
if dim not in [0, None]:
raise NotImplementedError(
f"Currently only support batch_dim in [0, None], but got {dim=}")
def _dot_product_attention_fwd_batcher(
    batched_args, batch_dims, *, scale, seed, dropout_rate, variadic_args,
    mask_type, layout, sliding_window_length, is_training):
  """vmap rule for the fwd wrapper: folds all leading batch dims into B."""
  _check_valid_batch_dims(batch_dims)
  query, key, value, bias, q_seqlen, kv_seqlen = batched_args
  query_bdim = batch_dims[0]
  if is_training:
    out_bdims = query_bdim, query_bdim
  else:
    out_bdims = (query_bdim,)

  if layout == AttentionLayout.BNTH.value:
    *Bs, N, T, _ = query.shape
    *_, _, S, _ = key.shape
  else:
    *Bs, T, N, _ = query.shape
    *_, S, _, _ = key.shape
  # Collapse every leading (mapped + original) batch dim into one.
  B = math.prod(Bs)
  has_bias, _ = variadic_args
  original_shape = query.shape
  # reshape to 4D shape
  query = jnp.reshape(query, (B,) + query.shape[-3:])
  key = jnp.reshape(key, (B,) + key.shape[-3:])
  # NOTE(review): V is reshaped with key.shape's trailing dims — safe iff K
  # and V share (S, N, H), which the kernel requires anyway.
  value = jnp.reshape(value, (B,) + key.shape[-3:])
  if has_bias and batch_dims[3] is not None:
    bias = jnp.reshape(bias, (B, N, T, S))
  if has_padding(mask_type):
    q_seqlen = jnp.reshape(q_seqlen, (B, ))
    kv_seqlen = jnp.reshape(kv_seqlen, (B, ))

  outputs = _dot_product_attention_fwd_p_wrapper.bind(
      query, key, value, bias, q_seqlen, kv_seqlen, scale=scale,
      seed=seed, dropout_rate=dropout_rate, variadic_args=variadic_args,
      mask_type=mask_type, layout=layout,
      sliding_window_length=sliding_window_length, is_training=is_training)

  # reshape to original shape
  output = outputs[0]
  output = jnp.reshape(output, original_shape)
  if is_training:
    activation = outputs[1]
    activation = jnp.reshape(activation, (*Bs, N, T))
    return (output, activation), out_bdims
  else:
    return (output,), out_bdims
def _dot_product_attention_bwd_batcher(
    batched_args, batch_dims, *, scale, seed, dropout_rate, variadic_args,
    mask_type, layout, sliding_window_length):
  """vmap rule for the bwd wrapper: folds all leading batch dims into B."""
  _check_valid_batch_dims(batch_dims)
  query, key, value, bias, q_seqlen, \
      kv_seqlen, activation, fwd_output, grad_output = batched_args
  query_bdim = batch_dims[0]
  out_bdims = query_bdim, query_bdim, query_bdim

  if layout == AttentionLayout.BNTH.value:
    *Bs, N, T, _ = query.shape
    *_, _, S, _ = key.shape
  else:
    *Bs, T, N, _ = query.shape
    *_, S, _, _ = key.shape
  B = math.prod(Bs)
  has_bias, has_dbias = variadic_args
  # Reset the has_dbias if the combined batch size is not 1, because cuDNN only
  # supports dbias with a single batch. In this case, an all-zero dbias will be
  # appended instead.
  if B > 1:
    variadic_args = (has_bias, False)
  original_query_shape = query.shape
  original_key_shape = key.shape
  original_value_shape = value.shape
  original_bias_shape = bias.shape if has_bias else None
  # reshape to 4D shape
  query = jnp.reshape(query, (B,) + query.shape[-3:])
  key = jnp.reshape(key, (B,) + key.shape[-3:])
  # NOTE(review): V reshaped with key's trailing dims — K and V share (S, N, H).
  value = jnp.reshape(value, (B,) + key.shape[-3:])
  if has_bias and batch_dims[3] is not None:
    bias = jnp.reshape(bias, (B, N, T, S))
  if has_padding(mask_type):
    q_seqlen = jnp.reshape(q_seqlen, (B, ))
    kv_seqlen = jnp.reshape(kv_seqlen, (B, ))

  activation = jnp.reshape(activation, (B, N, T))
  # query is already 4D here, so query.shape[-3:] matches the kernel layout.
  fwd_output = jnp.reshape(fwd_output, (B,) + query.shape[-3:])
  grad_output = jnp.reshape(grad_output, (B,) + query.shape[-3:])

  grads = _dot_product_attention_bwd_p_wrapper.bind(
      query, key, value, bias, q_seqlen, kv_seqlen, activation,
      fwd_output, grad_output, scale=scale, seed=seed,
      dropout_rate=dropout_rate, variadic_args=variadic_args,
      mask_type=mask_type, layout=layout,
      sliding_window_length=sliding_window_length,
  )

  # reshape to original shape
  grads[0] = jnp.reshape(grads[0], original_query_shape)
  grads[1] = jnp.reshape(grads[1], original_key_shape)
  grads[2] = jnp.reshape(grads[2], original_value_shape)
  if has_dbias:
    assert has_bias
    if variadic_args[1]:
      # dbias really came from the kernel (B == 1): restore its shape.
      grads[3] = jnp.reshape(grads[3], original_bias_shape)
    else:
      # dbias was disabled above (B > 1): substitute an all-zero gradient.
      grads.append(jnp.zeros(original_bias_shape, bias.dtype))
    out_bdims += (batch_dims[3],)
  return grads, out_bdims
# custom partitioning
def _get_padded_spec(arg_info):
spec = None if arg_info.sharding is None else arg_info.sharding.spec
ndim = arg_info.ndim
if spec is None:
return (None,) * ndim
assert len(spec) <= ndim
return spec + (None,) * (ndim - len(spec))
def _check_qkv_bias_mask_spec(
query_spec, key_spec, value_spec, bias_spec):
# check qkv spec
if not query_spec == key_spec == value_spec:
raise ValueError("Query, key and value should have same sharding.")
*batch_spec, q_seq_spec, num_head_spec, head_spec = query_spec
if q_seq_spec is not None:
raise ValueError("Sharding on sequence dim is not allowed.")
if head_spec is not None:
raise ValueError("Sharding on head dim is not allowed.")
# check bias spec
if bias_spec:
*bias_batch_spec, bias_num_head_spec, bias_q_seq_spec, bias_kv_seq_spec = bias_spec
if any(bias_batch_spec) and bias_batch_spec != batch_spec or \
bias_num_head_spec is not None and bias_num_head_spec != num_head_spec:
raise ValueError(
"Query and bias should have same sharding on batch and num_head dim.")
if bias_q_seq_spec is not None or bias_kv_seq_spec is not None:
raise ValueError("Sharding on bias sequence dim is not allowed.")
# fwd custom partition
def _infer_fwd_output_sharding(mesh, arg_shapes, variadic_args, is_training):
  """Derive the fwd op's output shardings from its operand shardings."""
  # only sharding on batch and num_head dim is allowed
  # (*batch, q_seq, num_head, head)
  query_spec = _get_padded_spec(arg_shapes[0])
  # (*batch, kv_seq, num_head, head)
  key_spec = _get_padded_spec(arg_shapes[1])
  value_spec = _get_padded_spec(arg_shapes[2])
  has_bias, _ = variadic_args
  bias_spec = _get_padded_spec(arg_shapes[3]) if has_bias else None
  _check_qkv_bias_mask_spec(
      query_spec, key_spec, value_spec, bias_spec)
  # keep out sharding same as query sharding since they have same shape
  out_sharding = NamedSharding(mesh, PartitionSpec(*query_spec))
  if is_training:
    # activation sharding: (*batch, num_head, q_seq) — head dims swapped
    # relative to the query spec.
    # NOTE(review): the spec carries one more entry than the rank-3
    # activation; confirm the trailing None is accepted by PartitionSpec.
    *batch_spec, q_seq_spec, num_head_spec, _ = query_spec
    activation_sharding = NamedSharding(
        mesh, PartitionSpec(*batch_spec, num_head_spec, q_seq_spec, None))
    return [out_sharding, activation_sharding]
  return [out_sharding]
# Wrap the fwd impl in custom_partitioning; all scalar/config arguments
# (everything after the six array operands) are static.
_dot_product_attention_fwd_lower = custom_partitioning(
    _dot_product_attention_fwd_impl, static_argnums=(6, 7, 8, 9, 10, 11, 12, 13))
def _dot_product_attention_fwd_infer_sharding_from_operands(
    scale, seed, dropout_rate, variadic_args, mask_type, layout, sliding_window_length,
    is_training, mesh, arg_shapes, result_shape):
  """custom_partitioning hook: infer fwd output shardings from operands."""
  return _infer_fwd_output_sharding(mesh, arg_shapes, variadic_args, is_training)
def _dot_product_attention_fwd_partition(
    scale, seed, dropout_rate, variadic_args, mask_type, layout, sliding_window_length,
    is_training, mesh, arg_shapes, result_shape):
  """custom_partitioning hook: per-shard impl plus in/out shardings."""
  # args sharding
  arg_shardings = tuple(arg_i.sharding for arg_i in arg_shapes)
  out_shardings = _infer_fwd_output_sharding(
      mesh, arg_shapes, variadic_args, is_training)
  # Bind all static config into the impl; the sharded call passes arrays only.
  impl = functools.partial(
      _dot_product_attention_fwd_impl,
      scale=scale,
      seed=seed,
      dropout_rate=dropout_rate,
      variadic_args=variadic_args,
      mask_type=mask_type,
      layout=layout,
      sliding_window_length=sliding_window_length,
      is_training=is_training,
  )
  return mesh, impl, out_shardings, arg_shardings
# bwd custom partition
def _infer_bwd_output_sharding(mesh, arg_shapes, variadic_args):
  """Derive the bwd op's gradient shardings from its operand shardings."""
  # (*batch, q_seq, num_head, head)
  query_spec = _get_padded_spec(arg_shapes[0])
  # (*batch, kv_seq, num_head, head)
  key_spec = _get_padded_spec(arg_shapes[1])
  value_spec = _get_padded_spec(arg_shapes[2])
  has_bias, has_dbias = variadic_args
  bias_spec = _get_padded_spec(arg_shapes[3]) if has_bias else None
  _check_qkv_bias_mask_spec(
      query_spec, key_spec, value_spec, bias_spec)
  # Each gradient is sharded like its corresponding input (dV like K since
  # K and V share sharding per the check above).
  grad_query_sharding = NamedSharding(mesh, PartitionSpec(*query_spec))
  grad_key_sharding = NamedSharding(mesh, PartitionSpec(*key_spec))
  grad_value_sharding = NamedSharding(mesh, PartitionSpec(*key_spec))
  out_shardings = [grad_query_sharding, grad_key_sharding, grad_value_sharding]
  if has_dbias:
    grad_bias_sharding = NamedSharding(mesh, PartitionSpec(*bias_spec))
    out_shardings = out_shardings + [grad_bias_sharding]
  return out_shardings
# Wrap the bwd impl in custom_partitioning; all scalar/config arguments
# (everything after the nine array operands) are static.
_dot_product_attention_bwd_lower = custom_partitioning(
    _dot_product_attention_bwd_impl, static_argnums=(9, 10, 11, 12, 13, 14, 15)
)
def _dot_product_attention_bwd_infer_sharding_from_operands(
    scale, seed, dropout_rate, variadic_args, mask_type, layout,
    sliding_window_length, mesh, arg_shapes, result_shape):
  """custom_partitioning hook: infer bwd gradient shardings from operands."""
  return _infer_bwd_output_sharding(mesh, arg_shapes, variadic_args)
def _dot_product_attention_bwd_partition(
    scale, seed, dropout_rate, variadic_args, mask_type, layout,
    sliding_window_length, mesh, arg_shapes, result_shape):
  """custom_partitioning hook: shard the bwd impl, all-reducing dbias."""
  out_shardings = _infer_bwd_output_sharding(mesh, arg_shapes, variadic_args)
  # args sharding
  arg_shardings = tuple(arg_i.sharding for arg_i in arg_shapes)
  def sharded_impl(*args):
    impl = functools.partial(
        _dot_product_attention_bwd_impl,
        scale=scale,
        seed=seed,
        dropout_rate=dropout_rate,
        variadic_args=variadic_args,
        mask_type=mask_type,
        layout=layout,
        sliding_window_length=sliding_window_length,
    )
    grads = impl(*args)
    _, has_dbias = variadic_args
    if has_dbias:
      # dbias is computed per shard; sum over the batch mesh axis to get the
      # global bias gradient.
      query_spec = arg_shardings[0].spec
      batch_spec = query_spec[0]
      local_dbias = grads[3]
      global_dbias = jax.lax.psum(local_dbias, batch_spec)
      grads = grads[:3] + [global_dbias]
    return grads
  return mesh, sharded_impl, out_shardings, arg_shardings
# Create dot_product_attention_fwd_p for forward operation.
_dot_product_attention_fwd_p = core.Primitive("dot_product_attention_fwd")
_dot_product_attention_fwd_p.multiple_results = True
_dot_product_attention_fwd_p.def_impl(
    functools.partial(xla.apply_primitive, _dot_product_attention_fwd_p)
)
_dot_product_attention_fwd_p.def_abstract_eval(
    _dot_product_attention_fwd_abstract
)

mlir.register_lowering(
    _dot_product_attention_fwd_p,
    _dot_product_attention_fwd_cuda_lowering,
    platform="cuda",
)

# Wrapper primitive: the vmapped/partitioned entry point that eventually
# lowers to the raw fwd primitive above.
_dot_product_attention_fwd_p_wrapper = core.Primitive(
    "dot_product_attention_fwd_wrapper"
)
_dot_product_attention_fwd_p_wrapper.multiple_results = True
_dot_product_attention_fwd_p_wrapper.def_impl(_dot_product_attention_fwd_impl)
_dot_product_attention_fwd_p_wrapper.def_abstract_eval(
    _dot_product_attention_fwd_abstract
)

# Create dot_product_attention_bwd_p for backward operation.
_dot_product_attention_bwd_p = core.Primitive("dot_product_attention_bwd")
_dot_product_attention_bwd_p.multiple_results = True
_dot_product_attention_bwd_p.def_impl(
    functools.partial(xla.apply_primitive, _dot_product_attention_bwd_p)
)
_dot_product_attention_bwd_p.def_abstract_eval(
    _dot_product_attention_bwd_abstract
)

mlir.register_lowering(
    _dot_product_attention_bwd_p,
    _dot_product_attention_bwd_cuda_lowering,
    platform="cuda",
)

_dot_product_attention_bwd_p_wrapper = core.Primitive(
    "dot_product_attention_bwd_wrapper"
)
_dot_product_attention_bwd_p_wrapper.multiple_results = True
_dot_product_attention_bwd_p_wrapper.def_impl(_dot_product_attention_bwd_impl)
_dot_product_attention_bwd_p_wrapper.def_abstract_eval(
    _dot_product_attention_bwd_abstract
)

# vmap rules for the wrapper primitives.
batching.primitive_batchers[
    _dot_product_attention_fwd_p_wrapper
] = _dot_product_attention_fwd_batcher
batching.primitive_batchers[
    _dot_product_attention_bwd_p_wrapper
] = _dot_product_attention_bwd_batcher

# Hook the custom-partitioning wrappers up as the lowering of the wrapper
# primitives.
_dot_product_attention_fwd_lower.def_partition(
    infer_sharding_from_operands=_dot_product_attention_fwd_infer_sharding_from_operands,
    partition=_dot_product_attention_fwd_partition)

mlir.register_lowering(_dot_product_attention_fwd_p_wrapper,
                       mlir.lower_fun(_dot_product_attention_fwd_lower, multiple_results=True))

_dot_product_attention_bwd_lower.def_partition(
    infer_sharding_from_operands=_dot_product_attention_bwd_infer_sharding_from_operands,
    partition=_dot_product_attention_bwd_partition)

mlir.register_lowering(_dot_product_attention_bwd_p_wrapper,
                       mlir.lower_fun(_dot_product_attention_bwd_lower, multiple_results=True))

# Lowering these primitives needs access to the devices.
dispatch.prim_requires_devices_during_lowering.add(
    _dot_product_attention_fwd_p
)
dispatch.prim_requires_devices_during_lowering.add(
    _dot_product_attention_fwd_p_wrapper
)
dispatch.prim_requires_devices_during_lowering.add(
    _dot_product_attention_bwd_p
)
dispatch.prim_requires_devices_during_lowering.add(
    _dot_product_attention_bwd_p_wrapper
)
@functools.partial(jax.custom_vjp, nondiff_argnums=(6, 7, 8, 9, 10, 11, 12, 13))
def _dot_product_attention(query: Array,
                           key: Array,
                           value: Array,
                           bias: Array,
                           q_seqlen: Array,
                           kv_seqlen: Array,
                           scale: float,
                           seed: int,
                           dropout_rate: float,
                           variadic_args: tuple[bool, ...],
                           mask_type: bool,
                           layout: int,
                           sliding_window_length: int | None,
                           cudnn_version: int):
  """Differentiable core op: fwd kernel with a custom VJP for the bwd kernel.

  Only the first six (array) arguments are differentiable; all config
  arguments are declared non-differentiable via nondiff_argnums.
  """
  output = _dot_product_attention_fwd(
      query, key, value, bias, q_seqlen, kv_seqlen, scale=scale,
      seed=seed, dropout_rate=dropout_rate, variadic_args=variadic_args,
      mask_type=mask_type, layout=layout, sliding_window_length=sliding_window_length,
      cudnn_version=cudnn_version)
  return output

# _dot_product_attention_fwd must have the same func signature as _dot_product_attention
_dot_product_attention.defvjp(_dot_product_attention_fwd_rule, _dot_product_attention_bwd_rule)
# User interface
def dot_product_attention(query: Array,
                          key: Array,
                          value: Array,
                          bias: Array | None = None,
                          mask: Array | None = None,
                          q_seqlen: Array | None = None,
                          kv_seqlen: Array | None = None,
                          *,
                          scale: float = 1.0,
                          mask_type: MaskType = MaskType.NO_MASK,
                          seed: int = 42,
                          dropout_rate: float = 0.,
                          qkv_layout: str = "BTNH",
                          sliding_window_length: int | None = None):
  """Computes dot-product attention given query (Q), key (K), and value (V).

  This function serves as the core operation for applying attention
  mechanisms as described in the paper [https://arxiv.org/abs/1706.03762].
  Initially, it determines the attention weights by processing Q and K,
  subsequently combining the outcomes using V. Throughout this function, we
  utilize the following uppercase letters to represent specific parameters of
  array:

    B = batch size
    S = length of the key/value (source)
    T = length of the query (target)
    N = number of attention heads
    H = dimensions of each attention head.

  The supported layouts for Q, K, V are either BT(S)NH or BNT(S)H, and they must
  adhere to the same layout. The output layout remains consistent with Q,
  defaulting to BT(S)NH.

  Args:
    query: Queries for attention calculation with a shape of BTNH or BNTH.
    key: Keys for attention calculation with a shape of BSNH or BNSH.
    value: Values to be used in attention with a shape of BSNH or BNSH.
    bias: Bias to be added to logits with a shape of BNTS.
    mask: Mask used to filter out logits with a shape of BNTS.
    q_seqlen: Non padded sequence length of Queries with a shape of B.
    kv_seqlen: Non padded sequence length of Keys and Values with a shape of B.
    scale: Scale for the query.
    mask_type: Kind of built-in mask to apply; a padding mask additionally
      requires q_seqlen and kv_seqlen.
    seed: Seed forwarded to the kernel together with dropout_rate;
      presumably drives the dropout RNG (only relevant when
      dropout_rate > 0).
    dropout_rate: Dropout rate.
    qkv_layout: Layout string, with supported formats being BTNH, BNTH, BSNH,
      BNSH.
    sliding_window_length: Window size to make attention only attend to each
      token's left local window (pos - sliding_window_length, pos] where `pos`
      is the index of each token. E.g., if sliding_window_length == 3 and the
      sequence is [0, 1, 2, 3, c, 4, 5], token `c` can attend to [4, 5, c].

  Returns:
    Output of the same shape as the query.
  """
  # TODO(b/380898464): Check the compute capability, e.g., require GPU device,
  # in the kernel implementation (c++) code.
  cudnn_version = check_cudnn_version()
  layout = _normalize_layout(qkv_layout)
  if has_padding(mask_type) and (q_seqlen is None or kv_seqlen is None):
    raise ValueError("Require q_seqlen and kv_seqlen to generate padding mask")
  if sliding_window_length is not None and sliding_window_length <= 0:
    raise ValueError(
        f"Require sliding_window_length > 0, got {sliding_window_length}")

  if bias is not None:
    # reshape bias to have 4D shape
    bias = bias.reshape((1,) * (4 - len(bias.shape)) + bias.shape)

  if mask is not None:
    if mask.dtype == jnp.bool:
      # Convert a boolean mask into an additive mask: 0 where attended,
      # a large negative number where masked out.
      large_negative_number = get_large_negative_number(query.dtype)
      mask = jnp.where(mask, jnp.asarray(0, query.dtype), large_negative_number)
    # reshape mask to have 4D shape
    mask = mask.reshape((1,) * (4 - len(mask.shape)) + mask.shape)  # type: ignore[union-attr]

  # combine bias and mask
  if bias is None:
    bias = mask
  else:
    if mask is not None:
      # should be broadcast to same shape
      bias = bias + mask

  # check if input shape and data type is compatible
  check_layout(query, key, value, bias, q_seqlen, kv_seqlen, layout)
  has_bias = bias is not None
  has_dbias = has_bias and \
      should_export_dbias(bias.shape, query.shape, layout)  # type: ignore[union-attr]
  variadic_args = (has_bias, has_dbias)
  # Replace absent optional operands with dummy zero-size arrays so the
  # primitive always receives six array operands.
  if bias is None:
    bias = jnp.zeros(0, dtype=query.dtype)
  if q_seqlen is None:
    q_seqlen = jnp.zeros(0, dtype=query.dtype)
  if kv_seqlen is None:
    kv_seqlen = jnp.zeros(0, dtype=query.dtype)
  output = _dot_product_attention(
      query, key, value, bias, q_seqlen, kv_seqlen, scale, seed,
      dropout_rate, variadic_args, mask_type, layout.value, sliding_window_length,
      cudnn_version)
  return output
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@cudnn@fused_attention_stablehlo.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "guillochon/MOSFiT",
"repo_path": "MOSFiT_extracted/MOSFiT-master/mosfit/modules/seds/__init__.py",
"type": "Python"
}
|
"""Initilization procedure for `SED` modules."""
import inspect
import os
import sys
# Directory containing this package's modules.
path = os.path.dirname(os.path.abspath(__file__))

__all__ = []
# Import every sibling .py module and re-export each class/function that is
# defined in that module (not merely imported into it), so that
# `from ...seds import *` exposes all SED implementations.
for py in [
    f[:-3] for f in os.listdir(path)
    if f.endswith('.py') and f != '__init__.py'
]:
    mod = __import__('.'.join([__name__, py]), fromlist=[py])
    classes = [
        x[1] for x in inspect.getmembers(mod)
        # getmodule check filters out names the module itself imported.
        if (inspect.isroutine(x[1]) or inspect.isclass(x[1])
            ) and inspect.getmodule(x[1]) == mod
    ]
    for cls in classes:
        __all__.append(cls.__name__)
        setattr(sys.modules[__name__], cls.__name__, cls)
|
guillochonREPO_NAMEMOSFiTPATH_START.@MOSFiT_extracted@MOSFiT-master@mosfit@modules@seds@__init__.py@.PATH_END.py
|
{
"filename": "huber.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py2/sklearn/linear_model/huber.py",
"type": "Python"
}
|
# Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from ..base import BaseEstimator, RegressorMixin
from .base import LinearModel
from ..utils import check_X_y
from ..utils import check_consistent_length
from ..utils import axis0_safe_slice
from ..utils.extmath import safe_sparse_dot
def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
    """Returns the Huber loss and the gradient.

    Parameters
    ----------
    w : ndarray, shape (n_features + 1,) or (n_features + 2,)
        Feature vector.
        w[:n_features] gives the coefficients
        w[-1] gives the scale factor and if the intercept is fit w[-2]
        gives the intercept factor.

    X : ndarray, shape (n_samples, n_features)
        Input data.

    y : ndarray, shape (n_samples,)
        Target vector.

    epsilon : float
        Robustness of the Huber estimator.

    alpha : float
        Regularization parameter.

    sample_weight : ndarray, shape (n_samples,), optional
        Weight assigned to each sample.

    Returns
    -------
    loss : float
        Huber loss.

    gradient : ndarray, shape (len(w))
        Returns the derivative of the Huber loss with respect to each
        coefficient, intercept and the scale as a vector.
    """
    # NOTE(review): X_is_sparse is never used below; sparsity is handled by
    # axis0_safe_slice / safe_sparse_dot directly.
    X_is_sparse = sparse.issparse(X)
    _, n_features = X.shape
    # The intercept is fit iff w has one extra element beyond the
    # coefficients and the scale.
    fit_intercept = (n_features + 2 == w.shape[0])
    if fit_intercept:
        intercept = w[-2]
    sigma = w[-1]
    w = w[:n_features]
    n_samples = np.sum(sample_weight)

    # Calculate the values where |y - X'w -c / sigma| > epsilon
    # The values above this threshold are outliers.
    linear_loss = y - safe_sparse_dot(X, w)
    if fit_intercept:
        linear_loss -= intercept
    abs_linear_loss = np.abs(linear_loss)
    outliers_mask = abs_linear_loss > epsilon * sigma

    # Calculate the linear loss due to the outliers.
    # This is equal to (2 * M * |y - X'w -c / sigma| - M**2) * sigma
    outliers = abs_linear_loss[outliers_mask]
    num_outliers = np.count_nonzero(outliers_mask)
    n_non_outliers = X.shape[0] - num_outliers

    # n_sw_outliers includes the weight given to the outliers while
    # num_outliers is just the number of outliers.
    outliers_sw = sample_weight[outliers_mask]
    n_sw_outliers = np.sum(outliers_sw)
    outlier_loss = (2. * epsilon * np.sum(outliers_sw * outliers) -
                    sigma * n_sw_outliers * epsilon ** 2)

    # Calculate the quadratic loss due to the non-outliers.
    # This is equal to |(y - X'w - c)**2 / sigma**2| * sigma
    non_outliers = linear_loss[~outliers_mask]
    weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers
    weighted_loss = np.dot(weighted_non_outliers.T, non_outliers)
    squared_loss = weighted_loss / sigma

    if fit_intercept:
        grad = np.zeros(n_features + 2)
    else:
        grad = np.zeros(n_features + 1)

    # Gradient due to the squared loss.
    X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers)
    grad[:n_features] = (
        2. / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers))

    # Gradient due to the linear loss.
    signed_outliers = np.ones_like(outliers)
    signed_outliers_mask = linear_loss[outliers_mask] < 0
    signed_outliers[signed_outliers_mask] = -1.0
    X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers)
    sw_outliers = sample_weight[outliers_mask] * signed_outliers
    grad[:n_features] -= 2. * epsilon * (
        safe_sparse_dot(sw_outliers, X_outliers))

    # Gradient due to the penalty.
    grad[:n_features] += alpha * 2. * w

    # Gradient due to sigma.
    grad[-1] = n_samples
    grad[-1] -= n_sw_outliers * epsilon ** 2
    grad[-1] -= squared_loss / sigma

    # Gradient due to the intercept.
    if fit_intercept:
        grad[-2] = -2. * np.sum(weighted_non_outliers) / sigma
        grad[-2] -= 2. * epsilon * np.sum(sw_outliers)

    loss = n_samples * sigma + squared_loss + outlier_loss
    loss += alpha * np.dot(w, w)
    return loss, grad
class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator):
    """Linear regression model that is robust to outliers.

    The Huber Regressor optimizes the squared loss for the samples where
    ``|(y - X'w) / sigma| < epsilon`` and the absolute loss for the samples
    where ``|(y - X'w) / sigma| > epsilon``, where w and sigma are parameters
    to be optimized. The parameter sigma makes sure that if y is scaled up
    or down by a certain factor, one does not need to rescale epsilon to
    achieve the same robustness. Note that this does not take into account
    the fact that the different features of X may be of different scales.

    This makes sure that the loss function is not heavily influenced by the
    outliers while not completely ignoring their effect.

    Read more in the :ref:`User Guide <huber_regression>`

    .. versionadded:: 0.18

    Parameters
    ----------
    epsilon : float, greater than or equal to 1.0, default 1.35
        The parameter epsilon controls the number of samples that should be
        classified as outliers. The smaller the epsilon, the more robust it is
        to outliers.
    max_iter : int, default 100
        Maximum number of iterations that scipy.optimize.fmin_l_bfgs_b
        should run for.
    alpha : float, default 0.0001
        Regularization parameter.
    warm_start : bool, default False
        This is useful if the stored attributes of a previously used model
        has to be reused. If set to False, then the coefficients will
        be rewritten for every call to fit.
    fit_intercept : bool, default True
        Whether or not to fit the intercept. This can be set to False
        if the data is already centered around the origin.
    tol : float, default 1e-5
        The iteration will stop when
        ``max{|proj g_i | i = 1, ..., n}`` <= ``tol``
        where pg_i is the i-th component of the projected gradient.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Features got by optimizing the Huber loss.
    intercept_ : float
        Bias.
    scale_ : float
        The value by which ``|y - X'w - c|`` is scaled down.
    n_iter_ : int
        Number of iterations that fmin_l_bfgs_b has run for.
        Not available if SciPy version is 0.9 and below.
    outliers_ : array, shape (n_samples,)
        A boolean mask which is set to True where the samples are identified
        as outliers.

    References
    ----------
    .. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics
           Concomitant scale estimates, pg 172
    .. [2] Art B. Owen (2006), A robust hybrid of lasso and ridge regression.
           http://statweb.stanford.edu/~owen/reports/hhu.pdf
    """

    def __init__(self, epsilon=1.35, max_iter=100, alpha=0.0001,
                 warm_start=False, fit_intercept=True, tol=1e-05):
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.alpha = alpha
        self.warm_start = warm_start
        self.fit_intercept = fit_intercept
        self.tol = tol

    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        sample_weight : array-like, shape (n_samples,)
            Weight given to each sample.

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(
            X, y, copy=False, accept_sparse=['csr'], y_numeric=True)
        if sample_weight is not None:
            sample_weight = np.array(sample_weight)
            check_consistent_length(y, sample_weight)
        else:
            # Unweighted fit: every sample gets unit weight.
            sample_weight = np.ones_like(y)

        if self.epsilon < 1.0:
            raise ValueError(
                "epsilon should be greater than or equal to 1.0, got %f"
                % self.epsilon)

        if self.warm_start and hasattr(self, 'coef_'):
            # Resume from the previous solution.
            # Parameter vector layout: [coef..., (intercept,) scale].
            parameters = np.concatenate(
                (self.coef_, [self.intercept_, self.scale_]))
        else:
            if self.fit_intercept:
                parameters = np.zeros(X.shape[1] + 2)
            else:
                parameters = np.zeros(X.shape[1] + 1)
            # Make sure to initialize the scale parameter to a strictly
            # positive value:
            parameters[-1] = 1

        # Sigma or the scale factor should be non-negative.
        # Setting it to be zero might cause undefined bounds hence we set it
        # to a value close to zero.
        bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1))
        bounds[-1][0] = np.finfo(np.float64).eps * 10

        # Type Error caused in old versions of SciPy because of no
        # maxiter argument ( <= 0.9).
        try:
            parameters, f, dict_ = optimize.fmin_l_bfgs_b(
                _huber_loss_and_gradient, parameters,
                args=(X, y, self.epsilon, self.alpha, sample_weight),
                maxiter=self.max_iter, pgtol=self.tol, bounds=bounds,
                iprint=0)
        except TypeError:
            # Fallback for SciPy <= 0.9: retry without maxiter/pgtol.
            parameters, f, dict_ = optimize.fmin_l_bfgs_b(
                _huber_loss_and_gradient, parameters,
                args=(X, y, self.epsilon, self.alpha, sample_weight),
                bounds=bounds)

        # warnflag == 2 signals abnormal termination of the solver.
        if dict_['warnflag'] == 2:
            raise ValueError("HuberRegressor convergence failed:"
                             " l-BFGS-b solver terminated with %s"
                             % dict_['task'].decode('ascii'))
        # 'nit' is missing from the info dict in old SciPy versions.
        self.n_iter_ = dict_.get('nit', None)
        self.scale_ = parameters[-1]
        if self.fit_intercept:
            self.intercept_ = parameters[-2]
        else:
            self.intercept_ = 0.0
        self.coef_ = parameters[:X.shape[1]]

        # Flag samples whose absolute residual exceeds epsilon * scale.
        residual = np.abs(
            y - safe_sparse_dot(X, self.coef_) - self.intercept_)
        self.outliers_ = residual > self.scale_ * self.epsilon
        return self
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py2@sklearn@linear_model@huber.py@.PATH_END.py
|
{
"filename": "test_centroidOtsu.py",
"repo_name": "lsst-ts/ts_wep",
"repo_path": "ts_wep_extracted/ts_wep-main/tests/centroid/test_centroidOtsu.py",
"type": "Python"
}
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import unittest
import numpy as np
from lsst.ts.wep.centroid import CentroidOtsu
from lsst.ts.wep.utils import getModulePath
class TestCentroidOtsu(unittest.TestCase):
    """Unit tests for the CentroidOtsu centroid finder."""

    def setUp(self):
        self.centroid = CentroidOtsu()

    def testGetCenterAndR(self):
        donut = self._prepareDonutImg(1000)
        cx, cy, radius = self.centroid.getCenterAndR(donut)
        for measured, expected in ((cx, 59.7526), (cy, 59.3366), (radius, 47.6698)):
            self.assertAlmostEqual(measured, expected, places=3)

    def _prepareDonutImg(self, seed):
        # Load the reference intra-focal donut image shipped with the tests.
        parts = ("tests", "testData", "testImages", "LSST_NE_SN25", "z11_0.25_intra.txt")
        image = np.loadtxt(os.path.join(getModulePath(), *parts))
        # The "txt" file stores the image as
        # I[0,0] I[0,1]
        # I[1,0] I[1,1]
        # so flip it vertically before use.
        image = image[::-1, :]
        # Add reproducible noise to simulate the amplifier image.
        np.random.seed(seed=seed)
        nRows, nCols = image.shape
        return image + np.random.rand(nRows, nCols) * 10
if __name__ == "__main__":
    # Run the unit tests when this module is executed as a script.
    unittest.main()
|
lsst-tsREPO_NAMEts_wepPATH_START.@ts_wep_extracted@ts_wep-main@tests@centroid@test_centroidOtsu.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/layout/slider/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.slider"
_path_str = "layout.slider.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
"Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
"Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# lineposition
# ------------
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
# shadow
# ------
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# style
# -----
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
# textcase
# --------
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
# variant
# -------
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
# weight
# ------
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font of the slider step labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.slider.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.slider.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.slider.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("lineposition", None)
_v = lineposition if lineposition is not None else _v
if _v is not None:
self["lineposition"] = _v
_v = arg.pop("shadow", None)
_v = shadow if shadow is not None else _v
if _v is not None:
self["shadow"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("style", None)
_v = style if style is not None else _v
if _v is not None:
self["style"] = _v
_v = arg.pop("textcase", None)
_v = textcase if textcase is not None else _v
if _v is not None:
self["textcase"] = _v
_v = arg.pop("variant", None)
_v = variant if variant is not None else _v
if _v is not None:
self["variant"] = _v
_v = arg.pop("weight", None)
_v = weight if weight is not None else _v
if _v is not None:
self["weight"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@layout@slider@_font.py@.PATH_END.py
|
{
"filename": "_coloraxis.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterternary/marker/_coloraxis.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the ``coloraxis`` subplot-id property of
    ``scatterternary.marker`` (auto-generated plotly validator).

    Accepts ``coloraxis``, ``coloraxis2``, ``coloraxis3`` ... per the regex
    below; callers may override any default via ``kwargs``.
    """

    def __init__(
        self, plotly_name="coloraxis", parent_name="scatterternary.marker", **kwargs
    ):
        super(ColoraxisValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dflt=kwargs.pop("dflt", None),
            edit_type=kwargs.pop("edit_type", "calc"),
            regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterternary@marker@_coloraxis.py@.PATH_END.py
|
{
"filename": "read_gnu.py",
"repo_name": "AMReX-Astro/Castro",
"repo_path": "Castro_extracted/Castro-main/Exec/radiation_tests/RadSuOlsonMG/python/read_gnu.py",
"type": "Python"
}
|
#!/usr/bin/python
from numpy import *
def read_gnu_file(filenm):
    """Read a gnuplot-style curve file.

    Parameters
    ----------
    filenm : str
        Path to the file. The first line must contain a quoted title of the
        form ``"... = ... = <time>"``: the time is the text after the second
        ``=`` inside the first quoted segment. Remaining lines hold ``x y``
        pairs; lines starting with ``;`` are comments and are skipped, as
        are blank/short lines (the original crashed on those).

    Returns
    -------
    (y, x, t)
        Note the historical return order: ordinates first, abscissae
        second, then the time stamp (kept for backward compatibility).
    """
    x = []
    y = []
    # Context manager guarantees the file is closed even if parsing fails
    # (the original leaked the handle on a parse error).
    with open(filenm, 'r') as f:
        header = f.readline()
        t = float(header.split('"')[1].split('=')[2])
        for line in f:
            if line.startswith(";"):
                continue  # gnuplot comment line
            words = line.split()
            if len(words) < 2:
                continue  # tolerate blank or malformed lines
            x.append(float(words[0]))
            y.append(float(words[1]))
    return array(y), array(x), t
|
AMReX-AstroREPO_NAMECastroPATH_START.@Castro_extracted@Castro-main@Exec@radiation_tests@RadSuOlsonMG@python@read_gnu.py@.PATH_END.py
|
{
"filename": "retrieve_fake_fsps_data.py",
"repo_name": "ArgonneCPAC/dsps",
"repo_path": "dsps_extracted/dsps-main/dsps/data_loaders/retrieve_fake_fsps_data.py",
"type": "Python"
}
|
"""
"""
import numpy as np
from jax.scipy.stats import norm
import os
from .defaults import SSPData
_THIS_DRNAME = os.path.dirname(os.path.abspath(__file__))
def load_fake_ssp_data():
    """Assemble a synthetic SSPData object from the fake grids below."""
    return SSPData(
        _get_lgzlegend(),
        _get_log_age_gyr(),
        _get_ssp_wave(),
        _get_spec_ssp(),
    )
def load_fake_filter_transmission_curves():
    """Return ``(wave, u, g, r, i, z, y)`` fake LSST transmission curves,
    each band evaluated on log10 of the fake SSP wavelength grid."""
    wave = _get_ssp_wave()
    lgwave = np.log10(wave)
    band_funcs = (
        _lsst_u_trans,
        _lsst_g_trans,
        _lsst_r_trans,
        _lsst_i_trans,
        _lsst_z_trans,
        _lsst_y_trans,
    )
    return (wave,) + tuple(func(lgwave) for func in band_funcs)
def _get_log_age_gyr():
log_age_gyr = np.arange(-3.5, 1.2, 0.05)
return log_age_gyr
def _get_lgzlegend():
    """Return log10 of the module-level metallicity legend ``zlegend``."""
    return np.log10(zlegend)
def _get_ssp_wave():
n_wave_ssp = 1963
ssp_wave = np.linspace(100, 20_000, n_wave_ssp)
return ssp_wave
def _get_spec_ssp():
    """Build fake SSP spectra as power laws in wavelength.

    Reads per-(metallicity, age) power-law coefficients (c0, c1) from the
    testing-data directory and evaluates flux = 10**(c0 + c1*log10(wave))
    on the fake wavelength grid.

    Returns
    -------
    spec_ssp : ndarray of shape (n_met, n_age, n_wave)
    """
    drn = os.path.join(_THIS_DRNAME, "tests", "testing_data")
    ssp_wave = _get_ssp_wave()
    n_wave_ssp = ssp_wave.size
    ssp_plaw_data_c0 = np.loadtxt(os.path.join(drn, "ssp_plaw_data_c0.txt"))
    ssp_plaw_data_c1 = np.loadtxt(os.path.join(drn, "ssp_plaw_data_c1.txt"))
    n_met, n_age = ssp_plaw_data_c0.shape
    spec_ssp = np.zeros((n_met, n_age, n_wave_ssp))
    for iz in range(n_met):
        for iage in range(n_age):
            c0 = ssp_plaw_data_c0[iz, iage]
            c1 = ssp_plaw_data_c1[iz, iage]
            # Equivalent to flux = 10**c0 * wave**c1.
            spec_ssp[iz, iage, :] = 10 ** (c0 + c1 * np.log10(ssp_wave))
    return spec_ssp
def _lsst_u_trans(x):
return norm.pdf(x, loc=3.57, scale=0.022) / 80
def _lsst_g_trans(x):
return norm.pdf(x, loc=3.68, scale=0.04) / 20
def _lsst_r_trans(x):
return norm.pdf(x, loc=3.79, scale=0.03) / 25
def _lsst_i_trans(x):
return norm.pdf(x, loc=3.875, scale=0.025) / 30
def _lsst_z_trans(x):
return norm.pdf(x, loc=3.935, scale=0.017) / 47
def _lsst_y_trans(x):
return norm.pdf(x, loc=3.985, scale=0.017) / 85
def _get_filter_waves():
n_bands, n_filter_waves = 6, 1906
wave_mins = np.array((3200.0, 3200.0, 3200.0, 4084.0, 4084.0, 4085.0))
wave_maxs = np.array((9084.0, 9085.0, 9086.0, 10987.0, 10988.0, 10989.0))
filter_waves = np.zeros((n_bands, n_filter_waves))
for iband in range(n_bands):
xmin, xmax = wave_mins[iband], wave_maxs[iband]
filter_waves[iband, :] = np.linspace(xmin, xmax, n_filter_waves)
return filter_waves
def _get_filter_trans():
    """Evaluate each fake band's transmission on its own wavelength grid."""
    filter_waves = _get_filter_waves()
    band_funcs = (
        _lsst_u_trans,
        _lsst_g_trans,
        _lsst_r_trans,
        _lsst_i_trans,
        _lsst_z_trans,
        _lsst_y_trans,
    )
    # Preallocate float64 output; band functions are evaluated in
    # log10(wavelength) space, matching _get_filter_waves() row order.
    filter_trans = np.zeros(filter_waves.shape)
    for row, (func, wave) in enumerate(zip(band_funcs, filter_waves)):
        filter_trans[row, :] = func(np.log10(wave))
    return filter_trans
# Metallicity legend for the fake SSP grid (presumably metal mass
# fractions Z, mirroring an FSPS grid — confirm against real data);
# only log10 of these values is consumed, via _get_lgzlegend().
zlegend = np.array(
    [
        0.0002,
        0.0003,
        0.0004,
        0.0005,
        0.0006,
        0.0008,
        0.001,
        0.0012,
        0.0016,
        0.002,
        0.0025,
        0.0031,
        0.0039,
        0.0049,
        0.0061,
        0.0077,
        0.0096,
        0.012,
        0.015,
        0.019,
        0.024,
        0.03,
    ]
)
|
ArgonneCPACREPO_NAMEdspsPATH_START.@dsps_extracted@dsps-main@dsps@data_loaders@retrieve_fake_fsps_data.py@.PATH_END.py
|
{
"filename": "logical_operations.py",
"repo_name": "cds-astro/mocpy",
"repo_path": "mocpy_extracted/mocpy-master/docs/examples/logical_operations.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
from astropy.visualization.wcsaxes.frame import EllipticalFrame
from astropy.wcs import WCS

from mocpy import MOC

# Load Galex and SDSS
sdss = MOC.from_fits("./../../resources/P-SDSS9-r.fits")
galex = MOC.from_fits("./../../resources/P-GALEXGR6-AIS-FUV.fits")

# Compute their intersection (&) and union (+) — MOC overloads both operators.
inter = sdss & galex
union = sdss + galex

# Plot the MOC using matplotlib
fig = plt.figure(111, figsize=(10, 8))

# Define a astropy WCS: all-sky Aitoff projection, reference pixel at the
# image centre, 0.1 unit/pixel scale (negative cdelt1 so RA increases left).
wcs = WCS(
    {
        "naxis": 2,
        "naxis1": 3240,
        "naxis2": 1620,
        "crpix1": 1620.5,
        "crpix2": 810.5,
        "cdelt1": -0.1,
        "cdelt2": 0.1,
        "ctype1": "RA---AIT",
        "ctype2": "DEC--AIT",
    },
)
ax = fig.add_subplot(1, 1, 1, projection=wcs, frame_class=EllipticalFrame)

# Call fill with a matplotlib axe and the `~astropy.wcs.WCS` wcs object.
# Draw the union first so the (smaller) intersection overlays it.
union.fill(
    ax=ax,
    wcs=wcs,
    alpha=0.5,
    fill=True,
    color="blue",
    linewidth=0,
    label="Union",
)
union.border(ax=ax, wcs=wcs, alpha=1, color="black")

inter.fill(
    ax=ax,
    wcs=wcs,
    alpha=0.5,
    fill=True,
    color="green",
    linewidth=0,
    label="Intersection",
)
inter.border(ax=ax, wcs=wcs, alpha=1, color="black")

ax.legend()
ax.set_aspect(1.0)
plt.xlabel("ra")
plt.ylabel("dec")
plt.title("Logical operations between SDSS and GALEX")
plt.grid(color="black", linestyle="dotted")
plt.show()
|
cds-astroREPO_NAMEmocpyPATH_START.@mocpy_extracted@mocpy-master@docs@examples@logical_operations.py@.PATH_END.py
|
{
"filename": "config.py",
"repo_name": "SatoshiHamano/WARP",
"repo_path": "WARP_extracted/WARP-main/warp/config.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os.path
from astropy.io import fits
import sys
import numpy as np
from warp.Spec2Dtools import header_key_read
import traceback
def alternativequestion(question, anss, defans):
    """Prompt until the reply is one of *anss*; an empty reply yields *defans*.

    Note: for the default to be reachable, ``""`` must itself be listed
    in *anss* (callers do include it).
    """
    while True:
        reply = input(question)
        if reply in anss:
            break
        print("Answers: ", anss)
    return reply if reply != "" else defans
def valueQuestion(question, lowlim, upplim, defans):
    """Prompt for a float strictly between *lowlim* and *upplim*.

    An empty reply returns *defans* unchanged; malformed or out-of-range
    input re-prompts with a hint.

    Parameters
    ----------
    question : str
        Prompt shown to the user.
    lowlim, upplim : float
        Exclusive bounds for an acceptable value.
    defans : object
        Value returned when the user just presses enter.
    """
    prompt_hint = "Please input float number between {} and {}. (Your answer: {})"
    while True:
        ansinput = input(question)
        if ansinput == "":
            return defans
        try:
            value = float(ansinput)
        except ValueError:
            # The original bare ``except:`` also swallowed KeyboardInterrupt
            # and SystemExit; only malformed numbers should re-prompt.
            print(prompt_hint.format(lowlim, upplim, ansinput))
            continue
        if lowlim < value < upplim:
            return value
        print(prompt_hint.format(lowlim, upplim, ansinput))
def constant_str_length(comment):
    """Print *comment* as a red '=== ... ===' banner padded to 72 columns."""
    total_width = 72
    padding = "=" * (total_width - len(comment) - 4)
    print("\033[31m\n=== %s %s\n\033[0m" % (comment, padding))
class config:
    def __init__(self, statusFile='INDEF'):
        """Initialize the pipeline configuration with default settings."""
        # Pixel values above this level are flagged as saturated.
        self.saturation_thres = 35000.
        # Processing switches; several can be overridden interactively
        # via readParamQuery().
        self.flag_apscatter = True
        self.flag_manual_aperture = False
        self.flag_skysub = False
        self.flag_bpmask = True
        self.flag_skyemission = False
        self.flag_wsmeasure = True
        self.flag_wscorrect = True
        self.flag_wsmanual = False
        self.flag_extract2d = False
        # Background subtraction mode; must be one of skysubModeList.
        self.skysub_mode = "none"
        self.skysubModeList = ["none", "average", "median", "minimum", "fit"]
        # Wavelength cut range (units inferred from usage elsewhere —
        # presumably microns; confirm against the extraction code).
        self.cutrange_list = [1.05, 1.30]
        self.fluxinput = "no"
        # Cosmic-ray rejection parameters.
        self.CRthreshold = 10.
        self.CRvaratio = 2.
        self.CRslitposratio = 1.5
        self.CRmaxsigma = 20.
        self.CRfixsigma = False
        # Path to the pipeline status file ('INDEF' when not used).
        self.status = statusFile
        self.frameNumberLimit = 28
        self.reduceFullData = True
        self.selectedOrders = []
    def inputDataList(self, listfile, oldFormat=False):
        """Parse the input frame list.

        Each line pairs an object frame with its sky frame; in the new
        format lines may carry optional tokens ``ap=low:upp`` (aperture
        range), ``bg=region`` (background region) and ``ws=shift``
        (manual wavelength shift).
        """
        # open input file list
        rfile = open(listfile, "r")
        rlines = rfile.readlines()
        rfile.close()
        # read the file names of object frame and corresponding sky frame.
        self.objectlist = []
        self.skylist = []
        self.lowlim_input = []
        self.upplim_input = []
        self.skysub_region = []
        self.waveshift_man = []
        if oldFormat:
            # Old format: fixed positional columns, plus an optional
            # "ws=" token anywhere on the line.
            for i in range(len(rlines)):
                rl1 = rlines[i].split()
                for j in rl1:
                    if j.find("ws=") != -1:
                        self.waveshift_man.append(float(j.lstrip("ws=")))
                        # NOTE(review): mutating rl1 while iterating; safe
                        # here only because we break immediately after.
                        rl1.remove(j)
                        break
                    if j == rl1[-1]:
                        # Reached the last token without finding "ws=".
                        self.waveshift_man.append(0.)
                self.objectlist.append(rl1[0])
                self.skylist.append(rl1[1])
                if self.flag_manual_aperture:
                    self.lowlim_input.append(float(rl1[2]))
                    self.upplim_input.append(float(rl1[3]))
                if self.flag_skysub:
                    self.skysub_region.append(rl1[-1])
                else:
                    self.skysub_region.append("INDEF")
        else:
            # New format: "object sky [ap=l:u] [bg=region] [ws=shift]".
            for i in range(len(rlines)):
                rl1 = rlines[i].split()
                self.objectlist.append(rl1[0])
                self.skylist.append(rl1[1])
                flagwsinput = False
                flagbginput = False
                for j in rl1[2:]:
                    if j.find("ap=") != -1:
                        aptmp = j.lstrip("ap=")
                        self.lowlim_input.append(float(aptmp.split(":")[0]))
                        self.upplim_input.append(float(aptmp.split(":")[1]))
                    if j.find("bg=") != -1:
                        self.skysub_region.append(j.lstrip("bg="))
                        flagbginput = True
                    if j.find("ws=") != -1:
                        self.waveshift_man.append(float(j.lstrip("ws=")))
                        flagwsinput = True
                if not flagwsinput:
                    self.waveshift_man.append(0.)
                if not flagbginput:
                    self.skysub_region.append("INDEF")
        # Required per-frame options must be present for every line when
        # the corresponding mode is enabled; otherwise abort.
        if len(self.objectlist) != len(self.lowlim_input) and self.flag_manual_aperture:
            print("Aperture range parameter is not written in input list.")
            sys.exit()
        if len(self.objectlist) != len(self.skysub_region) and self.flag_skysub:
            print("Background region parameter is not written in input list.")
            sys.exit()
        # synthesize the object frame list and sky frame list without overlaps.
        self.imagelist = list(set(self.objectlist + self.skylist))
        self.imagelist.sort()
        self.imagelist = np.array(self.imagelist)
        self.objnum = len(self.objectlist)
        self.imnum = len(self.imagelist)
        # Map each unique image back to its object-list index, or "sky"
        # when the frame only appears as a sky frame.
        self.imobjid = []
        for i in range(self.imnum):
            if self.imagelist[i] in self.objectlist:
                self.imobjid.append(self.objectlist.index(self.imagelist[i]))
            else:
                self.imobjid.append("sky")
    def readInputDataHeader(self):
        """Read the FITS headers of all input frames and cache key values.

        Also counts saturated pixels per frame and derives per-object
        convenience lists. Must be called after inputDataList().
        """
        self.objname = np.array([])
        self.nodpos = np.array([])
        self.satupix = np.array([])
        self.svfr_str = np.array([])
        self.svfr_end = np.array([])
        self.period = np.array([])
        self.setting = np.array([])
        self.slit = np.array([])
        self.instmode = np.array([])
        for i in range(self.imnum):
            hdulist_obj = fits.open(self.imagelist[i] + ".fits")
            prihdr_obj = hdulist_obj[0].header
            data_obj = hdulist_obj[0].data
            hdulist_obj.close()
            # Binarize against the saturation threshold so the sum below
            # counts saturated pixels.
            data_obj[data_obj <= self.saturation_thres] = 0
            data_obj[data_obj > self.saturation_thres] = 1
            self.satupix = np.append(self.satupix, np.sum(data_obj))
            # Sanitize the object name for use in file names (replace
            # spaces, quotes, '#' and '/' with underscores).
            self.objname = np.append(self.objname,
                                     header_key_read(prihdr_obj, "OBJECT").replace(" ", "_").replace(" ", "_").replace("'", "_").replace(
                                         "\"", "_").replace('#', '_').replace('/', '_'))
            self.nodpos = np.append(self.nodpos, header_key_read(prihdr_obj, "NODPOS"))
            # Slit-viewer frames bracketing the exposure.
            self.svfr_str = np.append(self.svfr_str, header_key_read(prihdr_obj, "SVFR-STR") + ".fits")
            self.svfr_end = np.append(self.svfr_end, header_key_read(prihdr_obj, "SVFR-END") + ".fits")
            self.period = np.append(self.period, header_key_read(prihdr_obj, "PERIOD"))
            self.setting = np.append(self.setting, header_key_read(prihdr_obj, "SETTING"))
            self.slit = np.append(self.slit, header_key_read(prihdr_obj, "SLIT"))
            self.instmode = np.append(self.instmode, header_key_read(prihdr_obj, "INSTMODE"))
        # Representative object name: that of the first object frame.
        self.objnameRep = self.objname[self.imagelist == self.objectlist[0]][0]
        # Slit-viewer images are usable only if no frame reports "N/A"
        # for both bracketing viewer frames.
        self.flag_svimage = True
        for i in range(self.imnum):
            if self.svfr_str[i].find("N/A") != -1 and self.svfr_end[i].find("N/A") != -1:
                self.flag_svimage = False
        # Per-object-frame views of the cached header values.
        self.objname_obj = [self.objname[self.imagelist == i][0] for i in self.objectlist]
        self.nodpos_obj = [self.nodpos[self.imagelist == i][0] for i in self.objectlist]
    def readInputCalib(self, inputlist):
        """Read the calibration file list and cache calibration headers.

        The list file holds one file name per line; entries 1-6 are the
        flat, mask, comparison, aperture, aperture-transform and
        apscatter-mask files (entry 0 is unused here — presumably a
        label line; confirm against the list format).
        """
        para = []
        calpath = os.path.dirname(inputlist)
        if calpath != "":
            calpath += "/"
        # Keep only the first token of each line.
        for line in open(inputlist, "r"):
            items = line.split()
            para.append(str(items[0]))
        self.flat_file = calpath + para[1]
        self.mask_file = calpath + para[2]
        self.comp_file = calpath + para[3]
        self.ap_file = calpath + para[4]
        self.aptrans_file = calpath + para[5]
        self.apsc_maskfile = calpath + para[6]
        # Cache setup keywords from the flat frame, if present.
        if os.path.exists(self.flat_file):
            flatf = fits.open(self.flat_file)
            prihdr_flat = flatf[0].header
            flatf.close()
            self.flatSetting = header_key_read(prihdr_flat, "SETTING")
            self.flatPeriod = header_key_read(prihdr_flat, "PERIOD")
            self.flatSlit = header_key_read(prihdr_flat, "SLIT")
            self.flatMode = header_key_read(prihdr_flat, "INSTMODE")
        # Cache setup keywords and dispersion step from the comparison
        # frame, if present.
        if os.path.exists(self.comp_file):
            compf = fits.open(self.comp_file)
            prihdr_comp = compf[0].header
            compf.close()
            self.dyinput = prihdr_comp["CDELT1"]
            self.compSetting = header_key_read(prihdr_comp, "SETTING")
            self.compPeriod = header_key_read(prihdr_comp, "PERIOD")
            self.compSlit = header_key_read(prihdr_comp, "SLIT")
            self.compMode = header_key_read(prihdr_comp, "INSTMODE")
        return None
def readParamQuery(self):
    """Interactively ask the user for the main reduction options.

    Every prompt shows the current attribute value as the default, so
    just pressing the enter key keeps it.  Updates the ``flag_*``
    switches and the derived ``skysub_mode``, then echoes all
    parameters with :meth:`showAllParams`.
    """
    print("======================================================")
    print("=== Please answer to the following questions. ===")
    print("=== OR ===")
    print("=== Just press enter key (adopt default settings). ===")
    print("======================================================")

    # Maps between "yes"/"no" answers and boolean flags (and back, to
    # display the current value as the prompt default).
    ynDict = {"yes": True, "no": False}
    tfDict = {True: "yes", False: "no"}

    # self.flagapscatter = ynDict[
    #     alternativequestion("Subtract scattered light? (def:{}) :".format(tfDict[self.flag_apscatter]),
    #                         ["yes", "no", ""], tfDict[self.flag_apscatter])]
    self.flag_manual_aperture = ynDict[
        alternativequestion("Adopt aperture ranges read from the input file? (def:{}) :".format(tfDict[self.flag_manual_aperture]),
                            ["yes", "no", ""], tfDict[self.flag_manual_aperture])]
    # self.skysub_mode = alternativequestion(
    #     "Subtract background spectra from object spectra? (def:{}) :".format(self.skysub_mode),
    #     self.skysubModeList + [""], self.skysub_mode)
    self.flag_skysub = ynDict[
        alternativequestion("Subtract background spectra from object spectra? (def:{}) :".format(tfDict[self.flag_skysub]),
                            ["yes", "no", ""], tfDict[self.flag_skysub])]
    # "average" is the only background mode offered interactively.
    self.skysub_mode = "average" if self.flag_skysub else "none"
    self.flag_bpmask = ynDict[
        alternativequestion("Detect and interpolate the cosmic rays? (def:{}) :".format(tfDict[self.flag_bpmask]),
                            ["yes", "no", ""], tfDict[self.flag_bpmask])]
    # ans_query_CRparams = alternativequestion(
    #     "Change any parameters in the cosmic ray detection algorithm? (def:no) :",
    #     ["yes", "no", "detail", ""], "no")
    # if ans_query_CRparams == "yes":
    #     self.CRthreshold = valueQuestion(
    #         "Threshold for the cosmic ray detection (def: {} sigma) :".format(self.CRthreshold), 3., 100.,
    #         self.CRthreshold)
    #     self.CRfixsigma = ynDict[
    #         alternativequestion("Fix the threshold sigma (def: {}) :".format(tfDict[self.CRfixsigma]),
    #                             ["yes", "no", ""], tfDict[self.CRfixsigma])]
    # elif ans_query_CRparams == "detail":
    #     self.CRthreshold = valueQuestion(
    #         "Threshold for the cosmic ray detection (def: {} sigma) :".format(self.CRthreshold), 3., 100.,
    #         self.CRthreshold)
    #     self.CRfixsigma = ynDict[
    #         alternativequestion("Fix the threshold sigma (def: {}) :".format(tfDict[self.CRfixsigma]),
    #                             ["yes", "no", ""], tfDict[self.CRfixsigma])]
    #     if not self.CRfixsigma:
    #         self.CRmaxsigma = valueQuestion(
    #             "Maximum threshold for the cosmic ray detection (def: {}) :".format(self.CRmaxsigma), 3., 100.,
    #             self.CRmaxsigma)
    #         self.CRvaratio = valueQuestion(
    #             "Threshold for the variance / average of the cosmic ray distribution (def: {}) :".format(
    #                 self.CRvaratio), 1., 100., self.CRvaratio)
    #         self.CRslitposratio = valueQuestion(
    #             "Threshold for the cosmic ray number ratio between the slit positions (def: {}) :".format(
    #                 self.CRslitposratio), 1., 100., self.CRslitposratio)
    # self.flag_skyemission = ynDict[
    #     alternativequestion("Extract the spectra from sky frame? (def: {}) :".format(tfDict[self.flag_skyemission]),
    #                         ["yes", "no", ""], tfDict[self.flag_skyemission])]
    self.flag_wsmeasure = ynDict[
        alternativequestion("Measure the spectra offsets among multiple frames? (def: {}) :".format(tfDict[self.flag_wsmeasure]),
                            ["yes", "no", ""], tfDict[self.flag_wsmeasure])]
    self.flag_wscorrect = ynDict[
        alternativequestion("Correct the spectra offsets among multiple frames? (def: {}) :".format(tfDict[self.flag_wscorrect]),
                            ["yes", "no", ""], tfDict[self.flag_wscorrect])]
    # Manual shift values only make sense when correcting; if offsets are
    # corrected without being measured they MUST come from the list file.
    if self.flag_wsmeasure and self.flag_wscorrect:
        self.flag_wsmanual = ynDict[alternativequestion(
            "Use the spectra offsets values written in list file? (def: {}) :".format(tfDict[self.flag_wsmanual]),
            ["yes", "no", ""], tfDict[self.flag_wsmanual])]
    elif not self.flag_wsmeasure and self.flag_wscorrect:
        self.flag_wsmanual = True
    else:
        self.flag_wsmanual = False
    # self.fluxinput = alternativequestion(
    #     "Conserve the flux in the transformation? (def: {}) :".format(self.fluxinput),
    #     ["yes", "no", ""], self.fluxinput)
    # self.flag_skysub = True if self.skysub_mode != "none" else False
    self.flag_extract2d = ynDict[
        alternativequestion("Extract two-dimensional spec (wavelength and spatial dimensions)? (def: {}) :".format(tfDict[self.flag_extract2d]),
                            ["yes", "no", ""], tfDict[self.flag_extract2d])]
    self.showAllParams()
    return None
def readParamFile(self, inputlist):
    """Read pipeline options and parameters from a settings text file.

    Each recognized line has the form ``<Keyword> : <value>``; lines
    that match no keyword are silently ignored.  Yes/no values toggle
    the corresponding ``flag_*`` attributes, numeric values set the
    cosmic-ray parameters, and comma-separated lists fill
    ``selectedOrders`` / ``cutrange_list``.  Finishes by echoing all
    parameters with :meth:`showAllParams`.

    Parameters
    ----------
    inputlist : str
        Path to the parameter file (e.g. calibration_parameters.txt).
    """
    ynDict = {"yes": True, "no": False}
    tfDict = {True: "yes", False: "no"}
    # 'with' ensures the parameter file is closed deterministically.
    with open(inputlist, "r") as paramfile:
        for line in paramfile:
            if line.find("Apscatter") != -1:
                self.flag_apscatter = ynDict[line.split(":")[1].split()[0]]
            if line.find("Manual Aperture") != -1:
                self.flag_manual_aperture = ynDict[line.split(":")[1].split()[0]]
            if line.find("Cosmic Ray Correction") != -1:
                self.flag_bpmask = ynDict[line.split(":")[1].split()[0]]
            if line.find("Extract all orders") != -1:
                self.reduceFullData = ynDict[line.split(":")[1].split()[0]]
            if line.find("Selected orders") != -1:
                if line.split(":")[1].split()[0] != "no":
                    tmpline = line.split(":")[1].split(",")
                    self.selectedOrders = [int(tmpline[i].split()[0]) for i in range(len(tmpline))]
            if line.find("Background Subtraction") != -1:
                if line.split(":")[1].split()[0] != "none":
                    self.flag_skysub = True
                    self.skysub_mode = line.split(":")[1].split()[0]
            if line.find("Set cut range") != -1:
                if line.split(":")[1].split()[0] != "no":
                    tmpline = line.split(":")[1].split(",")
                    self.cutrange_list = [float(tmpline[i].split()[0]) for i in range(len(tmpline))]
            if line.find("CUTRANSFORM flux") != -1:
                self.fluxinput = line.split(":")[1].split()[0]
            if line.find("Sky Emission") != -1:
                self.flag_skyemission = ynDict[line.split(":")[1].split()[0]]
            if line.find("Measure Shift") != -1:
                self.flag_wsmeasure = ynDict[line.split(":")[1].split()[0]]
            if line.find("Correct Shift") != -1:
                self.flag_wscorrect = ynDict[line.split(":")[1].split()[0]]
            if line.find("Manual Shift") != -1:
                self.flag_wsmanual = ynDict[line.split(":")[1].split()[0]]
            # Numeric parameters: a malformed value keeps the current
            # default and warns, instead of aborting the pipeline.
            # BUGFIX: the bare "except:" clauses were narrowed to the
            # exceptions float()/indexing can actually raise, so real
            # errors (e.g. KeyboardInterrupt) are no longer swallowed.
            if line.find("Cosmic ray threshold sigma") != -1:
                try:
                    self.CRthreshold = float(line.split(":")[1].split()[0])
                except (ValueError, IndexError):
                    print("\033[31m WARNING: The input value could not be converted to float. value={} was set. \033[0m".format(
                        self.CRthreshold))
            if line.find("Cosmic ray maximum sigma") != -1:
                try:
                    self.CRmaxsigma = float(line.split(":")[1].split()[0])
                except (ValueError, IndexError):
                    print("\033[31m WARNING: The input value could not be converted to float. value={} was set. \033[0m".format(self.CRmaxsigma))
            if line.find("Cosmic ray Var/Ave ratio") != -1:
                try:
                    self.CRvaratio = float(line.split(":")[1].split()[0])
                except (ValueError, IndexError):
                    print("\033[31m WARNING: The input value could not be converted to float. value={} was set. \033[0m".format(self.CRvaratio))
            if line.find("Cosmic ray ratio between slit positions") != -1:
                try:
                    self.CRslitposratio = float(line.split(":")[1].split()[0])
                except (ValueError, IndexError):
                    print("\033[31m WARNING: The input value could not be converted to float. value={} was set. \033[0m".format(
                        self.CRslitposratio))
            if line.find("Cosmic ray fix sigma") != -1:
                self.CRfixsigma = ynDict[line.split(":")[1].split()[0]]
            if line.find("Extract 2d spectrum") != -1:
                self.flag_extract2d = ynDict[line.split(":")[1].split()[0]]
    self.showAllParams()
    return None
def setFastModeParam(self):
    """Load the fixed option set used by the fast reduction mode.

    Fast mode skips every optional step (background subtraction,
    cosmic-ray masking, shift measurement/correction, 2-D extraction)
    and reduces all orders, then echoes the resulting parameters.
    """
    fast_defaults = {
        "flag_apscatter": True,
        "flag_manual_aperture": False,
        "flag_skysub": False,
        "flag_bpmask": False,
        "flag_skyemission": False,
        "flag_wsmeasure": False,
        "flag_wscorrect": False,
        "flag_wsmanual": False,
        "flag_extract2d": False,
        "skysub_mode": "none",
        "cutrange_list": [1.05],
        "fluxinput": "no",
        "CRthreshold": 10.,
        "CRvaratio": 2.,
        "CRslitposratio": 1.5,
        "CRmaxsigma": 20.,
        "CRfixsigma": False,
        "reduceFullData": True,
    }
    for attr_name, default_value in fast_defaults.items():
        setattr(self, attr_name, default_value)
    self.showAllParams()
def writeStatus(self, pipelineVer, startTimeStr):
    """Append the pipeline version and start time to the status file.

    Parameters
    ----------
    pipelineVer : str
        WARP version string.
    startTimeStr : str
        Human-readable start time.
    """
    # 'with' closes the file even if a write fails (was open/close).
    with open(self.status, "a") as status_file:
        status_file.write("WARP ver.%s\n" % (pipelineVer))
        status_file.write("Starting time: %s\n" % startTimeStr)
        status_file.write("\n")
def writeElapsedTime(self, endTimeStr, elapsedTime, status):
    """Append the termination time, elapsed time and final status.

    Parameters
    ----------
    endTimeStr : str
        Human-readable termination time.
    elapsedTime : float
        Elapsed wall-clock time in seconds (formatted as XmY.Ys).
    status : str
        Final pipeline status string.
    """
    # 'with' closes the file even if a write fails (was open/close).
    with open(self.status, "a") as status_file:
        status_file.write("Termination time: %s\n" % (endTimeStr))
        status_file.write("Elapsed time: %dm%.1fs\n\n" % (int(elapsedTime / 60), elapsedTime % 60))
        status_file.write("Pipeline status: {}".format(status))
def writePipelineSettings(self):
    """Append the current pipeline settings to the status file.

    Writes every user-facing option in the same "Keyword: value" form
    that readParamFile() parses.
    """
    tfDict = {True: "yes", False: "no"}
    with open(self.status, "a") as status_file:
        status_file.write("\nSettings:\n")
        status_file.write(" Apscatter: %s\n" % tfDict[self.flag_apscatter])
        status_file.write(" Manual Aperture: %s\n" % tfDict[self.flag_manual_aperture])
        status_file.write(" Background Subtraction: %s\n" % self.skysub_mode)
        status_file.write(" Extract all orders: %s\n" % tfDict[self.reduceFullData])
        # NOTE(review): the selected orders are written when reduceFullData
        # is True; if they should instead accompany a partial reduction the
        # condition may need inverting -- confirm with callers.
        if self.reduceFullData:
            # BUGFIX: the separator test used to compare each order against
            # cutrange_list[-1] (copy-paste from the cut-range loop below),
            # and the line was never newline-terminated.
            status_file.write(" Selected orders: ")
            status_file.write(", ".join(str(m) for m in self.selectedOrders))
            status_file.write("\n")
        status_file.write(" Cosmic Ray Correction: %s\n" % tfDict[self.flag_bpmask])
        status_file.write(" Cosmic ray threshold sigma: %.1f\n" % self.CRthreshold)
        status_file.write(" Cosmic ray maximum sigma: %.1f\n" % self.CRmaxsigma)
        status_file.write(" Cosmic ray Var/Ave ratio: %.1f\n" % self.CRvaratio)
        status_file.write(" Cosmic ray ratio between slit positions: %.1f\n" % self.CRslitposratio)
        status_file.write(" Cosmic ray fix sigma: %s\n" % tfDict[self.CRfixsigma])
        status_file.write(" Set cut range: ")
        # join also renders duplicate values correctly, unlike the old
        # "compare with the last element" separator test.
        status_file.write(", ".join(str(i) for i in self.cutrange_list))
        status_file.write("\n")
        status_file.write(" Sky Emission: %s\n" % tfDict[self.flag_skyemission])
        status_file.write(" Measure Shift: %s\n" % tfDict[self.flag_wsmeasure])
        status_file.write(" Correct Shift: %s\n" % tfDict[self.flag_wscorrect])
        status_file.write(" Manual Shift: %s\n" % tfDict[self.flag_wsmanual])
        status_file.write(" CUTRANSFORM flux: %s\n" % self.fluxinput)
        status_file.write(" Extract 2d spectrum: %s\n" % tfDict[self.flag_extract2d])
        status_file.write("\n")
def writeInputDataList(self):
    """Append the input frame list (with per-frame options) to status.

    One line per object/sky pair; the aperture range (ap=), background
    region (bg=) and manual wavelength shift (ws=) columns are appended
    only when the matching option is enabled AND a value exists for
    every frame.
    """
    # 'with' closes the file even if a write fails (was open/close).
    with open(self.status, "a") as status_file:
        status_file.write("Input data:\n")
        for i in range(len(self.objectlist)):
            dataLine = " {} {}".format(self.objectlist[i], self.skylist[i])
            if len(self.lowlim_input) == len(self.objectlist) and self.flag_manual_aperture:
                dataLine += " ap={}:{}".format(self.lowlim_input[i], self.upplim_input[i])
            if len(self.skysub_region) == len(self.objectlist) and self.flag_skysub:
                dataLine += " bg={}".format(self.skysub_region[i])
            if len(self.waveshift_man) == len(self.objectlist) and self.flag_wsmanual:
                dataLine += " ws={}".format(self.waveshift_man[i])
            dataLine += "\n"
            status_file.write(dataLine)
        status_file.write("\n")
def writeError(self, error):
    """Log *error* and the active traceback to the status file and console.

    Intended to be called from inside an ``except`` block so that
    ``traceback`` still holds the active exception.
    """
    # First persist the error and traceback in the status file ...
    with open(self.status, "a") as logfile:
        logfile.write('{}\n'.format(error))
        traceback.print_exc(file=logfile)
    # ... then echo the same information to the terminal in red.
    print('\033[31m {} \033[0m\n'.format(error))
    print(traceback.format_exc())
    print("\033[31m !!!Please send the \'status.txt\' to the developer to report the error.!!! \033[0m")
def writeParam(self):
    """Append a dump of every WARP option and parameter to status.

    The attributes are written in a fixed order, one per line, in the
    form " name: value".
    """
    param_names = (
        "flag_apscatter", "flag_manual_aperture", "flag_skysub",
        "flag_bpmask", "flag_skyemission", "flag_wsmeasure",
        "flag_wscorrect", "flag_wsmanual", "flag_extract2d",
        "skysub_mode", "skysubModeList", "cutrange_list", "fluxinput",
        "CRthreshold", "CRvaratio", "CRslitposratio", "CRmaxsigma",
        "CRfixsigma", "reduceFullData", "selectedOrders",
    )
    with open(self.status, "a") as status_file:
        status_file.write("WARP Settings and Parameters:\n")
        for attr_name in param_names:
            status_file.write(" {}: {}\n".format(attr_name, getattr(self, attr_name)))
        status_file.write("\n")
def readObservationInfo(self, hdulist):
    """Collect per-frame observation metadata from FITS primary headers.

    Parameters
    ----------
    hdulist : sequence
        One primary header per input frame, in frame order; every item
        is passed to ``header_key_read``.  Assumed to hold
        ``self.imnum`` entries (the acqdate loop indexes by that count).

    Notes
    -----
    Fills one list attribute per header keyword, aligned with the
    input frames.
    """
    # ACQTIME1 holds date and time; acqtime keeps the text after the last
    # "-", acqdate the first whitespace-separated field with that tail
    # stripped off.
    # NOTE(review): str.rstrip() strips a *character set*, not a suffix --
    # confirm that ACQTIME1's exact format makes this safe.
    self.acqtime = [header_key_read(i, "ACQTIME1").split("-")[-1] for i in hdulist]
    self.acqdate = [header_key_read(hdulist[i], "ACQTIME1").split()[0].rstrip(self.acqtime[i]).rstrip("-") for i in
                    range(self.imnum)]
    # Exposure, pointing and instrument keywords.
    self.exptime = [header_key_read(i, "EXPTIME") for i in hdulist]
    self.inttime = [header_key_read(i, "INTTIME") for i in hdulist]
    self.ra_hours = [header_key_read(i, "RA") for i in hdulist]
    self.dec_degree = [header_key_read(i, "DEC") for i in hdulist]
    self.modes = [header_key_read(i, "INSTMODE") for i in hdulist]
    self.teles = [header_key_read(i, "TELESCOP") for i in hdulist]
    self.seeing = [header_key_read(i, "SEEING") for i in hdulist]
    self.period = [header_key_read(i, "PERIOD") for i in hdulist]
    self.setting = [header_key_read(i, "SETTING") for i in hdulist]
    self.airmass = [header_key_read(i, "AIRMASS") for i in hdulist]
    self.airmass_start = [header_key_read(i, "AIRM-STR") for i in hdulist]
    self.airmass_end = [header_key_read(i, "AIRM-END") for i in hdulist]
    self.ut_start = [header_key_read(i, "UT-STR") for i in hdulist]
    self.ut_end = [header_key_read(i, "UT-END") for i in hdulist]
    # Site conditions and observers.
    self.observatory = [header_key_read(i, "OBSERVAT") for i in hdulist]
    self.observer = [header_key_read(i, "OBSERVER") for i in hdulist]
    self.humidity = [header_key_read(i, "OUT-HUM") for i in hdulist]
    self.temperature = [header_key_read(i, "OUT-TMP") for i in hdulist]
    self.air_pressure = [header_key_read(i, "OUT-PRS") for i in hdulist]
    self.wind_speed = [header_key_read(i, "OUT-WND") for i in hdulist]
    # Observation database (WODB) bookkeeping; underscores in the theme
    # name are rendered as spaces.
    self.wodbtheme = [header_key_read(i, "WODBTHEM").replace("_", " ") for i in hdulist]
    self.wodbobsid = [header_key_read(i, "WODBOBS") for i in hdulist]
    self.wodbstd = [header_key_read(i, "WODBSTD") for i in hdulist]
    self.wodbpi = [header_key_read(i, "WODBPI") for i in hdulist]
    # Slit, autoguider and nodding configuration.
    self.slitwidth = [header_key_read(i, "SLIT") for i in hdulist]
    self.agstatus = [header_key_read(i, "AUTOGUID") for i in hdulist]
    self.slitpa = [header_key_read(i, "SLT-PA") for i in hdulist]
    self.nodpat = [header_key_read(i, "NODPAT") for i in hdulist]
    self.nodamp = [header_key_read(i, "NODAMP") for i in hdulist]
def checkDataStatus(self, showDetail=True, ignore=False, statusOutput=False):
    """Verify that science frames and calibrations share one instrument state.

    Compares the SETTING, PERIOD, SLIT and INSTMODE values of all input
    frames with those of the comparison and flat frames.

    Parameters
    ----------
    showDetail : bool
        Print the collected IDs to the terminal.
    ignore : bool
        Suppress the warning printed when the data are inhomogeneous.
    statusOutput : bool
        Also append the collected IDs to the status file.

    Returns
    -------
    bool
        True when every category has exactly one distinct value.
    """
    uniqueSettings = set(list(self.setting[:self.imnum]) + [self.compSetting, self.flatSetting])
    uniquePeriods = set(list(self.period[:self.imnum]) + [self.compPeriod, self.flatPeriod])
    uniqueSlits = set(list(self.slit[:self.imnum]) + [self.compSlit, self.flatSlit])
    uniqueModes = set(list(self.instmode[:self.imnum]) + [self.compMode, self.flatMode])

    if showDetail:
        print("\n# Instrument setting IDs from fits header")
        print("Inputs: ", self.setting)
        print("Calibs (comp,flat): {}, {}".format(self.compSetting, self.flatSetting))
        print("\n# Period IDs from fits header")
        print("Inputs: ", self.period)
        print("Calibs (comp,flat): {}, {}".format(self.compPeriod, self.flatPeriod))
        print("\n# Slit from fits header")
        print("Inputs: ", self.slit)
        print("Calibs (comp, flat): {}, {}".format(self.compSlit, self.flatSlit))
        print("\n# Mode from fits header")
        print("Inputs: ", self.instmode)
        print("Calibs (comp, flat): {}, {}".format(self.compMode, self.flatMode))

    if statusOutput:
        with open(self.status, "a") as status_file:
            status_file.write("Data status:\n")
            status_file.write("# Instrument setting IDs from fits header\n")
            status_file.write(" Inputs: {}\n".format(self.setting))
            status_file.write(" Calibs (comp,flat): {}, {}\n".format(self.compSetting, self.flatSetting))
            status_file.write("# Period IDs from fits header\n")
            status_file.write(" Inputs: {}\n".format(self.period))
            status_file.write(" Calibs (comp,flat): {}, {}\n".format(self.compPeriod, self.flatPeriod))
            status_file.write("# Slit from fits header\n")
            status_file.write(" Inputs: {}\n".format(self.slit))
            status_file.write(" Calibs (comp, flat): {}, {}\n".format(self.compSlit, self.flatSlit))
            status_file.write("# Mode from fits header\n")
            status_file.write(" Inputs: {}\n".format(self.instmode))
            status_file.write(" Calibs (comp, flat): {}, {}\n".format(self.compMode, self.flatMode))
            status_file.write("\n")

    if all(len(group) == 1 for group in (uniqueSettings, uniquePeriods, uniqueSlits, uniqueModes)):
        return True
    if not ignore:
        print("\033[31m WARNING: Multiple datatypes are mixed in the input data. \033[0m")
    return False
def showAllParams(self):
    """Print every WARP option and parameter to the terminal."""
    print("## WARP Settings and Parameters")
    for attr_name in ("flag_apscatter", "flag_manual_aperture", "flag_skysub",
                      "flag_bpmask", "flag_skyemission", "flag_wsmeasure",
                      "flag_wscorrect", "flag_wsmanual", "flag_extract2d",
                      "skysub_mode", "skysubModeList", "cutrange_list",
                      "fluxinput", "CRthreshold", "CRvaratio",
                      "CRslitposratio", "CRmaxsigma", "CRfixsigma",
                      "reduceFullData", "selectedOrders"):
        # Two-argument print reproduces the original "name:  value" layout.
        print("{}: ".format(attr_name), getattr(self, attr_name))
    print()
def showAllCalibs(self):
    """Print the calibration file paths and the comp wavelength step."""
    print("## Calibration files")
    for attr_name in ("flat_file", "mask_file", "comp_file", "ap_file",
                      "aptrans_file", "apsc_maskfile", "dyinput"):
        # Two-argument print reproduces the original "name:  value" layout.
        print("{}: ".format(attr_name), getattr(self, attr_name))
    print()
|
SatoshiHamanoREPO_NAMEWARPPATH_START.@WARP_extracted@WARP-main@warp@config.py@.PATH_END.py
|
{
"filename": "msk.py",
"repo_name": "cvxopt/cvxopt",
"repo_path": "cvxopt_extracted/cvxopt-master/src/python/msk.py",
"type": "Python"
}
|
"""
CVXOPT interface for MOSEK 8
"""
# Copyright 2012-2023 M. Andersen and L. Vandenberghe.
# Copyright 2010-2011 L. Vandenberghe.
# Copyright 2004-2009 J. Dahl and L. Vandenberghe.
#
# This file is part of CVXOPT.
#
# CVXOPT is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# CVXOPT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import mosek
from cvxopt import matrix, spmatrix, sparse
import sys
def streamprinter(text):
    """Echo MOSEK solver log output to standard output without buffering."""
    # print with end=""/flush=True is equivalent to write()+flush().
    print(text, end="", flush=True)
# Placeholder "infinity" for MOSEK bound arrays: MOSEK ignores the numeric
# bound value on sides marked free by the bound key, so 0.0 suffices here.
inf = 0.0

# Global MOSEK parameter dictionary consumed by lp()/conelp()/socp(),
# e.g. options = {mosek.iparam.log: 0}; an 'options' kwarg overrides it.
options = {}
def lp(c, G, h, A=None, b=None, taskfile=None, **kwargs):
    """
    Solves a pair of primal and dual LPs

        minimize    c'*x            maximize   -h'*z - b'*y
        subject to  G*x + s = h     subject to  G'*z + A'*y + c = 0
                    A*x = b                     z >= 0
                    s >= 0

    using MOSEK 8.

    (solsta, x, z, y) = lp(c, G, h, A=None, b=None).

    Input arguments

        c is n x 1, G is m x n, h is m x 1, A is p x n, b is p x 1.  G and
        A must be dense or sparse 'd' matrices.  c, h and b are dense 'd'
        matrices with one column.  The default values for A and b are
        empty matrices with zero rows.

        Optionally, the interface can write a .task file, required for
        support questions on the MOSEK solver.

    Return values

        solsta is a MOSEK solution status key.

            If solsta is mosek.solsta.optimal, then (x, y, z) contains the
            primal-dual solution.
            If solsta is mosek.solsta.prim_infeas_cer, then (x, y, z) is a
            certificate of primal infeasibility.
            If solsta is mosek.solsta.dual_infeas_cer, then (x, y, z) is a
            certificate of dual infeasibility.
            If solsta is mosek.solsta.unknown, then (x, y, z) are all None.

            Other return values for solsta include:
                mosek.solsta.dual_feas
                mosek.solsta.near_dual_feas
                mosek.solsta.near_optimal
                mosek.solsta.near_prim_and_dual_feas
                mosek.solsta.near_prim_feas
                mosek.solsta.prim_and_dual_feas
                mosek.solsta.prim_feas
            in which case the (x,y,z) value may not be well-defined.

        x, y, z  the primal-dual solution.

    Options are passed to MOSEK solvers via the msk.options dictionary.
    For example, the following turns off output from the MOSEK solvers

        >>> msk.options = {mosek.iparam.log: 0}

    see the MOSEK Python API manual.
    """

    with mosek.Env() as env:

        # --- Validate dimensions/types of c, G, h, A, b. -----------------
        if type(c) is not matrix or c.typecode != 'd' or c.size[1] != 1:
            raise TypeError("'c' must be a dense column matrix")
        n = c.size[0]
        if n < 1: raise ValueError("number of variables must be at least 1")

        if (type(G) is not matrix and type(G) is not spmatrix) or \
            G.typecode != 'd' or G.size[1] != n:
            raise TypeError("'G' must be a dense or sparse 'd' matrix "\
                "with %d columns" %n)
        m = G.size[0]
        if m == 0: raise ValueError("m cannot be 0")

        if type(h) is not matrix or h.typecode != 'd' or h.size != (m,1):
            raise TypeError("'h' must be a 'd' matrix of size (%d,1)" %m)

        if A is None: A = spmatrix([], [], [], (0,n), 'd')
        if (type(A) is not matrix and type(A) is not spmatrix) or \
            A.typecode != 'd' or A.size[1] != n:
            raise TypeError("'A' must be a dense or sparse 'd' matrix "\
                "with %d columns" %n)
        p = A.size[0]
        if b is None: b = matrix(0.0, (0,1))
        if type(b) is not matrix or b.typecode != 'd' or b.size != (p,1):
            raise TypeError("'b' must be a dense matrix of size (%d,1)" %p)

        # Constraint bounds: the first m rows (G*x <= h) get an upper
        # bound, the last p rows (A*x = b) are fixed to b.
        bkc = m*[ mosek.boundkey.up ] + p*[ mosek.boundkey.fx ]
        blc = m*[ -inf ] + [ bi for bi in b ]
        buc = list(h) + list(b)

        # All n variables are free.
        bkx = n*[mosek.boundkey.fr]
        blx = n*[ -inf ]
        bux = n*[ +inf ]

        # Stacked coefficient matrix [G; A] in compressed-column storage.
        colptr, asub, acof = sparse([G,A]).CCS
        aptrb, aptre = colptr[:-1], colptr[1:]

        with env.Task(0,0) as task:
            task.set_Stream (mosek.streamtype.log, streamprinter)

            # Set MOSEK options; the kwarg overrides the module-level dict.
            options = kwargs.get('options',globals()['options'])
            for (param, val) in options.items():
                if str(param)[:6] == "iparam":
                    task.putintparam(param, val)
                elif str(param)[:6] == "dparam":
                    task.putdouparam(param, val)
                elif str(param)[:6] == "sparam":
                    task.putstrparam(param, val)
                else:
                    raise ValueError("invalid MOSEK parameter: " + str(param))

            task.inputdata (m+p, # number of constraints
                            n,   # number of variables
                            list(c), # linear objective coefficients
                            0.0, # objective fixed value
                            list(aptrb),
                            list(aptre),
                            list(asub),
                            list(acof),
                            bkc,
                            blc,
                            buc,
                            bkx,
                            blx,
                            bux)

            task.putobjsense(mosek.objsense.minimize)

            # Optional task dump for MOSEK support requests.
            if taskfile:
                task.writetask(taskfile)

            task.optimize()

            task.solutionsummary (mosek.streamtype.msg);

            solsta = task.getsolsta(mosek.soltype.bas)

            # Primal x and the dual z of the inequality block, taken from
            # the upper-bound multipliers (suc) of the first m rows.
            x, z = n*[ 0.0 ], m*[ 0.0 ]
            task.getsolutionslice(mosek.soltype.bas, mosek.solitem.xx, 0, n, x)
            task.getsolutionslice(mosek.soltype.bas, mosek.solitem.suc, 0, m, z)
            x, z = matrix(x), matrix(z)

            if p != 0:
                # Dual y of the equality block: difference of the upper-
                # and lower-bound multipliers of the fixed constraints.
                yu, yl = p*[0.0], p*[0.0]
                task.getsolutionslice(mosek.soltype.bas, mosek.solitem.suc, m, m+p, yu)
                task.getsolutionslice(mosek.soltype.bas, mosek.solitem.slc, m, m+p, yl)
                y = matrix(yu) - matrix(yl)
            else:
                y = matrix(0.0, (0,1))

            if (solsta is mosek.solsta.unknown):
                return (solsta, None, None, None)
            else:
                return (solsta, x, z, y)
def conelp(c, G, h, dims=None, taskfile=None, **kwargs):
    """
    Solves a pair of primal and dual SOCPs

        minimize    c'*x
        subject to  G*x + s = h
                    s >= 0

        maximize    -h'*z
        subject to  G'*z + c = 0
                    z >= 0

    using MOSEK 8.

    The inequalities are with respect to a cone C defined as the Cartesian
    product of N + M + 1 cones:

        C = C_0 x C_1 x .... x C_N x C_{N+1} x ... x C_{N+M}.

    The first cone C_0 is the nonnegative orthant of dimension ml.  The
    next N cones are second order cones of dimension mq[0], ..., mq[N-1].
    The second order cone of dimension m is defined as

        { (u0, u1) in R x R^{m-1} | u0 >= ||u1||_2 }.

    The next M cones are positive semidefinite cones of order ms[0], ...,
    ms[M-1] >= 0.

    The formats of G and h are identical to that used in solvers.conelp().

    Input arguments.

        c is a dense 'd' matrix of size (n,1).

        dims is a dictionary with the dimensions of the components of C.
        It has three fields.
        - dims['l'] = ml, the dimension of the nonnegative orthant C_0.
          (ml >= 0.)
        - dims['q'] = mq = [ mq[0], mq[1], ..., mq[N-1] ], a list of N
          integers with the dimensions of the second order cones C_1, ...,
          C_N.  (N >= 0 and mq[k] >= 1.)
        - dims['s'] = ms = [ ms[0], ms[1], ..., ms[M-1] ], a list of M
          integers with the orders of the semidefinite cones C_{N+1}, ...,
          C_{N+M}.  (M >= 0 and ms[k] >= 0.)
        The default value of dims is {'l': G.size[0], 'q': [], 's': []}.

        G is a dense or sparse 'd' matrix of size (K,n), where

            K = ml + mq[0] + ... + mq[N-1] + ms[0]**2 + ... + ms[M-1]**2.

        Each column of G describes a vector

            v = ( v_0, v_1, ..., v_N, vec(v_{N+1}), ..., vec(v_{N+M}) )

        in V = R^ml x R^mq[0] x ... x R^mq[N-1] x S^ms[0] x ... x S^ms[M-1]
        stored as a column vector

            [ v_0; v_1; ...; v_N; vec(v_{N+1}); ...; vec(v_{N+M}) ].

        Here, if u is a symmetric matrix of order m, then vec(u) is the
        matrix u stored in column major order as a vector of length m**2.
        We use BLAS unpacked 'L' storage, i.e., the entries in vec(u)
        corresponding to the strictly upper triangular entries of u are
        not referenced.

        h is a dense 'd' matrix of size (K,1), representing a vector in V,
        in the same format as the columns of G.

        Optionally, the interface can write a .task file, required for
        support questions on the MOSEK solver.

    Return values

        solsta is a MOSEK solution status key.

            If solsta is mosek.solsta.optimal,
            then (x, z) contains the primal-dual solution.
            If solsta is mosek.solsta.prim_infeas_cer,
            then (x, z) is a certificate of dual infeasibility.
            If solsta is mosek.solsta.dual_infeas_cer,
            then (x, z) is a certificate of primal infeasibility.
            If solsta is mosek.solsta.unknown,
            then (x, z) are all None

            Other return values for solsta include:
                mosek.solsta.dual_feas
                mosek.solsta.near_dual_feas
                mosek.solsta.near_optimal
                mosek.solsta.near_prim_and_dual_feas
                mosek.solsta.near_prim_feas
                mosek.solsta.prim_and_dual_feas
                mosek.solsta.prim_feas
            in which case the (x, z) value may not be well-defined.

        x, z  the primal-dual solution.

    Options are passed to MOSEK solvers via the msk.options dictionary,
    e.g., the following turns off output from the MOSEK solvers

        >>> msk.options = {mosek.iparam.log:0}

    see the MOSEK Python API manual.
    """

    with mosek.Env() as env:

        # Pure LP case: delegate to lp() and adapt the return signature.
        if dims is None:
            (solsta, x, y, z) = lp(c, G, h)
            return (solsta, x, z, None)

        N, n = G.size

        # ms holds the *squared* orders, i.e. the row counts of the
        # unpacked vec() storage of each semidefinite block.
        ml, mq, ms = dims['l'], dims['q'], [ k*k for k in dims['s'] ]
        cdim = ml + sum(mq) + sum(ms)

        if cdim == 0: raise ValueError("ml+mq+ms cannot be 0")

        # Data for kth 'q' constraint are found in rows indq[k]:indq[k+1] of G.
        indq = [ dims['l'] ]
        for k in dims['q']: indq = indq + [ indq[-1] + k ]

        # Data for the kth 's' constraint are found in rows
        # indq[-1] + (inds[k]:inds[k+1]) of G.
        inds = [ 0 ]
        for k in dims['s']: inds = inds + [ inds[-1] + k*k ]

        if type(h) is not matrix or h.typecode != 'd' or h.size[1] != 1:
            raise TypeError("'h' must be a 'd' matrix with 1 column")

        if type(G) is matrix or type(G) is spmatrix:
            if G.typecode != 'd' or G.size[0] != cdim:
                raise TypeError("'G' must be a 'd' matrix with %d rows " %cdim)
            if h.size[0] != cdim:
                raise TypeError("'h' must have %d rows" %cdim)
        else:
            raise TypeError("'G' must be a matrix")

        if len(dims['q']) and min(dims['q'])<1: raise TypeError(
            "dimensions of quadratic cones must be positive")

        if len(dims['s']) and min(dims['s'])<1: raise TypeError(
            "dimensions of semidefinite cones must be positive")

        # MOSEK solves the *dual* problem: the task variables are the dual
        # variable z (linear + quadratic parts as scalar variables, the
        # semidefinite parts as barvars), and the n fixed constraints
        # (lower = upper = -c) encode G'*z + c = 0.
        bkc = n*[ mosek.boundkey.fx ]
        blc = list(-c)
        buc = list(-c)

        dimx = ml + sum(mq)

        # z_l >= 0 for the orthant part; the quadratic parts are free here
        # and constrained by the cones appended below.
        bkx = ml*[ mosek.boundkey.lo ] + sum(mq)*[ mosek.boundkey.fr ]
        blx = ml*[ 0.0 ] + sum(mq)*[ -inf ]
        bux = dimx*[ +inf ]

        # Dual objective: maximize -h'*z (c is rebound to -h from here on).
        c = list(-h)

        # NOTE(review): cs is computed but not referenced afterwards; the
        # semidefinite objective coefficients are rebuilt from h below.
        cl, cs = c[:dimx], sparse(c[dimx:])
        # Scalar-variable part of the constraint matrix: rows of the task
        # constraints are the columns of G, hence the transpose.
        Gl, Gs = sparse(G[:dimx,:]), sparse(G[dimx:,:])
        colptr, asub, acof = Gl.T.CCS
        aptrb, aptre = colptr[:-1], colptr[1:]

        with env.Task(0,0) as task:
            task.set_Stream (mosek.streamtype.log, streamprinter)

            # Set MOSEK options; the kwarg overrides the module-level dict.
            options = kwargs.get('options',globals()['options'])
            for (param, val) in options.items():
                if str(param)[:6] == "iparam":
                    task.putintparam(param, val)
                elif str(param)[:6] == "dparam":
                    task.putdouparam(param, val)
                elif str(param)[:6] == "sparam":
                    task.putstrparam(param, val)
                else:
                    raise ValueError("invalid MOSEK parameter: "+str(param))

            task.inputdata (n, # number of constraints
                            dimx, # number of variables
                            cl, # linear objective coefficients
                            0.0, # objective fixed value
                            list(aptrb),
                            list(aptre),
                            list(asub),
                            list(acof),
                            bkc,
                            blc,
                            buc,
                            bkx,
                            blx,
                            bux)

            task.putobjsense(mosek.objsense.maximize)

            # One barvar per semidefinite cone.
            numbarvar = len(dims['s'])
            task.appendbarvars(dims['s'])

            # Objective coefficients of the barvars: -h restricted to the
            # semidefinite rows, converted to (j,k,l,value) block triplets.
            barcsubj, barcsubk, barcsubl = (inds[-1])*[ 0 ], (inds[-1])*[ 0 ], (inds[-1])*[ 0 ]
            barcval = [ -h[indq[-1]+k] for k in range(inds[0], inds[-1])]

            for s in range(numbarvar):
                for (k,idx) in enumerate(range(inds[s],inds[s+1])):
                    # k enumerates the column-major vec() entries of block s.
                    barcsubk[idx] = k // dims['s'][s]
                    barcsubl[idx] = k % dims['s'][s]
                    barcsubj[idx] = s

            # filter out upper triangular part
            trilidx = [ idx for idx in range(len(barcsubk)) if barcsubk[idx] >= barcsubl[idx] ]
            barcsubj = [ barcsubj[k] for k in trilidx ]
            barcsubk = [ barcsubk[k] for k in trilidx ]
            barcsubl = [ barcsubl[k] for k in trilidx ]
            barcval = [ barcval[k] for k in trilidx ]

            task.putbarcblocktriplet(len(trilidx), barcsubj, barcsubk, barcsubl, barcval)

            # Constraint coefficients of the barvars, read from the
            # semidefinite block of G (transposed so that CCS columns
            # correspond to the task constraints).
            Gst = Gs.T

            barasubi = len(Gst)*[ 0 ]
            barasubj = len(Gst)*[ 0 ]
            barasubk = len(Gst)*[ 0 ]
            barasubl = len(Gst)*[ 0 ]
            baraval = len(Gst)*[ 0.0 ]

            colptr, row, val = Gst.CCS

            for s in range(numbarvar):
                for j in range(ms[s]):
                    for idx in range(colptr[inds[s]+j], colptr[inds[s]+j+1]):
                        barasubi[idx] = row[idx]
                        barasubj[idx] = s
                        barasubk[idx] = j // dims['s'][s]
                        barasubl[idx] = j % dims['s'][s]
                        baraval[idx] = val[idx]

            # filter out upper triangular part
            trilidx = [ idx for (idx, (k,l)) in enumerate(zip(barasubk,barasubl)) if k >= l ]
            barasubi = [ barasubi[k] for k in trilidx ]
            barasubj = [ barasubj[k] for k in trilidx ]
            barasubk = [ barasubk[k] for k in trilidx ]
            barasubl = [ barasubl[k] for k in trilidx ]
            baraval = [ baraval[k] for k in trilidx ]

            task.putbarablocktriplet(len(trilidx), barasubi, barasubj, barasubk, barasubl, baraval)

            # Attach each quadratic cone to its block of scalar variables.
            for k in range(len(mq)):
                task.appendcone(mosek.conetype.quad, 0.0,
                                range(ml+sum(mq[:k]),ml+sum(mq[:k+1])))

            # Optional task dump for MOSEK support requests.
            if taskfile:
                task.writetask(taskfile)

            task.optimize()

            task.solutionsummary (mosek.streamtype.msg);

            solsta = task.getsolsta(mosek.soltype.itr)

            # Recover the primal x from the duals of the n equality
            # constraints, and z from the task variables / barvars.
            xu, xl, zq = n*[ 0.0 ], n*[ 0.0 ], sum(mq)*[ 0.0 ]
            task.getsolutionslice(mosek.soltype.itr, mosek.solitem.slc, 0, n, xl)
            task.getsolutionslice(mosek.soltype.itr, mosek.solitem.suc, 0, n, xu)
            task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, ml, dimx, zq)
            x = matrix(xu)-matrix(xl)
            zq = matrix(zq)

            for s in range(numbarvar):
                # getbarxj returns the lower-triangular part packed
                # column-wise; expand it to a full symmetric matrix.
                xx = (dims['s'][s]*(dims['s'][s] + 1) >> 1)*[0.0]
                task.getbarxj(mosek.soltype.itr, s, xx)

                xs = matrix(0.0, (dims['s'][s], dims['s'][s]))
                idx = 0
                for j in range(dims['s'][s]):
                    for i in range(j,dims['s'][s]):
                        xs[i,j] = xx[idx]
                        if i != j:
                            xs[j,i] = xx[idx]
                        idx += 1

                zq = matrix([zq, xs[:]])

            if ml:
                zl = ml*[ 0.0 ]
                task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, 0, ml, zl)
                zl = matrix(zl)
            else:
                zl = matrix(0.0, (0,1))

            if (solsta is mosek.solsta.unknown):
                return (solsta, None, None)
            else:
                return (solsta, x, matrix([zl, zq]))
def socp(c, Gl=None, hl=None, Gq=None, hq=None, taskfile=None, **kwargs):
    """
    Solves a pair of primal and dual SOCPs

        minimize    c'*x
        subject to  Gl*x + sl = hl
                    Gq[k]*x + sq[k] = hq[k],  k = 0, ..., N-1
                    sl >= 0,
                    sq[k] >= 0, k = 0, ..., N-1

        maximize    -hl'*zl - sum_k hq[k]'*zq[k]
        subject to  Gl'*zl + sum_k Gq[k]'*zq[k] + c = 0
                    zl >= 0,  zq[k] >= 0, k = 0, ..., N-1.

    using MOSEK 8.

        solsta, x, zl, zq = socp(c, Gl = None, hl = None, Gq = None,
                                 hq = None, taskfile=None)

    Return values

        solsta is a MOSEK solution status key.
            If solsta is mosek.solsta.optimal, then (x, zl, zq) contains
            the primal-dual solution.
            If solsta is mosek.solsta.prim_infeas_cer, then (x, zl, zq)
            is a certificate of dual infeasibility.
            If solsta is mosek.solsta.dual_infeas_cer, then (x, zl, zq)
            is a certificate of primal infeasibility.
            If solsta is mosek.solsta.unknown, then (x, zl, zq) are all
            None.
            Other return values for solsta include:
                mosek.solsta.dual_feas
                mosek.solsta.near_dual_feas
                mosek.solsta.near_optimal
                mosek.solsta.near_prim_and_dual_feas
                mosek.solsta.near_prim_feas
                mosek.solsta.prim_and_dual_feas
                mosek.solsta.prim_feas
            in which case the (x,y,z) value may not be well-defined.

        x, zl, zq  the primal-dual solution.

    Options are passed to MOSEK solvers via the msk.options dictionary,
    e.g., the following turns off output from the MOSEK solvers

        >>> msk.options = {mosek.iparam.log: 0}

    see the MOSEK Python API manual.

    Optionally, the interface can write a .task file, required for
    support questions on the MOSEK solver.
    """
    with mosek.Env() as env:
        # --- validate arguments and supply empty defaults ---------------
        if type(c) is not matrix or c.typecode != 'd' or c.size[1] != 1:
            raise TypeError("'c' must be a dense column matrix")
        n = c.size[0]
        if n < 1: raise ValueError("number of variables must be at least 1")
        if Gl is None: Gl = spmatrix([], [], [], (0,n), tc='d')
        if (type(Gl) is not matrix and type(Gl) is not spmatrix) or \
            Gl.typecode != 'd' or Gl.size[1] != n:
            raise TypeError("'Gl' must be a dense or sparse 'd' matrix "\
                "with %d columns" %n)
        ml = Gl.size[0]
        if hl is None: hl = matrix(0.0, (0,1))
        if type(hl) is not matrix or hl.typecode != 'd' or \
            hl.size != (ml,1):
            raise TypeError("'hl' must be a dense 'd' matrix of " \
                "size (%d,1)" %ml)
        if Gq is None: Gq = []
        if type(Gq) is not list or [ G for G in Gq if (type(G) is not matrix
            and type(G) is not spmatrix) or G.typecode != 'd' or
            G.size[1] != n ]:
            raise TypeError("'Gq' must be a list of sparse or dense 'd' "\
                "matrices with %d columns" %n)
        # mq[k] is the dimension of the k-th second-order cone.
        mq = [ G.size[0] for G in Gq ]
        a = [ k for k in range(len(mq)) if mq[k] == 0 ]
        if a: raise TypeError("the number of rows of Gq[%d] is zero" %a[0])
        if hq is None: hq = []
        if type(hq) is not list or len(hq) != len(mq) or [ h for h in hq if
            (type(h) is not matrix and type(h) is not spmatrix) or
            h.typecode != 'd' ]:
            raise TypeError("'hq' must be a list of %d dense or sparse "\
                "'d' matrices" %len(mq))
        a = [ k for k in range(len(mq)) if hq[k].size != (mq[k], 1) ]
        if a:
            k = a[0]
            raise TypeError("'hq[%d]' has size (%d,%d). Expected size "\
                "is (%d,1)." %(k, hq[k].size[0], hq[k].size[1], mq[k]))
        # --- stack the linear and cone blocks into one (G, h) pair ------
        N = ml + sum(mq)
        h = matrix(0.0, (N,1))
        # Use a dense G if any input block is dense, sparse otherwise.
        if type(Gl) is matrix or [ Gk for Gk in Gq if type(Gk) is matrix ]:
            G = matrix(0.0, (N, n))
        else:
            G = spmatrix([], [], [], (N, n), 'd')
        h[:ml] = hl
        G[:ml,:] = Gl
        ind = ml
        for k in range(len(mq)):
            h[ind : ind + mq[k]] = hq[k]
            G[ind : ind + mq[k], :] = Gq[k]
            ind += mq[k]
        # --- hand MOSEK the *dual* problem ------------------------------
        # MOSEK variables are (zl, zq); the n equality constraints encode
        # G'*z + c = 0 (bounds blc = buc = -c), and the objective
        # maximize -h'*z is expressed via the coefficient vector c = -h.
        bkc = n*[ mosek.boundkey.fx ]
        blc = list(-c)
        buc = list(-c)
        bkx = ml*[ mosek.boundkey.lo ] + sum(mq)*[ mosek.boundkey.fr ]
        blx = ml*[ 0.0 ] + sum(mq)*[ -inf ]
        bux = N*[ +inf ]
        c = -h
        # Column-compressed storage of G' gives the constraint matrix.
        colptr, asub, acof = sparse([G.T]).CCS
        aptrb, aptre = colptr[:-1], colptr[1:]
        with env.Task(0,0) as task:
            task.set_Stream (mosek.streamtype.log, streamprinter)
            # set MOSEK options
            options = kwargs.get('options',globals()['options'])
            for (param, val) in options.items():
                if str(param)[:6] == "iparam":
                    task.putintparam(param, val)
                elif str(param)[:6] == "dparam":
                    task.putdouparam(param, val)
                elif str(param)[:6] == "sparam":
                    task.putstrparam(param, val)
                else:
                    raise ValueError("invalid MOSEK parameter: "+str(param))
            task.inputdata (n, # number of constraints
                            N, # number of variables
                            list(c), # linear objective coefficients
                            0.0, # objective fixed value
                            list(aptrb),
                            list(aptre),
                            list(asub),
                            list(acof),
                            bkc,
                            blc,
                            buc,
                            bkx,
                            blx,
                            bux)
            task.putobjsense(mosek.objsense.maximize)
            # One quadratic cone per block of zq.
            for k in range(len(mq)):
                task.appendcone(mosek.conetype.quad, 0.0,
                                list(range(ml+sum(mq[:k]),ml+sum(mq[:k+1]))))
            if taskfile:
                task.writetask(taskfile)
            task.optimize()
            task.solutionsummary (mosek.streamtype.msg);
            solsta = task.getsolsta(mosek.soltype.itr)
            # --- recover the primal-dual solution -----------------------
            # x is the multiplier of the equality constraints, split by
            # MOSEK into upper (suc) and lower (slc) parts: x = xu - xl.
            xu, xl, zq = n*[0.0], n*[0.0], sum(mq)*[0.0]
            task.getsolutionslice(mosek.soltype.itr, mosek.solitem.slc, 0, n, xl)
            task.getsolutionslice(mosek.soltype.itr, mosek.solitem.suc, 0, n, xu)
            task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, ml, N, zq)
            x = matrix(xu) - matrix(xl)
            # Split the stacked cone multipliers back into per-cone blocks.
            zq = [ matrix(zq[sum(mq[:k]):sum(mq[:k+1])]) for k in range(len(mq)) ]
            if ml:
                zl = ml*[0.0]
                task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, 0, ml,
                                      zl)
                zl = matrix(zl)
            else:
                zl = matrix(0.0, (0,1))
            if (solsta is mosek.solsta.unknown):
                return (solsta, None, None, None)
            else:
                return (solsta, x, zl, zq)
def qp(P, q, G=None, h=None, A=None, b=None, taskfile=None, **kwargs):
    """
    Solves a quadratic program

        minimize    (1/2)*x'*P*x + q'*x
        subject to  G*x <= h
                    A*x = b.

    using MOSEK 8.

        solsta, x, z, y = qp(P, q, G=None, h=None, A=None, b=None,
                             taskfile=None)

    Return values

        solsta is a MOSEK solution status key.
            If solsta is mosek.solsta.optimal, then (x, y, z) contains the
            primal-dual solution.
            If solsta is mosek.solsta.prim_infeas_cer, then (x, y, z) is a
            certificate of primal infeasibility.
            If solsta is mosek.solsta.dual_infeas_cer, then (x, y, z) is a
            certificate of dual infeasibility.
            If solsta is mosek.solsta.unknown, then (x, y, z) are all None.
            Other return values for solsta include:
                mosek.solsta.dual_feas
                mosek.solsta.near_dual_feas
                mosek.solsta.near_optimal
                mosek.solsta.near_prim_and_dual_feas
                mosek.solsta.near_prim_feas
                mosek.solsta.prim_and_dual_feas
                mosek.solsta.prim_feas
            in which case the (x,y,z) value may not be well-defined.

        x, z, y  the primal-dual solution.

    Options are passed to MOSEK solvers via the msk.options dictionary,
    e.g., the following turns off output from the MOSEK solvers

        >>> msk.options = {mosek.iparam.log: 0}

    see the MOSEK Python API manual.

    Optionally, the interface can write a .task file, required for
    support questions on the MOSEK solver.
    """
    with mosek.Env() as env:
        # --- validate arguments and supply empty defaults ---------------
        if (type(P) is not matrix and type(P) is not spmatrix) or \
            P.typecode != 'd' or P.size[0] != P.size[1]:
            raise TypeError("'P' must be a square dense or sparse 'd' matrix ")
        n = P.size[0]
        if n < 1: raise ValueError("number of variables must be at least 1")
        if type(q) is not matrix or q.typecode != 'd' or q.size != (n,1):
            raise TypeError("'q' must be a 'd' matrix of size (%d,1)" %n)
        if G is None: G = spmatrix([], [], [], (0,n), 'd')
        if (type(G) is not matrix and type(G) is not spmatrix) or \
            G.typecode != 'd' or G.size[1] != n:
            raise TypeError("'G' must be a dense or sparse 'd' matrix "\
                "with %d columns" %n)
        m = G.size[0]
        if h is None: h = matrix(0.0, (0,1))
        if type(h) is not matrix or h.typecode != 'd' or h.size != (m,1):
            raise TypeError("'h' must be a 'd' matrix of size (%d,1)" %m)
        if A is None: A = spmatrix([], [], [], (0,n), 'd')
        if (type(A) is not matrix and type(A) is not spmatrix) or \
            A.typecode != 'd' or A.size[1] != n:
            raise TypeError("'A' must be a dense or sparse 'd' matrix "\
                "with %d columns" %n)
        p = A.size[0]
        if b is None: b = matrix(0.0, (0,1))
        if type(b) is not matrix or b.typecode != 'd' or b.size != (p,1):
            raise TypeError("'b' must be a dense matrix of size (%d,1)" %p)
        if m+p == 0: raise ValueError("m + p must be greater than 0")
        # --- MOSEK constraint data --------------------------------------
        # The m inequality rows are upper-bounded by h; the p equality
        # rows are fixed (blc = buc = b).
        c = list(q)
        bkc = m*[ mosek.boundkey.up ] + p*[ mosek.boundkey.fx ]
        blc = m*[ -inf ] + [ bi for bi in b ]
        buc = list(h)+list(b)
        bkx = n*[mosek.boundkey.fr]
        blx = n*[ -inf ]
        bux = n*[ +inf ]
        # Column-compressed storage of the stacked constraint matrix [G; A].
        colptr, asub, acof = sparse([G,A]).CCS
        aptrb, aptre = colptr[:-1], colptr[1:]
        with env.Task(0,0) as task:
            task.set_Stream (mosek.streamtype.log, streamprinter)
            # set MOSEK options
            options = kwargs.get('options',globals()['options'])
            for (param, val) in options.items():
                if str(param)[:6] == "iparam":
                    task.putintparam(param, val)
                elif str(param)[:6] == "dparam":
                    task.putdouparam(param, val)
                elif str(param)[:6] == "sparam":
                    task.putstrparam(param, val)
                else:
                    raise ValueError("invalid MOSEK parameter: "+str(param))
            task.inputdata (m+p, # number of constraints
                            n, # number of variables
                            c, # linear objective coefficients
                            0.0, # objective fixed value
                            list(aptrb),
                            list(aptre),
                            list(asub),
                            list(acof),
                            bkc,
                            blc,
                            buc,
                            bkx,
                            blx,
                            bux)
            # MOSEK expects only the lower-triangular part of P.
            Ps = sparse(P)
            I, J = Ps.I, Ps.J
            tril = [ k for k in range(len(I)) if I[k] >= J[k] ]
            task.putqobj(list(I[tril]), list(J[tril]), list(Ps.V[tril]))
            task.putobjsense(mosek.objsense.minimize)
            if taskfile:
                task.writetask(taskfile)
            task.optimize()
            task.solutionsummary (mosek.streamtype.msg);
            solsta = task.getsolsta(mosek.soltype.itr)
            # --- recover the primal-dual solution -----------------------
            x = n*[ 0.0 ]
            task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, 0, n, x)
            x = matrix(x)
            if m != 0:
                # z: multipliers of the inequality rows (upper bounds).
                z = m*[0.0]
                task.getsolutionslice(mosek.soltype.itr, mosek.solitem.suc, 0, m,
                                      z)
                z = matrix(z)
            else:
                z = matrix(0.0, (0,1))
            if p != 0:
                # y: multipliers of the equality rows, split by MOSEK into
                # upper (suc) and lower (slc) parts: y = yu - yl.
                yu, yl = p*[0.0], p*[0.0]
                task.getsolutionslice(mosek.soltype.itr, mosek.solitem.suc, m, m+p,
                                      yu)
                task.getsolutionslice(mosek.soltype.itr, mosek.solitem.slc, m, m+p,
                                      yl)
                y = matrix(yu) - matrix(yl)
            else:
                y = matrix(0.0, (0,1))
            if (solsta is mosek.solsta.unknown):
                return (solsta, None, None, None)
            else:
                return (solsta, x, z, y)
def ilp(c, G, h, A=None, b=None, I=None, taskfile=None, **kwargs):
    """
    Solves the mixed integer LP

        minimize    c'*x
        subject to  G*x + s = h
                    A*x = b
                    s >= 0
                    xi integer, forall i in I

    using MOSEK 8.

        solsta, x = ilp(c, G, h, A=None, b=None, I=None, taskfile=None).

    Input arguments

        G is m x n, h is m x 1, A is p x n, b is p x 1.  G and A must be
        dense or sparse 'd' matrices.  h and b are dense 'd' matrices
        with one column.  The default values for A and b are empty
        matrices with zero rows.

        I is a Python set with indices of integer elements of x.  By
        default all elements in x are constrained to be integer, i.e.,
        the default value of I is I = set(range(n)).

        Dual variables are not returned for MOSEK.

        Optionally, the interface can write a .task file, required for
        support questions on the MOSEK solver.

    Return values

        solsta is a MOSEK solution status key.
            If solsta is mosek.solsta.integer_optimal, then x contains
            the solution.
            If solsta is mosek.solsta.unknown, then x is None.
            Other return values for solsta include:
                mosek.solsta.near_integer_optimal
            in which case the x value may not be well-defined,
            c.f., section 17.48 of the MOSEK Python API manual.

        x is the solution

    Options are passed to MOSEK solvers via the msk.options dictionary,
    e.g., the following turns off output from the MOSEK solvers

        >>> msk.options = {mosek.iparam.log: 0}

    see the MOSEK Python API manual.
    """
    with mosek.Env() as env:
        # --- validate arguments and supply empty defaults ---------------
        if type(c) is not matrix or c.typecode != 'd' or c.size[1] != 1:
            raise TypeError("'c' must be a dense column matrix")
        n = c.size[0]
        if n < 1: raise ValueError("number of variables must be at least 1")
        if (type(G) is not matrix and type(G) is not spmatrix) or \
            G.typecode != 'd' or G.size[1] != n:
            raise TypeError("'G' must be a dense or sparse 'd' matrix "\
                "with %d columns" %n)
        m = G.size[0]
        if m == 0: raise ValueError("m cannot be 0")
        if type(h) is not matrix or h.typecode != 'd' or h.size != (m,1):
            raise TypeError("'h' must be a 'd' matrix of size (%d,1)" %m)
        if A is None: A = spmatrix([], [], [], (0,n), 'd')
        if (type(A) is not matrix and type(A) is not spmatrix) or \
            A.typecode != 'd' or A.size[1] != n:
            raise TypeError("'A' must be a dense or sparse 'd' matrix "\
                "with %d columns" %n)
        p = A.size[0]
        if b is None: b = matrix(0.0, (0,1))
        if type(b) is not matrix or b.typecode != 'd' or b.size != (p,1):
            raise TypeError("'b' must be a dense matrix of size (%d,1)" %p)
        # Validate the integer index set I.
        if I is None: I = set(range(n))
        if type(I) is not set:
            raise TypeError("invalid argument for integer index set")
        for i in I:
            if type(i) is not int:
                raise TypeError("invalid integer index set I")
        if len(I) > 0 and min(I) < 0: raise IndexError(
            "negative element in integer index set I")
        # BUG FIX: the error message below read "in in" (duplicated word).
        if len(I) > 0 and max(I) > n-1: raise IndexError(
            "maximum element in integer index set I is larger than n-1")
        # --- MOSEK constraint data --------------------------------------
        # The m inequality rows are upper-bounded by h; the p equality
        # rows are fixed (blc = buc = b).
        bkc = m*[ mosek.boundkey.up ] + p*[ mosek.boundkey.fx ]
        blc = m*[ -inf ] + [ bi for bi in b ]
        buc = list(h) + list(b)
        bkx = n*[mosek.boundkey.fr]
        blx = n*[ -inf ]
        bux = n*[ +inf ]
        # Column-compressed storage of the stacked constraint matrix [G; A].
        colptr, asub, acof = sparse([G,A]).CCS
        aptrb, aptre = colptr[:-1], colptr[1:]
        with env.Task(0,0) as task:
            task.set_Stream (mosek.streamtype.log, streamprinter)
            # set MOSEK options
            options = kwargs.get('options',globals()['options'])
            for (param, val) in options.items():
                if str(param)[:6] == "iparam":
                    task.putintparam(param, val)
                elif str(param)[:6] == "dparam":
                    task.putdouparam(param, val)
                elif str(param)[:6] == "sparam":
                    task.putstrparam(param, val)
                else:
                    raise ValueError("invalid MOSEK parameter: "+str(param))
            task.inputdata (m+p, # number of constraints
                            n, # number of variables
                            list(c), # linear objective coefficients
                            0.0, # objective fixed value
                            list(aptrb),
                            list(aptre),
                            list(asub),
                            list(acof),
                            bkc,
                            blc,
                            buc,
                            bkx,
                            blx,
                            bux)
            task.putobjsense(mosek.objsense.minimize)
            # Define integer variables
            if len(I) > 0:
                task.putvartypelist(list(I), len(I)*[ mosek.variabletype.type_int ])
            task.putintparam (mosek.iparam.mio_mode, mosek.miomode.satisfied)
            if taskfile:
                task.writetask(taskfile)
            task.optimize()
            task.solutionsummary (mosek.streamtype.msg);
            # Integer problems are solved with the integer solution type;
            # pure LPs (I empty) use the basic solution.
            if len(I) > 0:
                solsta = task.getsolsta(mosek.soltype.itg)
            else:
                solsta = task.getsolsta(mosek.soltype.bas)
            x = n*[0.0]
            if len(I) > 0:
                task.getsolutionslice(mosek.soltype.itg, mosek.solitem.xx, 0, n, x)
            else:
                task.getsolutionslice(mosek.soltype.bas, mosek.solitem.xx, 0, n, x)
            x = matrix(x)
            if (solsta is mosek.solsta.unknown):
                return (solsta, None)
            else:
                return (solsta, x)
|
cvxoptREPO_NAMEcvxoptPATH_START.@cvxopt_extracted@cvxopt-master@src@python@msk.py@.PATH_END.py
|
{
"filename": "export.py",
"repo_name": "vterron/lemon",
"repo_path": "lemon_extracted/lemon-master/export.py",
"type": "Python"
}
|
#! /usr/bin/env python2
# encoding: UTF-8
# Author: Victor Terron (c) 2020
# Email: `echo vt2rron1iaa32s | tr 132 @.e`
# License: GNU GPLv3
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
_DESCRIPTION = """
Print the light curve of an object stored in a LEMONdB.
This command takes as input the right ascension and declination of an object,
and finds the one stored in the LEMONdB that's close to these coordinates. It
then prints to standard output the (a) time, (b) differential magnitude and
(c) signal-to-noise ratio of all the points in the light curve of the object
in the specified photometric filter.
"""
import argparse
import astropy.time
import os.path
import prettytable
import re
import sys
import time
# LEMON modules
import database
import passband
import util.coords
import util
# Command-line interface: positional arguments identify the database, the
# object's sky coordinates and the photometric filter; options control
# rounding and the output destination.
parser = argparse.ArgumentParser(description=_DESCRIPTION)
parser.add_argument(
    "db_path",
    metavar="LEMON_DB",
    type=str,
    help="the LEMON database with the light curves",
)
parser.add_argument(
    "ra",
    metavar="<right ascension>",
    type=float,
    # BUG FIX: user-facing help text read "Right adcension" (typo).
    help="Right ascension of the astronomical object, in decimal degrees.",
)
parser.add_argument(
    "dec",
    metavar="<declination>",
    type=float,
    help="Declination of the astronomical object, in decimal degrees.",
)
parser.add_argument(
    "filter",
    metavar="<photometric filter>",
    type=passband.Passband,
    help="The name of the photometric filter.",
)
parser.add_argument(
    "--decimal_places",
    dest="places",
    type=int,
    default=3,
    help="Round floating-point numbers to this many decimal places.",
)
parser.add_argument(
    "--output_file",
    dest="output",
    type=argparse.FileType("w"),
    default=sys.stdout,
    help="File to which to write the light curve data points",
)
def main(arguments=None):
    """Print the light curve of the LEMONdB object nearest to the given
    coordinates.

    Parses *arguments* (defaults to sys.argv[1:]), locates the star in the
    database closest to the requested right ascension / declination, and
    writes a table of (UTC date, Julian date, differential magnitude, SNR)
    rows to the chosen output file.

    Raises ValueError if the star has no light curve in the requested
    photometric filter.
    """
    if arguments is None:
        arguments = sys.argv[1:]
    args = parser.parse_args(args=arguments)
    with database.LEMONdB(args.db_path) as db:
        # Echo the input coordinates in both decimal and sexagesimal form.
        print("Input coordinates:")
        print("α: {} ({})".format(args.ra, util.coords.ra_str(args.ra)))
        print("δ: {} ({})".format(args.dec, util.coords.dec_str(args.dec)))
        star_id, distance = db.star_closest_to_world_coords(args.ra, args.dec)
        star = db.get_star(star_id)
        print()
        print("Selected star:")
        print("ID: {}".format(star_id))
        print("α: {} ({})".format(star.ra, util.coords.ra_str(star.ra)))
        print("δ: {} ({})".format(star.dec, util.coords.dec_str(star.dec)))
        print("Distance to input coordinates: {} deg".format(distance))
        print()
        if args.output == sys.stdout:
            print("Light curve in {!r} photometric filter:".format(args.filter))
        star_diff = db.get_light_curve(star_id, args.filter)
        if star_diff is None:
            raise ValueError(
                "no light curve for {!r} photometric filter".format(args.filter)
            )
        table = prettytable.PrettyTable()
        table.field_names = ["Date (UTC)", "JD", "Δ Mag", "SNR"]
        def format_float(f):
            """Return f as a string rounded to args.places decimal places."""
            return "{:.{places}f}".format(f, places=args.places)
        # One table row per light-curve point; Unix time is converted to
        # Julian date via astropy.
        for unix_time, magnitude, snr in star_diff:
            jd = astropy.time.Time(unix_time, format="unix").jd
            table.add_row(
                [
                    util.utctime(unix_time, suffix=False),
                    format_float(jd),
                    format_float(magnitude),
                    format_float(snr),
                ]
            )
        args.output.write(str(table))
        args.output.write("\n")
        # When writing to a file (not stdout), confirm on the console.
        if args.output != sys.stdout:
            print(
                "Wrote light curve in {!r} photometric filter to {!r}.".format(
                    args.filter, args.output.name
                )
            )
# Script entry point: the process exit status is main()'s return value.
if __name__ == "__main__":
    sys.exit(main())
|
vterronREPO_NAMElemonPATH_START.@lemon_extracted@lemon-master@export.py@.PATH_END.py
|
{
"filename": "ah_bootstrap.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/ah_bootstrap.py",
"type": "Python"
}
|
"""
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]`` the presences of
that section, and options therein, determine the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken and by default the system-installed version
of astropy-helpers will be used (however, ``ah_bootstrap.use_astropy_helpers``
may be called manually from within the setup.py script).
This behavior can also be controlled using the ``--auto-use`` and
``--no-auto-use`` command-line flags. For clarity, an alias for
``--no-auto-use`` is ``--use-system-astropy-helpers``, and we recommend using
the latter if needed.
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import io
import locale
import os
import re
import subprocess as sp
import sys
from distutils import log
from distutils.debug import DEBUG
from configparser import ConfigParser, RawConfigParser
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
# This is the minimum Python version required for astropy-helpers
__minimum_python_version__ = (3, 5)
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
UPPER_VERSION_EXCLUSIVE = None
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
    ('auto_use', bool), ('path', str), ('download_if_needed', bool),
    ('index_url', str), ('use_git', bool), ('offline', bool),
    ('auto_upgrade', bool)
]
# Help text appended to any bootstrap failure message.
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
# Start off by parsing the setup.cfg file
SETUP_CFG = ConfigParser()
if os.path.exists('setup.cfg'):
    try:
        SETUP_CFG.read('setup.cfg')
    except Exception as e:
        if DEBUG:
            raise
        log.error(
            "Error reading setup.cfg: {0!r}\n{1} will not be "
            "automatically bootstrapped and package installation may fail."
            "\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
# We used package_name in the package template for a while instead of name
if SETUP_CFG.has_option('metadata', 'name'):
    parent_package = SETUP_CFG.get('metadata', 'name')
elif SETUP_CFG.has_option('metadata', 'package_name'):
    parent_package = SETUP_CFG.get('metadata', 'package_name')
else:
    parent_package = None
# Enforce the parent package's python_requires, if declared.
if SETUP_CFG.has_option('options', 'python_requires'):
    python_requires = SETUP_CFG.get('options', 'python_requires')
    # The python_requires key has a syntax that can be parsed by SpecifierSet
    # in the packaging package. However, we don't want to have to depend on that
    # package, so instead we can use setuptools (which bundles packaging). We
    # have to add 'python' to parse it with Requirement.
    from pkg_resources import Requirement
    req = Requirement.parse('python' + python_requires)
    # We want the Python version as a string, which we can get from the platform module
    import platform
    # strip off trailing '+' in case this is a dev install of python
    python_version = platform.python_version().strip('+')
    # allow pre-releases to count as 'new enough'
    if not req.specifier.contains(python_version, True):
        if parent_package is None:
            message = "ERROR: Python {} is required by this package\n".format(req.specifier)
        else:
            message = "ERROR: Python {} is required by {}\n".format(req.specifier, parent_package)
        sys.stderr.write(message)
        sys.exit(1)
# Enforce astropy-helpers' own minimum Python version.
if sys.version_info < __minimum_python_version__:
    if parent_package is None:
        message = "ERROR: Python {} or later is required by astropy-helpers\n".format(
            __minimum_python_version__)
    else:
        message = "ERROR: Python {} or later is required by astropy-helpers for {}\n".format(
            __minimum_python_version__, parent_package)
    sys.stderr.write(message)
    sys.exit(1)
# Types accepted for the bootstrap `path` option.
_str_types = (str, bytes)
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Check that setuptools 30.3 or later is present
from distutils.version import LooseVersion
try:
    import setuptools
    assert LooseVersion(setuptools.__version__) >= LooseVersion('30.3')
except (ImportError, AssertionError):
    sys.stderr.write("ERROR: setuptools 30.3 or later is required by astropy-helpers\n")
    sys.exit(1)
SETUPTOOLS_LT_42 = LooseVersion(setuptools.__version__) < LooseVersion('42')
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_boostrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
    import typing # noqa
except ImportError:
    pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
    import setuptools.py31compat # noqa
except ImportError:
    pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot
except:
    # Ignore if this fails for *any* reason* -- the bare except is deliberate
    # best-effort behavior here, not an oversight.
    pass
# End compatibility imports...
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
    def __init__(self, path=None, index_url=None, use_git=None, offline=None,
                 download_if_needed=None, auto_upgrade=None):
        """Store bootstrap options, filling in module-level defaults.

        ``offline=True`` forces ``download_if_needed`` and ``auto_upgrade``
        off; git use is disabled automatically when no ``.git`` directory
        exists next to this file (i.e. in a source release).
        """
        if path is None:
            path = PACKAGE_NAME
        # `path` may also be False, which disables the local-path strategies.
        if not (isinstance(path, _str_types) or path is False):
            raise TypeError('path must be a string or False')
        if not isinstance(path, str):
            fs_encoding = sys.getfilesystemencoding()
            path = path.decode(fs_encoding) # path to unicode
        self.path = path
        # Set other option attributes, using defaults where necessary
        self.index_url = index_url if index_url is not None else INDEX_URL
        self.offline = offline if offline is not None else OFFLINE
        # If offline=True, override download and auto-upgrade
        if self.offline:
            download_if_needed = False
            auto_upgrade = False
        self.download = (download_if_needed
                         if download_if_needed is not None
                         else DOWNLOAD_IF_NEEDED)
        self.auto_upgrade = (auto_upgrade
                             if auto_upgrade is not None else AUTO_UPGRADE)
        # If this is a release then the .git directory will not exist so we
        # should not use git.
        git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
        if use_git is None and not git_dir_exists:
            use_git = False
        self.use_git = use_git if use_git is not None else USE_GIT
        # Declared as False by default--later we check if astropy-helpers can be
        # upgraded from PyPI, but only if not using a source distribution (as in
        # the case of import from a git submodule)
        self.is_submodule = False
    @classmethod
    def main(cls, argv=None):
        """Build a bootstrapper from setup.cfg plus command-line overrides.

        Command-line flags take precedence over the ``[ah_bootstrap]``
        section.  If ``auto_use`` is enabled the bootstrapper is run
        immediately; either way the configured instance is returned.
        """
        if argv is None:
            argv = sys.argv
        config = cls.parse_config()
        config.update(cls.parse_command_line(argv))
        auto_use = config.pop('auto_use', False)
        bootstrapper = cls(**config)
        if auto_use:
            # Run the bootstrapper, otherwise the setup.py is using the old
            # use_astropy_helpers() interface, in which case it will run the
            # bootstrapper manually after reconfiguring it.
            bootstrapper.run()
        return bootstrapper
@classmethod
def parse_config(cls):
if not SETUP_CFG.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not SETUP_CFG.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = SETUP_CFG.getboolean('ah_bootstrap', option)
else:
value = SETUP_CFG.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
if '--auto-use' in argv:
config['auto_use'] = True
argv.remove('--auto-use')
if '--no-auto-use' in argv:
config['auto_use'] = False
argv.remove('--no-auto-use')
if '--use-system-astropy-helpers' in argv:
config['auto_use'] = False
argv.remove('--use-system-astropy-helpers')
return config
    def run(self):
        """Locate astropy_helpers and activate it on sys.path.

        Tries, in order: a local directory/submodule, a local source
        archive, and the package index.  Raises a SystemExit-derived
        error when no strategy yields a distribution.
        """
        strategies = ['local_directory', 'local_file', 'index']
        dist = None
        # First, remove any previously imported versions of astropy_helpers;
        # this is necessary for nested installs where one package's installer
        # is installing another package via setuptools.sandbox.run_setup, as in
        # the case of setup_requires
        for key in list(sys.modules):
            try:
                if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
                    del sys.modules[key]
            except AttributeError:
                # Sometimes mysterious non-string things can turn up in
                # sys.modules
                continue
        # Check to see if the path is a submodule
        self.is_submodule = self._check_submodule()
        # Try each strategy until one produces a distribution.
        for strategy in strategies:
            method = getattr(self, 'get_{0}_dist'.format(strategy))
            dist = method()
            if dist is not None:
                break
        else:
            raise _AHBootstrapSystemExit(
                "No source found for the {0!r} package; {0} must be "
                "available and importable as a prerequisite to building "
                "or installing this package.".format(PACKAGE_NAME))
        # This is a bit hacky, but if astropy_helpers was loaded from a
        # directory/submodule its Distribution object gets a "precedence" of
        # "DEVELOP_DIST". However, in other cases it gets a precedence of
        # "EGG_DIST". However, when activating the distribution it will only be
        # placed early on sys.path if it is treated as an EGG_DIST, so always
        # do that
        dist = dist.clone(precedence=pkg_resources.EGG_DIST)
        # Otherwise we found a version of astropy-helpers, so we're done
        # Just activate the found distribution on sys.path--if we did a
        # download this usually happens automatically but it doesn't hurt to
        # do it again
        # Note: Adding the dist to the global working set also activates it
        # (makes it importable on sys.path) by default.
        try:
            pkg_resources.working_set.add(dist, replace=True)
        except TypeError:
            # Some (much) older versions of setuptools do not have the
            # replace=True option here. These versions are old enough that all
            # bets may be off anyways, but it's easy enough to work around just
            # in case...
            if dist.key in pkg_resources.working_set.by_key:
                del pkg_resources.working_set.by_key[dist.key]
            pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
    def get_local_directory_dist(self):
        """
        Handle importing a vendored package from a subdirectory of the source
        distribution.

        Returns a distribution object, or None when ``self.path`` is not a
        directory or contains no copy of the package.
        """
        if not os.path.isdir(self.path):
            return
        log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
                 'submodule' if self.is_submodule else 'directory',
                 self.path))
        dist = self._directory_import()
        if dist is None:
            log.warn(
                'The requested path {0!r} for importing {1} does not '
                'exist, or does not contain a copy of the {1} '
                'package.'.format(self.path, PACKAGE_NAME))
        elif self.auto_upgrade and not self.is_submodule:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade
        return dist
    def get_local_file_dist(self):
        """
        Handle importing from a source archive; this also uses setup_requires
        but points easy_install directly to the source archive.

        Returns a distribution object, or None when ``self.path`` is not a
        file or the archive cannot be unpacked/imported.
        """
        if not os.path.isfile(self.path):
            return
        log.info('Attempting to unpack and import astropy_helpers from '
                 '{0!r}'.format(self.path))
        try:
            dist = self._do_download(find_links=[self.path])
        except Exception as e:
            if DEBUG:
                raise
            log.warn(
                'Failed to import {0} from the specified archive {1!r}: '
                '{2}'.format(PACKAGE_NAME, self.path, str(e)))
            dist = None
        if dist is not None and self.auto_upgrade:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade
        return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
    def _directory_import(self):
        """
        Import astropy_helpers from the given path, which will be added to
        sys.path.

        Returns the distribution found at that path (generating its egg-info
        metadata via ``setup.py egg_info`` if necessary), or `None` if no
        distribution could be found there.
        """
        path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
        # pkg_resources.working_set, since on older versions of setuptools this
        # will invoke a VersionConflict when trying to install an upgrade
        ws = pkg_resources.WorkingSet([])
        ws.add_entry(path)
        dist = ws.by_key.get(DIST_NAME)
        if dist is None:
            # We didn't find an egg-info/dist-info in the given path, but if a
            # setup.py exists we can generate it
            setup_py = os.path.join(path, 'setup.py')
            if os.path.isfile(setup_py):
                # We use subprocess instead of run_setup from setuptools to
                # avoid segmentation faults - see the following for more details:
                # https://github.com/cython/cython/issues/2104
                sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)
                for dist in pkg_resources.find_distributions(path, True):
                    # There should be only one...
                    return dist
        return dist
    def _do_download(self, version='', find_links=None):
        """
        Download and install the helpers package via setuptools'
        ``setup_requires`` machinery.

        Parameters
        ----------
        version : str, optional
            Exact version to request (``DIST_NAME==version``).  When empty,
            any version below ``UPPER_VERSION_EXCLUSIVE`` (if set) is allowed.
        find_links : list, optional
            Local paths/archives to install from instead of the index.

        Returns the installed distribution (looked up in the global working
        set), or `None` if it was not registered.  Raises ``Exception`` on
        download/installation failure (re-raises the original when ``DEBUG``).
        """
        if find_links:
            # Installing from a local archive: disable remote hosts entirely
            # and do not consult the package index.
            allow_hosts = ''
            index_url = None
        else:
            allow_hosts = None
            index_url = self.index_url
        # Annoyingly, setuptools will not handle other arguments to
        # Distribution (such as options) before handling setup_requires, so it
        # is not straightforward to programmatically augment the arguments which
        # are passed to easy_install
        class _Distribution(Distribution):
            # Inject the find_links/index_url/allow_hosts options (closed over
            # from the enclosing call) into the easy_install command options.
            def get_option_dict(self, command_name):
                opts = Distribution.get_option_dict(self, command_name)
                if command_name == 'easy_install':
                    if find_links is not None:
                        opts['find_links'] = ('setup script', find_links)
                    if index_url is not None:
                        opts['index_url'] = ('setup script', index_url)
                    # For setuptools>=42, the allow_hosts option can't
                    # be used because pip doesn't support it.
                    if allow_hosts is not None and SETUPTOOLS_LT_42:
                        opts['allow_hosts'] = ('setup script', allow_hosts)
                return opts
        if version:
            req = '{0}=={1}'.format(DIST_NAME, version)
        else:
            if UPPER_VERSION_EXCLUSIVE is None:
                req = DIST_NAME
            else:
                req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE)
        attrs = {'setup_requires': [req]}
        # NOTE: we need to parse the config file (e.g. setup.cfg) to make sure
        # it honours the options set in the [easy_install] section, and we need
        # to explicitly fetch the requirement eggs as setup_requires does not
        # get honored in recent versions of setuptools:
        # https://github.com/pypa/setuptools/issues/1273
        try:
            # Silence stdout/stderr unless debugging is enabled.
            context = _verbose if DEBUG else _silence
            with context():
                dist = _Distribution(attrs=attrs)
                try:
                    dist.parse_config_files(ignore_option_errors=True)
                    dist.fetch_build_eggs(req)
                except TypeError:
                    # On older versions of setuptools, ignore_option_errors
                    # doesn't exist, and the above two lines are not needed
                    # so we can just continue
                    pass
            # If the setup_requires succeeded it will have added the new dist to
            # the main working_set
            return pkg_resources.working_set.by_key.get(DIST_NAME)
        except Exception as e:
            if DEBUG:
                raise
            msg = 'Error retrieving {0} from {1}:\n{2}'
            if find_links:
                source = find_links[0]
            elif index_url != INDEX_URL:
                source = index_url
            else:
                source = 'PyPI'
            raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1: Status indicator: '-' for submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
_git_submodule_status_re = re.compile(
r'^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
r'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in (':', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
    """
    Raised by `run_cmd` when the requested command could not be found on the
    system.
    """
def run_cmd(cmd):
    """
    Run a command in a subprocess, given as a list of command-line
    arguments.

    Returns a ``(returncode, stdout, stderr)`` tuple, with the output streams
    decoded to text.
    """
    try:
        proc = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        # XXX: May block if either stdout or stderr fill their buffers;
        # however for the commands this is currently used for that is
        # unlikely (they should have very brief output)
        stdout, stderr = proc.communicate()
    except OSError as e:
        if DEBUG:
            raise

        if e.errno == errno.ENOENT:
            msg = 'Command not found: `{0}`'.format(' '.join(cmd))
            raise _CommandNotFound(msg, cmd)
        else:
            raise _AHBootstrapSystemExit(
                'An unexpected error occurred when running the '
                '`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))

    # Determining the encoding can fail if the default locale is not
    # configured properly; see https://github.com/astropy/astropy/issues/2749.
    # For the purposes under consideration 'latin1' is an acceptable fallback.
    try:
        stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
    except ValueError:
        # Due to an OSX oddity locale.getdefaultlocale() can also crash
        # depending on the user's locale/language settings. See:
        # https://bugs.python.org/issue18378
        stdio_encoding = 'latin1'

    # Unlikely to fail at this point but even then let's be flexible
    if not isinstance(stdout, str):
        stdout = stdout.decode(stdio_encoding, 'replace')
    if not isinstance(stderr, str):
        stderr = stderr.decode(stdio_encoding, 'replace')

    return (proc.returncode, stdout, stderr)
def _next_version(version):
    """
    Given a parsed version from pkg_resources.parse_version, return the
    version string of the next minor release (micro version reset to zero).

    Examples
    ========
    >>> _next_version(pkg_resources.parse_version('1.2.3'))
    '1.3.0'
    """
    if hasattr(version, 'base_version'):
        # New version parsing from setuptools >= 8.0
        base = version.base_version
        parts = base.split('.') if base else []
    else:
        # Old-style parsed versions iterate over string components; stop at
        # the first '*' marker component.
        parts = []
        for part in version:
            if part.startswith('*'):
                break
            parts.append(part)

    numbers = [int(p) for p in parts]
    # Pad out to at least (major, minor, micro).
    while len(numbers) < 3:
        numbers.append(0)

    major, minor = numbers[0], numbers[1]
    return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
    """A writeable file-like object that silently discards all output."""

    errors = ''  # Required for Python 3.x
    encoding = 'utf-8'

    def write(self, s):
        """Discard *s*."""
        pass

    def flush(self):
        """No-op; nothing is ever buffered."""
        pass
@contextlib.contextmanager
def _verbose():
    """A no-op context manager: leaves stdout/stderr untouched."""
    yield
@contextlib.contextmanager
def _silence():
    """A context manager that silences sys.stdout and sys.stderr."""
    real_stdout = sys.stdout
    real_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    try:
        yield
    finally:
        # Restore the real streams whether or not the body raised, so that
        # exception handling and later output work normally.
        sys.stdout = real_stdout
        sys.stderr = real_stderr
class _AHBootstrapSystemExit(SystemExit):
    """
    SystemExit variant that appends the generic bootstrap help message to
    whatever error message it is given.
    """

    def __init__(self, *args):
        if args:
            msg = args[0]
        else:
            msg = 'An unknown problem occurred bootstrapping astropy_helpers.'

        msg += '\n' + _err_help_msg
        super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
# Module-level bootstrapper instance created at import time; replaced with a
# reconfigured instance by `use_astropy_helpers` below.
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
    """
    Ensure that the `astropy_helpers` module is available and is importable.
    This supports automatic submodule initialization if astropy_helpers is
    included in a project as a git submodule, or will download it from PyPI if
    necessary.

    Parameters
    ----------
    path : str or None, optional
        A filesystem path relative to the root of the project's source code
        that should be added to `sys.path` so that `astropy_helpers` can be
        imported from that path.

        If the path is a git submodule it will automatically be initialized
        and/or updated.

        The path may also be to a ``.tar.gz`` archive of the astropy_helpers
        source distribution.  In this case the archive is automatically
        unpacked and made temporarily available on `sys.path` as a ``.egg``
        archive.

        If `None` skip straight to downloading.

    download_if_needed : bool, optional
        If the provided filesystem path is not found an attempt will be made to
        download astropy_helpers from PyPI.  It will then be made temporarily
        available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools).  If the ``--offline`` option
        is given at the command line the value of this argument is overridden
        to `False`.

    index_url : str, optional
        If provided, use a different URL for the Python package index than the
        main PyPI server.

    use_git : bool, optional
        If `False` no git commands will be used--this effectively disables
        support for git submodules. If the ``--no-git`` option is given at the
        command line the value of this argument is overridden to `False`.

    auto_upgrade : bool, optional
        By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
        releases to astropy-helpers on PyPI and use the patched version over
        any bundled versions. Setting this to `False` will disable that
        functionality. If the ``--offline`` option is given at the command line
        the value of this argument is overridden to `False`.

    offline : bool, optional
        If `True`, disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule.  (NOTE(review): the previous docstring
        said "If `False` ... Defaults to `True`", which contradicts the
        ``--offline`` semantics described above -- confirm the actual default
        against the `_Bootstrapper` configuration.)
    """
    global BOOTSTRAPPER
    # Start from the current bootstrapper's configuration and overlay any
    # caller-supplied overrides.
    config = BOOTSTRAPPER.config
    config.update(**kwargs)
    # Create a new bootstrapper with the updated configuration and run it
    BOOTSTRAPPER = _Bootstrapper(**config)
    BOOTSTRAPPER.run()
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@ah_bootstrap.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "rodluger/planetplanet",
"repo_path": "planetplanet_extracted/planetplanet-master/README.md",
"type": "Markdown"
}
|
<div align="center">
<img src="https://rodluger.github.io/planetplanet/_images/title.gif" width="400px">
</img>
<br/><br/>
<p><a href="https://travis-ci.org/rodluger/planetplanet"><img src="https://travis-ci.org/rodluger/planetplanet.svg?branch=master"/></a>
<a href="http://dx.doi.org/10.5281/zenodo.997391"><img src="https://img.shields.io/badge/doi-zenodo-568AB8.svg?style=flat"/></a>
<a href="https://raw.githubusercontent.com/rodluger/planetplanet/master/LICENSE?token=AI5FKxGMJTv55h2EE_AuXW2gofnIaRDeks5Zm0unwA%3D%3D"><img src="https://img.shields.io/badge/license-GPL-a2a2a2.svg?style=flat"/></a>
<a href="https://rodluger.github.io/planetplanet/PPOs.pdf"><img src="https://img.shields.io/badge/read-the_paper-fd7709.svg?style=flat"/></a>
<a href="https://rodluger.github.io/planetplanet/index.html"><img src="https://img.shields.io/badge/read-the_docs-AF5891.svg?style=flat"/></a>
</p>
</div>
# Overview
`planetplanet` is a general photodynamical code for modeling exoplanet transits, secondary eclipses, phase curves, and exomoons, as well as eclipsing binaries, circumbinary planets, and more. The code was originally developed to model planet-planet occultation (PPO) light curves for the TRAPPIST-1 system. During a PPO, a planet
occults (transits) the disk of another planet in the same planetary system, blocking its thermal
(and reflected) light, which can be measured photometrically by a distant observer.
`planetplanet` is coded in C and wrapped in a user-friendly Python interface. Once installed, generating light curves is as easy as
```python
import planetplanet as pp
import numpy as np
import matplotlib.pyplot as pl
star = pp.Star('A', m = 0.1, r = 0.1, limbdark = [0.4, 0.26])
planet = pp.Planet('b', m = 1, r = 1, t0 = 0., per = 3.)
system = pp.System(star, planet)
time = np.arange(-1., 1., 0.0001)
system.compute(time)
system.plot_occultation('A', time = 0)
pl.show()
```
<div align="center">
<img src="https://rodluger.github.io/misc/transit.gif" alt="Exoplanet transit light curve" width="500px">
</div>
Please check out the [documentation](https://rodluger.github.io/planetplanet/index.html) or read the [paper](https://rodluger.github.io/planetplanet/PPOs.pdf) for more information.
# Installation
The `planetplanet` code is now `pip`-installable:
```
pip install planetplanet
```
Alternatively, to install from source:
```
git clone git@github.com:rodluger/planetplanet.git
cd planetplanet
git submodule init && git submodule update
python setup.py develop
```
Note that you may need to install the [GNU Scientific Library](https://www.gnu.org/software/gsl/). On a Mac, it's as simple as
```
brew install gsl
```
# Just for fun
Here's an example of a planet-planet occultation [**[code]**](https://github.com/rodluger/planetplanet/blob/master/scripts/occultation.py):
<div align="center">
<img src="https://rodluger.github.io/misc/ppo.gif" alt="Planet-planet occultation" width="500px">
</div>
And here's a wacky example of a transit of a circumbinary exomoon [**[code]**](https://github.com/rodluger/planetplanet/blob/master/scripts/circumbinary_exomoon.py):
<div align="center">
<img src="https://rodluger.github.io/misc/cbexomoon.gif" alt="Circumbinary exomoon" width="500px">
</div>
|
rodlugerREPO_NAMEplanetplanetPATH_START.@planetplanet_extracted@planetplanet-master@README.md@.PATH_END.py
|
{
"filename": "test_corrs.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/time/tests/test_corrs.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord, solar_system_ephemeris
from astropy.time import Time, TimeDelta
from astropy.utils import iers
from astropy.utils.compat.optional_deps import HAS_JPLEPHEM
class TestHelioBaryCentric:
    """
    Verify time offsets to the solar system barycentre and the heliocentre.
    Uses the WHT observing site.

    Tests are against values returned at time of initial creation of these
    routines.  They agree to an independent SLALIB based implementation
    to 20 microseconds.
    """

    @classmethod
    def setup_class(cls):
        # Disable IERS auto-download so the tests run offline; the previous
        # setting is restored in teardown_class.
        cls.orig_auto_download = iers.conf.auto_download
        iers.conf.auto_download = False

    @classmethod
    def teardown_class(cls):
        iers.conf.auto_download = cls.orig_auto_download

    def setup_method(self):
        # WHT observing site (see class docstring).
        site = EarthLocation(342.12 * u.deg, 28.758333333333333 * u.deg, 2327 * u.m)
        self.obstime = Time("2013-02-02T23:00", location=site)
        self.obstime2 = Time("2013-08-02T23:00", location=site)
        self.obstimeArr = Time(["2013-02-02T23:00", "2013-08-02T23:00"], location=site)
        self.star = SkyCoord(
            "08:08:08 +32:00:00", unit=(u.hour, u.degree), frame="icrs"
        )

    def test_heliocentric(self):
        delay = self.obstime.light_travel_time(self.star, "heliocentric")
        assert isinstance(delay, TimeDelta)
        assert delay.scale == "tdb"
        assert abs(delay - 461.43037870502235 * u.s) < 1.0 * u.us

    def test_barycentric(self):
        delay = self.obstime.light_travel_time(self.star, "barycentric")
        assert isinstance(delay, TimeDelta)
        assert delay.scale == "tdb"
        assert abs(delay - 460.58538779827836 * u.s) < 1.0 * u.us

    def test_arrays(self):
        # Array-valued times must agree with the corresponding scalar calls.
        bary1 = self.obstime.light_travel_time(self.star, "barycentric")
        bary2 = self.obstime2.light_travel_time(self.star, "barycentric")
        bary_arr = self.obstimeArr.light_travel_time(self.star, "barycentric")
        helio1 = self.obstime.light_travel_time(self.star, "heliocentric")
        helio2 = self.obstime2.light_travel_time(self.star, "heliocentric")
        helio_arr = self.obstimeArr.light_travel_time(self.star, "heliocentric")
        assert helio_arr[0] - helio1 < 1.0 * u.us
        assert helio_arr[1] - helio2 < 1.0 * u.us
        assert bary_arr[0] - bary1 < 1.0 * u.us
        assert bary_arr[1] - bary2 < 1.0 * u.us

    @pytest.mark.remote_data
    @pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
    def test_ephemerides(self):
        builtin = self.obstime.light_travel_time(self.star, "barycentric")
        with solar_system_ephemeris.set("jpl"):
            jpl = self.obstime.light_travel_time(
                self.star, "barycentric", ephemeris="jpl"
            )
        # should differ by less than 0.1 ms, but not be the same
        assert abs(builtin - jpl) < 1.0 * u.ms
        assert abs(builtin - jpl) > 1.0 * u.us
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@time@tests@test_corrs.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "dnarayanan/powderday",
"repo_path": "powderday_extracted/powderday-master/README.md",
"type": "Markdown"
}
|
Welcome to Powderday!
Powderday is a dust radiative transfer package designed to interface
with galaxy formation simulations in order to produce spectral energy
distributions, as well as realistic images.
For documentation, please see the docs at:
[http://powderday.readthedocs.org](http://powderday.readthedocs.org)
The manual contains full installation instructions.
|
dnarayananREPO_NAMEpowderdayPATH_START.@powderday_extracted@powderday-master@README.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sunburst/marker/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# On Python < 3.7 there is no module-level __getattr__ (PEP 562), and static
# type checkers need real imports, so import all validator classes eagerly in
# those cases; otherwise defer them via plotly's lazy-import helper to keep
# import time down.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._showscale import ShowscaleValidator
    from ._reversescale import ReversescaleValidator
    from ._pattern import PatternValidator
    from ._line import LineValidator
    from ._colorssrc import ColorssrcValidator
    from ._colorscale import ColorscaleValidator
    from ._colors import ColorsValidator
    from ._colorbar import ColorbarValidator
    from ._coloraxis import ColoraxisValidator
    from ._cmin import CminValidator
    from ._cmid import CmidValidator
    from ._cmax import CmaxValidator
    from ._cauto import CautoValidator
    from ._autocolorscale import AutocolorscaleValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import installs __getattr__/__dir__ hooks that import each
    # validator submodule lazily on first attribute access.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._showscale.ShowscaleValidator",
            "._reversescale.ReversescaleValidator",
            "._pattern.PatternValidator",
            "._line.LineValidator",
            "._colorssrc.ColorssrcValidator",
            "._colorscale.ColorscaleValidator",
            "._colors.ColorsValidator",
            "._colorbar.ColorbarValidator",
            "._coloraxis.ColoraxisValidator",
            "._cmin.CminValidator",
            "._cmid.CmidValidator",
            "._cmax.CmaxValidator",
            "._cauto.CautoValidator",
            "._autocolorscale.AutocolorscaleValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sunburst@marker@__init__.py@.PATH_END.py
|
{
"filename": "actor.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/mayavi/components/actor.py",
"type": "Python"
}
|
"""A simple actor component.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2020, Enthought, Inc.
# License: BSD Style.
import vtk
# Enthought library imports.
from traits.api import Instance, Bool, Enum
from tvtk.api import tvtk
from traits.api import DelegatesTo
# Local imports.
from mayavi.core.component import Component
from mayavi.core.source import Source
from mayavi.core.utils import get_new_output
######################################################################
# `Actor` class.
######################################################################
class Actor(Component):
# The version of this class. Used for persistence.
__version__ = 0
# The mapper.
mapper = Instance(tvtk.Mapper, record=True)
# The actor.
actor = Instance(tvtk.Actor, record=True)
# The actor's property.
property = Instance(tvtk.Property, record=True)
# FIXME: None of the texture stuff is picklable. This will NOT be
# supported till the pickling infrastructure is cleaned up and
# fixed.
# If texturing is enabled for the actor or not
enable_texture = Bool(False, desc='if texturing is enabled')
# The source of the texture's image
texture_source_object = Instance(Source)
# The actors texture
texture = Instance(tvtk.Texture, record=True)
# The texture coord generation mode.
tcoord_generator_mode = Enum('none', 'cylinder', 'sphere', 'plane',
desc='the mode for texture coord generation')
# Texture coord generator.
tcoord_generator = Instance(tvtk.Object, allow_none=True)
# Composite data filter.
comp_data_geom_filter = Instance(tvtk.CompositeDataGeometryFilter)
######################################################################
# `object` interface
######################################################################
def __get_pure_state__(self):
d = super(Actor, self).__get_pure_state__()
for attr in ('texture', 'texture_source_object',
'enable_texture', 'tcoord_generator_mode',
'tcoord_generator'):
d.pop(attr,None)
return d
######################################################################
# `Component` interface
######################################################################
def setup_pipeline(self):
"""Override this method so that it *creates* its tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be setup. So upstream data
will not be available. The idea is that you simply create the
basic objects and setup those parts of the pipeline not
dependent on upstream sources and filters.
"""
self.mapper = tvtk.PolyDataMapper(use_lookup_table_scalar_range=1)
self.actor = tvtk.Actor()
self.property = self.actor.property
self.texture = tvtk.Texture()
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when the input fires a
`pipeline_changed` event.
"""
if (len(self.inputs) == 0) or \
(len(self.inputs[0].outputs) == 0):
return
input = self.inputs[0].outputs[0]
if input is None:
return
self._connect_mapper(input)
self._tcoord_generator_mode_changed(self.tcoord_generator_mode)
self.render()
def update_data(self):
"""Override this method to do what is necessary when upstream
data changes.
This method is invoked (automatically) when any of the inputs
sends a `data_changed` event.
"""
# Invoke render to update any changes.
from mayavi.modules.outline import Outline
from mayavi.components.glyph import Glyph
#FIXME: A bad hack, but without these checks results in seg fault
input = self.inputs[0]
if isinstance(input, Outline) or isinstance(input, Glyph):
self.mapper.update(0)
else:
self.mapper.update()
self.render()
######################################################################
# `Actor` interface
######################################################################
def set_lut(self, lut):
"""Set the Lookup table to use."""
self.mapper.lookup_table = lut
# A hack to avoid a problem with the VRML output that seems to
# ignore the use_lookup_table_scalar_range setting
# on the mapping
self.mapper.scalar_range = lut.table_range
######################################################################
# Non-public interface.
######################################################################
def _setup_handlers(self, old, new):
if old is not None:
old.on_trait_change(self.render, remove=True)
new.on_trait_change(self.render)
def _get_correct_input(self, input):
do = get_new_output(input)
if do.is_a('vtkCompositeDataSet'):
cdgf = self.comp_data_geom_filter
cdgf.input_connection = input.output_port
return cdgf
else:
return input
def _comp_data_geom_filter_default(self):
return tvtk.CompositeDataGeometryFilter()
def _connect_mapper(self, input):
if input is None:
return
inp = self._get_correct_input(input)
self.configure_input(self.mapper, inp)
def _mapper_changed(self, old, new):
# Setup the handlers.
self._setup_handlers(old, new)
# Setup the LUT.
if old is not None:
self.set_lut(old.lookup_table)
# Setup the inputs to the mapper.
if (len(self.inputs) > 0) and (len(self.inputs[0].outputs) > 0):
self._connect_mapper(self.inputs[0].outputs[0])
# Setup the actor's mapper.
actor = self.actor
if actor is not None:
actor.mapper = new
self.render()
def _actor_changed(self, old, new):
# Setup the handlers.
self._setup_handlers(old, new)
# Set the mapper.
mapper = self.mapper
if mapper is not None:
new.mapper = mapper
# Set the property.
prop = self.property
if prop is not None:
new.property = prop
# Setup the `actors` trait.
self.actors = [new]
def _property_changed(self, old, new):
# Setup the handlers.
self._setup_handlers(old, new)
# Setup the actor.
actor = self.actor
if new is not actor.property:
actor.property = new
def _foreground_changed_for_scene(self, old, new):
# Change the default color for the actor.
self.property.color = new
self.render()
def _scene_changed(self, old, new):
super(Actor, self)._scene_changed(old, new)
self._foreground_changed_for_scene(None, new.foreground)
def _enable_texture_changed(self, value):
if self.texture_source_object is None :
self.actor.texture = None
return
if value:
self.actor.texture = self.texture
else:
self.actor.texture = None
def _can_object_give_image_data(self, source):
if source is None:
return False
if not isinstance(source, Source):
return False
if source.get_output_dataset().is_a('vtkImageData'):
return True
return False
def _change_texture_input(self):
if self._can_object_give_image_data(self.texture_source_object):
self.configure_connection(
self.texture, self.texture_source_object
)
self.actor.texture = self.texture
else:
self.texture_source_object = None
def _texture_source_object_changed(self,old,new):
if old is not None :
old.on_trait_change(self._change_texture_input,
'pipeline_changed',
remove=True)
if new is not None :
new.on_trait_change(self._change_texture_input,
'pipeline_changed' )
if new is not None:
self._change_texture_input()
else:
self.actor.texture = None
self.texture.input = None
self.texture.input_connection = None
def _texture_changed(self,value):
# Setup the actor's texture.
actor = self.actor
if actor is not None and (value.input is not None
or value.input_connection is not None):
actor.texture = value
self.texture.on_trait_change(self.render)
self.render()
    def _tcoord_generator_mode_changed(self, value):
        # Switch the texture-coordinate generator ('none', 'cylinder',
        # 'sphere' or 'plane') and re-plumb the pipeline accordingly.
        inp = self.inputs
        # Nothing to do until the pipeline actually has an input.
        if (len(inp) == 0) or \
                (len(inp[0].outputs) == 0):
            return
        # Detach the render handler from the previous generator, if any.
        old_tg = self.tcoord_generator
        if old_tg is not None:
            old_tg.on_trait_change(self.render, remove=True)
        if value == 'none':
            # Bypass tcoord generation: connect the mapper straight to the
            # pipeline input.
            self.tcoord_generator = None
            self._connect_mapper(inp[0].outputs[0])
        else:
            # Instantiate the requested generator and splice it between the
            # input and the mapper.
            tg_dict = {'cylinder': tvtk.TextureMapToCylinder,
                       'sphere': tvtk.TextureMapToSphere,
                       'plane': tvtk.TextureMapToPlane}
            tg = tg_dict[value]()
            self.tcoord_generator = tg
            actual_input = self._get_correct_input(inp[0].outputs[0])
            self.configure_connection(tg, actual_input)
            self.configure_connection(self.mapper, tg)
        # Re-render when the (new) generator changes, and once right now.
        tg = self.tcoord_generator
        if tg is not None:
            tg.on_trait_change(self.render)
        self.render()
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@mayavi@components@actor.py@.PATH_END.py
|
{
"filename": "Space.py",
"repo_name": "ledatelescope/bifrost",
"repo_path": "bifrost_extracted/bifrost-master/python/bifrost/Space.py",
"type": "Python"
}
|
# Copyright (c) 2016-2023, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bifrost.libbifrost import _bf, _th
from typing import Union
from bifrost import telemetry
telemetry.track_module()
class Space(object):
    """Thin wrapper around Bifrost's memory-space enumeration.

    Accepts either a space name as a string or an existing enum/integer
    value and normalizes it to a ``BFspace_enum``.
    """
    def __init__(self, s: Union[str,_th.BFspace_enum,_bf.BFspace]):
        if isinstance(s, str):
            # Resolve the name on the enum (unknown names raise
            # AttributeError, as before).
            self._space = getattr(_th.BFspace_enum, s)
            return
        if isinstance(s, (_th.BFspace_enum, _bf.BFspace, int)):
            self._space = _th.BFspace_enum(s)
            return
        raise ValueError(f"'{s}' is not a space")
    def as_BFspace(self) -> _bf.BFspace:
        """Return the ctypes-level BFspace value for this space."""
        return _bf.BFspace(self._space.value)
    def __str__(self):
        return self._space.name
|
ledatelescopeREPO_NAMEbifrostPATH_START.@bifrost_extracted@bifrost-master@python@bifrost@Space.py@.PATH_END.py
|
{
"filename": "mrsa.py",
"repo_name": "HajimeKawahara/sot",
"repo_path": "sot_extracted/sot-master/src/sot/nmfmap/mrsa.py",
"type": "Python"
}
|
import numpy as np
def mrsa(vec1,vec2):
v1=(vec1-np.mean(vec1))
v2=(vec2-np.mean(vec2))
v1norm=np.sqrt(np.dot(v1,v1))
v2norm=np.sqrt(np.dot(v2,v2))
naib=np.dot(v1,v2)/v1norm/v2norm
if naib>1.0:
naib=1.0
return 1.0/np.pi*np.arccos(naib)
def mrsa_meanX(X, initfile="Xinit.npz"):
    """Mean best-match MRSA between stored reference spectra and rows of X.

    For every row of the reference matrix (loaded from ``initfile``, key
    ``'arr_0'``) the minimum MRSA against all rows of ``X`` is taken; the
    mean of those minima is returned.

    Parameters
    ----------
    X : ndarray
        Candidate spectra, one per row.
    initfile : str, optional
        Path of the ``.npz`` file holding the reference matrix.  Defaults to
        "Xinit.npz", matching the previously hard-coded behavior.

    Returns
    -------
    numpy floating scalar
    """
    Xini = np.load(initfile)["arr_0"]
    mrsa_arr = []
    # Generalized from the original hard-coded 3x3 loops: iterate over
    # however many rows each matrix actually has.
    for i in range(Xini.shape[0]):
        best = min(mrsa(Xini[i, :], X[j, :]) for j in range(X.shape[0]))
        mrsa_arr.append(best)
    return np.mean(np.array(mrsa_arr))
if __name__=='__main__':
    # Command-line entry point: compute the mean MRSA between the stored
    # reference map (Xinit.npz) and the X matrix recovered in the given
    # archive, and print it.
    import read_data
    import sys
    # axfile="npz/T116/T116_L2-VRLD_A-2.0X4.0j99000.npz"
    axfile=sys.argv[1]  # path to the .npz archive holding A, X and residuals
    A,X,resall=read_data.readax(axfile)
    mmrsa=mrsa_meanX(X)
    print(mmrsa)
|
HajimeKawaharaREPO_NAMEsotPATH_START.@sot_extracted@sot-master@src@sot@nmfmap@mrsa.py@.PATH_END.py
|
{
"filename": "_ticklen.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermap/marker/colorbar/_ticklen.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `ticklen` attribute of scattermap.marker.colorbar.

    Accepts non-negative numbers; edits default to triggering a 'calc'
    recomputation unless the caller overrides `edit_type`.
    """
    def __init__(
        self, plotly_name="ticklen", parent_name="scattermap.marker.colorbar", **kwargs
    ):
        # Fill in defaults only when the caller did not supply them, then
        # forward everything to the generic number validator.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("min", 0)
        super(TicklenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermap@marker@colorbar@_ticklen.py@.PATH_END.py
|
{
"filename": "_volume.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/layout/template/data/_volume.py",
"type": "Python"
}
|
from plotly.graph_objs import Volume
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@layout@template@data@_volume.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/assign_mtwcs/tests/data/__init__.py",
"type": "Python"
}
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@assign_mtwcs@tests@data@__init__.py@.PATH_END.py
|
|
{
"filename": "schema.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/agents/schema.py",
"type": "Python"
}
|
from typing import Any, Dict, List, Tuple
from langchain_core.agents import AgentAction
from langchain_core.prompts.chat import ChatPromptTemplate
class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
    """Chat prompt template that renders the agent scratchpad.

    The scratchpad is the textual log of previous (action, observation)
    pairs, injected into the prompt as the `agent_scratchpad` variable.
    """
    @classmethod
    def is_lc_serializable(cls) -> bool:
        return False
    def _construct_agent_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> str:
        """Render the (action, observation) history into a single string."""
        if not intermediate_steps:
            return ""
        thoughts = "".join(
            f"{action.log}\nObservation: {observation}\nThought: "
            for action, observation in intermediate_steps
        )
        return (
            f"This was your previous work "
            f"(but I haven't seen any of it! I only see what "
            f"you return as final answer):\n{thoughts}"
        )
    def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]:
        """Replace `intermediate_steps` with the rendered scratchpad text."""
        steps = kwargs.pop("intermediate_steps")
        kwargs["agent_scratchpad"] = self._construct_agent_scratchpad(steps)
        return kwargs
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@agents@schema.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/distribute/experimental/rpc/README.md",
"type": "Markdown"
}
|
## Experimental TensorFlow RPC Ops.
This directory contains kernels for RPC Ops in TensorFlow distribute package.
Note*: These ops may move to a separate repository in the future.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@distribute@experimental@rpc@README.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergl/marker/colorbar/tickformatstop/__init__.py",
"type": "Python"
}
|
import sys
# On Python < 3.7 module-level `__getattr__` (PEP 562) is unavailable, so
# every validator must be imported eagerly; on newer interpreters plotly's
# `relative_import` helper loads them lazily to keep import time low.
if sys.version_info < (3, 7):
    from ._value import ValueValidator
    from ._templateitemname import TemplateitemnameValidator
    from ._name import NameValidator
    from ._enabled import EnabledValidator
    from ._dtickrange import DtickrangeValidator
else:
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._value.ValueValidator",
            "._templateitemname.TemplateitemnameValidator",
            "._name.NameValidator",
            "._enabled.EnabledValidator",
            "._dtickrange.DtickrangeValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergl@marker@colorbar@tickformatstop@__init__.py@.PATH_END.py
|
{
"filename": "scanner_gui_run.ipynb",
"repo_name": "Skuggsja-Lab/skuggsja-scan",
"repo_path": "skuggsja-scan_extracted/skuggsja-scan-main/scanner_gui_run.ipynb",
"type": "Jupyter Notebook"
}
|
# Monitoring while scanning
## Imports
```python
from robodk.robolink import * # API to communicate with RoboDK
from robodk.robomath import * # basic matrix operations
import numpy as np
from datetime import datetime
import os
```
## Raster
```python
runfile("scan_control_gui.py")
```
An exception has occurred, use %tb to see the full traceback.
SystemExit: 0
Export command from .ui folder
```python
# pyuic6 -o scan_control_gui.py scan_control_gui.ui
```
Boilerplate appended to the .py files generated from QT Designer .ui files
```python
# class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
# def __init__(self, *args, obj=None, **kwargs):
# super(MainWindow, self).__init__(*args, **kwargs)
# self.setupUi(self)
# if __name__ == '__main__':
# if not QtWidgets.QApplication.instance():
# app = QtWidgets.QApplication(sys.argv)
# else:
# app = QtWidgets.QApplication.instance()
# window = MainWindow()
# window.show()
# sys.exit(app.exec())
```
|
Skuggsja-LabREPO_NAMEskuggsja-scanPATH_START.@skuggsja-scan_extracted@skuggsja-scan-main@scanner_gui_run.ipynb@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "mslonina/Mechanic",
"repo_path": "Mechanic_extracted/Mechanic-master/README.md",
"type": "Markdown"
}
|
Mechanic
--------
### Overview
We develop the Mechanic package, which is a flexible numerical framework to
handle and automate massive numerical simulations. We assume that these computations rely
on testing a huge range of initial conditions, while each test (single simulation run) may
be computed as a standalone task, either on one or a group of CPUs. A natural way to
analyse such data sets is to run the simulations in parallel; however, managing that "by
hand" is usually a cumbersome job and introduces human error. The use of queue
scripts and job-control mechanisms likewise remains task-dependent.
The Mechanic framework relies on the core-module approach (similar to
server-client architecture, in terms of the MPI farm model). It provides a
relatively abstract layer of flexible and powerful module interface which
makes it possible to easily adapt the existing code. The modules are loaded
dynamically during run-time. The Mechanic module interface provides
a template structure that allows to run specific functions on particular nodes, e.g. only
on slave CPUs. User may choose among a number of available functions to communicate the
slave tasks with the Mechanic core, and manage the simulation in every detail. The module
interface applies both to C and Fortran2003 codes. It is possible to connect software
written in other programming languages/frameworks, i.e. NVIDIA CUDA or OpenCL.
The underlying idea of the Mechanic's core is to make it completely
transparent to the numerical problem handled by the given, external (user
supplied) module. Thus, the framework might be installed system-wide and
become a helper tool for users, who need to perform a large number of
computations. The top layer of the framework is focused on handling
massive simulations in every technical aspect -- through basic
configuration of the runs, sending, receiving and storing data, restarting
runs at each stage.
The code is written in C, uses HDF5 data layer and comes with Fortran
bindings. It was developed and tested to work equally well in fake-MPI mode
(like on a single CPU) and on a large CPU cluster, either in 32- and 64-bits environments
(currently, Linux and Mac OS X are actively maintained).
Although the framework remains in early alpha stage, some existing Fortran77 and C codes
were successfully ported and ran with Mechanic, showing huge potential of the code, as
well as number of features to improve and develop. The development and testing the
framework is ongoing.
Mechanic is BSD-licensed. The source code package comes with few example
modules and is freely available at http://git.astri.umk.pl/projects/mechanic
### Publications
- Slonina M., Gozdziewski K., Migaszewski C., 2012arXiv1202.6513S
- Migaszewski C., Slonina M., Gozdziewski K., 2012arXiv1205.0822M
- Gozdziewski K. et al, 2012arXiv1205.4164G
- Slonina M., Gozdziewski K., Migaszewski C., Rozenkiewicz A., 2012arXiv1205.1341S
- Slonina M., Gozdziewski K., Migaszewski C., Astrophysics Source Code Library, record ascl:1205.001
### Posters
- Slonina M., Gozdziewski K., Migaszewski C., Simtech2011 (Stuttgart, June 2011)
- Slonina M., Gozdziewski K., Migaszewski C., Orbital Couples: "Pas de Deux" in the Solar System and the Milky Way (Paris, October 2011)
### Acknowledgments
This project is supported by the Polish Ministry of Science and Higher Education through the grant N/N203/402739. This work is conducted within the POWIEW project of the European Regional Development Fund in Innovative Economy Programme POIG.02.03.00-00-018/08.
|
msloninaREPO_NAMEMechanicPATH_START.@Mechanic_extracted@Mechanic-master@README.md@.PATH_END.py
|
{
"filename": "diagnostics.py",
"repo_name": "danielrd6/ifscube",
"repo_path": "ifscube_extracted/ifscube-master/ifscube/diagnostics.py",
"type": "Python"
}
|
import numpy as np
from numpy import ma
import matplotlib.pyplot as plt
from matplotlib import transforms
class bpt:
    """BPT emission-line diagnostic diagram.

    See Baldwin, Phillips & Terlevich (1981) and Kauffmann et al. (2003).

    Parameters
    ----------
    ha, n2, hb, o3 : array_like or numpy.ma.MaskedArray
        Fluxes of H-alpha, [N II], H-beta and [O III].  Plain arrays are
        promoted to masked arrays.
    """
    def __init__(self, ha, n2, hb, o3):
        # Store every line as a masked array.  (Rewritten without the
        # original eval()-on-attribute-names pattern, which was fragile.)
        for name, value in zip(('ha', 'n2', 'hb', 'o3'), (ha, n2, hb, o3)):
            if not isinstance(value, ma.masked_array):
                value = ma.masked_array(value)
            setattr(self, name, value)
    def kauffmann2003(self):
        """Overplot the Kauffmann et al. (2003) SF/AGN dividing line."""
        ax = self.ax
        x = np.linspace(ax.get_xlim()[0], -.1)
        y = 0.61 / (x - 0.05) + 1.3
        ax.plot(x, y, ls='dashed', color='C2')
        return
    def plot(self, ax=None, fig=None, xlim=(-1.5, .5),
             ylim=(-1.2, 1.5), **kwargs):
        """Scatter-plot log10([N II]/Ha) vs log10([O III]/Hb) and draw the
        Kauffmann et al. (2003) boundary.

        Creates a new figure/axes when neither is given; extra keyword
        arguments are forwarded to ``Axes.scatter``.
        """
        if fig is None and ax is None:
            fig = plt.figure(1, figsize=(6, 6))
        if ax is None:
            ax = fig.add_subplot(111)
        self.ax = ax
        ax.set_xlabel(r'$\log_{10}$ [N II]/H$\alpha$')
        ax.set_ylabel(r'$\log_{10}$ [O III]/H$\beta$')
        ax.set_xlim(*xlim)
        ax.set_ylim(*ylim)
        self.x = ma.log10(self.n2 / self.ha)
        self.y = ma.log10(self.o3 / self.hb)
        ax.scatter(self.x, self.y, **kwargs)
        self.kauffmann2003()
        return
class whan_diagram:
    """WHAN diagnostic diagram (W_Ha vs. [N II]/Ha).

    See Cid Fernandes, R. et al. 2011, MNRAS 413, 1687.

    Parameters
    ----------
    wha : array_like
        H-alpha equivalent widths (Angstrom).
    flux_ha, flux_n2 : array_like
        H-alpha and [N II] fluxes.
    """
    def __init__(self, wha, flux_ha, flux_n2):
        # Store inputs as masked arrays.  (Rewritten without the original
        # eval()-on-attribute-names pattern, which was fragile.)
        for name, value in zip(('wha', 'flux_ha', 'flux_n2'),
                               (wha, flux_ha, flux_n2)):
            if not isinstance(value, ma.masked_array):
                value = ma.masked_array(value)
            setattr(self, name, value)
        self.x = ma.log10(self.flux_n2 / self.flux_ha)
        self.y = ma.log10(self.wha)
    def plot(self, ax=None, fig=None, text_opts=None, xlim=None, ylim=None,
             **kwargs):
        """Scatter-plot the WHAN diagram with its class boundaries.

        `text_opts` now defaults to None instead of a shared mutable dict
        (mutable-default pitfall); passing a dict works exactly as before.
        A leftover debug ``print`` was removed.
        """
        if text_opts is None:
            text_opts = {}
        if fig is None and ax is None:
            fig = plt.figure(1, figsize=(6, 6))
        if ax is None:
            ax = fig.add_subplot(111)
        ax.set_xlabel(r'$\log_{10}$ [N II]/H$\alpha$')
        ax.set_ylabel(r'$\log_{10} {\rm W}_{{\rm H}\alpha}$ ($\AA$)')
        if xlim is not None:
            ax.set_xlim(xlim)
        if ylim is not None:
            ax.set_ylim(ylim)
        inv = ax.transAxes.inverted()
        # wha < 3 ==> Retired and passive galaxies
        ax.axhline(np.log10(3), color='k')
        # 3 < wha < 6 ==> weak AGN
        xm = inv.transform(ax.transData.transform((-.4, 0)))[0]
        ax.axhline(np.log10(6), xmin=xm, color='k')
        # log10([N II] / Ha) < -0.4 ==> Star forming galaxies
        ym = inv.transform(ax.transData.transform((0, np.log10(3))))[1]
        # ax.axvline(-.4, ymin=ym)
        ax.axvline(-.4, color='k')
        ax.text(.05, .95, 'SF', ha='left', transform=ax.transAxes, **text_opts)
        ax.text(.95, .95, 'sAGN', ha='right', transform=ax.transAxes, **text_opts)
        trans = transforms.blended_transform_factory(ax.transAxes, ax.transData)
        ax.text(.95, np.log10(4), 'wAGN', ha='right', transform=trans, **text_opts)
        ax.text(.95, .05, 'Passive galaxies', ha='right', transform=ax.transAxes, **text_opts)
        ax.scatter(self.x, self.y, **kwargs)
|
danielrd6REPO_NAMEifscubePATH_START.@ifscube_extracted@ifscube-master@ifscube@diagnostics.py@.PATH_END.py
|
{
"filename": "attention.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/jax/experimental/pallas/ops/gpu/attention.py",
"type": "Python"
}
|
# Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing fused attention forward and backward pass."""
from __future__ import annotations
import functools
from typing import Any
import jax
from jax import lax
from jax.experimental import pallas as pl
from jax.experimental.pallas import triton as plgpu
import jax.numpy as jnp
import numpy as np
# Fill value for masked-out attention logits: effectively -infinity, but kept
# finite at -0.7 * float32 max.
DEFAULT_MASK_VALUE = -0.7 * float(np.finfo(np.dtype("float32")).max)
def mha_forward_kernel(
    q_ref,
    k_ref,
    v_ref,  # Input arrays
    segment_ids_ref: jax.Array | None,  # segment_id arrays
    o_ref: Any,  # Output
    *residual_refs: Any,  # Residual outputs
    num_heads: int,
    sm_scale: float,
    causal: bool,
    block_q: int,
    block_d: int,
    block_k: int,
):
  """FlashAttention forward kernel producing one [block_q, block_d] tile.

  Each program instance handles one query block of a single (batch, head)
  pair, streaming K/V in chunks of `block_k` with the online-softmax
  recurrence (running max `m_i`, running sum `l_i`).  When `residual_refs`
  is non-empty, the per-row log-sum-exp needed by the backward pass is
  written to its first element.
  """
  seq_len = k_ref.shape[0]
  start_q = pl.program_id(0)
  # o is the buffer where we accumulate the output on sram.
  # m_i and l_i (see FlashAttention paper) are updated during the k,v loop.
  m_i = jnp.zeros(block_q, dtype=jnp.float32) - float('inf')
  l_i = jnp.zeros(block_q, dtype=jnp.float32)
  # acc is the buffer where we accumulate the output on sram.
  o = jnp.zeros((block_q, block_d), dtype=jnp.float32)
  # Load q: it will stay in L1 throughout. Indices form a matrix because we
  # read, compute, and write all in 2d chunks. 1 element ~= 1 CUDA thread index.
  # q tile has shape [block_q, block_d], block_d == head_dim.
  curr_q_slice = pl.dslice(start_q * block_q, block_q)
  q = q_ref[...]
  q_segment_ids = (
      None
      if segment_ids_ref is None
      else pl.load(segment_ids_ref, (curr_q_slice,))
  )
  # In FlashAttention algorithm 1 there are 2 loops: slow over tiles of kv (size
  # (Bc == block_k here), and fast over blocks of q (size Br == block_q here).
  # Here we only loop over blocks of kv to process entire seq_len, the loop over
  # blocks of q is carried out by the grid.
  def body(start_k, carry):
    # One online-softmax update step for the K/V chunk at `start_k`.
    o_prev, m_prev, l_prev = carry
    curr_k_slice = pl.dslice(start_k * block_k, block_k)
    k = pl.load(k_ref, (curr_k_slice, slice(None)))
    qk = pl.dot(q, k.T)  # [block_q, block_k]
    if sm_scale != 1.:
      qk *= sm_scale  # [block_q, block_k]
    # Avoids Triton crash.
    # if num_heads > 2:
    #   qk = qk.astype(q_ref.dtype)
    #   qk = qk.astype(jnp.float32)
    if causal or segment_ids_ref is not None:
      mask = None
      if segment_ids_ref is not None:
        kv_segment_ids = pl.load(segment_ids_ref, (curr_k_slice,))
        mask = segment_mask(q_segment_ids, kv_segment_ids)
      if causal:
        span_q = start_q * block_q + jnp.arange(block_q)
        span_k = start_k * block_k + jnp.arange(block_k)
        causal_mask = span_q[:, None] >= span_k[None, :]
        mask = (
            causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
        )
      # Apply mask to qk.
      qk = jnp.where(mask, qk, DEFAULT_MASK_VALUE)
    m_curr = qk.max(axis=-1)
    m_next = jnp.maximum(m_prev, m_curr)
    correction = jnp.exp(m_prev - m_next)
    l_prev_corr = correction * l_prev
    s_curr = jnp.exp(
        qk - m_next[:, None]
    )  # Use m_next instead of m_curr to avoid a correction on l_curr
    l_curr = s_curr.sum(axis=-1)
    l_next = l_prev_corr + l_curr
    o_prev_corr = correction[:, None] * o_prev
    v = pl.load(v_ref, (curr_k_slice, pl.dslice(block_d)))
    o_curr = pl.dot(s_curr.astype(v.dtype), v)
    o_next = o_prev_corr + o_curr
    return o_next, m_next, l_next
  if causal:
    # Ceildiv (`pl.cdiv` and `//` do not work due to type of start_q)
    upper_bound = lax.div(block_q * (start_q + 1) + block_k - 1, block_k)
  else:
    upper_bound = pl.cdiv(seq_len, block_k)
  o, m_i, l_i = lax.fori_loop(0, upper_bound, body, (o, m_i, l_i))
  # We keep an unscaled version of o during the scan over seq_len. Scaling it
  # by the last l_i gives us the correct final output. See section 3.1.1 in the
  # FlashAttention-2 paper: https://arxiv.org/pdf/2307.08691.
  o /= l_i[:, None]
  if residual_refs:
    lse_ref = residual_refs[0]
    lse_ref[...] = m_i + jnp.log(l_i)
  # Write output to dram.
  o_ref[...] = o.astype(o_ref.dtype)
def segment_mask(
    q_segment_ids: jax.Array,
    kv_segment_ids: jax.Array,
):
  """Boolean mask, True where query and key belong to the same segment.

  Broadcasts the query ids over a trailing key axis and the key ids over a
  query axis, giving a [T, S] (or [B, T, S]) mask.
  """
  q_ids = q_segment_ids[..., None]  # [B, T, 1] or [T, 1]
  if kv_segment_ids.ndim == 1:
    kv_ids = kv_segment_ids[None, :]  # [1, S]
  else:
    kv_ids = jnp.expand_dims(kv_segment_ids, axis=1)  # [B, 1, S]
  return (q_ids == kv_ids).astype(jnp.bool_)
@functools.partial(
    jax.custom_vjp, nondiff_argnums=[4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
)
@functools.partial(
    jax.jit,
    static_argnames=[
        "sm_scale",
        "causal",
        "block_q",
        "block_k",
        "backward_pass_impl",
        "num_warps",
        "num_stages",
        "grid",
        "interpret",
        "debug",
    ],
)
def mha(
    q,
    k,
    v,
    segment_ids: jnp.ndarray | None,
    sm_scale: float = 1.0,
    causal: bool = False,
    block_q: int = 128,
    block_k: int = 128,
    backward_pass_impl: str = "triton",
    num_warps: int | None = None,
    num_stages: int = 2,
    grid: tuple[int, ...] | None = None,
    interpret: bool = False,
    debug: bool = False,
):
  """Fused multi-head attention with a custom VJP.

  `q` is [batch, q_seq_len, num_heads, head_dim]; `k` and `v` are
  [batch, kv_seq_len, num_heads, head_dim].  `backward_pass_impl` only
  affects the backward rule (see `_mha_backward`) and is unused here.
  """
  del backward_pass_impl
  batch_size, q_seq_len, num_heads, head_dim = q.shape
  kv_seq_len = k.shape[1]
  block_q = min(block_q, q_seq_len)
  block_k = min(block_k, kv_seq_len)
  # Heuristics.
  grid_ = grid
  if grid_ is None:
    # One program per (q block, batch element, head).
    grid_ = (pl.cdiv(q_seq_len, block_q), batch_size, num_heads)
  num_warps_ = num_warps
  if num_warps_ is None:
    num_warps_ = 4 if head_dim <= 64 else 8
  kernel = functools.partial(mha_forward_kernel, num_heads=num_heads,
                             sm_scale=sm_scale, block_q=block_q,
                             block_k=block_k, block_d=head_dim,
                             causal=causal)
  # Q is blocked along the sequence axis; K/V (and segment ids) are brought
  # in whole per (batch, head).
  in_specs = [
      pl.BlockSpec(
          (None, block_q, None, head_dim), lambda i, j, k: (j, i, k, 0)
      ),
      pl.BlockSpec(
          (None, kv_seq_len, None, head_dim), lambda _, j, k: (j, 0, k, 0)
      ),
      pl.BlockSpec(
          (None, kv_seq_len, None, head_dim), lambda _, j, k: (j, 0, k, 0)
      ),
  ]
  in_specs.append(
      None  # type: ignore[arg-type]
      if segment_ids is None
      else pl.BlockSpec((None, kv_seq_len), lambda _, j, k: (j, 0))
  )
  out_shape = jax.ShapeDtypeStruct(shape=q.shape, dtype=q.dtype)
  return pl.pallas_call(
      kernel,
      grid=grid_,
      in_specs=in_specs,
      out_specs=pl.BlockSpec(
          (None, block_q, None, head_dim), lambda i, j, k: (j, i, k, 0)
      ),
      compiler_params=plgpu.TritonCompilerParams(
          num_warps=num_warps_, num_stages=num_stages),
      out_shape=out_shape,
      debug=debug,
      interpret=interpret,
      name="mha_forward",
  )(q, k, v, segment_ids)
def _mha_forward(
    q,
    k,
    v,
    segment_ids: jax.Array | None,
    sm_scale: float,
    causal: bool,
    block_q: int,
    block_k: int,
    backward_pass_impl: str,
    num_warps: int | None,
    num_stages: int,
    grid: Any,
    interpret: bool,
    debug: bool,
):
  """Forward rule for `mha`'s custom VJP.

  Same computation as `mha`, but additionally emits the per-row
  log-sum-exp (`lse`) and returns the residuals `_mha_backward` needs.
  """
  del backward_pass_impl
  batch_size, q_seq_len, num_heads, head_dim = q.shape
  kv_seq_len = k.shape[1]
  block_q = min(block_q, q_seq_len)
  block_k = min(block_k, kv_seq_len)
  # Heuristics.
  grid_ = grid
  if grid_ is None:
    grid_ = (pl.cdiv(q_seq_len, block_q), batch_size, num_heads)
  num_warps_ = num_warps
  if num_warps_ is None:
    num_warps_ = 4 if head_dim <= 64 else 8
  kernel = functools.partial(mha_forward_kernel, num_heads=num_heads,
                             sm_scale=sm_scale, causal=causal, block_q=block_q,
                             block_k=block_k, block_d=head_dim)
  out_shape = [
      jax.ShapeDtypeStruct(shape=q.shape, dtype=q.dtype),  # out
      jax.ShapeDtypeStruct(
          shape=(batch_size, num_heads, q_seq_len), dtype=jnp.float32  # lse
      ),
  ]
  in_specs = [
      pl.BlockSpec(
          (None, block_q, None, head_dim), lambda i, j, k: (j, i, k, 0)
      ),
      pl.BlockSpec(
          (None, kv_seq_len, None, head_dim), lambda _, j, k: (j, 0, k, 0)
      ),
      pl.BlockSpec(
          (None, kv_seq_len, None, head_dim), lambda _, j, k: (j, 0, k, 0)
      ),
  ]
  in_specs.append(
      None  # type: ignore[arg-type]
      if segment_ids is None
      else pl.BlockSpec((None, kv_seq_len), lambda _, j, k: (j, 0))
  )
  out, lse = pl.pallas_call(
      kernel,
      grid=grid_,
      in_specs=in_specs,
      out_specs=[
          pl.BlockSpec(
              (None, block_q, None, head_dim), lambda i, j, k: (j, i, k, 0)
          ),
          pl.BlockSpec((None, None, block_q), lambda i, j, k: (j, k, i)),
      ],
      compiler_params=dict(
          triton=dict(num_warps=num_warps_, num_stages=num_stages)
      ),
      out_shape=out_shape,
      debug=debug,
      interpret=interpret,
      name="mha_forward",
  )(q, k, v, segment_ids)
  # Residuals: original inputs plus the forward output and log-sum-exp.
  return out, (q, k, v, segment_ids, out, lse)
def _preprocess_backward_kernel(out_ref, dout_ref, delta_ref):
# load
o = out_ref[...].astype(jnp.float32)
do = dout_ref[...].astype(jnp.float32)
# compute
delta = jnp.sum(o * do, axis=1)
# write-back
delta_ref[...] = delta.astype(delta_ref.dtype)
@jax.named_scope("preprocess_backward")
def _preprocess_backward(out, do, lse, block_q: int,
                         debug: bool, interpret: bool):
  """Launch `_preprocess_backward_kernel` over the whole batch.

  Returns `delta` with the same shape/dtype as `lse`
  ([batch, num_heads, q_seq_len]).
  """
  batch_size, seq_len, num_heads, head_dim = out.shape
  out_shape = jax.ShapeDtypeStruct(lse.shape, lse.dtype)
  delta = pl.pallas_call(
      _preprocess_backward_kernel,
      grid=(pl.cdiv(seq_len, block_q), batch_size, num_heads),
      in_specs=[
          pl.BlockSpec(
              (None, block_q, None, head_dim), lambda i, j, k: (j, i, k, 0)
          ),
          pl.BlockSpec(
              (None, block_q, None, head_dim), lambda i, j, k: (j, i, k, 0)
          ),
      ],
      out_specs=pl.BlockSpec((None, None, block_q), lambda i, j, k: (j, k, i)),
      compiler_params=dict(triton=dict(num_warps=4, num_stages=3)),
      out_shape=out_shape,
      debug=debug,
      interpret=interpret,
      name="mha_preprocess_backward",
  )(out, do)
  return delta
# This kernel computes dK_i, dV_i and dQ_i in parallel across the sequence
# length.
# Inspired by the triton tutorial: https://github.com/triton-lang/triton/blob/main/python/tutorials/06-fused-attention.py
def mha_backward_kernel(
    # Inputs
    q_ref,
    k_ref,
    v_ref,
    segment_ids_ref: jax.Array | None,
    out_ref,
    do_scaled_ref,
    lse_ref,
    delta_ref,
    # Outputs
    dq_ref,
    dk_ref,
    dv_ref,
    *,
    sm_scale: float,
    causal: bool,
    block_q1: int,
    block_k1: int,
    block_q2: int,
    block_k2: int,
    block_d: int,
):
  """FlashAttention backward kernel.

  Runs two independent scans per program instance: scan #1 accumulates
  dK/dV for one K/V block by iterating over all Q blocks; scan #2
  accumulates dQ for one Q block by iterating over all K/V blocks.  The
  attention probabilities are recomputed from q, k and the stored
  log-sum-exp (`lse_ref`) rather than being saved from the forward pass.
  """
  del out_ref  # Not needed
  q_seq_len = q_ref.shape[0]
  kv_seq_len = k_ref.shape[0]
  # Scan #1: dK and dV
  #   1. Load a block of K and V of size (block_k1, head_dim) in SMEM.
  #   2. Iterate through Q in chunks of (block_q1, head_dim) to accumulate
  #      dK and dV.
  start_k = pl.program_id(2)
  curr_k_slice = pl.dslice(start_k * block_k1, block_k1)
  dv = jnp.zeros([block_k1, block_d], dtype=jnp.float32)
  dk = jnp.zeros([block_k1, block_d], dtype=jnp.float32)
  v = pl.load(v_ref, (curr_k_slice, slice(None)))
  k = pl.load(k_ref, (curr_k_slice, slice(None)))
  span_k = start_k * block_k1 + jnp.arange(block_k1)
  kv_segment_ids = (
      None
      if segment_ids_ref is None
      else pl.load(segment_ids_ref, (curr_k_slice,))
  )
  def inner_loop_dkdv(start_q, carry):
    dv, dk = carry
    curr_q_slice = pl.dslice(start_q * block_q1, block_q1)
    q = pl.load(q_ref, (curr_q_slice, slice(None)))
    qk = pl.dot(q, k.T)
    if sm_scale != 1.0:
      qk *= sm_scale
    if causal or segment_ids_ref is not None:
      mask = None
      if segment_ids_ref is not None:
        q_segment_ids = pl.load(segment_ids_ref, (curr_q_slice,))
        mask = segment_mask(q_segment_ids, kv_segment_ids)
      if causal:
        span_q = start_q * block_q1 + jnp.arange(block_q1)
        causal_mask = span_q[:, None] >= span_k[None, :]
        mask = (
            causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
        )
      qk = jnp.where(mask, qk, DEFAULT_MASK_VALUE)
    lse = pl.load(lse_ref, (curr_q_slice,))
    di = pl.load(delta_ref, (curr_q_slice,))
    do = pl.load(do_scaled_ref, (curr_q_slice, slice(None)))
    # Recover the softmax probabilities from the stored log-sum-exp.
    p = jnp.exp(qk - lse[:, None])
    dv = dv + pl.dot(p.astype(do.dtype).T, do)
    dp = jnp.zeros((block_q1, block_k1), dtype=jnp.float32) - di[:, None]
    dp = dp + pl.dot(do, v.T)
    ds = p * dp
    if sm_scale != 1.0:
      ds = ds * sm_scale
    dk = dk + pl.dot(ds.astype(q_ref.dtype).T, q)
    return dv, dk
  # With a causal mask, Q blocks strictly before this K block contribute
  # nothing, so start the scan there.
  lower_bound = lax.div(start_k * block_k1, block_q1) if causal else 0
  dv, dk = lax.fori_loop(
      lower_bound, pl.cdiv(q_seq_len, block_q1), inner_loop_dkdv, (dv, dk)
  )
  dv_ref[...] = dv.astype(dv_ref.dtype)
  dk_ref[...] = dk.astype(dk_ref.dtype)
  del dv, dk
  # Scan #2: dQ
  #   1. Load a block of Q of size (block_q2, head_dim) in SMEM.
  #   2. Iterate through K and V in chunks of (block_k2, head_dim) to
  #      accumulate dQ.
  start_q = pl.program_id(2)
  curr_q_slice = pl.ds(start_q * block_q2, block_q2)
  span_q = start_q * block_q2 + jnp.arange(block_q2)
  dq = jnp.zeros([block_q2, block_d], dtype=jnp.float32)
  q = pl.load(q_ref, (curr_q_slice, slice(None)))
  q_segment_ids = (
      None
      if segment_ids_ref is None
      else pl.load(segment_ids_ref, (curr_q_slice,))
  )
  lse = pl.load(lse_ref, (curr_q_slice,))
  do = pl.load(do_scaled_ref, (curr_q_slice, slice(None)))
  di = pl.load(delta_ref, (curr_q_slice,))
  def inner_loop_dq(start_k, dq):
    curr_k_slice = pl.dslice(start_k * block_k2, block_k2)
    k = pl.load(k_ref, (curr_k_slice, slice(None)))
    v = pl.load(v_ref, (curr_k_slice, slice(None)))
    qk = pl.dot(q, k.T)
    if sm_scale != 1.0:
      qk *= sm_scale
    if causal or segment_ids_ref is not None:
      mask = None
      if segment_ids_ref is not None:
        kv_segment_ids = pl.load(segment_ids_ref, (curr_k_slice,))
        mask = segment_mask(q_segment_ids, kv_segment_ids)
      if causal:
        span_k = start_k * block_k2 + jnp.arange(block_k2)
        causal_mask = span_q[:, None] >= span_k[None, :]
        mask = (
            causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
        )
      qk = jnp.where(mask, qk, DEFAULT_MASK_VALUE)
    p = jnp.exp(qk - lse[:, None])
    dp = jnp.zeros((block_q2, block_k2), dtype=jnp.float32) - di[:, None]
    dp = dp + pl.dot(do, v.T)
    ds = p * dp
    if sm_scale != 1.0:
      ds = ds * sm_scale
    dq = dq + pl.dot(ds.astype(k.dtype), k).astype(dq.dtype)
    return dq
  if causal:
    # K/V blocks past this Q block are fully masked; stop the scan early.
    upper_bound = lax.div((start_q + 1) * block_q2, block_k2)
  else:
    upper_bound = pl.cdiv(kv_seq_len, block_k2)
  dq = lax.fori_loop(0, upper_bound, inner_loop_dq, (dq))
  dq_ref[...] = dq.astype(dq_ref.dtype)
def _mha_backward(sm_scale: float, causal: bool, block_q: int, block_k: int,
                  backward_pass_impl: str, num_warps: int | None,
                  num_stages: int, grid: Any, interpret: bool,
                  debug: bool, res, do):
  """Backward rule for `mha`'s custom VJP.

  `backward_pass_impl` selects either an XLA autodiff fallback over
  `mha_reference` ("xla") or the Pallas/Triton kernel ("triton").  Returns
  gradients for (q, k, v, segment_ids); segment ids are non-differentiable,
  hence the trailing None.
  """
  # Note: the user-supplied num_warps/num_stages/grid are intentionally
  # ignored here; the triton path below fixes its own launch configuration.
  del num_warps, num_stages, grid
  q, k, v, segment_ids, out, lse = res
  if backward_pass_impl == "xla":
    return jax.vjp(
        functools.partial(mha_reference, sm_scale=sm_scale, causal=causal),
        q,
        k,
        v,
        segment_ids,
    )[1](do)
  elif backward_pass_impl == "triton":
    batch_size, q_seq_len, num_heads, head_dim = q.shape
    kv_seq_len = k.shape[1]
    block_q = min(block_q, q_seq_len)
    block_k = min(block_k, kv_seq_len)
    # delta = rowsum(out * do), needed by the dS computation in the kernel.
    delta = _preprocess_backward(out, do, lse, block_q, debug, interpret)
    out_shapes = [
        jax.ShapeDtypeStruct(q.shape, q.dtype),
        jax.ShapeDtypeStruct(k.shape, k.dtype),
        jax.ShapeDtypeStruct(v.shape, v.dtype),
    ]
    in_specs = [
        pl.BlockSpec(
            (None, q_seq_len, None, head_dim), lambda i, j, _: (i, 0, j, 0)
        ),
        pl.BlockSpec(
            (None, kv_seq_len, None, head_dim), lambda i, j, _: (i, 0, j, 0)
        ),
        pl.BlockSpec(
            (None, kv_seq_len, None, head_dim), lambda i, j, _: (i, 0, j, 0)
        ),
        pl.BlockSpec(
            (None, q_seq_len, None, head_dim), lambda i, j, _: (i, 0, j, 0)
        ),
        pl.BlockSpec(
            (None, q_seq_len, None, head_dim), lambda i, j, _: (i, 0, j, 0)
        ),
        pl.BlockSpec((None, None, q_seq_len), lambda i, j, _: (i, j, 0)),
        pl.BlockSpec((None, None, q_seq_len), lambda i, j, _: (i, j, 0)),
    ]
    if segment_ids is None:
      in_specs.insert(3, None)  # type: ignore[arg-type]
    else:
      in_specs.insert(3, pl.BlockSpec((None, kv_seq_len), lambda i, j, _: (i, 0)))
    grid = (batch_size, num_heads, pl.cdiv(kv_seq_len, block_k))
    num_warps = 8
    dq, dk, dv = pl.pallas_call(
        functools.partial(
            mha_backward_kernel,
            sm_scale=sm_scale,
            causal=causal,
            block_q1=block_q,
            block_k1=block_k,
            block_q2=block_q,
            block_k2=block_k,
            block_d=head_dim,
        ),
        out_shape=out_shapes,
        in_specs=in_specs,
        grid=grid,
        out_specs=[
            pl.BlockSpec(
                (None, block_q, None, head_dim),
                lambda i, j, k: (i, k, j, 0),  # dq
            ),
            pl.BlockSpec(
                (None, block_k, None, head_dim),
                lambda i, j, k: (i, k, j, 0),  # dk
            ),
            pl.BlockSpec(
                (None, block_k, None, head_dim),
                lambda i, j, k: (i, k, j, 0),  # dv
            ),
        ],
        name="mha_backward",
        debug=debug,
        interpret=interpret,
        compiler_params=dict(triton=dict(num_warps=num_warps, num_stages=2)),
    )(q, k, v, segment_ids, out, do, lse, delta)
  else:
    raise ValueError(f"Invalid backward pass implementation: {backward_pass_impl}")
  return dq.astype(q.dtype), dk, dv, None
# Register the forward/backward rules that implement `mha`'s custom VJP.
mha.defvjp(_mha_forward, _mha_backward)
@functools.partial(jax.jit, static_argnames=['sm_scale', 'causal'])
def mha_reference(
q,
k,
v,
segment_ids: jnp.ndarray | None,
sm_scale=1.0,
causal: bool = False,
):
q_seq_len = q.shape[1]
kv_seq_len = k.shape[1]
logits = jnp.einsum('bqhc,bkhc->bhqk', q, k).astype(jnp.float32)
mask = None
if segment_ids is not None:
mask = jnp.expand_dims(segment_mask(segment_ids, segment_ids), 1)
mask = jnp.broadcast_to(mask, logits.shape)
if causal:
causal_mask = jnp.tril(jnp.ones((1, 1, q_seq_len, kv_seq_len), dtype=bool))
causal_mask = jnp.broadcast_to(causal_mask, logits.shape)
mask = causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
logits = logits if mask is None else jnp.where(mask, logits, float("-inf"))
weights = jax.nn.softmax(logits * sm_scale).astype(q.dtype)
return jnp.einsum('bhqk,bkhc->bqhc', weights, v)
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@experimental@pallas@ops@gpu@attention.py@.PATH_END.py
|
{
"filename": "hub_callback_container.py",
"repo_name": "glue-viz/glue",
"repo_path": "glue_extracted/glue-main/glue/core/hub_callback_container.py",
"type": "Python"
}
|
import weakref
from functools import partial
__all__ = ['HubCallbackContainer']
class HubCallbackContainer(object):
    """
    A list-like container for callback functions. We need to be careful with
    storing references to methods, because if a callback method is on a class
    which contains both the callback and a callback property, a circular
    reference is created which results in a memory leak. Instead, we need to use
    a weak reference which results in the callback being removed if the instance
    is destroyed. This container class takes care of this automatically.

    Adapted from echo.CallbackContainer.
    """

    def __init__(self):
        # Maps message class -> 4-tuple (handler_func, handler_inst,
        # filter_func, filter_inst).  The *_inst slots hold weakrefs for
        # bound methods and None for plain callables (see _wrap).
        self.callbacks = {}

    def _wrap(self, handler, filter):
        """
        Given a handler and an optional filter, return the 4-tuple stored
        internally, wrapping bound methods with weak references to avoid
        circular references.

        Raises
        ------
        TypeError
            If ``handler`` is not callable, or ``filter`` is neither
            callable nor None.
        """
        if not callable(handler):
            raise TypeError("Only callable handlers can be stored in CallbackContainer")

        if filter is not None and not callable(filter):
            raise TypeError("Only callable filters can be stored in CallbackContainer")

        if self.is_bound_method(handler):
            # Bound-method references aren't persistent, so store weak
            # references to the underlying function and the instance.  The
            # instance weakref fires _auto_remove when it is collected.
            value = (weakref.ref(handler.__func__),
                     weakref.ref(handler.__self__, self._auto_remove))
        else:
            value = (handler, None)

        if self.is_bound_method(filter):
            value += (weakref.ref(filter.__func__),
                      weakref.ref(filter.__self__, self._auto_remove))
        else:
            value += (filter, None)

        return value

    def _auto_remove(self, method_instance):
        # Called when weakref detects that the instance on which a method was
        # defined has been garbage collected: drop every entry whose handler
        # or filter was bound to that instance.
        remove = []
        for key, value in self.callbacks.items():
            if value[1] is method_instance or value[3] is method_instance:
                remove.append(key)
        for key in remove:
            self.callbacks.pop(key)

    def __contains__(self, message_class):
        return message_class in self.callbacks

    def __getitem__(self, message_class):
        """Return ``(handler, filter)`` for *message_class*, re-binding any
        weakly referenced bound methods via ``functools.partial``."""
        callback = self.callbacks[message_class]

        if callback[1] is None:
            result = (callback[0],)
        else:
            func = callback[0]()
            inst = callback[1]()
            result = (partial(func, inst),)

        if callback[3] is None:
            result += (callback[2],)
        else:
            func = callback[2]()
            inst = callback[3]()
            result += (partial(func, inst),)

        return result

    def __iter__(self):
        for message_class in self.callbacks:
            yield self[message_class]

    def __len__(self):
        return len(self.callbacks)

    def keys(self):
        return self.callbacks.keys()

    @staticmethod
    def is_bound_method(func):
        """Return True if *func* is a method bound to an instance."""
        return hasattr(func, '__func__') and getattr(func, '__self__', None) is not None

    def __setitem__(self, message_class, value):
        handler, filter = value
        self.callbacks[message_class] = self._wrap(handler, filter)

    def pop(self, message_class):
        return self.callbacks.pop(message_class)

    def remove_handler(self, handler):
        """Remove every subscription whose handler is *handler*.

        Bug fixes vs. the previous implementation: entries are now popped by
        their message-class *key* (previously the stored value tuple was
        passed to ``dict.pop``, raising KeyError), and the keys are no longer
        sorted (message classes are arbitrary class objects, which are not
        orderable in Python 3).
        """
        if self.is_bound_method(handler):
            to_remove = [message_class
                         for message_class, callback in list(self.callbacks.items())
                         if (callback[1] is not None and
                             handler.__func__ is callback[0]() and
                             handler.__self__ is callback[1]())]
        else:
            to_remove = [message_class
                         for message_class, callback in list(self.callbacks.items())
                         if callback[1] is None and handler is callback[0]]
        for message_class in to_remove:
            self.callbacks.pop(message_class)
|
glue-vizREPO_NAMEgluePATH_START.@glue_extracted@glue-main@glue@core@hub_callback_container.py@.PATH_END.py
|
{
"filename": "v1_schema.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/_internal/pydantic/v1_schema.py",
"type": "Python"
}
|
import inspect
import typing
import warnings
import pydantic
from pydantic.v1 import BaseModel as V1BaseModel
def is_v1_model(v: typing.Any) -> bool:
    """Return True if *v* is a Pydantic v1 ``BaseModel`` instance or subclass."""
    with warnings.catch_warnings():
        # Touching the v1 compatibility shim can emit deprecation warnings
        # under Pydantic 2; suppress them for the duration of the checks.
        warnings.filterwarnings(
            "ignore", category=pydantic.warnings.PydanticDeprecatedSince20
        )
        if isinstance(v, V1BaseModel):
            return True
        try:
            return bool(inspect.isclass(v) and issubclass(v, V1BaseModel))
        except TypeError:
            # issubclass() rejects some non-class arguments (e.g. generics).
            return False
def is_v1_type(v: typing.Any) -> bool:
    """Return True if *v* is a Pydantic v1 model or a ``pydantic.v1.types`` type."""
    if is_v1_model(v):
        return True
    module = getattr(v, "__module__", None)
    return isinstance(module, str) and module.startswith("pydantic.v1.types")
def has_v1_type_as_param(signature: inspect.Signature) -> bool:
    """Return True if any parameter of *signature* is annotated with a
    Pydantic v1 type, either directly or inside a generic (e.g. list[Model])."""
    for param in signature.parameters.values():
        annotation = param.annotation
        # Direct annotation, e.g. ``x: MyV1Model``.
        if is_v1_type(annotation):
            return True
        # Parameterized annotation, e.g. ``x: list[MyV1Model]``.
        if any(is_v1_type(arg) for arg in typing.get_args(annotation)):
            return True
    return False
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@_internal@pydantic@v1_schema.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/xaxis/tickfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Generated validator for the ``layout.scene.xaxis.tickfont.color`` property."""

    def __init__(
        self, plotly_name="color", parent_name="layout.scene.xaxis.tickfont", **kwargs
    ):
        # "plot" is the default edit type; callers may override it via kwargs.
        edit_type = kwargs.pop("edit_type", "plot")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@scene@xaxis@tickfont@_color.py@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/updatemenu/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Generated validator for the boolean ``layout.updatemenu.visible`` property."""

    def __init__(
        self, plotly_name="visible", parent_name="layout.updatemenu", **kwargs
    ):
        # "arraydraw" is the default edit type; callers may override via kwargs.
        edit_type = kwargs.pop("edit_type", "arraydraw")
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@updatemenu@_visible.py@.PATH_END.py
|
{
"filename": "SConscript.py",
"repo_name": "rat-pac/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/Script/SConscript.py",
"type": "Python"
}
|
"""SCons.Script.SConscript
This module defines the Python API provided to SConscript and SConstruct
files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Script/SConscript.py 4043 2009/02/23 09:06:45 scons"
import SCons
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Environment
import SCons.Errors
import SCons.Node
import SCons.Node.Alias
import SCons.Node.FS
import SCons.Platform
import SCons.SConf
import SCons.Script.Main
import SCons.Tool
import SCons.Util
import os
import os.path
import re
import string
import sys
import traceback
import types
import UserList
# The following variables used to live in this module. Some
# SConscript files out there may have referred to them directly as
# SCons.Script.SConscript.*. This is now supported by some special
# handling towards the bottom of the SConscript.__init__.py module.
#Arguments = {}
#ArgList = []
#BuildTargets = TargetList()
#CommandLineTargets = []
#DefaultTargets = []
class SConscriptReturn(Exception):
    """Raised by Return() (when stop is true) to unwind out of an SConscript file."""
    pass
launch_dir = os.path.abspath(os.curdir)   # directory from which scons was invoked

GlobalDict = None   # cached template namespace built by BuildDefaultGlobals()

# global exports set by Export():
global_exports = {}

# chdir flag: whether _SConscript() should os.chdir() into each file's directory
sconscript_chdir = 1
def get_calling_namespaces():
    """Return the locals and globals for the function that called
    into this module in the current call stack."""
    # The 1/0 trick obtains the current frame from the exception traceback
    # without calling sys._getframe() (old Python 2 idiom).
    try: 1/0
    except ZeroDivisionError:
        # Don't start iterating with the current stack-frame to
        # prevent creating reference cycles (f_back is safe).
        frame = sys.exc_info()[2].tb_frame.f_back

    # Find the first frame that *isn't* from this file.  This means
    # that we expect all of the SCons frames that implement an Export()
    # or SConscript() call to be in this file, so that we can identify
    # the first non-Script.SConscript frame as the user's local calling
    # environment, and the locals and globals dictionaries from that
    # frame as the calling namespaces.  See the comment below preceding
    # the DefaultEnvironmentCall block for even more explanation.
    while frame.f_globals.get("__name__") == __name__:
        frame = frame.f_back

    return frame.f_locals, frame.f_globals
def compute_exports(exports):
    """Compute a dictionary of exports given one of the parameters
    to the Export() function or the exports argument to SConscript()."""

    loc, glob = get_calling_namespaces()

    retval = {}
    try:
        for export in exports:
            if SCons.Util.is_Dict(export):
                retval.update(export)
            else:
                try:
                    # Prefer the caller's locals; fall back to its globals.
                    retval[export] = loc[export]
                except KeyError:
                    retval[export] = glob[export]
    except KeyError, x:
        raise SCons.Errors.UserError, "Export of non-existent variable '%s'"%x

    return retval
class Frame:
    """A frame on the SConstruct/SConscript call stack"""
    def __init__(self, fs, exports, sconscript):
        self.globals = BuildDefaultGlobals()     # fresh global namespace for this file
        self.retval = None                       # value set by Return()
        self.prev_dir = fs.getcwd()              # directory to restore when done
        self.exports = compute_exports(exports)  # exports from the calling SConscript
        # make sure the sconscript attr is a Node.
        if isinstance(sconscript, SCons.Node.Node):
            self.sconscript = sconscript
        elif sconscript == '-':
            # Reading from stdin; there is no file Node.
            self.sconscript = None
        else:
            self.sconscript = fs.File(str(sconscript))
# the SConstruct/SConscript call stack:
call_stack = []
# For documentation on the methods in this file, see the scons man-page
def Return(*vars, **kw):
    """Record the named variables as the current SConscript's return
    value(s) and, unless stop=False is passed, unwind out of the file
    by raising SConscriptReturn."""
    retval = []
    try:
        fvars = SCons.Util.flatten(vars)
        for var in fvars:
            # Each argument may be a whitespace-separated list of names.
            for v in string.split(var):
                retval.append(call_stack[-1].globals[v])
    except KeyError, x:
        raise SCons.Errors.UserError, "Return of non-existent variable '%s'"%x

    if len(retval) == 1:
        call_stack[-1].retval = retval[0]
    else:
        call_stack[-1].retval = tuple(retval)

    stop = kw.get('stop', True)

    if stop:
        raise SConscriptReturn
# Sentinel planted in each SConscript's globals so traceback-walking code
# (SConscript_exception, annotate) can find the user-level frames.
stack_bottom = '% Stack boTTom %' # hard to define a variable w/this name :)

def _SConscript(fs, *files, **kw):
    """Read and execute each SConscript file in *files*, returning the
    Return() value of a single file, or a tuple of values for several.

    Keyword arguments used here: 'exports' (variables exported to the
    file) and 'src_dir' (logical source directory for variant builds).
    """
    top = fs.Top
    sd = fs.SConstruct_dir.rdir()
    exports = kw.get('exports', [])

    # evaluate each SConscript file
    results = []
    for fn in files:
        call_stack.append(Frame(fs, exports, fn))
        old_sys_path = sys.path
        try:
            SCons.Script.sconscript_reading = SCons.Script.sconscript_reading + 1
            if fn == "-":
                # Read the SConscript from standard input.
                exec sys.stdin in call_stack[-1].globals
            else:
                if isinstance(fn, SCons.Node.Node):
                    f = fn
                else:
                    f = fs.File(str(fn))
                _file_ = None

                # Change directory to the top of the source
                # tree to make sure the os's cwd and the cwd of
                # fs match so we can open the SConscript.
                fs.chdir(top, change_os_dir=1)
                if f.rexists():
                    _file_ = open(f.rfile().get_abspath(), "r")
                elif f.has_src_builder():
                    # The SConscript file apparently exists in a source
                    # code management system.  Build it, but then clear
                    # the builder so that it doesn't get built *again*
                    # during the actual build phase.
                    f.build()
                    f.built()
                    f.builder_set(None)
                    if f.exists():
                        _file_ = open(f.get_abspath(), "r")
                if _file_:
                    # Chdir to the SConscript directory.  Use a path
                    # name relative to the SConstruct file so that if
                    # we're using the -f option, we're essentially
                    # creating a parallel SConscript directory structure
                    # in our local directory tree.
                    #
                    # XXX This is broken for multiple-repository cases
                    # where the SConstruct and SConscript files might be
                    # in different Repositories.  For now, cross that
                    # bridge when someone comes to it.
                    try:
                        src_dir = kw['src_dir']
                    except KeyError:
                        ldir = fs.Dir(f.dir.get_path(sd))
                    else:
                        ldir = fs.Dir(src_dir)
                        if not ldir.is_under(f.dir):
                            # They specified a source directory, but
                            # it's above the SConscript directory.
                            # Do the sensible thing and just use the
                            # SConscript directory.
                            ldir = fs.Dir(f.dir.get_path(sd))
                    try:
                        fs.chdir(ldir, change_os_dir=sconscript_chdir)
                    except OSError:
                        # There was no local directory, so we should be
                        # able to chdir to the Repository directory.
                        # Note that we do this directly, not through
                        # fs.chdir(), because we still need to
                        # interpret the stuff within the SConscript file
                        # relative to where we are logically.
                        fs.chdir(ldir, change_os_dir=0)
                        # TODO Not sure how to handle src_dir here
                        os.chdir(f.rfile().dir.get_abspath())

                    # Append the SConscript directory to the beginning
                    # of sys.path so Python modules in the SConscript
                    # directory can be easily imported.
                    sys.path = [ f.dir.get_abspath() ] + sys.path

                    # This is the magic line that actually reads up
                    # and executes the stuff in the SConscript file.
                    # The locals for this frame contain the special
                    # bottom-of-the-stack marker so that any
                    # exceptions that occur when processing this
                    # SConscript can base the printed frames at this
                    # level and not show SCons internals as well.
                    call_stack[-1].globals.update({stack_bottom:1})
                    old_file = call_stack[-1].globals.get('__file__')
                    try:
                        del call_stack[-1].globals['__file__']
                    except KeyError:
                        pass
                    try:
                        try:
                            exec _file_ in call_stack[-1].globals
                        except SConscriptReturn:
                            # Normal early exit triggered by Return().
                            pass
                    finally:
                        if old_file is not None:
                            # NOTE(review): the key here is the module-level
                            # __file__ value, not the string '__file__' that
                            # was deleted above -- looks like a bug; confirm
                            # against upstream SCons before changing.
                            call_stack[-1].globals.update({__file__:old_file})
                else:
                    SCons.Warnings.warn(SCons.Warnings.MissingSConscriptWarning,
                             "Ignoring missing SConscript '%s'" % f.path)

        finally:
            # Always restore reading depth, sys.path and the previous
            # working directory, even if the SConscript raised.
            SCons.Script.sconscript_reading = SCons.Script.sconscript_reading - 1
            sys.path = old_sys_path
            frame = call_stack.pop()
            try:
                fs.chdir(frame.prev_dir, change_os_dir=sconscript_chdir)
            except OSError:
                # There was no local directory, so chdir to the
                # Repository directory.  Like above, we do this
                # directly.
                fs.chdir(frame.prev_dir, change_os_dir=0)
                rdir = frame.prev_dir.rdir()
                rdir._create()  # Make sure there's a directory there.
                try:
                    os.chdir(rdir.get_abspath())
                except OSError, e:
                    # We still couldn't chdir there, so raise the error,
                    # but only if actions are being executed.
                    #
                    # If the -n option was used, the directory would *not*
                    # have been created and we should just carry on and
                    # let things muddle through.  This isn't guaranteed
                    # to work if the SConscript files are reading things
                    # from disk (for example), but it should work well
                    # enough for most configurations.
                    if SCons.Action.execute_actions:
                        raise e

            results.append(frame.retval)

    # if we only have one script, don't return a tuple
    if len(results) == 1:
        return results[0]
    else:
        return tuple(results)
def SConscript_exception(file=sys.stderr):
    """Print an exception stack trace just for the SConscript file(s).
    This will show users who have Python errors where the problem is,
    without cluttering the output with all of the internal calls leading
    up to where we exec the SConscript."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    tb = exc_tb
    # Skip frames below the stack_bottom marker planted by _SConscript().
    while tb and not tb.tb_frame.f_locals.has_key(stack_bottom):
        tb = tb.tb_next
    if not tb:
        # We did not find our exec statement, so this was actually a bug
        # in SCons itself.  Show the whole stack.
        tb = exc_tb
    stack = traceback.extract_tb(tb)
    try:
        type = exc_type.__name__
    except AttributeError:
        type = str(exc_type)
        # Strip the "exceptions." module prefix from old-style names.
        if type[:11] == "exceptions.":
            type = type[11:]
    file.write('%s: %s:\n' % (type, exc_value))
    for fname, line, func, text in stack:
        file.write('  File "%s", line %d:\n' % (fname, line))
        file.write('    %s\n' % text)
def annotate(node):
    """Annotate a node with the stack frame describing the
    SConscript file and line number that created it."""
    tb = sys.exc_info()[2]
    # Walk up to the user-level frame marked by stack_bottom.
    while tb and not tb.tb_frame.f_locals.has_key(stack_bottom):
        tb = tb.tb_next
    if not tb:
        # We did not find any exec of an SConscript file: what?!
        raise SCons.Errors.InternalError, "could not find SConscript stack frame"
    node.creator = traceback.extract_stack(tb)[0]
# The following line would cause each Node to be annotated using the
# above function. Unfortunately, this is a *huge* performance hit, so
# leave this disabled until we find a more efficient mechanism.
#SCons.Node.Annotate = annotate
class SConsEnvironment(SCons.Environment.Base):
    """An Environment subclass that contains all of the methods that
    are particular to the wrapper SCons interface and which aren't
    (or shouldn't be) part of the build engine itself.

    Note that not all of the methods of this class have corresponding
    global functions, there are some private methods.
    """

    #
    # Private methods of an SConsEnvironment.
    #
    def _exceeds_version(self, major, minor, v_major, v_minor):
        """Return 1 if 'major' and 'minor' are greater than the version
        in 'v_major' and 'v_minor', and 0 otherwise."""
        # NOTE(review): appears unused within this class -- confirm before
        # removing.
        return (major > v_major or (major == v_major and minor > v_minor))

    def _get_major_minor_revision(self, version_string):
        """Split a version string into major, minor and (optionally)
        revision parts.

        This is complicated by the fact that a version string can be
        something like 3.2b1."""
        version = string.split(string.split(version_string, ' ')[0], '.')
        v_major = int(version[0])
        # Leading-digit match tolerates suffixes like the "2b1" in "3.2b1".
        v_minor = int(re.match('\d+', version[1]).group())
        if len(version) >= 3:
            v_revision = int(re.match('\d+', version[2]).group())
        else:
            v_revision = 0
        return v_major, v_minor, v_revision

    def _get_SConscript_filenames(self, ls, kw):
        """
        Convert the parameters passed to SConscript() calls into a list
        of files and export variables.  If the parameters are invalid,
        throws SCons.Errors.UserError.  Returns a tuple (l, e) where l
        is a list of SConscript filenames and e is a list of exports.
        """
        exports = []

        if len(ls) == 0:
            # No positional args: require a 'dirs' keyword and look for a
            # file named 'name' (default "SConscript") in each directory.
            try:
                dirs = kw["dirs"]
            except KeyError:
                raise SCons.Errors.UserError, \
                      "Invalid SConscript usage - no parameters"

            if not SCons.Util.is_List(dirs):
                dirs = [ dirs ]
            dirs = map(str, dirs)

            name = kw.get('name', 'SConscript')

            files = map(lambda n, name = name: os.path.join(n, name), dirs)

        elif len(ls) == 1:
            files = ls[0]

        elif len(ls) == 2:
            files = ls[0]
            exports = self.Split(ls[1])

        else:
            raise SCons.Errors.UserError, \
                  "Invalid SConscript() usage - too many arguments"

        if not SCons.Util.is_List(files):
            files = [ files ]

        if kw.get('exports'):
            exports.extend(self.Split(kw['exports']))

        # 'build_dir' is the older spelling of 'variant_dir'.
        variant_dir = kw.get('variant_dir') or kw.get('build_dir')

        if variant_dir:
            if len(files) != 1:
                raise SCons.Errors.UserError, \
                      "Invalid SConscript() usage - can only specify one SConscript with a variant_dir"
            duplicate = kw.get('duplicate', 1)
            src_dir = kw.get('src_dir')
            if not src_dir:
                src_dir, fname = os.path.split(str(files[0]))
                files = [os.path.join(str(variant_dir), fname)]
            else:
                if not isinstance(src_dir, SCons.Node.Node):
                    src_dir = self.fs.Dir(src_dir)
                fn = files[0]
                if not isinstance(fn, SCons.Node.Node):
                    fn = self.fs.File(fn)
                if fn.is_under(src_dir):
                    # Get path relative to the source directory.
                    fname = fn.get_path(src_dir)
                    files = [os.path.join(str(variant_dir), fname)]
                else:
                    files = [fn.abspath]
                kw['src_dir'] = variant_dir
            self.fs.VariantDir(variant_dir, src_dir, duplicate)

        return (files, exports)

    #
    # Public methods of an SConsEnvironment.  These get
    # entry points in the global name space so they can be called
    # as global functions.
    #

    def Configure(self, *args, **kw):
        if not SCons.Script.sconscript_reading:
            raise SCons.Errors.UserError, "Calling Configure from Builders is not supported."
        kw['_depth'] = kw.get('_depth', 0) + 1
        return apply(SCons.Environment.Base.Configure, (self,)+args, kw)

    def Default(self, *targets):
        SCons.Script._Set_Default_Targets(self, targets)

    def EnsureSConsVersion(self, major, minor, revision=0):
        """Exit abnormally if the SCons version is not late enough."""
        scons_ver = self._get_major_minor_revision(SCons.__version__)
        if scons_ver < (major, minor, revision):
            if revision:
                scons_ver_string = '%d.%d.%d' % (major, minor, revision)
            else:
                scons_ver_string = '%d.%d' % (major, minor)
            print "SCons %s or greater required, but you have SCons %s" % \
                  (scons_ver_string, SCons.__version__)
            sys.exit(2)

    def EnsurePythonVersion(self, major, minor):
        """Exit abnormally if the Python version is not late enough."""
        try:
            v_major, v_minor, v_micro, release, serial = sys.version_info
            python_ver = (v_major, v_minor)
        except AttributeError:
            # Very old Pythons have no sys.version_info; parse sys.version.
            python_ver = self._get_major_minor_revision(sys.version)[:2]
        if python_ver < (major, minor):
            v = string.split(sys.version, " ", 1)[0]
            print "Python %d.%d or greater required, but you have Python %s" %(major,minor,v)
            sys.exit(2)

    def Exit(self, value=0):
        sys.exit(value)

    def Export(self, *vars):
        for var in vars:
            global_exports.update(compute_exports(self.Split(var)))

    def GetLaunchDir(self):
        global launch_dir
        return launch_dir

    def GetOption(self, name):
        name = self.subst(name)
        return SCons.Script.Main.GetOption(name)

    def Help(self, text):
        text = self.subst(text, raw=1)
        SCons.Script.HelpFunction(text)

    def Import(self, *vars):
        """Make variables exported by Export() or via the exports argument
        available in the calling SConscript's global namespace."""
        try:
            frame = call_stack[-1]
            globals = frame.globals
            exports = frame.exports
            for var in vars:
                var = self.Split(var)
                for v in var:
                    if v == '*':
                        globals.update(global_exports)
                        globals.update(exports)
                    else:
                        # Per-call exports take precedence over globals.
                        if exports.has_key(v):
                            globals[v] = exports[v]
                        else:
                            globals[v] = global_exports[v]
        except KeyError,x:
            raise SCons.Errors.UserError, "Import of non-existent variable '%s'"%x

    def SConscript(self, *ls, **kw):
        # Substitute construction variables in all string arguments (and
        # string elements of list arguments) before dispatching to the
        # module-level _SConscript().
        def subst_element(x, subst=self.subst):
            if SCons.Util.is_List(x):
                x = map(subst, x)
            else:
                x = subst(x)
            return x
        ls = map(subst_element, ls)
        subst_kw = {}
        for key, val in kw.items():
            if SCons.Util.is_String(val):
                val = self.subst(val)
            elif SCons.Util.is_List(val):
                result = []
                for v in val:
                    if SCons.Util.is_String(v):
                        v = self.subst(v)
                    result.append(v)
                val = result
            subst_kw[key] = val

        files, exports = self._get_SConscript_filenames(ls, subst_kw)
        subst_kw['exports'] = exports
        return apply(_SConscript, [self.fs,] + files, subst_kw)

    def SConscriptChdir(self, flag):
        global sconscript_chdir
        sconscript_chdir = flag

    def SetOption(self, name, value):
        name = self.subst(name)
        SCons.Script.Main.SetOption(name, value)
#
#
#
SCons.Environment.Environment = SConsEnvironment
def Configure(*args, **kw):
    """Global Configure() entry point; only legal while SConscript files
    are being read, not from within a Builder action."""
    if not SCons.Script.sconscript_reading:
        raise SCons.Errors.UserError, "Calling Configure from Builders is not supported."
    kw['_depth'] = 1
    return apply(SCons.SConf.SConf, args, kw)
# It's very important that the DefaultEnvironmentCall() class stay in this
# file, with the get_calling_namespaces() function, the compute_exports()
# function, the Frame class and the SConsEnvironment.Export() method.
# These things make up the calling stack leading up to the actual global
# Export() or SConscript() call that the user issued. We want to allow
# users to export local variables that they define, like so:
#
# def func():
# x = 1
# Export('x')
#
# To support this, the get_calling_namespaces() function assumes that
# the *first* stack frame that's not from this file is the local frame
# for the Export() or SConscript() call.
# Lazily-created proxy around the default Environment (see below).
_DefaultEnvironmentProxy = None

def get_DefaultEnvironmentProxy():
    """Return (creating on first use) a NoSubstitutionProxy wrapping the
    default Environment, so global function calls don't expand $VARIABLES."""
    global _DefaultEnvironmentProxy
    if not _DefaultEnvironmentProxy:
        default_env = SCons.Defaults.DefaultEnvironment()
        _DefaultEnvironmentProxy = SCons.Environment.NoSubstitutionProxy(default_env)
    return _DefaultEnvironmentProxy
class DefaultEnvironmentCall:
    """A class that implements "global function" calls of
    Environment methods by fetching the specified method from the
    DefaultEnvironment's class.  Note that this uses an intermediate
    proxy class instead of calling the DefaultEnvironment method
    directly so that the proxy can override the subst() method and
    thereby prevent expansion of construction variables (since from
    the user's point of view this was called as a global function,
    with no associated construction environment)."""
    def __init__(self, method_name, subst=0):
        self.method_name = method_name
        if subst:
            # Callers that want variable substitution get the real
            # default environment instead of the proxy.
            self.factory = SCons.Defaults.DefaultEnvironment
        else:
            self.factory = get_DefaultEnvironmentProxy
    def __call__(self, *args, **kw):
        env = self.factory()
        method = getattr(env, self.method_name)
        return apply(method, args, kw)
def BuildDefaultGlobals():
    """
    Create a dictionary containing all the default globals for
    SConstruct and SConscript files.
    """
    global GlobalDict
    if GlobalDict is None:
        GlobalDict = {}

        import SCons.Script
        d = SCons.Script.__dict__
        def not_a_module(m, d=d, mtype=type(SCons.Script)):
            # Exclude module objects so imported-module internals don't
            # leak into SConscript namespaces.
            return type(d[m]) != mtype
        for m in filter(not_a_module, dir(SCons.Script)):
            GlobalDict[m] = d[m]

    # Return a copy so each SConscript gets an independent namespace.
    return GlobalDict.copy()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
rat-pacREPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@Script@SConscript.py@.PATH_END.py
|
{
"filename": "p_tuning.md",
"repo_name": "huggingface/peft",
"repo_path": "peft_extracted/peft-main/docs/source/package_reference/p_tuning.md",
"type": "Markdown"
}
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# P-tuning
[P-tuning](https://hf.co/papers/2103.10385) adds trainable prompt embeddings to the input that is optimized by a prompt encoder to find a better prompt, eliminating the need to manually design prompts. The prompt tokens can be added anywhere in the input sequence, and p-tuning also introduces anchor tokens for improving performance.
The abstract from the paper is:
*While GPTs with traditional fine-tuning fail to achieve strong results on natural language understanding (NLU), we show that GPTs can be better than or comparable to similar-sized BERTs on NLU tasks with a novel method P-tuning -- which employs trainable continuous prompt embeddings. On the knowledge probing (LAMA) benchmark, the best GPT recovers 64\% (P@1) of world knowledge without any additional text provided during test time, which substantially improves the previous best by 20+ percentage points. On the SuperGlue benchmark, GPTs achieve comparable and sometimes better performance to similar-sized BERTs in supervised learning. Importantly, we find that P-tuning also improves BERTs' performance in both few-shot and supervised settings while largely reducing the need for prompt engineering. Consequently, P-tuning outperforms the state-of-the-art approaches on the few-shot SuperGlue benchmark.*.
## PromptEncoderConfig
[[autodoc]] tuners.p_tuning.config.PromptEncoderConfig
## PromptEncoder
[[autodoc]] tuners.p_tuning.model.PromptEncoder
|
huggingfaceREPO_NAMEpeftPATH_START.@peft_extracted@peft-main@docs@source@package_reference@p_tuning.md@.PATH_END.py
|
{
"filename": "_lightposition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/_lightposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LightpositionValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Generated validator for the compound "lightposition" property of
    # isosurface traces; per-field validation is delegated to the generated
    # Lightposition data class named in data_class_str.
    def __init__(self, plotly_name="lightposition", parent_name="isosurface", **kwargs):
        super(LightpositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Lightposition"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            x
                Numeric vector, representing the X coordinate
                for each vertex.
            y
                Numeric vector, representing the Y coordinate
                for each vertex.
            z
                Numeric vector, representing the Z coordinate
                for each vertex.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@_lightposition.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "PrincetonUniversity/athena",
"repo_path": "athena_extracted/athena-master/README.md",
"type": "Markdown"
}
|
athena
======
<!-- Jenkins Status Badge in Markdown (with view), unprotected, flat style -->
<!-- In general, need to be on Princeton VPN, logged into Princeton CAS, with ViewStatus access to Jenkins instance to click on unprotected Build Status Badge, but server is configured to whitelist GitHub -->
<!-- [](https://jenkins.princeton.edu/job/athena/job/PrincetonUniversity_athena_jenkins_master/) -->
[](https://www.repostatus.org/#active)
[](https://doi.org/10.5281/zenodo.11660592)
[](https://codecov.io/gh/PrincetonUniversity/athena)
[](https://opensource.org/licenses/BSD-3-Clause)
[](code_of_conduct.md)
<!--[](https://github.com/PrincetonUniversity/athena-public-version/issues)
[](https://github.com/PrincetonUniversity/athena-public-version/pulls) -->
<p align="center">
<img width="345" height="345" src="https://user-images.githubusercontent.com/1410981/115276281-759d8580-a108-11eb-9fc9-833480b97f95.png">
</p>
Athena++ radiation GRMHD code and adaptive mesh refinement (AMR) framework
Please read [our contributing guidelines](./CONTRIBUTING.md) for details on how to participate.
## Citation
To cite Athena++ in your publication, please use the following BibTeX to refer to the code's [method paper](https://ui.adsabs.harvard.edu/abs/2020ApJS..249....4S/abstract):
```
@article{Stone2020,
doi = {10.3847/1538-4365/ab929b},
url = {https://doi.org/10.3847%2F1538-4365%2Fab929b},
year = 2020,
month = jun,
publisher = {American Astronomical Society},
volume = {249},
number = {1},
pages = {4},
author = {James M. Stone and Kengo Tomida and Christopher J. White and Kyle G. Felker},
title = {The Athena$\mathplus$$\mathplus$ Adaptive Mesh Refinement Framework: Design and Magnetohydrodynamic Solvers},
journal = {The Astrophysical Journal Supplement Series},
}
```
Additionally, you can add a reference to `https://github.com/PrincetonUniversity/athena` in a footnote.
Finally, we have minted DOIs for each released version of Athena++ on Zenodo. This practice encourages computational reproducibility, since you can specify exactly which version of the code was used to produce the results in your publication. `10.5281/zenodo.4455879` is the DOI which cites _all_ versions of the code; it will always resolve to the latest release. Click on the Zenodo badge above to get access to BibTeX, etc. info related to these DOIs, e.g.:
```
@software{athena,
author = {Athena++ development team},
title = {PrincetonUniversity/athena: Athena++ v24.0},
month = jun,
year = 2024,
publisher = {Zenodo},
version = {24.0},
doi = {10.5281/zenodo.11660592},
url = {https://doi.org/10.5281/zenodo.11660592}
}
```
|
PrincetonUniversityREPO_NAMEathenaPATH_START.@athena_extracted@athena-master@README.md@.PATH_END.py
|
{
"filename": "test_coordinate_helpers.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/visualization/wcsaxes/tests/test_coordinate_helpers.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from unittest.mock import patch
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import pytest
from astropy import units as u
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.visualization.wcsaxes.coordinate_helpers import CoordinateHelper
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.wcs import WCS
MSX_HEADER = fits.Header.fromtextfile(get_pkg_data_filename("data/msx_header"))
def teardown_function(function):
    # pytest calls this after every test in this module; close all open
    # Matplotlib figures so figure state cannot leak between tests.
    plt.close("all")
def test_getaxislabel(ignore_matplotlibrc):
    """Labels set via ``set_axislabel`` round-trip through ``get_axislabel``."""
    figure = plt.figure()
    axes = WCSAxes(figure, [0.1, 0.1, 0.8, 0.8], aspect="equal")

    axes.coords[0].set_axislabel("X")
    axes.coords[1].set_axislabel("Y")

    assert axes.coords[0].get_axislabel() == "X"
    assert axes.coords[1].get_axislabel() == "Y"
@pytest.fixture
def ax():
    """Return a WCSAxes instance already added to a fresh figure."""
    figure = plt.figure()
    wcs_axes = WCSAxes(figure, [0.1, 0.1, 0.8, 0.8], aspect="equal")
    figure.add_axes(wcs_axes)
    return wcs_axes
def assert_label_draw(ax, x_label, y_label):
    """Draw the canvas and check how often each axis label was positioned.

    ``x_label``/``y_label`` are compared against ``set_position``'s call
    count, so ``True``/``False`` means "positioned exactly once"/"never".
    """
    ax.coords[0].set_axislabel("Label 1")
    ax.coords[1].set_axislabel("Label 2")

    with patch.object(ax.coords[0]._axislabels, "set_position") as pos1, patch.object(
        ax.coords[1]._axislabels, "set_position"
    ) as pos2:
        ax.figure.canvas.draw()

    assert pos1.call_count == x_label
    assert pos2.call_count == y_label
def test_label_visibility_rules_default(ignore_matplotlibrc, ax):
    # With default settings both axis labels are drawn.
    assert_label_draw(ax, True, True)
def test_label_visibility_rules_label(ignore_matplotlibrc, ax):
    # Hide the tick labels on one axis and push the other axis' ticks far
    # outside the plot; with the default "labels" rule neither axis label
    # should be drawn.
    ax.coords[0].set_ticklabel_visible(False)
    ax.coords[1].set_ticks(values=[-9999] * u.one)
    assert_label_draw(ax, False, False)
def test_label_visibility_rules_ticks(ignore_matplotlibrc, ax):
    # Under the "ticks" rule, hidden tick *labels* (axis 0) do not suppress
    # the axis label, but off-plot ticks (axis 1) do.
    ax.coords[0].set_axislabel_visibility_rule("ticks")
    ax.coords[1].set_axislabel_visibility_rule("ticks")

    ax.coords[0].set_ticklabel_visible(False)
    ax.coords[1].set_ticks(values=[-9999] * u.one)
    assert_label_draw(ax, True, False)
def test_label_visibility_rules_always(ignore_matplotlibrc, ax):
    # The "always" rule draws both axis labels regardless of tick/label
    # visibility.
    ax.coords[0].set_axislabel_visibility_rule("always")
    ax.coords[1].set_axislabel_visibility_rule("always")

    ax.coords[0].set_ticklabel_visible(False)
    ax.coords[1].set_ticks(values=[-9999] * u.one)
    assert_label_draw(ax, True, True)
def test_format_unit():
    """``get_format_unit`` reflects the default and any value set later."""
    figure = plt.figure()
    axes = WCSAxes(figure, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
    figure.add_axes(axes)

    # format_coord (and unit resolution) only works after an initial draw.
    axes.figure.canvas.draw()

    assert axes.coords[1].get_format_unit() == "deg"

    axes.coords[1].set_format_unit("arcsec")
    assert axes.coords[1].get_format_unit() == "arcsec"
def test_set_separator():
    """``set_separator`` accepts a tuple, a plain string, and None (reset)."""
    figure = plt.figure()
    axes = WCSAxes(figure, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
    figure.add_axes(axes)

    # Force a draw which is required for format_coord to work.
    axes.figure.canvas.draw()

    coord = axes.coords[1]
    coord.set_format_unit("deg")
    assert coord.format_coord(4) == "4\xb000'00\""

    coord.set_separator((":", ":", ""))
    assert coord.format_coord(4) == "4:00:00"

    coord.set_separator("abc")
    assert coord.format_coord(4) == "4a00b00c"

    # None restores the default degree/arcmin/arcsec separators.
    coord.set_separator(None)
    assert coord.format_coord(4) == "4\xb000'00\""
@pytest.mark.parametrize(
    "draw_grid, expected_visibility", [(True, True), (False, False), (None, True)]
)
def test_grid_variations(ignore_matplotlibrc, draw_grid, expected_visibility):
    """``grid(draw_grid=...)`` toggles visibility; ``None`` leaves it on."""
    figure = plt.figure()
    axes = WCSAxes(figure, [0.1, 0.1, 0.8, 0.8], aspect="equal")
    figure.add_axes(axes)

    helper = CoordinateHelper(
        parent_axes=axes, transform=transforms.Affine2D().scale(2.0)
    )
    helper.grid(draw_grid=draw_grid)
    assert helper._grid_lines_kwargs["visible"] == expected_visibility
def test_get_position():
    """Automatic positions ('#') resolve after a draw and can be overridden."""
    fig = plt.figure()
    ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect="equal")
    fig.add_axes(ax)

    # Before the first draw the automatic ('#') positions are unresolved.
    assert ax.coords[0].get_ticks_position() == ["b", "r", "t", "l"]
    assert ax.coords[1].get_ticks_position() == ["b", "r", "t", "l"]
    assert ax.coords[0].get_ticklabel_position() == ["#"]
    assert ax.coords[1].get_ticklabel_position() == ["#"]
    assert ax.coords[0].get_axislabel_position() == ["#"]
    assert ax.coords[1].get_axislabel_position() == ["#"]

    fig.canvas.draw()

    # After drawing, '#' resolves to a concrete side but remains in the list.
    assert ax.coords[0].get_ticks_position() == ["b", "r", "t", "l"]
    assert ax.coords[1].get_ticks_position() == ["b", "r", "t", "l"]
    assert ax.coords[0].get_ticklabel_position() == ["b", "#"]
    assert ax.coords[1].get_ticklabel_position() == ["l", "#"]
    assert ax.coords[0].get_axislabel_position() == ["b", "#"]
    assert ax.coords[1].get_axislabel_position() == ["l", "#"]

    # Explicitly set positions replace the automatic ones entirely.
    ax.coords[0].set_ticks_position("br")
    ax.coords[1].set_ticks_position("tl")
    ax.coords[0].set_ticklabel_position("bt")
    ax.coords[1].set_ticklabel_position("rl")
    ax.coords[0].set_axislabel_position("t")
    ax.coords[1].set_axislabel_position("r")

    assert ax.coords[0].get_ticks_position() == ["b", "r"]
    assert ax.coords[1].get_ticks_position() == ["t", "l"]
    assert ax.coords[0].get_ticklabel_position() == ["b", "t"]
    assert ax.coords[1].get_ticklabel_position() == ["r", "l"]
    assert ax.coords[0].get_axislabel_position() == ["t"]
    assert ax.coords[1].get_axislabel_position() == ["r"]
def test_deprecated_getters():
    """Accessing ticks/ticklabels/axislabels warns but still works."""
    fig, _ = plt.subplots()
    axes = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect="equal")
    helper = CoordinateHelper(parent_axes=axes)

    with pytest.warns(AstropyDeprecationWarning):
        assert not helper.ticks.get_display_minor_ticks()

    with pytest.warns(AstropyDeprecationWarning):
        assert helper.ticklabels.text == {}

    with pytest.warns(AstropyDeprecationWarning):
        assert helper.axislabels.get_visibility_rule() == "labels"
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@visualization@wcsaxes@tests@test_coordinate_helpers.py@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/parso/py3/tests/conftest.py",
"type": "Python"
}
|
import re
import tempfile
import shutil
import logging
import os
from pathlib import Path
import pytest
import yatest.common
import parso
from parso import cache
from parso.utils import parse_version_string
# Keep pytest from trying to collect setup.py as a test module.
collect_ignore = ["setup.py"]

# Grammar versions the version-parametrized fixtures below iterate over.
_SUPPORTED_VERSIONS = '3.6', '3.7', '3.8', '3.9', '3.10'
@pytest.fixture(scope='session')
def clean_parso_cache():
    """
    Set the default cache directory to a temporary directory during tests.

    Note that you can't use built-in `tmpdir` and `monkeypatch`
    fixture here because their scope is 'function', which is not used
    in 'session' scope fixture.

    This fixture is activated in ../pytest.ini.
    """
    old = cache._default_cache_path
    tmp = tempfile.mkdtemp(prefix='parso-test-')
    cache._default_cache_path = Path(tmp)
    try:
        yield
    finally:
        # Restore the original cache path and delete the temporary directory
        # even when the session aborts with an error at the yield point
        # (code after a bare yield is skipped in that case).
        cache._default_cache_path = old
        shutil.rmtree(tmp)
def pytest_addoption(parser):
    # Expose a --logging/-L flag; pytest_configure below reads it to switch
    # the root logger to DEBUG for a verbose test run.
    parser.addoption("--logging", "-L", action='store_true',
                     help="Enables the logging output.")
def pytest_generate_tests(metafunc):
    """Parametrize the case- and version-driven fixtures used by the tests."""
    if 'normalizer_issue_case' in metafunc.fixturenames:
        # NOTE(review): ``yatest`` is the (non-pytest) test runner providing
        # the source path; the case files live next to the test sources.
        base_dir = os.path.join(yatest.common.test_source_path(), 'normalizer_issue_files')

        cases = list(colllect_normalizer_tests(base_dir))  # sic: triple-l helper name
        metafunc.parametrize(
            'normalizer_issue_case',
            cases,
            ids=[c.name for c in cases]
        )
    elif 'each_version' in metafunc.fixturenames:
        metafunc.parametrize('each_version', _SUPPORTED_VERSIONS)
    elif 'version_ge_py38' in metafunc.fixturenames:
        # All supported versions from 3.8 upwards.
        ge38 = set(_SUPPORTED_VERSIONS) - {'3.6', '3.7'}
        metafunc.parametrize('version_ge_py38', sorted(ge38))
class NormalizerIssueCase:
    """
    Static Analysis cases lie in the static_analysis folder.

    The tests also start with `#!`, like the goto_definition tests.
    """
    def __init__(self, path):
        self.path = path
        self.name = os.path.basename(path)
        # File names like "...python3.8.py" pin the case to one grammar
        # version; otherwise python_version stays None.
        version_match = re.search(r'python([\d.]+)\.py', self.name)
        self.python_version = version_match.group(1) if version_match else None
def colllect_normalizer_tests(base_dir):
    """Yield a ``NormalizerIssueCase`` for every ``*.py`` file in *base_dir*.

    The function keeps its historical triple-l spelling because
    ``pytest_generate_tests`` refers to it by that name; the correctly
    spelled alias below is provided for new call sites.
    """
    for f_name in os.listdir(base_dir):
        if f_name.endswith(".py"):
            path = os.path.join(base_dir, f_name)
            yield NormalizerIssueCase(path)


# Correctly spelled, backward-compatible alias.
collect_normalizer_tests = colllect_normalizer_tests
def pytest_configure(config):
    """Switch the root logger to DEBUG when ``--logging`` was passed."""
    if not config.option.logging:
        return
    logging.getLogger().setLevel(logging.DEBUG)
class Checker:
    """Parse/validation helper bound to one grammar *version*.

    ``is_passing`` states whether code handed to this checker is expected
    to be valid for that version; the helpers flip their expectations
    accordingly.
    """
    def __init__(self, version, is_passing):
        self.version = version
        self._is_passing = is_passing
        self.grammar = parso.load_grammar(version=self.version)

    def parse(self, code):
        # For "failing" checkers, parsing must raise instead of returning.
        if self._is_passing:
            return parso.parse(code, version=self.version, error_recovery=False)
        else:
            self._invalid_syntax(code)

    def _invalid_syntax(self, code):
        with pytest.raises(parso.ParserSyntaxError):
            module = parso.parse(code, version=self.version, error_recovery=False)
            # For debugging
            print(module.children)

    def get_error(self, code):
        # Exactly one of "errors were found" / "expected to pass" may hold.
        errors = list(self.grammar.iter_errors(self.grammar.parse(code)))
        assert bool(errors) != self._is_passing
        if errors:
            return errors[0]

    def get_error_message(self, code):
        error = self.get_error(code)
        if error is None:
            return
        return error.message

    def assert_no_error_in_passing(self, code):
        if self._is_passing:
            module = self.grammar.parse(code)
            assert not list(self.grammar.iter_errors(module))
@pytest.fixture
def works_not_in_py(each_version):
    # Checker expecting the code to be *invalid* on every supported version.
    return Checker(each_version, False)
@pytest.fixture
def works_in_py(each_version):
    # Checker expecting the code to be *valid* on every supported version.
    return Checker(each_version, True)
@pytest.fixture
def works_ge_py38(each_version):
    """Checker that expects success only for Python >= 3.8 grammars."""
    return Checker(each_version, parse_version_string(each_version) >= (3, 8))
@pytest.fixture
def works_ge_py39(each_version):
    """Checker that expects success only for Python >= 3.9 grammars."""
    return Checker(each_version, parse_version_string(each_version) >= (3, 9))
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@parso@py3@tests@conftest.py@.PATH_END.py
|
{
"filename": "_bordercolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/cone/colorbar/_bordercolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``cone.colorbar.bordercolor`` attribute."""

    def __init__(
        self, plotly_name="bordercolor", parent_name="cone.colorbar", **kwargs
    ):
        # Fill in the defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("role", "style")
        super(BordercolorValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@cone@colorbar@_bordercolor.py@.PATH_END.py
|
{
"filename": "plot_plummer.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/examples/textbook/plot_plummer.py",
"type": "Python"
}
|
"""
Example AMUSE script to generate a Plummer sphere and plot the results.
"""
###BOOKLISTSTART###
from matplotlib.pyplot import show, xlim, ylim, figure
from amuse.plot import scatter, xlabel, ylabel
from amuse.lab import new_plummer_model
def main(N=10):
    """Generate an N-body Plummer sphere and scatter-plot its x/y positions."""
    figure(figsize=(5, 5))
    cluster = new_plummer_model(N)
    scatter(cluster.x, cluster.y)
    xlim(-1, 1)
    ylim(-1, 1)
    xlabel("X")
    ylabel("Y")
    show()
###BOOKLISTSTOP###
def new_option_parser():
    # Build the command-line parser for this example.
    # NOTE(review): optparse is deprecated in favor of argparse; kept here
    # for consistency with the other AMUSE textbook examples.
    from optparse import OptionParser
    result = OptionParser()
    result.add_option("-N", dest="N", type="int", default=1000,
                      help="number of stars [1000]")
    return result
# Run both as a plain script and under the AMUSE plotting harness
# (which imports the module with __name__ == '__plot__').
if __name__ in ('__main__', '__plot__'):
    o, arguments = new_option_parser().parse_args()
    main(**o.__dict__)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@examples@textbook@plot_plummer.py@.PATH_END.py
|
{
"filename": "lexer.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Jinja2/py2/jinja2/lexer.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
is used to do some preprocessing. It filters out invalid operators like
the bitshift operators we don't allow in templates. It separates
template code and python code in expressions.
"""
import re
from ast import literal_eval
from collections import deque
from operator import itemgetter
from ._compat import implements_iterator
from ._compat import intern
from ._compat import iteritems
from ._compat import text_type
from .exceptions import TemplateSyntaxError
from .utils import LRUCache
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)

# static regular expressions
whitespace_re = re.compile(r"\s+", re.U)
newline_re = re.compile(r"(\r\n|\r|\n)")
string_re = re.compile(
    r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
)
# integers/floats may use "_" as a digit group separator (PEP 515)
integer_re = re.compile(r"(\d+_)*\d+")
float_re = re.compile(
    r"""
    (?<!\.)  # doesn't start with a .
    (\d+_)*\d+  # digits, possibly _ separated
    (
        (\.(\d+_)*\d+)?  # optional fractional part
        e[+\-]?(\d+_)*\d+  # exponent part
    |
        \.(\d+_)*\d+  # required fractional part
    )
    """,
    re.IGNORECASE | re.VERBOSE,
)

try:
    # check if this Python supports Unicode identifiers
    compile("föö", "<unknown>", "eval")
except SyntaxError:
    # Python 2, no Unicode support, use ASCII identifiers
    name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
    check_ident = False
else:
    # Unicode support, import generated re pattern and set flag to use
    # str.isidentifier to validate during lexing.
    from ._identifier import pattern as name_re

    check_ident = True

# internal the tokens and keep references to them
# (interning lets the rest of the module compare token types with ``is``)
TOKEN_ADD = intern("add")
TOKEN_ASSIGN = intern("assign")
TOKEN_COLON = intern("colon")
TOKEN_COMMA = intern("comma")
TOKEN_DIV = intern("div")
TOKEN_DOT = intern("dot")
TOKEN_EQ = intern("eq")
TOKEN_FLOORDIV = intern("floordiv")
TOKEN_GT = intern("gt")
TOKEN_GTEQ = intern("gteq")
TOKEN_LBRACE = intern("lbrace")
TOKEN_LBRACKET = intern("lbracket")
TOKEN_LPAREN = intern("lparen")
TOKEN_LT = intern("lt")
TOKEN_LTEQ = intern("lteq")
TOKEN_MOD = intern("mod")
TOKEN_MUL = intern("mul")
TOKEN_NE = intern("ne")
TOKEN_PIPE = intern("pipe")
TOKEN_POW = intern("pow")
TOKEN_RBRACE = intern("rbrace")
TOKEN_RBRACKET = intern("rbracket")
TOKEN_RPAREN = intern("rparen")
TOKEN_SEMICOLON = intern("semicolon")
TOKEN_SUB = intern("sub")
TOKEN_TILDE = intern("tilde")
TOKEN_WHITESPACE = intern("whitespace")
TOKEN_FLOAT = intern("float")
TOKEN_INTEGER = intern("integer")
TOKEN_NAME = intern("name")
TOKEN_STRING = intern("string")
TOKEN_OPERATOR = intern("operator")
TOKEN_BLOCK_BEGIN = intern("block_begin")
TOKEN_BLOCK_END = intern("block_end")
TOKEN_VARIABLE_BEGIN = intern("variable_begin")
TOKEN_VARIABLE_END = intern("variable_end")
TOKEN_RAW_BEGIN = intern("raw_begin")
TOKEN_RAW_END = intern("raw_end")
TOKEN_COMMENT_BEGIN = intern("comment_begin")
TOKEN_COMMENT_END = intern("comment_end")
TOKEN_COMMENT = intern("comment")
TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
TOKEN_LINESTATEMENT_END = intern("linestatement_end")
TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
TOKEN_LINECOMMENT_END = intern("linecomment_end")
TOKEN_LINECOMMENT = intern("linecomment")
TOKEN_DATA = intern("data")
TOKEN_INITIAL = intern("initial")
TOKEN_EOF = intern("eof")

# bind operators to token types
operators = {
    "+": TOKEN_ADD,
    "-": TOKEN_SUB,
    "/": TOKEN_DIV,
    "//": TOKEN_FLOORDIV,
    "*": TOKEN_MUL,
    "%": TOKEN_MOD,
    "**": TOKEN_POW,
    "~": TOKEN_TILDE,
    "[": TOKEN_LBRACKET,
    "]": TOKEN_RBRACKET,
    "(": TOKEN_LPAREN,
    ")": TOKEN_RPAREN,
    "{": TOKEN_LBRACE,
    "}": TOKEN_RBRACE,
    "==": TOKEN_EQ,
    "!=": TOKEN_NE,
    ">": TOKEN_GT,
    ">=": TOKEN_GTEQ,
    "<": TOKEN_LT,
    "<=": TOKEN_LTEQ,
    "=": TOKEN_ASSIGN,
    ".": TOKEN_DOT,
    ":": TOKEN_COLON,
    "|": TOKEN_PIPE,
    ",": TOKEN_COMMA,
    ";": TOKEN_SEMICOLON,
}

reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), "operators dropped"
# longest operators first so e.g. "**" is not lexed as two "*" tokens
operator_re = re.compile(
    "(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))
)

# tokens the parser never sees (dropped in Lexer.wrap)
ignored_tokens = frozenset(
    [
        TOKEN_COMMENT_BEGIN,
        TOKEN_COMMENT,
        TOKEN_COMMENT_END,
        TOKEN_WHITESPACE,
        TOKEN_LINECOMMENT_BEGIN,
        TOKEN_LINECOMMENT_END,
        TOKEN_LINECOMMENT,
    ]
)
# tokens that are suppressed entirely when their payload is empty
ignore_if_empty = frozenset(
    [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
)
def _describe_token_type(token_type):
    """Return a human readable description for an internal token type."""
    if token_type in reverse_operators:
        return reverse_operators[token_type]

    descriptions = {
        TOKEN_COMMENT_BEGIN: "begin of comment",
        TOKEN_COMMENT_END: "end of comment",
        TOKEN_COMMENT: "comment",
        TOKEN_LINECOMMENT: "comment",
        TOKEN_BLOCK_BEGIN: "begin of statement block",
        TOKEN_BLOCK_END: "end of statement block",
        TOKEN_VARIABLE_BEGIN: "begin of print statement",
        TOKEN_VARIABLE_END: "end of print statement",
        TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
        TOKEN_LINESTATEMENT_END: "end of line statement",
        TOKEN_DATA: "template data / text",
        TOKEN_EOF: "end of template",
    }
    # unknown types describe themselves
    return descriptions.get(token_type, token_type)
def describe_token(token):
    """Returns a description of the token."""
    # name tokens are best described by their actual value
    return token.value if token.type == TOKEN_NAME else _describe_token_type(token.type)
def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    if ":" not in expr:
        return _describe_token_type(expr)

    token_type, value = expr.split(":", 1)
    # "name:foo" expressions are best described by the name itself
    if token_type == TOKEN_NAME:
        return value
    return _describe_token_type(token_type)
def count_newlines(value):
    """Count the number of newline characters in the string. This is
    useful for extensions that filter a stream.
    """
    return sum(1 for _ in newline_re.finditer(value))
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    e = re.escape
    # (length, token-type, escaped-pattern) triples; the length component is
    # only used below to sort longer delimiters first so that a short
    # delimiter cannot shadow a longer one that shares its prefix.
    rules = [
        (
            len(environment.comment_start_string),
            TOKEN_COMMENT_BEGIN,
            e(environment.comment_start_string),
        ),
        (
            len(environment.block_start_string),
            TOKEN_BLOCK_BEGIN,
            e(environment.block_start_string),
        ),
        (
            len(environment.variable_start_string),
            TOKEN_VARIABLE_BEGIN,
            e(environment.variable_start_string),
        ),
    ]

    if environment.line_statement_prefix is not None:
        rules.append(
            (
                len(environment.line_statement_prefix),
                TOKEN_LINESTATEMENT_BEGIN,
                r"^[ \t\v]*" + e(environment.line_statement_prefix),
            )
        )
    if environment.line_comment_prefix is not None:
        rules.append(
            (
                len(environment.line_comment_prefix),
                TOKEN_LINECOMMENT_BEGIN,
                r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
            )
        )

    # drop the length component again; it only existed for the sort
    return [x[1:] for x in sorted(rules, reverse=True)]
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        # Deferred raise: the lexer stores Failure instances in its rule
        # tables and only calls them when the failing rule actually matches.
        raise self.error_class(self.message, lineno, filename)
class Token(tuple):
    """Token class."""

    __slots__ = ()
    # expose the tuple fields (lineno, type, value) as read-only properties
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        # intern the type string so identity checks (``is``) on token types
        # work throughout the module
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == "name":
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression. This can either be a
        token type or ``'token_type:token_value'``. This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ":" in expr:
            return expr.split(":", 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams. Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        # Stop (and close the underlying stream) once EOF is reached; the
        # EOF token itself is never yielded.
        token = self.stream.current
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return token
@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\\s. The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead. The current active token is stored as :attr:`current`.
    """

    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        # start on a synthetic "initial" token, then advance once so that
        # ``current`` holds the first real token
        self.current = Token(1, TOKEN_INITIAL, "")
        next(self)

    def __iter__(self):
        return TokenStreamIterator(self)

    def __bool__(self):
        return bool(self._pushed) or self.current.type is not TOKEN_EOF

    __nonzero__ = __bool__  # py2

    @property
    def eos(self):
        """Are we at the end of the stream?"""
        return not self

    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self):
        """Look at the next token."""
        # advance, remember the token, then undo the advance by pushing the
        # new current token back and restoring the old one
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n=1):
        """Got n tokens ahead."""
        for _ in range(n):
            next(self)

    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self):
        """Go one token ahead and return the old one.

        Use the built-in :func:`next` instead of calling this directly.
        """
        rv = self.current

        # pushed-back tokens take priority over the underlying generator
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()

        return rv

    def close(self):
        """Close the stream."""
        # replace the current token with EOF so subsequent reads terminate
        self.current = Token(self.current.lineno, TOKEN_EOF, "")
        self._iter = None
        self.closed = True

    def expect(self, expr):
        """Expect a given token type and return it. This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)

            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError(
                    "unexpected end of template, expected %r." % expr,
                    self.current.lineno,
                    self.name,
                    self.filename,
                )

            raise TemplateSyntaxError(
                "expected token %r, got %r" % (expr, describe_token(self.current)),
                self.current.lineno,
                self.name,
                self.filename,
            )

        # return the matched token and advance past it
        try:
            return self.current
        finally:
            next(self)
def get_lexer(environment):
    """Return a lexer for *environment*, reusing a cached instance when all
    lexer-relevant settings match."""
    cache_key = (
        environment.block_start_string,
        environment.block_end_string,
        environment.variable_start_string,
        environment.variable_end_string,
        environment.comment_start_string,
        environment.comment_end_string,
        environment.line_statement_prefix,
        environment.line_comment_prefix,
        environment.trim_blocks,
        environment.lstrip_blocks,
        environment.newline_sequence,
        environment.keep_trailing_newline,
    )
    lexer = _lexer_cache.get(cache_key)

    if lexer is None:
        _lexer_cache[cache_key] = lexer = Lexer(environment)

    return lexer
class OptionalLStrip(tuple):
    """A special tuple for marking a point in the state that can have
    lstrip applied.

    The lexer distinguishes these from plain rule tuples via isinstance().
    """

    __slots__ = ()

    # Even though it looks like a no-op, creating instances fails
    # without this.
    def __new__(cls, *members, **kwargs):
        return super(OptionalLStrip, cls).__new__(cls, members)
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment):
        # shortcuts
        e = re.escape

        def c(x):
            return re.compile(x, re.M | re.S)

        # lexing rules for tags
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None),
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and "\\n?" or ""

        # If lstrip is enabled, it should not be applied if there is any
        # non-whitespace between the newline and block.
        self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # global lexing rules: a state machine keyed by state name; each
        # entry is a list of (regex, token(s), new-state) rules tried in order
        self.rules = {
            "root": [
                # directives
                (
                    c(
                        "(.*?)(?:%s)"
                        % "|".join(
                            [
                                r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))"
                                % (
                                    e(environment.block_start_string),
                                    e(environment.block_end_string),
                                    e(environment.block_end_string),
                                )
                            ]
                            + [
                                r"(?P<%s>%s(\-|\+|))" % (n, r)
                                for n, r in root_tag_rules
                            ]
                        )
                    ),
                    OptionalLStrip(TOKEN_DATA, "#bygroup"),
                    "#bygroup",
                ),
                # data
                (c(".+"), TOKEN_DATA, None),
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (
                    c(
                        r"(.*?)((?:\-%s\s*|%s)%s)"
                        % (
                            e(environment.comment_end_string),
                            e(environment.comment_end_string),
                            block_suffix_re,
                        )
                    ),
                    (TOKEN_COMMENT, TOKEN_COMMENT_END),
                    "#pop",
                ),
                (c("(.)"), (Failure("Missing end of comment tag"),), None),
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (
                    c(
                        r"(?:\-%s\s*|%s)%s"
                        % (
                            e(environment.block_end_string),
                            e(environment.block_end_string),
                            block_suffix_re,
                        )
                    ),
                    TOKEN_BLOCK_END,
                    "#pop",
                ),
            ]
            + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (
                    c(
                        r"\-%s\s*|%s"
                        % (
                            e(environment.variable_end_string),
                            e(environment.variable_end_string),
                        )
                    ),
                    TOKEN_VARIABLE_END,
                    "#pop",
                )
            ]
            + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (
                    c(
                        r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))"
                        % (
                            e(environment.block_start_string),
                            e(environment.block_end_string),
                            e(environment.block_end_string),
                            block_suffix_re,
                        )
                    ),
                    OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),
                    "#pop",
                ),
                (c("(.)"), (Failure("Missing end of raw directive"),), None),
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
            ]
            + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (
                    c(r"(.*?)()(?=\n|$)"),
                    (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
                    "#pop",
                )
            ],
        }

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream."""
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == TOKEN_LINESTATEMENT_BEGIN:
                token = TOKEN_BLOCK_BEGIN
            elif token == TOKEN_LINESTATEMENT_END:
                token = TOKEN_BLOCK_END
            # we are not interested in those tokens in the parser
            elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
                continue
            elif token == TOKEN_DATA:
                value = self._normalize_newlines(value)
            elif token == "keyword":
                token = value
            elif token == TOKEN_NAME:
                value = str(value)

                if check_ident and not value.isidentifier():
                    raise TemplateSyntaxError(
                        "Invalid character in identifier", lineno, name, filename
                    )
            elif token == TOKEN_STRING:
                # try to unescape string
                try:
                    value = (
                        self._normalize_newlines(value[1:-1])
                        .encode("ascii", "backslashreplace")
                        .decode("unicode-escape")
                    )
                except Exception as e:
                    msg = str(e).split(":")[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
            elif token == TOKEN_INTEGER:
                value = int(value.replace("_", ""))
            elif token == TOKEN_FLOAT:
                # remove all "_" first to support more Python versions
                value = literal_eval(value.replace("_", ""))
            elif token == TOKEN_OPERATOR:
                token = operators[value]

            yield Token(lineno, token, value)

    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator. Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()

        if self.keep_trailing_newline and source:
            for newline in ("\r\n", "\r", "\n"):
                if source.endswith(newline):
                    lines.append("")
                    break

        source = "\n".join(lines)
        pos = 0
        lineno = 1
        stack = ["root"]

        if state is not None and state != "root":
            assert state in ("variable", "block"), "invalid state"
            stack.append(state + "_begin")

        statetokens = self.rules[stack[-1]]
        source_length = len(source)
        balancing_stack = []
        lstrip_unless_re = self.lstrip_unless_re
        newlines_stripped = 0
        line_starting = True

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)

                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and tokens in (
                    TOKEN_VARIABLE_END,
                    TOKEN_BLOCK_END,
                    TOKEN_LINESTATEMENT_END,
                ):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    groups = m.groups()

                    if isinstance(tokens, OptionalLStrip):
                        # Rule supports lstrip. Match will look like
                        # text, block type, whitespace control, type, control, ...
                        text = groups[0]

                        # Skipping the text and first type, every other group is the
                        # whitespace control for each type. One of the groups will be
                        # -, +, or empty string instead of None.
                        strip_sign = next(g for g in groups[2::2] if g is not None)

                        if strip_sign == "-":
                            # Strip all whitespace between the text and the tag.
                            stripped = text.rstrip()
                            newlines_stripped = text[len(stripped) :].count("\n")
                            groups = (stripped,) + groups[1:]
                        elif (
                            # Not marked for preserving whitespace.
                            strip_sign != "+"
                            # lstrip is enabled.
                            and lstrip_unless_re is not None
                            # Not a variable expression.
                            and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
                        ):
                            # The start of text between the last newline and the tag.
                            l_pos = text.rfind("\n") + 1

                            if l_pos > 0 or line_starting:
                                # If there's only whitespace between the newline and the
                                # tag, strip it.
                                if not lstrip_unless_re.search(text, l_pos):
                                    groups = (text[:l_pos],) + groups[1:]

                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == "#bygroup":
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count("\n")
                                    break
                            else:
                                raise RuntimeError(
                                    "%r wanted to resolve "
                                    "the token dynamically"
                                    " but no group matched" % regex
                                )
                        # normal group
                        else:
                            data = groups[idx]

                            if data or token not in ignore_if_empty:
                                yield lineno, token, data

                            lineno += data.count("\n") + newlines_stripped
                            newlines_stripped = 0

                # strings as token just are yielded as it.
                else:
                    data = m.group()

                    # update brace/parentheses balance
                    if tokens == TOKEN_OPERATOR:
                        if data == "{":
                            balancing_stack.append("}")
                        elif data == "(":
                            balancing_stack.append(")")
                        elif data == "[":
                            balancing_stack.append("]")
                        elif data in ("}", ")", "]"):
                            if not balancing_stack:
                                raise TemplateSyntaxError(
                                    "unexpected '%s'" % data, lineno, name, filename
                                )

                            expected_op = balancing_stack.pop()

                            if expected_op != data:
                                raise TemplateSyntaxError(
                                    "unexpected '%s', "
                                    "expected '%s'" % (data, expected_op),
                                    lineno,
                                    name,
                                    filename,
                                )

                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data

                    lineno += data.count("\n")

                line_starting = m.group()[-1:] == "\n"

                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == "#pop":
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == "#bygroup":
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError(
                                "%r wanted to resolve the "
                                "new state dynamically but"
                                " no group matched" % regex
                            )
                    # direct state name given
                    else:
                        stack.append(new_state)

                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError(
                        "%r yielded empty string without stack change" % regex
                    )

                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return

                # something went wrong
                raise TemplateSyntaxError(
                    "unexpected char %r at %d" % (source[pos], pos),
                    lineno,
                    name,
                    filename,
                )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Jinja2@py2@jinja2@lexer.py@.PATH_END.py
|
{
"filename": "test_ontotext_graphdb_graph.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/graphs/test_ontotext_graphdb_graph.py",
"type": "Python"
}
|
from pathlib import Path
import pytest
from langchain_community.graphs import OntotextGraphDBGraph
"""
cd libs/community/tests/integration_tests/graphs/docker-compose-ontotext-graphdb
./start.sh
"""
def test_query_method_with_valid_query() -> None:
    """A well-formed SPARQL SELECT against the live repository returns rows."""
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    sparql = (
        "PREFIX voc: <https://swapi.co/vocabulary/> "
        "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
        "SELECT ?eyeColor "
        "WHERE {"
        ' ?besalisk rdfs:label "Dexter Jettster" ; '
        " voc:eyeColor ?eyeColor ."
        "}"
    )
    rows = graph.query(sparql)
    # Exactly one binding row with a single variable is expected.
    assert len(rows) == 1
    first_row = rows[0]
    assert len(first_row) == 1
    assert str(first_row[0]) == "yellow"
def test_query_method_with_invalid_query() -> None:
    """A malformed query surfaces GraphDB's error message as a ``ValueError``."""
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    # Aggregate (MAX) without a GROUP BY clause is invalid SPARQL.
    bad_sparql = (
        "PREFIX : <https://swapi.co/vocabulary/> "
        "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
        "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
        "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
        "SELECT ?character (MAX(?lifespan) AS ?maxLifespan) "
        "WHERE {"
        " ?species a :Species ;"
        " :character ?character ;"
        " :averageLifespan ?lifespan ."
        " FILTER(xsd:integer(?lifespan))"
        "} "
        "ORDER BY DESC(?maxLifespan) "
        "LIMIT 1"
    )
    with pytest.raises(ValueError) as exc_info:
        graph.query(bad_sparql)
    assert str(exc_info.value) == (
        "You did something wrong formulating either the URI or your SPARQL query"
    )
def test_get_schema_with_query() -> None:
    """The ontology fetched via the CONSTRUCT query parses to 19 statements."""
    from rdflib import Graph

    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    schema_graph = Graph().parse(data=graph.get_schema, format="turtle")
    assert len(schema_graph) == 19
@pytest.mark.parametrize(
    "rdf_format, file_extension",
    [
        ("json-ld", "json"),
        ("json-ld", "jsonld"),
        ("json-ld", "json-ld"),
        ("xml", "rdf"),
        ("xml", "xml"),
        ("xml", "owl"),
        ("pretty-xml", "xml"),
        ("n3", "n3"),
        ("turtle", "ttl"),
        ("nt", "nt"),
        ("trig", "trig"),
        ("nquads", "nq"),
        ("nquads", "nquads"),
        ("trix", "trix"),
    ],
)
def test_get_schema_from_file(
    tmp_path: Path, rdf_format: str, file_extension: str
) -> None:
    """Round-trip the ontology through a local file in each RDF serialization.

    The format is inferred from the file extension; the file-backed graph must
    expose the same 19-statement schema as the query-backed one.
    """
    from rdflib import ConjunctiveGraph, Graph

    expected_statements = 19
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    assert (
        len(Graph().parse(data=graph.get_schema, format="turtle"))
        == expected_statements
    )
    # Serialize the ontology schema loaded with the query into a local file
    # in the requested RDF format and check that this yields the same
    # number of statements.
    dataset = ConjunctiveGraph()
    context = dataset.get_context("https://swapi.co/ontology/")
    context.parse(data=graph.get_schema, format="turtle")
    assert len(context) == expected_statements
    assert len(dataset) == expected_statements
    ontology_file = tmp_path / ("starwars-ontology." + file_extension)
    dataset.serialize(ontology_file, format=rdf_format)
    file_graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        local_file=str(ontology_file),
    )
    assert (
        len(Graph().parse(data=file_graph.get_schema, format="turtle"))
        == expected_statements
    )
@pytest.mark.parametrize(
    "rdf_format", ["json-ld", "xml", "n3", "turtle", "nt", "trig", "nquads", "trix"]
)
def test_get_schema_from_file_with_explicit_rdf_format(
    tmp_path: Path, rdf_format: str
) -> None:
    """Round-trip the ontology through a file whose format is given explicitly.

    The ``.txt`` extension carries no format information, so
    ``local_file_format`` must be honored for every serialization.
    """
    from rdflib import ConjunctiveGraph, Graph

    expected_statements = 19
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    assert (
        len(Graph().parse(data=graph.get_schema, format="turtle"))
        == expected_statements
    )
    # Serialize the ontology schema loaded with the query into a local file
    # in the requested RDF format and check that this yields the same
    # number of statements.
    dataset = ConjunctiveGraph()
    context = dataset.get_context("https://swapi.co/ontology/")
    context.parse(data=graph.get_schema, format="turtle")
    assert len(context) == expected_statements
    assert len(dataset) == expected_statements
    ontology_file = tmp_path / "starwars-ontology.txt"
    dataset.serialize(ontology_file, format=rdf_format)
    file_graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        local_file=str(ontology_file),
        local_file_format=rdf_format,
    )
    assert (
        len(Graph().parse(data=file_graph.get_schema, format="turtle"))
        == expected_statements
    )
def test_get_schema_from_file_with_wrong_extension(tmp_path: Path) -> None:
    """A file whose extension contradicts its actual serialization is rejected.

    The ontology is written as nquads into a file named ``.trig``; loading it
    by extension alone must raise ``ValueError``.
    """
    from rdflib import ConjunctiveGraph, Graph

    expected_statements = 19
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    assert (
        len(Graph().parse(data=graph.get_schema, format="turtle"))
        == expected_statements
    )
    dataset = ConjunctiveGraph()
    context = dataset.get_context("https://swapi.co/ontology/")
    context.parse(data=graph.get_schema, format="turtle")
    assert len(context) == expected_statements
    assert len(dataset) == expected_statements
    # Deliberate mismatch: nquads content behind a .trig extension.
    ontology_file = tmp_path / "starwars-ontology.trig"
    dataset.serialize(ontology_file, format="nquads")
    with pytest.raises(ValueError):
        OntotextGraphDBGraph(
            query_endpoint="http://localhost:7200/repositories/langchain",
            local_file=str(ontology_file),
        )
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@graphs@test_ontotext_graphdb_graph.py@.PATH_END.py
|
{
"filename": "_showticksuffix.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/densitymapbox/colorbar/_showticksuffix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``densitymapbox.colorbar.showticksuffix``."""

    def __init__(
        self,
        plotly_name="showticksuffix",
        parent_name="densitymapbox.colorbar",
        **kwargs,
    ):
        # Fill in defaults only when the caller did not override them; the
        # resulting keyword set handed to the parent is identical either way.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("values", ["all", "first", "last", "none"])
        super(ShowticksuffixValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@densitymapbox@colorbar@_showticksuffix.py@.PATH_END.py
|
{
"filename": "make_auto_diff_type.py",
"repo_name": "adamjermyn/Skye",
"repo_path": "Skye_extracted/Skye-main/auto_diff/python/make_auto_diff_type.py",
"type": "Python"
}
|
version https://git-lfs.github.com/spec/v1
oid sha256:0f0334fdaee7bd5a79e4a835c33ec43fdbbe46010c4eab785e151bfbdad1473d
size 6354
|
adamjermynREPO_NAMESkyePATH_START.@Skye_extracted@Skye-main@auto_diff@python@make_auto_diff_type.py@.PATH_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/indicator/number/font/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Flaglist validator for ``indicator.number.font.lineposition``."""

    def __init__(
        self, plotly_name="lineposition", parent_name="indicator.number.font", **kwargs
    ):
        # Fill in defaults only when the caller did not override them; the
        # resulting keyword set handed to the parent is identical either way.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("extras", ["none"])
        kwargs.setdefault("flags", ["under", "over", "through"])
        super(LinepositionValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@indicator@number@font@_lineposition.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contour/colorbar/title/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``contour.colorbar.title.font.shadow``."""

    def __init__(
        self, plotly_name="shadow", parent_name="contour.colorbar.title.font", **kwargs
    ):
        # Fill in the default only when the caller did not override it; the
        # resulting keyword set handed to the parent is identical either way.
        kwargs.setdefault("edit_type", "colorbars")
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contour@colorbar@title@font@_shadow.py@.PATH_END.py
|
{
"filename": "_structures.py",
"repo_name": "davidharvey1986/pyRRG",
"repo_path": "pyRRG_extracted/pyRRG-master/unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_vendor/packaging/_structures.py",
"type": "Python"
}
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
class InfinityType(object):
    """Sentinel that compares strictly greater than every other object.

    Only equal to other instances of this class; negating it yields the
    ``NegativeInfinity`` sentinel.
    """

    def __repr__(self):
        # type: () -> str
        return "Infinity"

    def __hash__(self):
        # type: () -> int
        return hash(repr(self))

    def __eq__(self, other):
        # type: (object) -> bool
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        # type: (object) -> bool
        return not isinstance(other, self.__class__)

    def __lt__(self, other):
        # type: (object) -> bool
        return False

    # Infinity is never below anything, so "<=" is the same constant-False
    # predicate as "<" (the original spelled both out separately).
    __le__ = __lt__

    def __gt__(self, other):
        # type: (object) -> bool
        return True

    # Symmetrically, ">=" is the same constant-True predicate as ">".
    __ge__ = __gt__

    def __neg__(self):
        # type: () -> NegativeInfinityType
        return NegativeInfinity


Infinity = InfinityType()
class NegativeInfinityType(object):
    """Sentinel that compares strictly less than every other object.

    Only equal to other instances of this class; negating it yields the
    ``Infinity`` sentinel.
    """

    def __repr__(self):
        # type: () -> str
        return "-Infinity"

    def __hash__(self):
        # type: () -> int
        return hash(repr(self))

    def __eq__(self, other):
        # type: (object) -> bool
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        # type: (object) -> bool
        return not isinstance(other, self.__class__)

    def __lt__(self, other):
        # type: (object) -> bool
        return True

    # NegativeInfinity is below everything, so "<=" is the same
    # constant-True predicate as "<" (the original spelled both out).
    __le__ = __lt__

    def __gt__(self, other):
        # type: (object) -> bool
        return False

    # Symmetrically, ">=" is the same constant-False predicate as ">".
    __ge__ = __gt__

    def __neg__(self):
        # type: () -> InfinityType
        return Infinity


NegativeInfinity = NegativeInfinityType()
|
davidharvey1986REPO_NAMEpyRRGPATH_START.@pyRRG_extracted@pyRRG-master@unittests@bugFixPyRRG@lib@python3.7@site-packages@pip@_vendor@packaging@_structures.py@.PATH_END.py
|
{
"filename": "palette_choices.py",
"repo_name": "mwaskom/seaborn",
"repo_path": "seaborn_extracted/seaborn-master/examples/palette_choices.py",
"type": "Python"
}
|
"""
Color palette choices
=====================
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style="white", context="talk")
# Fixed seed so the "qualitative" panel's random ordering is reproducible.
rs = np.random.RandomState(8)
# Set up the matplotlib figure: three stacked bar charts sharing the x axis,
# one per palette class.
f, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(7, 5), sharex=True)
# Generate some sequential data: monotonically increasing values 1..10 over
# categories A..J suit a sequential palette.
x = np.array(list("ABCDEFGHIJ"))
y1 = np.arange(1, 11)
sns.barplot(x=x, y=y1, hue=x, palette="rocket", ax=ax1)
ax1.axhline(0, color="k", clip_on=False)
ax1.set_ylabel("Sequential")
# Center the data to make it diverging: subtracting the midpoint (5.5) puts
# half the bars below zero, matching a diverging palette.
y2 = y1 - 5.5
sns.barplot(x=x, y=y2, hue=x, palette="vlag", ax=ax2)
ax2.axhline(0, color="k", clip_on=False)
ax2.set_ylabel("Diverging")
# Randomly reorder the data to make it qualitative: a permutation removes any
# ordering, which is the use case for a qualitative palette.
y3 = rs.choice(y1, len(y1), replace=False)
sns.barplot(x=x, y=y3, hue=x, palette="deep", ax=ax3)
ax3.axhline(0, color="k", clip_on=False)
ax3.set_ylabel("Qualitative")
# Finalize the plot: drop the bottom spines, hide y ticks, and pad rows.
sns.despine(bottom=True)
plt.setp(f.axes, yticks=[])
plt.tight_layout(h_pad=2)
|
mwaskomREPO_NAMEseabornPATH_START.@seaborn_extracted@seaborn-master@examples@palette_choices.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.