metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "__init__.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/mayavi/tools/data_wizards/__init__.py",
"type": "Python"
}
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@mayavi@tools@data_wizards@__init__.py@.PATH_END.py
|
|
{
"filename": "hydro_minimal.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/examples/textbook/hydro_minimal.py",
"type": "Python"
}
|
###BOOKLISTSTART1###
from amuse.lab import *
def main(N, Mtot, Rvir, t_end):
    """Evolve an SPH Plummer gas sphere with Gadget2 and print diagnostics.

    N     : number of SPH gas particles
    Mtot  : total cloud mass (AMUSE mass quantity)
    Rvir  : virial radius of the cloud (AMUSE length quantity)
    t_end : end time of the simulation (AMUSE time quantity)

    Side effects: writes the final particle set to ``hydro.h5`` and prints
    a one-line summary (time, mass, energies, centre of mass) to stdout.
    """
    # Converter that maps N-body units to SI for this mass/length scale.
    converter=nbody_system.nbody_to_si(Mtot, Rvir)
    gas = new_plummer_gas_model(N, convert_nbody=converter)
    hydro = Gadget2(converter)
    hydro.gas_particles.add_particles(gas)
    # Total energy before evolving; used below for the relative drift dE.
    Etot_init = hydro.kinetic_energy \
        + hydro.potential_energy + hydro.thermal_energy
    hydro.evolve_model(t_end)
    # Snapshot of the final particle state (HDF5 format).
    write_set_to_file(hydro.particles, "hydro.h5", "hdf5")
    Ekin = hydro.kinetic_energy
    Epot = hydro.potential_energy
    Eth = hydro.thermal_energy
    Etot = Ekin + Epot + Eth
    # Ratio of (kinetic + thermal) to potential energy.
    Q = (Ekin+Eth)/Epot
    # Relative energy drift over the run (normalised by the final energy).
    dE = (Etot_init-Etot)/Etot
    com = hydro.gas_particles.center_of_mass()
    print("T=", hydro.get_time(), "M=", hydro.gas_particles.mass.sum(), end=' ')
    print("E= ", Etot, "Q= ", Q, "dE=", dE, "CoM=", com.in_(units.RSun))
    # Release the Gadget2 worker process.
    hydro.stop()
###BOOKLISTSTOP1###
def new_option_parser():
    """Build the command-line parser (AMUSE's unit-aware OptionParser).

    Returns an ``OptionParser`` whose ``parse_args()`` yields options with
    names matching the keyword arguments of :func:`main`.
    """
    from amuse.units.optparse import OptionParser
    result = OptionParser()
    result.add_option("-N", dest="N", type="int",default = 100,
                      help="number of gas particles [%default]")
    # NOTE(review): the display unit is Myr while the default is given in
    # hours -- the AMUSE parser converts between them, but the printed
    # %default value may look surprising; confirm this is intended.
    result.add_option("-t", unit=units.Myr,
                      dest="t_end", type="float", default = 6|units.hour,
                      help="end time of the simulation [%default]")
    result.add_option("-M", unit=units.MSun,
                      dest="Mtot", type="float", default = 1|units.MSun,
                      help="Mass of the cloud [%default]")
    result.add_option("-R", unit=units.RSun,
                      dest="Rvir", type="float", default = 1|units.RSun,
                      help="Radius of the cloud [%default]")
    return result
# Entry point; '__plot__' is presumably the name used when run under an
# AMUSE plotting harness -- TODO confirm against the textbook tooling.
if __name__ in ('__main__', '__plot__'):
    o, arguments = new_option_parser().parse_args()
    # Forward all parsed options as keyword arguments to main().
    main(**o.__dict__)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@examples@textbook@hydro_minimal.py@.PATH_END.py
|
{
"filename": "test_spectral_window.py",
"repo_name": "ska-sa/katdal",
"repo_path": "katdal_extracted/katdal-master/katdal/test/test_spectral_window.py",
"type": "Python"
}
|
###############################################################################
# Copyright (c) 2018,2021-2022, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Tests for :py:mod:`katdal.spectral_window`."""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from katdal.spectral_window import SpectralWindow
class TestSpectralWindow:
    """Unit tests for :class:`katdal.spectral_window.SpectralWindow`."""

    def setup_method(self):
        # Lower sideband: channel frequencies decrease with channel index.
        self.lsb = SpectralWindow(1000.0, 10.0, 6, sideband=-1, product='lsb')
        # Upper sideband: channel frequencies increase with channel index.
        self.usb = SpectralWindow(1000.0, 10.0, 6, sideband=1, band='X')
        # Odd number of channels: centre frequency falls on a channel.
        self.odd = SpectralWindow(1000.0, 10.0, 5, sideband=1)
        # channel_width will not be an exact float. The values have been
        # chosen so that bandwidth / num_chans * num_chans does not quite
        # equal bandwidth.
        self.inexact = SpectralWindow(1000.0, None, 14, sideband=1,
                                      bandwidth=230.0)

    def test_width_properties(self):
        # channel_width and bandwidth must be mutually consistent, whether
        # the window was constructed from a width or from a bandwidth.
        assert self.lsb.channel_width == 10.0
        assert self.lsb.bandwidth == 60.0
        assert self.inexact.channel_width == 230.0 / 14
        assert self.inexact.bandwidth == 230.0

    def test_channel_freqs(self):
        assert_array_equal(self.lsb.channel_freqs,
                           [1030.0, 1020.0, 1010.0, 1000.0, 990.0, 980.0])
        assert_array_equal(self.usb.channel_freqs,
                           [970.0, 980.0, 990.0, 1000.0, 1010.0, 1020.0])
        assert_array_equal(self.odd.channel_freqs,
                           [980.0, 990.0, 1000.0, 1010.0, 1020.0])
        assert_array_almost_equal(self.inexact.channel_freqs,
                                  np.arange(14) * 230.0 / 14 + 885.0)
        # Check that the exactly representable values are exact
        assert self.inexact.channel_freqs[0] == 885.0
        assert self.inexact.channel_freqs[7] == 1000.0

    def test_repr(self):
        # Just a smoke test to check that it doesn't crash
        repr(self.lsb)
        repr(self.usb)

    def test_subrange(self):
        lsb_sub = self.lsb.subrange(0, 3)
        assert_array_equal(lsb_sub.channel_freqs, [1030.0, 1020.0, 1010.0])
        # Metadata (product / band) must survive taking a subrange.
        assert lsb_sub.product == self.lsb.product
        usb_sub = self.usb.subrange(2, 6)
        assert_array_equal(usb_sub.channel_freqs,
                           [990.0, 1000.0, 1010.0, 1020.0])
        assert usb_sub.band == self.usb.band
        # Check that updated bandwidth doesn't have rounding errors
        inexact_sub = self.inexact.subrange(0, 7)
        assert inexact_sub.bandwidth == 115.0

    def test_rechannelise_same(self):
        # Rechannelising to the same channel count is a no-op.
        lsb = self.lsb.rechannelise(6)
        assert lsb == self.lsb

    def test_rechannelise_to_even(self):
        lsb = self.lsb.rechannelise(2)
        assert_array_equal(lsb.channel_freqs, [1020.0, 990.0])
        usb = self.usb.rechannelise(2)
        assert_array_equal(usb.channel_freqs, [980.0, 1010.0])

    def test_rechannelise_to_odd(self):
        lsb = self.lsb.rechannelise(3)
        assert_array_equal(lsb.channel_freqs, [1025.0, 1005.0, 985.0])
        usb = self.usb.rechannelise(3)
        assert_array_equal(usb.channel_freqs, [975.0, 995.0, 1015.0])
        odd = self.odd.rechannelise(1)
        assert_array_equal(odd.channel_freqs, [1000.0])
|
ska-saREPO_NAMEkatdalPATH_START.@katdal_extracted@katdal-master@katdal@test@test_spectral_window.py@.PATH_END.py
|
{
"filename": "test_nonlin.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/optimize/tests/test_nonlin.py",
"type": "Python"
}
|
""" Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_
import pytest
from scipy._lib.six import xrange
from scipy.optimize import nonlin, root
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
from .test_minpack import pressure_network
# All solver entry points under test, keyed by the method name accepted
# by scipy.optimize.root.
SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
           'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
           'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
           'krylov': nonlin.newton_krylov}
# Subset of SOLVERS that must converge on every test problem below.
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
             'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
    """Test problem f(x) = -D x - c (x . x) x with D = diag(3, 2, 1.5, 1, 0.5).

    Rewritten without the deprecated ``numpy.matrix`` / ``np.asmatrix``
    (slated for removal from NumPy); returns a 1-D float array instead of
    a column matrix.  All callers in this file reduce the result with
    ``abs(...).max()`` / ``norm(...)`` or flatten it inside the solvers,
    so the change is interface-compatible.
    """
    x = np.asarray(x, dtype=float)
    d = np.array([3, 2, 1.5, 1, 0.5])
    c = 0.01
    # -D x - c * (x.x) * x : a linear contraction plus a small cubic term.
    return -d * x - c * x.dot(x) * x
F.xin = [1, 1, 1, 1, 1]
F.KNOWN_BAD = {}
def F2(x):
    """Identity map; the unique root is the origin."""
    return x
F2.xin = [1,2,3,4,5,6]
# The simple mixing methods are known not to converge on this problem.
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
                'excitingmixing': nonlin.excitingmixing}
def F2_lucky(x):
    """Identity map started exactly at its root (x = 0)."""
    return x
F2_lucky.xin = [0,0,0,0,0,0]
F2_lucky.KNOWN_BAD = {}
def F3(x):
    """Linear test problem A x - b with a tridiagonal 3x3 matrix A.

    Rewritten without the deprecated ``np.mat`` string constructor
    (``numpy.matrix`` is slated for removal); returns a 1-D array rather
    than a 1x3 matrix, which the callers in this file handle identically
    (they flatten / reduce the residual).
    """
    A = np.array([[-2., 1., 0.],
                  [1., -2., 1.],
                  [0., 1., -2.]])
    b = np.array([1., 2., 3.])
    return np.dot(A, x) - b
F3.xin = [1, 2, 3]
F3.KNOWN_BAD = {}
def F4_powell(x):
    """Two-dimensional badly scaled test problem (Powell)."""
    scale = 1e4
    u, v = x[0], x[1]
    # One very stiff bilinear equation plus one gentle exponential one.
    residual_a = scale * u * v - 1
    residual_b = np.exp(-u) + np.exp(-v) - (1 + 1 / scale)
    return [residual_a, residual_b]
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
                       'excitingmixing': nonlin.excitingmixing,
                       'diagbroyden': nonlin.diagbroyden}
def F5(x):
    """Pressure-network problem imported from test_minpack (4 nodes)."""
    return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
F5.xin = [2., 0, 2, 0]
# Mixing methods and diagonal Broyden do not converge on this problem.
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
                'linearmixing': nonlin.linearmixing,
                'diagbroyden': nonlin.diagbroyden}
def F6(x):
    """2-D nonlinear test problem, preconditioned by a fixed matrix J0."""
    a, b = x
    jac0 = np.array([[-4.256, 14.7],
                     [0.8394989, 0.59964207]])
    # Raw residual vector of the underlying 2-D system.
    resid = np.array([(a + 3) * (b ** 5 - 7) + 3 * 6,
                      np.sin(b * np.exp(a) - 1)])
    # Precondition: solve J0 y = resid and negate.
    return -np.linalg.solve(jac0, resid)
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
                'linearmixing': nonlin.linearmixing,
                'diagbroyden': nonlin.diagbroyden}
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
    """
    Check the Broyden methods for a few test problems.

    broyden1, broyden2, and newton_krylov must succeed for
    all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
    """

    def _check_nonlin_func(self, f, func, f_tol=1e-2):
        # Solve f(x) = 0 with the given solver and check the residual norm.
        x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
        assert_(np.absolute(f(x)).max() < f_tol)

    def _check_root(self, f, method, f_tol=1e-2):
        # Same check, but going through the unified `root` front end.
        res = root(f, f.xin, method=method,
                   options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
        assert_(np.absolute(res.fun).max() < f_tol)

    @pytest.mark.xfail
    def _check_func_fail(self, *a, **kw):
        # Placeholder invoked for MUST_WORK solvers that appear in a
        # problem's KNOWN_BAD set; marked xfail so it is recorded.
        pass

    def test_problem_nonlin(self):
        # Run every solver on every problem, skipping known-bad pairs.
        for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
            for func in SOLVERS.values():
                if func in f.KNOWN_BAD.values():
                    if func in MUST_WORK.values():
                        self._check_func_fail(f, func)
                    continue
                self._check_nonlin_func(f, func)

    def test_tol_norm_called(self):
        # Check that supplying tol_norm keyword to nonlin_solve works
        self._tol_norm_used = False

        def local_norm_func(x):
            self._tol_norm_used = True
            return np.absolute(x).max()

        nonlin.newton_krylov(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0,
                             tol_norm=local_norm_func)
        assert_(self._tol_norm_used)

    def test_problem_root(self):
        # Same sweep as test_problem_nonlin, via the `root` interface.
        for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
            for meth in SOLVERS:
                if meth in f.KNOWN_BAD:
                    if meth in MUST_WORK:
                        self._check_func_fail(f, meth)
                    continue
                self._check_root(f, meth)
class TestSecant(object):
    """Check that some Jacobian approximations satisfy the secant condition"""

    # A fixed sequence of iterates ...
    xs = [np.array([1,2,3,4,5], float),
          np.array([2,3,4,5,1], float),
          np.array([3,4,5,1,2], float),
          np.array([4,5,1,2,3], float),
          np.array([9,1,9,1,3], float),
          np.array([0,1,9,1,3], float),
          np.array([5,5,7,1,1], float),
          np.array([1,2,7,5,1], float),]
    # ... and the corresponding values of f(x) = x**2 - 1.
    fs = [x**2 - 1 for x in xs]

    def _check_secant(self, jac_cls, npoints=1, **kw):
        """
        Check that the given Jacobian approximation satisfies secant
        conditions for last `npoints` points.
        """
        jac = jac_cls(**kw)
        jac.setup(self.xs[0], self.fs[0], None)
        for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            jac.update(x, f)
            # The approximation must map each of the last `npoints`
            # function differences back to the matching step dx.
            for k in xrange(min(npoints, j+1)):
                dx = self.xs[j-k+1] - self.xs[j-k]
                df = self.fs[j-k+1] - self.fs[j-k]
                assert_(np.allclose(dx, jac.solve(df)))
            # Check that the `npoints` secant bound is strict
            if j >= npoints:
                dx = self.xs[j-npoints+1] - self.xs[j-npoints]
                df = self.fs[j-npoints+1] - self.fs[j-npoints]
                assert_(not np.allclose(dx, jac.solve(df)))

    def test_broyden1(self):
        self._check_secant(nonlin.BroydenFirst)

    def test_broyden2(self):
        self._check_secant(nonlin.BroydenSecond)

    def test_broyden1_update(self):
        # Check that BroydenFirst update works as for a dense matrix
        jac = nonlin.BroydenFirst(alpha=0.1)
        jac.setup(self.xs[0], self.fs[0], None)
        # Reference dense Jacobian, initialised to -I/alpha.
        B = np.identity(5) * (-1/0.1)
        for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            df = f - self.fs[last_j]
            dx = x - self.xs[last_j]
            # Broyden's rank-1 update applied directly to the dense B.
            B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
            jac.update(x, f)
            assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))

    def test_broyden2_update(self):
        # Check that BroydenSecond update works as for a dense matrix
        jac = nonlin.BroydenSecond(alpha=0.1)
        jac.setup(self.xs[0], self.fs[0], None)
        # Reference dense inverse Jacobian, initialised to -alpha*I.
        H = np.identity(5) * (-0.1)
        for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            df = f - self.fs[last_j]
            dx = x - self.xs[last_j]
            # Rank-1 update applied directly to the inverse H.
            H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
            jac.update(x, f)
            assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))

    def test_anderson(self):
        # Anderson mixing (with w0=0) satisfies secant conditions
        # for the last M iterates, see [Ey]_
        #
        # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
        self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(object):
    """Solve a linear equation;
    some methods find the exact solution in a finite number of steps"""

    def _check(self, jac, N, maxiter, complex=False, **kw):
        # Build a fixed-seed random linear system A x = b and solve it via
        # nonlin_solve with line search disabled, so the iteration count
        # needed for an exact solve is predictable.
        np.random.seed(123)
        A = np.random.randn(N, N)
        if complex:
            A = A + 1j*np.random.randn(N, N)
        b = np.random.randn(N)
        if complex:
            b = b + 1j*np.random.randn(N)

        def func(x):
            return dot(A, x) - b

        sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
                                  f_tol=1e-6, line_search=None, verbose=0)
        assert_(np.allclose(dot(A, sol), b, atol=1e-6))

    def test_broyden1(self):
        # Broyden methods solve linear systems exactly in 2*N steps
        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)

    def test_broyden2(self):
        # Broyden methods solve linear systems exactly in 2*N steps
        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)

    def test_anderson(self):
        # Anderson is rather similar to Broyden, if given enough storage space
        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)

    def test_krylov(self):
        # Krylov methods solve linear systems exactly in N inner steps
        self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
        self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
    """Check that solve/dot methods in Jacobian approximations are consistent"""

    def _func(self, x):
        # Test function whose Jacobian involves the random matrix self.A.
        return x**2 - 1 + np.dot(self.A, x)

    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
        np.random.seed(123)
        N = 7

        def rand(*a):
            # Random array, optionally with a random imaginary part.
            q = np.random.rand(*a)
            if complex:
                q = q + 1j*np.random.rand(*a)
            return q

        def assert_close(a, b, msg):
            # Relative/absolute closeness check with a descriptive message.
            d = abs(a - b).max()
            f = tol + abs(b).max()*tol
            if d > f:
                raise AssertionError('%s: err %g' % (msg, d))

        self.A = rand(N, N)

        # initialize
        x0 = np.random.rand(N)
        jac = jac_cls(**kw)
        jac.setup(x0, self._func(x0), self._func)

        # check consistency: every pair of dense/matrix-free operations the
        # approximation exposes must agree on random vectors.
        for k in xrange(2*N):
            v = rand(N)

            if hasattr(jac, '__array__'):
                Jd = np.array(jac)
                if hasattr(jac, 'solve'):
                    Gv = jac.solve(v)
                    Gv2 = np.linalg.solve(Jd, v)
                    assert_close(Gv, Gv2, 'solve vs array')
                if hasattr(jac, 'rsolve'):
                    Gv = jac.rsolve(v)
                    Gv2 = np.linalg.solve(Jd.T.conj(), v)
                    assert_close(Gv, Gv2, 'rsolve vs array')
                if hasattr(jac, 'matvec'):
                    Jv = jac.matvec(v)
                    Jv2 = np.dot(Jd, v)
                    assert_close(Jv, Jv2, 'dot vs array')
                if hasattr(jac, 'rmatvec'):
                    Jv = jac.rmatvec(v)
                    Jv2 = np.dot(Jd.T.conj(), v)
                    assert_close(Jv, Jv2, 'rmatvec vs array')

            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
                Jv = jac.matvec(v)
                Jv2 = jac.solve(jac.matvec(Jv))
                assert_close(Jv, Jv2, 'dot vs solve')

            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
                Jv = jac.rmatvec(v)
                Jv2 = jac.rmatvec(jac.rsolve(Jv))
                assert_close(Jv, Jv2, 'rmatvec vs rsolve')

            # Advance the approximation to a new random point.
            x = rand(N)
            jac.update(x, self._func(x))

    def test_broyden1(self):
        self._check_dot(nonlin.BroydenFirst, complex=False)
        self._check_dot(nonlin.BroydenFirst, complex=True)

    def test_broyden2(self):
        self._check_dot(nonlin.BroydenSecond, complex=False)
        self._check_dot(nonlin.BroydenSecond, complex=True)

    def test_anderson(self):
        self._check_dot(nonlin.Anderson, complex=False)
        self._check_dot(nonlin.Anderson, complex=True)

    def test_diagbroyden(self):
        self._check_dot(nonlin.DiagBroyden, complex=False)
        self._check_dot(nonlin.DiagBroyden, complex=True)

    def test_linearmixing(self):
        self._check_dot(nonlin.LinearMixing, complex=False)
        self._check_dot(nonlin.LinearMixing, complex=True)

    def test_excitingmixing(self):
        self._check_dot(nonlin.ExcitingMixing, complex=False)
        self._check_dot(nonlin.ExcitingMixing, complex=True)

    def test_krylov(self):
        # The matrix-free Krylov approximation is less accurate: loose tol.
        self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3)
        self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3)
class TestNonlinOldTests(object):
    """ Test case for a simple constrained entropy maximization problem
    (the machine translation example of Berger et al in
    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
    """
    # NOTE(review): the docstring above is inherited from an older test
    # suite; the methods below actually exercise the problem `F` defined
    # at module level, with hard-coded iteration counts per solver.

    def test_broyden1(self):
        x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
        assert_(nonlin.norm(x) < 1e-9)
        assert_(nonlin.norm(F(x)) < 1e-9)

    def test_broyden2(self):
        x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
        assert_(nonlin.norm(x) < 1e-9)
        assert_(nonlin.norm(F(x)) < 1e-9)

    def test_anderson(self):
        x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
        assert_(nonlin.norm(x) < 0.33)

    def test_linearmixing(self):
        x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
        assert_(nonlin.norm(x) < 1e-7)
        assert_(nonlin.norm(F(x)) < 1e-7)

    def test_exciting(self):
        x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
        assert_(nonlin.norm(x) < 1e-5)
        assert_(nonlin.norm(F(x)) < 1e-5)

    def test_diagbroyden(self):
        x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
        assert_(nonlin.norm(x) < 1e-8)
        assert_(nonlin.norm(F(x)) < 1e-8)

    # The remaining tests repeat the checks above through the `root`
    # interface, passing solver parameters via the options dict.

    def test_root_broyden1(self):
        res = root(F, F.xin, method='broyden1',
                   options={'nit': 12, 'jac_options': {'alpha': 1}})
        assert_(nonlin.norm(res.x) < 1e-9)
        assert_(nonlin.norm(res.fun) < 1e-9)

    def test_root_broyden2(self):
        res = root(F, F.xin, method='broyden2',
                   options={'nit': 12, 'jac_options': {'alpha': 1}})
        assert_(nonlin.norm(res.x) < 1e-9)
        assert_(nonlin.norm(res.fun) < 1e-9)

    def test_root_anderson(self):
        res = root(F, F.xin, method='anderson',
                   options={'nit': 12,
                            'jac_options': {'alpha': 0.03, 'M': 5}})
        assert_(nonlin.norm(res.x) < 0.33)

    def test_root_linearmixing(self):
        res = root(F, F.xin, method='linearmixing',
                   options={'nit': 60,
                            'jac_options': {'alpha': 0.5}})
        assert_(nonlin.norm(res.x) < 1e-7)
        assert_(nonlin.norm(res.fun) < 1e-7)

    def test_root_excitingmixing(self):
        res = root(F, F.xin, method='excitingmixing',
                   options={'nit': 20,
                            'jac_options': {'alpha': 0.5}})
        assert_(nonlin.norm(res.x) < 1e-5)
        assert_(nonlin.norm(res.fun) < 1e-5)

    def test_root_diagbroyden(self):
        res = root(F, F.xin, method='diagbroyden',
                   options={'nit': 11,
                            'jac_options': {'alpha': 1}})
        assert_(nonlin.norm(res.x) < 1e-8)
        assert_(nonlin.norm(res.fun) < 1e-8)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@optimize@tests@test_nonlin.py@.PATH_END.py
|
{
"filename": "nddata.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/nddata/nddata.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDData class.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from copy import deepcopy
from .nddata_base import NDDataBase
from .nduncertainty import NDUncertainty, UnknownUncertainty
from .. import log
from ..units import Unit, Quantity
from ..utils.metadata import MetaData
# Public API of this module.
__all__ = ['NDData']

# Docstring shared by the MetaData descriptor on the NDData class.
_meta_doc = """`dict`-like : Additional meta information about the dataset."""
class NDData(NDDataBase):
    """
    A container for `numpy.ndarray`-based datasets, using the
    `~astropy.nddata.NDDataBase` interface.

    The key distinction from raw `numpy.ndarray` is the presence of
    additional metadata such as uncertainty, mask, unit, a coordinate system
    and/or a dictionary containing further meta information. This class *only*
    provides a container for *storing* such datasets. For further functionality
    take a look at the ``See also`` section.

    Parameters
    ----------
    data : `numpy.ndarray`-like or `NDData`-like
        The dataset.

    uncertainty : any type, optional
        Uncertainty in the dataset.
        Should have an attribute ``uncertainty_type`` that defines what kind of
        uncertainty is stored, for example ``"std"`` for standard deviation or
        ``"var"`` for variance. A metaclass defining such an interface is
        `NDUncertainty` - but isn't mandatory. If the uncertainty has no such
        attribute the uncertainty is stored as `UnknownUncertainty`.
        Defaults to ``None``.

    mask : any type, optional
        Mask for the dataset. Masks should follow the ``numpy`` convention that
        **valid** data points are marked by ``False`` and **invalid** ones with
        ``True``.
        Defaults to ``None``.

    wcs : any type, optional
        World coordinate system (WCS) for the dataset.
        Default is ``None``.

    meta : `dict`-like object, optional
        Additional meta information about the dataset. If no meta is provided
        an empty `collections.OrderedDict` is created.
        Default is ``None``.

    unit : `~astropy.units.Unit`-like or str, optional
        Unit for the dataset. Strings that can be converted to a
        `~astropy.units.Unit` are allowed.
        Default is ``None``.

    copy : `bool`, optional
        Indicates whether to save the arguments as copy. ``True`` copies
        every attribute before saving it while ``False`` tries to save every
        parameter as reference.
        Note however that it is not always possible to save the input as
        reference.
        Default is ``False``.

        .. versionadded:: 1.2

    Raises
    ------
    TypeError
        In case ``data`` or ``meta`` don't meet the restrictions.

    Notes
    -----
    Each attribute can be accessed through the homonymous instance attribute:
    ``data`` in a `NDData` object can be accessed through the `data`
    attribute::

        >>> from astropy.nddata import NDData
        >>> nd = NDData([1,2,3])
        >>> nd.data
        array([1, 2, 3])

    Given a conflicting implicit and an explicit parameter during
    initialization, for example the ``data`` is a `~astropy.units.Quantity` and
    the unit parameter is not ``None``, then the implicit parameter is replaced
    (without conversion) by the explicit one and a warning is issued::

        >>> import numpy as np
        >>> import astropy.units as u
        >>> q = np.array([1,2,3,4]) * u.m
        >>> nd2 = NDData(q, unit=u.cm)
        INFO: overwriting Quantity's current unit with specified unit. [astropy.nddata.nddata]
        >>> nd2.data  # doctest: +FLOAT_CMP
        array([1., 2., 3., 4.])
        >>> nd2.unit
        Unit("cm")

    See also
    --------
    NDDataRef
    NDDataArray
    """

    # Instead of a custom property use the MetaData descriptor also used for
    # Tables. It will check if the meta is dict-like or raise an exception.
    meta = MetaData(doc=_meta_doc, copy=False)

    def __init__(self, data, uncertainty=None, mask=None, wcs=None,
                 meta=None, unit=None, copy=False):
        # Rather pointless since the NDDataBase does not implement any setting
        # but before the NDDataBase did call the uncertainty
        # setter. But if anyone wants to alter this behaviour again the call
        # to the superclass NDDataBase should be in here.
        super(NDData, self).__init__()

        # Check if data is any type from which to collect some implicitly
        # passed parameters.
        if isinstance(data, NDData):  # don't use self.__class__ (issue #4137)
            # Of course we need to check the data because subclasses with other
            # init-logic might be passed in here. We could skip these
            # tests if we compared for self.__class__ but that has other
            # drawbacks.

            # Comparing if there is an explicit and an implicit unit parameter.
            # If that is the case use the explicit one and issue a warning
            # that there might be a conflict. In case there is no explicit
            # unit just overwrite the unit parameter with the NDData.unit
            # and proceed as if that one was given as parameter. Same for the
            # other parameters.
            if (unit is not None and data.unit is not None and
                    unit != data.unit):
                log.info("overwriting NDData's current "
                         "unit with specified unit.")
            elif data.unit is not None:
                unit = data.unit

            if uncertainty is not None and data.uncertainty is not None:
                log.info("overwriting NDData's current "
                         "uncertainty with specified uncertainty.")
            elif data.uncertainty is not None:
                uncertainty = data.uncertainty

            if mask is not None and data.mask is not None:
                log.info("overwriting NDData's current "
                         "mask with specified mask.")
            elif data.mask is not None:
                mask = data.mask

            if wcs is not None and data.wcs is not None:
                log.info("overwriting NDData's current "
                         "wcs with specified wcs.")
            elif data.wcs is not None:
                wcs = data.wcs

            if meta is not None and data.meta is not None:
                log.info("overwriting NDData's current "
                         "meta with specified meta.")
            elif data.meta is not None:
                meta = data.meta

            # Strip the container: from here on only the raw data is used.
            data = data.data

        else:
            if hasattr(data, 'mask') and hasattr(data, 'data'):
                # Separating data and mask
                if mask is not None:
                    log.info("overwriting Masked Objects's current "
                             "mask with specified mask.")
                else:
                    mask = data.mask

                # Just save the data for further processing, we could be given
                # a masked Quantity or something else entirely. Better to check
                # it first.
                data = data.data

            if isinstance(data, Quantity):
                if unit is not None and unit != data.unit:
                    log.info("overwriting Quantity's current "
                             "unit with specified unit.")
                else:
                    unit = data.unit
                data = data.value

        # Quick check on the parameters if they match the requirements.
        if (not hasattr(data, 'shape') or not hasattr(data, '__getitem__') or
                not hasattr(data, '__array__')):
            # Data doesn't look like a numpy array, try converting it to
            # one.
            data = np.array(data, subok=True, copy=False)
            # Another quick check to see if what we got looks like an array
            # rather than an object (since numpy will convert a
            # non-numerical/non-string inputs to an array of objects).
            if data.dtype == 'O':
                raise TypeError("could not convert data to numpy array.")

        if unit is not None:
            unit = Unit(unit)

        if copy:
            # Data might have been copied before but no way of validating
            # without another variable.
            data = deepcopy(data)
            mask = deepcopy(mask)
            wcs = deepcopy(wcs)
            meta = deepcopy(meta)
            uncertainty = deepcopy(uncertainty)
            # Actually - copying the unit is unnecessary but better safe
            # than sorry :-)
            unit = deepcopy(unit)

        # Store the attributes
        self._data = data
        self.mask = mask
        self._wcs = wcs
        self.meta = meta  # TODO: Make this call the setter sometime
        self._unit = unit
        # Call the setter for uncertainty to further check the uncertainty
        self.uncertainty = uncertainty

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        prefix = self.__class__.__name__ + '('
        body = np.array2string(self.data, separator=', ', prefix=prefix)
        return ''.join([prefix, body, ')'])

    @property
    def data(self):
        """
        `~numpy.ndarray`-like : The stored dataset.
        """
        return self._data

    @property
    def mask(self):
        """
        any type : Mask for the dataset, if any.

        Masks should follow the ``numpy`` convention that valid data points are
        marked by ``False`` and invalid ones with ``True``.
        """
        return self._mask

    @mask.setter
    def mask(self, value):
        self._mask = value

    @property
    def unit(self):
        """
        `~astropy.units.Unit` : Unit for the dataset, if any.
        """
        return self._unit

    @property
    def wcs(self):
        """
        any type : A world coordinate system (WCS) for the dataset, if any.
        """
        return self._wcs

    @property
    def uncertainty(self):
        """
        any type : Uncertainty in the dataset, if any.

        Should have an attribute ``uncertainty_type`` that defines what kind of
        uncertainty is stored, such as ``'std'`` for standard deviation or
        ``'var'`` for variance. A metaclass defining such an interface is
        `~astropy.nddata.NDUncertainty` but isn't mandatory.
        """
        return self._uncertainty

    @uncertainty.setter
    def uncertainty(self, value):
        if value is not None:
            # There is one requirements on the uncertainty: That
            # it has an attribute 'uncertainty_type'.
            # If it does not match this requirement convert it to an unknown
            # uncertainty.
            if not hasattr(value, 'uncertainty_type'):
                log.info('uncertainty should have attribute uncertainty_type.')
                value = UnknownUncertainty(value, copy=False)

            # If it is a subclass of NDUncertainty we must set the
            # parent_nddata attribute. (#4152)
            if isinstance(value, NDUncertainty):
                # In case the uncertainty already has a parent create a new
                # instance because we need to assume that we don't want to
                # steal the uncertainty from another NDData object
                if value._parent_nddata is not None:
                    value = value.__class__(value, copy=False)
                # Then link it to this NDData instance (internally this needs
                # to be saved as weakref but that's done by NDUncertainty
                # setter).
                value.parent_nddata = self

        self._uncertainty = value
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@nddata@nddata.py@.PATH_END.py
|
{
"filename": "encoders.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/redis/py3/redis/_parsers/encoders.py",
"type": "Python"
}
|
from ..exceptions import DataError
class Encoder:
"Encode strings to bytes-like and decode bytes-like to strings"
__slots__ = "encoding", "encoding_errors", "decode_responses"
def __init__(self, encoding, encoding_errors, decode_responses):
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
def encode(self, value):
"Return a bytestring or bytes-like representation of the value"
if isinstance(value, (bytes, memoryview)):
return value
elif isinstance(value, bool):
# special case bool since it is a subclass of int
raise DataError(
"Invalid input of type: 'bool'. Convert to a "
"bytes, string, int or float first."
)
elif isinstance(value, (int, float)):
value = repr(value).encode()
elif not isinstance(value, str):
# a value we don't know how to deal with. throw an error
typename = type(value).__name__
raise DataError(
f"Invalid input of type: '{typename}'. "
f"Convert to a bytes, string, int or float first."
)
if isinstance(value, str):
value = value.encode(self.encoding, self.encoding_errors)
return value
def decode(self, value, force=False):
"Return a unicode string from the bytes-like representation"
if self.decode_responses or force:
if isinstance(value, memoryview):
value = value.tobytes()
if isinstance(value, bytes):
value = value.decode(self.encoding, self.encoding_errors)
return value
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@redis@py3@redis@_parsers@encoders.py@.PATH_END.py
|
{
"filename": "_version.py",
"repo_name": "handley-lab/anesthetic",
"repo_path": "anesthetic_extracted/anesthetic-master/anesthetic/_version.py",
"type": "Python"
}
|
# Version string of the anesthetic package.
__version__ = '2.9.1'
|
handley-labREPO_NAMEanestheticPATH_START.@anesthetic_extracted@anesthetic-master@anesthetic@_version.py@.PATH_END.py
|
{
"filename": "tools.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/tools.py",
"type": "Python"
}
|
"""
tools
=====
Functions that USERS will possibly want access to.
"""
import json
import warnings
import os
from plotly import exceptions, optional_imports
from plotly.files import PLOTLY_DIR
# Default qualitative colour cycle: ten RGB strings.
DEFAULT_PLOTLY_COLORS = [
    "rgb(31, 119, 180)",
    "rgb(255, 127, 14)",
    "rgb(44, 160, 44)",
    "rgb(214, 39, 40)",
    "rgb(148, 103, 189)",
    "rgb(140, 86, 75)",
    "rgb(227, 119, 194)",
    "rgb(127, 127, 127)",
    "rgb(188, 189, 34)",
    "rgb(23, 190, 207)",
]

# Columns that every Gantt-chart input table must provide.
REQUIRED_GANTT_KEYS = ["Task", "Start", "Finish"]

# Named colorscales given as their two RGB endpoints (low, high).
PLOTLY_SCALES = {
    "Greys": ["rgb(0,0,0)", "rgb(255,255,255)"],
    "YlGnBu": ["rgb(8,29,88)", "rgb(255,255,217)"],
    "Greens": ["rgb(0,68,27)", "rgb(247,252,245)"],
    "YlOrRd": ["rgb(128,0,38)", "rgb(255,255,204)"],
    "Bluered": ["rgb(0,0,255)", "rgb(255,0,0)"],
    "RdBu": ["rgb(5,10,172)", "rgb(178,10,28)"],
    "Reds": ["rgb(220,220,220)", "rgb(178,10,28)"],
    "Blues": ["rgb(5,10,172)", "rgb(220,220,220)"],
    "Picnic": ["rgb(0,0,255)", "rgb(255,0,0)"],
    "Rainbow": ["rgb(150,0,90)", "rgb(255,0,0)"],
    "Portland": ["rgb(12,51,131)", "rgb(217,30,30)"],
    "Jet": ["rgb(0,0,131)", "rgb(128,0,0)"],
    "Hot": ["rgb(0,0,0)", "rgb(255,255,255)"],
    "Blackbody": ["rgb(0,0,0)", "rgb(160,200,255)"],
    "Earth": ["rgb(0,0,130)", "rgb(255,255,255)"],
    "Electric": ["rgb(0,0,0)", "rgb(255,250,220)"],
    "Viridis": ["rgb(68,1,84)", "rgb(253,231,37)"],
}

# color constants for violin plot
DEFAULT_FILLCOLOR = "#1f77b4"
DEFAULT_HISTNORM = "probability density"
ALTERNATIVE_HISTNORM = "probability"
# Warning format
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
    """Format a warning as ``filename:lineno: Category:`` plus the message.

    Signature matches :func:`warnings.formatwarning`, for which this is
    installed as a replacement below.
    """
    # !s forces str() conversion, matching the original %s formatting.
    return f"{filename!s}:{lineno!s}: {category.__name__!s}:\n\n{message!s}\n\n"
# Install the single-line formatter for all warnings emitted via `warnings`.
warnings.formatwarning = warning_on_one_line

# Optional dependencies; each is ``None`` when the module is unavailable.
ipython_core_display = optional_imports.get_module("IPython.core.display")
sage_salvus = optional_imports.get_module("sage_salvus")
### mpl-related tools ###
def mpl_to_plotly(fig, resize=False, strip_style=False, verbose=False):
    """Convert a matplotlib figure to a plotly figure.

    The matplotlib.figure.Figure object is crawled by mplexporter, and the
    gathered information is fed to a PlotlyRenderer, which builds the
    structure plotly uses to describe visualizations.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        The figure to convert.
    resize : bool, default False
        If True, let the renderer adjust the output size.
    strip_style : bool, default False
        If True, strip matplotlib styling from the output.
    verbose : bool, default False
        If True, print the renderer's accumulated messages.

    Returns
    -------
    The converted plotly figure, or None (after emitting a warning) when
    plotly.matplotlylib cannot be imported.

    To troubleshoot manually, run the export pipeline yourself:

        from plotly.matplotlylib import mplexporter, PlotlyRenderer
        renderer = PlotlyRenderer()
        exporter = mplexporter.Exporter(renderer)
        exporter.run(fig)
        # then inspect renderer.layout and renderer.data
    """
    matplotlylib = optional_imports.get_module("plotly.matplotlylib")
    if not matplotlylib:
        # matplotlib (or one of its dependencies) is unavailable; warn and
        # return None rather than raising.
        warnings.warn(
            "To use Plotly's matplotlylib functionality, you'll need to have "
            "matplotlib successfully installed with all of its dependencies. "
            "You're getting this error because matplotlib or one of its "
            "dependencies doesn't seem to be installed correctly."
        )
        return None

    renderer = matplotlylib.PlotlyRenderer()
    matplotlylib.Exporter(renderer).run(fig)
    if resize:
        renderer.resize()
    if strip_style:
        renderer.strip_style()
    if verbose:
        print(renderer.msg)
    return renderer.plotly_fig
### graph_objs related tools ###
def get_subplots(rows=1, columns=1, print_grid=False, **kwargs):
    """Return a dictionary instance with the subplots set in 'layout'.

    Deprecated: use tools.make_subplots instead.

    Example 1:
        # stack two subplots vertically
        fig = tools.get_subplots(rows=2)
        fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x1', yaxis='y1')]
        fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]

    Example 2:
        # print out string showing the subplot grid you've put in the layout
        fig = tools.get_subplots(rows=3, columns=2, print_grid=True)

    Parameters
    ----------
    rows : int greater than 0, default 1
        Number of rows, evenly spaced vertically on the figure.
    columns : int greater than 0, default 1
        Number of columns, evenly spaced horizontally on the figure.
    print_grid : bool, default False
        If True, print a tab-delimited string representation of the grid.
    horizontal_spacing : float in [0,1], default 0.2 / columns
        Space between subplot columns. Applied to all columns.
    vertical_spacing : float in [0,1], default 0.3 / rows
        Space between subplot rows. Applied to all rows.
    """
    # TODO: protected until #282
    from plotly.graph_objs import graph_objs

    warnings.warn(
        "tools.get_subplots is depreciated. " "Please use tools.make_subplots instead."
    )

    # Grid dimensions must be positive integers.
    if not isinstance(rows, int) or rows <= 0:
        raise Exception("Keyword argument 'rows' " "must be an int greater than 0")
    if not isinstance(columns, int) or columns <= 0:
        raise Exception("Keyword argument 'columns' " "must be an int greater than 0")

    # Only the two spacing kwargs are recognized.
    for key in kwargs:
        if key not in ("horizontal_spacing", "vertical_spacing"):
            raise Exception("Invalid keyword argument: '{0}'".format(key))

    # Spacing defaults depend on the grid size.
    if "horizontal_spacing" in kwargs:
        horizontal_spacing = float(kwargs["horizontal_spacing"])
    else:
        horizontal_spacing = 0.2 / columns
    if "vertical_spacing" in kwargs:
        vertical_spacing = float(kwargs["vertical_spacing"])
    else:
        vertical_spacing = 0.3 / rows

    fig = dict(layout=graph_objs.Layout())  # will return this at the end
    plot_width = (1 - horizontal_spacing * (columns - 1)) / columns
    plot_height = (1 - vertical_spacing * (rows - 1)) / rows

    # One (xaxisN, yaxisN) pair per cell, numbered row-major from 1,
    # with row 0 at the bottom of the figure.
    for idx in range(rows * columns):
        row, col = divmod(idx, columns)
        x_start = (plot_width + horizontal_spacing) * col
        y_start = (plot_height + vertical_spacing) * row
        fig["layout"]["xaxis{0}".format(idx + 1)] = dict(
            domain=[x_start, x_start + plot_width], anchor="y{0}".format(idx + 1)
        )
        fig["layout"]["yaxis{0}".format(idx + 1)] = dict(
            domain=[y_start, y_start + plot_height], anchor="x{0}".format(idx + 1)
        )

    if print_grid:
        print("This is the format of your plot grid!")
        grid_lines = []
        for row in range(rows):
            grid_lines.append(
                "".join("[{0}]\t".format(row * columns + col + 1) for col in range(columns))
            )
        # Row 0 is printed at the bottom.
        print("\n".join(reversed(grid_lines)) + "\n")

    return graph_objs.Figure(fig)  # forces us to validate what we just did...
def make_subplots(
    rows=1,
    cols=1,
    shared_xaxes=False,
    shared_yaxes=False,
    start_cell="top-left",
    print_grid=None,
    **kwargs,
):
    """Return a plotly.graph_objs.Figure with subplot domains set in 'layout'.

    Deprecated: this is a thin wrapper that forwards every argument,
    unchanged, to plotly.subplots.make_subplots -- call that function
    directly in new code.

    Example:
        # stack two subplots vertically
        fig = tools.make_subplots(rows=2)
        This is the format of your plot grid:
        [ (1,1) x1,y1 ]
        [ (2,1) x2,y2 ]
        fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
        fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]

    Parameters
    ----------
    rows : int greater than 0, default 1
        Number of rows in the subplot grid.
    cols : int greater than 0, default 1
        Number of columns in the subplot grid.
    shared_xaxes : boolean or list, default False
        Assign shared x axes. If True, subplots in the same grid column
        share one x-axis at the bottom of the grid. To share per subplot
        grid cell (see 'specs'), send a list (or list of lists, one list
        per shared x axis) of cell index tuples.
    shared_yaxes : boolean or list, default False
        Assign shared y axes. If True, subplots in the same grid row
        share one y-axis on the left-hand side of the grid. Per-cell
        sharing works as for `shared_xaxes`.
    start_cell : 'bottom-left' or 'top-left', default 'top-left'
        Starting cell in the grid used to set the subplot domains.
    print_grid : boolean, default True
        If True, print a tab-delimited string representation of the grid.
    **kwargs
        Forwarded verbatim. Notable keys:
        * horizontal_spacing (float in [0,1], default 0.2 / cols) and
          vertical_spacing (float in [0,1], default 0.3 / rows).
        * subplot_titles (list of str): one title per subplot; use ""
          to skip a cell and keep the titles indexed.
        * specs (list of `rows` lists of `cols` dicts): per-cell subplot
          specifications. Each dict supports is_3d (bool), colspan / rowspan
          (int), and l/r/t/b cell padding (float); use None for a blank
          cell or to move past a col/row span. specs[0][0] describes the
          'start_cell' subplot.
        * insets (list of dicts): inset axes with keys cell, is_3d,
          l, w, b, h (w/h accept 'to_end').
        * column_width / row_width (lists of numbers): relative column /
          row domain proportions, one entry per column / row.
        See plotly.subplots.make_subplots for the full reference.
    """
    import plotly.subplots

    warnings.warn(
        "plotly.tools.make_subplots is deprecated, "
        "please use plotly.subplots.make_subplots instead",
        DeprecationWarning,
        stacklevel=1,
    )

    # Pure delegation: the modern implementation lives in plotly.subplots.
    return plotly.subplots.make_subplots(
        rows=rows,
        cols=cols,
        shared_xaxes=shared_xaxes,
        shared_yaxes=shared_yaxes,
        start_cell=start_cell,
        print_grid=print_grid,
        **kwargs,
    )
# Re-enable the deprecation warning above: DeprecationWarning is ignored by
# default in Python, and "default" shows it once per call location.
warnings.filterwarnings(
    "default", r"plotly\.tools\.make_subplots is deprecated", DeprecationWarning
)
def get_graph_obj(obj, obj_type=None):
    """Return a new graph object of class `obj_type` built from `obj`.

    OLD FUNCTION: this would *silently* strip out invalid pieces of the object.
    NEW FUNCTION: no stripping of invalid pieces anymore -- only raises an
    error on unrecognized graph_objs.
    """
    # TODO: Deprecate or move. #283
    from plotly.graph_objs import graph_objs

    try:
        constructor = getattr(graph_objs, obj_type)
    except (AttributeError, KeyError):
        # obj_type is not a class exported by plotly.graph_objs.
        raise exceptions.PlotlyError(
            "'{}' is not a recognized graph_obj.".format(obj_type)
        )
    return constructor(obj)
def _replace_newline(obj):
"""Replaces '\n' with '<br>' for all strings in a collection."""
if isinstance(obj, dict):
d = dict()
for key, val in list(obj.items()):
d[key] = _replace_newline(val)
return d
elif isinstance(obj, list):
l = list()
for index, entry in enumerate(obj):
l += [_replace_newline(entry)]
return l
elif isinstance(obj, str):
s = obj.replace("\n", "<br>")
if s != obj:
warnings.warn(
"Looks like you used a newline character: '\\n'.\n\n"
"Plotly uses a subset of HTML escape characters\n"
"to do things like newline (<br>), bold (<b></b>),\n"
"italics (<i></i>), etc. Your newline characters \n"
"have been converted to '<br>' so they will show \n"
"up right on your Plotly figure!"
)
return s
else:
return obj # we return the actual reference... but DON'T mutate.
def return_figure_from_figure_or_data(figure_or_data, validate_figure):
    """Coerce `figure_or_data` into a plain figure dict.

    Parameters
    ----------
    figure_or_data : dict, list or plotly.basedatatypes.BaseFigure
        A figure dict, a bare list of traces, or a Figure instance.
    validate_figure : bool
        If True, round-trip through plotly.graph_objs.Figure to validate.
        Skipped for BaseFigure instances, which are already validated.

    Returns
    -------
    dict
        The figure as a dict with (at least) a non-empty 'data' entry.

    Raises
    ------
    exceptions.PlotlyError
        If the input has an unsupported type or fails validation.
    exceptions.PlotlyEmptyDataError
        If the resulting figure contains no traces.
    """
    from plotly.graph_objs import Figure
    from plotly.basedatatypes import BaseFigure

    validated = False
    if isinstance(figure_or_data, dict):
        figure = figure_or_data
    elif isinstance(figure_or_data, list):
        # A bare list is taken to be the trace list.
        figure = {"data": figure_or_data}
    elif isinstance(figure_or_data, BaseFigure):
        figure = figure_or_data.to_dict()
        validated = True
    else:
        raise exceptions.PlotlyError(
            "The `figure_or_data` positional "
            "argument must be "
            "`dict`-like, `list`-like, or an instance of plotly.graph_objs.Figure"
        )

    if validate_figure and not validated:
        try:
            figure = Figure(**figure).to_dict()
        except exceptions.PlotlyError as err:
            raise exceptions.PlotlyError(
                "Invalid 'figure_or_data' argument. "
                "Plotly will not be able to properly "
                "parse the resulting JSON. If you "
                "want to send this 'figure_or_data' "
                "to Plotly anyway (not recommended), "
                "you can set 'validate=False' as a "
                "plot option.\nHere's why you're "
                "seeing this error:\n\n{0}"
                "".format(err)
            )

    # Use .get so a dict that lacks a 'data' key altogether (possible when
    # validate_figure=False) raises the informative PlotlyEmptyDataError
    # below instead of a bare KeyError.
    if not figure.get("data"):
        raise exceptions.PlotlyEmptyDataError(
            "Empty data list found. Make sure that you populated the "
            "list of data objects you're sending and try again.\n"
            "Questions? Visit support.plot.ly"
        )

    return figure
# Default colours for finance charts
_DEFAULT_INCREASING_COLOR = "#3D9970"  # http://clrs.cc
_DEFAULT_DECREASING_COLOR = "#FF4136"
# Valid choices for the diagonal panels of a scatterplot matrix.
DIAG_CHOICES = ["scatter", "histogram", "box"]
# Colormap kinds accepted by the scatterplot-matrix factory:
# categorical ("cat") or sequential ("seq").
VALID_COLORMAP_TYPES = ["cat", "seq"]
# Deprecations
class FigureFactory(object):
    """Deprecated namespace kept for backward compatibility.

    Every ``create_*`` static method below emits a deprecation warning and
    then forwards ``*args``/``**kwargs`` unchanged to the function of the
    same (or renamed) name in ``plotly.figure_factory``. New code should
    call ``plotly.figure_factory`` directly.
    """

    @staticmethod
    def _deprecated(old_method, new_method=None):
        # Warn that FigureFactory.<old_method> moved to
        # plotly.figure_factory.<new_method>.
        if new_method is None:
            # The method name stayed the same.
            new_method = old_method
        warnings.warn(
            "plotly.tools.FigureFactory.{} is deprecated. "
            "Use plotly.figure_factory.{}".format(old_method, new_method)
        )

    @staticmethod
    def create_2D_density(*args, **kwargs):
        # Renamed: the public function is now snake_case.
        FigureFactory._deprecated("create_2D_density", "create_2d_density")
        from plotly.figure_factory import create_2d_density

        return create_2d_density(*args, **kwargs)

    @staticmethod
    def create_annotated_heatmap(*args, **kwargs):
        FigureFactory._deprecated("create_annotated_heatmap")
        from plotly.figure_factory import create_annotated_heatmap

        return create_annotated_heatmap(*args, **kwargs)

    @staticmethod
    def create_candlestick(*args, **kwargs):
        FigureFactory._deprecated("create_candlestick")
        from plotly.figure_factory import create_candlestick

        return create_candlestick(*args, **kwargs)

    @staticmethod
    def create_dendrogram(*args, **kwargs):
        FigureFactory._deprecated("create_dendrogram")
        from plotly.figure_factory import create_dendrogram

        return create_dendrogram(*args, **kwargs)

    @staticmethod
    def create_distplot(*args, **kwargs):
        FigureFactory._deprecated("create_distplot")
        from plotly.figure_factory import create_distplot

        return create_distplot(*args, **kwargs)

    @staticmethod
    def create_facet_grid(*args, **kwargs):
        FigureFactory._deprecated("create_facet_grid")
        from plotly.figure_factory import create_facet_grid

        return create_facet_grid(*args, **kwargs)

    @staticmethod
    def create_gantt(*args, **kwargs):
        FigureFactory._deprecated("create_gantt")
        from plotly.figure_factory import create_gantt

        return create_gantt(*args, **kwargs)

    @staticmethod
    def create_ohlc(*args, **kwargs):
        FigureFactory._deprecated("create_ohlc")
        from plotly.figure_factory import create_ohlc

        return create_ohlc(*args, **kwargs)

    @staticmethod
    def create_quiver(*args, **kwargs):
        FigureFactory._deprecated("create_quiver")
        from plotly.figure_factory import create_quiver

        return create_quiver(*args, **kwargs)

    @staticmethod
    def create_scatterplotmatrix(*args, **kwargs):
        FigureFactory._deprecated("create_scatterplotmatrix")
        from plotly.figure_factory import create_scatterplotmatrix

        return create_scatterplotmatrix(*args, **kwargs)

    @staticmethod
    def create_streamline(*args, **kwargs):
        FigureFactory._deprecated("create_streamline")
        from plotly.figure_factory import create_streamline

        return create_streamline(*args, **kwargs)

    @staticmethod
    def create_table(*args, **kwargs):
        FigureFactory._deprecated("create_table")
        from plotly.figure_factory import create_table

        return create_table(*args, **kwargs)

    @staticmethod
    def create_trisurf(*args, **kwargs):
        FigureFactory._deprecated("create_trisurf")
        from plotly.figure_factory import create_trisurf

        return create_trisurf(*args, **kwargs)

    @staticmethod
    def create_violin(*args, **kwargs):
        FigureFactory._deprecated("create_violin")
        from plotly.figure_factory import create_violin

        return create_violin(*args, **kwargs)
def get_config_plotly_server_url():
    """
    Function to get the .config file's 'plotly_domain' without importing
    the chart_studio package. This property is needed to compute the default
    value of the plotly.js config plotlyServerURL, so it is independent of
    the chart_studio integration and still needs to live in the plotly
    package.

    Falls back to "https://plot.ly" when the config file is missing,
    unreadable as JSON, or not a JSON object.

    Returns
    -------
    str
    """
    config_file = os.path.join(PLOTLY_DIR, ".config")
    default_server_url = "https://plot.ly"
    if not os.path.exists(config_file):
        return default_server_url
    with open(config_file, "rt") as f:
        try:
            config_dict = json.load(f)
            if not isinstance(config_dict, dict):
                config_dict = {}
        except ValueError:
            # Malformed JSON (json.JSONDecodeError and UnicodeDecodeError
            # are both ValueError subclasses). Narrowed from a bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit.
            # TODO: issue a warning and bubble it up
            config_dict = {}

    return config_dict.get("plotly_domain", default_server_url)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@tools.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "cta-observatory/cta-lstchain",
"repo_path": "cta-lstchain_extracted/cta-lstchain-main/lstchain/calib/camera/tests/__init__.py",
"type": "Python"
}
|
cta-observatoryREPO_NAMEcta-lstchainPATH_START.@cta-lstchain_extracted@cta-lstchain-main@lstchain@calib@camera@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "motion.py",
"repo_name": "dstndstn/tractor",
"repo_path": "tractor_extracted/tractor-main/tractor/motion.py",
"type": "Python"
}
|
from .utils import *
from .basics import *
class Parallax(ArithmeticParams, ScalarParam):
    """Scalar parallax parameter, in arcseconds."""

    stepsize = 1e-3

    def __str__(self):
        value = self.getValue()
        return 'Parallax: %.3f arcsec' % value
class ParallaxWithPrior(Parallax):
    """Parallax with the Lutz & Kelker (1973) PASP 85 573 prior
    (from the introduction of that paper); negative values are illegal."""

    def getLogPrior(self):
        # Lutz & Kelker (1973) PASP 85 573, in the introduction, yo!
        value = self.getValue()
        if value < 0:
            return -np.inf
        return -4. * np.log(value)

    def isLegal(self):
        return self.getValue() >= 0
#### FIXME -- cos(Dec)
class PMRaDec(RaDecPos):
    """Proper motion in (RA, Dec). Values are multiplied by 3600 to get
    arcsec/yr, so they are presumably stored in deg/yr -- confirm against
    RaDecPos conventions."""

    @staticmethod
    def getName():
        return "PMRaDec"

    @staticmethod
    def getNamedParams():
        return dict(pmra=0, pmdec=1)

    def __init__(self, *args, **kwargs):
        # Register the aliases before the base class initializes.
        self.addParamAliases(ra=0, dec=1)
        super(PMRaDec, self).__init__(*args, **kwargs)
        self.setStepSizes([1e-6] * 2)

    def __str__(self):
        ra_mas = 1000. * self.getRaArcsecPerYear()
        dec_mas = 1000. * self.getDecArcsecPerYear()
        return '%s: (%.2f, %.2f) mas/yr' % (self.getName(), ra_mas, dec_mas)

    def getRaArcsecPerYear(self):
        return self.pmra * 3600.

    def getDecArcsecPerYear(self):
        return self.pmdec * 3600.

    # def getParamDerivatives(self, img, modelMask=None):
    #     return [None]*self.numberOfParams()
class MovingPointSource(PointSource):
    """A point source whose position evolves with proper motion and parallax.

    Sub-parameters: pos (RaDecPos at reference time `epoch`), brightness,
    pm (PMRaDec), parallax (wrapped into a Parallax param, arcsec).
    """

    def __init__(self, pos, brightness, pm, parallax, epoch=0.):
        # Assume types...
        assert(type(pos) is RaDecPos)
        assert(type(pm) is PMRaDec)
        # NOTE(review): super(PointSource, self) deliberately skips
        # PointSource.__init__ and hands the four sub-params straight to
        # PointSource's base class -- presumably because
        # PointSource.__init__ takes a different argument list; confirm
        # against the PointSource definition.
        super(PointSource, self).__init__(pos, brightness, pm,
                                          Parallax(parallax))
        self.epoch = epoch

    @staticmethod
    def getNamedParams():
        # Index order of the sub-params passed to __init__ above.
        return dict(pos=0, brightness=1, pm=2, parallax=3)

    def getSourceType(self):
        return 'MovingPointSource'

    def __str__(self):
        return (self.getSourceType() + ' at ' + str(self.pos) +
                ' with ' + str(self.brightness) + ', pm ' + str(self.pm) +
                ', parallax ' + str(self.parallax))

    def __repr__(self):
        return (self.getSourceType() + '(' + repr(self.pos) + ', ' +
                repr(self.brightness) + ', ' + repr(self.pm) + ', ' +
                repr(self.parallax) + ')')

    def getPositionAtTime(self, t):
        """Return the RaDecPos of this source at time `t`: the epoch
        position plus linear proper motion plus the parallactic offset for
        the Sun's orbital phase at `t`."""
        from astrometry.util.starutil_numpy import radectoxyz, arcsecperrad, axistilt, xyztoradec

        dt = (t - self.epoch).toYears()
        # Assume "pos" is an RaDecPos
        p = self.pos + dt * self.pm
        suntheta = t.getSunTheta()
        # print 'dt', dt, 'pos', self.pos, 'pm', self.pm, 'dt*pm:', dt * self.pm
        # print 'p0: (%.8f, %.8f)' % (self.pos.ra, self.pos.dec)
        # print 'p1: (%.8f, %.8f)' % (p.ra, p.dec)
        xyz = radectoxyz(p.ra, p.dec)
        xyz = xyz[0]
        # d(celestial coords)/d(parallax)
        # - takes numerical derivatives when it could take analytic ones
        # output is in [degrees / arcsec].  Yep.  Crazy but true.
        # HACK: fmods dRA when it should do something continuous.
        # rd2xyz(0,0) is a unit vector; 1/arcsecperrad is (a good approximation to)
        # the distance on the unit sphere spanned by an angle of 1 arcsec.
        # We take a step of that length and return the change in RA,Dec.
        # It's about 1e-5 so we don't renormalize the xyz unit vector.
        dxyz1 = radectoxyz(0., 0.) / arcsecperrad
        dxyz1 = dxyz1[0]
        # - imprecise angle of obliquity
        # - implicitly assumes circular orbit
        # output is in [degrees / arcsec].  Yep.  Crazy but true.
        dxyz2 = radectoxyz(90., axistilt) / arcsecperrad
        dxyz2 = dxyz2[0]
        # Parallactic displacement: combine the two basis offsets weighted
        # by the Sun's phase angle, scaled by the parallax in arcsec.
        xyz += self.parallax.getValue() * (dxyz1 * np.cos(suntheta) +
                                           dxyz2 * np.sin(suntheta))
        r, d = xyztoradec(xyz)
        return RaDecPos(r, d)

    def getUnitFluxModelPatch(self, img, minval=0., modelMask=None, **kwargs):
        """Render the unit-flux PSF patch at this source's position,
        evaluated at the image's observation time."""
        pos = self.getPositionAtTime(img.getTime())
        (px, py) = img.getWcs().positionToPixel(pos)
        patch = img.getPsf().getPointSourcePatch(
            px, py, minval=minval, modelMask=modelMask, **kwargs)
        return patch

    def getParamDerivatives(self, img, modelMask=None, **kwargs):
        '''
        MovingPointSource derivatives.

        returns [ Patch, Patch, ... ] of length numberOfParams().
        '''
        t = img.getTime()
        pos0 = self.getPositionAtTime(t)
        (px0, py0) = img.getWcs().positionToPixel(pos0, self)
        patch0 = img.getPsf().getPointSourcePatch(px0, py0, modelMask=modelMask)
        counts0 = img.getPhotoCal().brightnessToCounts(self.brightness)
        derivs = []

        # print 'MovingPointSource.getParamDerivs:'
        # print 'initial pixel pos', px0, py0

        # Position

        # FIXME -- could just compute positional derivatives once and
        # reuse them, but have to be careful about frozen-ness -- eg,
        # if RA were frozen but not Dec.

        # OR, could compute dx,dy and then use CD matrix to convert
        # dpos to derivatives.

        # pderivs = []
        # if ((not self.isParamFrozen('pos')) or
        #     (not self.isParamFrozen('pm')) or
        #     (not self.isParamFrozen('parallax'))):
        #
        #     psteps = pos0.getStepSizes(img)
        #     pvals = pos0.getParams()
        #     for i,pstep in enumerate(psteps):
        #         oldval = pos0.setParam(i, pvals[i] + pstep)
        #         (px,py) = img.getWcs().positionToPixel(pos0, self)
        #         patchx = img.getPsf().getPointSourcePatch(px, py)
        #         pos0.setParam(i, oldval)
        #         dx = (patchx - patch0) * (counts0 / pstep)
        #         dx.setName('d(ptsrc)/d(pos%i)' % i)
        #         pderivs.append(dx)
        # if not self.isParamFrozen('pos'):
        #     derivs.extend(pderivs)

        def _add_posderivs(p, name):
            # Numerical (finite-difference) derivatives of the rendered
            # patch w.r.t. each sub-parameter of `p`: step the parameter,
            # re-evaluate the time-dependent position, re-render, restore.
            # uses "globals": t, patch0, counts0
            psteps = p.getStepSizes(img)
            pvals = p.getParams()
            for i, pstep in enumerate(psteps):
                oldval = p.setParam(i, pvals[i] + pstep)
                tpos = self.getPositionAtTime(t)
                (px, py) = img.getWcs().positionToPixel(tpos, self)
                # print 'stepping param', name, i, '-->', p, '--> pos', tpos, 'pix pos', px,py
                patchx = img.getPsf().getPointSourcePatch(px, py, modelMask=modelMask)
                p.setParam(i, oldval)
                dx = (patchx - patch0) * (counts0 / pstep)
                dx.setName('d(ptsrc)/d(%s%i)' % (name, i))
                # print 'deriv', dx.patch.min(), dx.patch.max()
                derivs.append(dx)

        # print 'Finding RA,Dec derivatives'
        if not self.isParamFrozen('pos'):
            _add_posderivs(self.pos, 'pos')

        # Brightness: finite-difference the photometric calibration only;
        # the patch shape is unchanged, so scale patch0.
        # print 'Finding Brightness derivatives'
        if not self.isParamFrozen('brightness'):
            bsteps = self.brightness.getStepSizes(img)
            bvals = self.brightness.getParams()
            for i, bstep in enumerate(bsteps):
                oldval = self.brightness.setParam(i, bvals[i] + bstep)
                countsi = img.getPhotoCal().brightnessToCounts(self.brightness)
                self.brightness.setParam(i, oldval)
                df = patch0 * ((countsi - counts0) / bstep)
                df.setName('d(ptsrc)/d(bright%i)' % i)
                derivs.append(df)

        # print 'Finding Proper Motion derivatives'
        if not self.isParamFrozen('pm'):
            # # ASSUME 'pm' is the same type as 'pos'
            # dt = (t - self.epoch).toYears()
            # for d in pderivs:
            #     dd = d * dt
            #     derivs.append(dd)
            _add_posderivs(self.pm, 'pm')

        # print 'Finding Parallax derivatives'
        if not self.isParamFrozen('parallax'):
            _add_posderivs(self.parallax, 'parallax')

        return derivs
|
dstndstnREPO_NAMEtractorPATH_START.@tractor_extracted@tractor-main@tractor@motion.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/treemap/hoverlabel/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "treemap.hoverlabel"
_path_str = "treemap.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
"Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
"Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# lineposition
# ------------
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
# linepositionsrc
# ---------------
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
# shadow
# ------
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
# shadowsrc
# ---------
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# style
# -----
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
# stylesrc
# --------
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
# textcase
# --------
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
# textcasesrc
# -----------
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
# variant
# -------
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
# variantsrc
# ----------
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
# weight
# ------
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
# weightsrc
# ---------
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Human-readable summary of every property of this object;
        # consumed verbatim by the constructor docstring and the
        # figure help machinery, so the literal below must not change.
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        linepositionsrc
            Sets the source reference on Chart Studio Cloud for
            `lineposition`.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        shadowsrc
            Sets the source reference on Chart Studio Cloud for
            `shadow`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        stylesrc
            Sets the source reference on Chart Studio Cloud for
            `style`.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        textcasesrc
            Sets the source reference on Chart Studio Cloud for
            `textcase`.
        variant
            Sets the variant of the font.
        variantsrc
            Sets the source reference on Chart Studio Cloud for
            `variant`.
        weight
            Sets the weight (or boldness) of the font.
        weightsrc
            Sets the source reference on Chart Studio Cloud for
            `weight`.
        """
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.treemap.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("lineposition", None)
_v = lineposition if lineposition is not None else _v
if _v is not None:
self["lineposition"] = _v
_v = arg.pop("linepositionsrc", None)
_v = linepositionsrc if linepositionsrc is not None else _v
if _v is not None:
self["linepositionsrc"] = _v
_v = arg.pop("shadow", None)
_v = shadow if shadow is not None else _v
if _v is not None:
self["shadow"] = _v
_v = arg.pop("shadowsrc", None)
_v = shadowsrc if shadowsrc is not None else _v
if _v is not None:
self["shadowsrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
_v = arg.pop("style", None)
_v = style if style is not None else _v
if _v is not None:
self["style"] = _v
_v = arg.pop("stylesrc", None)
_v = stylesrc if stylesrc is not None else _v
if _v is not None:
self["stylesrc"] = _v
_v = arg.pop("textcase", None)
_v = textcase if textcase is not None else _v
if _v is not None:
self["textcase"] = _v
_v = arg.pop("textcasesrc", None)
_v = textcasesrc if textcasesrc is not None else _v
if _v is not None:
self["textcasesrc"] = _v
_v = arg.pop("variant", None)
_v = variant if variant is not None else _v
if _v is not None:
self["variant"] = _v
_v = arg.pop("variantsrc", None)
_v = variantsrc if variantsrc is not None else _v
if _v is not None:
self["variantsrc"] = _v
_v = arg.pop("weight", None)
_v = weight if weight is not None else _v
if _v is not None:
self["weight"] = _v
_v = arg.pop("weightsrc", None)
_v = weightsrc if weightsrc is not None else _v
if _v is not None:
self["weightsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@treemap@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "ii_plus_3d.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/mock_observables/ia_correlations/ii_plus_3d.py",
"type": "Python"
}
|
r"""
Module containing the `~halotools.mock_observables.alignments.ii_plus_3d` function used to
calculate the intrinsic ellipticity-ellipticity (II) correlation
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from math import pi, gamma
from .alignment_helpers import process_3d_alignment_args
from ..mock_observables_helpers import (enforce_sample_has_correct_shape,
get_separation_bins_array, get_line_of_sight_bins_array, get_period, get_num_threads)
from ..pair_counters.mesh_helpers import _enforce_maximum_search_length
from ..pair_counters import positional_marked_npairs_3d, marked_npairs_3d
__all__ = ['ii_plus_3d']
__author__ = ['Duncan Campbell']
np.seterr(divide='ignore', invalid='ignore') # ignore divide by zero in e.g. DD/RR
def ii_plus_3d(sample1, orientations1, ellipticities1, sample2, orientations2, ellipticities2,
               rbins, randoms1=None, randoms2=None, weights1=None, weights2=None,
               ran_weights1=None, ran_weights2=None, estimator='Natural',
               period=None, num_threads=1, approx_cell1_size=None, approx_cell2_size=None):
    r"""
    Calculate the intrinsic ellipticity-ellipticity correlation function (II),
    :math:`\xi_{++}(r)`. See the 'Notes' section for details of this calculation.

    Parameters
    ----------
    sample1 : array_like
        Npts1 x 3 numpy array containing 3-D positions of points with associated
        orientations and ellipticities.
        See the :ref:`mock_obs_pos_formatting` documentation page, or the
        Examples section below, for instructions on how to transform
        your coordinate position arrays into the format accepted by the ``sample1`` and ``sample2`` arguments.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.

    orientations1 : array_like
        Npts1 x 3 numpy array containing projected orientation vectors for each point in ``sample1``.
        these will be normalized if not already.

    ellipticities1: array_like
        Npts1 x 1 numpy array containing ellipticities for each point in ``sample1``.

    sample2 : array_like, optional
        Npts2 x 3 array containing 3-D positions of points with associated
        orientations and ellipticities.

    orientations2 : array_like
        Npts2 x 3 numpy array containing projected orientation vectors for each point in ``sample2``.
        these will be normalized if not already.

    ellipticities2: array_like
        Npts2 x 1 numpy array containing ellipticities for each point in ``sample2``.

    rbins : array_like
        array of boundaries defining the radial bins in which
        pairs are counted.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.

    randoms1 : array_like, optional
        Nran1 x 3 array containing 3-D positions of randomly distributed points corresponding to ``sample1``.
        If no randoms are provided (the default option), the
        calculation can proceed using analytical randoms
        (only valid for periodic boundary conditions).

    randoms2 : array_like, optional
        Nran2 x 3 array containing 3-D positions of randomly distributed points corresponding to ``sample2``.
        If no randoms are provided (the default option), the
        calculation can proceed using analytical randoms
        (only valid for periodic boundary conditions).

    weights1 : array_like, optional
        Npts1 array of weights. If this parameter is not specified, it is set to numpy.ones(Npts1).

    weights2 : array_like, optional
        Npts2 array of weights. If this parameter is not specified, it is set to numpy.ones(Npts2).

    ran_weights1 : array_like, optional
        Nran1 array of weights. If this parameter is not specified, it is set to numpy.ones(Nran1).

    ran_weights2 : array_like, optional
        Nran2 array of weights. If this parameter is not specified, it is set to numpy.ones(Nran2).

    estimator : string, optional
        string indicating which estimator to use

    period : array_like, optional
        Length-3 sequence defining the periodic boundary conditions
        in each dimension. If you instead provide a single scalar, Lbox,
        period is assumed to be the same in all Cartesian directions.
        If set to None (the default option), PBCs are set to infinity,
        in which case ``randoms`` must be provided.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.

    num_threads : int, optional
        Number of threads to use in calculation, where parallelization is performed
        using the python ``multiprocessing`` module. Default is 1 for a purely serial
        calculation, in which case a multiprocessing Pool object will
        never be instantiated. A string 'max' may be used to indicate that
        the pair counters should use all available cores on the machine.

    approx_cell1_size : array_like, optional
        Length-3 array serving as a guess for the optimal manner by how points
        will be apportioned into subvolumes of the simulation box.
        The optimum choice unavoidably depends on the specs of your machine.
        Default choice is to use Lbox/10 in each dimension,
        which will return reasonable result performance for most use-cases.
        Performance can vary sensitively with this parameter, so it is highly
        recommended that you experiment with this parameter when carrying out
        performance-critical calculations.

    approx_cell2_size : array_like, optional
        Analogous to ``approx_cell1_size``, but for sample2. See comments for
        ``approx_cell1_size`` for details.

    Returns
    -------
    correlation_function : numpy.array
        *len(rbins)-1* length array containing the correlation function :math:`\xi_{++}(r)`
        computed in each of the bins defined by input ``rbins``.

    Notes
    -----
    The II-correlation function is calculated as:

    .. math::
        \xi_{++}(r) = \frac{S_{+}S_{+}}{R_sR_s}

    where

    .. math::
        S_{+}S_{+} = \sum_{i \neq j} w_jw_i e_{+}(j|i)e_{+}(i|j)

    :math:`w_i` and :math:`w_j` are weights. Weights are set to 1 for all galaxies by default.

    The alignment of the :math:`j`-th galaxy relative to the direction to the :math:`i`-th galaxy is given by:

    .. math::
        e_{+}(j|i) = e_j\cos(2\phi)

    where :math:`e_j` is the ellipticity of the :math:`j`-th galaxy. :math:`\phi` is the angle between the
    orientation vector, :math:`\vec{o}_j`, and the direction between the :math:`j`-th
    and :math:`i`-th galaxy, :math:`\vec{r}_{i,j}`.

    .. math::
        \cos(\phi) = \vec{o}_j \cdot \vec{r}_{i,j}

    :math:`R_sR_s` are random pair counts,

    Examples
    --------
    For demonstration purposes we create a randomly distributed set of points within a
    periodic cube of Lbox = 250 Mpc/h.

    >>> Npts = 1000
    >>> Lbox = 250

    >>> x = np.random.uniform(0, Lbox, Npts)
    >>> y = np.random.uniform(0, Lbox, Npts)
    >>> z = np.random.uniform(0, Lbox, Npts)

    We transform our *x, y, z* points into the array shape used by the pair-counter by
    taking the transpose of the result of `numpy.vstack`. This boilerplate transformation
    is used throughout the `~halotools.mock_observables` sub-package:

    >>> sample1 = np.vstack((x,y,z)).T

    Alternatively, you may use the `~halotools.mock_observables.return_xyz_formatted_array`
    convenience function for this same purpose, which provides additional wrapper
    behavior around `numpy.vstack` such as placing points into redshift-space.

    We then create a set of random orientation vectors and ellipticities for each point

    >>> random_orientations = np.random.random((Npts,3))
    >>> random_ellipticities = np.random.random(Npts)

    We can then calculate the auto-II correlation between these points:

    >>> rbins = np.logspace(-1,1,10)
    >>> w = ii_plus_3d(sample1, random_orientations, random_ellipticities, sample1, random_orientations, random_ellipticities, rbins, period=Lbox)
    """

    # process arguments: broadcasts/normalizes orientations, weights
    # and randoms into consistent array shapes
    alignment_args = (sample1, orientations1, ellipticities1, weights1,
                      sample2, orientations2, ellipticities2, weights2,
                      randoms1, ran_weights1, randoms2, ran_weights2)
    sample1, orientations1, ellipticities1, weights1, sample2,\
        orientations2, ellipticities2, weights2, randoms1, ran_weights1,\
        randoms2, ran_weights2 = process_3d_alignment_args(*alignment_args)

    function_args = (sample1, rbins, sample2, randoms1, randoms2,
                     period, num_threads, approx_cell1_size, approx_cell2_size)
    sample1, rbins, sample2, randoms1, randoms2,\
        period, num_threads, PBCs, no_randoms = _ii_plus_3d_process_args(*function_args)

    # How many points are there (for normalization purposes)?
    N1 = len(sample1)
    N2 = len(sample2)
    if no_randoms:  # set random density to the same as the samples
        NR1 = N1
        NR2 = N2
    else:
        NR1 = len(randoms1)
        NR2 = len(randoms2)

    # define mark vectors to use in pair counting:
    # column 0 is the weighted ellipticity, columns 1-3 the orientation
    # sample 1
    marks1 = np.ones((N1, 4))
    marks1[:, 0] = ellipticities1 * weights1
    marks1[:, 1] = orientations1[:, 0]
    marks1[:, 2] = orientations1[:, 1]
    marks1[:, 3] = orientations1[:, 2]

    # sample 2
    marks2 = np.ones((N2, 4))
    marks2[:, 0] = ellipticities2 * weights2
    marks2[:, 1] = orientations2[:, 0]
    marks2[:, 2] = orientations2[:, 1]
    marks2[:, 3] = orientations2[:, 2]

    # randoms carry only a weight; the orientation slots are unused
    # randoms 1
    ran_marks1 = np.ones((NR1, 4))
    ran_marks1[:, 0] = ran_weights1
    ran_marks1[:, 1] = 0  # dummy
    ran_marks1[:, 2] = 0  # dummy
    ran_marks1[:, 3] = 0  # dummy

    # randoms 2
    ran_marks2 = np.ones((NR2, 4))
    ran_marks2[:, 0] = ran_weights2
    ran_marks2[:, 1] = 0  # dummy
    ran_marks2[:, 2] = 0  # dummy
    ran_marks2[:, 3] = 0  # dummy

    do_SS, do_RR = II_estimator_requirements(estimator)

    # count marked pairs
    if do_SS:
        SS = marked_pair_counts(sample1, sample2, marks1, marks2,
                                rbins, period, num_threads,
                                approx_cell1_size, approx_cell2_size)
    else:
        SS = None

    # count random pairs
    if do_RR:
        RR = random_counts(randoms1, randoms2, ran_weights1, ran_weights2,
                           rbins, N1, N2, no_randoms, period, PBCs,
                           num_threads, approx_cell1_size, approx_cell2_size)
    else:
        RR = None

    result = II_estimator(SS, RR, N1, N2, NR1, NR2, estimator)
    return result
def II_estimator(SS, RR, N1, N2, NR1, NR2, estimator='Natural'):
    r"""
    Apply the requested II estimator to the pair counts.

    Parameters
    ----------
    SS, RR : numpy.array
        marked data-data and random-random pair counts per radial bin.
    N1, N2 : int
        number of points in sample1/sample2.
    NR1, NR2 : int
        number of randoms corresponding to sample1/sample2.
    estimator : string, optional
        only the 'Natural' estimator is supported.

    Returns
    -------
    numpy.array
        the correlation function, (NR1*NR2)/(N1*N2) * SS/RR.
    """
    if estimator != 'Natural':
        msg = ('The estimator provided is not supported.')
        raise ValueError(msg)
    normalization = (NR1 * NR2) / (N1 * N2)
    return normalization * (SS / RR)
def II_estimator_requirements(estimator):
    r"""
    Return which pair counts the supplied II estimator needs.

    Parameters
    ----------
    estimator : string
        only the 'Natural' estimator is supported.

    Returns
    -------
    do_SS, do_RR : bool
        whether the data-data and random-random counts are required.
    """
    if estimator == 'Natural':
        # The Natural estimator needs both SS and RR.
        return True, True
    msg = ('The estimator provided is not supported.')
    raise ValueError(msg)
def marked_pair_counts(sample1, sample2, weights1, weights2, rbins, period,
                       num_threads, approx_cell1_size, approx_cell2_size):
    r"""
    Count marked pairs between ``sample1`` and ``sample2``.

    Parameters
    ----------
    sample1, sample2 : array_like
        Npts x 3 arrays of 3-D positions.
    weights1, weights2 : array_like
        Npts x 4 mark arrays: column 0 holds the (weighted)
        ellipticity, columns 1-3 the orientation vector components.
    rbins : array_like
        boundaries defining the radial bins.
    period : array_like
        periodic boundary conditions, or None.
    num_threads : int
        number of threads used by the pair counter.
    approx_cell1_size, approx_cell2_size : array_like
        mesh cell-size hints for sample1 and sample2 respectively.

    Returns
    -------
    SS : numpy.array
        marked pair counts in each radial bin (differenced cumulative
        counts).
    """
    # weight_func_id=5 selects the ellipticity-ellipticity (++)
    # marking function in positional_marked_npairs_3d.
    weight_func_id = 5
    # Bug fix: approx_cell2_size was previously passed
    # approx_cell1_size, so sample2's mesh hint was silently ignored.
    SS = positional_marked_npairs_3d(sample1, sample2, rbins, period=period,
        weights1=weights1, weights2=weights2, weight_func_id=weight_func_id,
        num_threads=num_threads, approx_cell1_size=approx_cell1_size,
        approx_cell2_size=approx_cell2_size)[0]
    # Convert cumulative counts within rbins into per-bin counts.
    SS = np.diff(SS, axis=0)

    return SS
def random_counts(randoms1, randoms2, ran_weights1, ran_weights2, rbins,
                  N1, N2, no_randoms, period,
                  PBCs, num_threads, approx_cell1_size, approx_cell2_size):
    r"""
    Count weighted random-random pairs.

    If random catalogs were supplied, pairs are counted directly with
    `marked_npairs_3d`. Otherwise analytic randoms are used: the
    expected pair count per radial shell is computed from the shell
    volume and the mean pair density (valid only with PBCs).
    """
    if no_randoms is False:
        RR = marked_npairs_3d(randoms1, randoms2, rbins,
            period=period, num_threads=num_threads, weight_func_id=1,
            weights1=ran_weights1, weights2=ran_weights2,
            approx_cell1_size=approx_cell1_size,
            approx_cell2_size=approx_cell2_size)
        # convert cumulative counts into per-bin counts
        # NOTE(review): this branch does not call .flatten() while the
        # analytic branch below does — confirm both return shapes are
        # accepted by the estimator.
        RR = np.diff(RR, axis=0)
        return RR
    else:
        # set 'number' of randoms;
        # setting Nran to Ndata makes normalization simple
        NR1 = N1
        NR2 = N2

        # do volume calculations
        v = nball_volume(rbins)  # volume enclosed by each rbin edge
        dv = np.diff(v, axis=0)  # volume of each radial shell
        global_volume = period.prod()

        # calculate the random-random pairs:
        # expected counts = mean pair density times shell volume
        rhor = (NR1*NR2)/global_volume
        RR = (dv*rhor)

        return RR.flatten()
def nball_volume(R, k=3):
    """
    Return the volume of a k-dimensional ball of radius ``R``.

    Used to construct the analytical randoms under periodic boundary
    conditions.

    Parameters
    ----------
    R : float or array_like
        radius (or array of radii, e.g. radial bin edges).
    k : int, optional
        dimension of the ball (default is 3).

    Returns
    -------
    volume : float or numpy.ndarray
        V = pi^(k/2) / Gamma(k/2 + 1) * R^k
    """
    prefactor = np.pi ** (k / 2.0) / gamma(k / 2.0 + 1.0)
    return prefactor * R ** k
def _ii_plus_3d_process_args(sample1, rbins, sample2, randoms1, randoms2,
                             period, num_threads, approx_cell1_size, approx_cell2_size):
    r"""
    Private method to do bounds-checking on the arguments passed to
    `~halotools.mock_observables.alignments.ii_plus_3d`.

    Returns the (possibly normalized) inputs plus the PBC flag and a
    boolean indicating whether analytic randoms should be used.
    """
    sample1 = enforce_sample_has_correct_shape(sample1)
    # NOTE(review): ``sample2`` is returned without shape enforcement —
    # confirm whether it should also pass through
    # enforce_sample_has_correct_shape.

    if randoms1 is not None:
        randoms1 = np.atleast_1d(randoms1)
        no_randoms1 = False
    else: no_randoms1 = True

    if randoms2 is not None:
        randoms2 = np.atleast_1d(randoms2)
        no_randoms2 = False
    else: no_randoms2 = True

    # if only one of the randoms is provided, raise an error
    no_randoms = True
    if no_randoms1:
        if no_randoms2 is False:
            msg = "if one set of randoms is provided, both randoms must be provided.\n"
            raise ValueError(msg)
    elif no_randoms2:
        if no_randoms1 is False:
            msg = "if one set of randoms is provided, both randoms must be provided.\n"
            raise ValueError(msg)
    else:
        no_randoms = False

    rbins = get_separation_bins_array(rbins)
    rmax = np.amax(rbins)

    period, PBCs = get_period(period)

    # the search radius must fit inside the periodic box
    _enforce_maximum_search_length([rmax, rmax, rmax], period)

    # without PBCs there are no analytic randoms, so real randoms are
    # mandatory
    if (randoms1 is None) & (PBCs is False):
        msg = "If no PBCs are specified, both randoms must be provided.\n"
        raise ValueError(msg)

    num_threads = get_num_threads(num_threads)

    return sample1, rbins, sample2, randoms1, randoms2, period, num_threads, PBCs, no_randoms
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@mock_observables@ia_correlations@ii_plus_3d.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "stephane-caron/qpsolvers",
"repo_path": "qpsolvers_extracted/qpsolvers-main/qpsolvers/utils.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: LGPL-3.0-or-later
# Copyright 2016-2022 Stéphane Caron and the qpsolvers contributors
"""Utility functions."""
from typing import Union
import numpy as np
import scipy.sparse as spa
def print_matrix_vector(
    A: Union[np.ndarray, spa.csc_matrix],
    A_label: str,
    b: np.ndarray,
    b_label: str,
    column_width: int = 24,
) -> None:
    """Print a matrix and vector side by side to the terminal.

    Parameters
    ----------
    A :
        Union[np.ndarray, spa.csc_matrix] to print.
    A_label :
        Label for A.
    b :
        np.ndarray to print.
    b_label :
        Label for b.
    column_width :
        Number of characters for the matrix and vector text columns.
    """
    # Normalize A to a dense 2-D array so its repr prints one row per
    # line (assumes rows are short enough not to wrap — TODO confirm).
    if isinstance(A, np.ndarray) and A.ndim == 1:
        A = A.reshape((1, A.shape[0]))
    if isinstance(A, spa.csc_matrix):
        A = A.toarray()
    if A.shape[0] == b.shape[0]:
        # Same number of rows: print both blocks as-is.
        A_string = f"{A_label} =\n{A}"
        b_string = f"{b_label} =\n{b.reshape((A.shape[0], 1))}"
    elif A.shape[0] > b.shape[0]:
        # A is taller: pad b's column with blank lines so row i of A
        # still lines up with row i of b.
        m = b.shape[0]
        A_string = f"{A_label} =\n{A[:m]}"
        b_string = f"{b_label} =\n{b.reshape(m, 1)}"
        A_string += f"\n{A[m:]}"
        b_string += "\n " * (A.shape[0] - m)
    else:  # A.shape[0] < b.shape[0]
        # b is taller: pad A's column instead.
        n = A.shape[0]
        k = b.shape[0] - n
        A_string = f"{A_label} =\n{A}"
        b_string = f"{b_label} =\n{b[:n].reshape(n, 1)}"
        A_string += "\n " * k
        b_string += f"\n{b[n:].reshape(k, 1)}"
    A_lines = A_string.splitlines()
    b_lines = b_string.splitlines()
    # Emit both columns left-justified to a fixed width; the padding
    # above is intended to keep len(b_lines) >= len(A_lines).
    for i, A_line in enumerate(A_lines):
        print(A_line.ljust(column_width) + b_lines[i].ljust(column_width))
|
stephane-caronREPO_NAMEqpsolversPATH_START.@qpsolvers_extracted@qpsolvers-main@qpsolvers@utils.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/autofit/database/model/__init__.py",
"type": "Python"
}
|
from .fit import *
from .instance import *
from .model import *
from .prior import *
from .array import *
from .compound import *
from .common import *
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@autofit@database@model@__init__.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "jpierel14/snsed",
"repo_path": "snsed_extracted/snsed-master/docs/source/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# snsedextend documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 28 14:09:47 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon'
]

# Use numpy-style (not Google-style) docstrings via napoleon.
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True

# NOTE(review): sphinx_gallery_conf is configured below, but
# 'sphinx_gallery.gen_gallery' is not listed in `extensions`, so these
# settings are currently inert — confirm whether the gallery extension
# should be enabled.
sphinx_gallery_conf = {
    #'examples_dirs': '_examples', # path to examples scripts
    #'gallery_dirs': 'examples', # path to gallery generated examples
    #'backreferences_dir': 'modules/generated', # path to store the module
    # using example template
    'doc_module': ('snsedextend',), # documented module(s)
    'download_section_examples': False,
    'download_all_examples': False # don't package up examples.
    #'default_thumb_file': '_logo/spectral_white_bkg.png',
}

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'snsedextend'
copyright = u'2018, J.R. Pierel'
author = u'J.R. Pierel'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2.7'
# The full version, including alpha/beta/rc tags.
release = u'0.2.7'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases expect a string here (e.g. 'en');
# None is deprecated — confirm against the targeted Sphinx version.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'emacs'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "relbarbgcolor": "#045A0B",
    "sidebarbgcolor":"#08A614",
    "sidebartextcolor":'white',
    "sidebarlinkcolor":'white',
}

# changes the sidebar so the whole toctree is there all the time.
html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'], }

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'snsedextenddoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'snsedextend.tex', u'snsedextend Documentation',
     u'J.R. Pierel', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'snsedextend', u'snsedextend Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'snsedextend', u'snsedextend Documentation',
     author, 'snsedextend', 'One line description of project.',
     'Miscellaneous'),
]
|
jpierel14REPO_NAMEsnsedPATH_START.@snsed_extracted@snsed-master@docs@source@conf.py@.PATH_END.py
|
{
"filename": "_row.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/table/domain/_row.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class RowValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the `row` property of `table.domain`
    (minimum defaults to 0)."""

    def __init__(self, plotly_name="row", parent_name="table.domain", **kwargs):
        # Fill in plotly defaults unless the caller supplied overrides.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "info")
        super(RowValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@table@domain@_row.py@.PATH_END.py
|
{
"filename": "Util.py",
"repo_name": "Keck-DataReductionPipelines/MosfireDRP",
"repo_path": "MosfireDRP_extracted/MosfireDRP-master/MOSFIRE/Util.py",
"type": "Python"
}
|
Keck-DataReductionPipelinesREPO_NAMEMosfireDRPPATH_START.@MosfireDRP_extracted@MosfireDRP-master@MOSFIRE@Util.py@.PATH_END.py
|
|
{
"filename": "data.py",
"repo_name": "ukatc/AtLAST_sensitivity_calculator",
"repo_path": "AtLAST_sensitivity_calculator_extracted/AtLAST_sensitivity_calculator-main/atlast_sc/data.py",
"type": "Python"
}
|
import math
from dataclasses import dataclass
import astropy.units as u
from astropy.units import Unit, Quantity
from atlast_sc.utils import DataHelper
from atlast_sc.exceptions import UnitException, ValueOutOfRangeException,\
ValueNotAllowedException, ValueTooHighException, ValueTooLowException
class Data:
    """
    Default values, default units, allowed units, allowed ranges and/or values
    for the parameters used by the sensitivity calculator.
    """

    @dataclass
    class DataType:
        """Descriptor for one calculator parameter.

        Holds the default value/unit, an optional permitted range (bounds may
        be made exclusive with the ``*_is_floor``/``*_is_ceil`` flags), an
        optional list of discrete allowed values, and the units the parameter
        may be expressed in. Consistency is checked in ``__post_init__`` at
        class-definition time for each of the instances below.
        """
        default_value: float = None
        default_unit: str = None
        lower_value: float = None
        lower_value_is_floor: bool = False
        upper_value: float = None
        upper_value_is_ceil: bool = False
        allowed_values: list = None
        units: list[str] = None
        data_conversion: dict = None

        def __post_init__(self):
            # Make sure the default value is not infinity
            assert not math.isinf(self.default_value)

            # If there's a lower value, make sure there's also an upper value.
            # BUGFIX: compare against None explicitly. A legitimate lower
            # bound of 0 (e.g. sensitivity, bandwidth) is falsy, so the
            # previous truthiness test silently skipped every check below
            # for those parameters.
            if self.lower_value is not None:
                assert self.upper_value is not None
                # Make sure the default value is within the permitted range
                if not self.lower_value_is_floor:
                    assert self.default_value >= self.lower_value
                else:
                    assert self.default_value > self.lower_value
                # (Handle infinity differently)
                if not math.isinf(self.upper_value):
                    # Make sure the upper value is greater than the lower value
                    assert self.upper_value > self.lower_value
                    if not self.upper_value_is_ceil:
                        assert self.default_value <= self.upper_value
                    else:
                        assert self.default_value < self.upper_value
                # Make sure an allowed values list has not also been specified
                assert self.allowed_values is None
                # Make sure the lower value is not infinity
                assert not math.isinf(self.lower_value)

            # If there's an upper value, make sure there's also a lower value
            # (explicit None check for the same reason as above).
            if self.upper_value is not None:
                assert self.lower_value is not None
                # Make sure an allowed values list has not also been specified
                assert self.allowed_values is None

            if self.units:
                # Make sure all the units are valid astropy units
                Unit(self.default_unit)
                for unit in self.units:
                    Unit(unit)
                # Make sure the default unit is in the list of allowed units
                assert self.default_unit in self.units

            # If a list of allowed values has been provided, make sure the
            # default value is one of these
            if self.allowed_values:
                assert self.default_value in self.allowed_values

            # If the data type has a list of allowed units, evaluate the
            # conversion factors between allowed units and the default unit
            if self.units:
                self.data_conversion = DataHelper.data_conversion_factors(
                    self.default_unit,
                    self.units
                )

    integration_time = DataType(
        default_value=100,
        default_unit=str(u.s),
        lower_value=1,
        upper_value=float('inf'),
        upper_value_is_ceil=True,
        units=[str(u.s), str(u.min), str(u.h)],
    )
    sensitivity = DataType(
        default_value=3.0,
        default_unit=str(u.mJy),
        lower_value=0,
        lower_value_is_floor=True,
        upper_value=float('inf'),
        upper_value_is_ceil=True,
        units=[str(u.uJy), str(u.mJy), str(u.Jy)],
    )
    # TODO: include km/s. Will have to provide suitable conversion logic
    bandwidth = DataType(
        default_value=100,
        default_unit=str(u.MHz),
        lower_value=0,
        lower_value_is_floor=True,
        upper_value=float('inf'),
        upper_value_is_ceil=True,
        units=[str(u.Hz), str(u.kHz), str(u.MHz), str(u.GHz)],
    )
    # Sky frequency of the observations
    obs_frequency = DataType(
        default_value=100,
        default_unit=str(u.GHz),
        lower_value=35,
        upper_value=950,
        units=[str(u.GHz)]
    )
    # Number of polarisations being observed
    n_pol = DataType(
        default_value=2,
        allowed_values=[1, 2]
    )
    # Relative Humidity (related to PWV, and ALMA weather bands as described
    # in the 'Weather Conditions' section of the user guide
    weather = DataType(
        default_value=25,
        lower_value=5,
        upper_value=95
    )
    # Elevation of the target for calculating air mass
    elevation = DataType(
        default_value=45,
        default_unit=str(u.deg),
        lower_value=25,
        upper_value=85,
        units=[str(u.deg)]
    )
    # Sideband Ratio - 0 for SSB and 2SB receivers, 1 for DSB receivers
    g = DataType(
        default_value=0
    )
    # surface smoothness, set to 25 micron to be consistent with OHB
    # design requirements
    surface_rms = DataType(
        default_value=25,
        default_unit=str(u.micron)
    )
    # radius of the primary mirror
    dish_radius = DataType(
        default_value=25,
        default_unit=str(u.m),
        lower_value=1,
        upper_value=50,
        units=[str(u.m)]
    )
    # Average ambient Temperature
    t_amb = DataType(
        default_value=270,
        default_unit=str(u.K)
    )
    # Forward Efficiency - 0.95 based on ALMA Memo 602(https://library.nrao.edu/public/memos/alma/main/memo602.pdf), page 8
    eta_eff = DataType(
        default_value=0.95
    )
    # Illumination Efficiency
    eta_ill = DataType(
        default_value=0.8
    )
    # Spillover Efficiency
    eta_spill = DataType(
        default_value=0.95
    )
    # Lowered efficiency due to blocking
    eta_block = DataType(
        default_value=0.94
    )
    # Polarisation Efficiency
    eta_pol = DataType(
        default_value=0.995
    )
    # Temperature of the CMB
    t_cmb = DataType(
        default_value=2.726,
        default_unit=str(u.K)
    )

    # Lookup from calculator parameter name to its DataType descriptor.
    param_data_type_dicts = {
        't_int': integration_time,
        'sensitivity': sensitivity,
        'bandwidth': bandwidth,
        'obs_freq': obs_frequency,
        'n_pol': n_pol,
        'weather': weather,
        'elevation': elevation,
        'g': g,
        'surface_rms': surface_rms,
        'dish_radius': dish_radius,
        'T_amb': t_amb,
        'eta_eff': eta_eff,
        'eta_ill': eta_ill,
        'eta_spill': eta_spill,
        'eta_block': eta_block,
        'eta_pol': eta_pol,
        'T_cmb': t_cmb,
    }
class Validator:
    """
    Class providing custom validation functions
    """

    @staticmethod
    def validate_field(key, val):
        """Validate *val* for the parameter named *key*.

        Checks units (for Quantities), allowed values, and the permitted
        range, in that order. Raises UnitException,
        ValueNotAllowedException, or ValueOutOfRangeException (including its
        ValueTooLowException/ValueTooHighException variants).

        Note: the previous implementation wrapped each call in a
        `try/except ... as e: raise e` block, which is a no-op; exceptions
        now simply propagate.
        """
        data_type = Data.param_data_type_dicts[key]

        if isinstance(val, Quantity):
            # Validate units, then compare against the allowed values/range
            # in the parameter's default unit.
            Validator.validate_units(val.unit, key, data_type)
            value_to_validate = val.to(Unit(data_type.default_unit)).value
        else:
            value_to_validate = val

        Validator.validate_allowed_values(value_to_validate, key, data_type)
        Validator.validate_in_range(value_to_validate, key, data_type)

    @staticmethod
    def validate_units(unit, param, data_type):
        """Raise UnitException if *unit* is not an allowed unit for *param*."""
        # Don't need to check the units if the data type is unit-less
        if data_type.units is None:
            return
        if unit not in data_type.units:
            raise UnitException(param, data_type.units)

    @staticmethod
    def validate_in_range(value, param, data_type):
        """Raise a range exception if *value* lies outside the permitted range."""
        # Don't need to check the value is in the permitted range if
        # there is no range specified
        if data_type.lower_value is None:
            return
        # If the lower value is a floor value, make sure the provided value
        # is greater than this
        if data_type.lower_value_is_floor:
            if value <= data_type.lower_value:
                raise ValueTooLowException(param, data_type.lower_value,
                                           data_type.default_unit)
        # Do a special check for infinity (unlikely scenario, but not
        # impossible...)
        if math.isinf(value):
            raise ValueTooHighException(param, data_type.upper_value,
                                        data_type.default_unit)
        # If the upper value is a ceiling value, make sure the provided value
        # is less than this
        if data_type.upper_value_is_ceil:
            if value >= data_type.upper_value:
                raise ValueTooHighException(param, data_type.upper_value,
                                            data_type.default_unit)
        if not (data_type.lower_value <= value <= data_type.upper_value):
            raise ValueOutOfRangeException(param,
                                           data_type.lower_value,
                                           data_type.upper_value,
                                           data_type.default_unit)

    @staticmethod
    def validate_allowed_values(value, param, data_type):
        """Raise ValueNotAllowedException if *value* is not an allowed value."""
        # Don't need to check the value is allowed if there are no
        # allowed values specified
        if data_type.allowed_values is None:
            return
        if value not in data_type.allowed_values:
            raise ValueNotAllowedException(param,
                                           data_type.allowed_values,
                                           data_type.default_unit)
|
ukatcREPO_NAMEAtLAST_sensitivity_calculatorPATH_START.@AtLAST_sensitivity_calculator_extracted@AtLAST_sensitivity_calculator-main@atlast_sc@data.py@.PATH_END.py
|
{
"filename": "coroutine_subprocess_poc.py",
"repo_name": "SAMI-Galaxy-Survey/sami",
"repo_path": "sami_extracted/sami-master/coroutine_subprocess_poc.py",
"type": "Python"
}
|
"""
git-repository: coroutine_subprocess.py
Based on: https://stackoverflow.com/questions/34020599/asynchronously-receive-output-from-long-running-shell-commands-with-asyncio-pyt
History
-------
Created by: agreen on 13/2/18
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import Any
import asyncio
import logging.config
log = logging.getLogger(__name__)  # module-level logger
logging_level = logging.WARNING  # level fed into the dictConfig below
# Logging configuration at end of file.
import sys
import time
from asyncio.subprocess import PIPE, STDOUT, DEVNULL
async def inner_layer(shell_command):
    """Run *shell_command* in a subprocess and return its combined
    stdout/stderr output as a list of byte strings (one per line)."""
    proc = await asyncio.create_subprocess_shell(
        shell_command, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    stdout_data, _ = await proc.communicate()
    return stdout_data.splitlines()
async def get_lines(shell_command):
    """Thin wrapper forwarding to :func:`inner_layer`."""
    return await inner_layer(shell_command)
async def main():
    """Launch several slow shell commands concurrently and print the
    captured output of each once all have completed."""
    cmd_template = '"{e}" -c "print({i:d}); import time; time.sleep({i:d})"'
    coros = [get_lines(cmd_template.format(i=i+3, e=sys.executable))
             for i in reversed(range(5))]
    results = await asyncio.gather(*coros)
    for lines in results:
        print(lines)
# Apply the logging configuration *before* running anything: in the original
# file this dictConfig call sat below the __main__ block, so the settings
# only took effect after the event loop had already finished.
logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(filename)s:%(lineno)s %(funcName)s: %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        # NOTE(review): this configures the 'sds' logger tree, while the
        # module logger above is named __name__ — confirm 'sds' is intended.
        'sds': {
            'handlers': ['console'],
            'level': logging_level
        }
    }
})

if __name__ == "__main__":
    start_time = time.time()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.close()
    print("Took total wall time of {} seconds".format(time.time() - start_time))
|
SAMI-Galaxy-SurveyREPO_NAMEsamiPATH_START.@sami_extracted@sami-master@coroutine_subprocess_poc.py@.PATH_END.py
|
{
"filename": "_showscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/barpolar/marker/_showscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for `showscale` on `barpolar.marker`."""

    def __init__(
        self, plotly_name="showscale", parent_name="barpolar.marker", **kwargs
    ):
        # Fill in plotly defaults unless the caller supplied overrides.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "info")
        super(ShowscaleValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@barpolar@marker@_showscale.py@.PATH_END.py
|
{
"filename": "_textcasesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattersmith/textfont/_textcasesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcasesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for `textcasesrc` on `scattersmith.textfont`."""

    def __init__(
        self, plotly_name="textcasesrc", parent_name="scattersmith.textfont", **kwargs
    ):
        # Fill in the plotly default unless the caller supplied an override.
        kwargs.setdefault("edit_type", "none")
        super(TextcasesrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattersmith@textfont@_textcasesrc.py@.PATH_END.py
|
{
"filename": "n0_fft.py",
"repo_name": "NextGenCMB/lensitbiases",
"repo_path": "lensitbiases_extracted/lensitbiases-main/lensitbiases/n0_fft.py",
"type": "Python"
}
|
import os
import numpy as np
from lensitbiases.utils_n1 import extcl, cls_dot
from lensitbiases.box import box
import pyfftw
class nhl_fft:
    # NOTE(review): computes unnormalized QE (quadratic estimator) noise
    # levels / response-type spectra with FFTs on a periodic flat-sky box;
    # see the method docstrings below for the supported estimator keys.
    def __init__(self, cls_ivfs, cls_w, lminbox=50, lmaxbox=2500, k2l=None, cls_w2=None):
        """
            Note:
                for a response calculation set cls_w to the QE qeweights cls, and cls_w2 to the sky response cls (lencls or gradcls typically),
                and ivfs to the filtering matrix B^t Cov^{-1} B cls (fals)

        """
        # Box side length set by the smallest multipole; pixel count spans up
        # to ~lmaxbox and is forced even (rfft-friendly grid).
        lside = 2. * np.pi / lminbox
        npix = int(2 * lmaxbox / float(lminbox)) + 1
        if npix % 2 == 1: npix += 1
        # ===== instance with 2D flat-sky box info
        self.box = box(lside, npix, k2l=k2l)
        self.shape = self.box.shape
        # === Filter and cls array needed later on:
        # Spectra are extended/truncated to the box's maximal multipole.
        cls_ivfs = {k: extcl(self.box.lmaxbox + int(self.box.lminbox) + 1, cls_ivfs[k]) for k in cls_ivfs.keys()}  # filtered maps spectra
        cls_w1 = {k: extcl(self.box.lmaxbox + int(self.box.lminbox) + 1, cls_w[k]) for k in cls_w.keys()}  # estimator weights spectra
        if cls_w2 is None:
            cls_w2 = cls_w1
        else:
            cls_w2 = {k: extcl(self.box.lmaxbox + int(self.box.lminbox) + 1, cls_w2[k]) for k in cls_w2.keys()}  # second estimator weights spectra
        K_ls, Kw1_ls, w2K_ls, wKw_sym_ls = self._build_cl_ls(cls_ivfs, cls_w1, cls_w2)
        self.K_ls = K_ls
        self.Kw1_ls = Kw1_ls
        self.w2K_ls = w2K_ls
        self.wKw_sym_ls = wKw_sym_ls
        # We need the symmetric part only of this (there is a trace against symmetric K)
        self.cls_w1 = cls_w1
        self.cls_w2 = cls_w2
        self.cls_ivfs = cls_ivfs
        self._cos2p_sin2p = None
        # === normalization (for lensing keys at least)
        norm = (self.box.shape[0] / self.box.lsides[0]) ** 2  # overall final normalization from rfft'ing
        norm *= (float(self.box.lminbox)) ** 4
        self.norm = norm

    @staticmethod
    def _build_cl_ls(cls_ivfs, cls_w1, cls_w2):
        # Precompute the matrix-product spectra entering the N0 integrand:
        # K, K.w1, w2.K and the symmetrized w1.K.w2.
        K_ls = cls_dot([cls_ivfs])
        Kw1_ls = cls_dot([cls_ivfs, cls_w1])
        w2K_ls = cls_dot([cls_w2, cls_ivfs])
        wKw_sym_ls = 0.5 * (cls_dot([cls_w1, cls_ivfs, cls_w2]) + cls_dot([cls_w2, cls_ivfs, cls_w1]))
        # We need the symmetric part only of this (there is a trace against symmetric K)
        return K_ls, Kw1_ls, w2K_ls, wKw_sym_ls

    def _ifft2(self, rm):
        # Inverse rfft2 via pyfftw (threads from OMP_NUM_THREADS); handles a
        # single 2d map or a stack of them.
        oshape = self.box.shape if rm.ndim == 2 else (rm.shape[0], self.box.shape[0], self.box.shape[1])
        inpt = pyfftw.empty_aligned(rm.shape, dtype='complex128')
        outp = pyfftw.empty_aligned(oshape, dtype='float64')
        ifft2 = pyfftw.FFTW(inpt, outp, axes=(-2, -1), direction='FFTW_BACKWARD', threads=int(os.environ.get('OMP_NUM_THREADS', 1)))
        return ifft2(pyfftw.byte_align(rm, dtype='complex128'))

    def get_nhl_2d(self, k, _pyfftw=True):
        """Returns unormalized QE noise for each and every 2d multipole on the flat-sky box

            Note:
                On a square-periodic flat-sky box there can be tiny differences of N0(L) for same |L|

                No attempt is at optimization. see get_nhl method for much faster N0 array calculation

        """
        X2i = {'T': 0, 'E': 1, 'B': 2}
        ny, nx = np.meshgrid(self.box.ny_1d, self.box.nx_1d, indexing='ij')
        ls = self.box.ls()
        ir2 = self._ifft2 if _pyfftw else np.fft.irfft2
        # Stokes components and field spectra entering the chosen key.
        Ss = ['T'] * (k in ['ptt', 'p']) + ['Q', 'U'] * (k in ['p_p', 'p'])
        Ts = ['T'] * (k in ['ptt', 'p']) + ['Q', 'U'] * (k in ['p_p', 'p'])
        XYs = ['TT'] * (k in ['ptt', 'p']) + ['EE', 'BB'] * (k in ['p_p', 'p']) + ['ET', 'TE'] * (k == 'p')
        Fs = np.zeros((3, self.box.shape[0], self.box.shape[1]), dtype=float)  # 00, 11 and 01 components
        for i, S in enumerate(Ss):  # diag and off-diag
            for T in Ts[i:]:
                K = np.zeros(self.box.rshape, dtype=complex)
                wKw_sym_11 = np.zeros(self.box.rshape, dtype=complex)
                wKw_sym_00 = np.zeros(self.box.rshape, dtype=complex)
                wKw_sym_01 = np.zeros(self.box.rshape, dtype=complex)
                w2K_1 = np.zeros(self.box.rshape, dtype=complex)
                Kw1_1 = np.zeros(self.box.rshape, dtype=complex)
                w2K_0 = np.zeros(self.box.rshape, dtype=complex)
                Kw1_0 = np.zeros(self.box.rshape, dtype=complex)
                for XY in XYs:  # TT, TE, ET, EE, BB for MV or SQE
                    X,Y = XY
                    fac = self.box.X2S(S, X) * self.box.X2S(T, Y)
                    if np.any(fac):
                        if S != T: fac *= np.sqrt(2.)# off-diagonal terms come with factor of 2
                        # NOTE(review): this rebinds the outer loop index `i`;
                        # harmless because Ts[i:] was already evaluated, but fragile.
                        i = X2i[X]; j = X2i[Y]
                        K += self.K_ls [i, j][ls] * fac
                        wKw_sym_00 += -1 * self.wKw_sym_ls[i, j][ls] * ny * ny * fac
                        wKw_sym_11 += -1 * self.wKw_sym_ls[i, j][ls] * nx * nx * fac
                        wKw_sym_01 += -1 * self.wKw_sym_ls[i, j][ls] * nx * ny * fac
                        Kw1_0 += 1j * self.Kw1_ls [i, j][ls] * ny * fac
                        Kw1_1 += 1j * self.Kw1_ls [i, j][ls] * nx * fac
                        w2K_0 += 1j * self.w2K_ls [i, j][ls] * ny * fac
                        w2K_1 += 1j * self.w2K_ls [i, j][ls] * nx * fac
                ir2K = ir2(K)
                Fs[0] += ir2K * ir2(wKw_sym_00) + ir2(Kw1_0) * ir2(w2K_0)
                Fs[1] += ir2K * ir2(wKw_sym_11) + ir2(Kw1_1) * ir2(w2K_1)
                Fs[2] += ir2K * ir2(wKw_sym_01) + ir2(Kw1_0) * ir2(w2K_1)
        Fyy, Fxx, Fxy = np.fft.rfft2(Fs).real
        n0_2d_gg = ny ** 2 * Fyy + nx ** 2 * Fxx + 2 * nx * ny * Fxy  # lensing gradient
        n0_2d_cc = nx ** 2 * Fyy + ny ** 2 * Fxx - 2 * nx * ny * Fxy  # lensing curl
        return - self.norm * np.array([n0_2d_gg, n0_2d_cc])

    def get_nhl_ds_2d(self, k, cls_ivfs_dd, _pyfftw=True):
        r"""Returns unormalized QE noise for each and every 2d multipole on the flat-sky box

            This returns the 'ds' unnormalized expectation (where data spectra do not match sims spectra)
            See e.g. Planck papers. For d~s, twice the output is ~ N0

                ds ~ 1/2 (\bar X S_WF + \bar S X_WF)

            Note:
                On a square-periodic flat-sky box there can be tiny differences of N0(L) for same |L|

                No attempt is at optimization. see get_nhl method for much faster N0 array calculation

        """
        X2i = {'T': 0, 'E': 1, 'B': 2}
        ny, nx = np.meshgrid(self.box.ny_1d, self.box.nx_1d, indexing='ij')
        ls = self.box.ls()
        ir2 = self._ifft2 if _pyfftw else np.fft.irfft2
        Ss = ['T'] * (k in ['ptt', 'p']) + ['Q', 'U'] * (k in ['p_p', 'p'])
        Ts = ['T'] * (k in ['ptt', 'p']) + ['Q', 'U'] * (k in ['p_p', 'p'])
        XYs = ['TT'] * (k in ['ptt', 'p']) + ['EE', 'BB'] * (k in ['p_p', 'p']) + ['ET', 'TE'] * (k == 'p')
        Fs = np.zeros((4, self.box.shape[0], self.box.shape[1]), dtype=float)  # 00, 11 and 01 10 components
        # 's' products use the instance (sim) ivfs spectra, 'd' products the data ones.
        Ks_ls, Ksw_ls, wKs_ls, wKsw_sym_ls = self._build_cl_ls(self.cls_ivfs, self.cls_w1, self.cls_w2)
        Kd_ls, Kdw_ls, wKd_ls, wKdw_sym_ls = self._build_cl_ls(cls_ivfs_dd, self.cls_w1, self.cls_w2)
        for i, S in enumerate(Ss):  # diag and off-diag
            for T in Ts:  # not certain about syms in all cases, dropping this
                Kd = np.zeros(self.box.rshape, dtype=complex)
                Ks = np.zeros(self.box.rshape, dtype=complex)
                wKdw_sym_11 = np.zeros(self.box.rshape, dtype=complex)
                wKdw_sym_00 = np.zeros(self.box.rshape, dtype=complex)
                wKdw_sym_01 = np.zeros(self.box.rshape, dtype=complex)
                wKsw_sym_11 = np.zeros(self.box.rshape, dtype=complex)
                wKsw_sym_00 = np.zeros(self.box.rshape, dtype=complex)
                wKsw_sym_01 = np.zeros(self.box.rshape, dtype=complex)
                wKd_1 = np.zeros(self.box.rshape, dtype=complex)
                Kdw_1 = np.zeros(self.box.rshape, dtype=complex)
                wKd_0 = np.zeros(self.box.rshape, dtype=complex)
                Kdw_0 = np.zeros(self.box.rshape, dtype=complex)
                wKs_1 = np.zeros(self.box.rshape, dtype=complex)
                Ksw_1 = np.zeros(self.box.rshape, dtype=complex)
                wKs_0 = np.zeros(self.box.rshape, dtype=complex)
                Ksw_0 = np.zeros(self.box.rshape, dtype=complex)
                for XY in XYs:  # TT, TE, ET, EE, BB for MV or SQE
                    X,Y = XY
                    fac = self.box.X2S(S, X) * self.box.X2S(T, Y)
                    if np.any(fac):
                        #if S != T: fac *= np.sqrt(2.)# off-diagonal terms come with factor of 2
                        i = X2i[X]; j = X2i[Y]
                        Kd += Kd_ls [i, j][ls] * fac
                        wKdw_sym_00 += -1 * wKdw_sym_ls[i, j][ls] * ny * ny * fac
                        wKdw_sym_11 += -1 * wKdw_sym_ls[i, j][ls] * nx * nx * fac
                        wKdw_sym_01 += -1 * wKdw_sym_ls[i, j][ls] * nx * ny * fac
                        Ks += Ks_ls [i, j][ls] * fac
                        wKsw_sym_00 += -1 * wKsw_sym_ls[i, j][ls] * ny * ny * fac
                        wKsw_sym_11 += -1 * wKsw_sym_ls[i, j][ls] * nx * nx * fac
                        wKsw_sym_01 += -1 * wKsw_sym_ls[i, j][ls] * nx * ny * fac
                        Kdw_0 += 1j * Kdw_ls [i, j][ls] * ny * fac
                        Kdw_1 += 1j * Kdw_ls [i, j][ls] * nx * fac
                        wKd_0 += 1j * wKd_ls [i, j][ls] * ny * fac
                        wKd_1 += 1j * wKd_ls [i, j][ls] * nx * fac
                        Ksw_0 += 1j * Ksw_ls [i, j][ls] * ny * fac
                        Ksw_1 += 1j * Ksw_ls [i, j][ls] * nx * fac
                        wKs_0 += 1j * wKs_ls [i, j][ls] * ny * fac
                        wKs_1 += 1j * wKs_ls [i, j][ls] * nx * fac
                ir2Kd = ir2(Kd)
                ir2Ks = ir2(Ks)
                Fs[0] += ir2Kd * ir2(wKsw_sym_00) + ir2(Kdw_0) * ir2(wKs_0)
                Fs[0] += ir2Ks * ir2(wKdw_sym_00) + ir2(Ksw_0) * ir2(wKd_0)
                Fs[1] += ir2Kd * ir2(wKsw_sym_11) + ir2(Kdw_1) * ir2(wKs_1)
                Fs[1] += ir2Ks * ir2(wKdw_sym_11) + ir2(Ksw_1) * ir2(wKd_1)
                Fs[2] += ir2Kd * ir2(wKsw_sym_01) + ir2(Kdw_0) * ir2(wKs_1)
                Fs[2] += ir2Ks * ir2(wKdw_sym_01) + ir2(Ksw_0) * ir2(wKd_1)
                Fs[3] += ir2Kd * ir2(wKsw_sym_01) + ir2(Kdw_1) * ir2(wKs_0)
                Fs[3] += ir2Ks * ir2(wKdw_sym_01) + ir2(Ksw_1) * ir2(wKd_0)
        Fyy, Fxx, Fxy, Fyx = np.fft.rfft2(Fs).real
        n0_2d_gg = ny ** 2 * Fyy + nx ** 2 * Fxx + nx * ny * (Fxy + Fyx)  # lensing gradient
        n0_2d_cc = nx ** 2 * Fyy + ny ** 2 * Fxx - nx * ny * (Fxy + Fyx)  # lensing curl
        return - 0.25 * self.norm * np.array([n0_2d_gg, n0_2d_cc])

    def get_nhl(self, k, _pyfftw=True):
        r"""Returns unormalized-QE noise for multipole along an axis of the box

            Args:
                k: QE key. Here only 'ptt', 'p_p' and 'p' are supported, for TT, P-only and 'MV' estimators.
                _pyfftw: uses pfttw FFT's by default, falls back to numpy ffts if unset

            Note:
                Depending on the weight and filtered CMB spectra given as input to the instance the output
                matches the 'GMV' or 'SQE' estimator

            Note:
                This assumes (but does not for) that for all spectra :math:`C_\ell^{TB} = C_\ell^{EB} = 0`

            Returns:
                *Unormalized* QE Gaussian noise level, for the lensing gradient and lensing curl mode

            Note:
                To get the true :math:`N_L^{(0)}` this must be multiplied by the normalization (inverse response)
                applied to the estimator, often called :math:`A_L` or :math:`\frac{1}{\mathcal R_L}`

            This uses 1-dimensional rfft on a subset of the terms used for the 2D map

        """
        X2i = {'T': 0, 'E': 1, 'B': 2}
        Ss = ['T'] * (k in ['ptt', 'p']) + ['Q', 'U'] * (k in ['p_p', 'p'])
        Ts = ['T'] * (k in ['ptt', 'p']) + ['Q', 'U'] * (k in ['p_p', 'p'])
        XYs = ['TT'] * (k in ['ptt', 'p']) + ['EE', 'BB'] * (k in ['p_p', 'p']) + ['ET', 'TE'] * (k == 'p')
        ir2 = self._ifft2 if _pyfftw else np.fft.irfft2
        nx = self.box.nx_1d
        ls = self.box.ls()
        Fxx = np.zeros(self.box.shape, dtype=float)  # only the 11 (xx) component is needed for the 1d reduction
        for i, S in enumerate(Ss):  # off-diag
            for T in Ts[i:]:
                K = np.zeros(self.box.rshape, dtype=complex)
                wKw_sym_11 = np.zeros(self.box.rshape, dtype=complex)
                w2K_1 = np.zeros(self.box.rshape, dtype=complex)
                Kw1_1 = np.zeros(self.box.rshape, dtype=complex)
                for X, Y in XYs:
                    # NOTE(review): rebinds the outer loop index `i`; harmless
                    # because Ts[i:] was already evaluated, but fragile.
                    i = X2i[X]; j = X2i[Y]
                    fac = self.box.X2S(S, X) * self.box.X2S(T, Y) # off-diagonal terms come with factor of 2
                    if np.any(fac):
                        if S != T:
                            fac *= np.sqrt(2.)
                        K += self.K_ls [i, j][ls] * fac
                        wKw_sym_11 += self.wKw_sym_ls[i, j][ls] * (-1 * (nx ** 2)) * fac
                        Kw1_1 += self.Kw1_ls [i, j][ls] * (1j * nx) * fac
                        w2K_1 += self.w2K_ls [i, j][ls] * (1j * nx) * fac
                Fxx += (ir2(K) * ir2(wKw_sym_11) + ir2(Kw1_1) * ir2(w2K_1))
        # 1d fft method using only F11
        n0_gg = self.box.nx_1d ** 2 * np.sum(np.fft.rfft(Fxx, axis=1).real, axis=0)  # lensing gradient n0
        n0_cc = self.box.nx_1d ** 2 * np.sum(np.fft.rfft(Fxx, axis=0).real, axis=1)  # lensing curl n0
        return -self.norm * np.array([n0_gg, n0_cc]), np.abs(self.box.nx_1d) * self.box.lminbox
|
NextGenCMBREPO_NAMElensitbiasesPATH_START.@lensitbiases_extracted@lensitbiases-main@lensitbiases@n0_fft.py@.PATH_END.py
|
{
"filename": "pspnet.py",
"repo_name": "qubvel/segmentation_models",
"repo_path": "segmentation_models_extracted/segmentation_models-master/segmentation_models/models/pspnet.py",
"type": "Python"
}
|
from keras_applications import get_submodules_from_kwargs
from ._common_blocks import Conv2dBn
from ._utils import freeze_model, filter_keras_submodules
from ..backbones.backbones_factory import Backbones
# Keras submodule handles; these module-level globals are rebound by
# `get_submodules_from_kwargs` when the model constructor runs (see the
# `global backend, layers, models, keras_utils` statement in PSPNet).
backend = None
layers = None
models = None
keras_utils = None
# ---------------------------------------------------------------------
# Utility functions
# ---------------------------------------------------------------------
def get_submodules():
    """Bundle the currently-bound Keras submodules into a kwargs dict."""
    return dict(backend=backend, models=models, layers=layers, utils=keras_utils)
def check_input_shape(input_shape, factor):
    """Validate that H and W are defined and multiples of ``factor * 6``."""
    if input_shape is None:
        raise ValueError("Input shape should be a tuple of 3 integers, not None!")

    if backend.image_data_format() == 'channels_last':
        h, w = input_shape[:2]
    else:
        h, w = input_shape[1:]

    min_size = factor * 6
    if h < min_size or w < min_size or h % min_size or w % min_size:
        raise ValueError('Wrong shape {}, input H and W should '.format(input_shape) +
                         'be divisible by `{}`'.format(min_size))
# ---------------------------------------------------------------------
# Blocks
# ---------------------------------------------------------------------
def Conv1x1BnReLU(filters, use_batchnorm, name=None):
    """Return a callable applying a 1x1 Conv2dBn block with ReLU activation."""
    submodules = get_submodules()

    def wrapper(input_tensor):
        conv = Conv2dBn(
            filters,
            kernel_size=1,
            activation='relu',
            kernel_initializer='he_uniform',
            padding='same',
            use_batchnorm=use_batchnorm,
            name=name,
            **submodules
        )
        return conv(input_tensor)

    return wrapper
def SpatialContextBlock(
        level,
        conv_filters=512,
        pooling_type='avg',
        use_batchnorm=True,
):
    """Build one PSP pyramid branch: pool to ~level x level, 1x1 conv,
    then bilinearly upsample back to the input spatial size."""
    if pooling_type not in ('max', 'avg'):
        raise ValueError('Unsupported pooling type - `{}`.'.format(pooling_type) +
                         'Use `avg` or `max`.')

    pooling_name = 'psp_level{}_pooling'.format(level)
    conv_block_name = 'psp_level{}'.format(level)
    upsampling_name = 'psp_level{}_upsampling'.format(level)

    def wrapper(input_tensor):
        if pooling_type == 'max':
            Pooling2D = layers.MaxPool2D
        else:
            Pooling2D = layers.AveragePooling2D

        # Spatial size (h, w) of the incoming feature map.
        input_shape = backend.int_shape(input_tensor)
        if backend.image_data_format() == 'channels_last':
            spatial_size = input_shape[1:3]
        else:
            spatial_size = input_shape[2:]

        # Equal kernel and stride shrink the map to roughly level x level
        # (final sizes 1x1, 2x2, 3x3 and 6x6); the same factor restores it.
        pool_size = [spatial_size[0] // level, spatial_size[1] // level]
        up_size = pool_size

        out = Pooling2D(pool_size, strides=pool_size, padding='same', name=pooling_name)(input_tensor)
        out = Conv1x1BnReLU(conv_filters, use_batchnorm, name=conv_block_name)(out)
        out = layers.UpSampling2D(up_size, interpolation='bilinear', name=upsampling_name)(out)
        return out

    return wrapper
# ---------------------------------------------------------------------
# PSP Decoder
# ---------------------------------------------------------------------
def build_psp(
        backbone,
        psp_layer_idx,
        pooling_type='avg',
        conv_filters=512,
        use_batchnorm=True,
        final_upsampling_factor=8,
        classes=21,
        activation='softmax',
        dropout=None,
):
    """Attach the PSP decoder (spatial pyramid + head) to *backbone* and
    return the assembled ``keras.models.Model``."""
    input_ = backbone.input
    if isinstance(psp_layer_idx, str):
        x = backbone.get_layer(name=psp_layer_idx).output
    else:
        x = backbone.get_layer(index=psp_layer_idx).output

    # Spatial pyramid over pooling levels 1, 2, 3 and 6.
    pyramid = [
        SpatialContextBlock(level, conv_filters, pooling_type, use_batchnorm)(x)
        for level in (1, 2, 3, 6)
    ]

    # Aggregate the input feature map with all pyramid branches.
    concat_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    x = layers.Concatenate(axis=concat_axis, name='psp_concat')([x] + pyramid)
    x = Conv1x1BnReLU(conv_filters, use_batchnorm, name='aggregation')(x)

    # Optional regularization.
    if dropout is not None:
        x = layers.SpatialDropout2D(dropout, name='spatial_dropout')(x)

    # Segmentation head: per-class logits, upsample to input resolution,
    # final activation.
    x = layers.Conv2D(
        filters=classes,
        kernel_size=(3, 3),
        padding='same',
        kernel_initializer='glorot_uniform',
        name='final_conv',
    )(x)
    x = layers.UpSampling2D(final_upsampling_factor, name='final_upsampling', interpolation='bilinear')(x)
    x = layers.Activation(activation, name=activation)(x)

    return models.Model(input_, x)
# ---------------------------------------------------------------------
# PSP Model
# ---------------------------------------------------------------------
def PSPNet(
        backbone_name='vgg16',
        input_shape=(384, 384, 3),
        classes=21,
        activation='softmax',
        weights=None,
        encoder_weights='imagenet',
        encoder_freeze=False,
        downsample_factor=8,
        psp_conv_filters=512,
        psp_pooling_type='avg',
        psp_use_batchnorm=True,
        psp_dropout=None,
        **kwargs
):
    """PSPNet_ is a fully convolution neural network for image semantic segmentation

    Args:
        backbone_name: name of classification model used as feature
            extractor to build segmentation model.
        input_shape: shape of input data/image ``(H, W, C)``.
            ``H`` and ``W`` should be divisible by ``6 * downsample_factor`` and **NOT** ``None``!
        classes: a number of classes for output (output shape - ``(h, w, classes)``).
        activation: name of one of ``keras.activations`` for last model layer
            (e.g. ``sigmoid``, ``softmax``, ``linear``).
        weights: optional, path to model weights.
        encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
        encoder_freeze: if ``True`` set all layers of encoder (backbone model) as non-trainable.
        downsample_factor: one of 4, 8 and 16. Downsampling rate or in other words backbone depth
            to construct PSP module on it.
        psp_conv_filters: number of filters in ``Conv2D`` layer in each PSP block.
        psp_pooling_type: one of 'avg', 'max'. PSP block pooling type (maximum or average).
        psp_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers
            is used.
        psp_dropout: dropout rate between 0 and 1.

    Returns:
        ``keras.models.Model``: **PSPNet**

    .. _PSPNet:
        https://arxiv.org/pdf/1612.01105.pdf
    """
    global backend, layers, models, keras_utils
    submodule_args = filter_keras_submodules(kwargs)
    backend, layers, models, keras_utils = get_submodules_from_kwargs(submodule_args)
    # Fail early if H/W are incompatible with the pyramid pooling levels.
    check_input_shape(input_shape, downsample_factor)
    backbone = Backbones.get_backbone(
        backbone_name,
        input_shape=input_shape,
        weights=encoder_weights,
        include_top=False,
        **kwargs
    )
    # Map the requested output stride onto one of the three deepest feature layers.
    feature_layers = Backbones.get_feature_layers(backbone_name, n=3)
    layer_by_factor = {
        16: feature_layers[0],
        8: feature_layers[1],
        4: feature_layers[2],
    }
    if downsample_factor not in layer_by_factor:
        raise ValueError('Unsupported factor - `{}`, Use 4, 8 or 16.'.format(downsample_factor))
    psp_layer_idx = layer_by_factor[downsample_factor]
    model = build_psp(
        backbone,
        psp_layer_idx,
        pooling_type=psp_pooling_type,
        conv_filters=psp_conv_filters,
        use_batchnorm=psp_use_batchnorm,
        final_upsampling_factor=downsample_factor,
        classes=classes,
        activation=activation,
        dropout=psp_dropout,
    )
    # Lock encoder weights for fine-tuning.
    if encoder_freeze:
        freeze_model(backbone, **kwargs)
    # Optionally load full-model weights from disk.
    if weights is not None:
        model.load_weights(weights)
    return model
|
qubvelREPO_NAMEsegmentation_modelsPATH_START.@segmentation_models_extracted@segmentation_models-master@segmentation_models@models@pspnet.py@.PATH_END.py
|
{
"filename": "make_ascii_catalog.py",
"repo_name": "grzeimann/Diagnose",
"repo_path": "Diagnose_extracted/Diagnose-main/make_ascii_catalog.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 19 09:36:32 2021

@author: gregz

Merge the per-field ``classification*.fits`` files found in a folder into a
single fixed-width ascii catalog, ``HETVIPS_classifications.txt``.

Usage:
    make_ascii_catalog.py <template_fits> <classification_folder>
"""
import glob
import numpy as np
import os.path as op
import sys
from astropy.io import fits
from astropy.table import Table
filename = sys.argv[1]
folder = sys.argv[2]
# Total number of catalog rows, taken from extension 2 of the template file.
# Use a context manager so the FITS handle is closed rather than leaked.
with fits.open(filename) as template:
    N = template[2].shape[0]
filenames = sorted(glob.glob(op.join(folder, 'classification*.fits')))
# Seed every scalar column with length-N placeholder lists built from the
# first file's 'info' table.  Data are read inside the context manager
# because astropy lazily memory-maps FITS data.
with fits.open(filenames[0]) as first:
    T = first['info'].data
    objID, RA, Dec, shotid, gmag, rmag = [[T[name][0]]*N for name in
                          ['objID', 'RA', 'Dec', 'shotid', 'gmag', 'rmag']]
    imag, zmag, ymag, sn, barycor, mjd, exptime = [[T[name][0]]*N for name in
                          ['imag', 'zmag', 'ymag', 'sn', 'barycor', 'mjd', 'exptime']]
# Placeholder values for rows that are never filled below.
chi2_star, chi2_galaxy, chi2_qso = ([1.]*N, [1.]*N, [1.]*N)
z_star, z_galaxy, z_qso, z_best = ([0.]*N, [0.]*N, [0.]*N, [0.]*N)
classification, stellartype = (['GALAXY']*N, ['W']*N)
colnames = ['objID', 'RA', 'Dec', 'shotid', 'gmag', 'rmag', 'imag', 'zmag',
            'ymag', 'sn', 'barycor', 'mjd', 'exptime', 'chi2_star',
            'chi2_galaxy', 'chi2_qso', 'z_star', 'z_galaxy', 'z_qso', 'z_best',
            'classification', 'stellartype']
format_dict = {'RA': '%0.6f', 'Dec': '%0.6f', 'gmag': '%0.3f', 'rmag': '%0.3f',
               'imag': '%0.3f', 'zmag': '%0.3f', 'ymag': '%0.3f',
               'sn': '%0.3f', 'barycor': '%0.1f', 'mjd': '%0.8f',
               'exptime': '%0.1f', 'z_star': '%0.8f', 'z_galaxy': '%0.5f',
               'z_qso': '%0.5f', 'chi2_star': '%0.3f', 'chi2_galaxy': '%0.3f',
               'chi2_qso': '%0.3f', 'z_best': '%0.8f'}
# Running row offset into the preallocated output columns.
i = 0
for fn in filenames:
    print('Working on %s' % fn)
    with fits.open(fn) as f:
        l = f[1].shape[0]
        objID[i:i+l] = f['info'].data['objID']
        RA[i:i+l] = f['info'].data['RA']
        Dec[i:i+l] = f['info'].data['Dec']
        shotid[i:i+l] = f['info'].data['shotid']
        gmag[i:i+l] = f['info'].data['gmag']
        rmag[i:i+l] = f['info'].data['rmag']
        imag[i:i+l] = f['info'].data['imag']
        zmag[i:i+l] = f['info'].data['zmag']
        ymag[i:i+l] = f['info'].data['ymag']
        sn[i:i+l] = f['info'].data['sn']
        barycor[i:i+l] = f['info'].data['barycor']
        mjd[i:i+l] = f['info'].data['mjd']
        exptime[i:i+l] = f['info'].data['exptime']
        chi2_star[i:i+l] = f['chi2'].data[:, 0]
        chi2_galaxy[i:i+l] = f['chi2'].data[:, 1]
        chi2_qso[i:i+l] = f['chi2'].data[:, 2]
        # Presumably converts a stellar velocity (m/s) to a redshift.
        # NOTE(review): 2.99798e8 differs slightly from c = 2.99792458e8 m/s —
        # confirm against the producer of the 'zs' extension.
        f['zs'].data[:, 0] = f['zs'].data[:, 0] / 2.99798e8
        z_star[i:i+l] = f['zs'].data[:, 0]
        z_galaxy[i:i+l] = f['zs'].data[:, 1]
        z_qso[i:i+l] = f['zs'].data[:, 2]
        # 'class' is 1-based (1=STAR, 2=GALAXY, 3=QSO, 4=UNKNOWN); shift to 0-based.
        best_ind = np.array(f['class'].data, dtype=int) - 1
        # UNKNOWN rows (index 3) keep the -999 sentinel redshift.
        zbest = np.ones((l,))*-999.
        for j in np.arange(3):
            sel = np.where(best_ind == j)[0]
            zbest[sel] = f['zs'].data[sel, j]
        z_best[i:i+l] = zbest
        class_label = np.array(['UNKNOWN'] * l)
        for j, label in zip(np.arange(4), ['STAR', 'GALAXY', 'QSO', 'UNKNOWN']):
            sel = np.where(best_ind == j)[0]
            class_label[sel] = label
        classification[i:i+l] = class_label
        stellartype[i:i+l] = f['stellartype'].data
    i += l
T = Table([objID, RA, Dec, shotid, gmag, rmag, imag, zmag,
           ymag, sn, barycor, mjd, exptime, chi2_star,
           chi2_galaxy, chi2_qso, z_star, z_galaxy, z_qso, z_best,
           classification, stellartype], names=colnames)
T.write('HETVIPS_classifications.txt', format='ascii.fixed_width_two_line',
        overwrite=True, formats=format_dict)
|
grzeimannREPO_NAMEDiagnosePATH_START.@Diagnose_extracted@Diagnose-main@make_ascii_catalog.py@.PATH_END.py
|
{
"filename": "maxContextCalc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/fonttools/fontTools/otlLib/maxContextCalc.py",
"type": "Python"
}
|
__all__ = ["maxCtxFont"]
def maxCtxFont(font):
    """Calculate the usMaxContext value for an entire font."""
    result = 0
    # Only the two OpenType layout tables contribute to usMaxContext.
    for tag in ("GSUB", "GPOS"):
        if tag not in font:
            continue
        lookup_list = font[tag].table.LookupList
        if not lookup_list:
            continue
        # Fold every subtable of every lookup into the running maximum.
        for lookup in lookup_list.Lookup:
            for subtable in lookup.SubTable:
                result = maxCtxSubtable(result, tag, lookup.LookupType, subtable)
    return result
def maxCtxSubtable(maxCtx, tag, lookupType, st):
    """Calculate usMaxContext based on a single lookup table (and an existing
    max value).
    """
    if tag == "GPOS":
        # single positioning
        if lookupType == 1:
            return max(maxCtx, 1)
        # pair positioning
        if lookupType == 2:
            return max(maxCtx, 2)
        # context
        if lookupType == 7:
            return maxCtxContextualSubtable(maxCtx, st, "Pos")
        # chained context
        if lookupType == 8:
            return maxCtxContextualSubtable(maxCtx, st, "Pos", "Chain")
        # extension: recurse into the wrapped subtable
        if lookupType == 9:
            return maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
    elif tag == "GSUB":
        # single / multiple / alternate substitution
        if lookupType in (1, 2, 3):
            return max(maxCtx, 1)
        # ligatures: longest component sequence wins
        if lookupType == 4:
            for ligature_set in st.ligatures.values():
                for ligature in ligature_set:
                    maxCtx = max(maxCtx, ligature.CompCount)
            return maxCtx
        # context
        if lookupType == 5:
            return maxCtxContextualSubtable(maxCtx, st, "Sub")
        # chained context
        if lookupType == 6:
            return maxCtxContextualSubtable(maxCtx, st, "Sub", "Chain")
        # extension: recurse into the wrapped subtable
        if lookupType == 7:
            return maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
        # reverse-chained context
        if lookupType == 8:
            return maxCtxContextualRule(maxCtx, st, "Reverse")
    # Unknown tag/lookupType combinations leave the maximum untouched.
    return maxCtx
def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=""):
    """Calculate usMaxContext based on a contextual feature subtable."""
    # Formats 1 and 2 both hold a list of rulesets whose attribute names only
    # differ by a "Rule"/"Class" infix; Format 3 is itself a single rule.
    if st.Format == 1:
        rulesets = getattr(st, "%s%sRuleSet" % (chain, ruleType))
        rule_attr = "%s%sRule" % (chain, ruleType)
    elif st.Format == 2:
        rulesets = getattr(st, "%s%sClassSet" % (chain, ruleType))
        rule_attr = "%s%sClassRule" % (chain, ruleType)
    elif st.Format == 3:
        return maxCtxContextualRule(maxCtx, st, chain)
    else:
        return maxCtx
    for ruleset in rulesets:
        if ruleset is None:
            continue
        for rule in getattr(ruleset, rule_attr):
            if rule is None:
                continue
            maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
    return maxCtx
def maxCtxContextualRule(maxCtx, st, chain):
    """Calculate usMaxContext based on a contextual feature rule."""
    # Reverse-chained rules count one input glyph plus the lookahead;
    # other chained rules count input plus lookahead; plain contextual
    # rules just use the glyph count.
    if chain == "Reverse":
        candidate = 1 + st.LookAheadGlyphCount
    elif chain:
        candidate = st.InputGlyphCount + st.LookAheadGlyphCount
    else:
        candidate = st.GlyphCount
    return max(maxCtx, candidate)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@fonttools@fontTools@otlLib@maxContextCalc.py@.PATH_END.py
|
{
"filename": "_shapesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnelarea/marker/pattern/_shapesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShapesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``shapesrc`` property of ``funnelarea.marker.pattern``."""

    def __init__(
        self, plotly_name="shapesrc", parent_name="funnelarea.marker.pattern", **kwargs
    ):
        # Default edit_type is "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(ShapesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnelarea@marker@pattern@_shapesrc.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "njcuk9999/lbl",
"repo_path": "lbl_extracted/lbl-main/lbl/instruments/README.md",
"type": "Markdown"
}
|
# Instrument directory
- This contains instrument python scripts (and `select.py`)
- Each instrument contains a class i.e. for SPIRou:
```python
from lbl.instruments import default
class Spirou(default.Instrument):
def __init__(self, params):
# call to super function
# noinspection PyTypeChecker
super().__init__("SPIROU")
# set parameters for instrument
self.params = params
# constructor
pass
```
These functions should be overridden to provide instrument-specific functionality.
Note as well as adding the `{instrument}.py` file you must add a line to
`select.py`:
```
from lbl.instruments import {INSTRUMENT}
# ...
# and in load_instrument():
# select SPIROU
if instrument.upper() == 'SPIROU':
inst = spirou.Spirou(params)
# select HARPS
elif instrument.upper() == 'HARPS':
inst = harps.Harps(params)
# select {INSTRUMENT}
elif instrument.upper() == '{INSTRUMENT}':
inst = instrument.Instrument(params)
# else instrument is invalid
else:
emsg = 'Instrument name "{0}" invalid'
eargs = [instrument]
raise base_classes.LblException(emsg.format(*eargs))
```
## Param Override method
This method is used to override default parameters (these in turn can be overridden
from the command line / yaml / main function inputs), but if they are not overridden
by the command line / yaml / main function and the default value (found in
`core.parameters.py`) is not correct for the instrument, it should be overridden here.
## Spectrum read function
TODO
## Template read function
TODO
## Blaze read function
TODO
## Wave read function
TODO
|
njcuk9999REPO_NAMElblPATH_START.@lbl_extracted@lbl-main@lbl@instruments@README.md@.PATH_END.py
|
{
"filename": "typesense.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/vectorstores/typesense.py",
"type": "Python"
}
|
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_env
from langchain_core.vectorstores import VectorStore
if TYPE_CHECKING:
from typesense.client import Client
from typesense.collection import Collection
class Typesense(VectorStore):
    """`Typesense` vector store.

    To use, you should have the ``typesense`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embedding.openai import OpenAIEmbeddings
            from langchain_community.vectorstores import Typesense
            import typesense

            node = {
                "host": "localhost",  # For Typesense Cloud use xxx.a1.typesense.net
                "port": "8108",       # For Typesense Cloud use 443
                "protocol": "http"    # For Typesense Cloud use https
            }
            typesense_client = typesense.Client(
                {
                    "nodes": [node],
                    "api_key": "<API_KEY>",
                    "connection_timeout_seconds": 2
                }
            )
            typesense_collection_name = "langchain-memory"

            embedding = OpenAIEmbeddings()
            vectorstore = Typesense(
                typesense_client=typesense_client,
                embedding=embedding,
                typesense_collection_name=typesense_collection_name,
                text_key="text",
            )
    """

    def __init__(
        self,
        typesense_client: Client,
        embedding: Embeddings,
        *,
        typesense_collection_name: Optional[str] = None,
        text_key: str = "text",
    ):
        """Initialize with Typesense client.

        Args:
            typesense_client: an instantiated ``typesense.Client``.
            embedding: embedding function used for both documents and queries.
            typesense_collection_name: target collection name; when omitted a
                unique ``langchain-<uuid4>`` name is generated.
            text_key: document field under which the raw text is stored.

        Raises:
            ImportError: if the ``typesense`` package is not installed.
            ValueError: if ``typesense_client`` is not a ``typesense.Client``.
        """
        try:
            from typesense import Client
        except ImportError:
            raise ImportError(
                "Could not import typesense python package. "
                "Please install it with `pip install typesense`."
            )
        if not isinstance(typesense_client, Client):
            raise ValueError(
                f"typesense_client should be an instance of typesense.Client, "
                f"got {type(typesense_client)}"
            )
        self._typesense_client = typesense_client
        self._embedding = embedding
        # Fall back to a unique collection name so separate instances do not
        # collide unless the caller opts into a shared collection.
        self._typesense_collection_name = (
            typesense_collection_name or f"langchain-{str(uuid.uuid4())}"
        )
        self._text_key = text_key

    @property
    def _collection(self) -> Collection:
        # Handle to the backing collection; the collection itself may not
        # exist server-side yet (it is created lazily in add_texts).
        return self._typesense_client.collections[self._typesense_collection_name]

    @property
    def embeddings(self) -> Embeddings:
        """Embedding function used by this vector store."""
        return self._embedding

    def _prep_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]],
        ids: Optional[List[str]],
    ) -> List[dict]:
        """Embed and create the documents"""
        # Generator defaults are fine here: zip() below consumes the ids and
        # metadatas in lockstep with the embedded texts.
        _ids = ids or (str(uuid.uuid4()) for _ in texts)
        _metadatas: Iterable[dict] = metadatas or ({} for _ in texts)
        embedded_texts = self._embedding.embed_documents(list(texts))
        return [
            {"id": _id, "vec": vec, f"{self._text_key}": text, "metadata": metadata}
            for _id, vec, text, metadata in zip(_ids, embedded_texts, texts, _metadatas)
        ]

    def _create_collection(self, num_dim: int) -> None:
        # Schema: the embedding vector, the raw text, and auto-typed fields
        # for everything else (metadata keys).
        fields = [
            {"name": "vec", "type": "float[]", "num_dim": num_dim},
            {"name": f"{self._text_key}", "type": "string"},
            {"name": ".*", "type": "auto"},
        ]
        self._typesense_client.collections.create(
            {"name": self._typesense_collection_name, "fields": fields}
        )

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embedding and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids to associate with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        from typesense.exceptions import ObjectNotFound
        docs = self._prep_texts(texts, metadatas, ids)
        try:
            self._collection.documents.import_(docs, {"action": "upsert"})
        except ObjectNotFound:
            # Create the collection if it doesn't already exist
            self._create_collection(len(docs[0]["vec"]))
            self._collection.documents.import_(docs, {"action": "upsert"})
        return [doc["id"] for doc in docs]

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 10,
        filter: Optional[str] = "",
    ) -> List[Tuple[Document, float]]:
        """Return typesense documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 10.
                Minimum 10 results would be returned.
            filter: typesense filter_by expression to filter documents on

        Returns:
            List of Documents most similar to the query and score for each
        """
        # Typesense expects the query vector serialized as a comma-separated
        # string inside the vector_query expression.
        embedded_query = [str(x) for x in self._embedding.embed_query(query)]
        query_obj = {
            "q": "*",
            "vector_query": f'vec:([{",".join(embedded_query)}], k:{k})',
            "filter_by": filter,
            "collection": self._typesense_collection_name,
        }
        docs = []
        response = self._typesense_client.multi_search.perform(
            {"searches": [query_obj]}, {}
        )
        # The score returned is the raw vector distance (smaller is closer).
        for hit in response["results"][0]["hits"]:
            document = hit["document"]
            metadata = document["metadata"]
            text = document[self._text_key]
            score = hit["vector_distance"]
            docs.append((Document(page_content=text, metadata=metadata), score))
        return docs

    def similarity_search(
        self,
        query: str,
        k: int = 10,
        filter: Optional[str] = "",
        **kwargs: Any,
    ) -> List[Document]:
        """Return typesense documents most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 10.
                Minimum 10 results would be returned.
            filter: typesense filter_by expression to filter documents on

        Returns:
            List of Documents most similar to the query and score for each
        """
        docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter)
        return [doc for doc, _ in docs_and_score]

    @classmethod
    def from_client_params(
        cls,
        embedding: Embeddings,
        *,
        host: str = "localhost",
        port: Union[str, int] = "8108",
        protocol: str = "http",
        typesense_api_key: Optional[str] = None,
        connection_timeout_seconds: int = 2,
        **kwargs: Any,
    ) -> Typesense:
        """Initialize Typesense directly from client parameters.

        Example:
            .. code-block:: python

                from langchain_community.embedding.openai import OpenAIEmbeddings
                from langchain_community.vectorstores import Typesense

                # Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY".
                vectorstore = Typesense.from_client_params(
                    OpenAIEmbeddings(),
                    host="localhost",
                    port="8108",
                    protocol="http",
                    typesense_collection_name="langchain-memory",
                )
        """
        try:
            from typesense import Client
        except ImportError:
            raise ImportError(
                "Could not import typesense python package. "
                "Please install it with `pip install typesense`."
            )
        node = {
            "host": host,
            "port": str(port),
            "protocol": protocol,
        }
        typesense_api_key = typesense_api_key or get_from_env(
            "typesense_api_key", "TYPESENSE_API_KEY"
        )
        client_config = {
            "nodes": [node],
            "api_key": typesense_api_key,
            "connection_timeout_seconds": connection_timeout_seconds,
        }
        return cls(Client(client_config), embedding, **kwargs)

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        typesense_client: Optional[Client] = None,
        typesense_client_params: Optional[dict] = None,
        typesense_collection_name: Optional[str] = None,
        text_key: str = "text",
        **kwargs: Any,
    ) -> Typesense:
        """Construct Typesense wrapper from raw text.

        Exactly one of ``typesense_client`` or ``typesense_client_params``
        must be provided; the texts are embedded and upserted immediately.
        """
        if typesense_client:
            vectorstore = cls(typesense_client, embedding, **kwargs)
        elif typesense_client_params:
            vectorstore = cls.from_client_params(
                embedding, **typesense_client_params, **kwargs
            )
        else:
            raise ValueError(
                "Must specify one of typesense_client or typesense_client_params."
            )
        vectorstore.add_texts(texts, metadatas=metadatas, ids=ids)
        return vectorstore
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@vectorstores@typesense.py@.PATH_END.py
|
{
"filename": "Plot.Opac.py",
"repo_name": "alexrhowe/APOLLO",
"repo_path": "APOLLO_extracted/APOLLO-master/Plot.Opac.py",
"type": "Python"
}
|
from __future__ import print_function
import sys
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv)<=3:
print('Error: arguments not specified. File1, File2, Pressure, Temperature')
sys.exit()
file1 = sys.argv[1]
file2 = sys.argv[2]
press = np.log10((float)(sys.argv[3]))
temp = np.log10((float)(sys.argv[4]))
table1 = open(file1,'r')
header1 = table1.readline().split()
np1 = (int)(header1[0])
minP1 = (float)(header1[1])
maxP1 = (float)(header1[2])
nt1 = (int)(header1[3])
minT1 = (float)(header1[4])
maxT1 = (float)(header1[5])
len1 = (int)(header1[6])
min1 = (float)(header1[7])
max1 = (float)(header1[8])
if press<minP1 or press>maxP1:
print('Error: pressure out of range.')
sys.exit()
if temp<minT1 or temp>maxT1:
print('Error: temperature out of range.')
sys.exit()
ip1 = (int)(np.floor((press-minP1)/(maxP1-minP1)*(np1-1)))
jt1 = (int)(np.floor((temp-minT1)/(maxT1-minT1)*(nt1-1)))
x1 = np.exp(np.linspace(np.log(min1),np.log(max1),len1))
y1 = np.zeros(len1)
for i in range(0,np1):
for j in range(0,nt1):
line = table1.readline()
if i==ip1 and j==jt1:
line = line.split()
for k in range(0,len1):
y1[k] = (float)(line[k])
table1.close()
table2 = open(file2,'r')
header2 = table2.readline().split()
np2 = (int)(header2[0])
minP2 = (float)(header2[1])
maxP2 = (float)(header2[2])
nt2 = (int)(header2[3])
minT2 = (float)(header2[4])
maxT2 = (float)(header2[5])
len2 = (int)(header2[6])
min2 = (float)(header2[7])
max2 = (float)(header2[8])
if press<minP2 or press>maxP2:
print('Error: pressure out of range.')
sys.exit()
if temp<minT2 or temp>maxT2:
print('Error: temperature out of range.')
sys.exit()
ip2 = (int)(np.floor((press-minP2)/(maxP2-minP2)*(np2-1)))
jt2 = (int)(np.floor((temp -minT2)/(maxT2-minT2)*(nt2-1)))
x2 = np.exp(np.linspace(np.log(min2),np.log(max2),len2))
y2 = np.zeros(len2)
for i in range(0,np2):
for j in range(0,nt2):
line = table2.readline()
if i==ip2 and j==jt2:
line = line.split()
for k in range(0,len2):
y2[k] = (float)(line[k])
table2.close()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_yscale('log')
plt.xlabel('Wavelength (microns)')
plt.ylabel('Cross Section (cm$^2$)')
#plt.axis((0.6,2.5,1e-31,1e-10))
ax.plot(x1,y1,'-',linewidth=1,c='k')
ax.plot(x2,y2,'-',linewidth=1,c='r')
plt.show()
|
alexrhoweREPO_NAMEAPOLLOPATH_START.@APOLLO_extracted@APOLLO-master@Plot.Opac.py@.PATH_END.py
|
{
"filename": "K2_Detrend_AMC.py",
"repo_name": "barentsen/dave",
"repo_path": "dave_extracted/dave-master/extractDetrend/K2photo/K2_Detrend_AMC.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from martinsff import martinsff
import extract_lc
extract_lc = reload(extract_lc)
from astropy.stats import median_absolute_deviation as MAD
import astropy
from astropy.table import Table
import glob
from numpy import transpose
import astropy.io.ascii as ascii
def K2_DetrendRev4(fn,):
    """Detrend a single K2 light-curve file with the self-flat-field method.

    Reads a 4-column ascii file (time, flux, x centroid, y centroid), median
    filters the flux, computes a pointing-decorrelation correction, writes a
    ``*_detrended.dat`` table, and saves a two-panel diagnostic PNG.

    NOTE(review): output paths are hard-coded to a specific user directory —
    parameterize before reuse.
    """
    time, flux, xbar, ybar = np.genfromtxt(fn, unpack = True, skip_header=1)
    # Keep only cadences with finite flux.
    m1 = np.isfinite(flux)
    time = time[m1]
    flux = flux[m1]
    xbar = xbar[m1]
    ybar = ybar[m1]
    flatlc = extract_lc.medfilt(time,flux,window=3)
#    zpt = len(time)%300
    # Drop the leading remainder so the series divides evenly into
    # 327-cadence detrending windows.
    zpt = len(time)%327
    outflux, correction, thr_cad = extract_lc.run_C0_detrend(
#        time, flatlc, xbar, ybar, cadstep=300, skip=1828)
        time, flatlc, xbar, ybar, cadstep=327, skip=1828)
    not_thr = ~thr_cad
    corflux = (flux[zpt:][not_thr]/
               np.median(flux[zpt:][not_thr])/
               correction[not_thr])
    corflatflux = (flatlc[zpt:][not_thr]/
                   np.median(flatlc[zpt:][not_thr])/
                   correction[not_thr])
    # The 1.4826 and *4 factors make this similar to a 4-sigma cut.
    mad_cut = 1.4826*MAD(corflatflux-1.)*4
    keep = np.abs(corflatflux-1.) < mad_cut
    # Adds the detrended data to an ascii table.
    # To use this in conjunction with the ACF included in this package, you must
    # comment out corflatflux, keep, flux, and flatflux, then run the script on
    # your data set.
    # NOTE(review): compiled_table is built but never written anywhere; only
    # `newtable` below is saved — confirm whether this table is still needed.
    compiled_table = astropy.table.Table()
    compiled_table['time'] = time[zpt:][not_thr]
    compiled_table['corflatflux'] = corflatflux
    compiled_table['corflux'] = corflux
    compiled_table['keep'] = keep
    compiled_table['flux'] = flux[zpt:][not_thr]
    compiled_table['flatflux'] = flatlc[zpt:][not_thr]
    compiled_table['xbar'] = xbar[zpt:][not_thr]
    compiled_table['ybar'] = ybar[zpt:][not_thr]
    # Generates the DANCe # for the file title.
#    DANCe = fn.split('/')[-1].split('.')[0]
    substr = fn.split('/')[-1]
    end = substr.find('.dat')
    DANCe = substr[:end]
    newtable = {'Dates': time[zpt:][not_thr], 'Flux': flux[zpt:][not_thr], 'Corrflux': corflux, 'Xpos': xbar[zpt:][not_thr], 'Ypos': ybar[zpt:][not_thr]}
    ascii.write(newtable, '/Users/acody/Data/K2/Field_0/M35/Lightcurves_RADec_ap3.0_v4_AMCdetrend/'+DANCe + '_detrended.dat', names=['Dates','Flux', 'Corrflux','Xpos','Ypos'])
    # Create some plots
    plt.clf()
    plt.subplot(211)
    plt.plot(time[zpt:][not_thr], flux[zpt:][not_thr]/np.median(flux[zpt:][not_thr]), 'bo', markersize=2)
    plt.xlabel('Time [d]')
    plt.ylabel('Flux/Median flux')
    plt.ylim((np.median(flux[zpt:][not_thr]/np.median(flux[zpt:][not_thr]))-4.5*np.std(flux[zpt:][not_thr]/np.median(flux[zpt:][not_thr])),np.median(flux[zpt:][not_thr]/np.median(flux[zpt:][not_thr]))+
    4.5*np.std(flux[zpt:][not_thr]/np.median(flux[zpt:][not_thr]))))
    # BUG FIX: the original did `plt.title = DANCe`, which rebinds the pyplot
    # title *function* to a string (breaking later calls) instead of setting
    # the axes title.  Call the function instead.
    plt.title(DANCe)
    plt.subplot(212)
    plt.plot(time[zpt:][not_thr], corflux/np.median(corflux), 'bo', markersize=2)
    plt.xlabel('Time [d]')
    plt.ylabel('Flux/Median flux')
    plt.ylim((np.median(corflux/np.median(corflux))-4.5*np.std(corflux/np.median(corflux)),np.median(corflux/np.median(corflux))+4.5*np.std(corflux/np.median(corflux))))
    plt.savefig('/Users/acody/Data/K2/Field_0/M35/Lightcurves_RADec_ap3.0_v4_AMCdetrend/'+DANCe + '_detrended.png')
def DetrenderRev4(file_pathway):
    """Detrend every ``.dat`` light-curve file found in ``file_pathway``.

    file_pathway should be the directory that contains all of the .dat files
    you wish to detrend. This is the command you run to detrend the data!
    """
    # Iterate over the matched files directly (sorted for determinism).
    # The previous implementation routed the filename list through
    # numpy.transpose and astropy Table.read(format='ascii'), which treats
    # the first filename as a column header and therefore skipped one file.
    for fname in sorted(glob.glob("%s/*.dat" % file_pathway)):
        K2_DetrendRev4(fname)
|
barentsenREPO_NAMEdavePATH_START.@dave_extracted@dave-master@extractDetrend@K2photo@K2_Detrend_AMC.py@.PATH_END.py
|
{
"filename": "ModelReaderTesting_WACCMXtiming.ipynb",
"repo_name": "nasa/Kamodo",
"repo_path": "Kamodo_extracted/Kamodo-master/Validation/Notebooks/ModelReaderTesting_WACCMXtiming.ipynb",
"type": "Jupyter Notebook"
}
|
# Demo notebook for Model Reader
```python
model = 'WACCMX'
file_dir = 'D:/WACCMX/Jack_Wang_081222_IT_2/'
import kamodo_ccmc.flythrough.model_wrapper as MW
MW.Variable_Search('', model, file_dir)
```
```python
var_list = ['E_east', 'E_equator', 'TEC', 'H_geopot_ilev', 'N_e', 'rho', 'vi_north', 'v_north',
'vi_east', 'v_east']
from time import perf_counter
reader = MW.Model_Reader(model)
t0 = perf_counter()
kamodo_object = reader(file_dir, variables_requested=var_list)
t1 = perf_counter()
print(t1-t0)
kamodo_object
```
```python
```
|
nasaREPO_NAMEKamodoPATH_START.@Kamodo_extracted@Kamodo-master@Validation@Notebooks@ModelReaderTesting_WACCMXtiming.ipynb@.PATH_END.py
|
{
"filename": "Example5_PSF_Correction.ipynb",
"repo_name": "bsafdi/NPTFit",
"repo_path": "NPTFit_extracted/NPTFit-master/examples/Example5_PSF_Correction.ipynb",
"type": "Jupyter Notebook"
}
|
# Example 5: NPTF Correction for the Point Spread Function (PSF)
In this example we show how to account for the PSF correction using `psf_correction.py`
Fundamentally the presence of a non-zero PSF implies that the photons from any point source will be smeared out into some region around its true location. This effect must be accounted for in the NPTF. This is achieved via a function $\rho(f)$. In the code we discretize $\rho(f)$ as an approximation to the full function.
The two outputs of an instance of `psf_correction` are: 1. f_ary, an array of f values; and 2. df_rho_div_f_ary, an associated array of $\Delta f \rho(f)/f$ values, where $\Delta f$ is the width of the f_ary bins.
If the angular reconstruction of the data is perfect, then $\rho(f) = \delta(f-1)$. In many situations, such as for the _Fermi_ data at higher energies, a Gaussian approximation of the PSF will suffice. Even then there are a number of variables that go into evaluating the correction, as shown below. Finally we will show how the code can be used for the case of non-Gaussian PSFs.
As the calculation of $\rho(f)$ can be time consuming, we always save the output to avoid recomputing the same correction twice. Consequently it can be convenient to have a common `psf_dir` where all PSF corrections for the runs are stored.
```python
# Import relevant modules
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from NPTFit import psf_correction as pc # Module for determining the PSF correction
from __future__ import print_function
```
## Example 1: Default Gaussian PSF
We start by showing the PSF correction for a Gaussian PSF - that is the PSF as a function of $r$ is $\exp \left[ -r^2 / (2\sigma^2) \right]$ - with $\sigma$ set to the value of the 68% containment radius for the PSF of the _Fermi_ dataset we will use in later examples.
```python
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812)
f_ary_1 = pc_inst.f_ary
df_rho_div_f_ary_1 = pc_inst.df_rho_div_f_ary
print('f_ary:', f_ary_1)
print('df_rho_div_f_ary:', df_rho_div_f_ary_1)
plt.plot(f_ary_1,f_ary_1**2*df_rho_div_f_ary_1/(f_ary_1[1]-f_ary_1[0]),color='black', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.title('Gaussian PSF, $\sigma_\mathrm{PSF} = 0.1812$', y=1.04)
```
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_128_0.181_10_50000_1000_0.01.npy
f_ary: [0.05 0.15 0.25 0.35 0.45 0.55 0.65 0.75 0.85 0.95]
df_rho_div_f_ary: [65.19815984 6.88897747 2.52908225 1.28920055 0.80522461 0.54317562
0.09145394 0. 0. 0. ]
Text(0.5,1.04,'Gaussian PSF, $\\sigma_\\mathrm{PSF} = 0.1812$')

## Example 2: Impact of changing $\sigma$
Here we show the impact on the PSF of changing $\sigma$. From the plot we can see that for a small PSF, $\rho(f)$ approaches the no PSF case of $\delta(f-1)$ implying that the flux fractions are concentrated at a single large value. As $\sigma$ increases we move away from this idealized scenario and the flux becomes more spread out, leading to a $\rho(f)$ peaked at lower flux values.
```python
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.05)
f_ary_2 = pc_inst.f_ary
df_rho_div_f_ary_2 = pc_inst.df_rho_div_f_ary
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.4)
f_ary_3 = pc_inst.f_ary
df_rho_div_f_ary_3 = pc_inst.df_rho_div_f_ary
plt.plot(f_ary_1,f_ary_1**2*df_rho_div_f_ary_1/(f_ary_1[1]-f_ary_1[0]),color='cornflowerblue',label='0.18', lw = 1.5)
plt.plot(f_ary_2,f_ary_2**2*df_rho_div_f_ary_2/(f_ary_2[1]-f_ary_2[0]),color='forestgreen',label='0.05', lw = 1.5)
plt.plot(f_ary_3,f_ary_3**2*df_rho_div_f_ary_3/(f_ary_3[1]-f_ary_3[0]),color='maroon',label='0.4', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.legend(loc='upper right', fancybox=True)
plt.title('Varying $\sigma_\mathrm{PSF}$', y=1.04)
```
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_128_0.05_10_50000_1000_0.01.npy
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_128_0.4_10_50000_1000_0.01.npy
Text(0.5,1.04,'Varying $\\sigma_\\mathrm{PSF}$')

## Example 3: Changing the default options for determining $\rho(f)$
In this example we show how for a given PSF, the other parameters associated with how accurately we calculate $\rho(f)$ can impact what we get back. The parameters that can be changed are:
| Argument | Defaults | Purpose |
| ------------- | ------------- | ------------- |
| `num_f_bins` | 10 | number of f_bins used |
| `n_psf` | 50000 | number of PSFs placed down when calculating |
| `n_pts_per_psf` | 1000 | number of points to place per psf in the calculation |
| `f_trunc` | 0.01 | minimum flux fraction to keep track of |
| `nside` | 128 | nside of the map the PSF is used on |
The default parameters have been chosen to be accurate enough for the Fermi analyses we will perform later. But if the user changes the PSF (even just $\sigma$), it is important to be sure that the above parameters are chosen so that $\rho(f)$ is evaluated accurately enough.
In general increasing `num_f_bins`, `n_psf`, and `n_pts_per_psf`, whilst decreasing `f_trunc` leads to a more accurate $\rho(f)$. But each will also slow down the evaluation of $\rho(f)$, and in the case of `num_f_bins`, slow down the subsequent non-Poissonian likelihood evaluation.
`nside` should be set to the value of the map being analysed, but we also highlight the impact of changing it below. For an analysis on a non-HEALPix grid, the PSF can often be approximated by an appropriate HEALPix binning. If this is not the case, however, a different approach must be pursued in calculating $\rho(f)$.
```python
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812,num_f_bins=20)
f_ary_4 = pc_inst.f_ary
df_rho_div_f_ary_4 = pc_inst.df_rho_div_f_ary
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812,n_psf=5000,n_pts_per_psf=100)
f_ary_5 = pc_inst.f_ary
df_rho_div_f_ary_5 = pc_inst.df_rho_div_f_ary
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812,f_trunc=0.1)
f_ary_6 = pc_inst.f_ary
df_rho_div_f_ary_6 = pc_inst.df_rho_div_f_ary
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812,nside=64)
f_ary_7 = pc_inst.f_ary
df_rho_div_f_ary_7 = pc_inst.df_rho_div_f_ary
plt.plot(f_ary_1,f_ary_1**2*df_rho_div_f_ary_1/(f_ary_1[1]-f_ary_1[0]),color='black',label=r'Default', lw=2.2)
plt.plot(f_ary_4,f_ary_4**2*df_rho_div_f_ary_4/(f_ary_4[1]-f_ary_4[0]),color='forestgreen',label=r'more f\_bins', lw = 1.5)
plt.plot(f_ary_5,f_ary_5**2*df_rho_div_f_ary_5/(f_ary_5[1]-f_ary_5[0]),color='cornflowerblue',label=r'fewer points', lw = 1.5)
plt.plot(f_ary_6,f_ary_6**2*df_rho_div_f_ary_6/(f_ary_6[1]-f_ary_6[0]),color='salmon',label=r'larger f\_trunc', lw = 1.5)
plt.plot(f_ary_7,f_ary_7**2*df_rho_div_f_ary_7/(f_ary_7[1]-f_ary_7[0]),color='orchid',label=r'lower nside', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True)
```
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_128_0.181_20_50000_1000_0.01.npy
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_128_0.181_10_5000_100_0.01.npy
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_128_0.181_10_50000_1000_0.1.npy
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_64_0.181_10_50000_1000_0.01.npy
<matplotlib.legend.Legend at 0x7fe4c00337b8>

## Example 4: PSF on a Cartesian Grid
For some applications, particularly when analyzing smaller regions of the sky, it may be desirable to work with data on a Cartesian grid rather than a healpix map. Note generally for larger regions, in order to account for curvature on the sky a healpix pixelization is recommended. Code to convert from Cartesian grids to healpix can be found here: https://github.com/nickrodd/grid2healpix
In order to calculate the appropriate PSF correction for Cartesian maps the general syntax is the same, except now the `healpix_map` keyword should be set to `False` and the `pixarea` keyword set to the area in sr of each pixel of the Cartesian map. In addition the `gridsize` keyword determines how large the map is, and flux that falls outside the map is lost in the Cartesian case.
As an example of this syntax we calculate the PSF correction on a Cartesian map that has pixels the same size as an `nside=128` healpix map, and compare the two PSF corrections. Note they are essentially identical.
```python
pixarea = 4*np.pi/(12*128*128)
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812, healpix_map=False, pixarea=pixarea, gridsize=100)
f_ary_8 = pc_inst.f_ary
df_rho_div_f_ary_8 = pc_inst.df_rho_div_f_ary
plt.plot(f_ary_1,f_ary_1**2*df_rho_div_f_ary_1/(f_ary_1[1]-f_ary_1[0]),color='black', label=r'healpix', lw = 1.5)
plt.plot(f_ary_8,f_ary_8**2*df_rho_div_f_ary_8/(f_ary_8[1]-f_ary_8[0]),color='forestgreen', label=r'cartesian', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
```
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_0.21_0.181_10_50000_1000_0.01.npy
Text(0,0.5,'$f \\times \\rho(f)$')

## Example 5: Custom PSF
In addition to the default Gaussian PSF, `psf_correction.py` also has the option of taking in a custom PSF. In order to use this ability, the user needs to initialise `psf_correction` with `delay_compute=True`, manually define the parameters that define the PSF and then call `make_or_load_psf_corr`.
The variables that need to be redefined in the instance of `psf_correction` are:
| Argument | Purpose |
| ------------- | ------------- |
| `psf_r_func` | the psf as a function of r, distance in radians from the center of the point source |
| `sample_psf_max` | maximum distance to sample the psf from the center, should be larger for psfs with long tails |
| `psf_samples` | number of samples to make from the psf (linearly spaced) from 0 to sample_psf_max, should be large enough to adequately represent the full psf |
| `psf_tag` | label the psf is saved with |
As an example of a more complicated PSF we consider the full Fermi-LAT PSF. The PSF of Fermi is approximately Gaussian near the core, but has larger tails. To model this a pair of King functions are used to describe the radial distribution. Below we show a comparison between the Gaussian approximation and the full PSF, for two different energies. As shown, for low energies where the Fermi PSF is larger, the difference between the two can be significant. For higher energies where the PSF becomes smaller, however, the difference is marginal.
For the full details of the Fermi-LAT PSF, see:
http://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_LAT_IRFs/IRF_PSF.html
```python
# Fermi-LAT PSF at 2 GeV
# Calculate the appropriate Gaussian approximation to the PSF for 2 GeV
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.2354)
f_ary_9 = pc_inst.f_ary
df_rho_div_f_ary_9 = pc_inst.df_rho_div_f_ary
# Define parameters that specify the Fermi-LAT PSF at 2 GeV
fcore = 0.748988248179
score = 0.428653790656
gcore = 7.82363229341
stail = 0.715962650769
gtail = 3.61883748683
spe = 0.00456544262478
# Define the full PSF in terms of two King functions
def king_fn(x, sigma, gamma):
    """Evaluate a King function at radius ``x`` with width ``sigma`` and index ``gamma``."""
    norm = 1./(2.*np.pi*sigma**2.)*(1.-1./gamma)
    shape = (1.+(x**2./(2.*gamma*sigma**2.)))**(-gamma)
    return norm*shape
def Fermi_PSF(r):
    """Full Fermi-LAT PSF at 2 GeV: an fcore-weighted mix of core and tail King functions."""
    scaled_r = r/spe
    return fcore*king_fn(scaled_r,score,gcore) + (1-fcore)*king_fn(scaled_r,stail,gtail)
# Modify the relevant parameters in pc_inst and then make or load the PSF
pc_inst = pc.PSFCorrection(delay_compute=True)
pc_inst.psf_r_func = lambda r: Fermi_PSF(r)
pc_inst.sample_psf_max = 10.*spe*(score+stail)/2.
pc_inst.psf_samples = 10000
pc_inst.psf_tag = 'Fermi_PSF_2GeV'
pc_inst.make_or_load_psf_corr()
# Extract f_ary and df_rho_div_f_ary as usual
f_ary_10 = pc_inst.f_ary
df_rho_div_f_ary_10 = pc_inst.df_rho_div_f_ary
plt.plot(f_ary_9,f_ary_9**2*df_rho_div_f_ary_9/(f_ary_9[1]-f_ary_9[0]),color='maroon',label='Gauss PSF', lw = 1.5)
plt.plot(f_ary_10,f_ary_10**2*df_rho_div_f_ary_10/(f_ary_10[1]-f_ary_10[0]),color='forestgreen',label='Fermi PSF', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.legend(loc='upper right', fancybox=True)
```
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_128_0.235_10_50000_1000_0.01.npy
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/Fermi_PSF_2GeV.npy
<matplotlib.legend.Legend at 0x7fe4b5217e10>

```python
# Fermi-LAT PSF at 20 GeV
# Calculate the appropriate Gaussian approximation to the PSF for 20 GeV
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.05529)
f_ary_11 = pc_inst.f_ary
df_rho_div_f_ary_11 = pc_inst.df_rho_div_f_ary
# Define parameters that specify the Fermi-LAT PSF at 20 GeV
fcore = 0.834725201378
score = 0.498192326976
gcore = 6.32075520959
stail = 1.06648424558
gtail = 4.49677834267
spe = 0.000943339426754
# Define the full PSF in terms of two King functions
def king_fn(x, sigma, gamma):
    """King function of radius ``x``: Gaussian-like core with a power-law tail of index ``gamma``."""
    amplitude = 1./(2.*np.pi*sigma**2.)*(1.-1./gamma)
    return amplitude*(1.+(x**2./(2.*gamma*sigma**2.)))**(-gamma)
def Fermi_PSF(r):
    """Full Fermi-LAT PSF at 20 GeV, combining the core and tail King functions."""
    u = r/spe
    return fcore*king_fn(u,score,gcore) + (1-fcore)*king_fn(u,stail,gtail)
# Modify the relevant parameters in pc_inst and then make or load the PSF
pc_inst = pc.PSFCorrection(delay_compute=True)
pc_inst.psf_r_func = lambda r: Fermi_PSF(r)
pc_inst.sample_psf_max = 10.*spe*(score+stail)/2.
pc_inst.psf_samples = 10000
pc_inst.psf_tag = 'Fermi_PSF_20GeV'
pc_inst.make_or_load_psf_corr()
# Extract f_ary and df_rho_div_f_ary as usual
f_ary_12 = pc_inst.f_ary
df_rho_div_f_ary_12 = pc_inst.df_rho_div_f_ary
plt.plot(f_ary_11,f_ary_11**2*df_rho_div_f_ary_11/(f_ary_11[1]-f_ary_11[0]),color='maroon',label='Gauss PSF', lw = 1.5)
plt.plot(f_ary_12,f_ary_12**2*df_rho_div_f_ary_12/(f_ary_12[1]-f_ary_12[0]),color='forestgreen',label='Fermi PSF', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.legend(loc='upper left', fancybox=True)
```
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/gauss_128_0.055_10_50000_1000_0.01.npy
File saved as: /zfs/nrodd/NPTFRemakeExamples/psf_dir/Fermi_PSF_20GeV.npy
<matplotlib.legend.Legend at 0x7fe4b51cccc0>

The above example also serves as a tutorial on how to combine various PSFs into a single PSF. In the case of the Fermi PSF the full radial dependence is the sum of two King functions. More generally if the full PSF is a combination of multiple individual ones (for example from multiple energy bins), then this can be formed by just adding these functions with an appropriate weighting to get a single `psf_r_func`.
|
bsafdiREPO_NAMENPTFitPATH_START.@NPTFit_extracted@NPTFit-master@examples@Example5_PSF_Correction.ipynb@.PATH_END.py
|
{
"filename": "transfer_learning.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/guides/transfer_learning.py",
"type": "Python"
}
|
"""
Title: Transfer learning & fine-tuning
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/04/15
Last modified: 2023/06/25
Description: Complete guide to transfer learning & fine-tuning in Keras.
Accelerator: GPU
"""
"""
## Setup
"""
import numpy as np
import keras
from keras import layers
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
"""
## Introduction
**Transfer learning** consists of taking features learned on one problem, and
leveraging them on a new, similar problem. For instance, features from a model that has
learned to identify raccoons may be useful to kick-start a model meant to identify
tanukis.
Transfer learning is usually done for tasks where your dataset has too little data to
train a full-scale model from scratch.
The most common incarnation of transfer learning in the context of deep learning is the
following workflow:
1. Take layers from a previously trained model.
2. Freeze them, so as to avoid destroying any of the information they contain during
future training rounds.
3. Add some new, trainable layers on top of the frozen layers. They will learn to turn
the old features into predictions on a new dataset.
4. Train the new layers on your dataset.
A last, optional step, is **fine-tuning**, which consists of unfreezing the entire
model you obtained above (or part of it), and re-training it on the new data with a
very low learning rate. This can potentially achieve meaningful improvements, by
incrementally adapting the pretrained features to the new data.
First, we will go over the Keras `trainable` API in detail, which underlies most
transfer learning & fine-tuning workflows.
Then, we'll demonstrate the typical workflow by taking a model pretrained on the
ImageNet dataset, and retraining it on the Kaggle "cats vs dogs" classification
dataset.
This is adapted from
[Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python)
and the 2016 blog post
["building powerful image classification models using very little data"](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html).
"""
"""
## Freezing layers: understanding the `trainable` attribute
Layers & models have three weight attributes:
- `weights` is the list of all weights variables of the layer.
- `trainable_weights` is the list of those that are meant to be updated (via gradient
descent) to minimize the loss during training.
- `non_trainable_weights` is the list of those that aren't meant to be trained.
Typically they are updated by the model during the forward pass.
**Example: the `Dense` layer has 2 trainable weights (kernel & bias)**
"""
# A Dense layer's weights (kernel and bias) are all trainable:
# expect weights == trainable_weights == 2 and non_trainable_weights == 0.
layer = keras.layers.Dense(3)
layer.build((None, 4))  # Create the weights

print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
"""
In general, all weights are trainable weights. The only built-in layer that has
non-trainable weights is the `BatchNormalization` layer. It uses non-trainable weights
to keep track of the mean and variance of its inputs during training.
To learn how to use non-trainable weights in your own custom layers, see the
[guide to writing new layers from scratch](https://keras.io/guides/making_new_layers_and_models_via_subclassing/).
**Example: the `BatchNormalization` layer has 2 trainable weights and 2 non-trainable
weights**
"""
# BatchNormalization tracks the running mean and variance of its inputs as
# non-trainable weights, in addition to its 2 trainable weights: 2 + 2 total.
layer = keras.layers.BatchNormalization()
layer.build((None, 4))  # Create the weights

print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
"""
Layers & models also feature a boolean attribute `trainable`. Its value can be changed.
Setting `layer.trainable` to `False` moves all the layer's weights from trainable to
non-trainable. This is called "freezing" the layer: the state of a frozen layer won't
be updated during training (either when training with `fit()` or when training with
any custom loop that relies on `trainable_weights` to apply gradient updates).
**Example: setting `trainable` to `False`**
"""
# Setting `trainable = False` moves every weight of the layer into
# non_trainable_weights: counts become 2 total, 0 trainable, 2 non-trainable.
layer = keras.layers.Dense(3)
layer.build((None, 4))  # Create the weights
layer.trainable = False  # Freeze the layer

print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
"""
When a trainable weight becomes non-trainable, its value is no longer updated during
training.
"""
# End-to-end check that a frozen layer is untouched by training: freeze the
# first Dense layer, run one fit step, then assert that its two weight arrays
# (indices 0 and 1 of get_weights()) are unchanged.
# Make a model with 2 layers
layer1 = keras.layers.Dense(3, activation="relu")
layer2 = keras.layers.Dense(3, activation="sigmoid")
model = keras.Sequential([keras.Input(shape=(3,)), layer1, layer2])

# Freeze the first layer
layer1.trainable = False

# Keep a copy of the weights of layer1 for later reference
initial_layer1_weights_values = layer1.get_weights()

# Train the model
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))

# Check that the weights of layer1 have not changed during training
final_layer1_weights_values = layer1.get_weights()
np.testing.assert_allclose(
    initial_layer1_weights_values[0], final_layer1_weights_values[0]
)
np.testing.assert_allclose(
    initial_layer1_weights_values[1], final_layer1_weights_values[1]
)
"""
Do not confuse the `layer.trainable` attribute with the argument `training` in
`layer.__call__()` (which controls whether the layer should run its forward pass in
inference mode or training mode). For more information, see the
[Keras FAQ](
https://keras.io/getting_started/faq/#whats-the-difference-between-the-training-argument-in-call-and-the-trainable-attribute).
"""
"""
## Recursive setting of the `trainable` attribute
If you set `trainable = False` on a model or on any layer that has sublayers,
all children layers become non-trainable as well.
**Example:**
"""
# Freezing is recursive: setting `trainable = False` on the outer model also
# freezes the nested `inner_model` and each of its sublayers.
inner_model = keras.Sequential(
    [
        keras.Input(shape=(3,)),
        keras.layers.Dense(3, activation="relu"),
        keras.layers.Dense(3, activation="relu"),
    ]
)

model = keras.Sequential(
    [
        keras.Input(shape=(3,)),
        inner_model,
        keras.layers.Dense(3, activation="sigmoid"),
    ]
)

model.trainable = False  # Freeze the outer model
assert inner_model.trainable == False  # All layers in `model` are now frozen
assert (
    inner_model.layers[0].trainable == False
)  # `trainable` is propagated recursively
"""
## The typical transfer-learning workflow
This leads us to how a typical transfer learning workflow can be implemented in Keras:
1. Instantiate a base model and load pre-trained weights into it.
2. Freeze all layers in the base model by setting `trainable = False`.
3. Create a new model on top of the output of one (or several) layers from the base
model.
4. Train your new model on your new dataset.
Note that an alternative, more lightweight workflow could also be:
1. Instantiate a base model and load pre-trained weights into it.
2. Run your new dataset through it and record the output of one (or several) layers
from the base model. This is called **feature extraction**.
3. Use that output as input data for a new, smaller model.
A key advantage of that second workflow is that you only run the base model once on
your data, rather than once per epoch of training. So it's a lot faster & cheaper.
An issue with that second workflow, though, is that it doesn't allow you to dynamically
modify the input data of your new model during training, which is required when doing
data augmentation, for instance. Transfer learning is typically used for tasks when
your new dataset has too little data to train a full-scale model from scratch, and in
such scenarios data augmentation is very important. So in what follows, we will focus
on the first workflow.
Here's what the first workflow looks like in Keras:
First, instantiate a base model with pre-trained weights.
```python
base_model = keras.applications.Xception(
weights='imagenet', # Load weights pre-trained on ImageNet.
input_shape=(150, 150, 3),
include_top=False) # Do not include the ImageNet classifier at the top.
```
Then, freeze the base model.
```python
base_model.trainable = False
```
Create a new model on top.
```python
inputs = keras.Input(shape=(150, 150, 3))
# We make sure that the base_model is running in inference mode here,
# by passing `training=False`. This is important for fine-tuning, as you will
# learn in a few paragraphs.
x = base_model(inputs, training=False)
# Convert features of shape `base_model.output_shape[1:]` to vectors
x = keras.layers.GlobalAveragePooling2D()(x)
# A Dense classifier with a single unit (binary classification)
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
```
Train the model on new data.
```python
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()])
model.fit(new_dataset, epochs=20, callbacks=..., validation_data=...)
```
"""
"""
## Fine-tuning
Once your model has converged on the new data, you can try to unfreeze all or part of
the base model and retrain the whole model end-to-end with a very low learning rate.
This is an optional last step that can potentially give you incremental improvements.
It could also potentially lead to quick overfitting -- keep that in mind.
It is critical to only do this step *after* the model with frozen layers has been
trained to convergence. If you mix randomly-initialized trainable layers with
trainable layers that hold pre-trained features, the randomly-initialized layers will
cause very large gradient updates during training, which will destroy your pre-trained
features.
It's also critical to use a very low learning rate at this stage, because
you are training a much larger model than in the first round of training, on a dataset
that is typically very small.
As a result, you are at risk of overfitting very quickly if you apply large weight
updates. Here, you only want to readapt the pretrained weights in an incremental way.
This is how to implement fine-tuning of the whole base model:
```python
# Unfreeze the base model
base_model.trainable = True
# It's important to recompile your model after you make any changes
# to the `trainable` attribute of any inner layer, so that your changes
# are taken into account
model.compile(optimizer=keras.optimizers.Adam(1e-5), # Very low learning rate
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()])
# Train end-to-end. Be careful to stop before you overfit!
model.fit(new_dataset, epochs=10, callbacks=..., validation_data=...)
```
**Important note about `compile()` and `trainable`**
Calling `compile()` on a model is meant to "freeze" the behavior of that model. This
implies that the `trainable`
attribute values at the time the model is compiled should be preserved throughout the
lifetime of that model,
until `compile` is called again. Hence, if you change any `trainable` value, make sure
to call `compile()` again on your
model for your changes to be taken into account.
**Important notes about `BatchNormalization` layer**
Many image models contain `BatchNormalization` layers. That layer is a special case on
every imaginable count. Here are a few things to keep in mind.
- `BatchNormalization` contains 2 non-trainable weights that get updated during
training. These are the variables tracking the mean and variance of the inputs.
- When you set `bn_layer.trainable = False`, the `BatchNormalization` layer will
run in inference mode, and will not update its mean & variance statistics. This is not
the case for other layers in general, as
[weight trainability & inference/training modes are two orthogonal concepts](
https://keras.io/getting_started/faq/#whats-the-difference-between-the-training-argument-in-call-and-the-trainable-attribute).
But the two are tied in the case of the `BatchNormalization` layer.
- When you unfreeze a model for finetuning by setting `base_model.trainable=True` that
contains `BatchNormalization` layers, then all layers of the base model become
trainable along with `BatchNormalization` layers. It's a good idea to keep
`BatchNormalization` either frozen during fine-tuning, or running in inference mode,
so remember to set `layer.trainable = False`
on those layers specifically after unfreezing the outer model, or otherwise
call the model with `training=False` to keep it inference mode.
You'll see this pattern in action in the end-to-end example at the end of this guide.
"""
"""
## An end-to-end example: fine-tuning an image classification model on a cats vs. dogs dataset
To solidify these concepts, let's walk you through a concrete end-to-end transfer
learning & fine-tuning example. We will load the Xception model, pre-trained on
ImageNet, and use it on the Kaggle "cats vs. dogs" classification dataset.
"""
"""
### Getting the data
First, let's fetch the cats vs. dogs dataset using TFDS. If you have your own dataset,
you'll probably want to use the utility
`keras.utils.image_dataset_from_directory` to generate similar labeled
dataset objects from a set of images on disk filed into class-specific folders.
Transfer learning is most useful when working with very small datasets. To keep our
dataset small, we will use 40% of the original training data (25,000 images) for
training, 10% for validation, and 10% for testing.
"""
# Fetch 60% of the "cats_vs_dogs" training split from TFDS: 40% for training,
# 10% each for validation and test, yielded as (image, label) pairs.
tfds.disable_progress_bar()

train_ds, validation_ds, test_ds = tfds.load(
    "cats_vs_dogs",
    # Reserve 10% for validation and 10% for test
    split=["train[:40%]", "train[40%:50%]", "train[50%:60%]"],
    as_supervised=True,  # Include labels
)

print(f"Number of training samples: {train_ds.cardinality()}")
print(f"Number of validation samples: {validation_ds.cardinality()}")
print(f"Number of test samples: {test_ds.cardinality()}")
"""
These are the first 9 images in the training dataset -- as you can see, they're all
different sizes.
"""
# Preview the first 9 training images in a 3x3 grid, each titled with its
# integer label (1 is "dog", 0 is "cat").
plt.figure(figsize=(10, 10))
for i, (image, label) in enumerate(train_ds.take(9)):
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(image)
    plt.title(int(label))
    plt.axis("off")
"""
We can also see that label 1 is "dog" and label 0 is "cat".
"""
"""
### Standardizing the data
Our raw images have a variety of sizes. In addition, each pixel consists of 3 integer
values between 0 and 255 (RGB level values). This isn't a great fit for feeding a
neural network. We need to do 2 things:
- Standardize to a fixed image size. We pick 150x150.
- Normalize pixel values between -1 and 1. We'll do this using a `Normalization` layer as
part of the model itself.
In general, it's a good practice to develop models that take raw data as input, as
opposed to models that take already-preprocessed data. The reason being that, if your
model expects preprocessed data, any time you export your model to use it elsewhere
(in a web browser, in a mobile app), you'll need to reimplement the exact same
preprocessing pipeline. This gets very tricky very quickly. So we should do the least
possible amount of preprocessing before hitting the model.
Here, we'll do image resizing in the data pipeline (because a deep neural network can
only process contiguous batches of data), and we'll do the input value scaling as part
of the model, when we create it.
Let's resize images to 150x150:
"""
# Standardize input size: resize every image in all three splits to 150x150.
resize_fn = keras.layers.Resizing(150, 150)

train_ds = train_ds.map(lambda x, y: (resize_fn(x), y))
validation_ds = validation_ds.map(lambda x, y: (resize_fn(x), y))
test_ds = test_ds.map(lambda x, y: (resize_fn(x), y))
"""
### Using random data augmentation
When you don't have a large image dataset, it's a good practice to artificially
introduce sample diversity by applying random yet realistic transformations to
the training images, such as random horizontal flipping or small random rotations. This
helps expose the model to different aspects of the training data while slowing down
overfitting.
"""
# Random augmentations applied only to the training images.
augmentation_layers = [
    layers.RandomFlip("horizontal"),
    layers.RandomRotation(0.1),
]


def data_augmentation(x):
    """Run ``x`` through each augmentation layer in sequence and return the result."""
    out = x
    for augment in augmentation_layers:
        out = augment(out)
    return out


train_ds = train_ds.map(lambda x, y: (data_augmentation(x), y))
"""
Let's batch the data and use prefetching to optimize loading speed.
"""
from tensorflow import data as tf_data

# Batch all three splits, prefetch so data loading overlaps with training,
# and cache the resulting batches.
batch_size = 64

train_ds = train_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
validation_ds = (
    validation_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
)
test_ds = test_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
"""
Let's visualize what the first image of the first batch looks like after various random
transformations:
"""
# Apply the (random) augmentation pipeline 9 times to the first image of the
# first batch to visualize the variety of transformations.
for images, labels in train_ds.take(1):
    plt.figure(figsize=(10, 10))
    first_image = images[0]
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        augmented_image = data_augmentation(np.expand_dims(first_image, 0))
        plt.imshow(np.array(augmented_image[0]).astype("int32"))
        plt.title(int(labels[0]))
        plt.axis("off")
"""
## Build a model
Now let's build a model that follows the blueprint we've explained earlier.
Note that:
- We add a `Rescaling` layer to scale input values (initially in the `[0, 255]`
range) to the `[-1, 1]` range.
- We add a `Dropout` layer before the classification layer, for regularization.
- We make sure to pass `training=False` when calling the base model, so that
it runs in inference mode, so that batchnorm statistics don't get updated
even after we unfreeze the base model for fine-tuning.
"""
# Assemble the transfer-learning model: a frozen pre-trained Xception base
# with a rescaling layer in front and a pooling + dropout + 1-unit logit head.
base_model = keras.applications.Xception(
    weights="imagenet",  # Load weights pre-trained on ImageNet.
    input_shape=(150, 150, 3),
    include_top=False,
)  # Do not include the ImageNet classifier at the top.

# Freeze the base_model
base_model.trainable = False

# Create new model on top
inputs = keras.Input(shape=(150, 150, 3))

# Pre-trained Xception weights requires that input be scaled
# from (0, 255) to a range of (-1., +1.), the rescaling layer
# outputs: `(inputs * scale) + offset`
scale_layer = keras.layers.Rescaling(scale=1 / 127.5, offset=-1)
x = scale_layer(inputs)

# The base model contains batchnorm layers. We want to keep them in inference mode
# when we unfreeze the base model for fine-tuning, so we make sure that the
# base_model is running in inference mode here.
x = base_model(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)

model.summary(show_trainable=True)
"""
## Train the top layer
"""
# Train only the newly added head; the base model stays frozen. The model
# outputs logits, hence BinaryCrossentropy(from_logits=True).
model.compile(
    optimizer=keras.optimizers.Adam(),
    loss=keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=[keras.metrics.BinaryAccuracy()],
)

epochs = 2
print("Fitting the top layer of the model")
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
"""
## Do a round of fine-tuning of the entire model
Finally, let's unfreeze the base model and train the entire model end-to-end with a low
learning rate.
Importantly, although the base model becomes trainable, it is still running in
inference mode since we passed `training=False` when calling it when we built the
model. This means that the batch normalization layers inside won't update their batch
statistics. If they did, they would wreak havoc on the representations learned by the
model so far.
"""
# Unfreeze the base_model. Note that it keeps running in inference mode
# since we passed `training=False` when calling it. This means that
# the batchnorm layers will not update their batch statistics.
# This prevents the batchnorm layers from undoing all the training
# we've done so far.
base_model.trainable = True
model.summary(show_trainable=True)

# Recompile so the change to `trainable` takes effect, and use a very low
# learning rate to adapt the pre-trained weights only incrementally.
model.compile(
    optimizer=keras.optimizers.Adam(1e-5),  # Low learning rate
    loss=keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=[keras.metrics.BinaryAccuracy()],
)

epochs = 1
print("Fitting the end-to-end model")
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
"""
After 10 epochs, fine-tuning gains us a nice improvement here.
Let's evaluate the model on the test dataset:
"""
# Final held-out evaluation on the test split.
print("Test dataset evaluation")
model.evaluate(test_ds)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@guides@transfer_learning.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "jdswinbank/Comet",
"repo_path": "Comet_extracted/Comet-master/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# Comet documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 28 16:29:06 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
import datetime
from comet import __author__ as author
from comet import __version__ as version
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# autodoc pulls API documentation from docstrings; todo renders ``.. todo::``.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.todo"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
start_year = 2012
# Show a single year for the first release year, an em-dash range afterwards.
current_year = datetime.datetime.now().year
years = u"%d" % start_year if current_year == start_year else u"%d—%d" % (start_year, current_year)
copyright = u"%s, %s" % (years, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. alabaster ships with Sphinx itself.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar (the project logo shown on every page).
html_logo = "_static/dh106.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# The general index page is disabled for this project.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Cometdoc"
# -- Options for LaTeX output --------------------------------------------------
# All entries below are commented out, so Sphinx's defaults apply.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ("index", "Comet.tex", u"Comet Documentation", u"John Swinbank", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "comet", u"Comet Documentation", [u"John Swinbank"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "Comet",
        u"Comet Documentation",
        u"John Swinbank",
        "Comet",
        # Filled in: this was still the sphinx-quickstart placeholder
        # "One line description of project."
        "Comet: a VOEvent broker.",
        "Miscellaneous",
    ),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
jdswinbankREPO_NAMECometPATH_START.@Comet_extracted@Comet-master@docs@conf.py@.PATH_END.py
|
{
"filename": "intervals_between_obs_metric.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/rubin_sim/maf/maf_contrib/intervals_between_obs_metric.py",
"type": "Python"
}
|
# Example for IntervalsBetweenObsMetric
# Somayeh Khakpash - Lehigh University
# Last edited : 10/21/2020
# Calculates statistics (mean or median or standard deviation) of intervals
# between observations during simultaneous windows/Inter-seasonal gap of
# another survey.
# SurveyIntervals is the list of the survey observing window/Inter-seasonal
# gap intervals. It should be in the format:
# SurveyIntervals = [ [YYYY-MM-DD, YYYY-MM-DD] , [YYYY-MM-DD, YYYY-MM-DD] ,
# ... , [YYYY-MM-DD, YYYY-MM-DD] ]
# We are interested in calculating this metric in each of the LSST passbands.
# The difference between this metric and the VisitGapMetric metric is that
# VisitGapMetric calculates reduceFunc of gaps between observations of a
# data_slice throughout the whole
# baseline, but IntervalsBetweenObsMetric calculates the gaps between
# observations during another survey observing window.
# This metric, combined with survey footprint overlap, can determine how
# often another survey's footprint is observed by LSST during specific
# time intervals.
__all__ = ("IntervalsBetweenObsMetric",)
import numpy as np
from astropy.time import Time
from rubin_sim.maf.metrics import BaseMetric
class IntervalsBetweenObsMetric(BaseMetric):
    """Calculate a statistic of the gaps between observations that fall
    inside another survey's observing windows.

    Unlike `VisitGapMetric`, which considers gaps across the whole baseline,
    this metric only measures gaps between observations taken during the
    intervals given by `survey_intervals`.

    Parameters
    ----------
    survey_intervals : list of [str, str]
        Observing windows of the other survey, each a pair of ISO date
        strings ["YYYY-MM-DD", "YYYY-MM-DD"].
    stat : str
        Statistic to compute over the gaps: "mean", "median" or "std".
    metric_name : str, optional
        Name of the metric.
    time_col : str, optional
        Column containing the observation times (MJD).

    Raises
    ------
    ValueError
        If `stat` is not one of the supported statistics.
    """

    def __init__(
        self,
        survey_intervals,
        stat,
        metric_name="IntervalsBetweenObsMetric",
        time_col="observationStartMJD",
        **kwargs,
    ):
        # Validate up front: previously an unknown `stat` only surfaced as an
        # UnboundLocalError at the end of run().
        if stat not in ("mean", "median", "std"):
            raise ValueError("stat must be one of 'mean', 'median', 'std'; got %r" % (stat,))
        self.time_col = time_col
        self.metric_name = metric_name
        self.survey_intervals = survey_intervals
        self.stat = stat
        super().__init__(col=time_col, metric_name=metric_name, **kwargs)

    def run(self, data_slice, slice_point=None):
        """Return the requested statistic of the inter-observation gaps
        (in days) accumulated over all survey intervals."""
        data_slice.sort(order=self.time_col)
        # Gaps are computed between consecutive observations *within* each
        # interval, so gaps spanning two intervals never contribute.
        gaps = []
        for interval in self.survey_intervals:
            start_mjd = Time(interval[0] + " 00:00:00").mjd
            end_mjd = Time(interval[1] + " 00:00:00").mjd
            times_in_window = data_slice[self.time_col][
                np.where(
                    (data_slice[self.time_col] > start_mjd)
                    & (data_slice[self.time_col] < end_mjd)
                )[0]
            ]
            gaps.extend(np.diff(times_in_window).tolist())
        # No qualifying gaps: return NaN explicitly instead of triggering
        # numpy empty-slice RuntimeWarnings (the resulting value is the same).
        if len(gaps) == 0:
            return np.nan
        if self.stat == "mean":
            return np.mean(gaps)
        if self.stat == "median":
            return np.median(gaps)
        return np.std(gaps)
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@rubin_sim@maf@maf_contrib@intervals_between_obs_metric.py@.PATH_END.py
|
{
"filename": "utilities.py",
"repo_name": "astrolabsoftware/fink-science",
"repo_path": "fink-science_extracted/fink-science-master/tutorial/utilities.py",
"type": "Python"
}
|
# Copyright 2020 AstroLab Software
# Author: Julien Peloton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
def compute_delta(magpsf: np.ndarray) -> float:
    """ Compute the difference between 2 consecutive magnitude measurements,
    and return the last one.

    Parameters
    ----------
    magpsf: 1d array
        Vector of magnitude measurements from the most ancient
        to the most recent.

    Returns
    ----------
    out: float
        Difference between the last 2 measurements, or None if the
        difference cannot be computed (fewer than 2 measurements).
    """
    # Need at least two measurements to form a difference.
    if len(magpsf) <= 1:
        return None
    return np.diff(magpsf)[-1]
|
astrolabsoftwareREPO_NAMEfink-sciencePATH_START.@fink-science_extracted@fink-science-master@tutorial@utilities.py@.PATH_END.py
|
{
"filename": "_thickness.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolargl/marker/colorbar/_thickness.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ThicknessValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``scatterpolargl.marker.colorbar.thickness``:
    a non-negative number with the "calc" edit type by default."""

    def __init__(
        self,
        plotly_name="thickness",
        parent_name="scatterpolargl.marker.colorbar",
        **kwargs,
    ):
        # Pop the defaults out of kwargs so they are not passed twice to the
        # base validator when a caller overrides them.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolargl@marker@colorbar@_thickness.py@.PATH_END.py
|
{
"filename": "release_note.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/tools/releasing/release_note.py",
"type": "Python"
}
|
"""
Generate a release note template. The key parameters are
RELEASE, VERSION, MILESTONE BRANCH, and LAST_COMMIT_SHA.
LAST_COMMIT_SHA is the sha of the commit used to produce the previous
version. This is used to determine the time stamp where commits in the
current release begin.
Requires PyGitHub, dateparser and jinja2
"""
from collections import defaultdict
import datetime as dt
import os
import dateparser
from github import Github, GithubException
from jinja2 import Template
# Full release version
RELEASE = "0.14.1"
# The current milestone and short version
VERSION = MILESTONE = "0.14"
# This is the final commit from the previous release; commits after it are
# considered part of the current release.
LAST_COMMIT_SHA = "cc924783be1d46fd9802f8d559f6dfe7fb071ffa"
# Branch, usually main but can be a maintenance branch as well
BRANCH = "main"
# Provide the access token via the GITHUB_ACCESS_TOKEN environment variable
# to keep it out of the repo (the old comment said "command line").
ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
# Fail fast with a clear message if no GitHub token is available.
if not ACCESS_TOKEN:
    raise RuntimeError(
        "Must set environment variable GITHUB_ACCESS_TOKEN "
        "containing a valid GitHub access token before running "
        # Fixed: the adjacent literals previously rendered "runningthis".
        "this program."
    )
# Using an access token
g = Github(ACCESS_TOKEN)
# Get the repo
statsmodels = g.get_user("statsmodels").get_repo("statsmodels")
# Look up the modification time of the commit used to tag the previous release
last_modified = statsmodels.get_commit(LAST_COMMIT_SHA).commit.last_modified
last_modified = dateparser.parse(last_modified)
# Look for times greater than this time plus 1 second
first_commit_time = last_modified + dt.timedelta(seconds=1)
first_commit_time_iso = first_commit_time.isoformat()
# General search: statsmodels/statsmodels PRs, merged after the first commit
# time, into the target branch
query_parts = (
    "repo:statsmodels/statsmodels",
    "is:pr",
    "is:merged",
    f"merged:>{first_commit_time_iso}",
    f"base:{BRANCH}",
)
query = " ".join(query_parts)
merged_pull_data = []
merged_pulls = g.search_issues(query)
# Get the milestone for the current release or create if it does not exist
milestone = None
for ms in statsmodels.get_milestones():
    if ms.title == MILESTONE:
        milestone = ms
if milestone is None:
    description = f"Release {MILESTONE} issues and pull requests"
    milestone = statsmodels.create_milestone(
        MILESTONE, state="open", description=description
    )
# Get PR data and set the milestone if needed
for pull in merged_pulls:
    merged_pull_data.append(
        {
            "number": pull.number,
            "title": pull.title,
            "login": pull.user.login,
            "labels": pull.labels,
            "milestone": pull.milestone,
        }
    )
    # Re-assign the milestone on the PR itself if missing or different.
    if pull.milestone is None or pull.milestone != milestone:
        pull.edit(milestone=milestone)
merged_pull_data = sorted(merged_pull_data, key=lambda x: x["number"])
# Robust name resolutions using commits and GitHub lookup
names = defaultdict(set)
extra_names = set()
for pull in merged_pull_data:
    print("Reading commit data for PR#{}".format(pull["number"]))
    pr = statsmodels.get_pull(pull["number"])
    for commit in pr.get_commits():
        name = commit.commit.author.name
        if name and commit.author:
            try:
                names[commit.author.login].update([name])
            except GithubException:
                pass
        elif name:
            # NOTE(review): extra_names is collected here but never used
            # below — confirm whether this is intentional.
            extra_names.update([name])
for login in names:
    user = g.get_user(login)
    if user.name:
        names[login].update([user.name])
# Continue trying to resolve to human names; prefer names containing a
# space (i.e. "First Last") over bare GitHub logins.
contributors = []
for login in names:
    print(f"Reading user data for {login}")
    user_names = list(names[login])
    if len(user_names) == 1:
        name = user_names[0]
        if " " in name:
            name = name.title()
        contributors.append(name)
    else:
        valid = [name for name in user_names if " " in name]
        if len(valid) == 0:
            contributors.append(login)
        else:
            contributors.append(valid[0].title())
contributors = sorted(set(contributors))
# Get all issues closed since first_commit_time_iso
query_parts = (
    "repo:statsmodels/statsmodels",
    "is:issue",
    "is:closed",
    f"closed:>{first_commit_time_iso}",
)
query = " ".join(query_parts)
closed_issues = g.search_issues(query)
# Set the milestone for these issues if needed
for issue in closed_issues:
    if issue.milestone is None or issue.milestone != milestone:
        issue.edit(milestone=milestone)
issues_closed = closed_issues.totalCount
# Create a What's New Dictionary to automatically populate the template
# Structure is dict[module, dict[pr number, sanitized title]]
whats_new = defaultdict(dict)
for pull in merged_pull_data:
    if pull["labels"]:
        # Drop type-* and prio-* labels; only component labels remain.
        labels = [
            lab.name
            for lab in pull["labels"]
            if not (lab.name.startswith("type") or lab.name.startswith("prio"))
        ]
        labels = sorted(labels)
        # Prefer a more specific component over maintenance/docs labels.
        if "maintenance" in labels and len(labels) > 1:
            labels.remove("maintenance")
        elif "comp-docs" in labels and len(labels) > 1:
            labels.remove("comp-docs")
        for label in labels:
            # "comp-tsa-arima" -> "tsa.arima"
            label = label.split("comp-")[-1].replace("-", ".")
            number = pull["number"]
            title = pull["title"]
            # Strip a leading "PREFIX: " and capitalize the first letter.
            if ": " in title:
                title = ": ".join(title.split(": ")[1:])
            title = title[:1].upper() + title[1:]
            whats_new[label][number] = title
whats_new = {key: whats_new[key] for key in sorted(whats_new)}
# Variables for the template
variables = {
    "milestone": MILESTONE,
    "release": RELEASE,
    "version": VERSION,
    "issues_closed": issues_closed,
    "pulls_merged": len(merged_pull_data),
    "contributors": contributors,
    "pulls": merged_pull_data,
    "whats_new": whats_new,
}
# Read the template and generate the output
with open("release_note.tmpl", encoding="utf-8") as tmpl:
    tmpl_data = tmpl.read()
t = Template(tmpl_data)
rendered = t.render(**variables)
file_name = f"version{VERSION}.rst"
with open(file_name, encoding="utf-8", mode="w") as out:
    out.write(rendered)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@tools@releasing@release_note.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "RadioAstronomySoftwareGroup/pyuvdata",
"repo_path": "pyuvdata_extracted/pyuvdata-main/src/pyuvdata/__init__.py",
"type": "Python"
}
|
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Init file for pyuvdata."""
import contextlib
import warnings
from importlib.metadata import PackageNotFoundError, version
from pathlib import Path
from setuptools_scm import get_version
# copy this function here from setup.py.
# Copying code is terrible, but it's better than altering the python path in setup.py.
def branch_scheme(version):  # pragma: nocover
    """
    Local version scheme that adds the branch name for absolute reproducibility.

    If and when this is added to setuptools_scm this function can be removed.
    """
    # Exact tag (or no node info available): only a date-stamped dirty marker.
    if version.exact or version.node is None:
        return version.format_choice("", "+d{time:{time_format}}", time_format="%Y%m%d")
    # Development build: always include the node; include the branch name
    # too unless we are on "main".
    if version.branch == "main":
        return version.format_choice("+{node}", "+{node}.dirty")
    return version.format_choice("+{node}.{branch}", "+{node}.{branch}.dirty")
try: # pragma: nocover
    # get accurate version for developer installs
    version_str = get_version(Path(__file__).parent.parent, local_scheme=branch_scheme)
    __version__ = version_str
except (LookupError, ImportError): # pragma: no cover
    # Set the version automatically from the package details.
    # don't set anything if the package is not installed
    with contextlib.suppress(PackageNotFoundError):
        __version__ = version("pyuvdata")
# Filter annoying Cython warnings that serve no good purpose. see numpy#432
# needs to be done before the imports to work properly
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
from .analytic_beam import AiryBeam, GaussianBeam, ShortDipoleBeam, UniformBeam # noqa
from .beam_interface import BeamInterface # noqa
from .telescopes import Telescope # noqa
from .telescopes import get_telescope # noqa # NB: get_telescopes is deprecated
from .uvbeam import UVBeam # noqa
from .uvcal import UVCal # noqa
from .uvdata import FastUVH5Meta # noqa
from .uvdata import UVData # noqa
from .uvflag import UVFlag # noqa
__all__ = [
    "UVData",
    "FastUVH5Meta",
    "UVCal",
    "UVFlag",
    "UVBeam",
    "BeamInterface",
    "AiryBeam",
    "GaussianBeam",
    "ShortDipoleBeam",
    "UniformBeam",
    "Telescope",
    "get_telescope",
]
# adapted from https://github.com/astropy/astropy/__init__.py
# please consult astropy/__init__.py for clarification on logic details
# Cleanup the top-level namespace.
# Delete everything that is not in __all__, a magic function,
# or is a submodule of this package
from types import ModuleType as __module_type__ # noqa
for varname in dir():
    # Keep: names listed in __all__, dunder names, and submodules of this
    # package (public module objects whose __name__ starts with "pyuvdata.").
    if not (
        varname in __all__
        or (varname.startswith("__") and varname.endswith("__"))
        or (
            varname[0] != "_"
            and isinstance(locals()[varname], __module_type__)
            and locals()[varname].__name__.startswith(__name__ + ".")
        )
    ):
        del locals()[varname]
del varname, __module_type__
|
RadioAstronomySoftwareGroupREPO_NAMEpyuvdataPATH_START.@pyuvdata_extracted@pyuvdata-main@src@pyuvdata@__init__.py@.PATH_END.py
|
{
"filename": "_ufunc.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_typing/_ufunc.py",
"type": "Python"
}
|
from .. import ufunc

# At runtime every parameterized ufunc alias resolves to the plain
# `numpy.ufunc` type; the Nin/Nout distinction in the names only matters
# to static type checkers.
_UFunc_Nin1_Nout1 = ufunc
_UFunc_Nin2_Nout1 = ufunc
_UFunc_Nin1_Nout2 = ufunc
_UFunc_Nin2_Nout2 = ufunc
_GUFunc_Nin2_Nout1 = ufunc
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_typing@_ufunc.py@.PATH_END.py
|
{
"filename": "test_status.py",
"repo_name": "mpi4py/mpi4py",
"repo_path": "mpi4py_extracted/mpi4py-master/test/test_status.py",
"type": "Python"
}
|
from mpi4py import MPI
import mpiunittest as unittest
class TestStatus(unittest.TestCase):
    """Unit tests for MPI.Status: default field values, count/element
    accessors, cancellation flags, Python-level properties, the copy
    constructor, and pickling."""

    def setUp(self):
        self.STATUS = MPI.Status()

    def tearDown(self):
        self.STATUS = None

    def testDefaultFieldValues(self):
        # A freshly constructed Status carries the wildcard/success defaults.
        self.assertEqual(self.STATUS.Get_source(), MPI.ANY_SOURCE)
        self.assertEqual(self.STATUS.Get_tag(), MPI.ANY_TAG)
        self.assertEqual(self.STATUS.Get_error(), MPI.SUCCESS)

    def testGetCount(self):
        count = self.STATUS.Get_count(MPI.BYTE)
        self.assertEqual(count, 0)

    def testGetElements(self):
        elements = self.STATUS.Get_elements(MPI.BYTE)
        self.assertEqual(elements, 0)

    def testSetElements(self):
        try:
            self.STATUS.Set_elements(MPI.BYTE, 7)
            count = self.STATUS.Get_count(MPI.BYTE)
            self.assertEqual(count, 7)
            elements = self.STATUS.Get_elements(MPI.BYTE)
            self.assertEqual(elements, 7)
        except NotImplementedError:
            # An MPI >= 2.0 implementation must provide this call, so a
            # NotImplementedError there is a real failure; otherwise skip.
            if MPI.Get_version() >= (2,0): raise
            self.skipTest('mpi-status-set_elements')

    def testIsCancelled(self):
        flag = self.STATUS.Is_cancelled()
        self.assertIs(type(flag), bool)
        self.assertFalse(flag)

    def testSetCancelled(self):
        try:
            self.STATUS.Set_cancelled(True)
            flag = self.STATUS.Is_cancelled()
            self.assertTrue(flag)
        except NotImplementedError:
            # Required by MPI >= 2.0; skip on older implementations.
            if MPI.Get_version() >= (2,0): raise
            self.skipTest('mpi-status-set_cancelled')

    def testPyProps(self):
        # The source/tag/error properties mirror the Get_* accessors.
        self.assertEqual(self.STATUS.Get_source(), self.STATUS.source)
        self.assertEqual(self.STATUS.Get_tag(), self.STATUS.tag)
        self.assertEqual(self.STATUS.Get_error(), self.STATUS.error)
        self.STATUS.source = 1
        self.STATUS.tag = 2
        self.STATUS.error = MPI.ERR_ARG
        self.assertEqual(self.STATUS.source, 1)
        self.assertEqual(self.STATUS.tag, 2)
        self.assertEqual(self.STATUS.error, MPI.ERR_ARG)
        try:
            self.assertIs(type(self.STATUS.count), int)
            self.assertEqual(self.STATUS.count, 0)
            self.STATUS.count = 7
            self.assertEqual(self.STATUS.count, 7)
            self.STATUS.count = 0
        except NotImplementedError:
            if MPI.Get_version() >= (2,0): raise
        try:
            self.assertIs(type(self.STATUS.cancelled), bool)
            self.assertFalse(self.STATUS.cancelled)
            self.STATUS.cancelled = True
            self.assertTrue(self.STATUS.cancelled)
            self.STATUS.cancelled = False
        except NotImplementedError:
            if MPI.Get_version() >= (2,0): raise

    def testConstructor(self):
        # Status() only accepts another Status (or nothing).
        self.assertRaises(TypeError, MPI.Status, 123)
        self.assertRaises(TypeError, MPI.Status, "abc")

    def testCopyConstructor(self):
        self.STATUS.source = 1
        self.STATUS.tag = 2
        self.STATUS.error = MPI.ERR_ARG
        status = MPI.Status(self.STATUS)
        self.assertEqual(status.source, 1)
        self.assertEqual(status.tag, 2)
        self.assertEqual(status.error, MPI.ERR_ARG)
        # Optional fields: set them if supported, then check the copy.
        try:
            self.STATUS.Set_elements(MPI.BYTE, 7)
        except NotImplementedError:
            pass
        try:
            self.STATUS.Set_cancelled(True)
        except NotImplementedError:
            pass
        status = MPI.Status(self.STATUS)
        try:
            count = status.Get_count(MPI.BYTE)
            elems = status.Get_elements(MPI.BYTE)
            self.assertEqual(count, 7)
            self.assertEqual(elems, 7)
        except NotImplementedError:
            pass
        try:
            flag = status.Is_cancelled()
            self.assertTrue(flag)
        except NotImplementedError:
            pass

    def testPickle(self):
        from pickle import dumps, loads
        self.STATUS.source = 1
        self.STATUS.tag = 2
        self.STATUS.error = MPI.ERR_ARG
        # A pickle round-trip must preserve the public fields.
        status = loads(dumps(self.STATUS))
        self.assertEqual(status.source, 1)
        self.assertEqual(status.tag, 2)
        self.assertEqual(status.error, MPI.ERR_ARG)

if __name__ == '__main__':
    unittest.main()
|
mpi4pyREPO_NAMEmpi4pyPATH_START.@mpi4py_extracted@mpi4py-master@test@test_status.py@.PATH_END.py
|
{
"filename": "test_gs_crt.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/tests/adv/test_gs_crt.py",
"type": "Python"
}
|
"""
test_gs_crt.py
Author: Jordan Mirocha
Affiliation: UCLA
Created on: Wed May 11 09:46:05 PDT 2016
Description:
"""
import os

import ares
import matplotlib.pyplot as pl
import numpy as np
# Parameter dictionary for the Global21cm simulation. The "{1}" suffix
# addresses source population #1 — presumably an X-ray source population
# with a multi-color disk ('mcd') spectrum; confirm against ares docs.
pars = \
{
    'pop_sed{1}': 'mcd',
    'pop_alpha{1}': -1.5,
    'pop_Emin{1}': 2e2,
    'pop_Emax{1}': 3e4,
    'pop_EminNorm{1}': 5e2,
    'pop_EmaxNorm{1}': 8e3,
    #'pop_logN{1}': -np.inf,
    'pop_solve_rte{1}': True,
    'pop_tau_Nz{1}': 1e3,
    'pop_approx_tau{1}': 'neutral',
    # Force optically thin to overestimate heating/ionization?
    'final_redshift': 5,
    'initial_redshift': 50,
    'problem_type': 101.2
}
ax1 = None; ax2 = None
# NOTE(review): 'fidicual' is a typo for 'fiducial' in a plot label; left
# unchanged here since it is a runtime string (plot output).
labels = ['fidicual', 'fiducial+RT', 'fiducial+OTRT']
# Run three variants: no RT, RT with neutral-medium opacity, and RT with
# the optically-thin approximation (third pass flips pop_approx_tau).
for i, solve_rte in enumerate([False, True, True]):
    pars['pop_solve_rte{1}'] = solve_rte
    if i == 2:
        pars['pop_approx_tau{1}'] = True
    sim = ares.simulations.Global21cm(**pars)
    sim.run()
    # Accumulate all runs on the same pair of axes for comparison.
    ax1 = sim.GlobalSignature(fig=1, label=labels[i], ax=ax1)
    ax2 = sim.IonizationHistory(fig=2, ax=ax2)
ax1.legend(loc='lower right')
pl.show()
# Save both figures next to this script.
for i in range(1,3):
    pl.figure(i)
    # os.path.splitext strips the ".py" suffix correctly; the previous
    # __file__.rstrip('.py') removed *characters* from the set {'.','p','y'}
    # and would corrupt any filename ending in those letters.
    pl.savefig('{0!s}_{1}.png'.format(os.path.splitext(__file__)[0], i))

#pl.close()
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@tests@adv@test_gs_crt.py@.PATH_END.py
|
{
"filename": "_xaxis.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/layout/scene/_xaxis.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class XAxis(_BaseLayoutHierarchyType):
    """Auto-generated layout.scene.xaxis object: holds every property of
    the x axis of a 3D scene. Each entry in ``_valid_props`` has a
    matching property pair below that reads/writes ``self[name]``."""

    # class properties
    # --------------------
    _parent_path_str = "layout.scene"
    _path_str = "layout.scene.xaxis"
    _valid_props = {
        "autorange",
        "autotypenumbers",
        "backgroundcolor",
        "calendar",
        "categoryarray",
        "categoryarraysrc",
        "categoryorder",
        "color",
        "dtick",
        "exponentformat",
        "gridcolor",
        "gridwidth",
        "hoverformat",
        "linecolor",
        "linewidth",
        "minexponent",
        "mirror",
        "nticks",
        "range",
        "rangemode",
        "separatethousands",
        "showaxeslabels",
        "showbackground",
        "showexponent",
        "showgrid",
        "showline",
        "showspikes",
        "showticklabels",
        "showtickprefix",
        "showticksuffix",
        "spikecolor",
        "spikesides",
        "spikethickness",
        "tick0",
        "tickangle",
        "tickcolor",
        "tickfont",
        "tickformat",
        "tickformatstopdefaults",
        "tickformatstops",
        "ticklen",
        "tickmode",
        "tickprefix",
        "ticks",
        "ticksuffix",
        "ticktext",
        "ticktextsrc",
        "tickvals",
        "tickvalssrc",
        "tickwidth",
        "title",
        "titlefont",
        "type",
        "visible",
        "zeroline",
        "zerolinecolor",
        "zerolinewidth",
    }
# autorange
# ---------
@property
def autorange(self):
"""
Determines whether or not the range of this axis is computed in
relation to the input data. See `rangemode` for more info. If
`range` is provided, then `autorange` is set to False.
The 'autorange' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed']
Returns
-------
Any
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
# autotypenumbers
# ---------------
@property
def autotypenumbers(self):
"""
Using "strict" a numeric string in trace data is not converted
to a number. Using *convert types* a numeric string in trace
data may be treated as a number during automatic axis `type`
detection. Defaults to layout.autotypenumbers.
The 'autotypenumbers' property is an enumeration that may be specified as:
- One of the following enumeration values:
['convert types', 'strict']
Returns
-------
Any
"""
return self["autotypenumbers"]
@autotypenumbers.setter
def autotypenumbers(self, val):
self["autotypenumbers"] = val
# backgroundcolor
# ---------------
@property
def backgroundcolor(self):
"""
Sets the background color of this axis' wall.
The 'backgroundcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["backgroundcolor"]
@backgroundcolor.setter
def backgroundcolor(self, val):
self["backgroundcolor"] = val
# calendar
# --------
@property
def calendar(self):
"""
Sets the calendar system to use for `range` and `tick0` if this
is a date axis. This does not set the calendar for interpreting
data on this axis, that's specified in the trace or via the
global `layout.calendar`
The 'calendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["calendar"]
@calendar.setter
def calendar(self, val):
self["calendar"] = val
# categoryarray
# -------------
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
# categoryarraysrc
# ----------------
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
categoryarray .
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
# categoryorder
# -------------
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`. Set `categoryorder` to *total ascending* or
*total descending* if order should be determined by the
numerical order of the values. Similarly, the order can be
determined by the min, max, sum, mean or median of all the
values.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array', 'total ascending', 'total descending', 'min
ascending', 'min descending', 'max ascending', 'max
descending', 'sum ascending', 'sum descending', 'mean
ascending', 'mean descending', 'median ascending', 'median
descending']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
# color
# -----
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# gridcolor
# ---------
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
# gridwidth
# ---------
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
# hoverformat
# -----------
@property
def hoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-time-format#locale_format We add
one item to d3's date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
The 'hoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hoverformat"]
@hoverformat.setter
def hoverformat(self, val):
self["hoverformat"] = val
# linecolor
# ---------
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
# linewidth
# ---------
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# mirror
# ------
@property
def mirror(self):
"""
Determines if the axis lines or/and ticks are mirrored to the
opposite side of the plotting area. If True, the axis lines are
mirrored. If "ticks", the axis lines and ticks are mirrored. If
False, mirroring is disable. If "all", axis lines are mirrored
on all shared-axes subplots. If "allticks", axis lines and
ticks are mirrored on all shared-axes subplots.
The 'mirror' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, 'ticks', False, 'all', 'allticks']
Returns
-------
Any
"""
return self["mirror"]
@mirror.setter
def mirror(self, val):
self["mirror"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# range
# -----
@property
def range(self):
"""
Sets the range of this axis. If the axis `type` is "log", then
you must take the log of your desired range (e.g. to set the
range from 1 to 100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings, like date data,
though Date objects and unix milliseconds will be accepted and
converted to strings. If the axis `type` is "category", it
should be numbers, using the scale where each category is
assigned a serial number from zero in the order it appears.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property accepts values of any type
(1) The 'range[1]' property accepts values of any type
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
# rangemode
# ---------
@property
def rangemode(self):
"""
If "normal", the range is computed in relation to the extrema
of the input data. If *tozero*`, the range extends to 0,
regardless of the input data If "nonnegative", the range is
non-negative, regardless of the input data. Applies only to
linear axes.
The 'rangemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'tozero', 'nonnegative']
Returns
-------
Any
"""
return self["rangemode"]
@rangemode.setter
def rangemode(self, val):
self["rangemode"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showaxeslabels
# --------------
@property
def showaxeslabels(self):
"""
Sets whether or not this axis is labeled
The 'showaxeslabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showaxeslabels"]
@showaxeslabels.setter
def showaxeslabels(self, val):
self["showaxeslabels"] = val
# showbackground
# --------------
@property
def showbackground(self):
"""
Sets whether or not this axis' wall has a background color.
The 'showbackground' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showbackground"]
@showbackground.setter
def showbackground(self, val):
self["showbackground"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showgrid
# --------
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
# showline
# --------
@property
def showline(self):
"""
Determines whether or not a line bounding this axis is drawn.
The 'showline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showline"]
@showline.setter
def showline(self, val):
self["showline"] = val
# showspikes
# ----------
@property
def showspikes(self):
"""
Sets whether or not spikes starting from data points to this
axis' wall are shown on hover.
The 'showspikes' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showspikes"]
@showspikes.setter
def showspikes(self, val):
self["showspikes"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# spikecolor
# ----------
@property
def spikecolor(self):
"""
Sets the color of the spikes.
The 'spikecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["spikecolor"]
@spikecolor.setter
def spikecolor(self, val):
self["spikecolor"] = val
# spikesides
# ----------
@property
def spikesides(self):
"""
Sets whether or not spikes extending from the projection data
points to this axis' wall boundaries are shown on hover.
The 'spikesides' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["spikesides"]
@spikesides.setter
def spikesides(self, val):
self["spikesides"] = val
# spikethickness
# --------------
@property
def spikethickness(self):
"""
Sets the thickness (in px) of the spikes.
The 'spikethickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["spikethickness"]
@spikethickness.setter
def spikethickness(self, val):
self["spikethickness"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the tick font.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.xaxis.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.scene.xaxis.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-time-format#locale_format We add
one item to d3's date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.scene.xaxis.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.layout.scene.xaxis.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.layout.scene.xaxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.scene.xaxis.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.xaxis.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.layout.scene.xaxis.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
    """
    Axis values at which ticks appear. Only honored when
    `tickmode` is set to "array"; used together with `ticktext`.

    The 'tickvals' property is an array and may be given as a
    tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    values = self["tickvals"]
    return values

@tickvals.setter
def tickvals(self, val):
    # Delegate validation and storage to the item protocol.
    self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
    """
    Chart Studio Cloud source reference for `tickvals`.

    The 'tickvalssrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    source = self["tickvalssrc"]
    return source

@tickvalssrc.setter
def tickvalssrc(self, val):
    # Delegate validation and storage to the item protocol.
    self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
    """
    Width of the tick marks, in pixels.

    The 'tickwidth' property is a number and may be given as an
    int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    width = self["tickwidth"]
    return width

@tickwidth.setter
def tickwidth(self, val):
    # Delegate validation and storage to the item protocol.
    self["tickwidth"] = val
# title
# -----
@property
def title(self):
    """
    Axis title settings.

    The 'title' property is an instance of Title and may be given
    as:
      - An instance of
        :class:`plotly.graph_objs.layout.scene.xaxis.Title`
      - A dict of string/value properties that will be passed to
        the Title constructor

        Supported dict properties:

            font
                Sets this axis' title font. Note that the
                title's font used to be customized by the now
                deprecated `titlefont` attribute.
            text
                Sets the title of this axis. Note that before
                the existence of `title.text`, the title's
                contents used to be defined as the `title`
                attribute itself. This behavior has been
                deprecated.

    Returns
    -------
    plotly.graph_objs.layout.scene.xaxis.Title
    """
    title_obj = self["title"]
    return title_obj

@title.setter
def title(self, val):
    # Delegate validation and storage to the item protocol.
    self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
    """
    Deprecated: please use layout.scene.xaxis.title.font instead.

    Sets this axis' title font (the title's font used to be
    customized by the now-deprecated `titlefont` attribute).

    The 'font' property is an instance of Font and may be given
    as:
      - An instance of
        :class:`plotly.graph_objs.layout.scene.xaxis.title.Font`
      - A dict of string/value properties that will be passed to
        the Font constructor

        Supported dict properties:

            color
            family
                HTML font family - the typeface applied by the
                web browser, which can only use fonts available
                on the system it runs on. Multiple families may
                be given, comma-separated, in order of
                preference. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server where only a select
                set of fonts is installed: "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old Standard
                TT", "Open Sans", "Overpass", "PT Sans Narrow",
                "Raleway", "Times New Roman".
            size

    Returns
    -------

    """
    font = self["titlefont"]
    return font

@titlefont.setter
def titlefont(self, val):
    # Delegate validation and storage to the item protocol; the
    # value is remapped to title.font via _mapped_properties.
    self["titlefont"] = val
# type
# ----
@property
def type(self):
    """
    Axis type. By default, plotly attempts to determine the axis
    type by looking into the data of the traces that reference
    this axis.

    The 'type' property is an enumeration restricted to one of:
        ['-', 'linear', 'log', 'date', 'category']

    Returns
    -------
    Any
    """
    axis_type = self["type"]
    return axis_type

@type.setter
def type(self, val):
    # Delegate validation and storage to the item protocol.
    self["type"] = val
# visible
# -------
@property
def visible(self):
    """
    Single toggle that hides the axis while preserving
    interaction such as dragging. Default is true when a cheater
    plot is present on the axis, otherwise false.

    The 'visible' property must be a bool (either True or False).

    Returns
    -------
    bool
    """
    shown = self["visible"]
    return shown

@visible.setter
def visible(self, val):
    # Delegate validation and storage to the item protocol.
    self["visible"] = val
# zeroline
# --------
@property
def zeroline(self):
    """
    Whether a line is drawn along the 0 value of this axis. When
    True, the zero line is drawn on top of the grid lines.

    The 'zeroline' property must be a bool (either True or
    False).

    Returns
    -------
    bool
    """
    flag = self["zeroline"]
    return flag

@zeroline.setter
def zeroline(self, val):
    # Delegate validation and storage to the item protocol.
    self["zeroline"] = val
# zerolinecolor
# -------------
@property
def zerolinecolor(self):
    """
    Line color of the zero line.

    The 'zerolinecolor' property is a color and may be given as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'aliceblue', 'crimson',
        'steelblue', ... — any color name in the CSS
        specification)

    Returns
    -------
    str
    """
    color = self["zerolinecolor"]
    return color

@zerolinecolor.setter
def zerolinecolor(self, val):
    # Delegate validation and storage to the item protocol.
    self["zerolinecolor"] = val
# zerolinewidth
# -------------
@property
def zerolinewidth(self):
    """
    Width of the zero line, in pixels.

    The 'zerolinewidth' property is a number and may be given as
    an int or float.

    Returns
    -------
    int|float
    """
    width = self["zerolinewidth"]
    return width

@zerolinewidth.setter
def zerolinewidth(self, val):
    # Delegate validation and storage to the item protocol.
    self["zerolinewidth"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Human-readable description of every property on this object.
    # NOTE(review): this string is runtime data (presumably consumed
    # by the generated class docstring/help machinery — confirm), so
    # its content is left byte-identical.
    return """\
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis, that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
categoryarray .
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean or median of all the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disable. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If *tozero*`, the range
extends to 0, regardless of the input data If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.scene.xa
xis.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.layout.scen
e.xaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.scene.xaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.xaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.xaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determined the axis type by looking into the data of
the traces that referenced the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn at along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
"""

# Maps the deprecated flat `titlefont` property onto the nested
# `title.font` property path.
_mapped_properties = {"titlefont": ("title", "font")}
def __init__(
self,
arg=None,
autorange=None,
autotypenumbers=None,
backgroundcolor=None,
calendar=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
color=None,
dtick=None,
exponentformat=None,
gridcolor=None,
gridwidth=None,
hoverformat=None,
linecolor=None,
linewidth=None,
minexponent=None,
mirror=None,
nticks=None,
range=None,
rangemode=None,
separatethousands=None,
showaxeslabels=None,
showbackground=None,
showexponent=None,
showgrid=None,
showline=None,
showspikes=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
spikecolor=None,
spikesides=None,
spikethickness=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
type=None,
visible=None,
zeroline=None,
zerolinecolor=None,
zerolinewidth=None,
**kwargs
):
"""
Construct a new XAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.XAxis`
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis, that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
categoryarray .
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean or median of all the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disable. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If *tozero*`, the range
extends to 0, regardless of the input data If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.scene.xa
xis.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.layout.scen
e.xaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.scene.xaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.xaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.xaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determined the axis type by looking into the data of
the traces that referenced the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn at along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
Returns
-------
XAxis
"""
super(XAxis, self).__init__("xaxis")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.XAxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.XAxis`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autorange", None)
_v = autorange if autorange is not None else _v
if _v is not None:
self["autorange"] = _v
_v = arg.pop("autotypenumbers", None)
_v = autotypenumbers if autotypenumbers is not None else _v
if _v is not None:
self["autotypenumbers"] = _v
_v = arg.pop("backgroundcolor", None)
_v = backgroundcolor if backgroundcolor is not None else _v
if _v is not None:
self["backgroundcolor"] = _v
_v = arg.pop("calendar", None)
_v = calendar if calendar is not None else _v
if _v is not None:
self["calendar"] = _v
_v = arg.pop("categoryarray", None)
_v = categoryarray if categoryarray is not None else _v
if _v is not None:
self["categoryarray"] = _v
_v = arg.pop("categoryarraysrc", None)
_v = categoryarraysrc if categoryarraysrc is not None else _v
if _v is not None:
self["categoryarraysrc"] = _v
_v = arg.pop("categoryorder", None)
_v = categoryorder if categoryorder is not None else _v
if _v is not None:
self["categoryorder"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("gridcolor", None)
_v = gridcolor if gridcolor is not None else _v
if _v is not None:
self["gridcolor"] = _v
_v = arg.pop("gridwidth", None)
_v = gridwidth if gridwidth is not None else _v
if _v is not None:
self["gridwidth"] = _v
_v = arg.pop("hoverformat", None)
_v = hoverformat if hoverformat is not None else _v
if _v is not None:
self["hoverformat"] = _v
_v = arg.pop("linecolor", None)
_v = linecolor if linecolor is not None else _v
if _v is not None:
self["linecolor"] = _v
_v = arg.pop("linewidth", None)
_v = linewidth if linewidth is not None else _v
if _v is not None:
self["linewidth"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("mirror", None)
_v = mirror if mirror is not None else _v
if _v is not None:
self["mirror"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("range", None)
_v = range if range is not None else _v
if _v is not None:
self["range"] = _v
_v = arg.pop("rangemode", None)
_v = rangemode if rangemode is not None else _v
if _v is not None:
self["rangemode"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showaxeslabels", None)
_v = showaxeslabels if showaxeslabels is not None else _v
if _v is not None:
self["showaxeslabels"] = _v
_v = arg.pop("showbackground", None)
_v = showbackground if showbackground is not None else _v
if _v is not None:
self["showbackground"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showgrid", None)
_v = showgrid if showgrid is not None else _v
if _v is not None:
self["showgrid"] = _v
_v = arg.pop("showline", None)
_v = showline if showline is not None else _v
if _v is not None:
self["showline"] = _v
_v = arg.pop("showspikes", None)
_v = showspikes if showspikes is not None else _v
if _v is not None:
self["showspikes"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("spikecolor", None)
_v = spikecolor if spikecolor is not None else _v
if _v is not None:
self["spikecolor"] = _v
_v = arg.pop("spikesides", None)
_v = spikesides if spikesides is not None else _v
if _v is not None:
self["spikesides"] = _v
_v = arg.pop("spikethickness", None)
_v = spikethickness if spikethickness is not None else _v
if _v is not None:
self["spikethickness"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("zeroline", None)
_v = zeroline if zeroline is not None else _v
if _v is not None:
self["zeroline"] = _v
_v = arg.pop("zerolinecolor", None)
_v = zerolinecolor if zerolinecolor is not None else _v
if _v is not None:
self["zerolinecolor"] = _v
_v = arg.pop("zerolinewidth", None)
_v = zerolinewidth if zerolinewidth is not None else _v
if _v is not None:
self["zerolinewidth"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@layout@scene@_xaxis.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/legend/font/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``layout.legend.font.family`` property."""

    def __init__(
        self, plotly_name="family", parent_name="layout.legend.font", **kwargs
    ):
        # Pop validator defaults out of kwargs so caller-supplied values win.
        edit_type = kwargs.pop("edit_type", "legend")
        no_blank = kwargs.pop("no_blank", True)
        role = kwargs.pop("role", "style")
        strict = kwargs.pop("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            no_blank=no_blank,
            role=role,
            strict=strict,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@legend@font@_family.py@.PATH_END.py
|
{
"filename": "figtable.py",
"repo_name": "xraypy/xraylarch",
"repo_path": "xraylarch_extracted/xraylarch-master/doc/sphinx/ext/figtable.py",
"type": "Python"
}
|
"""
Adds a new directive called 'figtable' that creates a figure
around a table.
"""
from docutils import nodes
import docutils.parsers.rst.directives as directives
from docutils.parsers.rst import Directive
from sphinx import addnodes
class figtable(nodes.General, nodes.Element):
    # Custom docutils node marking a table wrapped in a figure-like
    # container; rendered by the visit/depart handlers registered in setup().
    pass
def visit_figtable_node(self, node):
    # Text builder: figtable adds no markup, so emit nothing on entry.
    pass
def depart_figtable_node(self, node):
    # Text builder: nothing to close on exit either.
    pass
def visit_figtable_tex(self, node):
    """Open the LaTeX environment for a figtable node.

    Emits a ``table`` environment when the ``nofig`` option was given and a
    floating ``figure`` environment otherwise.
    """
    opening = ('\n\n\\begin{table}\n\\capstart\n\\begin{center}\n'
               if node['nofig']
               else '\n\n\\begin{figure}[tbp]\n\\capstart\n\\begin{center}\n')
    self.body.append(opening)
def depart_figtable_tex(self, node):
    """Close the LaTeX environment opened in ``visit_figtable_tex``."""
    closing = ('\n\\end{center}\n\\end{table}\n'
               if node['nofig']
               else '\n\\end{center}\n\\end{figure}\n')
    self.body.append(closing)
def visit_figtable_html(self, node):
    """Open the HTML wrapper for a figtable node: a centered figure div."""
    div_open = self.starttag(node, 'div', **{'class': 'figure align-center'})
    self.body.append(div_open + '<center>')
def depart_figtable_html(self, node):
    """Close the HTML wrapper opened in ``visit_figtable_html``."""
    self.body.append('</center></div>')
class FigTableDirective(Directive):
    """reST directive ``figtable``: wraps a table in a figtable node so it
    renders inside a figure/table environment with an optional caption,
    label and LaTeX column spec."""
    has_content = True
    optional_arguments = 5
    final_argument_whitespace = True
    # Supported options; 'nofig' is a flag selecting a LaTeX table
    # environment instead of a floating figure.
    option_spec = {'label': directives.uri,
                   'spec': directives.unchanged,
                   'caption': directives.unchanged,
                   'alt': directives.unchanged,
                   'nofig': directives.flag}
    def run(self):
        """Build and return the figtable node tree for this directive."""
        label = self.options.get('label', None)
        spec = self.options.get('spec', None)
        caption = self.options.get('caption', None)
        alt = self.options.get('alt', None)
        nofig = 'nofig' in self.options
        # The label doubles as the node id so cross-references resolve.
        figtable_node = figtable('', ids=[label] if label is not None else [])
        figtable_node['nofig'] = nofig
        if spec is not None:
            # LaTeX-only column specification for the wrapped table.
            table_spec_node = addnodes.tabular_col_spec()
            table_spec_node['spec'] = spec
            figtable_node.append(table_spec_node)
        # Parse the directive body; the first child is expected to be the table.
        node = nodes.Element()
        self.state.nested_parse(self.content, self.content_offset, node)
        tablenode = node[0]
        if alt is not None:
            tablenode['alt'] = alt
        figtable_node.append(tablenode)
        if caption is not None:
            caption_node = nodes.caption('', '', nodes.Text(caption))
            figtable_node.append(caption_node)
        if label is not None:
            # Explicit target node so ``:ref:`` links can point at the label.
            targetnode = nodes.target('', '', ids=[label])
            figtable_node.append(targetnode)
        return [figtable_node]
def setup(app):
    # Register the figtable node with per-builder render handlers and expose
    # the ``figtable`` directive to reST documents.
    app.add_node(figtable,
                 html=(visit_figtable_html, depart_figtable_html),
                 singlehtml=(visit_figtable_html, depart_figtable_html),
                 latex=(visit_figtable_tex, depart_figtable_tex),
                 text=(visit_figtable_node, depart_figtable_node))
    app.add_directive('figtable', FigTableDirective)
|
xraypyREPO_NAMExraylarchPATH_START.@xraylarch_extracted@xraylarch-master@doc@sphinx@ext@figtable.py@.PATH_END.py
|
{
"filename": "orca_defaults.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/_plotly_future_/orca_defaults.py",
"type": "Python"
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@_plotly_future_@orca_defaults.py@.PATH_END.py
|
|
{
"filename": "kernel.py",
"repo_name": "ratt-ru/QuartiCal",
"repo_path": "QuartiCal_extracted/QuartiCal-main/quartical/gains/amplitude/kernel.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import numpy as np
from numba import prange, njit
from numba.extending import overload
from quartical.utils.numba import (coerce_literal,
JIT_OPTIONS,
PARALLEL_JIT_OPTIONS)
from quartical.gains.general.generics import (native_intermediaries,
upsampled_itermediaries,
per_array_jhj_jhr,
resample_solints,
downsample_jhj_jhr)
from quartical.gains.general.flagging import (flag_intermediaries,
update_gain_flags,
finalize_gain_flags,
apply_gain_flags_to_flag_col,
update_param_flags)
from quartical.gains.general.convenience import (get_row,
get_extents)
import quartical.gains.general.factories as factories
from quartical.gains.general.inversion import (invert_factory,
inversion_buffer_factory)
def get_identity_params(corr_mode):
    """Return the identity (unity-amplitude) parameter vector.

    ``corr_mode`` is a numba literal whose ``literal_value`` is the number
    of correlations (1, 2 or 4); 2 and 4 correlations both use two
    amplitude parameters (one per diagonal element).
    """
    n_corr = corr_mode.literal_value
    if n_corr == 1:
        return np.ones((1,), dtype=np.float64)
    if n_corr in (2, 4):
        return np.ones((2,), dtype=np.float64)
    raise ValueError("Unsupported number of correlations.")
@njit(**JIT_OPTIONS)
def amplitude_solver(
    ms_inputs,
    mapping_inputs,
    chain_inputs,
    meta_inputs,
    corr_mode
):
    """Jitted entry point: forwards to the @overload-ed implementation."""
    return amplitude_solver_impl(
        ms_inputs,
        mapping_inputs,
        chain_inputs,
        meta_inputs,
        corr_mode
    )
def amplitude_solver_impl(
    ms_inputs,
    mapping_inputs,
    chain_inputs,
    meta_inputs,
    corr_mode
):
    # Pure-Python stub: the real body is supplied by the numba @overload
    # below, so calling this directly is an error.
    raise NotImplementedError
@overload(amplitude_solver_impl, jit_options=JIT_OPTIONS)
def nb_amplitude_solver_impl(
    ms_inputs,
    mapping_inputs,
    chain_inputs,
    meta_inputs,
    corr_mode
):
    # Numba overload building the iterative amplitude solver. Returns the
    # final jhj, the number of iterations performed and the converged
    # percentage.
    coerce_literal(nb_amplitude_solver_impl, ["corr_mode"])
    identity_params = get_identity_params(corr_mode)
    def impl(
        ms_inputs,
        mapping_inputs,
        chain_inputs,
        meta_inputs,
        corr_mode
    ):
        gains = chain_inputs.gains
        gain_flags = chain_inputs.gain_flags
        active_term = meta_inputs.active_term
        max_iter = meta_inputs.iters
        solve_per = meta_inputs.solve_per
        dd_term = meta_inputs.dd_term
        n_thread = meta_inputs.threads
        active_gain = gains[active_term]
        active_gain_flags = gain_flags[active_term]
        active_params = chain_inputs.params[active_term]
        # Set up some intermediaries used for flagging.
        km1_gain = active_gain.copy()
        km1_abs2_diffs = np.zeros_like(active_gain_flags, dtype=np.float64)
        abs2_diffs_trend = np.zeros_like(active_gain_flags, dtype=np.float64)
        flag_imdry = flag_intermediaries(
            km1_gain, km1_abs2_diffs, abs2_diffs_trend
        )
        # Set up some intermediaries used for solving.
        real_dtype = active_gain.real.dtype
        param_shape = active_params.shape
        active_t_map_g = mapping_inputs.time_maps[active_term]
        active_f_map_g = mapping_inputs.freq_maps[active_term]
        # Create more work to do in parallel when needed, else no-op.
        resampler = resample_solints(active_t_map_g, param_shape, n_thread)
        # Determine the starts and stops of the rows and channels associated
        # with each solution interval.
        extents = get_extents(resampler.upsample_t_map, active_f_map_g)
        upsample_shape = resampler.upsample_shape
        # jhj gains an extra trailing axis (square per-parameter block).
        upsampled_jhj = np.empty(upsample_shape + (upsample_shape[-1],),
                                 dtype=real_dtype)
        upsampled_jhr = np.empty(upsample_shape, dtype=real_dtype)
        # Native (non-upsampled) views share the upsampled buffers.
        jhj = upsampled_jhj[:param_shape[0]]
        jhr = upsampled_jhr[:param_shape[0]]
        update = np.zeros(param_shape, dtype=real_dtype)
        upsampled_imdry = upsampled_itermediaries(upsampled_jhj, upsampled_jhr)
        native_imdry = native_intermediaries(jhj, jhr, update)
        # "max_iter or 1" guarantees one pass even for non-solvable terms,
        # which only need jhj populated.
        for loop_idx in range(max_iter or 1):
            compute_jhj_jhr(
                ms_inputs,
                mapping_inputs,
                chain_inputs,
                meta_inputs,
                upsampled_imdry,
                extents,
                corr_mode
            )
            if resampler.active:
                downsample_jhj_jhr(upsampled_imdry, resampler.downsample_t_map)
            if solve_per == "array":
                per_array_jhj_jhr(native_imdry)
            if not max_iter:  # Non-solvable term, we just want jhj.
                conv_perc = 0  # Didn't converge.
                loop_idx = -1  # Did zero iterations.
                break
            compute_update(native_imdry, corr_mode)
            finalize_update(
                chain_inputs,
                meta_inputs,
                native_imdry,
                loop_idx,
                corr_mode
            )
            # Check for gain convergence. Produced as a side effect of
            # flagging. The converged percentage is based on unflagged
            # intervals.
            conv_perc = update_gain_flags(
                chain_inputs,
                meta_inputs,
                flag_imdry,
                loop_idx,
                corr_mode
            )
            # Propagate gain flags to parameter flags.
            update_param_flags(
                mapping_inputs,
                chain_inputs,
                meta_inputs,
                identity_params
            )
            if conv_perc >= meta_inputs.stop_frac:
                break
        # NOTE: Removes soft flags and flags points which have bad trends.
        finalize_gain_flags(
            chain_inputs,
            meta_inputs,
            flag_imdry,
            corr_mode
        )
        # Call this one last time to ensure points flagged by finalize are
        # propagated (in the DI case).
        if not dd_term:
            apply_gain_flags_to_flag_col(
                ms_inputs,
                mapping_inputs,
                chain_inputs,
                meta_inputs
            )
        return native_imdry.jhj, loop_idx + 1, conv_perc
    return impl
def compute_jhj_jhr(
    ms_inputs,
    mapping_inputs,
    chain_inputs,
    meta_inputs,
    upsampled_imdry,
    extents,
    corr_mode
):
    """Pure-Python stub for the J^H J / J^H r accumulation.

    The real body is supplied by the numba @overload below, so calling
    this directly is an error.
    """
    # Fix: the original returned the exception class instead of raising it,
    # unlike the sibling stubs (amplitude_solver_impl, compute_update,
    # finalize_update) which all raise.
    raise NotImplementedError
@overload(compute_jhj_jhr, jit_options=PARALLEL_JIT_OPTIONS)
def nb_compute_jhj_jhr(
    ms_inputs,
    mapping_inputs,
    chain_inputs,
    meta_inputs,
    upsampled_imdry,
    extents,
    corr_mode
):
    # Numba overload: accumulate jhj and jhr over rows, channels and
    # directions, parallelised over (time, freq) solution intervals.
    coerce_literal(nb_compute_jhj_jhr, ["corr_mode"])
    # We want to dispatch based on this field so we need its type.
    row_weights_idx = ms_inputs.fields.index('ROW_WEIGHTS')
    row_weights_type = ms_inputs[row_weights_idx]
    # Correlation-mode-specialised helper kernels.
    imul_rweight = factories.imul_rweight_factory(corr_mode, row_weights_type)
    v1_imul_v2 = factories.v1_imul_v2_factory(corr_mode)
    v1_imul_v2ct = factories.v1_imul_v2ct_factory(corr_mode)
    v1ct_imul_v2 = factories.v1ct_imul_v2_factory(corr_mode)
    absv1_idiv_absv2 = factories.absv1_idiv_absv2_factory(corr_mode)
    iunpack = factories.iunpack_factory(corr_mode)
    iunpackct = factories.iunpackct_factory(corr_mode)
    imul = factories.imul_factory(corr_mode)
    iadd = factories.iadd_factory(corr_mode)
    isub = factories.isub_factory(corr_mode)
    valloc = factories.valloc_factory(corr_mode)
    make_loop_vars = factories.loop_var_factory(corr_mode)
    set_identity = factories.set_identity_factory(corr_mode)
    compute_jhwj_jhwr_elem = compute_jhwj_jhwr_elem_factory(corr_mode)
    def impl(
        ms_inputs,
        mapping_inputs,
        chain_inputs,
        meta_inputs,
        upsampled_imdry,
        extents,
        corr_mode
    ):
        active_term = meta_inputs.active_term
        data = ms_inputs.DATA
        model = ms_inputs.MODEL_DATA
        weights = ms_inputs.WEIGHT
        flags = ms_inputs.FLAG
        antenna1 = ms_inputs.ANTENNA1
        antenna2 = ms_inputs.ANTENNA2
        row_map = ms_inputs.ROW_MAP
        row_weights = ms_inputs.ROW_WEIGHTS
        time_maps = mapping_inputs.time_maps
        freq_maps = mapping_inputs.freq_maps
        dir_maps = mapping_inputs.dir_maps
        gains = chain_inputs.gains
        jhj = upsampled_imdry.jhj
        jhr = upsampled_imdry.jhr
        n_row, n_chan, n_dir, n_corr = model.shape
        # Zero the accumulators before this iteration's accumulation.
        jhj[:] = 0
        jhr[:] = 0
        n_tint, n_fint, n_ant, n_gdir, n_param = jhr.shape
        n_int = n_tint*n_fint
        complex_dtype = gains[active_term].dtype
        weight_dtype = weights.dtype
        n_gains = len(gains)
        row_starts = extents.row_starts
        row_stops = extents.row_stops
        chan_starts = extents.chan_starts
        chan_stops = extents.chan_stops
        # Determine loop variables based on where we are in the chain.
        # gt means greater than (n>j) and lt means less than (n<j).
        all_terms, gt_active, lt_active = make_loop_vars(n_gains, active_term)
        # Parallel over all solution intervals.
        for i in prange(n_int):
            # Recover the (time, freq) interval indices from the flat index.
            ti = i//n_fint
            fi = i - ti*n_fint
            rs = row_starts[ti]
            re = row_stops[ti]
            fs = chan_starts[fi]
            fe = chan_stops[fi]
            # Per-thread scratch buffers (allocated inside the prange body).
            rop_pq = valloc(complex_dtype)  # Right-multiply operator for pq.
            rop_qp = valloc(complex_dtype)  # Right-multiply operator for qp.
            lop_pq = valloc(complex_dtype)  # Left-multiply operator for pq.
            lop_qp = valloc(complex_dtype)  # Left-multiply operator for qp.
            w = valloc(weight_dtype)
            r_pq = valloc(complex_dtype)
            wr_pq = valloc(complex_dtype)
            wr_qp = valloc(complex_dtype)
            v_pqd = valloc(complex_dtype)
            v_pq = valloc(complex_dtype)
            gains_p = valloc(complex_dtype, leading_dims=(n_gains,))
            gains_q = valloc(complex_dtype, leading_dims=(n_gains,))
            lop_pq_arr = valloc(complex_dtype, leading_dims=(n_gdir,))
            rop_pq_arr = valloc(complex_dtype, leading_dims=(n_gdir,))
            lop_qp_arr = valloc(complex_dtype, leading_dims=(n_gdir,))
            rop_qp_arr = valloc(complex_dtype, leading_dims=(n_gdir,))
            jhr_tifi = jhr[ti, fi]
            jhj_tifi = jhj[ti, fi]
            for row_ind in range(rs, re):
                row = get_row(row_ind, row_map)
                a1_m, a2_m = antenna1[row], antenna2[row]
                for f in range(fs, fe):
                    if flags[row, f]:  # Skip flagged data points.
                        continue
                    # Apply row weights in the BDA case, otherwise a no-op.
                    imul_rweight(weights[row, f], w, row_weights, row_ind)
                    iunpack(r_pq, data[row, f])
                    lop_pq_arr[:] = 0
                    rop_pq_arr[:] = 0
                    lop_qp_arr[:] = 0
                    rop_qp_arr[:] = 0
                    v_pq[:] = 0
                    for d in range(n_dir):
                        set_identity(lop_pq)
                        set_identity(lop_qp)
                        # Construct a small contiguous gain array. This makes
                        # the single term case fractionally slower.
                        for gi in range(n_gains):
                            d_m = dir_maps[gi][d]  # Broadcast dir.
                            t_m = time_maps[gi][row_ind]
                            f_m = freq_maps[gi][f]
                            gain = gains[gi][t_m, f_m]
                            iunpack(gains_p[gi], gain[a1_m, d_m])
                            iunpack(gains_q[gi], gain[a2_m, d_m])
                        m = model[row, f, d]
                        iunpack(rop_qp, m)
                        iunpackct(rop_pq, rop_qp)
                        # Apply every term's gains to the model.
                        for g in all_terms:
                            g_q = gains_q[g]
                            v1_imul_v2(g_q, rop_pq, rop_pq)
                            g_p = gains_p[g]
                            v1_imul_v2(g_p, rop_qp, rop_qp)
                        # Terms after the active one contribute to the
                        # right-hand operators.
                        for g in gt_active:
                            g_p = gains_p[g]
                            v1_imul_v2ct(rop_pq, g_p, rop_pq)
                            g_q = gains_q[g]
                            v1_imul_v2ct(rop_qp, g_q, rop_qp)
                        # Terms before the active one contribute to the
                        # left-hand operators.
                        for g in lt_active:
                            g_p = gains_p[g]
                            v1ct_imul_v2(g_p, lop_pq, lop_pq)
                            g_q = gains_q[g]
                            v1ct_imul_v2(g_q, lop_qp, lop_qp)
                        out_d = dir_maps[active_term][d]
                        iunpack(lop_pq_arr[out_d], lop_pq)
                        iadd(rop_pq_arr[out_d], rop_pq)
                        iunpack(lop_qp_arr[out_d], lop_qp)
                        iadd(rop_qp_arr[out_d], rop_qp)
                        # Accumulate the model visibility for this direction.
                        v1ct_imul_v2(lop_pq, gains_p[active_term], v_pqd)
                        v1_imul_v2ct(v_pqd, rop_pq, v_pqd)
                        iadd(v_pq, v_pqd)
                    # Amplitude-only residual: scale the model by
                    # |data|/|model| and subtract the model.
                    absv1_idiv_absv2(r_pq, v_pq, r_pq)
                    imul(r_pq, v_pq)
                    isub(r_pq, v_pq)
                    for d in range(n_gdir):
                        iunpack(wr_pq, r_pq)
                        imul(wr_pq, w)
                        iunpackct(wr_qp, wr_pq)
                        lop_pq_d = lop_pq_arr[d]
                        rop_pq_d = rop_pq_arr[d]
                        # Accumulate contributions for antenna p...
                        compute_jhwj_jhwr_elem(lop_pq_d,
                                               rop_pq_d,
                                               w,
                                               wr_pq,
                                               jhr_tifi[a1_m, d],
                                               jhj_tifi[a1_m, d])
                        lop_qp_d = lop_qp_arr[d]
                        rop_qp_d = rop_qp_arr[d]
                        # ...and for antenna q.
                        compute_jhwj_jhwr_elem(lop_qp_d,
                                               rop_qp_d,
                                               w,
                                               wr_qp,
                                               jhr_tifi[a2_m, d],
                                               jhj_tifi[a2_m, d])
        return
    return impl
def compute_update(native_imdry, corr_mode):
    # Pure-Python stub: the real body is supplied by the numba @overload
    # below, so calling this directly is an error.
    raise NotImplementedError
@overload(compute_update, jit_options=PARALLEL_JIT_OPTIONS)
def nb_compute_update(native_imdry, corr_mode):
    # Numba overload: solve (jhj) * update = jhr per antenna/direction,
    # parallelised over (time, freq) solution intervals.
    coerce_literal(nb_compute_update, ["corr_mode"])
    # We want to dispatch based on this field so we need its type.
    jhj = native_imdry[native_imdry.fields.index('jhj')]
    # A 6-dim jhj indicates the generalised (blocked) inversion path.
    generalised = jhj.ndim == 6
    inversion_buffer = inversion_buffer_factory(generalised=generalised)
    invert = invert_factory(corr_mode, generalised=generalised)
    def impl(native_imdry, corr_mode):
        jhj = native_imdry.jhj
        jhr = native_imdry.jhr
        update = native_imdry.update
        n_tint, n_fint, n_ant, n_dir, n_param = jhr.shape
        n_int = n_tint * n_fint
        result_dtype = jhr.dtype
        for i in prange(n_int):
            # Recover the (time, freq) interval indices from the flat index.
            t = i // n_fint
            f = i - t * n_fint
            # Per-thread scratch buffer for the inversion.
            buffers = inversion_buffer(n_param, result_dtype)
            for a in range(n_ant):
                for d in range(n_dir):
                    invert(jhj[t, f, a, d],
                           jhr[t, f, a, d],
                           update[t, f, a, d],
                           buffers)
    return impl
def finalize_update(
    chain_inputs,
    meta_inputs,
    native_imdry,
    loop_idx,
    corr_mode
):
    # Pure-Python stub: the real body is supplied by the numba @overload
    # below, so calling this directly is an error.
    raise NotImplementedError
@overload(finalize_update, jit_options=JIT_OPTIONS)
def nb_finalize_update(
    chain_inputs,
    meta_inputs,
    native_imdry,
    loop_idx,
    corr_mode
):
    # Numba overload: apply the (damped) parameter update in place and
    # rewrite the gains from the updated parameters.
    coerce_literal(nb_finalize_update, ["corr_mode"])
    set_identity = factories.set_identity_factory(corr_mode)
    param_to_gain = param_to_gain_factory(corr_mode)
    def impl(
        chain_inputs,
        meta_inputs,
        native_imdry,
        loop_idx,
        corr_mode
    ):
        dd_term = meta_inputs.dd_term
        active_term = meta_inputs.active_term
        pinned_directions = meta_inputs.pinned_directions
        gain = chain_inputs.gains[active_term]
        gain_flags = chain_inputs.gain_flags[active_term]
        params = chain_inputs.params[active_term]
        update = native_imdry.update
        n_tint, n_fint, n_ant, n_dir, n_corr = gain.shape
        # Pinned directions are only held fixed for direction-dependent terms.
        if dd_term:
            dir_loop = [d for d in range(n_dir) if d not in pinned_directions]
        else:
            dir_loop = [d for d in range(n_dir)]
        for ti in range(n_tint):
            for fi in range(n_fint):
                for a in range(n_ant):
                    for d in dir_loop:
                        p = params[ti, fi, a, d]
                        g = gain[ti, fi, a, d]
                        fl = gain_flags[ti, fi, a, d]
                        upd = update[ti, fi, a, d]
                        if fl == 1:
                            # Flagged solution: reset to unit amplitude.
                            p[:] = 1
                            set_identity(g)
                        else:
                            # Halve the update (damping) before applying it.
                            upd /= 2
                            p += upd
                            param_to_gain(p, g)
    return impl
def param_to_gain_factory(corr_mode):
    """Return a jitted kernel that copies amplitude parameters onto the
    diagonal correlation slots of a gain, for the given (literal)
    correlation mode."""
    n_corr = corr_mode.literal_value
    if n_corr == 4:
        def impl(params, gain):
            # Diagonal of a flattened 2x2 gain lives at indices 0 and 3.
            gain[0] = params[0]
            gain[3] = params[1]
    elif n_corr == 2:
        def impl(params, gain):
            gain[0] = params[0]
            gain[1] = params[1]
    elif n_corr == 1:
        def impl(params, gain):
            gain[0] = params[0]
    else:
        raise ValueError("Unsupported number of correlations.")
    return factories.qcjit(impl)
def compute_jhwj_jhwr_elem_factory(corr_mode):
    """Return a jitted kernel accumulating one element of jhr and jhj
    from the left/right operators, weights and weighted residual, for the
    given (literal) correlation mode."""
    v1_imul_v2 = factories.v1_imul_v2_factory(corr_mode)
    unpack = factories.unpack_factory(corr_mode)
    unpackc = factories.unpackc_factory(corr_mode)
    if corr_mode.literal_value == 4:
        def impl(lop, rop, w, res, jhr, jhj):
            # Effectively apply zero weight to off-diagonal terms.
            # TODO: Can be tidied but requires moving other weighting code.
            res[1] = 0
            res[2] = 0
            # Accumulate an element of jhwr.
            v1_imul_v2(res, rop, res)
            v1_imul_v2(lop, res, res)
            # Accumulate an element of jhwj.
            w_0, _, _, w_3 = unpack(w)  # NOTE: XX, XY, YX, YY
            r_0, _, _, r_3 = unpack(res)  # NOTE: XX, XY, YX, YY
            jhr[0] += r_0.real
            jhr[1] += r_3.real
            lop_00, lop_01, lop_10, lop_11 = unpack(lop)
            rop_00, rop_10, rop_01, rop_11 = unpack(rop)  # "Transpose"
            jh_00 = lop_00 * rop_00
            jh_03 = lop_01 * rop_01
            j_00 = jh_00.conjugate()
            j_03 = jh_03.conjugate()
            jh_30 = lop_10 * rop_10
            jh_33 = lop_11 * rop_11
            j_30 = jh_30.conjugate()
            j_33 = jh_33.conjugate()
            jhwj_00 = jh_00*w_0*j_00 + jh_03*w_3*j_03
            jhwj_03 = jh_00*w_0*j_30 + jh_03*w_3*j_33
            jhwj_33 = jh_30*w_0*j_30 + jh_33*w_3*j_33
            jhj[0, 0] += jhwj_00.real
            jhj[0, 1] += jhwj_03.real
            # NOTE(review): mirrors the accumulated (0,1) entry, treating
            # jhj as symmetric — confirm this is the intended assignment
            # rather than an accumulation of a distinct (1,0) term.
            jhj[1, 0] = jhj[0, 1]
            jhj[1, 1] += jhwj_33.real
    elif corr_mode.literal_value == 2:
        def impl(lop, rop, w, res, jhr, jhj):
            # Accumulate an element of jhwr.
            v1_imul_v2(res, rop, res)
            r_0, r_1 = unpack(res)
            jhr[0] += r_0.real
            jhr[1] += r_1.real
            # Accumulate an element of jhwj.
            jh_00, jh_11 = unpack(rop)
            j_00, j_11 = unpackc(rop)
            w_00, w_11 = unpack(w)
            # TODO: Consider representing as a vector?
            jhj[0, 0] += (jh_00*w_00*j_00).real
            jhj[1, 1] += (jh_11*w_11*j_11).real
    elif corr_mode.literal_value == 1:
        def impl(lop, rop, w, res, jhr, jhj):
            # Accumulate an element of jhwr.
            v1_imul_v2(res, rop, res)
            r_0 = unpack(res)
            jhr[0] += r_0.real
            # Accumulate an element of jhwj.
            jh_00 = unpack(rop)
            j_00 = unpackc(rop)
            w_00 = unpack(w)
            jhj[0, 0] += (jh_00*w_00*j_00).real
    else:
        raise ValueError("Unsupported number of correlations.")
    return factories.qcjit(impl)
@njit(**JIT_OPTIONS)
def amplitude_params_to_gains(
    params,
    gains
):
    """Populate the diagonal of ``gains`` in place from amplitude ``params``."""
    n_time, n_freq, n_ant, n_dir, n_corr = gains.shape
    multi_corr = n_corr > 1
    for ti in range(n_time):
        for fi in range(n_freq):
            for ai in range(n_ant):
                for di in range(n_dir):
                    gain = gains[ti, fi, ai, di]
                    par = params[ti, fi, ai, di]
                    gain[0] = par[0]
                    if multi_corr:
                        # Last correlation is the other diagonal element.
                        gain[-1] = par[-1]
|
ratt-ruREPO_NAMEQuartiCalPATH_START.@QuartiCal_extracted@QuartiCal-main@quartical@gains@amplitude@kernel.py@.PATH_END.py
|
{
"filename": "_util.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/filelock/py2/filelock/_util.py",
"type": "Python"
}
|
import os
import stat
import sys
# Python 2 lacks the PermissionError builtin; alias it to OSError there so a
# single name can be raised/caught on both interpreters.
PermissionError = PermissionError if sys.version_info[0] == 3 else OSError
def raise_on_exist_ro_file(filename):
    """Raise PermissionError if *filename* exists but is not owner-writable.

    A single os.stat call combines the existence and writability checks
    without a race condition; a missing file (or any other stat failure)
    is silently treated as "nothing to check".
    """
    try:
        file_stat = os.stat(filename)
    except OSError:
        return None  # does not exist, or stat failed for another reason
    if file_stat.st_mtime == 0:
        # A zero modification time marks an invalid os.stat result - ignore.
        return None
    if file_stat.st_mode & stat.S_IWUSR:
        return None
    raise PermissionError("Permission denied: {!r}".format(filename))
# Explicit public API of this helper module.
__all__ = [
    "raise_on_exist_ro_file",
    "PermissionError",
]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@filelock@py2@filelock@_util.py@.PATH_END.py
|
{
"filename": "fixwheel.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/build/rocm/tools/fixwheel.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(mrodden): This file is part of the ROCm build scripts, and
# needs be compatible with Python 3.6. Please do not include these
# in any "upgrade" scripts
import argparse
import logging
import os
from pprint import pprint
import subprocess
from auditwheel.lddtree import lddtree
from auditwheel.wheeltools import InWheelCtx
from auditwheel.elfutils import elf_file_filter
from auditwheel.policy import WheelPolicies
from auditwheel.wheel_abi import analyze_wheel_abi
LOG = logging.getLogger(__name__)
def tree(path):
    """Log each shared object in the wheel and print its ELF dependency tree."""
    with InWheelCtx(path) as wheel_ctx:
        for so_path, _fd in elf_file_filter(wheel_ctx.iter_files()):
            LOG.info("found SO file: %s" % so_path)
            print(lddtree(so_path))
def parse_args():
    """Parse command-line arguments: one positional path to the wheel file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("wheel_path")
    return parser.parse_args()
def parse_wheel_name(path):
    """Split a wheel's basename into its dash-separated name components.

    Strips the trailing ".whl" and splits on "-"; for a standard
    five-component wheel name the platform tag is the last element
    (presumably [dist, version, python, abi, platform] - see PEP 427).
    """
    base = os.path.basename(path)
    stem = base[:-4]  # drop ".whl"
    return stem.split("-")
def fix_wheel(path):
    """Repair a wheel so it carries a valid manylinux platform tag.

    Steps:
      1. If the wheel is tagged manylinux2014, retag it as plain
         linux_x86_64 first (the original tags are considered broken).
      2. Analyze the wheel with auditwheel to find the external libraries
         that should NOT be grafted in, and pass them as --exclude flags.
      3. Run ``auditwheel repair`` targeting manylinux_2_28_x86_64.

    Raises subprocess.CalledProcessError if the repair step fails.
    """
    tup = parse_wheel_name(path)
    plat_tag = tup[4]
    if "manylinux2014" in plat_tag:
        # strip any manylinux tags from the current wheel first
        from wheel.cli import tags

        plat_mod_str = "linux_x86_64"
        new_wheel = tags.tags(
            path,
            python_tags=None,
            abi_tags=None,
            platform_tags=plat_mod_str,
            build_tag=None,
        )
        new_path = os.path.join(os.path.dirname(path), new_wheel)
        LOG.info("Stripped broken tags and created new wheel at %r" % new_path)
        path = new_path

    # build excludes, using auditwheel's lddtree to find them
    wheel_pol = WheelPolicies()
    exclude = frozenset()
    abi = analyze_wheel_abi(wheel_pol, path, exclude)

    plat = "manylinux_2_28_x86_64"
    ext_libs = abi.external_refs.get(plat, {}).get("libs")
    exclude = list(ext_libs.keys())

    # call auditwheel repair with excludes
    cmd = ["auditwheel", "repair", "--plat", plat, "--only-plat"]
    for ex in exclude:
        cmd.append("--exclude")
        cmd.append(ex)
    cmd.append(path)

    LOG.info("running %r" % cmd)
    # check=True raises on a non-zero exit; the return value was unused.
    subprocess.run(cmd, check=True)
def main():
    """Entry point: repair the wheel named on the command line."""
    args = parse_args()
    fix_wheel(args.wheel_path)
if __name__ == "__main__":
    # Configure logging at INFO so the progress messages in fix_wheel/tree
    # are visible when run as a script.
    logging.basicConfig(level=logging.INFO)
    main()
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@build@rocm@tools@fixwheel.py@.PATH_END.py
|
{
"filename": "ttv2fast2furious.py",
"repo_name": "shadden/TTV2Fast2Furious",
"repo_path": "TTV2Fast2Furious_extracted/TTV2Fast2Furious-master/ttv2fast2furious/ttv2fast2furious.py",
"type": "Python"
}
|
"""
.. module:: ttv2fast2furious
:platform: Unix, Mac, Windows
:synopsis: Routines for TTV analysis
.. moduleauthor:: Sam Hadden <samuel.hadden@cfa.harvard.edu>
"""
import warnings
import numpy as np
from scipy.optimize import lsq_linear
from scipy.special import gammainc,gammaincinv
from scipy.special import ellipk,ellipkinc,ellipe,ellipeinc
import scipy.integrate as integrate
from ttv2fast2furious.ttv_basis_functions import get_nearest_firstorder,get_superperiod,dt0_InnerPlanet,dt0_OuterPlanet,get_fCoeffs
from ttv2fast2furious.ttv_basis_functions import get_nearest_secondorder,dt2_InnerPlanet,dt2_OuterPlanet
from collections import OrderedDict
def ttv_basis_function_matrix_inner(P,P1,T0,T10,Ntrans,IncludeLinearBasis=True):
    """
    Compute the transit time basis function matrix, 'M', for a planet subject to an external perturber.

    Args:
        P (real): Inner (TTV) planet period.
        P1 (real): Outer (perturbing) planet period.
        T0 (real): Inner planet initial time of transit.
        T10 (real): Outer planet initial time of transit.
        Ntrans (int): number of inner-planet transits.
        IncludeLinearBasis (bool): Whether basis functions representing a linear transit ephemeris are included. Default is True.

    Returns:
        Array: the resulting basis function matrix as an (Ntrans, 5) array, or an (Ntrans, 3) array if IncludeLinearBasis=False.
    """
    j = get_nearest_firstorder(P/P1)
    assert j > 0 , "Bad period ratio!!! P,P1 = %.3f \t %.3f"%(P,P1)
    superP = get_superperiod(P,P1)
    superPhase = 2*np.pi * (j * (-T10/P1) - (j-1) * (-T0/P) )
    # Unperturbed (linear-ephemeris) transit times of the inner planet.
    Times = T0 + np.arange(Ntrans) * P
    # Normalize super-sin/cos so that basis fn amplitude is mu * (Zx/Zy)
    #
    j_2 = 1.0 / j / j
    Delta = (j-1) * P1 / P / j - 1
    Delta_2 = 1.0 / Delta / Delta
    alpha = (P/P1)**(2./3.)
    alpha_2 = 1.0 / alpha / alpha
    fCoeffIn,fCoeffOut=get_fCoeffs(j,alpha)
    denom = (fCoeffIn*fCoeffIn + fCoeffOut*fCoeffOut)**(0.5)
    S = 1.5 * (1-j) * alpha_2 * j_2 * Delta_2 * denom * P / (np.pi)
    C = -1.5 * (1-j) * alpha_2 * j_2 * Delta_2 * denom * P / (np.pi)
    dt0 = dt0_InnerPlanet(P,P1,T0,T10,Ntrans)
    superSin = S * np.sin(2*np.pi * Times / superP + superPhase)
    superCos = C * np.cos(2*np.pi * Times / superP + superPhase)
    if IncludeLinearBasis:
        basis_function_matrix=np.vstack((np.ones(Ntrans),np.arange(Ntrans),dt0,superSin,superCos)).T
    else:
        basis_function_matrix=np.vstack((dt0,superSin,superCos)).T
    return basis_function_matrix
def ttv_basis_function_matrix_outer(P,P1,T0,T10,Ntrans,IncludeLinearBasis=True):
    """
    Compute the transit time basis function matrix, 'M', for a planet subject to an interior perturber.

    Args:
        P (real): Inner (perturbing) planet period.
        P1 (real): Outer (TTV) planet period.
        T0 (real): Inner planet initial time of transit.
        T10 (real): Outer planet initial time of transit.
        Ntrans (int): number of outer-planet transits.
        IncludeLinearBasis (bool): Whether basis functions representing a linear transit ephemeris are included. Default is True.

    Returns:
        Array: the resulting basis function matrix as an (Ntrans, 5) array, or an (Ntrans, 3) array if IncludeLinearBasis=False.
    """
    j = get_nearest_firstorder(P/P1)
    assert j > 0 , "Bad period ratio! P,P1 = %.3f \t %.3f"%(P,P1)
    superP = get_superperiod(P,P1)
    superPhase = 2*np.pi * (j * (-T10/P1) - (j-1) * (-T0/P) )
    # Unperturbed (linear-ephemeris) transit times of the outer planet.
    Times = T10 + np.arange(Ntrans) * P1
    # Normalize super-sin/cos so that basis fn amplitude is mu * (Zx/Zy)
    #
    j_2 = 1.0 / j / j
    Delta = (j-1) * P1 / P / j - 1
    Delta_2 = 1.0 / Delta / Delta
    alpha = (P/P1)**(2./3.)
    alpha_2 = 1.0 / alpha / alpha
    fCoeffIn,fCoeffOut=get_fCoeffs(j,alpha)
    denom = (fCoeffIn*fCoeffIn + fCoeffOut*fCoeffOut)**(0.5)
    S1 = 1.5 * (j) * j_2 * Delta_2 * denom * P1 / (np.pi)
    C1 = -1.5 * (j) * j_2 * Delta_2 * denom * P1 / (np.pi)
    dt0 = dt0_OuterPlanet(P,P1,T0,T10,Ntrans)
    superSin = S1 * np.sin(2*np.pi * Times / superP + superPhase)
    superCos = C1 * np.cos(2*np.pi * Times / superP + superPhase)
    if IncludeLinearBasis:
        basis_function_matrix=np.vstack((np.ones(Ntrans),np.arange(Ntrans),dt0,superSin,superCos)).T
    else:
        basis_function_matrix=np.vstack((dt0,superSin,superCos)).T
    return basis_function_matrix
###################################
def PlanetPropertiestoLinearModelAmplitudes(T0,P,mass,e,varpi,T10,P1,mass1,e1,varpi1):
    """
    Convert physical planet properties to linear TTV model amplitudes.

    Args:
        T0 (real): Inner planet initial time of transit.
        P (real): Inner planet period.
        mass (real): Inner planet mass, in units of the host-star mass.
        e (real): Inner planet eccentricity.
        varpi (real): Inner planet longitude of periapse.
        T10 (real): Outer planet initial time of transit.
        P1 (real): Outer planet period.
        mass1 (real): Outer planet mass, in units of the host-star mass.
        e1 (real): Outer planet eccentricity.
        varpi1 (real): Outer planet longitude of periapse.
    Returns:
        tuple of ndarray: the 5-component amplitude vectors
        (T0, P, mass1, S, C) for the inner planet and
        (T10, P1, mass, S1, C1) for the outer planet.
    """
    j=get_nearest_firstorder(P/P1)
    # Eccentricity vector components of each planet.
    ex = e * np.cos(varpi)
    ey = e * np.sin(varpi)
    ex1 = e1 * np.cos(varpi1)
    ey1 = e1 * np.sin(varpi1)
    alpha = (P/P1)**(2./3.)
    fCoeffIn,fCoeffOut=get_fCoeffs(j,alpha)
    denom = (fCoeffIn*fCoeffIn + fCoeffOut*fCoeffOut)**(0.5)
    # Combined eccentricity vector Z, normalized by |f|.
    Zx = (fCoeffIn * ex + fCoeffOut * ex1) / denom
    Zy = (fCoeffIn * ey + fCoeffOut * ey1) / denom
    # Each planet's sin/cos TTV amplitudes scale with its perturber's mass.
    S = mass1 * Zx
    C = mass1 * Zy
    S1 = mass * Zx
    C1 = mass * Zy
    return np.array([T0,P,mass1,S,C]),np.array([T10,P1,mass,S1,C1])
###################################
def get_ttv_basis_function_matrix(Pi,Pj,T0i,T0j,Ntrans):
    """
    Compute basis function matrix for planet `i`'s TTV due to planet `j`.

    Arguments
    --------
    Pi: real
        TTV planet's period
    Pj: real
        Perturbing planet's period
    T0i: real
        TTV planet's initial time of transit
    T0j: real
        Perturbing planet's initial time of transit
    Ntrans: int
        Number of transits of the TTV planet to compute.

    Returns
    -------
    ndarray
        (Ntrans, 3) basis function matrix (linear-ephemeris columns excluded).
    """
    # Dispatch on which planet is the transiting one: use the inner-planet
    # expansion when planet i orbits inside planet j, otherwise the outer one.
    if Pi < Pj:
        return ttv_basis_function_matrix_inner(Pi,Pj,T0i,T0j,Ntrans,IncludeLinearBasis=False)
    else:
        return ttv_basis_function_matrix_outer(Pj,Pi,T0j,T0i,Ntrans,IncludeLinearBasis=False)
def get_linear_basis_basis_function_matrix(Ntransits):
    """Return the (Ntransits, 2) basis matrix of a linear transit ephemeris.

    Column 0 is a constant (initial transit time T0) and column 1 is the
    transit number (orbital period P).
    """
    transit_numbers = np.arange(Ntransits)
    constant_column = np.ones(Ntransits)
    return np.column_stack((constant_column, transit_numbers))
###################################
def MultiplanetSystemBasisFunctionMatrices(Nplanets,Periods,T0s,Ntransits,**kwargs):
    """
    Compute basis function matrices for the transit times of an `Nplanet` system.

    Parameters
    ----------
    Nplanets : int
        The number of transiting planets to model.
    Periods : ndarray
        Array listing planets' orbital periods.
    T0s : ndarray
        Array listing the times of planets' first transits.
    Ntransits: ndarray
        Array listing the number of transits to compute for each planet.

    Keyword Arguments
    -----------------
    ``InteractionMatrix``
        Specify the interactions between planets as a boolean matrix.
        By default, all planets are assumed to interact with one another.

    Returns
    -------
    list
        List of ndarrays with columns containing TTV basis functions of each planet.
    """
    interactions = kwargs.get("InteractionMatrix", np.ones((Nplanets, Nplanets), dtype=bool))
    # A planet cannot perturb itself; the supplied matrix is modified in place.
    np.fill_diagonal(interactions, False)
    # Every planet starts with the linear-ephemeris (T0, P) basis columns.
    matrices = [get_linear_basis_basis_function_matrix(n) for n in Ntransits]
    for i in range(Nplanets):
        for j in range(Nplanets):
            if not interactions[i, j]:
                continue
            # Append the three TTV columns induced by perturber j on planet i.
            ttv_columns = get_ttv_basis_function_matrix(
                Periods[i], Periods[j], T0s[i], T0s[j], Ntransits[i]
            )
            matrices[i] = np.hstack((matrices[i], ttv_columns))
    return matrices
###################################
def SetupInteractionMatrixWithMaxPeriodRatio(Periods, MaxRatio):
    """Build a boolean interaction matrix from a period-ratio cutoff.

    Entry (i, j) is True when the period ratio of planets i and j
    (always taken >= 1) is less than `MaxRatio`. Diagonal entries
    (self-interactions) are always False.
    """
    Np = len(Periods)
    matrix = np.empty((Np, Np), dtype=bool)
    for row, Pa in enumerate(Periods):
        for col, Pb in enumerate(Periods):
            ratio = Pa / Pb if Pa > Pb else Pb / Pa
            matrix[row, col] = ratio < MaxRatio
    np.fill_diagonal(matrix, False)
    return matrix
###################################
def get_ttv_model_amplitudes(Pi,Pj,T0i,T0j,massi,massj,ei,ej,pmgi,pmgj):
    """Return the TTV amplitudes for planets `i` and `j`.

    The linear-ephemeris entries (T0, P) are stripped from each amplitude
    vector, leaving only the three TTV amplitudes per planet.
    """
    planet_i = (T0i, Pi, massi, ei, pmgi)
    planet_j = (T0j, Pj, massj, ej, pmgj)
    # PlanetPropertiestoLinearModelAmplitudes expects (inner, outer) ordering.
    if Pi < Pj:
        Xi, Xj = PlanetPropertiestoLinearModelAmplitudes(*planet_i, *planet_j)
    else:
        Xj, Xi = PlanetPropertiestoLinearModelAmplitudes(*planet_j, *planet_i)
    return Xi[2:], Xj[2:]
###################################
def MultiplanetSystemLinearModelAmplitudes(Nplanets,Periods,T0s,masses,eccs,pomegas,**kwargs):
    """
    Compute amplitudes for linear model basis functions for user-specified planet parameters.

    Parameters
    ----------
    Nplanets : int
        The number of planets in the system.
    Periods : ndarray
        Periods of the planets.
    T0s : ndarray
        Times of first transit
    masses : ndarray
        Planet masses, in units of host-star mass.
    eccs : ndarray
        Eccentricities.
    pomegas : ndarray
        Longitudes of periapse.

    Keyword Arguments
    -----------------
    InteractionMatrix : ndarray of bool
        Specifies which planet-planet interactions contribute amplitudes.
        By default all pairs interact.

    Returns
    -------
    :obj:`list` of ndarrays
        List of model amplitudes
    """
    InteractionMatrix=kwargs.get("InteractionMatrix",np.ones((Nplanets,Nplanets),dtype=bool))
    for i in range(Nplanets):
        # No self-interactions!
        InteractionMatrix[i,i]=False
    # Allocate each planet's amplitude vector: 2 linear-ephemeris entries
    # (T0, P) plus 3 TTV amplitudes per interacting perturber.
    Xs=[]
    for i in range(Nplanets):
        lenXi = 2 + 3 * np.sum(InteractionMatrix[i])
        Xi = np.zeros(lenXi)
        Xi[0] = T0s[i]
        Xi[1] = Periods[i]
        Xs.append(Xi)
    # Fill in TTV amplitudes pair by pair (j > i so each pair is handled once).
    for i in range(Nplanets):
        Pi = Periods[i]
        T0i = T0s[i]
        massi=masses[i]
        ei=eccs[i]
        pmgi=pomegas[i]
        for j in range(i+1,Nplanets):
            if InteractionMatrix[i,j] or InteractionMatrix[j,i]:
                Pj = Periods[j]
                T0j = T0s[j]
                massj = masses[j]
                ej = eccs[j]
                pmgj=pomegas[j]
                Xi,Xj = get_ttv_model_amplitudes(Pi,Pj,T0i,T0j,massi,massj,ei,ej,pmgi,pmgj)
                if InteractionMatrix[i,j]:
                    # Amplitudes for perturber j begin after the 2 linear terms
                    # plus 3 entries for each interacting perturber with index < j.
                    Jindex=2+3*np.sum(InteractionMatrix[i,:j])
                    Xs[i][Jindex:Jindex+3]=Xi
                if InteractionMatrix[j,i]:
                    # Same offset bookkeeping for planet j's amplitude vector.
                    Iindex=2+3*np.sum(InteractionMatrix[j,:i])
                    Xs[j][Iindex:Iindex+3]=Xj
            else:
                pass
    return Xs
###################################
class PlanetTransitObservations(object):
    r"""
    Object to store transit timing measurement information.
    """
    def __init__(self,transit_numbers,times,uncertainties):
        r"""
        Parameters
        ----------
        transit_numbers : ndarray
            List that numbers the series of transit observations
        times : ndarray
            List of transit mid-times
        uncertainties : ndarray
            List of transit mid-time measurement :math:`1\sigma` uncertainties
        """
        # Coerce to ndarray so boolean-mask indexing works for list inputs too.
        self._transit_numbers=np.asarray(transit_numbers)
        self._times=np.asarray(times)
        self._uncertainties = np.asarray(uncertainties)
        self._Ntransits = len(times)
        # Boolean mask selecting active observations; all active by default.
        self._mask = np.ones(self._Ntransits,dtype=bool)
    @classmethod
    def from_times_only(cls,times,unc=0):
        """
        Initialize transit observation object directly from list of transit times.

        Return a :class:`PlanetTransitObservations` object initialized from a list of transit times.
        Transit numbers are automatically assigned sequentially. Uniform uncertainties are
        assigned to the observations according to the user-specified `unc`.

        Parameters
        ----------
        times : array_like
            List of transit mid-times
        unc : float, optional
            Uncertainty assigned to transit observations
        """
        Ntr = len(times)
        nums = np.arange(Ntr)
        sigma = unc * np.ones(Ntr)
        return cls(nums,times,sigma)
    def function_mask(self,func):
        """
        Mask out transit times by applying function `func` to the times.

        Parameters
        ----------
        func : callable
            Function to mask times. `func` should return `False` for times that are
            to be masked out. Otherwise `func` should return `True`.
        """
        self._mask = np.array([func(t) for t in self._times])
    def unmask(self):
        """Re-activate all observations (undo any previous masking)."""
        self._mask = np.ones(self._Ntransits,dtype=bool)
    @property
    def times(self):
        """
        ndarray: List of (unmasked) transit mid-times
        """
        return self._times[self._mask]
    @property
    def transit_numbers(self):
        """
        ndarray: List of (unmasked) transit epoch numbers.
        """
        return self._transit_numbers[self._mask]
    @property
    def uncertainties(self):
        r"""
        ndarray: List of :math:`1\sigma` transit mid-time measurement uncertainties.
        """
        return self._uncertainties[self._mask]
    @uncertainties.setter
    def uncertainties(self,value):
        # Only the currently unmasked entries are overwritten.
        self._uncertainties[self._mask] = value
    @property
    def Ntransits(self):
        """int: Number of unmasked transit observations."""
        return len(self.times)
    @property
    def weighted_obs_vector(self):
        """ndarray: Transit times divided by their uncertainties."""
        return self.times / self.uncertainties
    def basis_function_matrix(self):
        """
        Generate the basis function matrix for a linear transit ephemeris.

        Returns
        -------
        ndarray
            basis function matrix
        """
        constant_basis = np.ones(self.Ntransits)
        return np.vstack(( constant_basis , self.transit_numbers)).T
    def linear_fit_design_matrix(self):
        """
        Generate the design matrix for a linear transit ephemeris.

        Returns
        -------
        ndarray
            Basis function matrix with each row weighted by 1/uncertainty.
        """
        constant_basis = np.ones(self.Ntransits)
        sigma = self.uncertainties
        design_matrix = np.vstack(( constant_basis / sigma , self.transit_numbers / sigma )).T
        return design_matrix
    def linear_best_fit(self):
        """
        Determine the best-fit period and initial transit time for a linear transit ephemeris.

        Returns
        -------
        ndarray
            Two-element array (T0, P) of least-squares amplitudes.
        """
        y = self.weighted_obs_vector
        A = self.linear_fit_design_matrix()
        fitresults = np.linalg.lstsq(A,y,rcond=-1)
        return fitresults[0]
    def linear_fit_residuals(self):
        """
        Return the transit timing residuals of a best-fit linear transit ephemeris.
        """
        M = self.basis_function_matrix()
        best = self.linear_best_fit()
        return self.times - M.dot(best)
class transit_times_model(OrderedDict):
    """
    Class representing a linear model for a single set of transit times.

    The object is an :obj:`OrderedDict` that maps each parameter name to its
    basis-function column (one entry per observed transit epoch).

    Attributes
    ----------
    observations : :obj:`PlanetTransitObservations`
        Transit observations to model.
    parameter_bounds : :obj:`OrderedDict`
        A dictionary that contains bounding intervals
        for each linear model amplitude
    MaxTransitNumber : int
        Largest transit epoch number of the transits recorded in
        'observations'. This is used when generating new basis
        functions to add to the model.
    """
    def __init__(self,observations,suffix=''):
        """
        Parameters
        ----------
        observations : :obj:`PlanetTransitObservations`
            The transit observations to model.
        suffix : str, optional
            Label appended to parameter names (e.g. a planet name).
        """
        super(transit_times_model,self).__init__()
        self.observations = observations
        self.parameter_bounds = OrderedDict()
        self.MaxTransitNumber = observations._transit_numbers.max()
        # Linear-ephemeris basis columns: a constant (T0) and the transit number (P).
        self['T0{}'.format(suffix)] = np.ones(observations._Ntransits)
        self['P{}'.format(suffix)] = observations._transit_numbers
        # The period amplitude must be non-negative in the bounded fit.
        self.parameter_bounds['P{}'.format(suffix)] = (0,np.inf)
        self._suffix = suffix
    def __reduce__(self):
        """Helps the class play nice with pickle_.

        .. _pickle: https://docs.python.org/3/library/pickle.html#object.__reduce__"""
        # Reconstruct from (observations, suffix), then replay the stored items.
        red = (self.__class__, (self.observations,
                                self._suffix),
               None,None,iter(self.items()))
        return red
    def __getitem__(self,key):
        # Return only the unmasked entries of the stored basis column.
        return super().__getitem__(key)[self.mask]
    def __setitem__(self,key,val):
        # Newly added parameters default to unbounded amplitudes.
        if key not in self.parameter_bounds.keys():
            self.parameter_bounds[key]=(-np.inf,np.inf)
        super().__setitem__(key,val)
    @property
    def mask(self):
        """ndarray of bool: The observations' active-transit mask."""
        return self.observations._mask
    @property
    def Nrow(self):
        """int: Number of model parameters (one basis column is stored per parameter)."""
        return len(self)
    @property
    def Ncol(self):
        """int: Number of unmasked transit observations."""
        return self.observations.Ntransits
    @property
    def basis_function_matrix(self):
        """Return the basis function matrix, shape (Ntransits, Nparameters)."""
        return np.vstack([val[self.mask] for val in self.values()]).T
    def design_matrix(self):
        """Return the design matrix (basis functions weighted by 1/uncertainty)."""
        sigma_vec = self.observations.uncertainties
        Atranspose = np.transpose(self.basis_function_matrix) / sigma_vec
        return np.transpose(Atranspose)
    def cov_inv(self):
        """Return the inverse covariance matrix"""
        A = self.design_matrix()
        return np.transpose(A).dot(A)
    def cov(self):
        """Return the covariance matrix."""
        return np.linalg.inv(self.cov_inv())
    def list_columns(self):
        """List the column labels of the basis function matrix"""
        return [key for key in self.keys()]
    def weighted_obs_vector(self):
        """Observation times weighed by uncertainties.

        Used primarily in least-squares fitting along with design
        matrix.
        """
        return self.observations.times / self.observations.uncertainties
    def function_mask(self,maskfn):
        """Mask the underlying observations using function 'maskfn'

        Arguments
        ---------
        maskfn : callable
            Function of observation time used to mask observations.
            Observations times for which 'maskfn' returns 'False' are
            masked out, otherwise they are included.
        """
        if not callable(maskfn):
            raise TypeError("'maskfn' must be callable.")
        self.observations.function_mask(maskfn)
    def best_fit(self,full_output=False):
        """Compute the best-fit transit model amplitudes
        subject to the constraints set in 'parameter_bounds'
        attribute.

        Arguments
        ---------
        full_output : bool (optional)
            If 'True', return the 'OptimizeResult' object generated
            by scipy.lsq_linear along with the default dictionary
            containing best-fit amplitudes

        Returns
        -------
        dictionary :
            A dictionary containing the best-fit model amplitudes.
        """
        A = self.design_matrix()
        y = self.weighted_obs_vector()
        # Bounds are stored as (lb, ub) pairs per parameter; lsq_linear
        # wants separate lower/upper arrays.
        lb,ub = np.transpose([bounds for bounds in self.parameter_bounds.values()])
        min_result = lsq_linear(A,y,bounds = (lb,ub))
        best_dict = {key:val for key,val in zip(self.keys(),min_result.x)}
        if full_output:
            return best_dict,min_result
        return best_dict
    def best_fit_vec(self):
        """Get best-fit transit model amplitudes as a vector.

        Returns
        -------
        ndarray :
            Vector representing the best-fit TTV model amplitudes
        """
        bfdict = self.best_fit()
        return np.array([bfdict[x] for x in self.list_columns()])
    def residuals(self):
        """Return the normalized residuals of the best-fit solution"""
        # lsq_linear's 'fun' attribute is the residual vector of the
        # weighted (design-matrix) system.
        best_dict,min_result = self.best_fit(full_output=True)
        return min_result.fun
    def chi_squared(self,per_dof=False):
        """Return the chi-squared value of the best-fit solution.

        Arguments
        ---------
        per_dof : bool (optional)
            If true, return the chi-squared divided by the
            number of degrees of freedom (i.e., the number
            of observations minus the number of model
            parameters). The default value is False.
        """
        resids = self.residuals()
        chisq = resids.dot(resids)
        if per_dof:
            # Degrees of freedom: observations minus fitted parameters.
            dof = self.Ncol - self.Nrow
            return chisq / dof
        return chisq
    def Delta_BIC(self):
        """
        Return the difference in Bayesian information criteria (BICs)
        between a purely linear transit time ephemeris and the full
        transit time models.
        """
        chisq = self.chi_squared()
        # BIC penalty: ln(Nobs) per fitted parameter.
        penalty_term=np.log(self.Ncol)*self.Nrow
        BIC_full = chisq + penalty_term
        line_fit_resids = self.observations.linear_fit_residuals()
        chisq_linear_fit = line_fit_resids.dot(line_fit_resids)
        # The linear ephemeris has exactly 2 parameters (T0, P).
        linear_fit_penalty = np.log(self.Ncol) * 2
        BIC_linear = chisq_linear_fit + linear_fit_penalty
        return BIC_linear - BIC_full
class TransitTimesLinearModels(object):
    """
    Object representing a collection of transit time linear models in a system of interacting planets.

    Parameters
    ----------
    observations_list : :obj:`list` of :obj:`PlanetTransitObservations`
        Set of transit observations to model.

    Keyword Arguments
    -----------------
    periods : :obj:`list` of floats
        Orbital periods of the transiting planets. Periods are determined
        by an initial least-squares fit if they are not supplied as a
        keyword argument.
    T0s : :obj:`list` of floats
        Initial times of transits. Determined by a least-squares fit
        when not supplied as a keyword argument
    max_period_ratio : float
        Maximum period ratio for which planet-planet interactions are
        included in the analytic TTV models. Default value is infinite.
    planet_names : str or list of str
        Names to label each planet with. The planet names appear as
        suffixes on basis function labels.

    Attributes
    ----------
    observations : list
        A list of transit time observation objects representing the transit observations for a system
    basis_function_matrices: list
        List of each planet's transit time basis function matrix
    periods : ndarray
        Best-fit periods of all planets
    T0s : ndarray
        Best-fit initial times of transit for all planets
    best_fit : :obj:`list` of ndarray
        List of ndarray best fit amplitudes of each planets' TTV basis functions.
    covariance_matrices : :obj:`list` of ndarray
        List of ndarray covariance matrices for each planets' TTV basis functions.
    """
    def __init__(self,observations_list,**kwargs):
        self.observations=observations_list
        for obs in self.observations:
            errmsg1 = "'TransitObservations' contains transits with negative transit numbers. Please re-number transits."
            # np.alltrue was removed in NumPy 2.0; np.all is the supported spelling.
            assert np.all(obs.transit_numbers>=0), errmsg1
        planet_names = kwargs.get("planet_names",["{}".format(i) for i in range(len(observations_list))])
        assert len(planet_names) == len(self.observations),\
        "Planet name string '{}' length does not match number of observations ({:d})".format(planet_names,len(self.observations))
        self.planet_names = planet_names
        self.planet_name_dict = {planet_name:i for i,planet_name in enumerate(planet_names)}
        periods=kwargs.get("periods",None)
        T0s =kwargs.get("T0s",None)
        # np.infty was removed in NumPy 2.0; np.inf is the supported alias.
        max_period_ratio=kwargs.get("max_period_ratio",np.inf)
        if periods is None or T0s is None:
            # Seed any missing periods/T0s with linear-ephemeris fits.
            initial_linear_fit_data = np.array([obs.linear_best_fit() for obs in self.observations ])
            if periods is None:
                periods = initial_linear_fit_data[:,1]
            if T0s is None:
                T0s = initial_linear_fit_data[:,0]
        self.T0s = T0s
        self.periods = periods
        self.models = [ transit_times_model(obs,suffix = self.planet_names[i]) for i,obs in enumerate(self.observations) ]
        self._maximum_interaction_period_ratio = max_period_ratio
        self._interaction_matrix = SetupInteractionMatrixWithMaxPeriodRatio(self.periods,self.maximum_interaction_period_ratio)
        self.generate_basis_functions()
    def basis_function_matrices(self):
        """list : List containing the basis function matrix of each planet"""
        return [model.basis_function_matrix for model in self.models]
    def reset(self):
        """
        Reset TTV model.

        All TTV basis functions are erased and a linear ephemeris is re-fit to each planet's
        transit times. The interaction matrix is reset so that all pair-wise interactions
        are considered.
        """
        initial_linear_fit_data = np.array([obs.linear_best_fit() for obs in self.observations ])
        self.T0s = initial_linear_fit_data[:,0]
        self.periods = initial_linear_fit_data[:,1]
        # Re-create the models with their name suffixes so parameter keys
        # (e.g. 'P<name>') stay consistent with __init__ and update_fits().
        self.models = [ transit_times_model(obs,suffix = self.planet_names[i]) for i,obs in enumerate(self.observations) ]
        self._maximum_interaction_period_ratio = np.inf
        self._interaction_matrix = SetupInteractionMatrixWithMaxPeriodRatio(self.periods,self.maximum_interaction_period_ratio)
    @property
    def interaction_matrix(self):
        """
        ndarray: Matrix with elements that record whether pair-wise interactions are
        in each planet's set of TTV basis functions. If :math:`I_{ij}=` True, then
        the basis functions accounting for perturbations by planet j on
        planet *i* are included in the model for planet i's TTV.
        """
        return self._interaction_matrix
    @interaction_matrix.setter
    def interaction_matrix(self,value):
        self._interaction_matrix = value
    @property
    def maximum_interaction_period_ratio(self):
        """
        float: Maximum period ratio above which planet-planet interactions are ignored in the TTV model.
        """
        return self._maximum_interaction_period_ratio
    @maximum_interaction_period_ratio.setter
    def maximum_interaction_period_ratio(self,value):
        self.interaction_matrix = SetupInteractionMatrixWithMaxPeriodRatio(self.periods,value)
        self._maximum_interaction_period_ratio = value
    @property
    def N(self):
        """int: Number of planets with transit observations."""
        return len(self.observations)
    def design_matrices(self):
        """:obj:`list` of ndarrays: Design matrices for each planet's TTV model."""
        return [model.design_matrix() for model in self.models]
    def covariance_matrices(self):
        """:obj:`list` of ndarrays: Covariance matrices of each planet's TTV model."""
        return [model.cov() for model in self.models]
    def best_fits(self):
        """dict: best-fit model amplitudes, merged across all planets' TTV models."""
        best = dict()
        for model in self.models:
            best.update(model.best_fit())
        return best
    def chi_squareds(self,per_dof=False):
        """Return the chi-squareds of each transit time model.

        Arguments
        ---------
        per_dof : bool (optional)
            If true, return the chi-squared divided by the
            number of degrees of freedom (i.e., the number
            of observations minus the number of model
            parameters). The default value is False.

        Returns
        -------
        ndarray :
            Chi-squared value of each planet's transit time
            model.
        """
        return np.array([mdl.chi_squared(per_dof) for mdl in self.models])
    def Delta_BICs(self):
        """Return the Delta-BIC of each transit model relative
        to linear ephemerides"""
        return np.array([mdl.Delta_BIC() for mdl in self.models])
    def generate_basis_functions(self,second_order_resonances=None):
        """Generate TTV basis functions and update planets' TTV models."""
        # NOTE: 'second_order_resonances' is currently unused; it is kept for
        # interface compatibility (see add_second_order_resonance).
        for name_i,i in self.planet_name_dict.items():
            per_i = self.periods[i]
            T0_i = self.T0s[i]
            model = self.models[i]
            Ntrans = model.MaxTransitNumber+1
            for name_j,j in self.planet_name_dict.items():
                if self.interaction_matrix[i,j]:
                    per_j = self.periods[j]
                    T0_j = self.T0s[j]
                    bf_mtrx = get_ttv_basis_function_matrix(
                        per_i, per_j, T0_i, T0_j,Ntrans
                    )
                    # Keep only the rows for epochs that were actually observed.
                    bf_mtrx = bf_mtrx[model.observations._transit_numbers]
                    model['dt0_{}{}'.format(name_i,name_j)] = bf_mtrx[:,0]
                    # The dt0 amplitude is a mass ratio: restrict it to (0, 1).
                    model.parameter_bounds['dt0_{}{}'.format(name_i,name_j)] = (0,1)
                    model['dt1x_{}{}'.format(name_i,name_j)] = bf_mtrx[:,1]
                    model['dt1y_{}{}'.format(name_i,name_j)] = bf_mtrx[:,2]
    def add_second_order_resonance(self,planet1,planet2):
        """Add basis-functions for a second-order resonance.

        Arguments
        ---------
        planet1 : str or int
            Name or index of the first planet
        planet2 : str or int
            Name or index of the second planet
        """
        if type(planet1) is str:
            i1str = planet1
            i1 = self.planet_name_dict[planet1]
        elif type(planet1) is int:
            i1str = list(self.planet_name_dict.keys())[planet1]
            i1 = planet1
        else:
            raise ValueError("'planet1' must be of type 'int' or 'str'")
        if type(planet2) is str:
            i2str = planet2
            i2 = self.planet_name_dict[planet2]
        elif type(planet2) is int:
            i2str = list(self.planet_name_dict.keys())[planet2]
            i2 = planet2
        else:
            raise ValueError("'planet2' must be of type 'int' or 'str'")
        p1 = self.periods[i1]
        p2 = self.periods[i2]
        T01 = self.T0s[i1]
        T02 = self.T0s[i2]
        Ntr1 = self.models[i1].MaxTransitNumber + 1
        Ntr2 = self.models[i2].MaxTransitNumber + 1
        # Choose inner/outer expansions based on which planet orbits inside.
        if p1 < p2:
            dt2_1 = dt2_InnerPlanet(p1,p2,T01,T02,Ntr1).astype(float)
            dt2_2 = dt2_OuterPlanet(p1,p2,T01,T02,Ntr2).astype(float)
        else:
            dt2_1 = dt2_OuterPlanet(p2,p1,T02,T01,Ntr1).astype(float)
            dt2_2 = dt2_InnerPlanet(p2,p1,T02,T01,Ntr2).astype(float)
        self.models[i1]['dt2x_{}{}'.format(i1str,i2str)] = dt2_1[:,0]
        self.models[i1]['dt2y_{}{}'.format(i1str,i2str)] = dt2_1[:,1]
        self.models[i2]['dt2x_{}{}'.format(i2str,i1str)] = dt2_2[:,0]
        self.models[i2]['dt2y_{}{}'.format(i2str,i1str)] = dt2_2[:,1]
    def update_fits(self):
        """
        Compute the best-fit periods using current model then re-compute
        model basis functions.
        """
        fit_dict = self.best_fits()
        self.periods = [fit_dict['P{}'.format(name)] for name in self.planet_names]
        self.generate_basis_functions()
    def compute_ttv_significance(self):
        """Estimate the significance of each planet's TTV signal.

        Returns
        -------
        list
            Per-planet significance in 'sigmas' (0 when a planet's model
            has no TTV basis functions beyond the linear ephemeris).
        """
        # BUGFIX: the previous implementation indexed the *bound methods*
        # 'self.covariance_matrices' / 'self.best_fits' instead of calling
        # them, and misplaced a parenthesis in 'len(mu[i] > 2)'.
        Sigma = self.covariance_matrices()
        significance_in_sigmas=[]
        for i,model in enumerate(self.models):
            mu = model.best_fit_vec()
            if len(mu) > 2:
                # Drop the two linear-ephemeris amplitudes (T0, P) and
                # compute chi-squared of the remaining TTV amplitudes.
                muTTV = mu[2:]
                SigmaTTVinv= np.linalg.inv(Sigma[i][2:,2:])
                chisquared = muTTV.dot(SigmaTTVinv.dot(muTTV))
                dof = len(muTTV)
                sigma=chiSquared_to_sigmas(chisquared,dof)
                significance_in_sigmas.append(sigma)
            else:
                significance_in_sigmas.append(0)
        return significance_in_sigmas
    def mask_observations(self,maskfn):
        """Mask transit observations using function 'maskfn'

        Arguments
        ---------
        maskfn : callable
            Function of observation time used to mask observations.
            Observations times for which 'maskfn' returns 'False' are
            masked out, otherwise they are included.
        """
        if not callable(maskfn):
            raise TypeError("'maskfn' must be callable.")
        for model in self.models:
            model.function_mask(maskfn)
def interactionIndicies(LMsystem,i,j):
    """Return index arrays locating the (i, j) pair's TTV amplitudes.

    For each direction of the interaction, the three amplitudes start
    after the two linear-ephemeris entries plus three entries for every
    interacting perturber with a smaller index. An empty list is
    returned for a direction that is not in the interaction matrix.
    """
    i_matrix = LMsystem.interaction_matrix
    def _amplitude_slots(row, col):
        # Indices of the 3 amplitudes of perturber `col` in planet `row`'s vector.
        if not i_matrix[row, col]:
            return []
        start = 2 + 3 * np.sum(i_matrix[row, :col])
        return start + np.arange(3, dtype=int)
    return _amplitude_slots(i, j), _amplitude_slots(j, i)
def chiSquared_to_sigmas(chi2,dof):
    """
    Convert a chi-squared value to a confidence level, in terms of 'sigmas'

    Arguments
    ---------
    chi2 : float
        Chi-squared value
    dof : int
        Number of degrees of freedom

    Returns
    -------
    float
        The 'sigma's with the same confidence level for a 1D Gaussian
    """
    # Cumulative probability of chi2 with `dof` degrees of freedom
    # (regularized lower incomplete gamma function).
    prob = gammainc(0.5 * dof, 0.5 * chi2)
    # Invert the dof=1 chi-squared CDF at the same probability level;
    # the square root of that quantile is the equivalent Gaussian sigma.
    equivalent_chi2_1d = 2.0 * gammaincinv(0.5, prob)
    return np.sqrt(equivalent_chi2_1d)
#############################################
|
shaddenREPO_NAMETTV2Fast2FuriousPATH_START.@TTV2Fast2Furious_extracted@TTV2Fast2Furious-master@ttv2fast2furious@ttv2fast2furious.py@.PATH_END.py
|
{
"filename": "SolidSpheral1d.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/SimulationControl/SolidSpheral1d.py",
"type": "Python"
}
|
#-------------------------------------------------------------------------------
# Import SolidSpheral objects, setting the 1-D objects as generic names.
#-------------------------------------------------------------------------------
from Spheral1d import *
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@SimulationControl@SolidSpheral1d.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/selected/textfont/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Eagerly import the validator on Python < 3.7 (no module-level __getattr__,
# PEP 562) and for static type checkers; otherwise defer the import until
# first attribute access via plotly's lazy relative_import helper.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._color.ColorValidator"]
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@selected@textfont@__init__.py@.PATH_END.py
|
{
"filename": "test_integrator.py",
"repo_name": "astropy/photutils",
"repo_path": "photutils_extracted/photutils-main/photutils/isophote/tests/test_integrator.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the integrator module.
"""
import numpy as np
import pytest
from astropy.io import fits
from numpy.testing import assert_allclose
from photutils.datasets import get_path
from photutils.isophote.integrator import (BILINEAR, MEAN, MEDIAN,
NEAREST_NEIGHBOR)
from photutils.isophote.sample import EllipseSample
@pytest.mark.remote_data
class TestData:
    """Base class: downloads the synthetic high-S/N test image and provides
    a helper to extract elliptical isophote samples from it."""
    def setup_class(self):
        # Fetch (and cache) the test image from the photutils-datasets repo.
        path = get_path('isophote/synth_highsnr.fits',
                        location='photutils-datasets', cache=True)
        hdu = fits.open(path)
        self.data = hdu[0].data
        hdu.close()

    def make_sample(self, masked=False, sma=40.0, integrmode=BILINEAR):
        """Extract a sample at semimajor axis `sma` with the given
        integration mode; return the extracted arrays and the sample."""
        if masked:
            # Mask pixels at the background level (200 +/- 10).
            data = np.ma.masked_values(self.data, 200.0, atol=10.0, rtol=0.0)
        else:
            data = self.data
        sample = EllipseSample(data, sma, integrmode=integrmode)
        s = sample.extract()
        # extract() returns three equal-length arrays.
        assert len(s) == 3
        assert len(s[0]) == len(s[1])
        assert len(s[0]) == len(s[2])
        return s, sample
@pytest.mark.remote_data
class TestUnmasked(TestData):
    """Check sample statistics for each integration mode on the unmasked image."""
    def test_bilinear(self):
        """Bilinear integration at sma=40."""
        s, sample = self.make_sample()
        assert len(s[0]) == 225
        # intensities
        assert_allclose(np.mean(s[2]), 200.76, atol=0.01)
        assert_allclose(np.std(s[2]), 21.55, atol=0.01)
        # radii
        assert_allclose(np.max(s[1]), 40.0, atol=0.01)
        assert_allclose(np.min(s[1]), 32.0, atol=0.01)
        assert sample.total_points == 225
        assert sample.actual_points == 225

    def test_bilinear_small(self):
        """Bilinear integration at small sma."""
        # small radius forces sub-pixel sampling
        s, sample = self.make_sample(sma=10.0)
        # intensities
        assert_allclose(np.mean(s[2]), 1045.4, atol=0.1)
        assert_allclose(np.std(s[2]), 143.0, atol=0.1)
        # radii
        assert_allclose(np.max(s[1]), 10.0, atol=0.1)
        assert_allclose(np.min(s[1]), 8.0, atol=0.1)
        assert sample.total_points == 57
        assert sample.actual_points == 57

    def test_nearest_neighbor(self):
        """Nearest-neighbor integration at sma=40."""
        s, sample = self.make_sample(integrmode=NEAREST_NEIGHBOR)
        assert len(s[0]) == 225
        # intensities
        assert_allclose(np.mean(s[2]), 201.1, atol=0.1)
        assert_allclose(np.std(s[2]), 21.8, atol=0.1)
        # radii
        assert_allclose(np.max(s[1]), 40.0, atol=0.01)
        assert_allclose(np.min(s[1]), 32.0, atol=0.01)
        assert sample.total_points == 225
        assert sample.actual_points == 225

    def test_mean(self):
        """Sector-mean integration at sma=40."""
        s, sample = self.make_sample(integrmode=MEAN)
        assert len(s[0]) == 64
        # intensities
        assert_allclose(np.mean(s[2]), 199.9, atol=0.1)
        assert_allclose(np.std(s[2]), 21.3, atol=0.1)
        # radii
        assert_allclose(np.max(s[1]), 40.0, atol=0.01)
        assert_allclose(np.min(s[1]), 32.0, atol=0.01)
        assert_allclose(sample.sector_area, 12.4, atol=0.1)
        assert sample.total_points == 64
        assert sample.actual_points == 64

    def test_mean_small(self):
        """Sector-mean integration at small sma."""
        s, sample = self.make_sample(sma=5.0, integrmode=MEAN)
        assert len(s[0]) == 29
        # intensities
        assert_allclose(np.mean(s[2]), 2339.0, atol=0.1)
        assert_allclose(np.std(s[2]), 284.7, atol=0.1)
        # radii
        assert_allclose(np.max(s[1]), 5.0, atol=0.01)
        assert_allclose(np.min(s[1]), 4.0, atol=0.01)
        assert_allclose(sample.sector_area, 2.0, atol=0.1)
        assert sample.total_points == 29
        assert sample.actual_points == 29

    def test_median(self):
        """Sector-median integration at sma=40."""
        s, sample = self.make_sample(integrmode=MEDIAN)
        assert len(s[0]) == 64
        # intensities
        assert_allclose(np.mean(s[2]), 199.9, atol=0.1)
        assert_allclose(np.std(s[2]), 21.3, atol=0.1)
        # radii
        assert_allclose(np.max(s[1]), 40.0, atol=0.01)
        assert_allclose(np.min(s[1]), 32.01, atol=0.01)
        assert_allclose(sample.sector_area, 12.4, atol=0.1)
        assert sample.total_points == 64
        assert sample.actual_points == 64
@pytest.mark.remote_data
class TestMasked(TestData):
    """Repeat the checks with background-level pixels masked; fewer valid
    points are expected (actual_points < total_points)."""
    def test_bilinear(self):
        """Bilinear integration on the masked image."""
        s, sample = self.make_sample(masked=True, integrmode=BILINEAR)
        assert len(s[0]) == 157
        # intensities
        assert_allclose(np.mean(s[2]), 201.52, atol=0.01)
        assert_allclose(np.std(s[2]), 25.21, atol=0.01)
        # radii
        assert_allclose(np.max(s[1]), 40.0, atol=0.01)
        assert_allclose(np.min(s[1]), 32.0, atol=0.01)
        assert sample.total_points == 225
        assert sample.actual_points == 157

    def test_mean(self):
        """Sector-mean integration on the masked image."""
        s, sample = self.make_sample(masked=True, integrmode=MEAN)
        assert len(s[0]) == 51
        # intensities
        assert_allclose(np.mean(s[2]), 199.9, atol=0.1)
        assert_allclose(np.std(s[2]), 24.12, atol=0.1)
        # radii
        assert_allclose(np.max(s[1]), 40.0, atol=0.01)
        assert_allclose(np.min(s[1]), 32.0, atol=0.01)
        assert_allclose(sample.sector_area, 12.4, atol=0.1)
        assert sample.total_points == 64
        assert sample.actual_points == 51
|
astropyREPO_NAMEphotutilsPATH_START.@photutils_extracted@photutils-main@photutils@isophote@tests@test_integrator.py@.PATH_END.py
|
{
"filename": "VAC_MVM_Marvin_tutorial.ipynb",
"repo_name": "sdss/marvin",
"repo_path": "marvin_extracted/marvin-main/docs/sphinx/tutorials/notebooks/VAC_MVM_Marvin_tutorial.ipynb",
"type": "Jupyter Notebook"
}
|
# MaNGA Visual Morphology Catalogue (MVM-VAC)
Credit: Jose Antonio Vazquez Mata
See [SDSS DR16 MVM](https://www.sdss.org/dr16/data_access/value-added-catalogs/?vac_id=manga-visual-morphologies-from-sdss-and-desi-images) for more information about this VAC.
This catalogue contains a direct visual morphological classification based on the inspection of image mosaics generated from a combination of SDSS and Dark Energy Legacy Surveys (DESI; legacysurvey.org) images, for all galaxies in MaNGA with unique MaNGA-ID. After a digital image post-processing, we exploit the advantages of these images to identify inner structures, as well as external low surface brightness features for an homogeneous classification. We provide the corresponding mosaics for all MaNGA DR17 galaxies, and also a new estimate of the structural CAS parameters (Concentration, Asymmetry and Clumpiness) based on the DESI images.
The image mosaics generated to carry out this classification are also accessible.
This catalogue has 2 versions:
Version_1.0.1 contains the information for the first 4614 galaxies (MPL-7) and was released as part of the SDSS DR16.
DataModel: https://data.sdss.org/datamodel/files/MANGA_MORPHOLOGY/manga_visual_morpho/1.0.1/manga_visual_morpho.html
Files: https://data.sdss.org/sas/dr16/manga/morphology/manga_visual_morpho/1.0.1/
Version_2.0.1 contains the information for the ~10200 galaxies in the last DR17 (MPL-11), with some updates compared to the previous version, as described in the Data Model. We suggest the reader use this version.
DataModel: https://data.sdss.org/datamodel/files/MANGA_MORPHOLOGY/manga_visual_morpho/2.0.1/manga_visual_morpho.html
Files: https://data.sdss.org/sas/dr17/manga/morphology/manga_visual_morpho/2.0.1/
## Accessing the MVM-VAC through Marvin
The first step is to import the marvin libraries to access the information
```python
import marvin
from marvin.tools import Cube
from marvin.tools import Maps
from marvin.tools.vacs import VACs
```
[0;34m[INFO]: [0mNo release version set. Setting default to DR15
[1;33m[WARNING]: [0m[0;39mpath /Users/jv47/sas/mangawork/manga/spectro/redux/v2_4_3/drpall-v2_4_3.fits cannot be found. Setting drpall to None.[0m [0;36m(MarvinUserWarning)[0m
[1;33m[WARNING]: [0m[0;39mpath /Users/jv47/sas/mangawork/manga/spectro/analysis/v2_4_3/2.2.1/dapall-v2_4_3-2.2.1.fits cannot be found. Setting dapall to None.[0m [0;36m(MarvinUserWarning)[0m
## Setting the Data Release (DR) version
If the DR version is not specified, Marvin sets DR15 as the default version. In order to change the DR version, use the following lines, replacing "DR16" with the version required. NOTE: the MVM-VAC is only available in the DR16 and DR17 versions
```python
from marvin import config
config.setRelease("DR16")
```
[1;33m[WARNING]: [0m[0;39mpath /Users/jv47/sas/dr16/manga/spectro/redux/v2_4_3/drpall-v2_4_3.fits cannot be found. Setting drpall to None.[0m [0;36m(MarvinUserWarning)[0m
[1;33m[WARNING]: [0m[0;39mpath /Users/jv47/sas/dr16/manga/spectro/analysis/v2_4_3/2.2.1/dapall-v2_4_3-2.2.1.fits cannot be found. Setting dapall to None.[0m [0;36m(MarvinUserWarning)[0m
To make sure the required VAC is available in the selected DR version, the following instruction can be followed to list the available VACs:
```python
v = VACs()
v
```
<VACs (firefly, galaxyzoo, gema, HI, visual_morphology)>
## Setting your target
Please provide the plate-IFU information of your target:
```python
cube = Cube('9891-1902')
cube
```
<Marvin Cube (plateifu='9891-1902', mode='remote', data_origin='api')>
Here you can see the VACs containing information of your target:
```python
vacs=cube.vacs
vacs
```
<VACContainer ('firefly', 'galaxyzoo', 'gema', 'HI', 'visual_morphology')>
## The MaNGA Visual Morphology Catalogue
To call the information of your target in the MVM-VAC follow the next lines. Note: if you have not downloaded the tables and images (using Marvin), this instruction will download the VAC table and the corresponding images associated with your target
```python
morp = vacs.visual_morphology
print(morp)
print(type(morp))
```
[1;33m[WARNING]: [0mYou are accessing outdated DR16 data for this VAC. This target has updated data in DR17. We recommend using the new data release instead.
Target(9891-1902)
<class 'marvin.contrib.vacs.visual_morph.VizMorphTarget'>
Use "data" to access the data for your target. In order to understand the meaning of each column, we refer the reader to the corresponding DataModel, following the links provided at the top of this tutorial
```python
morp.data
```
FITS_rec([('manga-9891-1902', '9891-1902', '1-373827', 228.78634859, 28.38498037, 'Sbc', 4, 0, 0, 3.217, 0.053, 0.11, 0.009, 0.37, 0.451)],
dtype=(numpy.record, [('name', 'S17'), ('plateifu', 'S11'), ('MANGAID', 'S9'), ('objra', '>f8'), ('objdec', '>f8'), ('Type', 'S6'), ('TType', '>i2'), ('edge_on', '>i2'), ('tidal', '>i2'), ('C', '>f4'), ('E_C', '>f4'), ('A', '>f4'), ('E_A', '>f4'), ('S', '>f4'), ('E_S', '>f4')]))
You can get the information of a specific column adding its name:
```python
morp.data.Type
```
chararray(['Sbc'], dtype='<U6')
```python
print(morp.data.C, morp.data.E_C)
```
[3.217] [0.053]
## Access to the Image Mosaics
Additional to the data, we also provide the image mosaics used to extract the information in the MVM-VAC. To access these mosaics use the instruction: morp.show_mosaic('survey')
Version_1.0.1 (DR16). For this version there are two sets of mosaics:
i) the SDSS mosaic, containing a gray logarithmic-scaled r-band image, a filter-enhanced r-band image and the corresponding gri colour composite image, from the SDSS images. 'survey'='sdss'
ii) the DESI mosaic, containing a filter-enhanced r-band image and the corresponding gri colour composite image, from the DESI images. 'survey'='desi'
Version_2.0.1 (DR17).
For this version, we have combined the relevant information from SDSS and DESI surveys in one mosaic. This mosaic contains the gri colour composite image from SDSS; the grz colour composite image, the residual image after subtraction of a best surface brightness model and the filter-enhanced r-band image from the DESI Legacy Surveys. 'survey'='mos'
```python
morp.show_mosaic('sdss')
```
NOTE: For DR16, must specify either survey: sdss or desi. For DR17 must write: mos
<matplotlib.axes._subplots.AxesSubplot at 0x7ff575e10400>

## Full Catalogue Access
To get access to the full catalogue follow:
```python
mvm = v.visual_morphology
table = mvm.get_table(ext=1)
```
```python
table
```
<i>Table masked=True length=4696</i>
<table id="table140692221495856" class="table-striped table-bordered table-condensed">
<thead><tr><th>name</th><th>plateifu</th><th>MANGAID</th><th>objra</th><th>objdec</th><th>Type</th><th>TType</th><th>edge_on</th><th>tidal</th><th>C</th><th>E_C</th><th>A</th><th>E_A</th><th>S</th><th>E_S</th></tr></thead>
<thead><tr><th>bytes17</th><th>bytes11</th><th>bytes9</th><th>float64</th><th>float64</th><th>bytes6</th><th>int16</th><th>int16</th><th>int16</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th></tr></thead>
<tr><td>manga-10001-12701</td><td>10001-12701</td><td>1-48157</td><td>133.371090612</td><td>57.5984251446</td><td>Sbc</td><td>4</td><td>0</td><td>0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td></tr>
<tr><td>manga-10001-12702</td><td>10001-12702</td><td>1-48188</td><td>133.685669869</td><td>57.4802503218</td><td>SABbc</td><td>4</td><td>0</td><td>0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td></tr>
<tr><td>manga-10001-12703</td><td>10001-12703</td><td>1-55648</td><td>136.017159969</td><td>57.0923291779</td><td>Sbc</td><td>4</td><td>0</td><td>0</td><td>3.296</td><td>0.055</td><td>0.13</td><td>0.019</td><td>-0.71</td><td>0.857</td></tr>
<tr><td>manga-10001-12704</td><td>10001-12704</td><td>1-55616</td><td>133.989966869</td><td>57.6779676669</td><td>S</td><td>11</td><td>1</td><td>0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td></tr>
<tr><td>manga-10001-12705</td><td>10001-12705</td><td>1-55784</td><td>136.75137451</td><td>57.4514369241</td><td>Sbc</td><td>4</td><td>0</td><td>0</td><td>2.903</td><td>0.046</td><td>0.125</td><td>0.015</td><td>0.53</td><td>0.676</td></tr>
<tr><td>manga-10001-1901</td><td>10001-1901</td><td>1-55567</td><td>133.330028009</td><td>57.0411553708</td><td>S0</td><td>-2</td><td>0</td><td>0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td></tr>
<tr><td>manga-10001-1902</td><td>10001-1902</td><td>1-48201</td><td>134.193923352</td><td>56.7867469988</td><td>SBa</td><td>1</td><td>0</td><td>0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td></tr>
<tr><td>manga-10001-3701</td><td>10001-3701</td><td>1-48111</td><td>132.465646765</td><td>57.1437279024</td><td>S0</td><td>-2</td><td>0</td><td>0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td></tr>
<tr><td>manga-10001-3702</td><td>10001-3702</td><td>1-48136</td><td>132.912768243</td><td>57.1074235568</td><td>E</td><td>-5</td><td>0</td><td>0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td><td>-999.0</td></tr>
<tr><td>manga-10001-3703</td><td>10001-3703</td><td>1-55612</td><td>134.591498946</td><td>57.6849653262</td><td>Sab</td><td>2</td><td>0</td><td>0</td><td>2.858</td><td>0.046</td><td>0.073</td><td>0.01</td><td>-0.15</td><td>0.308</td></tr>
<tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr>
<tr><td>manga-9891-3701</td><td>9891-3701</td><td>1-373870</td><td>228.451852792</td><td>28.032958699</td><td>Sa</td><td>1</td><td>0</td><td>1</td><td>4.173</td><td>0.076</td><td>0.145</td><td>0.026</td><td>0.42</td><td>0.738</td></tr>
<tr><td>manga-9891-3702</td><td>9891-3702</td><td>1-631872</td><td>226.943577224</td><td>28.8199794804</td><td>Sa</td><td>1</td><td>0</td><td>0</td><td>3.323</td><td>0.055</td><td>0.395</td><td>0.015</td><td>0.45</td><td>0.564</td></tr>
<tr><td>manga-9891-3703</td><td>9891-3703</td><td>1-374423</td><td>229.811607205</td><td>29.340022935</td><td>E</td><td>-5</td><td>0</td><td>0</td><td>4.45</td><td>0.077</td><td>0.205</td><td>0.013</td><td>0.6</td><td>0.657</td></tr>
<tr><td>manga-9891-3704</td><td>9891-3704</td><td>1-374400</td><td>228.735926301</td><td>28.7597519597</td><td>S0</td><td>-2</td><td>0</td><td>0</td><td>3.631</td><td>0.058</td><td>0.053</td><td>0.005</td><td>0.26</td><td>0.252</td></tr>
<tr><td>manga-9891-6101</td><td>9891-6101</td><td>1-436574</td><td>227.481185986</td><td>28.3821764749</td><td>Sa</td><td>1</td><td>0</td><td>0</td><td>3.66</td><td>0.064</td><td>0.092</td><td>0.006</td><td>0.2</td><td>0.273</td></tr>
<tr><td>manga-9891-6102</td><td>9891-6102</td><td>1-373878</td><td>228.414854807</td><td>28.2444605219</td><td>Sc</td><td>5</td><td>1</td><td>0</td><td>3.089</td><td>0.051</td><td>0.229</td><td>0.008</td><td>0.14</td><td>0.296</td></tr>
<tr><td>manga-9891-6103</td><td>9891-6103</td><td>1-373953</td><td>226.990601449</td><td>28.8818610359</td><td>E</td><td>-5</td><td>0</td><td>0</td><td>3.746</td><td>0.062</td><td>0.04</td><td>0.005</td><td>-0.16</td><td>0.251</td></tr>
<tr><td>manga-9891-6104</td><td>9891-6104</td><td>1-374081</td><td>228.073311064</td><td>29.6572101033</td><td>S0</td><td>-2</td><td>0</td><td>0</td><td>3.697</td><td>0.06</td><td>0.09</td><td>0.005</td><td>-0.69</td><td>0.573</td></tr>
<tr><td>manga-9891-9101</td><td>9891-9101</td><td>1-374007</td><td>227.041406706</td><td>29.2221919157</td><td>S0</td><td>-2</td><td>0</td><td>0</td><td>3.911</td><td>0.07</td><td>0.02</td><td>0.025</td><td>-0.67</td><td>0.887</td></tr>
<tr><td>manga-9891-9102</td><td>9891-9102</td><td>1-436646</td><td>228.176378785</td><td>27.7995586951</td><td>Sa</td><td>1</td><td>0</td><td>0</td><td>3.405</td><td>0.054</td><td>0.048</td><td>0.007</td><td>-0.01</td><td>0.089</td></tr>
</table>
## Plots
It is possible to extract the data from the "table", and plot the data from this catalogue using Matplotlib
```python
table['TType', 'C']
Ttype = table['TType']
C = table['C']
idx = (C>=0)*(Ttype<12)
```
```python
import matplotlib.pyplot as plt
plt.plot(Ttype[idx], C[idx], 'bo')
```
[<matplotlib.lines.Line2D at 0x7ff57688b2b0>]

```python
```
|
sdssREPO_NAMEmarvinPATH_START.@marvin_extracted@marvin-main@docs@sphinx@tutorials@notebooks@VAC_MVM_Marvin_tutorial.ipynb@.PATH_END.py
|
{
"filename": "example_inv_murakami.py",
"repo_name": "geodynamics/burnman",
"repo_path": "burnman_extracted/burnman-main/examples/example_inv_murakami.py",
"type": "Python"
}
|
from __future__ import absolute_import
from __future__ import print_function
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
import sys
import numpy as np
import matplotlib.pyplot as plt
import burnman
from burnman import minerals
import pymc
import cProfile
import scipy.stats as sp
import matplotlib.mlab as mlab
if __name__ == "__main__":
    # Reference Earth model (PREM) that the composition inversion targets.
    seismic_model = burnman.seismic.PREM()
    number_of_points = (
        10  # set on how many depth slices the computations should be done
    )
    # Lower-mantle depth range sampled for the misfit evaluation.
    depths = np.linspace(1000e3, 2500e3, number_of_points)
    # Reference pressure, density and velocity profiles at those depths;
    # these module-level arrays are read by calc_velocities()/error() below.
    seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate(
        ["pressure", "density", "v_p", "v_s", "v_phi"], depths
    )
    # Adiabatic geotherm of Brown & Shankland evaluated at the same depths.
    temperature = burnman.geotherm.brown_shankland(depths)
    print("preparations done")
def calc_velocities(amount_pv, iron_pv, iron_fp):
pv = minerals.SLB_2011.mg_fe_perovskite([1.0 - iron_pv, iron_pv, 0.0])
fp = minerals.SLB_2011.ferropericlase([1.0 - iron_fp, iron_fp])
rock = burnman.Composite([pv, fp], [amount_pv, 1.0 - amount_pv])
mat_rho, mat_vp, mat_vs = rock.evaluate(
["density", "v_p", "v_s"], seis_p, temperature
)
return mat_vp, mat_vs, mat_rho
def error(amount_pv, iron_pv, iron_fp):
mat_vp, mat_vs, mat_rho = calc_velocities(amount_pv, iron_pv, iron_fp)
vs_err = burnman.utils.math.l2(depths, mat_vs, seis_vs) / 1e9
vp_err = burnman.utils.math.l2(depths, mat_vp, seis_vp) / 1e9
den_err = burnman.utils.math.l2(depths, mat_rho, seis_rho) / 1e9
# print vs_err, vp_err, den_err
return vs_err + vp_err + den_err
    # Priors on unknown parameters:
    amount_pv = pymc.Uniform("amount_pv", lower=0.0, upper=1.0, value=0.5)
    iron_pv = pymc.Uniform("iron_pv", lower=0.0, upper=1.0, value=0.5)
    iron_fp = pymc.Uniform("iron_fp", lower=0.0, upper=1.0, value=0.5)

    # Best misfit seen so far; updated from inside theta() purely for
    # progress reporting on stdout.
    minerr = 1e100

    @pymc.deterministic
    def theta(amount_pv=amount_pv, iron_pv=iron_pv, iron_fp=iron_fp):
        # Deterministic pymc node mapping the three composition parameters
        # to the scalar misfit computed by error().
        global minerr
        try:
            e = error(amount_pv, iron_pv, iron_fp)
            if e < minerr:
                minerr = e
                print("best fit", e, "values:", amount_pv, iron_pv, iron_fp)
            return e
        except ValueError:
            # Equation-of-state evaluation can fail for unphysical
            # compositions; return a huge misfit instead of aborting.
            return 1e20  # float("inf")

    # Gaussian likelihood on the misfit: observing misfit "0" with width sig
    # turns small misfits into high posterior probability.
    sig = 10.0
    misfit = pymc.Normal(
        "d", mu=theta, tau=1.0 / (sig * sig), value=0, observed=True, trace=True
    )
    model = [amount_pv, iron_pv, iron_fp, misfit]
    things = ["amount_pv", "iron_pv", "iron_fp", "misfit"]

    whattodo = ""

    # Minimal CLI: argv[1] selects the action, argv[2] the trace database file.
    if len(sys.argv) < 3:
        print("options:")
        print("run <dbname>")
        print("continue <dbname>")
        print("plot <dbname1> <dbname2> ...")
        print("show amount_pv iron_pv iron_fp")
    else:
        whattodo = sys.argv[1]
        dbname = sys.argv[2]
if whattodo == "run":
S = pymc.MCMC(model, db="pickle", dbname=dbname)
S.sample(iter=400, burn=200, thin=1)
S.db.close()
if whattodo == "continue":
n_runs = 50
for l in range(0, n_runs):
db = pymc.database.pickle.load(dbname)
print(
"*** run=%d/%d, # samples: %d"
% (l, n_runs, db.trace("amount_pv").stats()["n"])
)
S = pymc.MCMC(model, db=db)
S.sample(iter=500, burn=0, thin=1)
S.db.close()
if whattodo == "plot":
files = sys.argv[2:]
print("files:", files)
toburn = 0
plot_idx = 1
for t in things:
if t == "misfit":
continue
trace = []
print("trace:", t)
for filename in files:
db = pymc.database.pickle.load(filename)
dir(db)
newtrace = db.trace(t, chain=None).gettrace(burn=toburn, chain=None)
if trace != []:
trace = np.append(trace, newtrace)
else:
trace = newtrace
print(" adding ", newtrace.size, "burn = ", toburn)
print(" total size ", trace.size)
print("mean = ", trace.mean())
for bin in [10, 20, 50, 100]:
hist, bin_edges = np.histogram(trace, bins=bin)
a = np.argmax(hist)
print(
"maxlike = ",
bin_edges[a],
bin_edges[a + 1],
(bin_edges[a] + bin_edges[a + 1]) / 2.0,
)
plt.subplot(2, len(things) / 2, plot_idx)
if plot_idx == 2:
n, bins, patches = plt.hist(
np.array(trace), 50, normed=1, facecolor="green", alpha=0.75
)
X = sp.gumbel_l.fit(np.array(trace))
print(X)
dist = sp.gumbel_l(X[0], X[1])
x = np.array(bins)
y = dist.pdf(x)
print(y)
plt.plot(x, y, "k--", linewidth=2)
X = sp.norm.fit(np.array(trace))
print(X)
dist = sp.norm(X[0], X[1])
x = np.array(bins)
y = dist.pdf(x)
plt.plot(x, y, "r--", linewidth=2)
X = sp.genextreme.fit(np.array(trace))
print(X)
dist = sp.genextreme(X[0], X[1], X[2])
x = np.array(bins)
y = dist.pdf(x)
plt.plot(x, y, "b--", linewidth=2)
plt.title("%s" % (t), fontsize="small")
elif plot_idx == 3:
n, bins, patches = plt.hist(
np.array(trace), 50, normed=1, facecolor="green", alpha=0.75
)
X = sp.expon.fit(np.array(trace), floc=0)
print(X)
dist = sp.expon(X[0], X[1])
print(bins)
print(dist.pdf(np.array(bins)))
plt.plot(bins, dist.pdf(np.array(bins)), "r--", linewidth=2)
plt.title(
"%s, mu: %.3e, sigma: %.3e" % (t, mu, sigma), fontsize="small"
)
else:
(mu, sigma) = sp.norm.fit(np.array(trace))
print("mu, sigma: %e %e" % (mu, sigma))
n, bins, patches = plt.hist(
np.array(trace), 50, normed=1, facecolor="green", alpha=0.75
)
y = mlab.normpdf(bins, mu, sigma)
l = plt.plot(bins, y, "r--", linewidth=2)
plt.title(
"%s, mean: %.3e, std dev.: %.3e" % (t, mu, sigma), fontsize="small"
)
plot_idx += 1
plt.savefig("output_figures/example_inv_murakami.png")
plt.show()
if whattodo == "misfittest":
for a in np.linspace(0.4, 0.8, 10):
for b in np.linspace(0.05, 0.2, 5):
for c in np.linspace(0, 0.2, 5):
mat_vp, mat_vs, mat_rho = calc_velocities(a, b, c)
misfit = error(a, b, c)
print("misfit: %s " % misfit)
if misfit < 25:
plt.subplot(2, 2, 1)
plt.plot(
seis_p / 1.0e9,
mat_vs / 1.0e3,
color="r",
linestyle="-",
marker="x",
markerfacecolor="r",
markersize=4,
)
plt.subplot(2, 2, 2)
plt.plot(
seis_p / 1.0e9,
mat_vp / 1.0e3,
color="r",
linestyle="-",
marker="x",
markerfacecolor="r",
markersize=4,
)
plt.subplot(2, 2, 3)
plt.plot(
seis_p / 1.0e9,
mat_rho / 1.0e3,
color="r",
linestyle="-",
marker="x",
markerfacecolor="r",
markersize=4,
label="model 1",
)
plt.subplot(2, 2, 1)
plt.plot(
seis_p / 1.0e9,
seis_vs / 1.0e3,
color="k",
linestyle="-",
marker="o",
markerfacecolor="None",
markersize=6,
)
plt.ylim([4, 8])
plt.title("Vs (km/s)")
# plot Vp
plt.subplot(2, 2, 2)
plt.plot(
seis_p / 1.0e9,
seis_vp / 1.0e3,
color="k",
linestyle="-",
marker="o",
markerfacecolor="k",
markersize=4,
)
plt.ylim([10, 14])
plt.title("Vp (km/s)")
# plot density
plt.subplot(2, 2, 3)
plt.plot(
seis_p / 1.0e9,
seis_rho / 1.0e3,
color="k",
linestyle="-",
marker="o",
markerfacecolor="k",
markersize=4,
label="ref",
)
plt.title("density ($\cdot 10^3$ kg/m^$3$)")
plt.legend(loc="upper left")
plt.ylim([4, 8])
plt.savefig("output_figures/example_inv_murakami_2.png")
plt.show()
if whattodo == "test":
db = pymc.database.pickle.load(dbname)
S = pymc.MCMC(model, db=db)
for t in things:
print(db.trace(t).stats())
print("means:")
for t in things:
print(t, "\t", db.trace(t).stats()["mean"])
print("#samples: %d" % db.trace("mg_pv_K").stats()["n"])
pymc.raftery_lewis(S, q=0.025, r=0.01)
b = 1
t = 1
scores = pymc.geweke(S, intervals=20)
pymc.Matplot.trace(
db.trace("deviance", chain=None).gettrace(burn=1000, thin=t, chain=None),
"deviance",
rows=2,
columns=9,
num=1,
)
pymc.Matplot.trace(
db.trace("mg_pv_K", chain=None).gettrace(thin=t, chain=None),
"mg_pv_K",
rows=2,
columns=9,
num=2,
)
pymc.Matplot.histogram(
np.array(db.trace("mg_pv_K", chain=None).gettrace(burn=b, chain=None)),
"mg_pv_K",
rows=2,
columns=9,
num=11,
)
pymc.Matplot.trace(
db.trace("mg_pv_K_prime", chain=None).gettrace(thin=t, chain=None),
"mg_pv_K_prime",
rows=2,
columns=9,
num=3,
)
pymc.Matplot.histogram(
np.array(
db.trace("mg_pv_K_prime", chain=None).gettrace(burn=b, chain=None)
),
"mg_pv_K_prime",
rows=2,
columns=9,
num=12,
)
pymc.Matplot.trace(
db.trace("mg_pv_G", chain=None).gettrace(thin=t, chain=None),
"mg_pv_G",
rows=2,
columns=9,
num=4,
)
pymc.Matplot.histogram(
np.array(db.trace("mg_pv_G", chain=None).gettrace(burn=b, chain=None)),
"mg_pv_G",
rows=2,
columns=9,
num=13,
)
pymc.Matplot.trace(
db.trace("mg_pv_G_prime", chain=None).gettrace(thin=t, chain=None),
"mg_pv_G_prime",
rows=2,
columns=9,
num=5,
)
pymc.Matplot.histogram(
np.array(
db.trace("mg_pv_G_prime", chain=None).gettrace(burn=b, chain=None)
),
"mg_pv_G_prime",
rows=2,
columns=9,
num=14,
)
pymc.Matplot.trace(
db.trace("fe_pv_K", chain=None).gettrace(thin=t, chain=None),
"fe_pv_K",
rows=2,
columns=9,
num=6,
)
pymc.Matplot.histogram(
np.array(db.trace("fe_pv_K", chain=None).gettrace(burn=b, chain=None)),
"fe_pv_K",
rows=2,
columns=9,
num=15,
)
pymc.Matplot.trace(
db.trace("fe_pv_K_prime", chain=None).gettrace(thin=t, chain=None),
"fe_pv_K_prime",
rows=2,
columns=9,
num=7,
)
pymc.Matplot.histogram(
np.array(
db.trace("fe_pv_K_prime", chain=None).gettrace(burn=b, chain=None)
),
"fe_pv_K_prime",
rows=2,
columns=9,
num=16,
)
pymc.Matplot.trace(
db.trace("fe_pv_G", chain=None).gettrace(thin=t, chain=None),
"fe_pv_G",
rows=2,
columns=9,
num=8,
)
pymc.Matplot.histogram(
np.array(db.trace("fe_pv_G", chain=None).gettrace(burn=b, chain=None)),
"fe_pv_G",
rows=2,
columns=9,
num=17,
)
pymc.Matplot.trace(
db.trace("fe_pv_G_prime", chain=None).gettrace(thin=t, chain=None),
"fe_pv_G_prime",
rows=2,
columns=9,
num=9,
)
pymc.Matplot.histogram(
np.array(
db.trace("fe_pv_G_prime", chain=None).gettrace(burn=b, chain=None)
),
"fe_pv_G_prime",
rows=2,
columns=9,
num=18,
)
plt.show()
if whattodo == "show":
values = [float(i) for i in sys.argv[2:]]
mat_vp, mat_vs, mat_rho = calc_velocities(values[0], values[1], values[2])
misfit = error(values[0], values[1], values[2])
print("misfit: %s " % misfit)
plt.suptitle(
"misfit %.3e, amount_pv=%.4f, iron_pv=%.4f, iron_fp=%.4f"
% (misfit, values[0], values[1], values[2])
)
plt.subplot(2, 2, 1)
plt.plot(
seis_p / 1.0e9,
mat_vs / 1.0e3,
color="r",
linestyle="-",
marker="x",
markerfacecolor="r",
markersize=4,
)
plt.plot(
seis_p / 1.0e9,
seis_vs / 1.0e3,
color="k",
linestyle="-",
marker="o",
markerfacecolor="None",
markersize=6,
)
plt.ylim([4, 8])
plt.title("Vs (km/s)")
# plot Vp
plt.subplot(2, 2, 2)
plt.plot(
seis_p / 1.0e9,
mat_vp / 1.0e3,
color="r",
linestyle="-",
marker="x",
markerfacecolor="r",
markersize=4,
)
plt.plot(
seis_p / 1.0e9,
seis_vp / 1.0e3,
color="k",
linestyle="-",
marker="o",
markerfacecolor="k",
markersize=4,
)
plt.ylim([10, 14])
plt.title("Vp (km/s)")
# plot density
plt.subplot(2, 2, 3)
plt.plot(
seis_p / 1.0e9,
mat_rho / 1.0e3,
color="r",
linestyle="-",
marker="x",
markerfacecolor="r",
markersize=4,
label="model 1",
)
plt.plot(
seis_p / 1.0e9,
seis_rho / 1.0e3,
color="k",
linestyle="-",
marker="o",
markerfacecolor="k",
markersize=4,
label="ref",
)
plt.title("density ($\cdot 10^3$ kg/m$^3$)")
plt.legend(loc="upper left")
plt.ylim([4, 8])
plt.savefig("output_figures/example_inv_murakami_2.png")
plt.show()
if whattodo == "profile2":
# run with:
# python -m cProfile -o output.pstats example_inv_big_pv.py profile2 1
# gprof2dot.py -f pstats output.pstats | dot -Tpng -o output.png
[
error(
235.654790318e9,
3.87724833477,
165.45907725e9,
1.61618366689,
273.888690109e9,
4.38543140228,
306.635371217e9,
1.42610871557,
)
for i in range(0, 10)
]
if whattodo == "profile":
# just run normally
cProfile.run(
"[error(235.654790318e9, 3.87724833477, 165.45907725e9, 1.61618366689, 273.888690109e9, 4.38543140228, 306.635371217e9, 1.42610871557) for i in range(0,10)]"
)
|
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@examples@example_inv_murakami.py@.PATH_END.py
|
{
"filename": "pulsations.py",
"repo_name": "mikecokina/elisa",
"repo_path": "elisa_extracted/elisa-master/src/elisa/binary_system/surface/pulsations.py",
"type": "Python"
}
|
import numpy as np
from .. import utils as bsutils
from ... pulse.container_ops import (
incorporate_pulsations_to_model,
generate_harmonics
)
from ... import const
def build_harmonics(system, component, components_distance):
    """
    Adds pre-calculated spherical harmonics values for each pulsation mode.

    :param system: elisa.binary_system.container.OrbitalPositionContainer; instance
    :param component: Union[str, None]; which component(s) to process
    :param components_distance: float; instantaneous distance between components
    """
    components = bsutils.component_to_list(component)
    for component in components:  # note: loop variable shadows the argument
        star = getattr(system, component)
        # Offset of the system's reference point from the centre of mass
        # (first element of the correction vector).
        pos_correction = bsutils.correction_to_com(system.position.distance, system.mass_ratio, system.secondary.com)[0]
        # Projected semi-major axis a*sin(i) used for the light-time correction.
        asini = system.semi_major_axis * np.sin(system.inclination)
        if star.has_pulsations():
            phase = bsutils.calculate_rotational_phase(system, component)
            # Harmonics are evaluated about each star's own centre of mass:
            # x=0 for the primary, x=components_distance for the secondary.
            com_x = 0 if component == 'primary' else components_distance
            # LTE effect: light-time shift from the star's displacement along
            # the line of sight (assumes const.C is the speed of light in the
            # same unit system — TODO confirm units).
            time_correction = (star.com[0] - pos_correction) * asini / const.C
            generate_harmonics(star, com_x=com_x, phase=phase, time=system.time+time_correction)
def build_perturbations(system, component, components_distance):
    """
    Incorporating perturbations of surface quantities into the PositionContainer.

    :param system: elisa.binary_system.container.OrbitalPositionContainer; instance
    :param component: Union[str, None]; which component(s) to process
    :param components_distance: float; instantaneous distance between components
    :return: elisa.binary_system.container.OrbitalPositionContainer; instance
    """
    for component_name in bsutils.component_to_list(component):
        star_instance = getattr(system, component_name)
        # Non-pulsating components are left untouched.
        if not star_instance.has_pulsations():
            continue
        # Pulsations are evaluated about the star's own centre of mass:
        # x=0 for the primary, x=components_distance for the secondary.
        centre_of_mass_x = 0 if component_name == 'primary' else components_distance
        incorporate_pulsations_to_model(
            star_instance, com_x=centre_of_mass_x, scale=system.semi_major_axis
        )
    return system
|
mikecokinaREPO_NAMEelisaPATH_START.@elisa_extracted@elisa-master@src@elisa@binary_system@surface@pulsations.py@.PATH_END.py
|
{
"filename": "xcdm_nopert.ipynb",
"repo_name": "NumCosmo/NumCosmo",
"repo_path": "NumCosmo_extracted/NumCosmo-master/notebooks/apes_tests/xcdm_nopert.ipynb",
"type": "Jupyter Notebook"
}
|
---
**License**
xcdm_nopert
Thu Mar 16 16:56:00 2023\
Copyright 2023\
Sandro Dias Pinto Vitenti <vitenti@uel.br>
---
---
xcdm_nopert\
Copyright (C) 2023 Sandro Dias Pinto Vitenti <vitenti@uel.br>
numcosmo is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
numcosmo is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
---
```python
import sys
from numcosmo_py import Ncm
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
import numpy as np
import math
import getdist
from getdist import plots
from numcosmo_py.experiments.xcdm_no_perturbations import run_xcdm_nopert_mcmc
from numcosmo_py.plotting.tools import confidence_ellipse
from numcosmo_py.plotting.tools import set_rc_params_article
from numcosmo_py.plotting.getdist import mcat_to_mcsamples
from numcosmo_py.sampling.esmcmc import mcat_print_info, WalkerTypes
```
```python
Ncm.cfg_init()
Ncm.cfg_set_log_handler(lambda msg: sys.stdout.write(msg) and sys.stdout.flush())
ssize = 2000000
nthreads = 4
nwalkers_tiny = 64
nwalkers_small = 600
nwalkers = 2000
thin = 1
use_neutrino = True
save_figs = True
verbose = False
Ncm.func_eval_set_max_threads(nthreads)
Ncm.func_eval_log_pool_stats()
```
```python
catalog_apes_small = run_xcdm_nopert_mcmc(
sampler=WalkerTypes.APES,
nwalkers=nwalkers_small,
ssize=ssize,
verbose=verbose,
nthreads=nthreads,
use_neutrino=use_neutrino,
)
catalog_apes = run_xcdm_nopert_mcmc(
sampler=WalkerTypes.APES,
nwalkers=nwalkers,
ssize=ssize,
verbose=verbose,
nthreads=nthreads,
use_neutrino=use_neutrino,
)
catalog_stretch_tiny = run_xcdm_nopert_mcmc(
sampler=WalkerTypes.STRETCH,
nwalkers=nwalkers_tiny,
ssize=ssize,
verbose=verbose,
nthreads=nthreads,
use_neutrino=use_neutrino,
)
catalog_stretch_small = run_xcdm_nopert_mcmc(
sampler=WalkerTypes.STRETCH,
nwalkers=nwalkers_small,
ssize=ssize,
verbose=verbose,
nthreads=nthreads,
use_neutrino=use_neutrino,
)
catalog_stretch = run_xcdm_nopert_mcmc(
sampler=WalkerTypes.STRETCH,
nwalkers=nwalkers,
ssize=ssize,
verbose=verbose,
nthreads=nthreads,
use_neutrino=use_neutrino,
)
```
```python
def load_cat_skip_burnin(catalog):
mcat = Ncm.MSetCatalog.new_from_file_ro(catalog, 0)
max_t, _ = mcat.calc_max_ess_time(100, Ncm.FitRunMsgs.SIMPLE)
return mcat, max_t * mcat.nchains()
mcat_apes_small, apes_burnin_small = load_cat_skip_burnin(catalog_apes_small)
mcat_apes, apes_burnin = load_cat_skip_burnin(catalog_apes)
mcat_stretch_tiny, stretch_burnin_tiny = load_cat_skip_burnin(catalog_stretch_tiny)
mcat_stretch_small, stretch_burnin_small = load_cat_skip_burnin(catalog_stretch_small)
mcat_stretch, stretch_burnin = load_cat_skip_burnin(catalog_stretch)
param1 = 0
param2 = 4
param1_symbol = mcat_apes.col_symb(param1 + mcat_apes.nadd_vals())
param2_symbol = mcat_apes.col_symb(param2 + mcat_apes.nadd_vals())
assert mcat_apes_small.nchains() == nwalkers_small
assert mcat_apes.nchains() == nwalkers
assert mcat_stretch_tiny.nchains() == nwalkers_tiny
assert mcat_stretch_small.nchains() == nwalkers_small
assert mcat_stretch.nchains() == nwalkers
```
```python
sample_apes_small, rows_apes_small, posterior_apes_small = mcat_to_mcsamples(
mcat_apes_small,
"APES",
thin=thin,
burnin=apes_burnin_small,
)
del mcat_apes_small
sample_apes, rows_apes, posterior_apes = mcat_to_mcsamples(
mcat_apes,
"APES",
thin=thin,
burnin=apes_burnin,
)
del mcat_apes
sample_stretch_tiny, rows_stretch_tiny, posterior_stretch_tiny = mcat_to_mcsamples(
mcat_stretch_tiny,
"STRETCH",
thin=thin,
burnin=stretch_burnin_tiny,
)
del mcat_stretch_tiny
sample_stretch_small, rows_stretch_small, posterior_stretch_small = mcat_to_mcsamples(
mcat_stretch_small,
"STRETCH",
thin=thin,
burnin=stretch_burnin_small,
)
del mcat_stretch_small
sample_stretch, rows_stretch, posterior_stretch = mcat_to_mcsamples(
mcat_stretch,
"STRETCH",
thin=thin,
burnin=stretch_burnin,
)
del mcat_stretch
```
```python
print(
sample_apes_small.getConvergeTests(
what=("MeanVar", "GelmanRubin", "SplitTest", "CorrLengths"), feedback=True
)
)
print(
sample_apes.getConvergeTests(
what=("MeanVar", "GelmanRubin", "SplitTest", "CorrLengths"), feedback=True
)
)
print(
sample_stretch_tiny.getConvergeTests(
what=("MeanVar", "GelmanRubin", "SplitTest", "CorrLengths"), feedback=True
)
)
print(
sample_stretch_small.getConvergeTests(
what=("MeanVar", "GelmanRubin", "SplitTest", "CorrLengths"), feedback=True
)
)
print(
sample_stretch.getConvergeTests(
what=("MeanVar", "GelmanRubin", "SplitTest", "CorrLengths"), feedback=True
)
)
```
```python
set_rc_params_article(ncol=2)
g = getdist.plots.get_subplot_plotter(width_inch=plt.rcParams["figure.figsize"][0])
g.settings.linewidth = 0.01
g.triangle_plot([sample_apes_small, sample_stretch_small], shaded=True)
if save_figs:
plt.savefig("xcdm_nopert_corner.pdf", bbox_inches="tight")
```
```python
set_rc_params_article(ncol=2)
fig, ax = plt.subplots()
ax.set(xlim=(67, 77), ylim=(0, 0.4))
ax.set_xlabel(f"${param1_symbol}$")
ax.set_ylabel(f"${param2_symbol}$")
scat_apes_small = ax.scatter(
rows_apes_small[0:nwalkers_small, param1],
rows_apes_small[0:nwalkers_small, param2],
s=4,
c="red",
)
scat_stretch_small = ax.scatter(
rows_stretch_small[0:nwalkers_small, param1],
rows_stretch_small[0:nwalkers_small, param2],
s=4,
c="blue",
)
nframes = 600
# Number of ensemble time steps to animate: twice the larger of the two
# burn-in lengths, capped by the catalogue length, in units of walker blocks.
last_t = int(
    min(2 * max(stretch_burnin_small, apes_burnin_small), rows_apes_small.shape[0])
    / nwalkers_small
)
if last_t > nframes:
    # More steps than frames: subsample uniformly.
    step = last_t // nframes
    b = range(0, last_t, step)
else:
    # Bug fix: the original read `range(last_r)`, but `last_r` is undefined
    # anywhere in this notebook — the intended bound is `last_t`.
    b = range(last_t)
def animate(i):
x_i = rows_apes_small[nwalkers_small * b[i] : nwalkers_small * b[i + 1], param1]
y_i = rows_apes_small[nwalkers_small * b[i] : nwalkers_small * b[i + 1], param2]
x2_i = rows_stretch_small[nwalkers_small * b[i] : nwalkers_small * b[i + 1], param1]
y2_i = rows_stretch_small[nwalkers_small * b[i] : nwalkers_small * b[i + 1], param2]
scat_apes_small.set_offsets(np.c_[x_i, y_i])
scat_stretch_small.set_offsets(np.c_[x2_i, y2_i])
anim = FuncAnimation(fig, animate, interval=20, frames=nframes - 1)
pass
```
```python
HTML(anim.to_jshtml())
```
```python
writervideo = animation.FFMpegWriter(fps=20)
anim.save("xcdm_no_pert.mp4", writer=writervideo)
plt.close()
```
```python
set_rc_params_article(ncol=2)
fig = plt.figure()
gs = fig.add_gridspec(2, hspace=0)
axs = gs.subplots(sharex=True)
fig.suptitle("Parameter evolution")
step = 1
alpha = 0.6
t_apes = np.arange(last_t, step=step)
t_stretch = np.arange(last_t, step=step)
last_p = last_t * nwalkers_small
for i in range(0, nwalkers_small, 30):
x_1_apes_small = rows_apes_small[i:last_p:nwalkers_small, param1]
x_1_stretch_small = rows_stretch_small[i:last_p:nwalkers_small, param1]
x_2_apes_small = rows_apes_small[i:last_p:nwalkers_small, param2]
x_2_stretch_small = rows_stretch_small[i:last_p:nwalkers_small, param2]
axs[0].scatter(t_apes, x_1_apes_small[::step], s=0.1, alpha=alpha, color="r")
axs[0].scatter(t_stretch, x_1_stretch_small[::step], s=0.1, alpha=alpha, color="b")
axs[1].scatter(t_apes, x_2_apes_small[::step], s=0.1, alpha=alpha, color="r")
axs[1].scatter(t_stretch, x_2_stretch_small[::step], s=0.1, alpha=alpha, color="b")
# axs[1].set_yscale("symlog")
axs[0].set_ylabel(f"${param1_symbol}$")
axs[1].set_ylabel(f"${param2_symbol}$")
axs[1].set_xlabel("$t$")
if save_figs:
plt.tight_layout()
plt.savefig("xcdm_nopert_param_evol.pdf", bbox_inches="tight")
pass
```
```python
kernel = Ncm.StatsDistKernelST.new(6, 1.0)
interp_vkde = Ncm.StatsDistVKDE.new(kernel, Ncm.StatsDistCV.NONE)
interp_kde = Ncm.StatsDistKDE.new(kernel, Ncm.StatsDistCV.NONE)
# interp_vkde.set_cov_type(Ncm.StatsDistKDECovType.ROBUST)
# interp_kde.set_cov_type(Ncm.StatsDistKDECovType.ROBUST)
```
```python
max_n = len(rows_apes_small)
ssize = int(nwalkers / 2)
interp_vkde.reset()
interp_kde.reset()
for theta in rows_apes_small[-ssize:]:
theta_v = Ncm.Vector.new_array(theta)
interp_vkde.add_obs(theta_v)
interp_kde.add_obs(theta_v)
m2lnL_v = Ncm.Vector.new_array(2.0 * posterior_apes_small[-ssize:])
interp_vkde.prepare_interp(m2lnL_v)
interp_kde.prepare_interp(m2lnL_v)
```
```python
set_rc_params_article(ncol=1)
fig, ax = plt.subplots(1, 1)
indices = [param1, param2]
for i in range(interp_vkde.get_sample_size()):
y_i, cov_i, n_i, w_i = interp_vkde.get_Ki(i)
mu = np.array(y_i.dup_array())
cov = np.array([[cov_i.get(i, j) for j in indices] for i in indices])
cov = cov * 1.0
confidence_ellipse(mu[indices], cov, ax, edgecolor="red")
ax.set_xlabel(f"${param1_symbol}$")
ax.set_ylabel(f"${param2_symbol}$")
ax.autoscale_view()
plt.grid()
if save_figs:
plt.tight_layout()
plt.savefig("xcdm_nopert_vkde_kernels.pdf", bbox_inches="tight")
```
```python
set_rc_params_article(ncol=1)
fig, ax = plt.subplots(1, 1)
for i in range(interp_kde.get_sample_size()):
y_i, cov_i, n_i, w_i = interp_kde.get_Ki(i)
mu = np.array(y_i.dup_array())
cov = np.array([[cov_i.get(i, j) for j in indices] for i in indices])
cov = cov * 1.0
confidence_ellipse(mu[indices], cov, ax, edgecolor="red")
ax.set_xlabel(f"${param1_symbol}$")
ax.set_ylabel(f"${param2_symbol}$")
ax.autoscale_view()
plt.grid()
if save_figs:
plt.tight_layout()
plt.savefig("xcdm_nopert_kde_kernels.pdf", bbox_inches="tight")
```
|
NumCosmoREPO_NAMENumCosmoPATH_START.@NumCosmo_extracted@NumCosmo-master@notebooks@apes_tests@xcdm_nopert.ipynb@.PATH_END.py
|
{
"filename": "tfsa-2021-063.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2021-063.md",
"type": "Markdown"
}
|
## TFSA-2021-063: Undefined behavior in `MaxPool3DGradGrad`
### CVE Number
CVE-2021-29574
### Impact
The implementation of `tf.raw_ops.MaxPool3DGradGrad` exhibits undefined behavior
by dereferencing null pointers backing attacker-supplied empty tensors:
```python
import tensorflow as tf
orig_input = tf.constant([0.0], shape=[1, 1, 1, 1, 1], dtype=tf.float32)
orig_output = tf.constant([0.0], shape=[1, 1, 1, 1, 1], dtype=tf.float32)
grad = tf.constant([], shape=[0, 0, 0, 0, 0], dtype=tf.float32)
ksize = [1, 1, 1, 1, 1]
strides = [1, 1, 1, 1, 1]
padding = "SAME"
tf.raw_ops.MaxPool3DGradGrad(
orig_input=orig_input, orig_output=orig_output, grad=grad, ksize=ksize,
strides=strides, padding=padding)
```
The
[implementation](https://github.com/tensorflow/tensorflow/blob/72fe792967e7fd25234342068806707bbc116618/tensorflow/core/kernels/pooling_ops_3d.cc#L679-L703)
fails to validate that the 3 tensor inputs are not empty. If any of them is
empty, then accessing the elements in the tensor results in dereferencing a
null pointer.
### Patches
We have patched the issue in GitHub commit
[a3d9f9be9ac2296615644061b40cefcee341dcc4](https://github.com/tensorflow/tensorflow/commit/a3d9f9be9ac2296615644061b40cefcee341dcc4).
The fix will be included in TensorFlow 2.5.0. We will also cherrypick this
commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow
2.1.4, as these are also affected and still in supported range.
### For more information
Please consult [our security
guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for
more information regarding the security model and how to contact us with issues
and questions.
### Attribution
This vulnerability has been reported by Ying Wang and Yakun Zhang of Baidu
X-Team.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2021-063.md@.PATH_END.py
|
{
"filename": "_outlinecolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/choropleth/colorbar/_outlinecolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OutlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``outlinecolor`` property of ``choropleth.colorbar``."""

    def __init__(
        self, plotly_name="outlinecolor", parent_name="choropleth.colorbar", **kwargs
    ):
        # Pull plot-schema metadata out of kwargs, falling back to the
        # defaults baked into the generated schema for this property.
        edit_type = kwargs.pop("edit_type", "colorbars")
        role = kwargs.pop("role", "style")
        super(OutlinecolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@choropleth@colorbar@_outlinecolor.py@.PATH_END.py
|
{
"filename": "_weightsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/table/header/font/_weightsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``weightsrc`` property of ``table.header.font``."""

    def __init__(
        self, plotly_name="weightsrc", parent_name="table.header.font", **kwargs
    ):
        # Default plot-schema metadata, overridable through kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super(WeightsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@table@header@font@_weightsrc.py@.PATH_END.py
|
{
"filename": "test_aux_functions.py",
"repo_name": "tcassanelli/pyoof",
"repo_path": "pyoof_extracted/pyoof-master/pyoof/tests/test_aux_functions.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Tomas Cassanelli
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.misc import NumpyRNGContext
from astropy import units as apu
import pyoof
def test_str2LaTeX():
    """Every underscore must be backslash-escaped for LaTeX output."""
    escaped = pyoof.str2LaTeX('1_2_3_4_5')
    assert escaped == '1\\_2\\_3\\_4\\_5'
def test_uv_ratio():
    """uv_ratio must reproduce the reference width/height for a fixed seed."""
    expected_width, expected_height = 5.65260911, 5
    # Fixed RNG seed makes the random (u, v) samples reproducible.
    with NumpyRNGContext(0):
        u = np.random.uniform(-1, 1, 5) * apu.deg
        v = np.random.uniform(-1, 1, 5) * apu.deg
        width, height = pyoof.uv_ratio(u, v)
    assert_allclose(width, expected_width)
    assert_allclose(height, expected_height)
|
tcassanelliREPO_NAMEpyoofPATH_START.@pyoof_extracted@pyoof-master@pyoof@tests@test_aux_functions.py@.PATH_END.py
|
{
"filename": "coron.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/outlier_detection/coron.py",
"type": "Python"
}
|
"""
Submodule for performing outlier detection on coronagraphy data.
"""
import logging
import numpy as np
from stdatamodels.jwst import datamodels
from jwst.resample.resample_utils import build_mask
from .utils import create_cube_median, flag_model_crs
from ._fileio import save_median
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
__all__ = ["detect_outliers"]
def detect_outliers(
    input_model,
    save_intermediate_results,
    good_bits,
    maskpt,
    snr,
    make_output_path,
):
    """
    Flag outliers in coronagraphy data.

    See `OutlierDetectionStep.spec` for documentation of these arguments.
    """
    # Accept a filename/path and open it; from here on we need a model object.
    if not isinstance(input_model, datamodels.JwstDataModel):
        input_model = datamodels.open(input_model)
    if not isinstance(input_model, datamodels.CubeModel):
        raise TypeError(f"Input must be a CubeModel: {input_model}")

    # FIXME weight_type could now be used here. Similar to tso data coron
    # data was previously losing var_rnoise due to the conversion from a cube
    # to a ModelContainer (which makes the default ivm weight ignore var_rnoise).
    # Now that it's handled as a cube we could use the var_rnoise.
    input_model.wht = build_mask(input_model.dq, good_bits).astype(np.float32)

    # Median-combine the integrations of the cube.
    cube_median = create_cube_median(input_model, maskpt)

    if save_intermediate_results:
        # Wrap the median image in a model so it can be saved to disk.
        med_model = datamodels.ImageModel(cube_median)
        med_model.update(input_model)
        med_model.meta.wcs = input_model.meta.wcs
        save_median(med_model, make_output_path)
        del med_model

    # Flag cosmic rays by statistically comparing each original input
    # integration against the median image.
    flag_model_crs(input_model, cube_median, snr)
    return input_model
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@outlier_detection@coron.py@.PATH_END.py
|
{
"filename": "visualize.py",
"repo_name": "supernnova/SuperNNova",
"repo_path": "SuperNNova_extracted/SuperNNova-main/python/supernnova/visualization/visualize.py",
"type": "Python"
}
|
import os
from pathlib import Path
import h5py
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
plt.switch_backend("agg")
def plot_lightcurves(df, SNIDs, settings):
    """Plot a 4x4 grid of light curves.

    Args:
        df (pandas.DataFrame): dataframe holding the data, indexed by SNID
        SNIDs (np.array or list): holds lightcurve ids
        settings (ExperimentSettings): controls experiment hyperparameters
    """
    plt.figure(figsize=(20, 10))
    plt.suptitle("Sample of SN Ia light curves")
    grid = gridspec.GridSpec(4, 4, hspace=0.4)

    for panel in range(16):
        ax = plt.subplot(grid[panel])
        SNID = SNIDs[panel]
        lc = df.loc[SNID]

        # Bucket observations by filter: flux, error and cumulative time.
        per_filter = {
            flt: {"FLUXCAL": [], "FLUXCALERR": [], "MJD": []}
            for flt in settings.list_filters
        }
        elapsed = 0
        for flt, flux, fluxerr, dt in zip(
            lc.FLT.values,
            lc.FLUXCAL.values,
            lc.FLUXCALERR.values,
            lc.delta_time.values,
        ):
            per_filter[flt]["FLUXCAL"].append(flux)
            per_filter[flt]["FLUXCALERR"].append(fluxerr)
            # Times are stored as deltas; accumulate to get an absolute axis.
            elapsed += dt
            per_filter[flt]["MJD"].append(elapsed)

        for flt, series in per_filter.items():
            # Only plot a time series if it's non empty
            if series["MJD"]:
                ax.errorbar(
                    series["MJD"],
                    series["FLUXCAL"],
                    yerr=series["FLUXCALERR"],
                    label=f"Filter {flt}",
                )

        ax.set_title(SNID, fontsize=18)
        ax.set_aspect("auto")
        ax.legend(loc="best")

    plt.savefig(Path(settings.explore_dir) / "sample_lightcurves.png")
def plot_random_preprocessed_lightcurves(settings, SNIDs):
    """Plot lightcurves specified by SNIDs from the
    preprocessed, pickled database

    Args:
        settings (ExperimentSettings): controls experiment hyperparameters
        SNIDs (list): list of SN lightcurve IDs to plot

    Raises:
        FileNotFoundError: if no ``*_PHOT.pickle`` files are found in the
            preprocessed directory
    """
    # glob.glob already returns a list; no need to rebuild it with a comprehension.
    list_files = glob.glob(os.path.join(settings.preprocessed_dir, "*_PHOT.pickle"))
    if not list_files:
        raise FileNotFoundError(
            "No files found in the preprocessed directory. Use option --debug together with --data when generating database."
        )
    # Concatenate all pickled photometry tables and index them by SNID so
    # plot_lightcurves can look up individual light curves with df.loc.
    df = pd.concat([pd.read_pickle(f) for f in list_files]).set_index("SNID")
    # Plot and save
    plot_lightcurves(df, SNIDs, settings)
def plot_lightcurves_from_hdf5(settings, SNID_idxs):
    """Plot lightcurves specified by SNID_idxs from the
    HDF5 database

    Args:
        settings (ExperimentSettings): controls experiment hyperparameters
        SNID_idxs (list): list of SN lightcurve index to plot
    """
    with h5py.File(settings.hdf5_file_name, "r") as hf:
        features = hf["features"][:].astype(str)
        n_features = len(features)
        plt.figure(figsize=(20, 10))
        gs = gridspec.GridSpec(4, 4, hspace=0.4)
        for idx, SNID_idx in enumerate(SNID_idxs):
            ax = plt.subplot(gs[idx])
            SNID = hf["SNID"][SNID_idx]
            # NOTE(review): the result of this str() call is discarded —
            # looks like leftover debug code; confirm and remove if unused.
            str(hf["PEAKMJD"][SNID_idx])
            PEAKMJDNORM = hf["PEAKMJDNORM"][SNID_idx]
            # Map the stored type code to its human-readable name.
            typ = hf[settings.sntype_var][SNID_idx]
            typ = settings.sntypes[str(typ)]
            # Flattened light-curve matrix -> (n_timesteps, n_features)
            data = hf["data"][SNID_idx].reshape(-1, n_features)
            df = pd.DataFrame(data, columns=features)
            non_filter_columns = [
                "FLUXCAL_g",
                "FLUXCAL_i",
                "FLUXCAL_r",
                "FLUXCAL_z",
                "FLUXCALERR_g",
                "FLUXCALERR_i",
                "FLUXCALERR_r",
                "FLUXCALERR_z",
                "delta_time",
                "HOSTGAL_PHOTOZ",
                "HOSTGAL_PHOTOZ_ERR",
                "HOSTGAL_SPECZ",
                "HOSTGAL_SPECZ_ERR",
            ]
            filter_columns = [
                c for c in df.columns.values if c not in non_filter_columns
            ]
            # presumably the remaining columns are one-hot filter-presence
            # flags; idxmax() recovers the active flag column name for each
            # timestep — TODO confirm against the database builder.
            present_filters = df[filter_columns].transpose().idxmax().values
            # set(f) turns each column name into its set of characters so the
            # single-letter filter codes below can be tested with `in`.
            list_present_filters = [set(f) for f in present_filters]
            max_y = -float("Inf")
            min_y = float("Inf")
            for FLT in settings.list_filters:
                # Rows where this filter was observed.
                idxs = np.array(
                    [i for i in range(len(df)) if FLT in list_present_filters[i]]
                )
                if len(idxs) == 0:
                    continue
                arr_flux = df[f"FLUXCAL_{FLT}"].values[idxs]
                arr_fluxerr = df[f"FLUXCALERR_{FLT}"].values[idxs]
                # Times are stored as deltas; cumsum gives an absolute axis.
                arr_time = df["delta_time"].cumsum().values[idxs]
                ax.errorbar(arr_time, arr_flux, yerr=arr_fluxerr, label=f"Filter {FLT}")
                # Track the global y-range so the peak marker spans the data.
                if np.max(arr_flux) > max_y:
                    max_y = np.max(arr_flux)
                if np.min(arr_flux) < min_y:
                    min_y = np.min(arr_flux)
            # Dashed vertical line at the (normalized) date of peak brightness.
            ax.plot(
                [PEAKMJDNORM, PEAKMJDNORM], [min_y, max_y], color="k", linestyle="--"
            )
            ax.set_title(f"{SNID.decode('utf8')} -- {typ}", fontsize=18)
            ax.set_aspect("auto")
            ax.legend(loc="best")
    plt.savefig(Path(settings.explore_dir) / "sample_lightcurves_from_hdf5.png")
def visualize(settings):
    """Plot a random subset of lightcurves

    2 plots: one with preprocessed data and one with processed data
    The two plots should show the same data

    Args:
        settings (ExperimentSettings): controls experiment hyperparameters
    """
    # Check the data has been created
    settings.check_data_exists()
    # Seed from OS entropy: a different random subset on every call.
    np.random.seed()
    with h5py.File(settings.hdf5_file_name, "r") as hf:
        SNID_idxs = np.random.permutation(hf["SNID"].shape[0])[:16]
        # hf["SNID"] holds bytes; decode to str for DataFrame .loc lookups.
        # (Replaces the previous double-comprehension round trip through
        # np.array, which produced the same string values.)
        SNIDs = [str(s) for s in hf["SNID"][:][SNID_idxs].astype(str)]
    plot_random_preprocessed_lightcurves(settings, SNIDs)
    plot_lightcurves_from_hdf5(settings, SNID_idxs)
|
supernnovaREPO_NAMESuperNNovaPATH_START.@SuperNNova_extracted@SuperNNova-main@python@supernnova@visualization@visualize.py@.PATH_END.py
|
{
"filename": "tests_lunar_cotrans.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/themis/tests/tests_lunar_cotrans.py",
"type": "Python"
}
|
"""Tests of gse2sse and sse2sel functions."""
import unittest
from numpy.testing import assert_array_almost_equal_nulp, assert_array_max_ulp, assert_allclose
from copy import deepcopy
from pytplot import data_exists, get_data, store_data, cdf_to_tplot, del_data, tplot_restore, replace_metadata
from pytplot import get_coords,set_coords
from pyspedas.projects.themis import gse2sse,sse2sel
class LunCotransDataValidation(unittest.TestCase):
    """ Compares cotrans results between Python and IDL """

    @classmethod
    def setUpClass(cls):
        """
        IDL Data has to be downloaded to perform these tests
        The SPEDAS script that creates the file: projects/themis/state/cotrans/thm_cotrans_validate.pro
        """
        from pyspedas.utilities.download import download
        from pyspedas.projects.themis.config import CONFIG

        # Testing time range
        cls.t = ['2008-03-23', '2008-03-28']
        # Testing tolerance
        cls.tol = 1e-10

        # Download tplot files
        remote_server = 'https://spedas.org/'
        #remote_name = 'testfiles/thm_cotrans_validate.cdf'
        remote_name = 'testfiles/thm_cotrans_validate.tplot'
        datafile = download(remote_file=remote_name,
                            remote_path=remote_server,
                            local_path=CONFIG['local_data_dir'],
                            no_download=False)
        if not datafile:
            # Skip tests
            raise unittest.SkipTest("Cannot download data validation file")

        # Load validation variables from the test file
        del_data('*')
        filename = datafile[0]
        #cdf_to_tplot(filename)
        tplot_restore(filename)
        # pytplot.tplot_names()
        # Input variables
        #coord_set_coord('tha_state_pos_gse','gse')
        #coord_set_coord('tha_state_vel_gse','gse')
        cls.tha_state_pos_gse = get_data('tha_state_pos_gse')
        cls.tha_state_vel_gse = get_data('tha_state_vel_gse')
        #coord_set_coord('tha_fgs_gse','gse')
        cls.tha_fgs_gse = get_data('tha_fgs_gse')
        # GSE<->SSE results
        #coord_set_coord('tha_state_pos_sse','sse')
        #coord_set_coord('tha_state_vel_sse','sse')
        cls.tha_state_pos_sse = get_data('tha_state_pos_sse')
        cls.tha_state_vel_sse = get_data('tha_state_vel_sse')
        #coord_set_coord('tha_state_pos_sse_rotate_only','sse')
        #coord_set_coord('tha_state_vel_sse_rotate_only','sse')
        cls.tha_state_pos_sse_rotate_only = get_data('tha_state_pos_sse_rotate_only')
        cls.tha_state_vel_sse_rotate_only = get_data('tha_state_vel_sse_rotate_only')
        #coord_set_coord('tha_fgs_sse','sse')
        cls.tha_fgs_sse = get_data('tha_fgs_sse')
        #coord_set_coord('tha_fgs_sel','sel')
        cls.tha_fgs_sel = get_data('tha_fgs_sel')
        #coord_set_coord('tha_state_pos_gse_sse_gse','gse')
        #coord_set_coord('tha_state_vel_gse_sse_gse','gse')
        cls.tha_state_pos_gse_sse_gse = get_data('tha_state_pos_gse_sse_gse')
        cls.tha_state_vel_gse_sse_gse = get_data('tha_state_vel_gse_sse_gse')
        #coord_set_coord('tha_state_pos_gse_sse_gse_rotate_only','gse')
        #coord_set_coord('tha_state_vel_gse_sse_gse_rotate_only','gse')
        cls.tha_state_pos_gse_sse_gse_rotate_only = get_data('tha_state_pos_gse_sse_gse_rotate_only')
        cls.tha_state_vel_gse_sse_gse_rotate_only = get_data('tha_state_vel_gse_sse_gse_rotate_only')
        #coord_set_coord('tha_fgs_gse_sse_gse','gse')
        cls.tha_fgs_gse_sse_gse = get_data('tha_fgs_gse_sse_gse')
        # SSE<->SSL results
        #coord_set_coord('tha_state_pos_sel','sel')
        cls.tha_state_pos_sel = get_data('tha_state_pos_sel')
        #coord_set_coord('tha_state_pos_gse_sel_sse','sse')
        #coord_set_coord('tha_state_vel_gse_sel_sse','sse')
        cls.tha_state_pos_gse_sel_sse = get_data('tha_state_pos_gse_sel_sse')
        cls.sse_mat_cotrans = get_data('sse_mat_cotrans')
        cls.sel_mat_cotrans = get_data('sel_mat_cotrans')
        cls.sel_x_gei = get_data('sel_x_gei')
        cls.sel_x_gse = get_data('sel_x_gse')
        cls.sel_x_sse = get_data('sel_x_sse')
        cls.sel_y_sse = get_data('sel_y_sse')
        cls.sel_z_sse = get_data('sel_z_sse')
        # It is no longer necessary to load or pass support data when calling gse2sse and sse2sel
        # autoload_support(varname='tha_state_pos_gse', slp=True)

    def setUp(self):
        """ We need to clean tplot variables before each run"""
        # del_data('*')

    def test_replace_metadata(self):
        """ replace_metadata must deep-copy metadata into the target variable """
        data = get_data('tha_state_pos_gse')
        orig_meta = deepcopy(get_data('tha_state_pos_gse',metadata=True))
        orig_coord = get_coords('tha_state_pos_gse')
        self.assertEqual(orig_coord.lower(), 'gse')
        store_data('newvar',data={'x':data[0],'y':data[1]})
        replace_metadata('newvar',orig_meta)
        self.assertEqual(get_coords('newvar').lower(),'gse')
        orig_meta['data_att']['coord_sys'] = 'goofy' # won't affect tha_state_pos_gse, should not affect newvar either
        self.assertEqual(get_coords('newvar').lower(),'gse')
        self.assertEqual(get_coords('tha_state_pos_gse').lower(),'gse')

    def test_gse2sse_pos(self):
        """ Validate gse2sse position transform """
        result = gse2sse('tha_state_pos_gse', 'tha_state_pos_sse', variable_type='pos')
        self.assertEqual(result,1)
        py_sse_mat_cotrans = get_data('sse_mat_cotrans')
        assert_allclose(py_sse_mat_cotrans.y, self.sse_mat_cotrans.y, atol=1.0e-06)
        pos_sse = get_data('tha_state_pos_sse')
        pos_meta = get_data('tha_state_pos_sse',metadata=True)
        self.assertEqual(pos_meta['data_att']['units'],'km')
        assert_allclose(pos_sse.y, self.tha_state_pos_sse.y, atol=0.1)
        self.assertEqual(get_coords('tha_state_pos_sse').lower(),'sse')

    def test_gse2sse_pos_rotate_only(self):
        """ Validate gse2sse position transform (rotation only) """
        result = gse2sse('tha_state_pos_gse', 'tha_state_pos_sse_rotate_only', variable_type='pos',rotation_only=True)
        self.assertEqual(result,1)
        pos_sse = get_data('tha_state_pos_sse_rotate_only')
        # NOTE(review): metadata is read from 'tha_state_pos_sse' (previous
        # test's output), not from the rotate_only variable — confirm intent.
        pos_meta = get_data('tha_state_pos_sse',metadata=True)
        self.assertEqual(pos_meta['data_att']['units'],'km')
        assert_allclose(pos_sse.y, self.tha_state_pos_sse_rotate_only.y, atol=0.1)
        self.assertEqual(get_coords('tha_state_pos_sse_rotate_only').lower(),'sse')

    def test_gse2sse_vel(self):
        """ Validate gse2sse velocity transform """
        result = gse2sse('tha_state_vel_gse', 'tha_state_vel_sse',variable_type='vel')
        self.assertEqual(result,1)
        vel_sse = get_data('tha_state_vel_sse')
        vel_meta = get_data('tha_state_vel_sse',metadata=True)
        self.assertEqual(vel_meta['data_att']['units'],'km/s')
        assert_allclose(vel_sse.y, self.tha_state_vel_sse.y, atol=1.0e-03)
        self.assertEqual(get_coords('tha_state_vel_sse').lower(),'sse')

    def test_gse2sse_vel_rotate_only(self):
        """ Validate gse2sse velocity transform (rotation only) """
        result = gse2sse('tha_state_vel_gse', 'tha_state_vel_sse_rotate_only', variable_type='vel',rotation_only=True)
        self.assertEqual(result,1)
        vel_sse = get_data('tha_state_vel_sse_rotate_only')
        # NOTE(review): metadata is read from 'tha_state_vel_sse', not from
        # the rotate_only variable — confirm intent.
        vel_meta = get_data('tha_state_vel_sse',metadata=True)
        self.assertEqual(vel_meta['data_att']['units'],'km/s')
        assert_allclose(vel_sse.y, self.tha_state_vel_sse_rotate_only.y, atol=1.0e-03)
        self.assertEqual(get_coords('tha_state_vel_sse_rotate_only').lower(),'sse')

    def test_gse2sse_field(self):
        """ Validate gse2sse field transform """
        result = gse2sse('tha_fgs_gse', 'tha_fgs_sse')
        self.assertEqual(result, 1)
        fgs_sse = get_data('tha_fgs_sse')
        fgs_meta = get_data('tha_fgs_sse',metadata=True)
        self.assertEqual(fgs_meta['data_att']['units'],'nT')
        assert_allclose(fgs_sse.y, self.tha_fgs_sse.y, atol=1.0e-02)
        self.assertEqual(get_coords('tha_fgs_sse').lower(), 'sse')

    def test_sse2gse_pos(self):
        """ Validate sse2gse position transform """
        # Restore original baseline input tplot variable before transforming.
        store_data('tha_state_pos_sse',data={'x':self.tha_state_pos_sse.times, 'y':self.tha_state_pos_sse.y})
        set_coords('tha_state_pos_sse','sse')
        before_meta = get_data('tha_state_pos_sse',metadata=True)
        before_meta['data_att']['units'] = 'km'
        result = gse2sse('tha_state_pos_sse', 'tha_state_pos_gse_sse_gse',isssetogse=True,
                         variable_type='pos')
        self.assertEqual(result,1)
        pos_gse = get_data('tha_state_pos_gse_sse_gse')
        pos_meta = get_data('tha_state_pos_gse_sse_gse',metadata=True)
        self.assertEqual(pos_meta['data_att']['units'],'km')
        assert_allclose(pos_gse.y, self.tha_state_pos_gse_sse_gse.y, atol=0.1)
        self.assertEqual(get_coords('tha_state_pos_gse_sse_gse').lower(),'gse')

    def test_sse2gse_pos_rotate_only(self):
        """ Validate sse2gse position transform (rotation only) """
        store_data('tha_state_pos_sse_rotate_only',
                   data={'x':self.tha_state_pos_sse_rotate_only.times, 'y':self.tha_state_pos_sse_rotate_only.y})
        set_coords('tha_state_pos_sse_rotate_only','sse')
        result = gse2sse('tha_state_pos_sse_rotate_only', 'tha_state_pos_gse_sse_gse_rotation_only',isssetogse=True,
                         variable_type='pos', rotation_only=True)
        self.assertEqual(result,1)
        pos_gse = get_data('tha_state_pos_gse_sse_gse_rotation_only')
        assert_allclose(pos_gse.y, self.tha_state_pos_gse_sse_gse_rotate_only.y, atol=0.1)
        # NOTE(review): the output above is named '..._rotation_only' but the
        # coordinate check below targets '..._rotate_only' (the restored IDL
        # variable) — confirm which variable should be checked.
        self.assertEqual(get_coords('tha_state_pos_gse_sse_gse_rotate_only').lower(),'gse')

    def test_sse2gse_vel(self):
        """ Validate sse2gse velocity transform """
        result = gse2sse('tha_state_vel_sse', 'tha_state_vel_gse_sse_gse',isssetogse=True,
                         variable_type='vel')
        self.assertEqual(result,1)
        vel_gse = get_data('tha_state_vel_gse_sse_gse')
        assert_allclose(vel_gse.y, self.tha_state_vel_gse_sse_gse.y, atol=1.0e-02)
        self.assertEqual(get_coords('tha_state_vel_gse_sse_gse').lower(),'gse')

    def test_sse2gse_vel_rotate_only(self):
        """ Validate sse2gse velocity transform (rotation only) """
        store_data('tha_state_vel_sse_rotate_only',
                   data={'x':self.tha_state_vel_sse_rotate_only.times, 'y':self.tha_state_vel_sse_rotate_only.y})
        set_coords('tha_state_vel_sse_rotate_only','sse')
        # NOTE(review): variable_type='pos' in a velocity test looks like a
        # copy-paste slip — confirm whether 'vel' was intended (for
        # rotation_only the light-aberration/velocity correction is skipped,
        # which may make the two equivalent here).
        result = gse2sse('tha_state_vel_sse_rotate_only', 'tha_state_vel_gse_sse_gse_rotation_only',isssetogse=True,
                         variable_type='pos', rotation_only=True)
        self.assertEqual(result,1)
        vel_gse = get_data('tha_state_vel_gse_sse_gse_rotation_only')
        assert_allclose(vel_gse.y, self.tha_state_vel_gse_sse_gse_rotate_only.y, atol=1.0e-03)
        # NOTE(review): '..._rotation_only' vs '..._rotate_only' name mismatch,
        # as in test_sse2gse_pos_rotate_only — confirm intended target.
        self.assertEqual(get_coords('tha_state_vel_gse_sse_gse_rotate_only').lower(),'gse')

    def test_sse2gse_field(self):
        """ Validate sse2gse field transform """
        result = gse2sse('tha_fgs_sse','tha_fgs_gse_sse_gse',isssetogse=True)
        self.assertEqual(result, 1)
        fgs_gse = get_data('tha_fgs_gse_sse_gse')
        assert_allclose(fgs_gse.y, self.tha_fgs_gse_sse_gse.y, atol=1.0e-02)
        self.assertEqual(get_coords('tha_fgs_gse_sse_gse').lower(), 'gse')

    def test_sse2sel_pos(self):
        """ Validate sse2sel position transform """
        result = sse2sel('tha_state_pos_sse','tha_state_pos_sel')
        self.assertEqual(result,1)
        # Check the intermediate lunar-attitude support variables first.
        py_sel_x_gse = get_data('slp_lun_att_x_gse')
        assert_allclose(self.sel_x_gse.y,py_sel_x_gse.y,atol=1.0e-06)
        py_sel_x_sse = get_data('sel_x_sse')
        assert_allclose(self.sel_x_sse.y,py_sel_x_sse.y,atol=1.0e-06)
        py_sel_y_sse = get_data('sel_y_sse')
        assert_allclose(self.sel_y_sse.y,py_sel_y_sse.y,atol=1.0e-06)
        py_sel_z_sse = get_data('sel_z_sse')
        assert_allclose(self.sel_z_sse.y,py_sel_z_sse.y,atol=1.0e-06)
        py_sel_mat_cotrans = get_data('sel_mat_cotrans')
        assert_allclose(py_sel_mat_cotrans.y, self.sel_mat_cotrans.y, atol=1.0e-06)
        pos_sel = get_data('tha_state_pos_sel')
        pos_meta = get_data('tha_state_pos_sel',metadata=True)
        self.assertEqual(pos_meta['data_att']['units'],'km')
        assert_allclose(pos_sel.y, self.tha_state_pos_sel.y, atol=0.1)
        self.assertEqual(get_coords('tha_state_pos_sel').lower(),'sel')

    def test_sse2sel_fgs(self):
        """ Validate sse2sel field transform """
        result = sse2sel('tha_fgs_sse', 'tha_fgs_sel')
        self.assertEqual(result,1)
        fgs_sel = get_data('tha_fgs_sel')
        assert_allclose(fgs_sel.y, self.tha_fgs_sel.y, atol=.005)
        self.assertEqual(get_coords('tha_fgs_sel').lower(),'sel')

    def test_sel2sse_pos(self):
        """ Validate sel2sse position transform """
        # Restore original baseline input tplot variable
        store_data('tha_state_pos_sel',data={'x':self.tha_state_pos_sel.times, 'y':self.tha_state_pos_sel.y})
        set_coords('tha_state_pos_sel','sel')
        result = sse2sel('tha_state_pos_sel', 'tha_state_pos_sel_sse', isseltosse=True)
        self.assertEqual(result,1)
        # NOTE(review): output variable is 'tha_state_pos_sel_sse' but the
        # assertions read 'tha_state_pos_gse_sel_sse' (restored from the IDL
        # file) — confirm which variable should be compared.
        pos_sse = get_data('tha_state_pos_gse_sel_sse')
        assert_allclose(pos_sse.y, self.tha_state_pos_gse_sel_sse.y, atol=0.1)
        self.assertEqual(get_coords('tha_state_pos_gse_sel_sse').lower(),'sse')

    def test_sel2sse_field(self):
        """ Validate sel2sse field transform """
        # Restore original baseline input tplot variable
        store_data('tha_fgs_sel',data={'x':self.tha_fgs_sel.times, 'y':self.tha_fgs_sel.y})
        set_coords('tha_fgs_sel','sel')
        md_before = get_data('tha_fgs_sel',metadata=True)
        md_before['data_att']['units'] = 'nT'
        result = sse2sel('tha_fgs_sel', 'tha_fgs_sel_sse', isseltosse=True)
        self.assertEqual(result,1)
        fgs_sse = get_data('tha_fgs_sel_sse')
        fgs_meta = get_data('tha_fgs_sel_sse',metadata=True)
        self.assertEqual(fgs_meta['data_att']['units'],'nT')
        assert_allclose(fgs_sse.y, self.tha_fgs_sse.y, atol=0.1)
        self.assertEqual(get_coords('tha_fgs_sel_sse').lower(),'sse')
# Allow running this validation suite directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@themis@tests@tests_lunar_cotrans.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "apertif/apercal",
"repo_path": "apercal_extracted/apercal-master/apercal/pipeline/__init__.py",
"type": "Python"
}
|
from start_pipeline import *
|
apertifREPO_NAMEapercalPATH_START.@apercal_extracted@apercal-master@apercal@pipeline@__init__.py@.PATH_END.py
|
{
"filename": "test_array.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/test_autofit/mapper/test_array.py",
"type": "Python"
}
|
import pytest
import numpy as np
import autofit as af
from autoconf.dictable import to_dict
@pytest.fixture
def array():
    # 2x2 free-parameter array; every element gets the same Gaussian prior.
    return af.Array(
        shape=(2, 2),
        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
    )


@pytest.fixture
def array_3d():
    # 2x2x2 variant used to check behaviour in three dimensions.
    return af.Array(
        shape=(2, 2, 2),
        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
    )
def test_prior_count(array):
    """A 2x2 array contributes one prior per element."""
    assert array.prior_count == 4


def test_prior_count_3d(array_3d):
    """A 2x2x2 array contributes one prior per element."""
    assert array_3d.prior_count == 8


def test_instance(array):
    """Prior medians of a zero-mean Gaussian give an all-zero 2x2 instance."""
    instance = array.instance_from_prior_medians()
    assert (instance == [[0.0, 0.0], [0.0, 0.0]]).all()


def test_instance_3d(array_3d):
    """Prior medians give an all-zero 2x2x2 instance."""
    instance = array_3d.instance_from_prior_medians()
    assert (
        instance
        == [
            [[0.0, 0.0], [0.0, 0.0]],
            [[0.0, 0.0], [0.0, 0.0]],
        ]
    ).all()
def test_modify_prior(array):
    """Fixing an element to a constant removes its prior from the count."""
    array[0, 0] = 1.0
    assert array.prior_count == 3
    assert (
        array.instance_from_prior_medians()
        == [
            [1.0, 0.0],
            [0.0, 0.0],
        ]
    ).all()


def test_correlation(array):
    """Elements assigned the same prior object stay equal in every instance."""
    array[0, 0] = array[1, 1]
    array[0, 1] = array[1, 0]
    instance = array.random_instance()
    assert instance[0, 0] == instance[1, 1]
    assert instance[0, 1] == instance[1, 0]
@pytest.fixture
def array_dict():
    # Expected serialized form of the 2x2 `array` fixture: the element
    # indices plus one Gaussian prior entry per element and the shape.
    return {
        "arguments": {
            "indices": {
                "type": "list",
                "values": [
                    {"type": "tuple", "values": [0, 0]},
                    {"type": "tuple", "values": [0, 1]},
                    {"type": "tuple", "values": [1, 0]},
                    {"type": "tuple", "values": [1, 1]},
                ],
            },
            "prior_0_0": {
                "lower_limit": float("-inf"),
                "mean": 0.0,
                "sigma": 1.0,
                "type": "Gaussian",
                "upper_limit": float("inf"),
            },
            "prior_0_1": {
                "lower_limit": float("-inf"),
                "mean": 0.0,
                "sigma": 1.0,
                "type": "Gaussian",
                "upper_limit": float("inf"),
            },
            "prior_1_0": {
                "lower_limit": float("-inf"),
                "mean": 0.0,
                "sigma": 1.0,
                "type": "Gaussian",
                "upper_limit": float("inf"),
            },
            "prior_1_1": {
                "lower_limit": float("-inf"),
                "mean": 0.0,
                "sigma": 1.0,
                "type": "Gaussian",
                "upper_limit": float("inf"),
            },
            "shape": {"type": "tuple", "values": [2, 2]},
        },
        "type": "array",
    }
def test_to_dict(array, array_dict, remove_ids):
    """Serialization round-trips to the expected dict.

    `remove_ids` is presumably a conftest fixture stripping volatile prior
    ids from the dict — confirm in the test package's conftest.
    """
    assert remove_ids(to_dict(array)) == array_dict


def test_from_dict(array_dict):
    """Deserializing the dict rebuilds an equivalent 2x2 prior array."""
    array = af.AbstractPriorModel.from_dict(array_dict)
    assert array.prior_count == 4
    assert (
        array.instance_from_prior_medians()
        == [
            [0.0, 0.0],
            [0.0, 0.0],
        ]
    ).all()
@pytest.fixture
def array_1d():
    # One-dimensional variant with two free parameters.
    return af.Array(
        shape=(2,),
        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
    )


def test_1d_array(array_1d):
    """1D arrays count one prior per element and yield zero medians."""
    assert array_1d.prior_count == 2
    assert (array_1d.instance_from_prior_medians() == [0.0, 0.0]).all()


def test_1d_array_modify_prior(array_1d):
    """Fixing a 1D element removes its prior and pins its value."""
    array_1d[0] = 1.0
    assert array_1d.prior_count == 1
    assert (array_1d.instance_from_prior_medians() == [1.0, 0.0]).all()


def test_tree_flatten(array):
    """tree_flatten/tree_unflatten round-trip the array (pytree protocol)."""
    children, aux = array.tree_flatten()
    # Four priors as children; the shape travels as static aux data.
    assert len(children) == 4
    assert aux == ((2, 2),)
    new_array = af.Array.tree_unflatten(aux, children)
    assert new_array.prior_count == 4
    assert (
        new_array.instance_from_prior_medians()
        == [
            [0.0, 0.0],
            [0.0, 0.0],
        ]
    ).all()
class Analysis(af.Analysis):
    # Toy analysis: log-likelihood is the negative mean squared error between
    # the fitted instance and a fixed 2x2 target, so the fit is maximized at
    # [[0.1, 0.2], [0.3, 0.4]].
    def log_likelihood_function(self, instance):
        return -float(
            np.mean(
                (
                    np.array(
                        [
                            [0.1, 0.2],
                            [0.3, 0.4],
                        ]
                    )
                    - instance
                )
                ** 2
            )
        )
def test_optimisation():
    """End-to-end: fit the toy Analysis, fix two elements from the posterior,
    then refit — the final instance must be a plain numpy array.

    NOTE: runs two nested-sampling fits, so this test is comparatively slow.
    """
    array = af.Array(
        shape=(2, 2),
        prior=af.UniformPrior(
            lower_limit=0.0,
            upper_limit=1.0,
        ),
    )

    result = af.DynestyStatic().fit(model=array, analysis=Analysis())
    posterior = result.model
    # Pin two elements to their posterior models before refitting.
    array[0, 0] = posterior[0, 0]
    array[0, 1] = posterior[0, 1]

    result = af.DynestyStatic().fit(model=array, analysis=Analysis())
    assert isinstance(result.instance, np.ndarray)
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@test_autofit@mapper@test_array.py@.PATH_END.py
|
{
"filename": "degrading_a_spectrum.ipynb",
"repo_name": "jlustigy/coronagraph",
"repo_path": "coronagraph_extracted/coronagraph-master/docs/notebooks/degrading_a_spectrum.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%matplotlib inline
%config InlineBackend.figure_format = "png2x"
from __future__ import print_function
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
```
```python
rcParams["savefig.dpi"] = 200
rcParams["figure.dpi"] = 200
rcParams["font.size"] = 20
rcParams["figure.figsize"] = [8, 5]
rcParams["font.family"] = "sans-serif"
rcParams["font.sans-serif"] = ["Computer Modern Sans Serif"]
rcParams["text.usetex"] = True
```
# Degrading a Spectrum to Lower Resolution
```python
import coronagraph as cg
print(cg.__version__)
```
1.0
### Proxima Centauri Stellar Spectrum
First, let's grab the stellar spectral energy distribution (SED) of Proxima Centauri from the VPL website:
```python
# The file is located at the following URL:
url = "http://vpl.astro.washington.edu/spectra/stellar/proxima_cen_sed.txt"
# We can open the URL with the following
import urllib.request
response = urllib.request.urlopen(url)
# Read file lines
data = response.readlines()
# Remove header and extraneous info
tmp = np.array([np.array(str(d).split("\\")) for d in data[25:]])[:,[1,2]]
# Extract columns
lam = np.array([float(d[1:]) for d in tmp[:,0]])
flux = np.array([float(d[1:]) for d in tmp[:,1]])
```
Now let's set the min and max for our low res wavelength grid, use `construct_lam` to create the low-res wavelength grid, use `downbin_spec` to make a low-res spectrum, and plot it.
```python
# Set the wavelength-grid parameters. A fixed bin width `dl` is used here,
# so no resolving power is needed (the unused `R = 200` was removed; R is
# reassigned before its next use further down the notebook).
lammin = 0.12
lammax = 2.0
dl = 0.01
# Construct new low-res wavelength grid with constant bin width dl
wl, dwl = cg.noise_routines.construct_lam(lammin, lammax, dlam = dl)
# Down-bin flux to low-res
flr = cg.downbin_spec(flux, lam, wl, dlam=dwl)
# Plot the high-res spectrum (restricted to the new grid's range) with the
# degraded spectrum on top of it
m = (lam > lammin) & (lam < lammax)
plt.plot(lam[m], flux[m])
plt.plot(wl, flr)
#plt.yscale("log")
plt.xlabel(r"Wavelength [$\mu$m]")
plt.ylabel(r"Flux [W/m$^2$/$\mu$m]");
```

NaNs can occur when no high-resolution values exist within a given lower-resolution bin. How many NaNs are there?
```python
print(np.sum(~np.isfinite(flr)))
```
0
Let's try it again now focusing on the UV with a higher resolution.
```python
# Set the wavelength and resolution parameters (UV band, higher resolution)
lammin = 0.12
lammax = 0.2
R = 2000
# Construct new low-res wavelength grid at fixed resolving power R
wl, dwl = cg.noise_routines.construct_lam(lammin, lammax, R)
# Down-bin flux to low-res; bins containing no high-res samples come out NaN
flr = cg.downbin_spec(flux, lam, wl, dlam=dwl)
# Plot high-res and degraded spectra over the selected range
m = (lam > lammin) & (lam < lammax)
plt.plot(lam[m], flux[m])
plt.plot(wl, flr)
plt.yscale("log")
plt.xlabel(r"Wavelength [$\mu$m]")
plt.ylabel(r"Flux [W/m$^2$/$\mu$m]");
```

```python
print(np.sum(~np.isfinite(flr)))
```
26
### Optimal Resolution for Observing Earth's O$_2$ A-band
Let's load in the Earth's reflectance spectrum [(Robinson et al., 2011)](http://adsabs.harvard.edu/abs/2011AsBio..11..393R).
```python
lamhr, Ahr, fstar = cg.get_earth_reflect_spectrum()
```
Now let's isolate just the O$_2$ A-band.
```python
# Wavelength limits bracketing the O2 A-band
lammin = 0.750
lammax = 0.775
# Create a wavelength mask selecting just the band
m = (lamhr > lammin) & (lamhr < lammax)
# Plot the band at full resolution
plt.plot(lamhr[m], Ahr[m])
plt.xlabel(r"Wavelength [$\mu$m]")
plt.ylabel("Geometric Albedo")
plt.title(r"Earth's O$_2$ A-Band");
```

Define a set of resolving powers for us to loop over.
```python
R = np.array([1, 10, 30, 70, 100, 150, 200, 500, 1000])
```
Let's down-bin the high-res spectrum at each `R`. For each `R` in the loop we will construct a new wavelength grid, down-bin the high-res spectrum, and plot the degraded spectrum. Let's also save the minimum value in the degraded spectrum to assess how close we get to the actual bottom of the band.
```python
# Minimum (bottom-of-band) albedo recorded for each resolving power
bottom_val = np.zeros(len(R))
# Loop over R
for i, r in enumerate(R):
    # Construct new low-res wavelength grid
    wl, dwl = cg.noise_routines.construct_lam(lammin, lammax, r)
    # Down-bin flux to low-res
    Alr = cg.downbin_spec(Ahr, lamhr, wl, dlam=dwl)
    # Plot as a step curve.
    # NOTE(review): ls="steps-mid" is the legacy linestyle spelling; newer
    # matplotlib expects drawstyle="steps-mid" -- confirm the pinned version.
    plt.plot(wl, Alr, ls = "steps-mid", alpha = 0.5, label = "%i" %r)
    # Save bottom value
    bottom_val[i] = np.min(Alr)
# Finish plot with the high-res band for reference
plt.plot(lamhr[m], Ahr[m], c = "k")
plt.xlim(lammin, lammax)
plt.legend(fontsize = 12, title = r"$\lambda / \Delta \lambda$")
plt.xlabel(r"Wavelength [$\mu$m]")
plt.ylabel("Geometric Albedo")
plt.title(r"Earth's O$_2$ A-Band");
```

We can now compare the bottom value in low-res spectra to the bottom of the high-res spectrum.
```python
# Create resolution array to loop over
Nres = 100
R = np.linspace(1,1000, Nres)
# Array to store bottom-of-band albedos
bottom_val = np.zeros(len(R))
# Loop over R
for i, r in enumerate(R):
# Construct new low-res wavelength grid
wl, dwl = cg.noise_routines.construct_lam(lammin, lammax, r)
# Down-bin flux to low-res
Alr = cg.downbin_spec(Ahr, lamhr, wl, dlam=dwl)
# Save bottom value
bottom_val[i] = np.min(Alr)
# Make plot
plt.plot(R, bottom_val);
plt.xlabel(r"Resolving Power ($\lambda / \Delta \lambda$)")
plt.ylabel("Bottom of the Band Albedo")
plt.title(r"Earth's O$_2$ A-Band");
```

The oscillations in the above plot are the result of non-optimal placement of the resolution element relative to the oxygen A-band central wavelength. We can do better than this by iterating over different minimum wavelength positions.
```python
# Create resolution array to loop over
Nres = 100
R = np.linspace(2, 1000, Nres)
# Number of trial grid offsets per resolution
Ntest = 20
# Arrays to save quantities
bottom_vals = np.nan*np.zeros([len(R), Ntest])
best = np.nan*np.zeros(len(R))   # index of best trial offset per R (nan = unset)
Alrs = []
lams = []
# Loop over R
for i, r in enumerate(R):
    # Trial minimum wavelengths: shift the grid start by up to one spectral
    # element (width ~0.76/r) so the band center can land inside a single bin
    lammin_vals = np.linspace(lammin - 1.0*0.76/r, lammin, Ntest)
    # Loop over minimum wavelengths to adjust bin centers
    for j, lmin in enumerate(lammin_vals):
        # Construct new low-res wavelength grid
        wl, dwl = cg.noise_routines.construct_lam(lmin, lammax, r)
        # Down-bin flux to low-res
        Alr = cg.downbin_spec(Ahr, lamhr, wl, dlam=dwl)
        # Keep track of the trial with the deepest band minimum so far.
        # (Fixed: the previous condition updated `best` whenever the new
        # minimum beat *any* earlier trial, so it could end up pointing at
        # a non-optimal trial; now we compare against the current best.)
        if (not np.isfinite(best[i])) or (np.min(Alr) < bottom_vals[i, int(best[i])]):
            best[i] = j
        # Save quantities
        bottom_vals[i, j] = np.min(Alr)
        Alrs.append(Alr)
        lams.append(wl)
# Reshape saved arrays to (Ntest, Nres); 'F' order matches the append order
# (j varies fastest within each i)
Alrs = np.array(Alrs).reshape((Ntest, Nres), order = 'F')
lams = np.array(lams).reshape((Ntest, Nres), order = 'F')
best = np.array(best, dtype=int)
# Plot the global minimum over trial offsets at each resolution
plt.plot(R, np.min(bottom_vals, axis = 1));
plt.xlabel(r"Resolving Power ($\lambda / \Delta \lambda$)")
plt.ylabel("Bottom of the Band Albedo")
plt.title(r"Earth's O$_2$ A-Band");
```

In the above plot we are looking at the minimum albedo of the oxygen A-band *after optimizing the central band location*, and we can see that the pesky oscillations are now gone.
At low resolution the above curve quickly decays as less and less continuum is mixed into the band measurement, while at high resolution the curve asymptotes to the true minimum of the band.
Essentially what we have done is ensure that, when possible, the oxygen band is not split between multiple spectral elements. This can be seen in the following plot, which samples a few of the optimal solutions. It doesn't look too different from the first version of this plot, but it does more efficiently sample the band shape (note that the lower resolution spectra have deeper bottoms and more tightly capture the band wings).
```python
# Get some log-spaced indices so we're not plotting all R
iz = np.array(np.logspace(np.log10(1), np.log10(100), 10).round(), dtype=int) - 1
# Loop over R
for i, r in enumerate(R):
    # Plot some of the resolutions at their optimal grid offset
    if i in iz:
        # Fixed NameError: the arrays saved in the previous cell are `lams`
        # (wavelength grids) and `Alrs` (binned albedos), not `b` and `a`.
        plt.plot(lams[best[i], i], Alrs[best[i], i], ls = "steps-mid", alpha = 0.5, label = "%i" %r)
plt.xlim(lammin, lammax)
# Finish plot with the high-res band for reference
plt.plot(lamhr[m], Ahr[m], c = "k")
plt.legend(fontsize = 12, title = r"$\lambda / \Delta \lambda$")
plt.xlabel(r"Wavelength [$\mu$m]")
plt.ylabel("Geometric Albedo")
plt.title(r"Earth's O$_2$ A-Band");
```

As you can see, much can be done with the simple re-binning functions, but the true utility of the `coronagraph` model comes from the noise calculations. Please refer to the other tutorials and examples for more details!
|
jlustigyREPO_NAMEcoronagraphPATH_START.@coronagraph_extracted@coronagraph-master@docs@notebooks@degrading_a_spectrum.ipynb@.PATH_END.py
|
{
"filename": "eht_unify.py",
"repo_name": "AFD-Illinois/iharm3d",
"repo_path": "iharm3d_extracted/iharm3d-master/script/analysis/eht_unify.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""Merge several pickled EHT analysis-average dictionaries into one.

Usage: eht_unify.py avg1.p [avg2.p ...] logfile
All arguments but the last are pickles produced by the averaging scripts;
the last argument is a simulation log file (loaded via hdf5_to_dict).
Writes the unified dictionary to ./eht_out.p.
"""
import os, sys
import pickle

import numpy as np

import hdf5_to_dict as io

# Load every input pickle and remember which file each came from
avgs = []
for fname in sys.argv[1:-1]:
    print("Loading {}".format(fname))
    # Use a context manager so each file handle is closed promptly
    # (previously the handle from pickle.load(open(...)) was left for the GC)
    with open(fname, "rb") as f:
        avgs.append(pickle.load(f))
    avgs[-1]['fname'] = fname

#for avg in avgs:
#  print("Name: {}, contents: {}".format(avg['fname'], avg.keys()))

# Use the dictionary with the most keys as the template for output shapes
num_keys = [len(avg.keys()) for avg in avgs]
avg_max_keys = num_keys.index(max(num_keys))

# TODO organize this damn dict. HDF5?
# Keys copied verbatim from the template dict rather than accumulated
direct_list = ['fname', 'a', 'gam', 'gam_e', 'gam_p', 'r', 'th', 'th_eh', 'th_bz', 'phi', 'avg_start', 'avg_end', 'avg_w', 't']
keys_to_sum = [key for key in avgs[avg_max_keys].keys() if key not in direct_list]

uni = {}
for key in keys_to_sum:
    uni[key] = np.zeros_like(avgs[avg_max_keys][key])
    for avg in avgs:
        if key in avg:
            # Keep track of averages w/weights, otherwise just sum since everything's time-dependent
            if (key[-2:] == '_r' or key[-3:] == '_th' or key[-4:] == '_hth' or key[-4:] == '_phi' or
                key[-4:] == '_rth' or key[-6:] == '_thphi' or key[-5:] == '_rphi' or key[-4:] == '_pdf'):
                uni[key] += avg[key]*avg['avg_w']
            elif key[-1:] == 't':
                # Time series may have different lengths; add over the overlap
                if uni[key].shape[0] < avg[key].shape[0]:
                    uni[key] += avg[key][:uni[key].shape[0]]
                else:
                    uni[key][:avg[key].shape[0]] += avg[key]
            else:
                if uni[key].size < avg[key].size:
                    uni[key] += avg[key][:uni[key].size]
                else:
                    uni[key][:avg[key].size] += avg[key]

# Copy the template's coordinate/metadata keys through unchanged
for key in direct_list:
    if key in avgs[avg_max_keys].keys():
        uni[key] = avgs[avg_max_keys][key]

# Add compat/completeness stuff
uni['mdot'] = uni['Mdot']
uni['phi_b'] = uni['Phi_b']/np.sqrt(uni['Mdot'])

# Attach the diagnostics log (last command-line argument), if it exists
if os.path.exists(sys.argv[-1]):
    uni['diags'] = io.load_log(sys.argv[-1])

with open("eht_out.p", "wb") as outf:
    print("Writing eht_out.p")
    pickle.dump(uni, outf)
|
AFD-IllinoisREPO_NAMEiharm3dPATH_START.@iharm3d_extracted@iharm3d-master@script@analysis@eht_unify.py@.PATH_END.py
|
{
"filename": "vectorpotentialdipole_verify.py",
"repo_name": "fmihpc/vlasiator",
"repo_path": "vlasiator_extracted/vlasiator-master/doc/vectordipole/vectorpotentialdipole_verify.py",
"type": "Python"
}
|
#!/usr/bin/env python
# (Fixed: this shebang line previously had "import matplotlib.pyplot as plt"
# fused onto it, making it useless as an interpreter directive; the real
# import is done with the other imports below.)

# /*
# * This file is part of Vlasiator.
# * Copyright 2010-2016 Finnish Meteorological Institute
# * Copyright 2017-2019 University of Helsinki
# *
# * For details of usage, see the COPYING file and read the "Rules of the Road"
# * at http://www.physics.helsinki.fi/vlasiator/
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License along
# * with this program; if not, write to the Free Software Foundation, Inc.,
# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# */

import numpy as np
import math
import sys,os
import pytools as pt
import matplotlib.pyplot as plt
import fieldmodels

''' Testing routine for different dipole formulations

run using "python vectorpotentialdipole_verify.py [arg1] [arg2]" where arg1 is a number from 0 to 5
for different test profile starting positions, directions, and dipole tilts.
If arg2 is present, the code also calculates verification of derivative terms.

Generates plots of magnetic field components for different dipole models along cuts through the
simulation domain. For derivative analysis, calculates the derivatives along said cuts analytically
and numerically and outputs the ratio.
'''

# Test-set index from the first command-line argument (default 0)
if len(sys.argv)!=1:
    testset = int(sys.argv[1])
else:
    testset = 0

# Derivative verification is skipped only when exactly one argument is given.
# NOTE(review): the docstring says derivatives are computed when arg2 is
# present, but this condition also enables them when *no* arguments are
# given at all; preserved as-is -- confirm intent before changing.
if len(sys.argv)!=2:
    calcderivatives=True
else:
    calcderivatives=False

# Whether to also plot |B| as a fourth curve per model
plotmagnitude=False

# Non-interactive backend: figures are only written to file
plt.switch_backend('Agg')

outfilename = "./vecpotdip_verify_"+str(testset)+".png"

RE=6371000.   # Earth radius [m]
#RE=1
epsilon=1.e-15   # tolerance for "effectively zero" comparisons
# Per-testset configuration: (tilt phi, tilt theta, line theta [deg],
# line start [RE]). Values are kept exactly as in the original branches
# (note testsets 4/5 deliberately use an integer tilt phi of 10).
_testset_configs = {
    0: (0., 0., 0., [0, 0, 0]),
    1: (0., 0., 45., [0, 0, 0]),
    2: (0., 0., 0., [-3, -3, -3]),
    3: (0., 0., 45., [-3, -3, -3]),
    4: (10, 0., 0., [0, 0, 0]),
    5: (10, 45., 0., [0, 0, 0]),
}
if testset in _testset_configs:
    tilt_angle_phi, tilt_angle_theta, line_theta, _start = _testset_configs[testset]
else:  # Same as 0
    print("Default")
    tilt_angle_phi, tilt_angle_theta, line_theta, _start = _testset_configs[0]
line_start = np.array(_start)

print("Test set "+str(testset)+" line start "+str(line_start)+" tilt phi "+str(tilt_angle_phi)+" tilt theta "+str(tilt_angle_theta)+" line theta "+str(line_theta))

# Polar angles of the sample profiles (radians); the azimuthal angle is the
# same scalar for every profile, broadcast into an array below.
line_phi = np.array([0,45,80,90,110,135])*math.pi/180.
line_theta = np.zeros(len(line_phi)) + line_theta * math.pi/180.

#line_start = np.array([-5,-5,-5])

step = 0.1
linewidth=2
linthresh=1.e-10
fontsize=20

#fieldmodels.dipole.set_dipole(centerx, centery, centerz, tilt_phi, tilt_theta, mult=1.0, radius_f=None, radius_z=None):
dip = fieldmodels.dipole(0,0,0,tilt_angle_phi,tilt_angle_theta)
mdip = fieldmodels.dipole(80*RE,0,0,tilt_angle_phi,180.-tilt_angle_theta)
# Create figure with one stacked subplot per profile direction
fig = plt.figure()
fig.set_size_inches(20,30)
nsubplots=len(line_theta)
for i in range(nsubplots):
    fig.add_subplot(nsubplots,1,i+1)
axes = fig.get_axes()

# Radial sample points along each profile, from 0.1 to 100 RE (in metres)
radii = np.arange(0.1,100,step)*RE
nr=len(radii)
radiiRE = radii/RE

fig.suptitle(r"Profiles starting from ("+str(line_start[0])+","+str(line_start[1])+","+str(line_start[2])+") [RE] with dipole tilt $\Phi="+str(int(tilt_angle_phi))+"$, $\Theta="+str(int(tilt_angle_theta))+"$", fontsize=fontsize)

for i in range(nsubplots):
    print("subplot ",i)
    ax = axes[i]
    ax.text(0.2,0.08,r"profile with $\theta="+str(int(line_theta[i]*180./math.pi))+"$, $\phi="+str(int(line_phi[i]*180./math.pi))+"$",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize)
    # Cartesian coordinates of the sample points along this profile
    xv = line_start[0]*RE + radii*np.sin(line_phi[i])*np.cos(line_theta[i])
    yv = line_start[1]*RE + radii*np.sin(line_phi[i])*np.sin(line_theta[i])
    zv = line_start[2]*RE + radii*np.cos(line_phi[i])

    # Columns 0-2 hold the field components, column 3 the optional magnitude
    B1 = np.zeros([nr,4]) # X-scaled vector dipole
    B2 = np.zeros([nr,4]) # regular dipole
    B3 = np.zeros([nr,4]) # regular dipole + mirror dipole
    B4 = np.zeros([nr,4]) # line dipole + mirror dipole

    for j in range(nr):
        for k in range(3):
            # Evaluate each field model at the sample point; the last two
            # arguments select component k (0 = value, not a derivative)
            B1[j,k] = dip.getX(xv[j],yv[j],zv[j],0,k,0)
            B2[j,k] = dip.get_old(xv[j],yv[j],zv[j],0,k,0)
            B3[j,k] = B2[j,k] + mdip.get_old(xv[j],yv[j],zv[j],0,k,0)
            B4[j,k] = dip.get_ldp(xv[j],yv[j],zv[j],0,k,0)
            B4[j,k] = B4[j,k] + mdip.get_ldp(xv[j],yv[j],zv[j],0,k,0)
        if plotmagnitude is True:
            B1[j,3] = np.linalg.norm(B1[j,0:3])
            B2[j,3] = np.linalg.norm(B2[j,0:3])
            B3[j,3] = np.linalg.norm(B3[j,0:3])
            B4[j,3] = np.linalg.norm(B4[j,0:3])

    colors=['r','k','b','magenta']
    coords = ['x','y','z','mag']
    plotrange = range(3)
    if plotmagnitude is True:
        plotrange = range(4)
    for k in plotrange:
        ax.plot(radiiRE, B1[:,k], c=colors[k], linestyle='-', linewidth=linewidth, label='vectorpot B'+coords[k], zorder=-10)
        ax.plot(radiiRE, B2[:,k], c=colors[k], linestyle='--', linewidth=linewidth, label='regular B'+coords[k])
        ax.plot(radiiRE, B3[:,k], c=colors[k], linestyle=':', linewidth=linewidth, label='reg+mirror B'+coords[k])
        if tilt_angle_phi<epsilon:
            # The line-dipole model is only plotted for zero tilt
            ax.plot(radiiRE, B4[:,k], c=colors[k], linestyle='-.', linewidth=linewidth, label='line+mirror B'+coords[k])
    ax.set_xlabel(r"$r$ [$r_\mathrm{E}$]", fontsize=fontsize)
    ax.set_xlim([0,70])
    #ax.set_yscale('log', nonposy='clip')
    ax.set_yscale('symlog', linthreshy=linthresh)
    for item in ax.get_xticklabels():
        item.set_fontsize(fontsize)
    for item in ax.get_yticklabels():
        item.set_fontsize(fontsize)
    # Clamp the y-range so extreme near-origin values don't dominate the plot
    ylims = np.array(ax.get_ylim())
    if ylims[0] < -1e-4:
        ylims[0] = -1e-4
    if ylims[1] > 1e-4:
        ylims[1] = 1e-4
    ax.set_ylim(ylims)

handles, labels = axes[-1].get_legend_handles_labels()
axes[-1].legend(handles, labels, fontsize=fontsize)
fig.savefig(outfilename)
plt.close()
if calcderivatives:
    # --- Verification of analytical derivatives ----------------------------
    # For every model, compare each analytical derivative dB_k/d(axis) with a
    # centered-difference numerical estimate and plot their ratio (~1 when
    # the analytical expressions are correct). The original hand-unrolled
    # the same logic 12 times (4 models x 3 axes); it is factored into
    # helpers below with the masking rules preserved exactly.
    step2=0.00001 # distance in each direction for calculating numerical derivative

    # Field evaluators for the four models (B3/B4 include the mirror dipole)
    def _field_B1(x, y, z, k):
        return dip.getX(x, y, z, 0, k, 0)
    def _field_B2(x, y, z, k):
        return dip.get_old(x, y, z, 0, k, 0)
    def _field_B3(x, y, z, k):
        return dip.get_old(x, y, z, 0, k, 0) + mdip.get_old(x, y, z, 0, k, 0)
    def _field_B4(x, y, z, k):
        return dip.get_ldp(x, y, z, 0, k, 0) + mdip.get_ldp(x, y, z, 0, k, 0)

    def _numerical_derivative(field, x, y, z, k, axis):
        # Centered difference of component k along the given axis (0=x,1=y,2=z)
        off = [0., 0., 0.]
        off[axis] = step2*RE
        return (field(x+off[0], y+off[1], z+off[2], k)
                - field(x-off[0], y-off[1], z-off[2], k))/(2*step2*RE)

    def _ratio(analytic, numeric, bref):
        # Ratio of analytical to numerical derivative with the same masking
        # as the original unrolled code: near-zero denominators and
        # near-zero field values are masked out (None -> NaN in the float
        # arrays); a non-negligible analytic value over a negligible
        # numeric one maps to 0.
        if abs(numeric) > epsilon*bref:
            result = analytic/numeric
        elif (abs(numeric) < epsilon*bref) and (abs(analytic) < epsilon*bref):
            result = None
        else:
            result = 0
        if abs(bref) < epsilon:
            result = None
        return result

    for kkk in range(3):
        print("derivatives d"+coords[kkk])
        dB1 = np.zeros([nr,3,3])
        dB2 = np.zeros([nr,3,3])
        dB3 = np.zeros([nr,3,3])
        dB4 = np.zeros([nr,3,3])

        # Create figure, one subplot per profile
        fig = plt.figure()
        fig.set_size_inches(20,30)
        for i in range(nsubplots):
            fig.add_subplot(nsubplots,1,i+1)
        axes = fig.get_axes()
        fig.suptitle(r"Numerical and analytical derivative ratios, profiles starting from ("+str(line_start[0])+","+str(line_start[1])+","+str(line_start[2])+") [RE] with dipole tilt $\Phi="+str(int(tilt_angle_phi))+"$, $\Theta="+str(int(tilt_angle_theta))+"$", fontsize=fontsize)

        for i in range(nsubplots):
            print("derivatives subplot ",i)
            ax = axes[i]
            xv = line_start[0]*RE + radii*np.sin(line_phi[i])*np.cos(line_theta[i])
            yv = line_start[1]*RE + radii*np.sin(line_phi[i])*np.sin(line_theta[i])
            zv = line_start[2]*RE + radii*np.cos(line_phi[i])

            # Fill field values and analytical derivatives along the profile
            for j in range(nr):
                for k in range(3):
                    B1[j,k] = _field_B1(xv[j], yv[j], zv[j], k)
                    B2[j,k] = _field_B2(xv[j], yv[j], zv[j], k)
                    B3[j,k] = _field_B3(xv[j], yv[j], zv[j], k)
                    B4[j,k] = _field_B4(xv[j], yv[j], zv[j], k)
                    # Analytical derivatives (second argument 1 selects the
                    # derivative of component k with respect to axis kkk)
                    dB1[j,k,kkk] = dip.getX(xv[j],yv[j],zv[j],1,k,kkk)
                    dB2[j,k,kkk] = dip.get_old(xv[j],yv[j],zv[j],1,k,kkk)
                    dB3[j,k,kkk] = dB2[j,k,kkk] + mdip.get_old(xv[j],yv[j],zv[j],1,k,kkk)
                    dB4[j,k,kkk] = dip.get_ldp(xv[j],yv[j],zv[j],1,k,kkk) + mdip.get_ldp(xv[j],yv[j],zv[j],1,k,kkk)

            # Replace the analytical derivatives in-place with the ratio of
            # analytical to numerical derivative (endpoints skipped, as in
            # the original)
            for j in np.arange(1,nr-1):
                for k in range(3):
                    for Bv, dBv, field in ((B1, dB1, _field_B1), (B2, dB2, _field_B2),
                                           (B3, dB3, _field_B3), (B4, dB4, _field_B4)):
                        numd = _numerical_derivative(field, xv[j], yv[j], zv[j], k, kkk)
                        dBv[j,k,kkk] = _ratio(dBv[j,k,kkk], numd, Bv[j,k])

            # Print min/max of the valid (finite) ratios per model
            print(np.ma.amin(np.ma.masked_invalid(dB1)),np.ma.amax(np.ma.masked_invalid(dB1)))
            print(np.ma.amin(np.ma.masked_invalid(dB2)),np.ma.amax(np.ma.masked_invalid(dB2)))
            print(np.ma.amin(np.ma.masked_invalid(dB3)),np.ma.amax(np.ma.masked_invalid(dB3)))
            print(np.ma.amin(np.ma.masked_invalid(dB4)),np.ma.amax(np.ma.masked_invalid(dB4)))

            colors=['r','k','b']
            coords = ['x','y','z']
            for k in range(3):
                ax.plot(radiiRE, dB1[:,k,kkk], c=colors[k], linestyle='-', linewidth=linewidth, label='vectorpot dB'+coords[k]+'/d'+coords[kkk])
                ax.plot(radiiRE, dB2[:,k,kkk], c=colors[k], linestyle='--', linewidth=linewidth, label='regular dB'+coords[k]+'/d'+coords[kkk])
                ax.plot(radiiRE, dB3[:,k,kkk], c=colors[k], linestyle=':', linewidth=linewidth, label='reg+mirror dB'+coords[k]+'/d'+coords[kkk])
                if tilt_angle_phi<epsilon:
                    # The line-dipole model is only plotted for zero tilt
                    ax.plot(radiiRE, dB4[:,k,kkk], c=colors[k], linestyle='-.', linewidth=linewidth, label='line+mirror dB'+coords[k]+'/d'+coords[kkk])
            ax.text(0.2,0.08,r"profile with $\theta="+str(int(line_theta[i]*180./math.pi))+"$, $\phi="+str(int(line_phi[i]*180./math.pi))+"$",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize)
            ax.set_xlabel(r"$r$ [$r_\mathrm{E}$]", fontsize=fontsize)
            ax.set_xlim([1,70])
            #ax.set_yscale('log', nonposy='clip')
            # A correct implementation keeps the ratio near unity
            ax.set_ylim([-.1,1.1])
            for item in ax.get_xticklabels():
                item.set_fontsize(fontsize)
            for item in ax.get_yticklabels():
                item.set_fontsize(fontsize)

        handles, labels = axes[-1].get_legend_handles_labels()
        axes[-1].legend(handles, labels, fontsize=fontsize).set_zorder(10)
        fig.savefig(outfilename[:-4]+"_d"+coords[kkk]+outfilename[-4:])
        plt.close()
|
fmihpcREPO_NAMEvlasiatorPATH_START.@vlasiator_extracted@vlasiator-master@doc@vectordipole@vectorpotentialdipole_verify.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/llms/utils.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checkers; at runtime the symbol is
    # resolved lazily through the module-level __getattr__ below.
    from langchain_community.llms.utils import enforce_stop_tokens

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"enforce_stop_tokens": "langchain_community.llms.utils"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "enforce_stop_tokens",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@llms@utils.py@.PATH_END.py
|
{
"filename": "waveforms.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/signal/waveforms.py",
"type": "Python"
}
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.
import warnings
from . import _waveforms
__all__ = [ # noqa: F822
'sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
'unit_impulse', 'place', 'nan', 'mod', 'extract', 'log', 'exp',
'polyval', 'polyint'
]
def __dir__():
    """Return the legacy public names of this deprecated namespace."""
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): forward legacy attribute access to
    # the private implementation module, emitting a deprecation warning; any
    # name outside the legacy API raises AttributeError.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
                      "the `scipy.signal.waveforms` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_waveforms, name)
    raise AttributeError(
        "scipy.signal.waveforms is deprecated and has no attribute "
        f"{name}. Try looking in scipy.signal instead.")
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@signal@waveforms.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/stpipe/tests/data/__init__.py",
"type": "Python"
}
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@stpipe@tests@data@__init__.py@.PATH_END.py
|
|
{
"filename": "_detail.py",
"repo_name": "bek0s/gbkfit",
"repo_path": "gbkfit_extracted/gbkfit-master/src/gbkfit/fitting/fitters/_detail.py",
"type": "Python"
}
|
bek0sREPO_NAMEgbkfitPATH_START.@gbkfit_extracted@gbkfit-master@src@gbkfit@fitting@fitters@_detail.py@.PATH_END.py
|
|
{
"filename": "simple_anim.ipynb",
"repo_name": "HITS-AIN/PINK",
"repo_path": "PINK_extracted/PINK-master/jupyter/devel/simple_anim.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%matplotlib widget
```
# Animated line plot
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()

# One period of a sine curve; `line` is the single artist mutated each frame
x = np.arange(0, 2*np.pi, 0.01)
line, = ax.plot(x, np.sin(x))

def init(): # only required for blitting to give a clean slate.
    line.set_ydata([np.nan] * len(x))
    return line,

def animate(i):
    line.set_ydata(np.sin(x + i / 100)) # update the data.
    return line,

# interval is the delay between frames in ms; save_count bounds the number
# of frames cached if the animation is later saved
ani = animation.FuncAnimation(
    fig, animate, init_func=init, interval=2, blit=True, save_count=50)

# To save the animation, use e.g.
#
# ani.save("movie.mp4")
#
# or
#
# writer = animation.FFMpegWriter(
#     fps=15, metadata=dict(artist='Me'), bitrate=1800)
# ani.save("movie.mp4", writer=writer)

plt.show()
```
<div style="display: inline-block;">
<div class="jupyter-widgets widget-label" style="text-align: center;">
Figure
</div>
<img src='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAAHgCAYAAAA10dzkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABcP0lEQVR4nO3deVxU5cIH8N+ZGRj2TXbZ3BE3FBVRS0sSl0rLSkszzexmWpltcu9N221/2yzLJfWmaZtlVqThmiIohCsuKAgCwyLCsMgAM+f9A50u1w0VeGbm/L6fz/m8rzNnDr8z4Z2fz5zzPJIsyzKIiIiISDFUogMQERERUetiASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGI3oANbMZDIhPz8frq6ukCRJdBwiIiJqAlmWUVFRgcDAQKhUyhwLYwG8Afn5+QgODhYdg4iIiK5Dbm4ugoKCRMcQggXwBri6ugJo+AVyc3MTnIaIiIiaQq/XIzg42Pw5rkQsgDfgwte+bm5uLIBERERWRsmXbynzi28iIiIiBWMBJCIiIlIYFkAiIiIihWEBJCIiIlIYFkAiIiIihWEBJCIiIlIYFkAiIiIihWEBJCIiIlIYFkAiIiIihbGKArh9+3bccccdCAwMhCRJ+PHHH6/6mq1bt6JPnz7QarXo2LEjli9fftE+CxcuRFhYGBwcHBAdHY2UlJTmD09ERERkYayiAFZVVaFXr15YuHBhk/bPysrC6NGjccsttyA9PR2zZ8/GI488gt9//928z9q1azFnzhzMnz8faWlp6NWrF+Li4lBUVNRSp0FERERkESRZlmXRIa6FJElYt24dxo4de9l9XnjhBfzyyy84ePCg+bEJEyagrKwMCQkJAIDo6Gj069cPn3zyCQDAZDIhODgYTzzxBObOndukLHq9Hu7u7igvL+dawERERFaCn9+ARnSAlpCUlITY2NhGj8XFxWH27NkAgNraWqSmpiI+Pt78vEqlQmxsLJKSki57XIPBAIPBYP6zXq9v3uBklUqrapFRoEduaTWKKwworjTAUGcyP+/mqIGPqxY+rlp08nVFR18XONipBSYmIiKls8kCqNPp4Ofn1+gxPz8/6PV6nDt3DmfPnoXRaLzkPkeOHLnscRcsWICXX365RTKT9dCV12D7sWJsO1aMtJyzKCivuabXq1USOvm6YED7NhjSxQcD2rWBoz0LIRERtR6bLIAtJT4+HnPmzDH/Wa/XIzg4WGAiai3l1XVYvz8f36Wexr7csoueD2vjhHbezvB1dYCPq9Zc6GRZRvm5OhRXGKDT1+CorgJnq+twRFeBI7oKLN+VDa1GheHd/DGuT1vc1MkHapXUymdHRERKY5MF0N/fH4WFhY0eKywshJubGxwdHaFWq6FWqy+5j7+//2WPq9VqodVqWyQzWabMogos2nYS6/flo7a+4WtdSQJ6Bnlg
SGcfDOrQBhGBbnB1sGvS8WRZhk5fg/ScMmw/XoLtx4qRV3YOP+/Lx8/78uHv5oCpg8LwQHRIk49JRER0rWyyAMbExODXX39t9NimTZsQExMDALC3t0dUVBQSExPNN5OYTCYkJiZi1qxZrR2XLNDhfD0++OMYNh7++x8J4f6uuCcqCHdGBsLX1eG6jitJEgLcHRHQwxEjewRAlmUczNPj+7TT+Ck9Dzp9DRb8dgSfbMnE5JhQ/GNIB7ixCBIRUTOzigJYWVmJzMxM85+zsrKQnp4OLy8vhISEID4+Hnl5eVi5ciUA4LHHHsMnn3yC559/Hg8//DA2b96Mb775Br/88ov5GHPmzMFDDz2Evn37on///vjggw9QVVWFqVOntvr5keUo1Nfg3d+P4ru007hwf/zwCD88NrQDegd7QJKa9+tZSZLQI8gdPYLcET8qHD+l5+OL7SeRWVSJhVtOYHVyDp4a1gkTB4TCTm0VszYREZEVsIppYLZu3YpbbrnloscfeughLF++HFOmTEF2dja2bt3a6DVPP/00Dh8+jKCgILz44ouYMmVKo9d/8skneOedd6DT6RAZGYmPPvoI0dHRTc7F28hth9EkY9mfWXh/0zGcqzMCAEb3DMDTsZ3Q0de1VbOYTDI2ZRTind+PIrOoEgDQwccZb47riX5hXq2ahYjIFvHz20oKoKXiL5BtOKLT44Xv9mPf6XIAQFSoJ/41uiv6hHgKzVVvNGHt3lz836ZjKKmsBQA8OCAUz4/owusDiYhuAD+/WQBvCH+BrJvJJGPZziy8lXAEdUYZrg4a/Ht0V9zXN7jZv+q9EeXVdXj918P4Zu9pAECwlyM+mtAbvQUXVCIia8XPbxbAG8JfIOtVWlWLZ7/dh81HGpb+i+3qh9fv6g4/t+u7uaM17MwswQvf78fps+egUUl4Lq4Lpt/UHipOG0NEdE34+c0CeEP4C2Sd9p8uw6MrU6HT18Beo8K82yMwMTrEokb9Lqf8XB3++cMB/HKgAAAwLNwXH0yI5FfCRETXgJ/fAG8rJEVZvy8f9y5Kgk5fg/Y+zvhp5iBMGhBqFeUPANwd7fDJA72x4O4e0GpUSDxShLs/3YVTZ6pERyMiIivCAkiKIMsy3tt4FE9+/RcM9SYMC/fFTzMHoWuA9f3LT5Ik3N8/BN/8IwZ+blocL6rEmIU7sfvkGdHRiIjISrAAks2rN5rw/Hf78fHmhrkkHxvSAV9M7mv1X5v2CvbA+lmD0SvYA2XVdZi8LAUJBwtExyIiIivAAkg2rabOiMe+SsW3qaehVkl4e1xPzB0ZbjPr7fq5OWDtowMQ180PtfUmPL4qDauTc0THIiIiC8cCSDarylCPyctS8EdGEbQaFRZNisJ9/YJFx2p2DnZqfDoxCvf3D4ZJBv657gC+2H5CdCwiIrJgLIBkk6pr6zF1+R6kZJXCVavBf6ZF47YIP9GxWoxaJeGNu3pg1i0dAQBv/HoEi7efFJyKiIgslVWsBUx0Lapr6/Hwf5e/R6IRGewhOlaLkyQJz8Z1gVol4cPE43j91wxIEvDITe1FRyMiIgvDEUCyKedqjZi2fC92nyyFi1aDldP6K6L8/benb+uMJ4d1AgC89ksGluzgSCARETXGAkg2o85owszVaUg6eQbO9mqseLi/YpdLezq2E564teHr4Nd+ycC3e3MFJyIiIkvCAkg2QZZl/POHA9h8pAgOdiosf7g/okKVWf6Ahq+D59zWGf+4ueHr37k/HMDmI4WCUxERkaVgASSb8N7GY/g29TRUEvDJ/X3QL8xLdCThJEnCCyPCcXfvtjCaZDy+Kg1/5ZwVHYuIiCwACyBZvZVJ2fhkS8Mkz2/c1QOxNny377VSqSS8dU9PDOnsg5o6Ex5evgcniitFxyIiIsFYAMmqbTlShPnrDwEA5tzWGRP6hwhOZHns1Cp8OrEPegW542x1HR5evgdl1bWiYxERkUAsgGS1Mosq8OTXf0GWgfF9g803PdDF
nLUaLJvSD0Gejjh1phozV6ehzmgSHYuIiARhASSrVFZdi0dW7EWFoR79w7zw6tjukCTbWN6tpbRx0WLJQ33hbK/GzswzeHXDYdGRiIhIEBZAsjr1RhNmrf4L2Weq0dbDEZ9N6gN7DX+VmyLc3w3/Nz4SkgSsTDqFVcmnREciIiIB+KlJVmfBb0fwZ2YJnOzVWPJQX7Rx0YqOZFWGd/PHs8O7AADm/3QIe7JLBSciIqLWxgJIVuW3AwVY+mcWAOD9+3qha4Cb4ETW6fGhHXBHr0DUm2TMWp2GkkqD6EhERNSKWADJamSXVOH57/YDAP5xc3uM6B4gOJH1kiQJb97dAx19XVCoN2D2mnQYTbLoWERE1EpYAMkq1NQZMWNVGioM9egX5oln47qIjmT1nLUafDaxDxzt1PgzswQfJh4XHYmIiFoJCyBZhZfWH0JGgR5tnO3x8f19YKfmr25z6OTnigV39wAAfLz5OLYdKxaciIiIWgM/Rcni/ZSehzV7ciFJwEf394a/u4PoSDZlbO+2eCA6BLIMzFmbjqKKGtGRiIiohbEAkkXLLa3Gv9cdBAA8eWsnDOroLTiRbZp3ewTC/V1xpqoWz327H7LM6wGJiGwZCyBZLKNJxpxv0lFhqEefEA+u9NGCHOzU+Oj+3tBqVNh2rBgrdmWLjkRERC2IBZAs1mdbM7En+yxctBp8ML43NLzur0V19nPFP0d1BQC88dsRHNVVCE5EREQthZ+oZJHSc8vwwR8Nd6W+MqYbQto4CU6kDJNjQnFLFx/U1pvw5Nd/oabOKDoSERG1ABZAsjg1dUbMWZuOepOM23sG4K7ebUVHUgxJkvD2Pb3QxtkeRwsr8P6mY6IjERFRC2ABJIvz3sajOFlSBX83B7w+tgckSRIdSVF8XLV4c1xPAMCSHSeRlnNWcCIiImpuVlUAFy5ciLCwMDg4OCA6OhopKSmX3Xfo0KGQJOmibfTo0eZ9pkyZctHzI0aMaI1ToctIPVWKJeeXeltwdw+4O9kJTqRMt0X44a7ebWGSgee+3cevgomIbIzVFMC1a9dizpw5mD9/PtLS0tCrVy/ExcWhqKjokvv/8MMPKCgoMG8HDx6EWq3Gvffe22i/ESNGNNrv66+/bo3ToUuoqTPiue/2Q5aBcX2CcEu4r+hIijb/jgj4uGpxorjKfD0mERHZBqspgO+//z6mT5+OqVOnIiIiAosWLYKTkxOWLVt2yf29vLzg7+9v3jZt2gQnJ6eLCqBWq220n6enZ2ucDl3C/206hpPFVfB11WLe7RGi4yieh5M93rirYZWQL7afQHpumdhARETUbKyiANbW1iI1NRWxsbHmx1QqFWJjY5GUlNSkYyxduhQTJkyAs7Nzo8e3bt0KX19fdOnSBTNmzMCZM2cuewyDwQC9Xt9oo+bxV85ZLN5xEgDwxl386tdS3Bbhh7GRgfwqmIjIxlhFASwpKYHRaISfn1+jx/38/KDT6a76+pSUFBw8eBCPPPJIo8dHjBiBlStXIjExEW+99Ra2bduGkSNHwmi89IfcggUL4O7ubt6Cg4Ov/6TIzFDf8NWvSQbu6t0WsRF+V38RtZr5d3SDt4sWx4sq8fFmfhVMRGQLrKIA3qilS5eiR48e6N+/f6PHJ0yYgDvvvBM9evTA2LFjsWHDBuzZswdbt2695HHi4+NRXl5u3nJzc1shve37bOsJZBZVwttFi/l38KtfS+PpbI/XxnYHAHy+7SQniCYisgFWUQC9vb2hVqtRWFjY6PHCwkL4+/tf8bVVVVVYs2YNpk2bdtWf0759e3h7eyMzM/OSz2u1Wri5uTXa6MacLK7Ep1tOAGi46cDDyV5wIrqUEd39MTzCD/UmGf9adwAmE9cKJiKyZlZRAO3t7REVFYXExETzYyaTCYmJiYiJibnia7/99lsYDAZMmjTpqj/n9OnTOHPmDAICAm44M12dLMt48aeDqDWacHNnH9zek++7JXvpzm5wsldj76mz+GYv
R7+JiKyZVRRAAJgzZw4WL16MFStWICMjAzNmzEBVVRWmTp0KAJg8eTLi4+Mvet3SpUsxduxYtGnTptHjlZWVeO6557B7925kZ2cjMTERY8aMQceOHREXF9cq56R0P6XnY2fmGWg1Krw2pjsnfLZwgR6OmHNbZwDAgt+OoKTSIDgRERFdL43oAE01fvx4FBcXY968edDpdIiMjERCQoL5xpCcnByoVI377NGjR/Hnn39i48aNFx1PrVZj//79WLFiBcrKyhAYGIjhw4fj1VdfhVarbZVzUrLy6jq89sthAMCTwzpxrV8rMWVgGH5Iy8PhAj3e+CUD74+PFB2JiIiugyTLMi/muU56vR7u7u4oLy/n9YDXKP6HA/g6JQcdfV3w65M3wV5jNYPRipeeW4a7Pt0JWQZWPxKNgR29RUciIrom/Py2oq+AyXaknjqLr1NyAACvj+3O8mdlIoM9MCk6FADw7x8PorbeJDgRERFdK37yUqsymmTMX38QAHBPVBCi27e5yivIEj03ogu8XbQ4WVKFL3dmiY5DRETXiAWQWtW3e3NxME8PVwcN5o4MFx2HrpObg535v99HicdRpK8RnIiIiK4FCyC1mvJzdXj796MAgNmxneHtwpttrNndvdsiMtgDVbVGvJlwRHQcIiK6BiyA1Go+/OM4Sqtq0dHXBZNjQkXHoRukUkl46c5uAIAf0vKQlnNWcCIiImoqFkBqFccLK7AiKRtAw4ofdmr+6tmCyGAP3BsVBAB4af0hrhBCRGQl+ClMLU6WZbz882EYTTJui/DDTZ18REeiZvT8iHC4ajXYf7oc36WeFh2HiIiagAWQWtzGw4X4M7ME9hoVXhwdIToONTMfVy2eiu0EAHgr4Qj0NXWCExER0dWwAFKLqq034Y1fMwAA029qxxU/bNTkmDB08HHGmapaLNySKToOERFdBQsgtaivdp/CqTPV8HHV4vGhHUXHoRZir1HhX6O7AgC+3JmN3NJqwYmIiOhKWACpxZSfq8NHm48DAObc1hnOWqtZepquwy1dfDGwQxvU1pvw7sajouMQEdEVsABSi/l0SybKquvQydfFfKco2S5JkvCv0V0hScBP6fnYl1smOhIREV0GCyC1iNzSany5MxsAED8qHBpO+6II3QLdcXfvhrL/+i8ZkGVOC0NEZIn4qUwt4t2NR1FrNGFghza4pYuv6DjUip6N6wytRoWU7FJsPFwoOg4REV0CCyA1u/2ny/BTej4A4J+jukKSJMGJqDUFuDti+k3tAQBv/nYEdUaT4ERERPS/WACpWcmyjNd/aZj25a7ebdG9rbvgRCTCY0M7wNvFHlklVVidnCM6DhER/Q8WQGpWiRlFSM4qhb1GhWfjuoiOQ4K4aDV4KrYzAOCjxOOoMtQLTkRERP+NBZCajdEk462EIwCAhwe1Q1sPR8GJSKQJ/YIR1sYJZ6pqsezPLNFxiIjov7AAUrP58a88HC+qhLujHWYM7SA6Dglmp1bh6dsaRgG/2H4SZ6tqBSciIqILWACpWdTWm/B/fxwDAMwY2gHujnaCE5EluKNnILoGuKHCUI9F206IjkNEROexAFKzWLsnB6fPnoOPqxYPxYSJjkMWQqWS8Fxcwyjg8l3Z0JXXCE5EREQACyA1g3O1Rny0ORMA8OStHeForxaciCzJLV180TfUE4Z6k3lpQCIiEosFkG7YiqRsFFcYEOTpiPH9QkTHIQsjSRKeHxEOAPhmTy6yS6oEJyIiIhZAuiH6mjp8trXh2q6nYzvDXsNfKbpY/3ZeGNrFB/UmGe9vOiY6DhGR4vHTmm7Iku0nUX6uDh19XTC2d1vRcciCPTu8YV7I9fvycThfLzgNEZGysQDSdTtTacDS8/O7PTu8M9QqLvlGl9e9rTtu7xkAAHhv41HBaYiIlI0FkK7bZ1tPoKrWiB5t3RHXzV90HLICc27rDJUEJB4pwr7cMtFxiIgUiwWQrkuRvgb/2X0KAPBsXBdIEkf/6Ora
+7hgbGTDpQIfJvKOYCIiUVgA6bos2nYShnoTokI9cXMnb9FxyIo8MawT1CoJm48UIZ2jgEREQrAA0jUr0tdgVXLD6N/s2E4c/aNr0s7b2TwK+MEfvCOYiEgEFkC6Zp9vbxj96xPigcEdOfpH1+7JYR2hVknYerQYaTlnRcchIlIcqyqACxcuRFhYGBwcHBAdHY2UlJTL7rt8+XJIktRoc3BwaLSPLMuYN28eAgIC4OjoiNjYWBw/zuuSrqSoogZf7b4w+teZo390XULbOOPu3hdGAfl3joiotVlNAVy7di3mzJmD+fPnIy0tDb169UJcXByKioou+xo3NzcUFBSYt1OnTjV6/u2338ZHH32ERYsWITk5Gc7OzoiLi0NNDdcrvZzPz1/71zvEAzfx2j+6AU/c2gkalYTtx4qReoqjgERErclqCuD777+P6dOnY+rUqYiIiMCiRYvg5OSEZcuWXfY1kiTB39/fvPn5+Zmfk2UZH3zwAf79739jzJgx6NmzJ1auXIn8/Hz8+OOPrXBG1qeo4r+v/ePoH92YkDZOGNcnCACvBSQiam1WUQBra2uRmpqK2NhY82MqlQqxsbFISkq67OsqKysRGhqK4OBgjBkzBocOHTI/l5WVBZ1O1+iY7u7uiI6OvuwxDQYD9Hp9o01Jvth2EjV1JkQGe/DOX2oWs27tCI1Kwo7jJdibXSo6DhGRYlhFASwpKYHRaGw0ggcAfn5+0Ol0l3xNly5dsGzZMvz000/46quvYDKZMHDgQJw+fRoAzK+7lmMuWLAA7u7u5i04OPhGT81qFFcY8BXv/KVmFuzlhHv7NowCcl5AIqLWYxUF8HrExMRg8uTJiIyMxJAhQ/DDDz/Ax8cHn3/++XUfMz4+HuXl5eYtNze3GRNbti+2nzCP/g3p7CM6DtmQx4f+PQrIeQGJiFqHVRRAb29vqNVqFBYWNnq8sLAQ/v5NW4LMzs4OvXv3RmZmJgCYX3ctx9RqtXBzc2u0KUFxhcG86sdTHP2jZhbs5YSx5+8I/mRzpuA0RETKYBUF0N7eHlFRUUhMTDQ/ZjKZkJiYiJiYmCYdw2g04sCBAwgIaFiMvl27dvD39290TL1ej+Tk5CYfUykW72i49q9XsAeGcvSPWsCMoR0gScAfGYXIKFDWtbVERCJYRQEEgDlz5mDx4sVYsWIFMjIyMGPGDFRVVWHq1KkAgMmTJyM+Pt68/yuvvIKNGzfi5MmTSEtLw6RJk3Dq1Ck88sgjABruEJ49ezZee+01rF+/HgcOHMDkyZMRGBiIsWPHijhFi1RWXWue9+/JWzty9I9aRAcfF4zq0fCPs4VbOApIRNTSNKIDNNX48eNRXFyMefPmQafTITIyEgkJCeabOHJycqBS/d1nz549i+nTp0On08HT0xNRUVHYtWsXIiIizPs8//zzqKqqwqOPPoqysjIMHjwYCQkJF00YrWTLd2WjutaIrgFuuDXcV3QcsmGzbumIX/YX4JcDBXi6uBIdfFxERyIislmSLMuy6BDWSq/Xw93dHeXl5TZ5PWCloR6D3tyM8nN1+OSB3ri9Z6DoSGTjHlmxB39kFOGeqCC8e28v0XGIyEbZ+ud3U1jNV8DU+r5OzkH5uTq083bGyO4BouOQAsy8pSMA4Me/8nD6bLXgNEREtosFkC6pps6IL3acBADMGNIBahWv/aOW1zvEE4M7eqPeJOPzbSdFxyEislksgHRJ36WeRnGFAQHuDuYpOohaw4VRwLV7c1Gk57rcREQtgQWQLlJvNGHRthMAgEdvbg97DX9NqPUMaO+FvqGeqK03YfEOjgISEbUEfrLTRX7en4/TZ8+hjbM9JvQLER2HFEaSJMy8tWEU8KvdOSitqhWciIjI9rAAUiMmk4xPtzSM/j08uB0c7dWCE5ESDe3sg+5t3XCuzojlO7NExyEisjksgNTIpoxCHC+qhKtWgwdjQkXHIYWSJAmPD20YBVyRdApVhnrBiYiIbAsLIJnJsoxP
z6/CMHlgKNwc7AQnIiWL6+aPsDZOKD9Xh7V7ckXHISKyKSyAZLYz8wz2nS6Hg50KUwe1Ex2HFE6tkjD95vYAgKV/ZqHOaBKciIjIdrAAktnn2xuu/ZvQLwTeLlrBaYiAcX2C4O2iRV7ZOWzYny86DhGRzWABJADAofxy7DheArVKwrTBHP0jy+Bgp8bUQWEAgM+3nQRXriQiah4sgAQA+GJ7w3xro3oEINjLSXAaor9Nig6Fs70aR3QV2Hq0WHQcIiKbwAJIOH22Ghv2FwAA/nH+misiS+HuZIcHohvmo7wwQTkREd0YFkDCsj+zYTTJGNSxDbq3dRcdh+gi0wa3h51aQnJWKf7KOSs6DhGR1WMBVLjy6jqs2ZMDAHj05g6C0xBdmr+7A8ZGNqxJzVFAIqIbxwKocF8ln0J1rRHh/q64uZO36DhEl/WPIQ2XJ2w8XIgTxZWC0xARWTcWQAWrqTPiy53ZABo+XCVJEhuI6Ao6+roitqsfZBlYfP6mJSIiuj4sgAq27q88lFQaEOjugNt7BoqOQ3RVM4Y2jAL+kJaHIn2N4DRERNaLBVChTCbZPIry8OB2sFPzV4EsX1SoF/qGeqLWaMLSnVmi4xARWS1+6ivUpoxCnCypgquDBhP6h4iOQ9Rkjw1puFlp9e4cVNTUCU5DRGSdWAAV6sLEz5MGhMJFqxGchqjpbg33RQcfZ1QY6vHN3tOi4xARWSUWQAXam12K1FNnYa9WYerAMNFxiK6JSiVh2uCGawGX/ZmFeqNJcCIiIuvDAqhAn58f/burd1v4ujkITkN07e7u0xZtnO2RV3YOCYd0ouMQEVkdFkCFySyqxB8ZhQCA6Te3E5yG6Po42KkxaUAoAGDxjizIsiw4ERGRdWEBVJilf2ZBloHYrr7o6OsqOg7RdXswJhT2GhX25ZYh9RSXhyMiuhYsgApSWlWLH9IaLpqfflN7wWmIboy3ixZ3925YHm7xDk4MTUR0LVgAFWTV7lMw1JvQo607+rfzEh2H6IZNG9xwGcPGw4U4daZKcBoiIuvBAqgQhnojViSdAtDwocll38gWdPJzxdAuPpDlhjuCiYioaVgAFeLnfQUoqTTAz02LUT0CRMchajYXLmf4Zu9plFXXCk5DRGQdWAAVQJZlLD0/OvLQwDDYa/ifnWzHwA5tEO7vinN1RqxOyREdh4jIKrAJKEDSiTPIKNDD0U6NB7jsG9kYSZLMo4ArdmWjtp4TQxMRXY1VFcCFCxciLCwMDg4OiI6ORkpKymX3Xbx4MW666SZ4enrC09MTsbGxF+0/ZcoUSJLUaBsxYkRLn0aruzD6d09UEDyc7AWnIWp+d/QKhK+rFoV6A37ely86DhGRxbOaArh27VrMmTMH8+fPR1paGnr16oW4uDgUFRVdcv+tW7fi/vvvx5YtW5CUlITg4GAMHz4ceXl5jfYbMWIECgoKzNvXX3/dGqfTak4UVyLxSMN7NHVQmNgwRC3EXqPCQ+eXNVzyJyeGJiK6GqspgO+//z6mT5+OqVOnIiIiAosWLYKTkxOWLVt2yf1XrVqFxx9/HJGRkQgPD8eSJUtgMpmQmJjYaD+tVgt/f3/z5unp2Rqn02q+3Nkw+hfb1RftfVwEpyFqOROjQ+Bop0ZGgR67TpwRHYeIyKJZRQGsra1FamoqYmNjzY+pVCrExsYiKSmpSceorq5GXV0dvLwaz3+3detW+Pr6okuXLpgxYwbOnLn8B4fBYIBer2+0WbKzVbX4LrVh4ueHB3PZN7JtHk72uK9vEABODE1EdDVWUQBLSkpgNBrh5+fX6HE/Pz/odE1bCP6FF15AYGBgoxI5YsQIrFy5EomJiXjrrbewbds2jBw5Ekaj8ZLHWLBgAdzd3c1bcHDw9Z9UK1idkoOaOhO6Brghpn0b0XGIWtzDg9tBkoCtR4uRWVQhOg4RkcWyigJ4o958802sWbMG69atg4ODg/nxCRMm4M4770SPHj0wduxYbNiwAXv27MHW
rVsveZz4+HiUl5ebt9zc3FY6g2tXW2/CyqRsAMAjnPiZFCK0jTNiuzb8Q3H5rmyxYYiILJhVFEBvb2+o1WoUFhY2erywsBD+/v5XfO27776LN998Exs3bkTPnj2vuG/79u3h7e2NzMzMSz6v1Wrh5ubWaLNUvxzIR6HeAB9XLe7oFSg6DlGreXhQw+UO36fmoby6TnAaIiLLZBUF0N7eHlFRUY1u4LhwQ0dMTMxlX/f222/j1VdfRUJCAvr27XvVn3P69GmcOXMGAQHWvVJGo4mfY0I58TMpyoD2XuaJodfs4cTQRESXYjXNYM6cOVi8eDFWrFiBjIwMzJgxA1VVVZg6dSoAYPLkyYiPjzfv/9Zbb+HFF1/EsmXLEBYWBp1OB51Oh8rKSgBAZWUlnnvuOezevRvZ2dlITEzEmDFj0LFjR8TFxQk5x+aSnFWKg3l6aDUqPBAdKjoOUauSJMl809OKXdmoN3JiaCKi/2U1BXD8+PF49913MW/ePERGRiI9PR0JCQnmG0NycnJQUFBg3v+zzz5DbW0t7rnnHgQEBJi3d999FwCgVquxf/9+3HnnnejcuTOmTZuGqKgo7NixA1qtVsg5NpcLo3/jooLg5cyJn0l57uwViDbO9sgvr8HGw4VXfwERkcJIMmdMvW56vR7u7u4oLy+3mOsBs0uqcMt7WyHLwB9zhqCjL+f+I2V6b+NRfLw5E31DPfHdjIGi4xCRBbHEz+/WZjUjgNQ0y3dlQ5aBoV18WP5I0SYNCIWdWsLeU2dx4HS56DhERBaFBdCGVNTU/T3x8yBO/EzK5ufmgNE9Gm7ourAiDhERNWABtCHfp55GpaEeHXyccVMnb9FxiIS7cDPIz/vzUaSvEZyGiMhysADaCJNJxoqkUwCAKQPDOPEzEYCeQR6ICvVEnVHGV8mcEoaI6AIWQBux7Vgxskqq4Oqgwd19gkTHIbIYFy6HWLX7FGrqLr3MIxGR0rAA2ogvzy97Nb5vMJy1GrFhiCxIXDc/BLo74ExVLX7ely86DhGRRWABtAGZRZXYfqwYkgRMjgkTHYfIomjUKjx4/u/FlzuzwZmviIhYAG3CivOjf8PC/RDSxklsGCILdH//YDjYqXC4QI/krFLRcYiIhGMBtHLl5+rwfVrD1C9TB4WJDUNkoTyc7M3XxnJKGCIiFkCr9+3eXFTXGtHZzwUDO7QRHYfIYk0dGAYA2Hi4ELml1WLDEBEJxgJoxYwmGSvNU7+049QvRFfQyc8VN3Xyhiz/fdkEEZFSsQBasS1HipBTWg13RzuM7R0oOg6RxbswJczavbmoNNQLTkNEJA4LoBX7clfDtUwT+gXDyZ5TvxBdzZDOPmjv7YyKmnp8f37ZRCIiJWIBtFLHCiuwM/MMVBLwYEyo6DhEVkGlkjDl/M1Sy3dlw2TilDBEpEwsgFZq+flrmIZH+CPIk1O/EDXVuD5BcHXQIKukCluPFYmOQ0QkBAugFSqvrsMP56d+mcKpX4iuibNWgwn9ggE0TAxNRKRELIBWaO3eHNTUmRDu74rodl6i4xBZnckxYVBJwI7jJcgsqhAdh4io1bEAWpl6owkrdjVM/TJ1UBinfiG6DsFeTojt6gfg78spiIiUhAXQyvyRUYS8snPwdLLDmMi2ouMQWa2p56eE+T41D+XVdYLTEBG1LhZAK7P8/NQv9/cPgYOdWnAaIus1oL0Xwv1dca7OiG/25oqOQ0TUqlgArUhGgR67T5ZCrZIwaQCnfiG6EZIkmdfPXpGUDSOnhCEiBWEBtCLLz9+xOKKbPwI9HMWGIbIBYyLbwtPJDqfPnsOmw4Wi4xARtRoWQCtRWlWLH9PzAMA8akFEN8bBTo37+4cA+PvyCiIiJWABtBJr9uTAUG9C97ZuiAr1FB2HyGY8GBMKtUrC7pOlyCjQi45DRNQqWACtQL3RhP8kNUz9MmVgO079QtSMAtwdMaK7P4C/L7MgIrJ1LIBWYOPhQhSU16CNsz1u7xkgOg6R
zXn4/GUVP6bnobSqVmwYIqJWwAJoBb7c2XBt0gPRnPqFqCX0CfFEj7buMNSb8HVKjug4REQtjgXQwh3MK8ee7LPQcOoXohbz31PC/CfpFOqMJrGBiIhaGAughbuwTNWoHgHwc3MQG4bIho3uGQBvFy10+hokHNSJjkNE1KJYAC1YSaUB69PzAQBTOPULUYvSatSYGH1hSphssWGIiFoYC6AFW5OSg1qjCb2C3NE72EN0HCKbN3FACOzUElJPncX+02Wi4xARtRirKoALFy5EWFgYHBwcEB0djZSUlCvu/+233yI8PBwODg7o0aMHfv3110bPy7KMefPmISAgAI6OjoiNjcXx48db8hSarM5own92N0z9MnUQp34hag2+rg64vWcgAE4JQ0S2zWoK4Nq1azFnzhzMnz8faWlp6NWrF+Li4lBUVHTJ/Xft2oX7778f06ZNw19//YWxY8di7NixOHjwoHmft99+Gx999BEWLVqE5ORkODs7Iy4uDjU1Na11Wpf120EdCvUG+LhqMaoHp34hai1TBoYBAH7en4+iCvH/W0BE1BIkWZatYgX06Oho9OvXD5988gkAwGQyITg4GE888QTmzp170f7jx49HVVUVNmzYYH5swIABiIyMxKJFiyDLMgIDA/HMM8/g2WefBQCUl5fDz88Py5cvx4QJE66aSa/Xw93dHeXl5XBzc2umM21w96c7kZZThtmxnTA7tnOzHpuIrox//4hsW0t+flsLqxgBrK2tRWpqKmJjY82PqVQqxMbGIikp6ZKvSUpKarQ/AMTFxZn3z8rKgk6na7SPu7s7oqOjL3tMg8EAvV7faGsJ+3LLkJZTBju1hAfOX5RORK1n6qB2AICvdufAUG8UnIZImc5UGnA4n8szthSrKIAlJSUwGo3w8/Nr9Lifnx90uktP16DT6a64/4X/ey3HXLBgAdzd3c1bcHDwdZ3P1axIygYA3N4zEL6unPqFqLWN6O4PfzcHlFQa8OuBAtFxiBTpP7tPYdRHOzD/p4NX35mumVUUQEsRHx+P8vJy85abm9siP+f5uHDMuqUjpg1u1yLHJ6Irs1Or8GBMw8TrX+7MhpVcKUNkM2rrTViV3LAqT1SYl+A0tskqCqC3tzfUajUKCwsbPV5YWAh/f/9Lvsbf3/+K+1/4v9dyTK1WCzc3t0ZbS/B3d8CzcV3Qva17ixyfiK7u/v4hsNeosP90OdJyzoqOQ6Qovx0sQHGFAb6uWozsfunPZLoxVlEA7e3tERUVhcTERPNjJpMJiYmJiImJueRrYmJiGu0PAJs2bTLv365dO/j7+zfaR6/XIzk5+bLHJCLl8HK2x9jIhilhvuSUMESt6sLfuUkDQmGntoqqYnWs5l2dM2cOFi9ejBUrViAjIwMzZsxAVVUVpk6dCgCYPHky4uPjzfs/9dRTSEhIwHvvvYcjR47gpZdewt69ezFr1iwADWt/zp49G6+99hrWr1+PAwcOYPLkyQgMDMTYsWNFnCIRWZgpAxsuw/jtoA4F5ecEpyFShvTcMqTnlsFercL9/XkjZEvRiA7QVOPHj0dxcTHmzZsHnU6HyMhIJCQkmG/iyMnJgUr1d58dOHAgVq9ejX//+9/45z//iU6dOuHHH39E9+7dzfs8//zzqKqqwqOPPoqysjIMHjwYCQkJcHDgjRdEBEQEuiG6nReSs0rx1e5TeC4uXHQkIpu34vxSjLf3DICPq1ZsGBtmNfMAWiLOI0Rk+xIO6vDYV6nwdLJDUvwwONipRUcisllFFTUY9OZm1Bll/DRzEHq10DKo/Py2oq+AiYhEuC3CD209HHG2ug7r0/NFxyGyaauTc1BnlNEnxKPFyh81YAEkIroCtUrCQwMbpoRZtjOLU8IQtZD/nvplyiBOg9bSWACJiK5ifN8QONqpcURXgd0nS0XHIbJJnPqldbEAEhFdhbuTHe7u0xYAsHxXluA0RLaJU7+0Lr7DRERNMGVgGABg0+FC5JZWiw1DZGM4
9UvrYwEkImqCTn6uuKmTN0xywxqlRNR8zFO/9OLUL62FBZCIqImmDgoDAKxJyUF1bb3YMEQ2oqiiBhv2N9xhf2GknVoeCyARURMN7eyLsDZO0NfU44e0PNFxiGzC18m55qlfegZ5iI6jGCyARERNpFJJeOj8CMXyXdmcEoboBtXWm/BVcsMlFZz6pXWxABIRXYN7ooLgotUgs6gSf2aWiI5DZNU49Ys4LIBERNfA1cEO90QFAfh72goiuj6c+kUcvttERNdoysAwSBKw+UgRskqqRMchskqc+kUsFkAiomsU5u2MW7r4Avh7+goiujac+kUsFkAioutwYUqY71JPo6KmTmwYIivDqV/EYwEkIroOgzt6o6OvCyoN9fgu9bToOERWZdXuHE79IhgLIBHRdZAkyTxysWJXNkwmTglD1BSGeiNWnZ/6ZSqnfhGGBZCI6Drd3act3Bw0yD5Tja3HikTHIbIKP+8rQEllLQLcHTCCU78IwwJIRHSdnOw1mHD+7kVOCUN0dbIs48udWQCAB2M49YtIfOeJiG7AgwNCoZKAHcdLcLywQnQcIouWklWKQ/l6ONipcH8/Tv0iEgsgEdENCPZywm0RfgAalocjosu7MFJ+V+8geDrbiw2jcCyAREQ36MKF7D+k5aG8mlPCEF1Kbmk1Nh7WAQAePj+NEonDAkhEdIOi23kh3N8V5+qMWLs3R3QcIou0MikbJhm4qZM3Ovm5io6jeCyAREQ3SJIkPHx+FHDFrlOoN5oEJyKyLFWGeqzZkwsA5r8rJBYLIBFRM7gzMhCeTnbIKzuHPzI4JQzRf/s+7TQqaurR3tsZQzr7iI5DYAEkImoWDnZqPBB9YUqYLMFpiCyHySRj+fmbPx4aGAaVShIbiACwABIRNZsHB4RBrZKQnFWKw/l60XGILMK2Y8U4WVIFVwcN7okKEh2HzmMBJCJqJv7uDhh5fmWD5bs4CkgEAMvOj4iP7xsMZ61GcBq6gAWQiKgZXZgS5sf0fJypNAhOQyTW8cIK7DheApXU8PUvWQ4WQCKiZtQnxAM9g9xRW28y3/VIpFRfnp8c/bYIPwR7OYkNQ42wABIRNSNJkjD1/CS3/0k6hTpOCUMKVVZdix/STgPg1C+WiAWQiKiZjeoRAG8XLXT6GiQc1ImOQyTE1ym5qKkzISLADf3beYmOQ//D4gtgaWkpJk6cCDc3N3h4eGDatGmorKy84v5PPPEEunTpAkdHR4SEhODJJ59EeXl5o/0kSbpoW7NmTUufDhEpgFajxqQBnBKGlKvOaMLKpGwAwMOD20GSOPWLpbH4Ajhx4kQcOnQImzZtwoYNG7B9+3Y8+uijl90/Pz8f+fn5ePfdd3Hw4EEsX74cCQkJmDZt2kX7fvnllygoKDBvY8eObcEzISIleSA6BHZqCWk5ZdiXWyY6DlGr+v2QDgXlNfB2sccdvQJEx6FLsOj7sTMyMpCQkIA9e/agb9++AICPP/4Yo0aNwrvvvovAwMCLXtO9e3d8//335j936NABr7/+OiZNmoT6+npoNH+fsoeHB/z9/Vv+RIhIcXxdHXBHz0D88Fcelu/Kxv+NjxQdiajVLPuzYeT7gehQaDVqwWnoUix6BDApKQkeHh7m8gcAsbGxUKlUSE5ObvJxysvL4ebm1qj8AcDMmTPh7e2N/v37Y9myZZBl+YrHMRgM0Ov1jTYiosu5MCXMhv35KKqoEZyGqHWk55YhLacMdmrJfCkEWR6LLoA6nQ6+vr6NHtNoNPDy8oJO17QLq0tKSvDqq69e9LXxK6+8gm+++QabNm3CuHHj8Pjjj+Pjjz++4rEWLFgAd3d38xYcHHxtJ0REitIjyB1RoZ6oM8pYtTtHdByiVnFh9O+OnoHwdXUQnIYuR0gBnDt37iVvwvjv7ciRIzf8c/R6PUaPHo2IiAi89NJLjZ578cUXMWjQIPTu3RsvvPACnn/+ebzzzjtXPF58fDzKy8vNW24u5/gioiu7MCXMquRT
MNQbxYYhamF5Zefwy4ECAA03f5DlEnIN4DPPPIMpU6ZccZ/27dvD398fRUVFjR6vr69HaWnpVa/dq6iowIgRI+Dq6op169bBzs7uivtHR0fj1VdfhcFggFarveQ+Wq32ss8REV1KXDd/+Ls5QKevwS/7C3B3H66FSrZrxa5sGE0yYtq3Qfe27qLj0BUIKYA+Pj7w8fG56n4xMTEoKytDamoqoqKiAACbN2+GyWRCdHT0ZV+n1+sRFxcHrVaL9evXw8Hh6kPQ6enp8PT0ZMEjomZlp1bhwZhQvPP7UXy5Mxt39W7LKTHIJlXU1OHr5IZLHabfzNE/S2fR1wB27doVI0aMwPTp05GSkoKdO3di1qxZmDBhgvkO4Ly8PISHhyMlJQVAQ/kbPnw4qqqqsHTpUuj1euh0Ouh0OhiNDV+//Pzzz1iyZAkOHjyIzMxMfPbZZ3jjjTfwxBNPCDtXIrJd9/cPgVajwoG8cqTlnBUdh6hFrN2TiwpDPTr4OGNoZ9+rv4CEsuhpYABg1apVmDVrFoYNGwaVSoVx48bho48+Mj9fV1eHo0ePorq6GgCQlpZmvkO4Y8eOjY6VlZWFsLAw2NnZYeHChXj66achyzI6duyI999/H9OnT2+9EyMixfBytsfYyLZYuzcXy3ZmIyqUqyKQbak3mvDlzmwAwLTB7aFScZTb0kny1eY+ocvS6/Vwd3c3TzNDRHQ5GQV6jPxwB9QqCVufHYpgLyfRkYiazYb9+Zi1+i+0cbbHzrm3wsHOsuf+4+e3hX8FTERkK7oGuOGmTt4wmmTzSAmRLZBlGYt3NEz9MmlAqMWXP2rAAkhE1Eqm39QeALBmTw7Kq+sEpyFqHntPncW+3DLYaxpueCLrwAJIRNRKburkjXB/V1TXGrEq5ZToOETNYsmOkwCAu3u3hbcLZ9KwFiyAREStRJIk8yjg8p3ZnBiarF52SRU2Hi4EAEzjxM9WhQWQiKgV3dErEP5uDiiqMGB9er7oOEQ3ZNnOLMgyMLSLDzr5uYqOQ9eABZCIqBXZa1Tm5eEW7zgJTsRA1qqsuhbf7j0N4O/rW8l6sAASEbWy+6ND4KLV4FhhJbYdKxYdh+i6rErOwbk6I7oGuGFghzai49A1YgEkImplbg52mNAvGEDDKCCRtamtN2HFrmwAwCOD23F5QyvEAkhEJMDUwe2gVknYmXkGB/PKRcchuibr9+WjqMIAPzct7ugVKDoOXQcWQCIiAdp6OOL2ngEA/p5Gg8gayLJs/p2dHBMGew2rhDXifzUiIkEuXDj/8/4C5JedE5yGqGm2Hi3GEV0FnO3VmBTNiZ+tFQsgEZEg3du6Y2CHNueXh8sSHYeoSRZtOwEAuL9/CNyd7ASnoevFAkhEJND0mxtGAb9OyYW+hsvDkWX7K+cskrNKYaeWMO0mTvxszVgAiYgEGtrZB518XVBpqMealBzRcYiu6MLo35jItghwdxSchm4ECyARkUCSJJlHAZf9mY3aepPgRESXdqK40rzs2z9u5sTP1o4FkIhIsDGRgfB11UKnr8GP6Xmi4xBd0uLtJyHLQGxXXy77ZgNYAImIBNNq1Jg2uOF6qkXbTsBo4vJwZFmK9DX4Ia3hHyePDekgOA01BxZAIiILMHFAKNwcNDhZXIVNh3Wi4xA1smxnNmqNJvQN9UTfMC/RcagZsAASEVkAF60GDw0MAwB8uvUEZJmjgGQZ9DV1WLX7FADgHxz9sxksgEREFmLKwDA42Kmw/3Q5dp04IzoOEQBgdXIOKgz16OTrgmHhvqLjUDNhASQishBtXLSY0C8EAPDp1kzBaYgAQ70Ry/5smKT80ZvbQ6WSBCei5sICSERkQR65qR00Kgk7M89gX26Z6DikcD/+lYeiCgP83RwwJrKt6DjUjFgAiYgsSJCnE+6MDAQAfLb1hOA0pGQmk4zPt58EAEwb3A72GlYGW8L/mkREFmbG+Qvtfz+sQ2ZRpeA0pFS/H9LhZHEV3Bw0uD86RHQc
amYsgEREFqaTnytui/CDLAOfb+MoILU+WZbxyZaG61CnDAyDi1YjOBE1NxZAIiILNGNowyjgur/ykF92TnAaUpqtR4txKF8PJ3s1pg5qJzoOtQAWQCIiC9QnxBMD2nuh3iRjyY4s0XFIQWRZxsebjwMAJg0IhaezveBE1BJYAImILNTjQzsCAL5OyUFpVa3gNKQUSSfPIC2nDPYaFR65iaN/tooFkIjIQt3UyRvdAt1wrs6I5Ts5Ckit45PNDdf+3d8vGL6uDoLTUEthASQislCSJGHWLQ2jgF/uykb5uTrBicjWpZ46i10nzkCjkvAol32zaSyAREQWLK6bPzr7uaCiph4rdmWLjkM2buH5O3/H9QlCWw9HwWmoJVl8ASwtLcXEiRPh5uYGDw8PTJs2DZWVV54Xa+jQoZAkqdH22GOPNdonJycHo0ePhpOTE3x9ffHcc8+hvr6+JU+FiOiaqVQSnri1EwBg6Z9ZqKjhKCC1jIN55dh8pAgq6e+70Ml2WXwBnDhxIg4dOoRNmzZhw4YN2L59Ox599NGrvm769OkoKCgwb2+//bb5OaPRiNGjR6O2tha7du3CihUrsHz5csybN68lT4WI6LqM6hGADj7OKD9Xh5VJp0THIRt1YfTvjl6BCPN2FpyGWppFF8CMjAwkJCRgyZIliI6OxuDBg/Hxxx9jzZo1yM/Pv+JrnZyc4O/vb97c3NzMz23cuBGHDx/GV199hcjISIwcORKvvvoqFi5ciNpa3mlHRJZF/V+jgEt2nESVgd9WUPM6XliBhEM6AMDM89edkm2z6AKYlJQEDw8P9O3b1/xYbGwsVCoVkpOTr/jaVatWwdvbG927d0d8fDyqq6sbHbdHjx7w8/MzPxYXFwe9Xo9Dhw5d9pgGgwF6vb7RRkTUGm7vGYB23s44W12Hr3ZzFJCa16dbT0CWgbhufujs5yo6DrUCiy6AOp0Ovr6+jR7TaDTw8vKCTqe77OseeOABfPXVV9iyZQvi4+Pxn//8B5MmTWp03P8ufwDMf77ScRcsWAB3d3fzFhwcfD2nRUR0zTRqlXlk5ovtJ3Gu1ig4EdmKk8WV+Ck9DwAw65ZOgtNQaxFSAOfOnXvRTRr/ux05cuS6j//oo48iLi4OPXr0wMSJE7Fy5UqsW7cOJ07c2Jqa8fHxKC8vN2+5ubk3dDwiomsxJjIQwV6OOFNVi1XJHAWk5vHx5kyYZGBYuC96BLmLjkOtRMjqzs888wymTJlyxX3at28Pf39/FBUVNXq8vr4epaWl8Pf3b/LPi46OBgBkZmaiQ4cO8Pf3R0pKSqN9CgsLAeCKx9VqtdBqtU3+uUREzclOrcLMoR0x94cD+Hz7SUwaEAoHO7XoWGTFTvzX6N/s2M6C01BrElIAfXx84OPjc9X9YmJiUFZWhtTUVERFRQEANm/eDJPJZC51TZGeng4ACAgIMB/39ddfR1FRkfkr5k2bNsHNzQ0RERHXeDZERK3n7j5B+HhzJvLKzmFNSg6mDOJSXXT9Pko8DpMMxHb14+ifwlj0NYBdu3bFiBEjMH36dKSkpGDnzp2YNWsWJkyYgMDAQABAXl4ewsPDzSN6J06cwKuvvorU1FRkZ2dj/fr1mDx5Mm6++Wb07NkTADB8+HBERETgwQcfxL59+/D777/j3//+N2bOnMkRPiKyaPYalXmOts+2nUBNHa8FpOuTWVSB9fsaZtSYHctr/5TGogsg0HA3b3h4OIYNG4ZRo0Zh8ODB+OKLL8zP19XV4ejRo+a7fO3t7fHHH39g+PDhCA8PxzPPPINx48bh559/Nr9GrVZjw4YNUKvViImJwaRJkzB58mS88sorrX5+RETX6t6+QQhwd0Ch3oA1KTmi45CV+jAxE7IMDI/wQ/e2HP1TGkmWZVl0CGul1+vh7u6O8vLyRvMMEhG1tK92n8K/fzwIbxctdjx/CxzteS0gNd2xwgrEfbAdsgz8+uRNiAhU1mcYP7+tYASQiIgudl/fYAR5
OqKk0oCVSdmi45CV+TDxOGQZGNHNX3HljxqwABIRWSF7jQpPDWu4bmvRthNcI5ia7KiuAr8eKAAAPMVr/xSLBZCIyErd1bst2p9fHeTLndmi45CV+DDxGGQZGNXDH10DOPqnVCyARERWSqNWYfZtDXO3Ld5+EmXVXMucruxgXjl+PaCDJAFPDeO8f0rGAkhEZMVu7xGALn6uqDDUY/GOk6LjkIV75/ejAIA7ewWiiz/X/FUyFkAiIiumUkmYM7xhJOfLndkoqTQITkSWavfJM9h2rBgalYQ5t3H0T+lYAImIrNzwCD/0aOuO6lojFm29sTXPyTbJsoy3E44AACb0D0ZoG2fBiUg0FkAiIisnSRKeOT8KuHL3KejKawQnIkuTmFGEtJwyONip8OStvPOXWACJiGzCkM4+6Bvqidp6Ez7445joOGRBjCbZfO3flIHt4OvmIDgRWQIWQCIiGyBJEuaODAcAfLM3F8cLKwQnIkuxfl8ejhZWwM1BgxlDOoiOQxaCBZCIyEb0DfPCbRF+MMnAWwlHRcchC1Bbb8L7mxpGhP8xpAPcnewEJyJLwQJIRGRDXhjRBSoJ+COjEHuyS0XHIcHW7MlBbuk5eLtoMXVQmOg4ZEFYAImIbEhHX1eM7xcMAFjwawZkWRaciESpMtTjo8RMAMBTwzrCyV4jOBFZEhZAIiIbMzu2MxzsVEjLKcPvhwpFxyFBPt9+EiWVBoS2ccL4fiGi45CFYQEkIrIxfm4OeGRwewDA278fQb3RJDgRtTZdeQ2+2N4wJ+TcEeGw1/DjnhrjbwQRkQ36x5D28HK2x8niKqzdmys6DrWy9zYeRU2dCVGhnhjR3V90HLJALIBERDbI1cEOT9zaEQDwwR/HUWWoF5yIWsvhfD2+SzsNAPjX6K6QJElwIrJELIBERDZqYnQoQrycUFxhwOfbT4qOQ61AlmW88WsGZBkY3TMAfUI8RUciC8UCSERko+w1KsSfnxz6820nkFd2TnAiamnbjhXjz8wS2KtVeCEuXHQcsmAsgERENmxEd3/0b+cFQ70Jb/12RHQcakH1RhPe+DUDAPDQwFCEtHESnIgsGQsgEZENkyQJ826PgCQB6/flI/UUJ4e2VWv25OJYYSXcHe0w65ZOouOQhWMBJCKycd3bumN834bJoV/5+TBMJk4ObWvKqmvx7saG5f+eju3EJd/oqlgAiYgU4JnhXeCi1WDf6XKs+ytPdBxqZu9vOoay6jp08XPFpAGhouOQFWABJCJSAB9XLWadnxbm7d+PcFoYG5JRoMdXu08BAObfGQGNmh/tdHX8LSEiUoipg8IQ4uWEQr0Bn27NFB2HmoEsy3hp/SGYZGB0jwAM7OAtOhJZCRZAIiKF0GrU+NforgCAxduzcLK4UnAiulG/HChAclYpHOxUiB/FaV+o6VgAiYgUZHiEH4Z28UGt0YT56w9BlnlDiLU6V2vEG780TPsyY0hHBHly2hdqOhZAIiIFkSQJL9/ZDfYaFXYcL8GvB3SiI9F1+nRrJvLLa9DWwxH/GNJedByyMiyAREQKE9rGGY8P7QAAeGXDIVTyhhCrk1lUgUXbTgAAXrw9Ag52asGJyNqwABIRKdBjQzogtE3DDSEf/nFMdBy6BrIs45/rDqLOKGNYuC/iuvmJjkRWiAWQiEiBHOzUeOnObgCAZTuzcVRXITgRNdW3qaeRklUKRzs1Xh7TDZIkiY5EVsjiC2BpaSkmTpwINzc3eHh4YNq0aaisvPyda9nZ2ZAk6ZLbt99+a97vUs+vWbOmNU6JiMgi3NKlYfTIaJLx7x8PcIUQK1BaVYsF59f7nR3biTd+0HWz+AI4ceJEHDp0CJs2bcKGDRuwfft2PProo5fdPzg4GAUFBY22l19+GS4uLhg5cmSjfb/88stG+40dO7aFz4aIyLLMu6MbnOzV2JN9FqtTckTHoat449cMnK2uQ7i/Kx4e3E50HLJiGtEBriQjIwMJCQnYs2cP+vbtCwD4
+OOPMWrUKLz77rsIDAy86DVqtRr+/v6NHlu3bh3uu+8+uLi4NHrcw8Pjon2JiJSkrYcjnovrgpd/Pow3fzuCYV19EeDuKDoWXULSiTP4LvU0JAl4/a4esOOKH3QDLPq3JykpCR4eHubyBwCxsbFQqVRITk5u0jFSU1ORnp6OadOmXfTczJkz4e3tjf79+2PZsmVXnQ/LYDBAr9c32oiIrN3kmDD0CfFApaEe/1p3kHMDWqCaOiP+9eMBAMD9/UMQFeopOBFZO4sugDqdDr6+vo0e02g08PLygk7XtLmrli5diq5du2LgwIGNHn/llVfwzTffYNOmTRg3bhwef/xxfPzxx1c81oIFC+Du7m7egoODr+2EiIgskFol4a1xPWGvVmHzkSKs35cvOhL9j//bdAwni6vg46rFC3Fc8YNunJACOHfu3MveqHFhO3LkyA3/nHPnzmH16tWXHP178cUXMWjQIPTu3RsvvPACnn/+ebzzzjtXPF58fDzKy8vNW25u7g1nJCKyBJ38XDHr1o4AgJd/PozSqlrBieiCtJyzWLzjJABgwV094O5kJzgR2QIh1wA+88wzmDJlyhX3ad++Pfz9/VFUVNTo8fr6epSWljbp2r3vvvsO1dXVmDx58lX3jY6OxquvvgqDwQCtVnvJfbRa7WWfIyKydo8N6YBf9hfgaGEFXvn5ED6Y0Ft0JMWrqTPi2W/3wSQDd/dui9gIzvlHzUNIAfTx8YGPj89V94uJiUFZWRlSU1MRFRUFANi8eTNMJhOio6Ov+vqlS5fizjvvbNLPSk9Ph6enJwseESmWvUaFt+7pibs/3Ykf0/MxqkcAhnfjjXIi/d8ff3/1O++OCNFxyIZY9DWAXbt2xYgRIzB9+nSkpKRg586dmDVrFiZMmGC+AzgvLw/h4eFISUlp9NrMzExs374djzzyyEXH/fnnn7FkyRIcPHgQmZmZ+Oyzz/DGG2/giSeeaJXzIiKyVJHBHph+c8O6svE/HEBxhUFwIuVKyzmLxdsbvvp9464e8HCyF5yIbIlFF0AAWLVqFcLDwzFs2DCMGjUKgwcPxhdffGF+vq6uDkePHkV1dXWj1y1btgxBQUEYPnz4Rce0s7PDwoULERMTg8jISHz++ed4//33MX/+/BY/HyIiSzfnts4I93fFmapaxP+wn3cFC1BTZ8Rz57/6vat3W9zGr36pmUky/2ZfN71eD3d3d5SXl8PNzU10HCKiZpNRoMeYT3ai1mjCW+N6YHy/ENGRFOWl9YewfFc2fFy12PT0zRz9a2b8/LaCEUAiImp9XQPc8GxcZwANdwWfOlMlOJFybDlShOW7sgEA79zTk+WPWgQLIBERXdK0we0R3c4L1bVGzPlmH4xcK7jFFVcY8Nx3+wAAUwaGYWgX36u8guj6sAASEdElqVUS3ruvF1y0GqSeOouFWzJFR7Jpsizj+e/2oaSyFl38XDF3JCd8ppbDAkhERJcV5OmEV8Z0AwB88Mcx7D55RnAi27Uy6RS2HC2GvUaFj+7vDQc7tehIZMNYAImI6Iru7hOEe6KCYJKBJ7/+CyWVnBqmue0/XYbXf8kAAMSPDEcXf1fBicjWsQASEdFVvTKmGzr5uqCowoCn16bDxOsBm01ZdS1mfJWGWqMJwyP8MGVgmOhIpAAsgEREdFVO9hp8OrEPHO3U2HG8BJ9u5fWAzcFkkvHMN/uQV3YOoW2c8M69vSBJkuhYpAAsgERE1CSd/Fzx6tjuAID3Nx3D9mPFghNZv0XbTyDxSBHsNSp8OrEP3B3tREcihWABJCKiJrsnKgj39W24HvCJr//i/IA3YGdmCd79/SgA4NUx3dAt0F1wIlISFkAiIromr47tjshgD5Sfq8OjK1NRZagXHcnqZJdU4fFVaTDJF0p1sOhIpDAsgEREdE20GjU+fzAKPq5aHC2swDPf7ONNIddAX1OHaSv2oPxcHSKDPfDa2O687o9aHQsgERFdMz83ByyaFAV7tQoJh3T4
MPG46EhWwWiS8cTqv3CiuAoB7g74YnIU5/sjIVgAiYjoukSFeuLVsQ2TRH+YeBzfpZ4WnMjyvfFrBrYdK4aDnQqLJ/eFr6uD6EikUCyARER03cb3C8FjQzoAAOZ+vx87jvPO4MtZsuMklv6ZBQB4795IdG/Lmz5IHBZAIiK6Ic/HdcGdvQJRb5Ix46s0HM7Xi45kcX5Kz8Nr51f6eH5EF4zuGSA4ESkdCyAREd0QlUrCO/f2xID2Xqg01GPq8hTkllaLjmUxdhwvxrPf7gMATB0UhhnnR0yJRGIBJCKiG9ZwZ3BfdPZzQaHegAeW7EZB+TnRsYTbl1uGx/6TijqjjDt6BeLF0RG845csAgsgERE1C3dHO6x8OBqhbZyQW3oOExcno6iiRnQsYfafLsODS5NRVWvE4I7eePfenlCpWP7IMrAAEhFRs/F3d8Dq6QPQ1sMRJ0uqMHFxMs5UGkTHanX7T5dh0pJk6Gvq0TfUE4sejIJWw+leyHKwABIRUbNq6+GI1dOj4e/mgONFlZi4JBlFeuWMBP5v+Vv+cH+4aDWiYxE1wgJIRETNLrSNM1ZNj4aPqxZHdBW4Z1EScs7Y/o0he7JLzeUviuWPLBgLIBERtYgOPi74/rGBCPFyQk5pNe5ZtAtHdRWiY7WYTYcLG438rWD5IwvGAkhERC0mpI0TvnssBl38XFFUYcB9nychJatUdKxmt3ZPDv7xn70w1JswLNwX/5kWzfJHFo0FkIiIWpSvmwO++UcM+oR4oPxcHSYu2Y1v9uSKjtUs6o0mvLbhMF74/gBMMnBvVBA+fzAKjva84YMsGwsgERG1OHcnO6x6ZABG9whAnVHG89/vx2sbDqPeaBId7bqVn6vDwyv2Ysn55d2evLUj3r6nJzRqfrSS5eNvKRERtQpHezU+vr83Zsd2AgAs+TMLDyxOtsoJo/efLsOdn/yJ7ceK4WCnwsIH+mDO8C6c5JmsBgsgERG1GpVKwuzYzvh0Yh+4aDVIyS7FqA93YPORQtHRmsRkkrF4+0mM+2wXTp2pRlsPR3z32ECu7UtWR5JlWRYdwlrp9Xq4u7ujvLwcbm5uouMQEVmV7JIqzPo6DQfz9ACA+/oG4V+jI+DuaCc42aXlnKnGP9cdwJ+ZJQCAkd398ebdPeHuZJl56fL4+c0CeEP4C0REdGMM9Ua89dtRLNvZcB2dr6sWr4zpjrhufhbzdWq90YQvd2bjvU1HUVNnglajwrw7IvBA/xCLyUjXhp/fLIA3hL9ARETNY092KV74bj9OllQBAAZ2aIN/je6KboHuwjLJsowtR4uw4NcjOF5UCQCIad8GC+7ugTBvZ2G56Mbx85sF8IbwF4iIqPnU1Bnx8ebjWLw9C7VGEyQJuCuyLR6/pQM6+rq2Wg5ZlpGSVYoPE49j14kzAAAPJzvEjwzHfX2DOepnA/j5bQU3gbz++usYOHAgnJyc4OHh0aTXyLKMefPmISAgAI6OjoiNjcXx48cb7VNaWoqJEyfCzc0NHh4emDZtGiorK1vgDIiIqCkc7NR4Li4cic8Mwe09AyDLwA9/5SH2/e14dOVeJJ88g5Ycs6gzmvDbgQLc9ekujP9iN3adOAN7jQr/uLk9tj17C8b341e+ZDssfgRw/vz58PDwwOnTp7F06VKUlZVd9TVvvfUWFixYgBUrVqBdu3Z48cUXceDAARw+fBgODg4AgJEjR6KgoACff/456urqMHXqVPTr1w+rV69ucjb+C4KIqOXsyy3Dwi2Z2Hj47zuEQ9s44Z4+QRjdMwDtvJ1vuJCZTDIO5JVj3V95+HlfPs5U1QIA7DUq3BMVhMeHdkCQp9MN/QyyPPz8toICeMHy5csxe/bsqxZAWZYRGBiIZ555Bs8++ywAoLy8HH5+fli+fDkmTJiAjIwMREREYM+ePejbty8AICEhAaNGjcLp06cRGBjYpEz8BSIianmZRRVYsiMLP+/LR1Wt0fx4iJcThnbxQe8QD3QPdEd7Hxeo
VVcuhDV1RhwrrMDhfD2STp7Bn8dLzKUPALxd7DGhXwgeGhgGH1dti50TicXPb8DmFirMysqCTqdDbGys+TF3d3dER0cjKSkJEyZMQFJSEjw8PMzlDwBiY2OhUqmQnJyMu+6665LHNhgMMBgM5j/r9fqWOxEiIgIAdPR1xZvjeuLF2yPw20EdfvwrD8lZZ5BTWo2VSaewMukUgIZROz83LXxdHeDpZA+1CpAgodZoQkmlAWcqa1FQfg6m/xn2cLZX45ZwX4zrE4SbOnlzJQ9SBJsrgDqdDgDg5+fX6HE/Pz/zczqdDr6+vo2e12g08PLyMu9zKQsWLMDLL7/czImJiKgpnLUa3BMVhHuiglBlqMeuE2ewM7MEB/PKcbhAj+paI3JLzyG39Mori3g526NrgCsigz1wcycf9A7xhL2GpY+URUgBnDt3Lt56660r7pORkYHw8PBWStQ08fHxmDNnjvnPer0ewcHBAhMRESmTs1aD2yL8cFtEwz/2TSYZeWXnUFRRgyK9AWer62CSZcgA7NUS2jhr0cbFHm09HOHjquXNHKR4QgrgM888gylTplxxn/bt21/Xsf39/QEAhYWFCAj4e2mewsJCREZGmvcpKipq9Lr6+nqUlpaaX38pWq0WWi2vCSEisjQqlYRgLycEe/GGDaKmEFIAfXx84OPj0yLHbteuHfz9/ZGYmGgufHq9HsnJyZgxYwYAICYmBmVlZUhNTUVUVBQAYPPmzTCZTIiOjm6RXERERESWwuIvesjJyUF6ejpycnJgNBqRnp6O9PT0RnP2hYeHY926dQAASZIwe/ZsvPbaa1i/fj0OHDiAyZMnIzAwEGPHjgUAdO3aFSNGjMD06dORkpKCnTt3YtasWZgwYUKT7wAmIiIislYWfxPIvHnzsGLFCvOfe/fuDQDYsmULhg4dCgA4evQoysvLzfs8//zzqKqqwqOPPoqysjIMHjwYCQkJ5jkAAWDVqlWYNWsWhg0bBpVKhXHjxuGjjz5qnZMiIiIiEshq5gG0RJxHiIiIyPrw89sKvgImIiIioubFAkhERESkMCyARERERArDAkhERESkMCyARERERArDAkhERESkMCyARERERArDAkhERESkMCyARERERApj8UvBWbILi6jo9XrBSYiIiKipLnxuK3kxNBbAG1BRUQEACA4OFpyEiIiIrlVFRQXc3d1FxxCCawHfAJPJhPz8fLi6ukKSpGY9tl6vR3BwMHJzcxW7TuHl8L25Mr4/V8b358r4/lwZ35/Ls6b3RpZlVFRUIDAwECqVMq+G4wjgDVCpVAgKCmrRn+Hm5mbxf5FE4XtzZXx/rozvz5Xx/bkyvj+XZy3vjVJH/i5QZu0lIiIiUjAWQCIiIiKFYQG0UFqtFvPnz4dWqxUdxeLwvbkyvj9Xxvfnyvj+XBnfn8vje2NdeBMIERERkcJwBJCIiIhIYVgAiYiIiBSGBZCIiIhIYVgAiYiIiBSGBdACLVy4EGFhYXBwcEB0dDRSUlJER7II27dvxx133IHAwEBIkoQff/xRdCSLsmDBAvTr1w+urq7w9fXF2LFjcfToUdGxLMZnn32Gnj17miepjYmJwW+//SY6lkV68803IUkSZs+eLTqKRXjppZcgSVKjLTw8XHQsi5KXl4dJkyahTZs2cHR0RI8ePbB3717RsegKWAAtzNq1azFnzhzMnz8faWlp6NWrF+Li4lBUVCQ6mnBVVVXo1asXFi5cKDqKRdq2bRtmzpyJ3bt3Y9OmTairq8Pw4cNRVVUlOppFCAoKwptvvonU1FTs3bsXt956K8aMGYNDhw6JjmZR9uzZg88//xw9e/YUHcWidOvWDQUFBebtzz//FB3JYpw9exaDBg2CnZ0dfvvtNxw+fBjvvfcePD09RUejK+A0MBYmOjoa/fr1wyeffAKgYb3h4OBgPPHEE5g7d67gdJZDkiSsW7cOY8eOFR3FYhUXF8PX1xfbtm3DzTffLDqORfLy8sI777yD
adOmiY5iESorK9GnTx98+umneO211xAZGYkPPvhAdCzhXnrpJfz4449IT08XHcUizZ07Fzt37sSOHTtER6FrwBFAC1JbW4vU1FTExsaaH1OpVIiNjUVSUpLAZGSNysvLATSUHGrMaDRizZo1qKqqQkxMjOg4FmPmzJkYPXp0o/8NogbHjx9HYGAg2rdvj4kTJyInJ0d0JIuxfv169O3bF/feey98fX3Ru3dvLF68WHQsugoWQAtSUlICo9EIPz+/Ro/7+flBp9MJSkXWyGQyYfbs2Rg0aBC6d+8uOo7FOHDgAFxcXKDVavHYY49h3bp1iIiIEB3LIqxZswZpaWlYsGCB6CgWJzo6GsuXL0dCQgI+++wzZGVl4aabbkJFRYXoaBbh5MmT+Oyzz9CpUyf8/vvvmDFjBp588kmsWLFCdDS6Ao3oAETU/GbOnImDBw/yOqX/0aVLF6Snp6O8vBzfffcdHnroIWzbtk3xJTA3NxdPPfUUNm3aBAcHB9FxLM7IkSPN/3/Pnj0RHR2N0NBQfPPNN7x8AA3/4Ozbty/eeOMNAEDv3r1x8OBBLFq0CA899JDgdHQ5HAG0IN7e3lCr1SgsLGz0eGFhIfz9/QWlImsza9YsbNiwAVu2bEFQUJDoOBbF3t4eHTt2RFRUFBYsWIBevXrhww8/FB1LuNTUVBQVFaFPnz7QaDTQaDTYtm0bPvroI2g0GhiNRtERLYqHhwc6d+6MzMxM0VEsQkBAwEX/iOratSu/JrdwLIAWxN7eHlFRUUhMTDQ/ZjKZkJiYyOuU6KpkWcasWbOwbt06bN68Ge3atRMdyeKZTCYYDAbRMYQbNmwYDhw4gPT0dPPWt29fTJw4Eenp6VCr1aIjWpTKykqcOHECAQEBoqNYhEGDBl005dSxY8cQGhoqKBE1Bb8CtjBz5szBQw89hL59+6J///744IMPUFVVhalTp4qOJlxlZWWjf3FnZWUhPT0dXl5eCAkJEZjMMsycOROrV6/GTz/9BFdXV/N1o+7u7nB0dBScTrz4+HiMHDkSISEhqKiowOrVq7F161b8/vvvoqMJ5+rqetG1os7OzmjTpg2vIQXw7LPP4o477kBoaCjy8/Mxf/58qNVq3H///aKjWYSnn34aAwcOxBtvvIH77rsPKSkp+OKLL/DFF1+IjkZXIpPF+fjjj+WQkBDZ3t5e7t+/v7x7927RkSzCli1bZAAXbQ899JDoaBbhUu8NAPnLL78UHc0iPPzww3JoaKhsb28v+/j4yMOGDZM3btwoOpbFGjJkiPzUU0+JjmERxo8fLwcEBMj29vZy27Zt5fHjx8uZmZmiY1mUn3/+We7evbus1Wrl8PBw+YsvvhAdia6C8wASERERKQyvASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSGBZAIiIiIoVhASQiIiJSmP8HzXVxKVQ86IsAAAAASUVORK5CYII=' width=640.0/>
</div>
|
HITS-AINREPO_NAMEPINKPATH_START.@PINK_extracted@PINK-master@jupyter@devel@simple_anim.ipynb@.PATH_END.py
|
{
"filename": "_tickformat.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/marker/colorbar/_tickformat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``funnel.marker.colorbar.tickformat`` string
    property (auto-generated plotly.py validator)."""
    def __init__(
        self, plotly_name="tickformat", parent_name="funnel.marker.colorbar", **kwargs
    ):
        super(TickformatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "colorbars" edit type: changing this property only redraws the
            # colorbar rather than triggering a full replot.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnel@marker@colorbar@_tickformat.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "geggo/phase-unwrap",
"repo_path": "phase-unwrap_extracted/phase-unwrap-master/unwrap/__init__.py",
"type": "Python"
}
|
from __future__ import absolute_import
from unwrap.unwrap import unwrap
|
geggoREPO_NAMEphase-unwrapPATH_START.@phase-unwrap_extracted@phase-unwrap-master@unwrap@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "cheng-zhao/FCFC",
"repo_path": "FCFC_extracted/FCFC-main/README.md",
"type": "Markdown"
}
|
# Fast Correlation Function Calculator (FCFC)


<img src="doc/logo/FCFC_logo.svg" align="right" />
## Table of Contents
- [Introduction](#introduction)
- [Compilation](#compilation)
- [Components and configurations](#components-and-configurations)
- [Acknowledgements](#acknowledgements)
## Introduction
**F**ast **C**orrelation **F**unction **C**alculator (FCFC) is a C toolkit for computing correlation functions from pair counts. It is designed in the hope of being (both time and space) efficient, portable, and user-friendly.
So far the following products are supported:
- Isotropic 2-point correlation function (2PCF, a.k.a. radial distribution function): *ξ*(*s*);
- Anisotropic 2PCF: *ξ*(*s*, *μ*);
- 2-D 2PCF: *ξ*(*s*<sub>perp</sub>, *s*<sub>para</sub>), also known as *ξ*(*s*<sub>perp</sub>, *π*);
- 2PCF Legendre multipoles: *ξ*<sub>*ℓ*</sub>(*s*);
- Projected 2PCF: *w*<sub>*p*</sub>(*s*<sub>perp</sub>).
FCFC takes advantage of 3 parallelisms that can be used simultaneously:
- Distributed-memory processes via Message Passing Interface (MPI);
- Shared-memory threads via Open Multi-Processing (OpenMP);
- Single instruction, multiple data (SIMD).
This program is compliant with the ISO C99 and IEEE POSIX.1-2008 standards, and no external library is mandatory. Thus it is compatible with most modern C compilers and operating systems. Optionally, `FITS` and `HDF5` file formats can be supported through external libraries (see [Compilation](#compilation)).
FCFC is written by Cheng Zhao (赵成), and is distributed under the [MIT license](LICENSE.txt). If you use this program in research work that results in publications, please cite the following paper:
> Zhao et al. 2020, [arXiv:2007.08997](https://ui.adsabs.harvard.edu/abs/2020arXiv200708997Z/abstract)
<sub>[\[TOC\]](#table-of-contents)</sub>
## Compilation
The building of FCFC is based on the make utility. Customisable compilation options can be set in the file [`options.mk`](options.mk), as summarised below:
| Option | Description | Dependences |
|:--------------:|---------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------|
| `CC` | Set the C compiler | — |
| `MPICC` | Set the C compiler that supports MPI | — |
| `CFLAGS` | Set optimisation flags of the C compiler | — |
| `WITH_MPI` | `T` for enabling MPI parallelism | `MPICC` |
| `WITH_OMP` | `T` for enabling OpenMP parallelism<br />(Specify the corresponding compiler flag via `OMP_FLAG`) | The [OpenMP](https://www.openmp.org/) library and compiler support |
| `WITH_SIMD` | `T` for enabling SIMD parallelism | Advanced Vector Extensions (`AVX`/`AVX2`/`AVX-512`) and compiler support |
| `SINGLE_PREC` | `T` for using single precision for floating-point calculations | — |
| `WITH_MU_ONE` | `T` for including *μ*=1 in the last *μ* bin of pair counts in (*s*, *μ*) | — |
| `WITH_CFITSIO` | `T` for enabling `FITS` format inputs<br />(Set the directories containing header and library files via `CFITSIO_INC_DIR` and `CFITSIO_LIB_DIR` respectively) | The [CFITSIO](https://heasarc.gsfc.nasa.gov/fitsio/) library |
| `WITH_HDF5` | `T` for enabling `HDF5` format inputs<br />(Set the directories containing header and library files via `HDF5_INC_DIR` and `HDF5_LIB_DIR` respectively) | The [HDF5](https://www.hdfgroup.org/solutions/hdf5/) library |
Once the setting is done, the following command should compile the code:
```bash
make
```
The compilation options used for an executable can be checked with the `--version` or `-V` command line flags, e.g.,
```bash
./FCFC_2PT -V
```
To compile only a certain component of the program (see [Components and configurations](#components-and-configurations)), the name of the component can be supplied via
```bash
make [COMPONENT_NAME]
```
<sub>[\[TOC\]](#table-of-contents)</sub>
## Components and configurations
FCFC comes with several components for different tasks. They are provided as separate executables, and must be supplied with the corresponding configurations, either via command line options or a text file with configuration parameters.
The list of available command line options can be consulted using the `-h` or `--help` flags, and a template configuration file can be printed via the `-t` or `--template` flags.
An introduction to the components and the corresponding configuration parameters is given below:
| Component | Description | Configuration parameters |
|:------------:|-----------------------------------------------------------------|:--------------------------------------:|
| FCFC_2PT | Compute 2PCF for survey-like data | [FCFC_2PT.md](doc/FCFC_2PT.md) |
| FCFC_2PT_BOX | Compute 2PCF for periodic simulation boxes<sup>[*](#tab1)</sup> | [FCFC_2PT_BOX.md](doc/FCFC_2PT_BOX.md) |
<span id="tab1">*: treat the 3<sup>rd</sup> dimension (*z*-direction) as the line of sight</span>
<sub>[\[TOC\]](#table-of-contents)</sub>
## Acknowledgements
This program benefits from the following open-source projects:
- [Fast Cubic Spline Interpolation](https://doi.org/10.5281/zenodo.3611922) (see also [arXiv:2001.09253](https://arxiv.org/abs/2001.09253))
- [https://github.com/andralex/MedianOfNinthers](https://github.com/andralex/MedianOfNinthers) (see also [this paper](http://dx.doi.org/10.4230/LIPIcs.SEA.2017.24))
- [https://github.com/swenson/sort](https://github.com/swenson/sort)
<sub>[\[TOC\]](#table-of-contents)</sub>
|
cheng-zhaoREPO_NAMEFCFCPATH_START.@FCFC_extracted@FCFC-main@README.md@.PATH_END.py
|
{
"filename": "test_mcmc.py",
"repo_name": "andreatramacere/jetset",
"repo_path": "jetset_extracted/jetset-master/jetset/tests/test_mcmc.py",
"type": "Python"
}
|
import pytest
from .base_class import TestBase
class TestEmcee(TestBase):
    """Integration tests for the emcee-based MCMC sampler."""

    def integration_suite(self, fit_dict=None, sed_number=None, plot=False):
        """Run the emcee suite from either a prepared ``fit_dict`` or a SED number.

        Exactly one of ``fit_dict`` / ``sed_number`` must be provided.
        """
        if sed_number is not None and fit_dict is None:
            from .test_model_fit import prepare_asset
            fit_dict = prepare_asset(plot=plot, sed_number=sed_number, skip_minuit=True)
        elif fit_dict and sed_number is None:
            pass
        else:
            # BUG FIX: previously raised RecursionError, which signals
            # interpreter stack exhaustion; bad arguments should be ValueError.
            raise ValueError("please provide either fit_dict or sed_number")
        self.run_emcee(fit_dict, plot=plot)

    def test(self, plot=False):
        """Default pytest entry point: prepare SED #1 and run the sampler."""
        from .test_model_fit import prepare_asset
        fit_dict = prepare_asset(plot=plot, sed_number=1, skip_minuit=True)
        self.run_emcee(fit_dict=fit_dict, plot=plot)

    def run_emcee(self, fit_dict=None, model_minimizer=None, sed_data=None, plot=False):
        """Run a short emcee chain, save/reload the sampler, optionally plot.

        Either ``fit_dict`` or both ``model_minimizer`` and ``sed_data``
        must be supplied.
        """
        if fit_dict is not None:
            model_minimizer = fit_dict['model_minimizer']
            sed_data = fit_dict['sed_data']
        elif sed_data is None or model_minimizer is None:
            raise RuntimeError("please, provide either fit_dict, or both sed_data and model_minimizer")
        else:
            pass
        from jetset.mcmc import McmcSampler
        mcmc = McmcSampler(model_minimizer)
        labels = ['N', 'B', 'beam_obj', 's', 'gamma0_log_parab']
        model_name = 'jet_leptonic'
        use_labels_dict = {model_name: labels}
        mcmc.set_labels(use_labels_dict=use_labels_dict)
        mcmc.set_bounds(bound=5.0, bound_rel=True)
        # Deliberately short chain: this is a smoke test, not production science.
        mcmc.run_sampler(nwalkers=64, burnin=10, steps=50)
        print(mcmc.acceptance_fraction)
        if plot is True:
            p = mcmc.plot_model(sed_data=sed_data, fit_range=[11., 27.4], size=50)
            p.setlim(y_min=1E-13, x_min=1E6, x_max=3E28)
        # Round-trip the sampler through pickle to verify persistence.
        mcmc.save('mcmc_sampler.pkl')
        ms = McmcSampler.load('mcmc_sampler.pkl')
        if plot is True:
            p = ms.plot_model(sed_data=sed_data, fit_range=[11., 27.4], size=50)
            p.setlim(y_min=1E-13, x_min=1E6, x_max=3E28)
|
andreatramacereREPO_NAMEjetsetPATH_START.@jetset_extracted@jetset-master@jetset@tests@test_mcmc.py@.PATH_END.py
|
{
"filename": "frequency_flux.py",
"repo_name": "HeRTA/FRBSTATS",
"repo_path": "FRBSTATS_extracted/FRBSTATS-main/figs/frequency_flux.py",
"type": "Python"
}
|
from csv import reader
import matplotlib
matplotlib.use('Agg')  # headless backend: figures are only written to disk
from matplotlib import pyplot as plt

### Set MPL plot parameters
# Selectable SVG text
plt.rcParams['svg.fonttype'] = 'none'
# Use TeX
plt.rcParams['text.usetex'] = True
# Set figsize
plt.rcParams["figure.figsize"] = (24,20)
plt.rcParams["figure.dpi"] = 300
# Set xtick size
plt.rcParams['xtick.major.size'] = 20
plt.rcParams['xtick.major.width'] = 2
plt.rcParams['xtick.minor.size'] = 10
plt.rcParams['xtick.minor.width'] = 2
# Set ytick size
plt.rcParams['ytick.major.size'] = 20
plt.rcParams['ytick.major.width'] = 2
plt.rcParams['ytick.minor.size'] = 10
plt.rcParams['ytick.minor.width'] = 2
# Hide secondary spines
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False

### Load data
# Initiate empty parameter lists
frequency = []  # CSV column 8: center frequency [MHz]
flux = []       # CSV column 10: peak flux density [Jy]

# Read FRBSTATS CSV catalogue
with open('../catalogue.csv', 'r') as read_obj:
    csv_reader = reader(read_obj)
    header = next(csv_reader)
    # Skip header; BUG FIX: identity comparison (`is not None`) instead of
    # `!= None`. The guard is defensive only -- next() above consumed the row.
    if header is not None:
        for row in csv_reader:
            frequency.append(row[8])
            flux.append(row[10])

### Pre-process data
# Pick out incompatible rows (non-numeric entries in either column)
idx_mask = set()
for idx, val in enumerate(frequency):
    try:
        frequency[idx] = float(val)
    except ValueError:
        idx_mask.add(idx)
for idx, val in enumerate(flux):
    try:
        flux[idx] = float(val)
    except ValueError:
        idx_mask.add(idx)
# Dump rows with missing data (reverse order keeps earlier indices valid)
for idx in sorted(idx_mask, reverse=True):
    del frequency[idx]
    del flux[idx]

### Initiate plot
# Apply grid
plt.grid(color='grey', linestyle='-', linewidth=0.25, alpha=1)
# Scatter plot
plt.scatter(flux, frequency, c='cornflowerblue', s=500, alpha=0.8, edgecolor='royalblue', linewidth=2, zorder=10)
# Set axis labels & figure title
plt.xlabel(r'$\mathrm{Peak \ Flux \ Density \ [Jy]}$', fontsize=52)
plt.ylabel(r'$\mathrm{Center \ Frequency \ [MHz]}$', fontsize=52)
plt.title(r'$\mathrm{FRB \ Frequency-Flux \ Distribution}$', fontsize=72, y=1.01)
# Set linear-log scaling
plt.xscale('log')
#plt.yscale('log')
# Set axis limits
plt.gca().set_ylim(bottom=0)
# Set tick size
plt.xticks(fontsize=42, y=-0.005)
plt.yticks(fontsize=42)
plt.tight_layout()
# Save data to a scalable format
plt.savefig('frequency_flux.svg', format='svg')
plt.savefig('frequency_flux.pdf')
plt.savefig('frequency_flux.png')
|
HeRTAREPO_NAMEFRBSTATSPATH_START.@FRBSTATS_extracted@FRBSTATS-main@figs@frequency_flux.py@.PATH_END.py
|
{
"filename": "aggregate.py",
"repo_name": "hongwanliu/DarkHistory",
"repo_path": "DarkHistory_extracted/DarkHistory-master/darkhistory/numpy_groupies/aggregate.py",
"type": "Python"
}
|
"""numpy-groupies aggregate function, numpy implementation."""
import numpy as np
"""Common helpers without certain dependencies."""
aggregate_common_doc = """
See readme file at https://github.com/ml31415/numpy-groupies for a full
description. Below we reproduce the "Full description of inputs"
section from that readme, note that the text below makes references to
other portions of the readme that are not shown here.
group_idx:
this is an array of non-negative integers, to be used as the "labels"
with which to group the values in ``a``. Although we have so far
assumed that ``group_idx`` is one-dimensional, and the same length as
``a``, it can in fact be two-dimensional (or some form of nested
sequences that can be converted to 2D). When ``group_idx`` is 2D, the
size of the 0th dimension corresponds to the number of dimensions in
the output, i.e. ``group_idx[i,j]`` gives the index into the ith
dimension in the output
for ``a[j]``. Note that ``a`` should still be 1D (or scalar), with
length matching ``group_idx.shape[1]``.
a:
this is the array of values to be aggregated. See above for a
simple demonstration of what this means. ``a`` will normally be a
one-dimensional array, however it can also be a scalar in some cases.
func: default='sum'
the function to use for aggregation. See the section above for
details. Note that the simplest way to specify the function is using a
string (e.g. ``func='max'``) however a number of aliases are also
defined (e.g. you can use the ``func=np.max``, or even ``func=max``,
where ``max`` is the
builtin function). To check the available aliases see ``utils.py``.
size: default=None
the shape of the output array. If ``None``, the maximum value in
``group_idx`` will set the size of the output. Note that for
multidimensional output you need to list the size of each dimension
here, or give ``None``.
fill_value: default=0
in the example above, group 2 does not have any data, so requires some
kind of filling value - in this case the default of ``0`` is used. If
you had set ``fill_value=nan`` or something else, that value would
appear instead of ``0`` for the 2 element in the output. Note that
there are some subtle interactions between what is permitted for
``fill_value`` and the input/output ``dtype`` - exceptions should be
raised in most cases to alert the programmer if issues arise.
order: default='C'
this is relevant only for multidimensional output. It controls the
layout of the output array in memory, can be ``'F'`` for fortran-style.
dtype: default=None
the ``dtype`` of the output. By default something sensible is chosen
based on the input, aggregation function, and ``fill_value``.
ddof: default=0
passed through into calculations of variance and standard deviation
(see above).
"""
# Canonical function names that every backend implements.
funcs_common = 'first last len mean var std allnan anynan max min argmax argmin cumsum cumprod cummax cummin'.split()
# Functions for which no separate nan-ignoring variant exists.
funcs_no_separate_nan = frozenset(['sort', 'rsort', 'array', 'allnan', 'anynan'])
# String aliases -> canonical function names.
# NOTE(review): 'rsorted' and 'dsort' map to 'sort' (ascending) while
# 'dsorted' maps to 'rsort' (descending); the asymmetry looks suspicious --
# verify against upstream numpy-groupies before relying on these aliases.
_alias_str = {
    'or': 'any',
    'and': 'all',
    'add': 'sum',
    'count': 'len',
    'plus': 'sum',
    'multiply': 'prod',
    'product': 'prod',
    'times': 'prod',
    'amax': 'max',
    'maximum': 'max',
    'amin': 'min',
    'minimum': 'min',
    'split': 'array',
    'splice': 'array',
    'sorted': 'sort',
    'asort': 'sort',
    'asorted': 'sort',
    'rsorted': 'sort',
    'dsort': 'sort',
    'dsorted': 'rsort',
}
# Python builtins -> canonical function names (lets users pass e.g. func=len).
_alias_builtin = {
    all: 'all',
    any: 'any',
    len: 'len',
    max: 'max',
    min: 'min',
    sum: 'sum',
    sorted: 'sort',
    slice: 'array',
    list: 'array',
}
def get_aliasing(*extra):
    """Assemble the dict mapping strings and functions to canonical
    function names, e.g. ``alias['add'] = 'sum'`` and
    ``alias[sorted] = 'sort'``.

    This function should only be called during import.
    """
    # Every canonical name maps to itself.
    alias = dict((k, k) for k in funcs_common)
    alias.update(_alias_str)
    alias.update((fn, fn) for fn in _alias_builtin.values())
    alias.update(_alias_builtin)
    # Extra alias dicts (e.g. numpy callables) supplied by the caller.
    for d in extra:
        alias.update(d)
    alias.update((k, k) for k in set(alias.values()))
    # Treat nan-functions as first-class members and add them directly
    for key in set(alias.values()):
        if key not in funcs_no_separate_nan:
            key = 'nan' + key
        alias[key] = key
    return alias
# Base alias table (string and builtin aliases only).
aliasing = get_aliasing()
def get_func(func, aliasing, implementations):
    """Return the key of a found implementation, or ``func`` itself.

    Parameters
    ----------
    func : str or callable
        Requested aggregation function (name, alias, or callable).
    aliasing : dict
        Maps names/callables to canonical function names.
    implementations : dict
        Maps canonical names to available implementations.

    Raises
    ------
    ValueError
        If ``func`` is an unknown, non-callable value, or requests a
        nan-variant of a function that has none.
    NotImplementedError
        If the canonical name has no implementation available.
    """
    try:
        func_str = aliasing[func]
    except KeyError:
        if callable(func):
            return func
    else:
        if func_str in implementations:
            return func_str
        if func_str.startswith('nan') and \
                func_str[3:] in funcs_no_separate_nan:
            # BUG FIX: the original mixed a %-style placeholder with
            # str.format, so the message contained a literal "%s".
            raise ValueError("{} does not have a nan-version".format(func_str[3:]))
        else:
            raise NotImplementedError("No such function available")
    # BUG FIX: same %-placeholder-with-.format bug as above.
    raise ValueError("func {} is neither a valid function string nor a "
                     "callable object".format(func))
def check_boolean(x):
    """Ensure *x* is a boolean-like flag (0/1/False/True); raise otherwise."""
    if x != 0 and x != 1:
        raise ValueError("Value not boolean")
try:
    basestring  # Attempt to evaluate basestring (NameError on Python 3)
    def isstr(s):
        # Python 2: both str and unicode count as strings.
        return isinstance(s, basestring)
except NameError:
    # Probably Python 3.x
    def isstr(s):
        return isinstance(s, str)
#############################################################################
"""Common helper functions for typing and general numpy tools."""
# Map numpy callables onto canonical implementation names, so users can pass
# e.g. ``func=np.nansum`` directly instead of a string.
_alias_numpy = {
    np.add: 'sum',
    np.sum: 'sum',
    np.any: 'any',
    np.all: 'all',
    np.multiply: 'prod',
    np.prod: 'prod',
    np.amin: 'min',
    np.min: 'min',
    np.minimum: 'min',
    np.amax: 'max',
    np.max: 'max',
    np.maximum: 'max',
    np.argmax: 'argmax',
    np.argmin: 'argmin',
    np.mean: 'mean',
    np.std: 'std',
    np.var: 'var',
    np.array: 'array',
    np.asarray: 'array',
    np.sort: 'sort',
    np.nansum: 'nansum',
    np.nanprod: 'nanprod',
    np.nanmean: 'nanmean',
    np.nanvar: 'nanvar',
    np.nanmax: 'nanmax',
    np.nanmin: 'nanmin',
    np.nanstd: 'nanstd',
    np.nanargmax: 'nanargmax',
    np.nanargmin: 'nanargmin',
    np.cumsum: 'cumsum',
    np.cumprod: 'cumprod',
}
# Rebuild the alias table, now including the numpy-callable aliases.
aliasing = get_aliasing(_alias_numpy)
# Promotion chain used when an integer dtype cannot represent a value:
# each dtype maps to the next wider signed integer type.
_next_int_dtype = dict(
    bool=np.int8,
    uint8=np.int16,
    int8=np.int16,
    uint16=np.int32,
    int16=np.int32,
    uint32=np.int64,
    int32=np.int64
)
# Promotion chain for inexact (floating/complex) types.
# NOTE(review): float64 -> complex64 narrows the components to float32;
# presumably intentional upstream, but verify before depending on it.
_next_float_dtype = dict(
    float16=np.float32,
    float32=np.float64,
    float64=np.complex64,
    complex64=np.complex128
)
def minimum_dtype(x, dtype=np.bool_):
    """Return the "most basic" dtype which represents `x` properly, and
    which provides at least the same value range as the specified dtype.

    Walks the promotion chains in ``_next_int_dtype`` / ``_next_float_dtype``
    starting at ``dtype`` until ``x`` survives a round-trip cast.
    """
    def check_type(x, dtype):
        # True when casting x to dtype neither raises nor changes the value.
        try:
            converted = dtype.type(x)
        except (ValueError, OverflowError):
            return False
        # False if some overflow has happened
        return converted == x or np.isnan(x)
    def type_loop(x, dtype, dtype_dict, default=None):
        while True:
            try:
                dtype = np.dtype(dtype_dict[dtype.name])
                if check_type(x, dtype):
                    return np.dtype(dtype)
            except KeyError:
                # End of the promotion chain reached.
                if default is not None:
                    return np.dtype(default)
                raise ValueError("Can not determine dtype of %r" % x)
    dtype = np.dtype(dtype)
    if check_type(x, dtype):
        return dtype
    if np.issubdtype(dtype, np.inexact):
        return type_loop(x, dtype, _next_float_dtype)
    else:
        # Integer chain falls back to float32 when no integer type fits.
        return type_loop(x, dtype, _next_int_dtype, default=np.float32)
def minimum_dtype_scalar(x, dtype, a):
    """Like ``minimum_dtype``, deriving a fallback dtype from *a* when
    *dtype* is None (Python scalars map via their builtin type)."""
    if dtype is None:
        if isinstance(a, (int, float)):
            dtype = np.dtype(type(a))
        else:
            dtype = a.dtype
    return minimum_dtype(x, dtype)
# Functions whose output dtype is fixed regardless of the input dtype.
_forced_types = {
    #'array': np.object,
    'all': np.bool_,
    'any': np.bool_,
    'nanall': np.bool_,
    'nanany': np.bool_,
    'len': np.int64,
    'nanlen': np.int64,
    'allnan': np.bool_,
    'anynan': np.bool_,
    'argmax': np.int64,
    'argmin': np.int64,
}
# Functions that must produce floating point output unless the input floats.
_forced_float_types = {'mean', 'var', 'std', 'nanmean', 'nanvar', 'nanstd'}
# Functions whose output dtype always matches the input dtype.
_forced_same_type = {'min', 'max', 'first', 'last', 'nanmin', 'nanmax',
                     'nanfirst', 'nanlast'}
def check_dtype(dtype, func_str, a, n):
    """Decide (and validate) the output dtype for aggregation ``func_str``.

    Parameters
    ----------
    dtype : dtype-like or None
        User-requested dtype; validated and returned when given.
    func_str : str
        Canonical aggregation function name.
    a : scalar or ndarray
        Values to be aggregated (scalars only allowed for sum/prod/len).
    n : int
        Upper bound on group size; used to size integer sum accumulators.

    Returns
    -------
    np.dtype
    """
    if np.isscalar(a) or not a.shape:
        if func_str not in ("sum", "prod", "len"):
            raise ValueError("scalar inputs are supported only for 'sum', "
                             "'prod' and 'len'")
        a_dtype = np.dtype(type(a))
    else:
        a_dtype = a.dtype
    if dtype is not None:
        # dtype set by the user
        # Careful here: np.bool != np.bool_ !
        if np.issubdtype(dtype, np.bool_) and \
                not('all' in func_str or 'any' in func_str):
            raise TypeError("function %s requires a more complex datatype "
                            "than bool" % func_str)
        if not np.issubdtype(dtype, np.integer) and func_str in ('len', 'nanlen'):
            raise TypeError("function %s requires an integer datatype" % func_str)
        # TODO: Maybe have some more checks here
        return np.dtype(dtype)
    else:
        try:
            return np.dtype(_forced_types[func_str])
        except KeyError:
            if func_str in _forced_float_types:
                if np.issubdtype(a_dtype, np.floating):
                    return a_dtype
                else:
                    return np.dtype(np.float64)
            else:
                if func_str == 'sum':
                    # Try to guess the minimally required int size
                    if np.issubdtype(a_dtype, np.int64):
                        # It's not getting bigger anymore
                        # TODO: strictly speaking it might need float
                        return np.dtype(np.int64)
                    elif np.issubdtype(a_dtype, np.integer):
                        maxval = np.iinfo(a_dtype).max * n
                        return minimum_dtype(maxval, a_dtype)
                    elif np.issubdtype(a_dtype, np.bool_):
                        return minimum_dtype(n, a_dtype)
                    else:
                        # floating, inexact, whatever
                        return a_dtype
                elif func_str in _forced_same_type:
                    return a_dtype
                else:
                    # BUG FIX: was `isinstance(a_dtype, np.integer)`, which is
                    # always False (a_dtype is a np.dtype instance, not a
                    # scalar type), so integer inputs never promoted to int64.
                    if np.issubdtype(a_dtype, np.integer):
                        return np.dtype(np.int64)
                    else:
                        return a_dtype
def check_fill_value(fill_value, dtype):
    """Cast *fill_value* to *dtype*, raising a helpful error when impossible."""
    caster = dtype.type
    try:
        return caster(fill_value)
    except ValueError:
        raise ValueError("fill_value must be convertible into %s"
                         % caster.__name__)
def check_group_idx(group_idx, a=None, check_min=True):
    """Validate ``group_idx``: length match with *a*, integer dtype and,
    optionally, non-negativity. Raises on the first violation."""
    if a is not None and group_idx.size != a.size:
        raise ValueError("The size of group_idx must be the same as "
                         "a.size")
    if not np.issubdtype(group_idx.dtype, np.integer):
        raise TypeError("group_idx must be of integer type")
    if check_min and np.min(group_idx) < 0:
        raise ValueError("group_idx contains negative indices")
def input_validation(group_idx, a, size=None, order='C', axis=None,
                     ravel_group_idx=True, check_bounds=True):
    """ Do some fairly extensive checking of group_idx and a, trying to
    give the user as much help as possible with what is wrong. Also,
    convert ndim-indexing to 1d indexing.

    Returns ``(group_idx, a, flat_size, ndim_idx, size)``: flattened and
    validated inputs suitable for the 1-d aggregation kernels.
    """
    if not isinstance(a, (int, float, complex)):
        a = np.asanyarray(a)
    group_idx = np.asanyarray(group_idx)
    if not np.issubdtype(group_idx.dtype, np.integer):
        raise TypeError("group_idx must be of integer type")
    # This check works for multidimensional indexing as well
    if check_bounds and np.any(group_idx < 0):
        raise ValueError("negative indices not supported")
    ndim_idx = np.ndim(group_idx)
    ndim_a = np.ndim(a)
    # Deal with the axis arg: if present, then turn 1d indexing into
    # multi-dimensional indexing along the specified axis.
    if axis is None:
        if ndim_a > 1:
            raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
                             " flatten. Alternatively specify axis.")
    elif axis >= ndim_a or axis < -ndim_a:
        raise ValueError("axis arg too large for np.ndim(a)")
    else:
        axis = axis if axis >= 0 else ndim_a + axis  # negative indexing
        if ndim_idx > 1:
            # TODO: we could support a sequence of axis values for multiple
            # dimensions of group_idx.
            raise NotImplementedError("only 1d indexing currently"
                                      "supported with axis arg.")
        elif a.shape[axis] != len(group_idx):
            raise ValueError("a.shape[axis] doesn't match length of group_idx.")
        elif size is not None and not np.isscalar(size):
            raise NotImplementedError("when using axis arg, size must be"
                                      "None or scalar.")
        else:
            # Create the broadcast-ready multidimensional indexing.
            # Note the user could do this themselves, so this is
            # very much just a convenience.
            size_in = np.max(group_idx) + 1 if size is None else size
            group_idx_in = group_idx
            group_idx = []
            size = []
            for ii, s in enumerate(a.shape):
                # Along `axis` use the user's indices; elsewhere an arange.
                ii_idx = group_idx_in if ii == axis else np.arange(s)
                ii_shape = [1] * ndim_a
                ii_shape[ii] = s
                group_idx.append(ii_idx.reshape(ii_shape))
                size.append(size_in if ii == axis else s)
            # Use the indexing, and return. It's a bit simpler than
            # using trying to keep all the logic below happy
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
            flat_size = np.prod(size)
            ndim_idx = ndim_a
            return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size
    if ndim_idx == 1:
        if size is None:
            size = np.max(group_idx) + 1
        else:
            if not np.isscalar(size):
                raise ValueError("output size must be scalar or None")
            if check_bounds and np.any(group_idx > size - 1):
                raise ValueError("one or more indices are too large for "
                                 "size %d" % size)
        flat_size = size
    else:
        # 2-D group_idx: row i indexes output dimension i.
        if size is None:
            size = np.max(group_idx, axis=1) + 1
        elif np.isscalar(size):
            raise ValueError("output size must be of length %d"
                             % len(group_idx))
        elif len(size) != len(group_idx):
            raise ValueError("%d sizes given, but %d output dimensions "
                             "specified in index" % (len(size),
                                                     len(group_idx)))
        if ravel_group_idx:
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
        flat_size = np.prod(size)
    if not (np.ndim(a) == 0 or len(a) == group_idx.size):
        raise ValueError("group_idx and a must be of the same length, or a"
                         " can be scalar")
    return group_idx, a, flat_size, ndim_idx, size
### General tools ###
def unpack(group_idx, ret):
    """Expand per-group results in ``ret`` back to per-element resolution.

    Equivalent to ``ret[group_idx]``.
    """
    expanded = ret[group_idx]
    return expanded
def allnan(x):
    """True when every element of *x* is NaN."""
    return np.isnan(x).all()
def anynan(x):
    """True when at least one element of *x* is NaN."""
    return np.isnan(x).any()
def nanfirst(x):
    """First non-NaN element of *x* (IndexError if all-NaN)."""
    valid = ~np.isnan(x)
    return x[valid][0]
def nanlast(x):
    """Last non-NaN element of *x* (IndexError if all-NaN)."""
    valid = ~np.isnan(x)
    return x[valid][-1]
def multi_arange(n):
    """Concatenated ``arange`` for every count in *n*.

    By example:

        # input:  [0, 0, 3, 0, 0, 2, 0, 2, 1]
        # result: [0, 1, 2, 0, 1, 0, 1, 0]

    i.e. equivalent to ``hstack(arange(n_i) for n_i in n)``, but computed
    with two cumulative sums instead of a Python-level loop, which is
    quite a bit faster for many inputs.
    """
    if n.ndim != 1:
        raise ValueError("n is supposed to be 1d array.")
    nonzero = n.astype(bool)
    stops = np.cumsum(n)
    # Start from all ones, then subtract each run's length at the position
    # where the next run begins, so the cumulative sum restarts at zero.
    steps = np.ones(stops[-1] + 1, dtype=int)
    steps[stops[nonzero]] -= n[nonzero]
    steps[0] -= 1
    return np.cumsum(steps)[:-1]
def label_contiguous_1d(X):
    """
    WARNING: API for this function is not liable to change!!!

    Label each contiguous block in a 1-d array.

        X      = [F T T F F T F F F T T T]
        result = [0 1 1 0 0 2 0 0 0 3 3 3]

        X      = [0 3 3 0 0 5 5 5 1 1 0 2]
        result = [0 1 1 0 0 2 2 2 3 3 0 4]

    Zero / ``False`` elements of ``X`` are labelled ``0`` in the output.
    For a boolean array each contiguous block of ``True`` gets an integer
    label; otherwise each contiguous block of identical values gets one.
    Labels are 1, 2, 3, ... starting at 1, with no numbers skipped.
    """
    if X.ndim != 1:
        raise ValueError("this is for 1d masks only.")
    starts = np.empty(len(X), dtype=bool)
    starts[0] = X[0]  # a block starts at index 0 iff X[0] is truthy
    if X.dtype.kind == 'b':
        # A True-run starts wherever False is followed by True.
        starts[1:] = ~X[:-1] & X[1:]
        mask = X
    else:
        mask = X.astype(bool)
        # A run starts wherever the value changes...
        starts[1:] = X[:-1] != X[1:]
        # ...unless the new value is zero, which is never labelled.
        starts[~mask] = False
    labels = np.cumsum(starts)
    labels[~mask] = 0
    return labels
def relabel_groups_unique(group_idx):
    """Compress group labels so the labels actually used become contiguous.

    See also ``relabel_groups_masked``.

        group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
        ret:       [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]

    Unique groups in the input were ``1, 2, 3, 5`` (``4`` was missing), so
    group 5 is relabelled to ``4``. Relabelling maintains order, just
    "compressing" the higher numbers to fill gaps.
    """
    n_labels = np.max(group_idx) + 1
    used = np.zeros(n_labels, dtype=bool)
    used[0] = True
    used[group_idx] = True
    return relabel_groups_masked(group_idx, used)
def relabel_groups_masked(group_idx, keep_group):
    """Drop the groups flagged ``False`` in ``keep_group`` (mapped to the
    0 group) and renumber the remaining groups to close the gaps.

        group_idx:  [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
                     0 1 2 3 4 5
        keep_group: [0 1 0 1 1 1]
        ret:        [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]

    In words: group 2 is removed, and groups 3, 4 and 5 become 2, 3 and 4
    respectively. Group 4 never appears in ``group_idx`` but the mask keeps
    it, so group 5 only moves up one slot to fill the gap left by group 2.
    ``keep_group[0]`` has no particular meaning: the zero group cannot be
    "removed". ``keep_group`` should be bool and ``group_idx`` int.
    """
    # Copy only when index 0 needs flipping on (avoids mutating the caller's
    # array); having keep_group[0] True makes the mapping construction easy.
    keep_group = keep_group.astype(bool, copy=not keep_group[0])
    if not keep_group[0]:
        keep_group[0] = True
    mapping = np.zeros(keep_group.size, dtype=group_idx.dtype)
    mapping[keep_group] = np.arange(np.count_nonzero(keep_group))
    return mapping[group_idx]
#############################################################################
def _sum(group_idx, a, size, fill_value, dtype=None):
    # Per-group sum via np.bincount; complex input is summed part-wise
    # because bincount only accepts real weights.
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    if np.ndim(a) == 0:
        # Scalar `a`: sum of a constant is (group count) * a.
        ret = np.bincount(group_idx, minlength=size).astype(dtype)
        if a != 1:
            ret *= a
    else:
        if np.iscomplexobj(a):
            ret = np.empty(size, dtype=dtype)
            ret.real = np.bincount(group_idx, weights=a.real,
                                   minlength=size)
            ret.imag = np.bincount(group_idx, weights=a.imag,
                                   minlength=size)
        else:
            ret = np.bincount(group_idx, weights=a,
                              minlength=size).astype(dtype)
    if fill_value != 0:
        # bincount leaves empty groups at 0; overwrite them.
        # (_fill_untouched is defined later in this module.)
        _fill_untouched(group_idx, ret, fill_value)
    return ret
def _prod(group_idx, a, size, fill_value, dtype=None):
    # Per-group product via np.multiply.at (unbuffered in-place multiply).
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value != 1:
        ret[group_idx] = 1  # product starts from 1
    np.multiply.at(ret, group_idx, a)
    return ret
def _len(group_idx, a, size, fill_value, dtype=None):
    # Group sizes == per-group sum of ones.
    return _sum(group_idx, 1, size, fill_value, dtype=int)
def _last(group_idx, a, size, fill_value, dtype=None):
    dtype = minimum_dtype(fill_value, dtype or a.dtype)
    ret = np.full(size, fill_value, dtype=dtype)
    # repeated indexing gives last value, see:
    # the phrase "leaving behind the last value" on this page:
    # http://wiki.scipy.org/Tentative_NumPy_Tutorial
    ret[group_idx] = a
    return ret
def _first(group_idx, a, size, fill_value, dtype=None):
    dtype = minimum_dtype(fill_value, dtype or a.dtype)
    ret = np.full(size, fill_value, dtype=dtype)
    ret[group_idx[::-1]] = a[::-1]  # same trick as _last, but in reverse
    return ret
def _all(group_idx, a, size, fill_value, dtype=None):
    # Per-group logical AND; fill_value must itself be boolean-like.
    check_boolean(fill_value)
    ret = np.full(size, fill_value, dtype=bool)
    if not fill_value:
        ret[group_idx] = True
    # Any falsy element drags its whole group to False.
    ret[group_idx.compress(np.logical_not(a))] = False
    return ret
def _any(group_idx, a, size, fill_value, dtype=None):
    # Per-group logical OR.
    check_boolean(fill_value)
    ret = np.full(size, fill_value, dtype=bool)
    if fill_value:
        ret[group_idx] = False
    # Any truthy element lifts its whole group to True.
    ret[group_idx.compress(a)] = True
    return ret
def _min(group_idx, a, size, fill_value, dtype=None):
    # Per-group minimum via np.minimum.at (unbuffered in-place minimum).
    dtype = minimum_dtype(fill_value, dtype or a.dtype)
    dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
        else np.finfo(a.dtype).max
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value != dmax:
        ret[group_idx] = dmax  # min starts from maximum
    np.minimum.at(ret, group_idx, a)
    return ret
def _max(group_idx, a, size, fill_value, dtype=None):
    # Per-group maximum; mirror image of _min.
    dtype = minimum_dtype(fill_value, dtype or a.dtype)
    dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
        else np.finfo(a.dtype).min
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value != dmin:
        ret[group_idx] = dmin  # max starts from minimum
    np.maximum.at(ret, group_idx, a)
    return ret
def _argmax(group_idx, a, size, fill_value, dtype=None):
    # Flat-input index of the first occurrence of each group's maximum.
    dtype = minimum_dtype(fill_value, dtype or int)
    dmin = np.iinfo(a.dtype).min if issubclass(a.dtype.type, np.integer)\
        else np.finfo(a.dtype).min
    group_max = _max(group_idx, a, size, dmin)
    is_max = a == group_max[group_idx]
    ret = np.full(size, fill_value, dtype=dtype)
    group_idx_max = group_idx[is_max]
    argmax, = is_max.nonzero()
    ret[group_idx_max[::-1]] = argmax[::-1]  # reverse to ensure first value for each group wins
    return ret
def _argmin(group_idx, a, size, fill_value, dtype=None):
    # Flat-input index of the first occurrence of each group's minimum.
    dtype = minimum_dtype(fill_value, dtype or int)
    dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
        else np.finfo(a.dtype).max
    group_min = _min(group_idx, a, size, dmax)
    is_min = a == group_min[group_idx]
    ret = np.full(size, fill_value, dtype=dtype)
    group_idx_min = group_idx[is_min]
    argmin, = is_min.nonzero()
    ret[group_idx_min[::-1]] = argmin[::-1]  # reverse to ensure first value for each group wins
    return ret
def _mean(group_idx, a, size, fill_value, dtype=np.dtype(np.float64)):
if np.ndim(a) == 0:
raise ValueError("cannot take mean with scalar a")
counts = np.bincount(group_idx, minlength=size)
if np.iscomplexobj(a):
dtype = a.dtype # TODO: this is a bit clumsy
sums = np.empty(size, dtype=dtype)
sums.real = np.bincount(group_idx, weights=a.real,
minlength=size)
sums.imag = np.bincount(group_idx, weights=a.imag,
minlength=size)
else:
sums = np.bincount(group_idx, weights=a,
minlength=size).astype(dtype)
with np.errstate(divide='ignore', invalid='ignore'):
ret = sums.astype(dtype) / counts
if not np.isnan(fill_value):
ret[counts == 0] = fill_value
return ret
def _var(group_idx, a, size, fill_value, dtype=np.dtype(np.float64),
         sqrt=False, ddof=0):
    """Per-group variance (or standard deviation when ``sqrt`` is True).

    Parameters
    ----------
    group_idx : int array
        Group label of each element of ``a``.
    a : array
        Values to aggregate; must not be scalar.
    size : int
        Number of output slots (groups).
    fill_value : scalar
        Written into slots of empty groups (skipped when NaN, since empty
        groups already come out as NaN).
    dtype : numpy dtype, optional
        Dtype used for the intermediate means and the result.
    sqrt : bool, optional
        Return the standard deviation instead of the variance.
    ddof : int, optional
        Delta degrees of freedom; the divisor is ``count - ddof``.
    """
    if np.ndim(a) == 0:
        raise ValueError("cannot take variance with scalar a")
    counts = np.bincount(group_idx, minlength=size)
    sums = np.bincount(group_idx, weights=a, minlength=size)
    # Fix: empty groups divide by zero (and 0/0 is "invalid"), and with
    # ddof > 0 singleton groups hit 0/0 as well.  The original only
    # suppressed 'divide' around the means, so empty groups emitted
    # RuntimeWarnings.  Suppress both for the whole computation, matching
    # _mean; the affected slots are overwritten with fill_value below.
    with np.errstate(divide='ignore', invalid='ignore'):
        means = sums.astype(dtype) / counts
        ret = np.bincount(group_idx, (a - means[group_idx]) ** 2,
                          minlength=size) / (counts - ddof)
    if sqrt:
        ret = np.sqrt(ret)  # this is now std not var
    if not np.isnan(fill_value):
        ret[counts == 0] = fill_value
    return ret
def _std(group_idx, a, size, fill_value, dtype=np.dtype(np.float64), ddof=0):
    """Per-group standard deviation; thin wrapper over ``_var`` with sqrt."""
    return _var(group_idx, a, size, fill_value, dtype=dtype, ddof=ddof,
                sqrt=True)
def _allnan(group_idx, a, size, fill_value, dtype=bool):
    """True for each group in which every element of ``a`` is NaN."""
    nan_mask = np.isnan(a)
    return _all(group_idx, nan_mask, size, fill_value=fill_value,
                dtype=dtype)
def _anynan(group_idx, a, size, fill_value, dtype=bool):
    """True for each group containing at least one NaN element of ``a``."""
    nan_mask = np.isnan(a)
    return _any(group_idx, nan_mask, size, fill_value=fill_value,
                dtype=dtype)
def _sort(group_idx, a, size=None, fill_value=None, dtype=None, reverse=False):
    """Sort ``a`` within each group while leaving each group's slots in place."""
    keys = -a if reverse else a
    # primary key: group label, secondary key: value (lexsort reads keys
    # last-to-first)
    order = np.lexsort((keys, group_idx))
    # rank of every original position under a stable group ordering; used to
    # scatter the group-sorted values back onto their original slots
    ranks = np.argsort(np.argsort(group_idx, kind='mergesort'),
                       kind='mergesort')
    return a[order][ranks]
def _array(group_idx, a, size, fill_value, dtype=None):
    """groups a into separate arrays, keeping the order intact.

    Returns one sub-array per group; slots whose group has no members are
    filled with ``fill_value`` (which must be None, a scalar or an empty
    sequence).
    """
    if fill_value is not None and not (np.isscalar(fill_value) or
                                       len(fill_value) == 0):
        raise ValueError("fill_value must be None, a scalar or an empty "
                         "sequence")
    # stable sort so members keep their original relative order in a group
    order_group_idx = np.argsort(group_idx, kind='mergesort')
    counts = np.bincount(group_idx, minlength=size)
    ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
    # NOTE(review): unequal group sizes make this a ragged array; recent
    # numpy requires dtype=object for ragged creation -- confirm the numpy
    # versions this must support.
    ret = np.asanyarray(ret)
    if fill_value is None or np.isscalar(fill_value):
        _fill_untouched(group_idx, ret, fill_value)
    return ret
def _generic_callable(group_idx, a, size, fill_value, dtype=None,
                      func=lambda g: g, **kwargs):
    """Group ``a`` by ``group_idx`` and apply ``func`` to each group in turn,
    placing the results in an array (``fill_value`` for empty groups)."""
    ret = np.full(size, fill_value, dtype=dtype or np.float64)
    for slot, members in enumerate(_array(group_idx, a, size, ())):
        # skip the empty-sequence placeholders left for member-less groups
        if np.ndim(members) == 1 and len(members) > 0:
            ret[slot] = func(members)
    return ret
def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
    """
    N to N aggregate operation of cumsum. Perform cumulative sum for each group.
    >>> group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
    >>> a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
    >>> _cumsum(group_idx, a, np.max(group_idx) + 1)
    array([ 3,  4,  5,  6, 15,  9, 15, 22,  7,  0, 15, 17,  6, 14, 31, 39])
    """
    order = np.argsort(group_idx, kind='mergesort')
    unorder = np.argsort(order, kind='mergesort')
    grp_sorted = group_idx[order]
    vals_sorted = a[order]
    running = np.cumsum(vals_sorted, dtype=dtype)
    # index of the first element of each group, broadcast to every member
    first_of_group = _min(grp_sorted, np.arange(len(a), dtype=int), size,
                          fill_value=0)[grp_sorted]
    # subtract whatever total had accumulated before each group started
    running += vals_sorted[first_of_group] - running[first_of_group]
    return running[unorder]
def _nancumsum(group_idx, a, size, fill_value=None, dtype=None):
    """Group-wise cumulative sum that treats NaNs in ``a`` as zero."""
    cleaned = np.where(np.isnan(a), 0, a)
    # route any NaN group labels into a fresh trailing group
    safe_idx = np.where(np.isnan(group_idx), np.nanmax(group_idx) + 1,
                        group_idx)
    return _cumsum(safe_idx, cleaned, size, fill_value=fill_value,
                   dtype=dtype)
# Dispatch table mapping aggregation names to their numpy implementations.
_impl_dict = dict(min=_min, max=_max, sum=_sum, prod=_prod, last=_last,
                  first=_first, all=_all, any=_any, mean=_mean, std=_std,
                  var=_var, anynan=_anynan, allnan=_allnan, sort=_sort,
                  array=_array, argmax=_argmax, argmin=_argmin, len=_len,
                  cumsum=_cumsum, generic=_generic_callable)
# Register 'nan'-prefixed aliases for functions that have no dedicated
# NaN variant; the actual NaN squeezing happens in _aggregate_base.
_impl_dict.update(('nan' + k, v) for k, v in list(_impl_dict.items())
                  if k not in funcs_no_separate_nan)
def _aggregate_base(group_idx, a, func='sum', size=None, fill_value=0,
                    order='C', dtype=None, axis=None, _impl_dict=_impl_dict,
                    _nansqueeze=False, cache=None, **kwargs):
    """Shared driver behind ``aggregate``: validate inputs, resolve ``func``
    to an implementation (a name in ``_impl_dict`` or an arbitrary callable),
    optionally squeeze NaNs out of the data, run the aggregation, and restore
    multi-dimensional output shape."""
    group_idx, a, flat_size, ndim_idx, size = input_validation(group_idx, a,
        size=size, order=order, axis=axis)
    func = get_func(func, aliasing, _impl_dict)
    if not isstr(func):
        # do simple grouping and execute function in loop
        ret = _impl_dict.get('generic', _generic_callable)(group_idx, a, flat_size, fill_value, func=func,
                             dtype=dtype, **kwargs)
    else:
        # deal with nans and find the function
        if func.startswith('nan'):
            if np.ndim(a) == 0:
                raise ValueError("nan-version not supported for scalar input.")
            if _nansqueeze:
                # drop NaN elements (and their labels) before aggregating
                good = ~np.isnan(a)
                a = a[good]
                group_idx = group_idx[good]
        dtype = check_dtype(dtype, func, a, flat_size)
        func = _impl_dict[func]
        ret = func(group_idx, a, flat_size, fill_value=fill_value, dtype=dtype,
                   **kwargs)
    # deal with ndimensional indexing
    if ndim_idx > 1:
        ret = ret.reshape(size, order=order)
    return ret
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
              dtype=None, axis=None, **kwargs):
    # Public entry point: thin wrapper that enables NaN pre-filtering
    # (_nansqueeze) and binds the pure-numpy implementation table.
    return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,
                           order=order, dtype=dtype, func=func, axis=axis,
                           _impl_dict=_impl_dict, _nansqueeze=True, **kwargs)
aggregate.__doc__ = """
This is the pure numpy implementation of aggregate.
""" + aggregate_common_doc
def _fill_untouched(idx, ret, fill_value):
    """Overwrite, in place, every slot of ``ret`` that ``idx`` never touches."""
    touched = np.zeros_like(ret, dtype=bool)
    touched[idx] = True
    ret[~touched] = fill_value
|
hongwanliuREPO_NAMEDarkHistoryPATH_START.@DarkHistory_extracted@DarkHistory-master@darkhistory@numpy_groupies@aggregate.py@.PATH_END.py
|
{
"filename": "_isosurface.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/template/data/_isosurface.py",
"type": "Python"
}
|
from plotly.graph_objs import Isosurface
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@template@data@_isosurface.py@.PATH_END.py
|
{
"filename": "disc_example.py",
"repo_name": "Jingxuan97/nemesispy",
"repo_path": "nemesispy_extracted/nemesispy-main/nemesispy/examples/disc_example.py",
"type": "Python"
}
|
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from nemesispy.common.constants import G
from nemesispy.radtran.forward_model import ForwardModel
matplotlib.interactive(True)
# Read GCM data
from nemesispy.data.gcm.process_gcm import (nlon,nlat,xlon,xlat,npv,pv,\
tmap,h2omap,comap,co2map,ch4map,hemap,h2map,vmrmap,\
tmap_mod,h2omap_mod,comap_mod,co2map_mod,ch4map_mod,\
hemap_mod,h2map_mod,vmrmap_mod,phase_grid,\
kevin_phase_by_wave,kevin_wave_by_phase,\
pat_phase_by_wave,pat_wave_by_phase,\
vmrmap_mod_new,tmap_hot)
from nemesispy.data.helper import lowres_file_paths, cia_file_path
# Flat example script: build a WASP-43b-like forward model from GCM data and
# compare a uniform disc-averaged spectrum with a day/night two-TP spectrum.
print('creating example phase curve')
### Wavelengths grid and orbital phase grid
wave_grid = np.array([1.1425, 1.1775, 1.2125, 1.2475, 1.2825, 1.3175, 1.3525, 1.3875,
       1.4225, 1.4575, 1.4925, 1.5275, 1.5625, 1.5975, 1.6325, 3.6 ,
       4.5 ])
phase_grid = np.array([ 22.5,  45. ,  67.5,  90. , 112.5, 135. , 157.5, 180. , 202.5,
       225. , 247.5, 270. , 292.5, 315. , 337.5])
nwave = len(wave_grid)
nphase = len(phase_grid)
wasp43_spec = np.array([3.341320e+25, 3.215455e+25, 3.101460e+25, 2.987110e+25,
       2.843440e+25, 2.738320e+25, 2.679875e+25, 2.598525e+25,
       2.505735e+25, 2.452230e+25, 2.391140e+25, 2.345905e+25,
       2.283720e+25, 2.203690e+25, 2.136015e+25, 1.234010e+24,
       4.422200e+23])
### Reference Planet Input
M_plt = 3.8951064000000004e+27 # kg
R_plt = 74065.70 * 1e3 # m
gas_id = np.array([  1, 2,  5,  6, 40, 39])
iso_id = np.array([0, 0, 0, 0, 0, 0])
NLAYER = 20
phasenumber = 3
nmu = 5
phase = phase_grid[phasenumber]
P_model = np.geomspace(20e5,100,NLAYER)
NITER = 1
T_star = 4520 # star temperature in K
R_star = 463892759.99999994 # m, 0.6668 * R_SUN
SMA = 2243970000.0 # m, 0.015*AU
# NOTE(review): M_plt, R_plt, gas_id and iso_id are re-assigned below with
# identical values -- looks redundant; confirm and deduplicate.
M_plt = 3.8951064000000004e+27 # kg
R_plt = 74065.70 * 1e3 # m
T_irr = T_star * (R_star/SMA)**0.5 # 2055 K
T_eq = T_irr/2**0.5 # 1453 K
g = G*M_plt/R_plt**2 # 47.39 ms-2
gas_id = np.array([  1, 2,  5,  6, 40, 39])
iso_id = np.array([0, 0, 0, 0, 0, 0])
### Set up forward model
FM = ForwardModel()
FM.set_planet_model(M_plt=M_plt,R_plt=R_plt,gas_id_list=gas_id,iso_id_list=iso_id,
    NLAYER=NLAYER)
FM.set_opacity_data(kta_file_paths=lowres_file_paths, cia_file_path=cia_file_path)
# Guillot TP-profile parameters, day side then night side
log_kappa_day = -2.2
log_gamma_day = -1
log_f_day = - 1
T_int_day = 200
log_kappa_night = -4
log_gamma_night = 0
log_f_night = -2
T_int_night = 200
# NOTE(review): the gas abundances below look like a log/linear mix-up --
# with h2o = 1e-4, 10**h2o is ~1.0 (not 1e-4), which drives the He/H2
# columns negative; presumably these were meant to be log10 VMRs (e.g. -4).
# Confirm against the intended composition.
h2o = 1e-4
co2 = 1e-4
co = 1e-4
ch4 = 1e-4
h2_frac = 0.84
he_frac = 1 - h2_frac
vmr_grid = np.ones((NLAYER,6))
vmr_grid[:,0] *= 10**h2o
vmr_grid[:,1] *= 10**co2
vmr_grid[:,2] *= 10**co
vmr_grid[:,3] *= 10**ch4
vmr_grid[:,4] *= he_frac * (1-10**h2o-10**co2-10**co-10**ch4)
vmr_grid[:,5] *= h2_frac * (1-10**h2o-10**co2-10**co-10**ch4)
from nemesispy.models.TP_profiles import TP_Guillot
T_day = TP_Guillot(P_model,g,T_eq,10**log_kappa_day,10**log_gamma_day,
    10**log_f_day,T_int_day)
T_night = TP_Guillot(P_model,g,T_eq,10**log_kappa_night,10**log_gamma_night,
    10**log_f_night,T_int_night)
spec1 = FM.calc_disc_spectrum_uniform(nmu, P_model,T_day,vmr_grid)
print(spec1)
phase=180
daymin=-90
daymax=90
spec2 = FM.calc_disc_spectrum_2tp(phase,nmu,daymin,daymax,
    P_model, T_day,T_night,vmr_grid)
print(spec2)
# Time both disc-averaging strategies (first 2tp call above warms any JIT)
s = time.time()
spec2 = FM.calc_disc_spectrum_2tp(phase,nmu,daymin,daymax,
    P_model, T_day,T_night,vmr_grid)
e = time.time()
print('time 2tp',e-s)
s = time.time()
spec1 = FM.calc_disc_spectrum_uniform(nmu, P_model,T_day,vmr_grid)
e = time.time()
print('time disc',e-s)
|
Jingxuan97REPO_NAMEnemesispyPATH_START.@nemesispy_extracted@nemesispy-main@nemesispy@examples@disc_example.py@.PATH_END.py
|
{
"filename": "PlotContours.py",
"repo_name": "bradkav/AntiparticleDM",
"repo_path": "AntiparticleDM_extracted/AntiparticleDM-master/analysis/PlotContours.py",
"type": "Python"
}
|
#!/usr/bin/python
"""
PlotContours.py
Plot individual contour plots of discrimination significance
for a given ensemble and DM mass...
BJK 30/06/2017
"""
import numpy as np
from numpy import pi
from scipy.integrate import quad
from scipy.interpolate import interp1d, interp2d
from scipy import ndimage
from matplotlib.ticker import MultipleLocator
import os.path
import sys
import CalcParamPoint as CPP
#------ Matplotlib parameters ------
import matplotlib.pyplot as pl
import matplotlib as mpl
import matplotlib.colors as colors
# Global matplotlib styling for the contour figure (Python 2 script).
font = {'family' : 'sans-serif',
        'size'   : 17}
mpl.rcParams['xtick.major.size'] = 8
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['xtick.minor.size'] = 3
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 8
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['ytick.minor.size'] = 3
mpl.rcParams['ytick.minor.width'] = 1
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rc('font', **font)
mpl.rc('text.latex', preamble=[r'\usepackage{color}', r'\usepackage{amssymb}'])
#-----------------------------------
#----Run Parameters---
# Command-line arguments: ensemble letter and DM mass in GeV.
if (len(sys.argv) != 3):
    print " PlotContours.py requires 2 argument - e.g. PlotContours.py EXPT MX"
    print " where EXPT = A, B, C, D and MX = DM mass in GeV"
    print " Exiting..."
    sys.exit()
expt = str(sys.argv[1])
mx = int(sys.argv[2])
print " Plotting contour-plot for ensemble " + expt + " and DM mass " + str(mx) + " GeV..."
#---Constants-----
# Proton-to-neutron ratios for different nuclei
R_Xe = (131.0-54)/54
R_Ar = (40-18.0)/18.0
R_Ge = (73.0-32.0)/32.0
R_Si = 1.0
R_Ca = 1.0
R_O = 1.0
R_W = (184-74.0)/74
#---Functions----
#Read in a list of significances for a given point (pID) in parameter space
#for a given reconstruction (reconID)
def getSigvals(reconID, pID):
    """Load, clean and sort the significance samples for one parameter point.

    Reads ../results/<reconID>/Results_p<pID>.txt, drops +inf and NaN
    entries, and returns the samples sorted ascending.  A missing or fully
    invalid file yields a single zero so downstream percentiles still work.
    """
    #Filename for results file
    fname = "../results/" + reconID + "/Results_p" + str(pID) +'.txt'
    #Check if the file exists (and clean up the data if necessary)
    if (os.path.exists(fname)):
        data=np.loadtxt(fname)
        data = data[data != float('+inf')]
        data = data[~np.isnan(data)]
        if (len(data) == 0):
            data = [0]
    else:
        print " Error: File not found - " + fname
        data = np.zeros(1)
    return np.sort(data)
#Calculate significance (median, mean, upper, lower) for a given point and reconstruction
def getSignificance(reconID, pID, kind="Median"):
    """Summarise the significance distribution for one parameter point.

    ``kind`` selects the statistic: "Mean", "Median", "Upper" (84th
    percentile) or "Lower" (16th percentile).  NOTE(review): any other value
    silently falls through to roughly the 10th-percentile sample -- presumably
    a deliberate conservative default; confirm.
    """
    sigvals = getSigvals(reconID, pID)
    Nsamps = len(sigvals)
    if (kind == "Mean"):
        return np.mean(sigvals)
    if (kind == "Median"):
        return np.median(sigvals)
    if (kind == "Upper"):
        return np.percentile(sigvals, 84.0)
    if (kind == "Lower"):
        return np.percentile(sigvals, 16.0)
    # fall-through: sigvals is sorted, so this is ~the 10th percentile
    ind = int(np.round(Nsamps*0.1))
    return sigvals[ind]
#Calculating top axis from bottom axis
def inv_tick_function(X):
    """Map a top-axis coupling ratio X to its bottom-axis coordinate
    X / sqrt(1 + X**2)."""
    return X / np.sqrt(1.0 + X ** 2)
#----Calculations---
# Main plotting flow: gather median significances over the coupling grid,
# smooth them, and draw the filled contour plot with element reference lines.
fig, ax1 = pl.subplots(1,1, figsize=(7,6))
levels = np.array([1,2,3,4,5]) #Contours of sigma
colmap = pl.get_cmap("Greens") #Color map
reconID = "AP_Expt" + expt + "_" + str(mx)
#Number of grid points in the parameter space in each direction
#CPP (i.e. CalcParamPoint.py) transforms indices into values of couplings
Np = CPP.Np
xvals = CPP.frange #x-axis
yvals = CPP.Rrange #y-axis
sigvals = np.zeros(Np*Np)
#Get significance for each point
for i in range(Np*Np):
    sigvals[i] = getSignificance(reconID, i+1, kind="Median")
zvals = np.reshape(sigvals, (Np, Np)).T
#Resample and filter the median results along the y-axis
Nvals = 50
xgrid = np.linspace(-1.0, -0.94, Nvals)
ygrid = np.linspace(0.6, 1.00, Nvals)
# NOTE(review): scipy.interpolate.interp2d and scipy.ndimage.filters are
# deprecated/removed in recent SciPy -- fine for the pinned legacy
# environment this Python 2 script targets, but confirm before porting.
z_interp = interp2d(xvals, yvals, zvals, kind='linear')
xvals = xgrid*1.0
yvals = ygrid*1.0
zvals = z_interp(xvals, yvals)
for i in range(Nvals):
    zvals[:,i] = ndimage.filters.median_filter(zvals[:,i], 5)
# Do some plotting
#Plot filled contours
cf = ax1.contourf(xvals, yvals, zvals, \
        levels, cmap=colmap, extend='max')
#Plot contour lines
cons0 = ax1.contour(xvals, yvals, zvals, \
        levels, colors='forestgreen')
#Find and plot maximum point
maxID = np.argmax(sigvals)
ax1.plot(CPP.getf(maxID+1), CPP.getR(maxID+1),ms=12,marker='*', mew=0.5, color='k')
print " Maximum significance: ", np.max(sigvals), "[INDEX " + str(maxID+1)+"]"
#Add red squares in some cases
if ((expt == "D" or expt == "A") and (mx == 50)):
    ax1.plot(-0.995, 0.75, ms=8,marker='s',color='r', mew=0.5)
    ax1.plot(-0.995, 0.8, ms=8,marker='s',color='r', mew=0.5)
ax1.set_xlim(-1.00, -0.94)
ax1.set_ylim(0.5, 1.1)
ax1.yaxis.set_major_locator(MultipleLocator(0.1))
#Add horizontal dashed lines for different elements
ax1.axhline(1.0/R_Xe, linestyle="--", color='k')
ax1.text(-0.943, 1.0/R_Xe+0.008, r"Xe",ha="right")
ax1.axhline(1.0/R_Ar, linestyle="--", color='k')
ax1.text(-0.943, 1.0/R_Ar+0.008, r"Ar",ha="right")
if (expt == "A"):
    ax1.axhline(1.0/R_Si, linestyle="--", color='k')
    ax1.text(-0.943, 1.0/R_Si-0.03, r"Si",ha="right")
if (expt == "B" or expt == "D"):
    ax1.axhline(1.0/R_Ge, linestyle="--", color='k')
    ax1.text(-0.943, 1.0/R_Ge+0.008, r"Ge",ha="right")
if (expt == "C" or expt == "D"):
    ax1.axhline(1.0/R_Ca, linestyle="--", color='k')
    ax1.text(-0.943, 1.0/R_Ca-0.03, r"Ca,O", ha="right")
    ax1.axhline(1.0/R_W, linestyle="--", color='k')
    ax1.text(-0.943, 1.0/R_W-0.03, r"W", ha="right")
#Sort out the top axis
ax2 = ax1.twiny()
topticks = np.array([-30,-10, -5,-4,-3])
ax2.set_xticks(inv_tick_function(topticks))
ax2.set_xticklabels(np.abs(topticks))
ax2.set_xlim(ax1.get_xlim())
#Add some labels
ax1.text(-0.942, 1.055, r"Ensemble " + expt + "; $m_\chi = " + str(mx) + "\,\,\mathrm{GeV}$", ha="right", fontsize=18.0)
ax1.text(-0.999, 0.52, r"Max. significance ($\bigstar$): $" + "{0:.1f}".format(np.max(sigvals)) + "\sigma$", fontsize=16.0)
ax1.set_ylabel(r'$\lambda_n/\lambda_p$', fontsize=20)
ax2.set_xlabel(r'$|\lambda_n^D/\lambda_n^{\overline{D}}|$', fontsize=20.0)
ax1.set_xticklabels(["-1", "-0.99", "-0.98", "-0.97", "-0.96","-0.95", "-0.94"])
fig.suptitle( r'$f = (\lambda_p^D \lambda_n^{D} + \lambda_p^{\overline{D}} \lambda_n^{\overline{D}})/ \sqrt{(\lambda_p^{D \,2} + \lambda_p^{\overline{D}\, 2})(\lambda_n^{D \,2} + \lambda_n^{\overline{D}\, 2})}$', \
    ha='center',x=0.5, y=0.05, fontsize=20.0)
#Add colorbar
cbar_ax = fig.add_axes([0.96, 0.15, 0.015, 0.7])
cb0 = fig.colorbar(cf, cax=cbar_ax, ticks=levels, extend='max')
cb0.set_ticklabels([r'$1\sigma$',\
        r'$2\sigma$',r'$3\sigma$',r'$4\sigma$',r'$5\sigma$'])
cb0.ax.tick_params(labelsize=18.0)
#Save to file
pl.savefig("../plots/individual/Contours-" + expt + "_" + str(mx) + ".pdf", bbox_inches="tight")
#pl.show()
|
bradkavREPO_NAMEAntiparticleDMPATH_START.@AntiparticleDM_extracted@AntiparticleDM-master@analysis@PlotContours.py@.PATH_END.py
|
{
"filename": "_customdatasrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermap/_customdatasrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for the ``customdatasrc`` property of the
    ``scattermap`` trace (a reference to a plot.ly data-source column)."""

    def __init__(self, plotly_name="customdatasrc", parent_name="scattermap", **kwargs):
        super(CustomdatasrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermap@_customdatasrc.py@.PATH_END.py
|
{
"filename": "addmathjax.py",
"repo_name": "NumCosmo/NumCosmo",
"repo_path": "NumCosmo_extracted/NumCosmo-master/docs/addmathjax.py",
"type": "Python"
}
|
#!/usr/bin/env python3
#
# addmathjax.py
#
# Wed Nov 29 10:52:10 2023
# Copyright 2023 Sandro Dias Pinto Vitenti
# <vitenti@uel.br>
#
# addmathjax.py
# Copyright (C) 2023 Sandro Dias Pinto Vitenti <vitenti@uel.br>
#
# numcosmo is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# numcosmo is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Add MathJax script tag to HTML files in-place."""
import sys
import os
from bs4 import BeautifulSoup
def add_mathjax_script_inplace(filename):
    """Add MathJax script tag to HTML files in-place.

    Parses ``filename`` with BeautifulSoup and appends three tags to its
    <head>: a stylesheet link, an inline MathJax configuration script, and
    the MathJax CDN loader.  The file is then rewritten in place.
    NOTE(review): assumes the document has a <head> element -- ``soup.head``
    is None otherwise and the append would raise; confirm inputs.
    """
    with open(filename, "r", encoding="utf-8") as file:
        html_content = file.read()
    soup = BeautifulSoup(html_content, "html.parser")
    head_tag = soup.head
    link_tag = soup.new_tag(
        "link",
        rel="stylesheet",
        href="container.css",
        type="text/css",
    )
    head_tag.append(link_tag)
    script_conf_tag = soup.new_tag(
        "script",
        type="text/x-mathjax-config",
    )
    script_conf_tag.append(
        """
//<![CDATA[
MathJax.Hub.Config({"HTML-CSS": { preferredFont: "TeX", availableFonts: ["STIX","TeX"],
linebreaks: { automatic:true }, EqnChunk: (MathJax.Hub.Browser.isMobile ? 10 : 50) },
tex2jax: { inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ], displayMath: [ ["$$","$$"],
["\\[", "\\]"] ], processEscapes: true, ignoreClass: "tex2jax_ignore|dno" },
TeX: {  noUndefined: {
attributes: { mathcolor: "red", mathbackground: "#FFEEEE", mathsize: "90%"
} },
equationNumbers: { autoNumber: "AMS" } },
messageStyle: "none"
});
//]]>
"""
    )
    head_tag.append(script_conf_tag)
    script_tag = soup.new_tag(
        "script",
        type="text/javascript",
        src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/"
        "MathJax.js?config=TeX-AMS_HTML",
    )
    head_tag.append(script_tag)
    modified_html = str(soup)
    with open(filename, "w", encoding="utf-8") as file:
        file.write(modified_html)
def run_addmathjax():
    """Command-line entry point: patch every .html file in a directory."""
    if len(sys.argv) != 2:
        print("Usage: python addmathjax.py <html_directory>")
        sys.exit(1)
    directory = sys.argv[1]
    if not os.path.isdir(directory):
        print(f"Error: directory '{directory}' does not exist.")
        sys.exit(1)
    html_names = (name for name in os.listdir(directory)
                  if name.endswith(".html"))
    for name in html_names:
        add_mathjax_script_inplace(os.path.join(directory, name))


if __name__ == "__main__":
    run_addmathjax()
|
NumCosmoREPO_NAMENumCosmoPATH_START.@NumCosmo_extracted@NumCosmo-master@docs@addmathjax.py@.PATH_END.py
|
{
"filename": "_coloraxis.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/_coloraxis.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Auto-generated validator for the ``coloraxis`` property of the
    ``surface`` trace (a subplot id matching ``coloraxis``, ``coloraxis2``,
    ... as enforced by the regex below)."""

    def __init__(self, plotly_name="coloraxis", parent_name="surface", **kwargs):
        super(ColoraxisValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dflt=kwargs.pop("dflt", None),
            edit_type=kwargs.pop("edit_type", "calc"),
            regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@_coloraxis.py@.PATH_END.py
|
{
"filename": "test_random_shapes.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/skimage/draw/tests/test_random_shapes.py",
"type": "Python"
}
|
import numpy as np
import pytest
from skimage._shared import testing
from skimage._shared._warnings import expected_warnings
from skimage.draw import random_shapes
# Behavioural tests for skimage.draw.random_shapes: output shapes, bounding
# boxes per shape type, size-validation errors, and reproducibility.  Tests
# that pin pixel values pass rng=42 so the generated image is deterministic.
def test_generates_color_images_with_correct_shape():
    image, _ = random_shapes((128, 128), max_shapes=10)
    assert image.shape == (128, 128, 3)
def test_generates_gray_images_with_correct_shape():
    image, _ = random_shapes(
        (4567, 123), min_shapes=3, max_shapes=20, channel_axis=None
    )
    assert image.shape == (4567, 123)
def test_generates_gray_images_with_correct_shape_deprecated_multichannel():
    image, _ = random_shapes(
        (4567, 123), min_shapes=3, max_shapes=20, channel_axis=None
    )
    assert image.shape == (4567, 123)
@pytest.mark.parametrize('channel_axis', [None, 0, 1, 2])
def test_generated_shape_for_channel_axis(channel_axis):
    # The channel dimension must appear at exactly the requested axis.
    shape = (128, 64)
    num_channels = 5
    image, _ = random_shapes(
        shape,
        num_channels=num_channels,
        min_shapes=3,
        max_shapes=10,
        channel_axis=channel_axis,
    )
    if channel_axis is None:
        expected_shape = shape
    else:
        expected_shape = tuple(np.insert(shape, channel_axis, num_channels))
    assert image.shape == expected_shape
def test_generates_correct_bounding_boxes_for_rectangles():
    image, labels = random_shapes((128, 128), max_shapes=1, shape='rectangle', rng=42)
    assert len(labels) == 1
    label, bbox = labels[0]
    assert label == 'rectangle', label
    crop = image[bbox[0][0] : bbox[0][1], bbox[1][0] : bbox[1][1]]
    # The crop is filled.
    assert (crop >= 0).all() and (crop < 255).all()
    # The crop is complete.
    image[bbox[0][0] : bbox[0][1], bbox[1][0] : bbox[1][1]] = 255
    assert (image == 255).all()
def test_generates_correct_bounding_boxes_for_triangles():
    image, labels = random_shapes((128, 128), max_shapes=1, shape='triangle', rng=42)
    assert len(labels) == 1
    label, bbox = labels[0]
    assert label == 'triangle', label
    crop = image[bbox[0][0] : bbox[0][1], bbox[1][0] : bbox[1][1]]
    # The crop is filled.
    assert (crop >= 0).any() and (crop < 255).any()
    # The crop is complete.
    image[bbox[0][0] : bbox[0][1], bbox[1][0] : bbox[1][1]] = 255
    assert (image == 255).all()
def test_generates_correct_bounding_boxes_for_circles():
    image, labels = random_shapes(
        (43, 44), max_shapes=1, min_size=20, max_size=20, shape='circle', rng=42
    )
    assert len(labels) == 1
    label, bbox = labels[0]
    assert label == 'circle', label
    crop = image[bbox[0][0] : bbox[0][1], bbox[1][0] : bbox[1][1]]
    # The crop is filled.
    assert (crop >= 0).any() and (crop < 255).any()
    # The crop is complete.
    image[bbox[0][0] : bbox[0][1], bbox[1][0] : bbox[1][1]] = 255
    assert (image == 255).all()
def test_generates_correct_bounding_boxes_for_ellipses():
    image, labels = random_shapes(
        (43, 44), max_shapes=1, min_size=20, max_size=20, shape='ellipse', rng=42
    )
    assert len(labels) == 1
    label, bbox = labels[0]
    assert label == 'ellipse', label
    crop = image[bbox[0][0] : bbox[0][1], bbox[1][0] : bbox[1][1]]
    # The crop is filled.
    assert (crop >= 0).any() and (crop < 255).any()
    # The crop is complete.
    image[bbox[0][0] : bbox[0][1], bbox[1][0] : bbox[1][1]] = 255
    assert (image == 255).all()
def test_generate_circle_throws_when_size_too_small():
    # A circle cannot be drawn with a 1-pixel size budget.
    with testing.raises(ValueError):
        random_shapes((64, 128), max_shapes=1, min_size=1, max_size=1, shape='circle')
def test_generate_ellipse_throws_when_size_too_small():
    with testing.raises(ValueError):
        random_shapes((64, 128), max_shapes=1, min_size=1, max_size=1, shape='ellipse')
def test_generate_triangle_throws_when_size_too_small():
    with testing.raises(ValueError):
        random_shapes((128, 64), max_shapes=1, min_size=1, max_size=1, shape='triangle')
def test_can_generate_one_by_one_rectangle():
    image, labels = random_shapes(
        (50, 128), max_shapes=1, min_size=1, max_size=1, shape='rectangle'
    )
    assert len(labels) == 1
    _, bbox = labels[0]
    crop = image[bbox[0][0] : bbox[0][1], bbox[1][0] : bbox[1][1]]
    # rgb
    assert np.shape(crop) == (1, 1, 3) and np.any(crop >= 1) and np.any(crop < 255)
def test_throws_when_intensity_range_out_of_range():
    # Intensities are 8-bit, so 256 and -1 are both rejected.
    with testing.raises(ValueError):
        random_shapes(
            (1000, 1234), max_shapes=1, channel_axis=None, intensity_range=(0, 256)
        )
    with testing.raises(ValueError):
        random_shapes((2, 2), max_shapes=1, intensity_range=((-1, 255),))
def test_returns_empty_labels_and_white_image_when_cannot_fit_shape():
    # The circle will never fit this.
    with expected_warnings(['Could not fit']):
        image, labels = random_shapes(
            (10000, 10000), max_shapes=1, min_size=10000, shape='circle'
        )
    assert len(labels) == 0
    assert (image == 255).all()
def test_random_shapes_is_reproducible_with_seed():
    random_seed = 42
    labels = []
    for _ in range(5):
        _, label = random_shapes((128, 128), max_shapes=5, rng=random_seed)
        labels.append(label)
    assert all(other == labels[0] for other in labels[1:])
def test_generates_white_image_when_intensity_range_255():
    image, labels = random_shapes(
        (128, 128), max_shapes=3, intensity_range=((255, 255),), rng=42
    )
    assert len(labels) > 0
    assert (image == 255).all()
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@skimage@draw@tests@test_random_shapes.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/smith/realaxis/tickfont/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Auto-generated validator for the ``style`` property of
    ``layout.smith.realaxis.tickfont`` (either "normal" or "italic")."""

    def __init__(
        self,
        plotly_name="style",
        parent_name="layout.smith.realaxis.tickfont",
        **kwargs,
    ):
        super(StyleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            values=kwargs.pop("values", ["normal", "italic"]),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@smith@realaxis@tickfont@_style.py@.PATH_END.py
|
{
"filename": "setup_package.py",
"repo_name": "LCOGT/banzai",
"repo_path": "banzai_extracted/banzai-main/banzai/utils/setup_package.py",
"type": "Python"
}
|
import os
import numpy as np
from distutils.core import Extension
from extension_helpers import add_openmp_flags_if_available
# Directory containing this setup helper and the C/Cython sources.
UTIL_DIR = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
    """Build the extension list for banzai's Cython median utilities.

    Compile flags come from the CFLAGS environment variable when set,
    otherwise an optimised default; OpenMP is enabled when the compiler
    supports it.  NOTE(review): relies on distutils, which was removed in
    Python 3.12 -- confirm the supported Python range.
    """
    med_sources = [str(os.path.join(UTIL_DIR, "median_utils.pyx")),
                   str(os.path.join(UTIL_DIR, "quick_select.c"))]
    include_dirs = [np.get_include(), UTIL_DIR]
    libraries = []
    if 'CFLAGS' in os.environ:
        extra_compile_args = os.environ['CFLAGS'].split()
    else:
        extra_compile_args = ['-g', '-O3', '-funroll-loops', '-ffast-math']
    ext_med = Extension(name=str('banzai.utils.median_utils'),
                        sources=med_sources,
                        include_dirs=include_dirs,
                        libraries=libraries,
                        language="c",
                        extra_compile_args=extra_compile_args)
    # mutates ext_med in place when the compiler supports OpenMP
    add_openmp_flags_if_available(ext_med)
    return [ext_med]
|
LCOGTREPO_NAMEbanzaiPATH_START.@banzai_extracted@banzai-main@banzai@utils@setup_package.py@.PATH_END.py
|
{
"filename": "application.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/traitlets/py2/traitlets/config/application.py",
"type": "Python"
}
|
# encoding: utf-8
"""A base class for a configurable application."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from copy import deepcopy
import json
import logging
import os
import re
import sys
from collections import defaultdict, OrderedDict
from decorator import decorator
from traitlets.config.configurable import Configurable, SingletonConfigurable
from traitlets.config.loader import (
KVArgParseConfigLoader, PyFileConfigLoader, Config, ArgumentError, ConfigFileNotFound, JSONFileConfigLoader
)
from traitlets.traitlets import (
Bool, Unicode, List, Enum, Dict, Instance, TraitError, observe, observe_compat, default,
)
from ipython_genutils.importstring import import_item
from ipython_genutils.text import indent, wrap_paragraphs, dedent
from ipython_genutils import py3compat
import six
#-----------------------------------------------------------------------------
# Descriptions for the various sections
#-----------------------------------------------------------------------------
# merge flags&aliases into options
# Help-text fragments shared by the Application help printer.
option_description = """
Arguments that take values are actually convenience aliases to full
Configurables, whose aliases are listed on the help line. For more information
on full configurables, see '--help-all'.
""".strip() # trim newlines of front and back
keyvalue_description = """
Parameters are set from command-line arguments of the form:
`--Class.trait=value`.
This line is evaluated in Python, so simple expressions are allowed, e.g.::
`--C.a='range(3)'` For setting C.a=[0,1,2].
""".strip() # trim newlines of front and back
# sys.argv can be missing, for example when python is embedded. See the docs
# for details: http://docs.python.org/2/c-api/intro.html#embedding-python
if not hasattr(sys, "argv"):
    sys.argv = [""]
subcommand_description = """
Subcommands are launched as `{app} cmd [args]`. For information on using
subcommand 'cmd', do: `{app} cmd -h`.
"""
# get running program name
#-----------------------------------------------------------------------------
# Application class
#-----------------------------------------------------------------------------
# Opt-in switch: the named environment variable makes config-file load
# errors fatal instead of merely logged; any value outside the accepted
# set is rejected eagerly at import time.
_envvar = os.environ.get('TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR','')
if _envvar.lower() in {'1','true'}:
    TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR = True
elif _envvar.lower() in {'0','false',''} :
    TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR = False
else:
    raise ValueError("Unsupported value for environment variable: 'TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR' is set to '%s' which is none of  {'0', '1', 'false', 'true', ''}."% _envvar )
@decorator
def catch_config_error(method, app, *args, **kwargs):
    """Method decorator for catching invalid config (Trait/ArgumentErrors) during init.

    On a TraitError (generally caused by bad config), this will print the trait's
    message, and exit the app.

    For use on init methods, to prevent invoking excepthook on invalid input.
    """
    try:
        return method(app, *args, **kwargs)
    except (TraitError, ArgumentError) as e:
        app.print_help()
        app.log.fatal("Bad config encountered during initialization:")
        app.log.fatal(str(e))
        app.log.debug("Config at the time: %s", app.config)
        # exit code 1: terminates the process instead of re-raising
        app.exit(1)
class ApplicationError(Exception):
    """Generic error raised for application-level failures."""
    pass
class LevelFormatter(logging.Formatter):
    """Formatter exposing an extra ``highlevel`` field on each record.

    The field is empty for records below ``highlevel_limit`` and rendered
    with ``highlevel_format`` otherwise -- useful for prefixing warnings
    and errors with their level name without tagging info messages.
    """
    highlevel_limit = logging.WARN
    highlevel_format = " %(levelname)s |"

    def format(self, record):
        is_severe = record.levelno >= self.highlevel_limit
        record.highlevel = (self.highlevel_format % record.__dict__
                            if is_severe else "")
        return super(LevelFormatter, self).format(record)
class Application(SingletonConfigurable):
    """A singleton application with full configuration support."""
    # The name of the application, will usually match the name of the command
    # line application
    name = Unicode(u'application')
    # The description of the application that is printed at the beginning
    # of the help.
    description = Unicode(u'This is an application.')
    # default section descriptions
    option_description = Unicode(option_description)
    keyvalue_description = Unicode(keyvalue_description)
    subcommand_description = Unicode(subcommand_description)
    # Loader classes used by _load_config_files for .py / .json config files.
    python_config_loader_class = PyFileConfigLoader
    json_config_loader_class = JSONFileConfigLoader
    # The usage and example string that goes at the end of the help string.
    examples = Unicode()
    # A sequence of Configurable subclasses whose config=True attributes will
    # be exposed at the command line.
    classes = []
    def _classes_inc_parents(self):
        """Iterate through configurable classes, including configurable parents
        Children should always be after parents, and each class should only be
        yielded once.
        """
        seen = set()
        for c in self.classes:
            # We want to sort parents before children, so we reverse the MRO
            for parent in reversed(c.mro()):
                if issubclass(parent, Configurable) and (parent not in seen):
                    seen.add(parent)
                    yield parent
    # The version string of this application.
    version = Unicode(u'0.0')
    # the argv used to initialize the application
    argv = List()
    # Whether failing to load config files should prevent startup
    raise_config_file_errors = Bool(TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR)
    # The log level for the application
    log_level = Enum((0,10,20,30,40,50,'DEBUG','INFO','WARN','ERROR','CRITICAL'),
                    default_value=logging.WARN,
                    help="Set the log level by value or name.").tag(config=True)
    @observe('log_level')
    @observe_compat
    def _log_level_changed(self, change):
        """Adjust the log level when log_level is set."""
        new = change.new
        if isinstance(new, six.string_types):
            new = getattr(logging, new)
            # re-assigning fires this observer again with the numeric value,
            # so log_level always ends up stored as an int
            self.log_level = new
        self.log.setLevel(new)
    _log_formatter_cls = LevelFormatter
    log_datefmt = Unicode("%Y-%m-%d %H:%M:%S",
        help="The date format used by logging formatters for %(asctime)s"
    ).tag(config=True)
    log_format = Unicode("[%(name)s]%(highlevel)s %(message)s",
        help="The Logging format template",
    ).tag(config=True)
    @observe('log_datefmt', 'log_format')
    @observe_compat
    def _log_format_changed(self, change):
        """Change the log formatter when log_format is set."""
        # only the first (default) handler is re-formatted
        _log_handler = self.log.handlers[0]
        _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
        _log_handler.setFormatter(_log_formatter)
    @default('log')
    def _log_default(self):
        """Start logging for this application.
        The default is to log to stderr using a StreamHandler, if no default
        handler already exists. The log level starts at logging.WARN, but this
        can be adjusted by setting the ``log_level`` attribute.
        """
        log = logging.getLogger(self.__class__.__name__)
        log.setLevel(self.log_level)
        log.propagate = False
        _log = log # copied from Logger.hasHandlers() (new in Python 3.2)
        while _log:
            if _log.handlers:
                return log
            if not _log.propagate:
                break
            else:
                _log = _log.parent
        if sys.executable and sys.executable.endswith('pythonw.exe'):
            # this should really go to a file, but file-logging is only
            # hooked up in parallel applications
            _log_handler = logging.StreamHandler(open(os.devnull, 'w'))
        else:
            _log_handler = logging.StreamHandler()
        _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
        _log_handler.setFormatter(_log_formatter)
        log.addHandler(_log_handler)
        return log
    # the alias map for configurables
    aliases = Dict({'log-level' : 'Application.log_level'})
    # flags for loading Configurables or store_const style flags
    # flags are loaded from this dict by '--key' flags
    # this must be a dict of two-tuples, the first element being the Config/dict
    # and the second being the help string for the flag
    flags = Dict()
    @observe('flags')
    @observe_compat
    def _flags_changed(self, change):
        """ensure flags dict is valid"""
        new = change.new
        for key, value in new.items():
            assert len(value) == 2, "Bad flag: %r:%s" % (key, value)
            assert isinstance(value[0], (dict, Config)), "Bad flag: %r:%s" % (key, value)
            assert isinstance(value[1], six.string_types), "Bad flag: %r:%s" % (key, value)
    # subcommands for launching other applications
    # if this is not empty, this will be a parent Application
    # this must be a dict of two-tuples,
    # the first element being the application class/import string
    # and the second being the help string for the subcommand
    subcommands = Dict()
    # parse_command_line will initialize a subapp, if requested
    subapp = Instance('traitlets.config.application.Application', allow_none=True)
    # extra command-line arguments that don't set config values
    extra_args = List(Unicode())
    cli_config = Instance(Config, (), {},
        help="""The subset of our configuration that came from the command-line
        We re-load this configuration after loading config files,
        to ensure that it maintains highest priority.
        """
    )
    _loaded_config_files = List()
    def __init__(self, **kwargs):
        SingletonConfigurable.__init__(self, **kwargs)
        # Ensure my class is in self.classes, so my attributes appear in command line
        # options and config files.
        cls = self.__class__
        if cls not in self.classes:
            if self.classes is cls.classes:
                # class attr, assign instead of insert
                cls.classes = [cls] + self.classes
            else:
                self.classes.insert(0, self.__class__)
    @observe('config')
    @observe_compat
    def _config_changed(self, change):
        """Log the new config at debug level whenever it changes."""
        super(Application, self)._config_changed(change)
        self.log.debug('Config changed:')
        self.log.debug(repr(change.new))
    @catch_config_error
    def initialize(self, argv=None):
        """Do the basic steps to configure me.
        Override in subclasses.
        """
        self.parse_command_line(argv)
    def start(self):
        """Start the app mainloop.
        Override in subclasses.
        """
        if self.subapp is not None:
            return self.subapp.start()
    def print_alias_help(self):
        """Print the alias part of the help."""
        if not self.aliases:
            return
        lines = []
        classdict = {}
        for cls in self.classes:
            # include all parents (up to, but excluding Configurable) in available names
            for c in cls.mro()[:-3]:
                classdict[c.__name__] = c
        for alias, longname in self.aliases.items():
            classname, traitname = longname.split('.',1)
            cls = classdict[classname]
            trait = cls.class_traits(config=True)[traitname]
            help = cls.class_get_trait_help(trait).splitlines()
            # reformat first line
            help[0] = help[0].replace(longname, alias) + ' (%s)'%longname
            if len(alias) == 1:
                # single-letter aliases are rendered as short options ('-x ')
                help[0] = help[0].replace('--%s='%alias, '-%s '%alias)
            lines.extend(help)
        # lines.append('')
        print(os.linesep.join(lines))
    def print_flag_help(self):
        """Print the flag part of the help."""
        if not self.flags:
            return
        lines = []
        for m, (cfg,help) in self.flags.items():
            prefix = '--' if len(m) > 1 else '-'
            lines.append(prefix+m)
            lines.append(indent(dedent(help.strip())))
        # lines.append('')
        print(os.linesep.join(lines))
    def print_options(self):
        """Print the 'Options' help section (flags, then aliases)."""
        if not self.flags and not self.aliases:
            return
        lines = ['Options']
        lines.append('-'*len(lines[0]))
        lines.append('')
        for p in wrap_paragraphs(self.option_description):
            lines.append(p)
            lines.append('')
        print(os.linesep.join(lines))
        self.print_flag_help()
        self.print_alias_help()
        print()
    def print_subcommands(self):
        """Print the subcommand part of the help."""
        if not self.subcommands:
            return
        lines = ["Subcommands"]
        lines.append('-'*len(lines[0]))
        lines.append('')
        for p in wrap_paragraphs(self.subcommand_description.format(
            app=self.name)):
            lines.append(p)
            lines.append('')
        for subc, (cls, help) in self.subcommands.items():
            lines.append(subc)
            if help:
                lines.append(indent(dedent(help.strip())))
            lines.append('')
        print(os.linesep.join(lines))
    def print_help(self, classes=False):
        """Print the help for each Configurable class in self.classes.
        If classes=False (the default), only flags and aliases are printed.
        """
        self.print_description()
        self.print_subcommands()
        self.print_options()
        if classes:
            help_classes = self.classes
            if help_classes:
                print("Class parameters")
                print("----------------")
                print()
                for p in wrap_paragraphs(self.keyvalue_description):
                    print(p)
                    print()
            for cls in help_classes:
                cls.class_print_help()
                print()
        else:
            print("To see all available configurables, use `--help-all`")
            print()
        self.print_examples()
    def document_config_options(self):
        """Generate rST format documentation for the config options this application
        Returns a multiline string.
        """
        return '\n'.join(c.class_config_rst_doc()
                         for c in self._classes_inc_parents())
    def print_description(self):
        """Print the application description."""
        for p in wrap_paragraphs(self.description):
            print(p)
            print()
    def print_examples(self):
        """Print usage and examples.
        This usage string goes at the end of the command line help string
        and should contain examples of the application's usage.
        """
        if self.examples:
            print("Examples")
            print("--------")
            print()
            print(indent(dedent(self.examples.strip())))
            print()
    def print_version(self):
        """Print the version string."""
        print(self.version)
    @catch_config_error
    def initialize_subcommand(self, subc, argv=None):
        """Initialize a subcommand with argv."""
        subapp,help = self.subcommands.get(subc)
        if isinstance(subapp, six.string_types):
            subapp = import_item(subapp)
        # clear existing instances
        self.__class__.clear_instance()
        # instantiate
        self.subapp = subapp.instance(parent=self)
        # and initialize subapp
        self.subapp.initialize(argv)
    def flatten_flags(self):
        """flatten flags and aliases, so cl-args override as expected.
        This prevents issues such as an alias pointing to InteractiveShell,
        but a config file setting the same trait in TerminalInteractiveShell
        getting inappropriate priority over the command-line arg.
        Only aliases with exactly one descendent in the class list
        will be promoted.
        """
        # build a tree of classes in our list that inherit from a particular
        # it will be a dict by parent classname of classes in our list
        # that are descendents
        mro_tree = defaultdict(list)
        for cls in self.classes:
            clsname = cls.__name__
            for parent in cls.mro()[1:-3]:
                # exclude cls itself and Configurable,HasTraits,object
                mro_tree[parent.__name__].append(clsname)
        # flatten aliases, which have the form:
        # { 'alias' : 'Class.trait' }
        aliases = {}
        for alias, cls_trait in self.aliases.items():
            cls,trait = cls_trait.split('.',1)
            children = mro_tree[cls]
            if len(children) == 1:
                # exactly one descendent, promote alias
                cls = children[0]
            aliases[alias] = '.'.join([cls,trait])
        # flatten flags, which are of the form:
        # { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
        flags = {}
        for key, (flagdict, help) in self.flags.items():
            newflag = {}
            for cls, subdict in flagdict.items():
                children = mro_tree[cls]
                # exactly one descendent, promote flag section
                if len(children) == 1:
                    cls = children[0]
                newflag[cls] = subdict
            flags[key] = (newflag, help)
        return flags, aliases
    @catch_config_error
    def parse_command_line(self, argv=None):
        """Parse the command line arguments."""
        argv = sys.argv[1:] if argv is None else argv
        self.argv = [ py3compat.cast_unicode(arg) for arg in argv ]
        if argv and argv[0] == 'help':
            # turn `ipython help notebook` into `ipython notebook -h`
            argv = argv[1:] + ['-h']
        if self.subcommands and len(argv) > 0:
            # we have subcommands, and one may have been specified
            subc, subargv = argv[0], argv[1:]
            if re.match(r'^\w(\-?\w)*$', subc) and subc in self.subcommands:
                # it's a subcommand, and *not* a flag or class parameter
                return self.initialize_subcommand(subc, subargv)
        # Arguments after a '--' argument are for the script IPython may be
        # about to run, not IPython itself. For arguments parsed here (help and
        # version), we want to only search the arguments up to the first
        # occurrence of '--', which we're calling interpreted_argv.
        try:
            interpreted_argv = argv[:argv.index('--')]
        except ValueError:
            interpreted_argv = argv
        if any(x in interpreted_argv for x in ('-h', '--help-all', '--help')):
            self.print_help('--help-all' in interpreted_argv)
            self.exit(0)
        if '--version' in interpreted_argv or '-V' in interpreted_argv:
            self.print_version()
            self.exit(0)
        # flatten flags&aliases, so cl-args get appropriate priority:
        flags,aliases = self.flatten_flags()
        loader = KVArgParseConfigLoader(argv=argv, aliases=aliases,
                                        flags=flags, log=self.log)
        # deepcopy so later config merges cannot mutate the CLI snapshot
        self.cli_config = deepcopy(loader.load_config())
        self.update_config(self.cli_config)
        # store unparsed args in extra_args
        self.extra_args = loader.extra_args
    @classmethod
    def _load_config_files(cls, basefilename, path=None, log=None, raise_config_file_errors=False):
        """Load config files (py,json) by filename and path.
        yield each config object in turn.
        """
        if not isinstance(path, list):
            path = [path]
        for path in path[::-1]:
            # path list is in descending priority order, so load files backwards:
            pyloader = cls.python_config_loader_class(basefilename+'.py', path=path, log=log)
            if log:
                log.debug("Looking for %s in %s", basefilename, path or os.getcwd())
            jsonloader = cls.json_config_loader_class(basefilename+'.json', path=path, log=log)
            loaded = []
            filenames = []
            for loader in [pyloader, jsonloader]:
                config = None
                try:
                    config = loader.load_config()
                except ConfigFileNotFound:
                    pass
                except Exception:
                    # try to get the full filename, but it will be empty in the
                    # unlikely event that the error raised before filefind finished
                    filename = loader.full_filename or basefilename
                    # problem while running the file
                    if raise_config_file_errors:
                        raise
                    if log:
                        log.error("Exception while loading config file %s",
                                filename, exc_info=True)
                else:
                    if log:
                        log.debug("Loaded config file: %s", loader.full_filename)
                if config:
                    # warn when an earlier-loaded file's values are overridden
                    for filename, earlier_config in zip(filenames, loaded):
                        collisions = earlier_config.collisions(config)
                        if collisions and log:
                            log.warning("Collisions detected in {0} and {1} config files."
                                " {1} has higher priority: {2}".format(
                                filename, loader.full_filename, json.dumps(collisions, indent=2),
                            ))
                    yield (config, loader.full_filename)
                    loaded.append(config)
                    filenames.append(loader.full_filename)
    @property
    def loaded_config_files(self):
        """Currently loaded configuration files"""
        return self._loaded_config_files[:]
    @catch_config_error
    def load_config_file(self, filename, path=None):
        """Load config files by filename and path."""
        filename, ext = os.path.splitext(filename)
        new_config = Config()
        for (config, filename) in self._load_config_files(filename, path=path, log=self.log,
            raise_config_file_errors=self.raise_config_file_errors,
        ):
            new_config.merge(config)
            if filename not in self._loaded_config_files: # only add to list of loaded files if not previously loaded
                self._loaded_config_files.append(filename)
        # add self.cli_config to preserve CLI config priority
        new_config.merge(self.cli_config)
        self.update_config(new_config)
    def _classes_in_config_sample(self):
        """
        Yields only classes with own traits, and their subclasses.
        Thus, produced sample config-file will contain all classes
        on which a trait-value may be overridden:
        - either on the class owning the trait,
        - or on its subclasses, even if those subclasses do not define
          any traits themselves.
        """
        cls_to_config = OrderedDict( (cls, bool(cls.class_own_traits(config=True)))
                          for cls
                          in self._classes_inc_parents())
        def is_any_parent_included(cls):
            # direct bases only; the fixed-point loop below handles deeper chains
            return any(b in cls_to_config and cls_to_config[b] for b in cls.__bases__)
        ## Mark "empty" classes for inclusion if their parents own-traits,
        # and loop until no more classes gets marked.
        #
        while True:
            to_incl_orig = cls_to_config.copy()
            cls_to_config = OrderedDict( (cls, inc_yes or is_any_parent_included(cls))
                              for cls, inc_yes
                              in cls_to_config.items())
            if cls_to_config == to_incl_orig:
                break
        for cl, inc_yes in cls_to_config.items():
            if inc_yes:
                yield cl
    def generate_config_file(self):
        """generate default config file from Configurables"""
        lines = ["# Configuration file for %s." % self.name]
        lines.append('')
        for cls in self._classes_in_config_sample():
            lines.append(cls.class_config_section())
        return '\n'.join(lines)
    def exit(self, exit_status=0):
        """Log shutdown and terminate the process with ``exit_status``."""
        self.log.debug("Exiting application: %s" % self.name)
        sys.exit(exit_status)
    @classmethod
    def launch_instance(cls, argv=None, **kwargs):
        """Launch a global instance of this Application
        If a global instance already exists, this reinitializes and starts it
        """
        app = cls.instance(**kwargs)
        app.initialize(argv)
        app.start()
#-----------------------------------------------------------------------------
# utility functions, for convenience
#-----------------------------------------------------------------------------
def boolean_flag(name, configurable, set_help='', unset_help=''):
    """Helper for building basic --trait, --no-trait flags.
    Parameters
    ----------
    name : str
        The name of the flag.
    configurable : str
        The 'Class.trait' string of the trait to be set/unset with the flag
    set_help : unicode
        help string for --name flag
    unset_help : unicode
        help string for --no-name flag
    Returns
    -------
    cfg : dict
        A dict with two keys: 'name', and 'no-name', for setting and unsetting
        the trait, respectively.
    """
    cls, trait = configurable.split('.')
    flags = {}
    # Build the enable/disable pair symmetrically; a falsy help string
    # falls back to a generated "set Class.trait=<value>" message.
    for flag, value, help_text, fallback in (
        (name, True, set_help, "set %s=True" % configurable),
        ('no-' + name, False, unset_help, "set %s=False" % configurable),
    ):
        flags[flag] = ({cls: {trait: value}}, help_text or fallback)
    return flags
def get_config():
    """Return the config of the global Application instance, if one exists.

    Falls back to an empty ``Config`` object when no Application has been
    initialized yet.
    """
    if not Application.initialized():
        return Config()
    return Application.instance().config
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@traitlets@py2@traitlets@config@application.py@.PATH_END.py
|
{
"filename": "simple_anim.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/animation/simple_anim.py",
"type": "Python"
}
|
"""
==================
Animated line plot
==================
Output generated via `matplotlib.animation.Animation.to_jshtml`.
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
# Initial frame: one period of sin(x), sampled every 0.01 rad.
fig, ax = plt.subplots()
x = np.arange(0, 2*np.pi, 0.01)
line, = ax.plot(x, np.sin(x))
def animate(i):
    # Frame i shifts the sine's phase by i/50 rad.  Only the y-data of the
    # existing artist changes, which is what makes blit=True effective.
    line.set_ydata(np.sin(x + i / 50))  # update the data.
    return line,
# interval=20 ms between frames; save_count caps frames kept for saving.
ani = animation.FuncAnimation(
    fig, animate, interval=20, blit=True, save_count=50)
# To save the animation, use e.g.
#
# ani.save("movie.mp4")
#
# or
#
# writer = animation.FFMpegWriter(
#     fps=15, metadata=dict(artist='Me'), bitrate=1800)
# ani.save("movie.mp4", writer=writer)
plt.show()
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@animation@simple_anim.py@.PATH_END.py
|
{
"filename": "radiative_transfer.py",
"repo_name": "dust-busters/DBNets",
"repo_path": "DBNets_extracted/DBNets-main/training/radiative_transfer.py",
"type": "Python"
}
|
import astropy.constants as k
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
import dsharp_opac as opacity
import oofargo
import pandas as pd
import numba as nb
# Internal grain density of the dust material.
rho_dust = 1.6686*u.g/u.cm**3#g/cm^3
# Stellar mass (CGS) and orbital radius of the planet (50 au, CGS).
m_star = k.M_sun.cgs
r_planet = k.au.cgs*50
# Observing wavelength.  NOTE(review): re-assigned below to the bare float
# 0.13 (presumably cm) before first use — confirm which value is intended.
obs_wl = 1.3 * u.mm
# Mean molecular weight of the gas.
mu = 2.3
# Dimensionless reference surface density (code units).
Sigma0 = 1e-3
# Conversion factor from code surface density to physical units: M*/r_p^2.
surf_rho_dim = m_star/(r_planet**2)
def get_grain_size(Sigma0, Stokes, sigma_slope, r):
    """Grain size corresponding to Stokes number ``Stokes`` at radius ``r``.

    Computes a = 2*Sigma_gas*St / (pi*rho_dust), where the gas surface density
    is the power law Sigma0*r**-sigma_slope rescaled to physical units by the
    module-level ``surf_rho_dim``.

    NOTE(review): the parameter ``Sigma0`` shadows the module-level constant
    of the same name — callers must pass it explicitly; confirm intended.
    """
    return 2*Sigma0*Stokes*(r**-sigma_slope)*surf_rho_dim/(np.pi*rho_dust)
# Load the DSHARP opacity tables (grain sizes, wavelengths, abs/sca opacities).
datafile = opacity.get_datafile('default_opacities_smooth.npz')
res = np.load(datafile)
# NOTE(review): overrides the 1.3 mm Quantity defined above with a unitless
# 0.13 — presumably the same wavelength in cm; confirm.
obs_wl = 0.13
a_birn = res['a']
lam = res['lam']
k_abs = res['k_abs']
k_sca = res['k_sca']
# Rebind `res` to the size-averaged opacities evaluated at 0.13 cm.
res = opacity.size_average_opacity(0.13, a_birn, lam, k_abs, k_sca)
def get_opacity(a, lamb):
    """Interpolate the size-averaged absorption opacity for grain size ``a``.

    ``a`` is an astropy Quantity (converted to cm); the result carries
    cm^2/g units.  NOTE(review): ``lamb`` is accepted but unused — the
    table ``res`` was precomputed at the module-level wavelength; confirm.
    """
    value = np.interp(a.to('cm')/u.cm, a_birn, res['ka'][0, :])*u.cm**2/u.g
    return value
def get_opac_map(a_map, lamb):
    """Opacity map for a 2-D map of grain sizes (thin wrapper over get_opacity)."""
    return get_opacity(a_map, lamb)
def radiative_transfer(data_path, index, i):
    """Emission temperature map Ts for simulation row ``i`` of ``index``.

    ``index`` is a pandas DataFrame of simulation parameters; ``data_path``
    points to the FARGO dust surface-density output read via ``oofargo``.
    Returns Td*(1 - exp(-tau)), i.e. the optically-thin-to-thick bridge.
    """
    # Dust surface density map (code units) on an (nr, ntheta) log-radial grid.
    data = oofargo.open_img(data_path,
                ntheta=index.loc[i, 'nx'].astype(int),
                nr = index.loc[i, 'ny'].astype(int),
                image_rmax=index.loc[i, 'rout'],
                ylog=True)
    # Simulation parameters: aspect ratio, flaring index, Stokes number,
    # surface-density slope.
    h0 = index.loc[i, 'AspectRatio']
    fi = index.loc[i, 'FlaringIndex']
    St = index.loc[i, 'InvStokes1']**-1
    slope = index.loc[i, 'SigmaSlope']
    # Radius grid broadcast to the full (nr, ntheta) map shape.
    r = np.linspace(0.4, index.loc[i, 'rout'], index.loc[i, 'ny'].astype(int)).reshape(-1,1)
    r = r*np.ones((index.loc[i, 'ny'].astype(int), index.loc[i, 'nx'].astype(int) ))
    # Dust temperature from the vertically-isothermal disk relation,
    # Td ∝ h^2 * (G M*/r_p) * r^(2*fi-1), converted to Kelvin.
    Td = ((mu*k.m_p/k.k_B)*(h0**2)*(k.G*m_star/r_planet)*(r**(2*fi-1))).to('K')
    # Grain size -> opacity -> optical depth of the dust map.
    a_map = get_grain_size(Sigma0, St, slope, r)
    opac = get_opac_map(a_map, obs_wl)
    tau = opac*data*surf_rho_dim
    Ts = Td*(np.ones(Td.shape)-np.exp(-tau))
    return Ts
|
dust-bustersREPO_NAMEDBNetsPATH_START.@DBNets_extracted@DBNets-main@training@radiative_transfer.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/sankey/node/hoverlabel/font/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``sankey.node.hoverlabel.font.color`` property.

    Generated plotly validator: defaults ``array_ok=True`` (per-node colors
    allowed) and ``edit_type="calc"``, both overridable via kwargs.
    """
    def __init__(
        self, plotly_name="color", parent_name="sankey.node.hoverlabel.font", **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@sankey@node@hoverlabel@font@_color.py@.PATH_END.py
|
{
"filename": "convolve_vlsr.ipynb",
"repo_name": "tere-valdivia/Barnard_5_infall",
"repo_path": "Barnard_5_infall_extracted/Barnard_5_infall-main/B5_NOEMA_30m/gaussfit/convolve_vlsr.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# original resolution: 4.07672 arcsec x 3.78778 arcsec, pa: -81.4035 deg
# matching kernel: 4.65325 arcsec x 4.40231 arcsec, pa: 8.59652 deg
# target resolution: 6 arcsec x 6 arcsec, pa: 0 deg
import numpy as np
from astropy.modeling.models import Gaussian2D
import astropy.units as u
from photutils.psf import create_matching_kernel, TopHatWindow
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
from astropy.convolution import convolve
```
```python
filenameHC3Nvlsr = 'B5-NOEMA+30m-H3CN-10-9_cut_K_1G_fitparams_filtered_Vlsr'
filenameNH3vlsr = '../../B5_wide_multiple/data/B5_VLA_GBT_model_vc_QA'
maskfile = 'B5-NOEMA+30m-H3CN-10-9_cut_K_mask'
filename_convolved = filenameHC3Nvlsr + '_conv_NH3'
```
```python
# we first need to transform all angle units to pixels
headerHC3N = fits.getheader(filenameHC3Nvlsr+'.fits')
headerNH3 = fits.getheader(filenameNH3vlsr+'.fits')
wcsHC3N = WCS(headerHC3N)
HC3Nvlsr = fits.getdata(filenameHC3Nvlsr+'.fits')
mask = fits.getdata(maskfile+'.fits')
kernelsize = 29
xcent = int(round(kernelsize/2))
pixsizeHC3N = np.abs(headerHC3N['CDELT2']) # pixel size in degrees
bmaj_original = headerHC3N['BMAJ']
bmin_original = headerHC3N['BMIN']
bpa_original = headerHC3N['BPA'] * u.deg
sigmamaj_original_pix = (bmaj_original/pixsizeHC3N) / np.sqrt(8*np.log(2))
sigmamin_original_pix = (bmin_original/pixsizeHC3N) / np.sqrt(8*np.log(2))
bmaj_target = headerNH3['BMAJ']
bmin_target = headerNH3['BMIN']
bpa_target = headerNH3['BPA'] * u.deg
sigmamaj_target_pix = (bmaj_target/pixsizeHC3N) / np.sqrt(8*np.log(2))
sigmamin_target_pix = (bmin_target/pixsizeHC3N) / np.sqrt(8*np.log(2))
```
```python
y, x = np.mgrid[0:kernelsize, 0:kernelsize]
beamoriginalg = Gaussian2D(100, xcent, xcent, sigmamaj_original_pix, sigmamin_original_pix, theta=bpa_original)
beamtargetg = Gaussian2D(100, xcent, xcent, sigmamaj_target_pix, sigmamin_target_pix, theta=bpa_target)
beamoriginal = beamoriginalg(x, y)
beamtarget = beamtargetg(x, y)
beamtarget /= np.sum(beamtarget)
```
```python
window = TopHatWindow(beta=0.45)
matchingkernel = create_matching_kernel(beamoriginal, beamtarget, window=window)
```
```python
HC3Nvlsrconvolved = convolve(HC3Nvlsr, matchingkernel)
HC3Nvlsrconvolved = np.where(mask, HC3Nvlsrconvolved, np.nan)
```
WARNING: nan_treatment='interpolate', however, NaN values detected post convolution. A contiguous region of NaN values, larger than the kernel size, are present in the input array. Increase the kernel size to avoid this. [astropy.convolution.convolve]
```python
newheaderHC3N = headerHC3N.copy()
newheaderHC3N['BMAJ'] = bmaj_target
newheaderHC3N['BMIN'] = bmin_target
newheaderHC3N['BPA'] = bpa_target.value
fits.writeto(filename_convolved+'.fits', HC3Nvlsrconvolved, newheaderHC3N)
```
```python
```
```python
```
|
tere-valdiviaREPO_NAMEBarnard_5_infallPATH_START.@Barnard_5_infall_extracted@Barnard_5_infall-main@B5_NOEMA_30m@gaussfit@convolve_vlsr.ipynb@.PATH_END.py
|
{
"filename": "MobileNetV2Spec.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_model_maker/image_classifier/MobileNetV2Spec.md",
"type": "Markdown"
}
|
page_type: reference
description: Creates MobileNet v2 model spec. See also: <a href="../../tflite_model_maker/image_classifier/ModelSpec"><code>tflite_model_maker.image_classifier.ModelSpec</code></a>.
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_model_maker.image_classifier.MobileNetV2Spec" />
<meta itemprop="path" content="Stable" />
</div>
# tflite_model_maker.image_classifier.MobileNetV2Spec
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
</table>
Creates MobileNet v2 model spec. See also: <a href="../../tflite_model_maker/image_classifier/ModelSpec"><code>tflite_model_maker.image_classifier.ModelSpec</code></a>.
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>tflite_model_maker.image_classifier.MobileNetV2Spec(
*,
uri='https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4',
compat_tf_versions=2,
input_image_shape=None,
name='mobilenet_v2'
)
</code></pre>
<!-- Placeholder for "Used in" -->
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2"><h2 class="add-link">Args</h2></th></tr>
<tr>
<td>
`uri`<a id="uri"></a>
</td>
<td>
str, URI to the pretrained model.
</td>
</tr><tr>
<td>
`compat_tf_versions`<a id="compat_tf_versions"></a>
</td>
<td>
list of int, compatible TF versions.
</td>
</tr><tr>
<td>
`input_image_shape`<a id="input_image_shape"></a>
</td>
<td>
list of int, input image shape. Default: [224, 224].
</td>
</tr><tr>
<td>
`name`<a id="name"></a>
</td>
<td>
str, model spec name.
</td>
</tr>
</table>
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_model_maker@image_classifier@MobileNetV2Spec.md@.PATH_END.py
|
{
"filename": "panel_c_d_e.py",
"repo_name": "nespinoza/wasp39-terminators",
"repo_path": "wasp39-terminators_extracted/wasp39-terminators-main/extended-figure3/panel_c_d_e.py",
"type": "Python"
}
|
import numpy as np
import matplotlib
from matplotlib.gridspec import GridSpec
import matplotlib.ticker as tck
import matplotlib.pyplot as plt
# NOTE(review): font_manager, pickle and glob appear unused below — confirm.
import matplotlib.font_manager as font_manager
import pickle
import glob
# Global figure styling: Arial, embedded (Type 42) PDF fonts.
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Arial'
matplotlib.rcParams['pdf.fonttype']=42
matplotlib.rcParams['axes.linewidth']=1
fs=10
#fig = plt.figure(figsize=[6.69, 6.0])
fig = plt.figure(figsize=[4.3, 4.0])
# Panel layout: each experiment gets `delta` grid rows, separated by `hspace`.
hspace = 10
delta = 50
all_experiments = ['bin1_eureka_fixedlds_fake_badtiming',
                   'bin1_eureka_fixedlds_fake_badlds_0.01',
                   'bin1_eureka_fixedlds_fake_badecc_omega10']
all_names = [r'c. wrong timing ($3\sigma_{T_0}$)',
             r'd. wrong limb-darkening ($\Delta u_i = 0.01$)',#r'Case 3: wrong eccentricity ($3\sigma_{e}$)',
             r'e. wrong eccentricity ($3\sigma_{e}$)']
gs = GridSpec(len(all_experiments)*(delta + hspace) - hspace, 1, figure=fig, wspace=0.05, hspace=0)
axs = []
for i in range(len(all_experiments)):
    # Residual spectra at two binnings (30- and 100-point) for experiment i.
    wavs_30, thediff_30, thediff_err_up_30, thediff_err_down_30, p1_30, p1_err_30, p2_30, p2_err_30, covariances_30 = np.loadtxt(all_experiments[i]+'_30.txt', unpack = True)
    wavs_100, thediff_100, thediff_err_up_100, thediff_err_down_100, p1_100, p1_err_100, p2_100, p2_err_100, covariances_100 = np.loadtxt(all_experiments[i]+'_100.txt', unpack = True)
    axs.append(fig.add_subplot( gs[i*(delta + hspace):i*(delta + hspace) + delta, 0]) )
    ax = axs[-1]
    ax.text(2.1,800, s = all_names[i])
    # Zero-residual reference line.
    ax.plot([0.6,5.3], [0., 0.], '--', color = 'black')
    # High-resolution points faint, binned points bold black.
    ax.errorbar(wavs_100, thediff_100, [thediff_err_down_100, thediff_err_up_100], fmt = '.', alpha = 0.5, color = 'grey')
    ax.errorbar(wavs_30, thediff_30, [thediff_err_down_30, thediff_err_up_30], fmt = 'o', ms = 10, elinewidth = 3, mfc = 'black', mec = 'black', ecolor = 'black')
    ax.set_xlim(2.0,5.3)
    ax.set_ylim(-1500,1500)
    ax.yaxis.set_major_locator(tck.FixedLocator([-1000,-500,0,500,1000]))
    ax.set_ylabel('o-c (ppm)', fontsize = fs, fontstyle = 'normal')
    ax.tick_params(which = 'both', direction = 'in', labelsize = fs, axis='both', top=True, left=True, right=True, zorder=100)
    # Only the bottom panel (i == 2) keeps its x tick labels.
    if i != 2:
        ax.axes.xaxis.set_ticklabels([])
    ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
ax.set_xlabel('wavelength (um)', fontsize = fs, fontstyle = 'normal')
ax.tick_params(which = 'both', direction = 'in', labelsize = fs, axis='both', top=True, left=True, right=True, zorder=100)
plt.savefig('limb_all_exps.pdf', dpi=350, bbox_inches='tight', transparent=True)
|
nespinozaREPO_NAMEwasp39-terminatorsPATH_START.@wasp39-terminators_extracted@wasp39-terminators-main@extended-figure3@panel_c_d_e.py@.PATH_END.py
|
{
"filename": "_textfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/scattergl/unselected/_textfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergl.unselected"
_path_str = "scattergl.unselected.textfont"
_valid_props = {"color"}
# color
# -----
@property
def color(self):
"""
Sets the text font color of unselected points, applied only
when a selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
    @color.setter
    def color(self, val):
        # Item assignment routes through the base-class machinery, which
        # validates/coerces the value before storing it.
        self["color"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Human-readable description of every settable property; the plotly
        # base class uses this text when building constructor docstrings and
        # validation error messages.  The string content must not be altered.
        return """\
        color
            Sets the text font color of unselected points, applied
            only when a selection exists.
        """
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattergl.unse
lected.Textfont`
color
Sets the text font color of unselected points, applied
only when a selection exists.
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergl.unselected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.unselected.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@scattergl@unselected@_textfont.py@.PATH_END.py
|
{
"filename": "pyLIMA_example_1.py",
"repo_name": "ebachelet/pyLIMA",
"repo_path": "pyLIMA_extracted/pyLIMA-master/examples/pyLIMA_example_1.py",
"type": "Python"
}
|
'''
Welcome to pyLIMA (v2) tutorial 1!

In this tutorial you will learn how pyLIMA works by fitting a simulated
microlensing data set.  It covers how to read in data files, how to call
the different fitting routines, and how to make plots.

Please take some time to familiarize yourself with the pyLIMA documentation.
'''
### Import the required libraries.
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LogNorm
from pyLIMA.fits import DE_fit
from pyLIMA.fits import LM_fit
from pyLIMA.fits import MCMC_fit
from pyLIMA.models import FSPL_model
from pyLIMA.models import PSPL_model
from pyLIMA.outputs import pyLIMA_plots
from pyLIMA import event
from pyLIMA import telescopes
### Create a new EVENT object and give it a name.
your_event = event.Event()
your_event.name = 'My event name'
### You now need to associate some data sets with this EVENT.
### For this example, you will use simulated I-band data sets from two
### telescopes, OGLE and LCO.  The data sets are pre-formatted:
### column 1 is the date, column 2 the magnitude and column 3 the
### uncertainty in the magnitude.
data_1 = np.loadtxt('./data/Survey_1.dat')
telescope_1 = telescopes.Telescope(name='OGLE',
                                   camera_filter='I',
                                   lightcurve=data_1.astype(float),
                                   lightcurve_names=['time', 'mag', 'err_mag'],
                                   lightcurve_units=['JD', 'mag', 'mag'])
data_2 = np.loadtxt('./data/Followup_1.dat')
telescope_2 = telescopes.Telescope(name='LCO',
                                   camera_filter='I',
                                   lightcurve=data_2.astype(float),
                                   lightcurve_names=['time', 'mag', 'err_mag'],
                                   lightcurve_units=['JD', 'mag', 'mag'])
### Append these two telescope data sets to your EVENT object.
your_event.telescopes.append(telescope_1)
your_event.telescopes.append(telescope_2)
### Define the survey telescope that all other data sets are aligned to.
### We recommend using the data set with the most measurements covering
### the greatest time span of observations:
your_event.find_survey('OGLE')
### Run a quick sanity check on your input.
your_event.check_event()
### Next, construct the MODEL you want to fit and link it to the EVENT you
### prepared.  Let's go with a basic PSPL, without second-order effects:
pspl = PSPL_model.PSPLmodel(your_event)
### Let's try fitting the event with a simple Levenberg-Marquardt (LM)
### algorithm.  Define the FITTING ALGORITHM you want to use for the MODEL
### you prepared.  For more information about the models and fitting
### algorithms available, please consult the pyLIMA documentation.
### Initialize the fit by declaring a simple FIT object using the MODEL:
my_fit = LM_fit.LMfit(pspl)
### Before we run it, let's have a look at the initial fit parameters:
my_fit.fit_parameters
### Now fit the MODEL to the EVENT. This may take a few seconds.
my_fit.fit()
my_fit.fit_outputs()
### You can now recall the fit results on the screen by executing:
my_fit.fit_results
### Any entry in the output dictionary can be recalled by its key.
### For example, the best-fit results are accessed like this:
my_fit.fit_results['best_model']
### If you don't remember which parameter each entry represents, the
### descriptions are always available from fit_parameters.
my_fit.fit_parameters.keys()
### Let's see some plots using the pyLIMA plotting tools.
pyLIMA_plots.plot_lightcurves(pspl, my_fit.fit_results['best_model'])
plt.show()
### Let's try another fit with the differential evolution (DE) algorithm.
### This will take longer...
my_fit2 = DE_fit.DEfit(pspl,loss_function='chi2')
my_fit2.fit()
### Look at the results:
pyLIMA_plots.plot_lightcurves(pspl, my_fit2.fit_results['best_model'])
plt.show()
### You can use the zoom-in function to look at the peak.
### There is strong evidence of finite-source effects in this event, so
### let's try to fit for them.  You will need the FSPL MODEL to do this:
fspl = FSPL_model.FSPLmodel(your_event)
### You can still use the FITTING ALGORITHM that you imported previously.
### Let's just use DE_fit for this:
my_fit3 = DE_fit.DEfit(fspl, loss_function='chi2')
my_fit3.fit()
### Let's see some plots. You can zoom close to the peak to see what is
### going on.
pyLIMA_plots.plot_lightcurves(fspl, my_fit3.fit_results['best_model'])
plt.show()
### There is evidently still some structure in the residuals. Could be
### some limb darkening going on!  Let's try to fit for it.
### Set the microlensing limb-darkening coefficients (gamma) per telescope:
your_event.telescopes[0].ld_gamma = 0.5
your_event.telescopes[1].ld_gamma = 0.5
### Fit again:
my_fit4 = DE_fit.DEfit(fspl, loss_function='chi2')
my_fit4.fit()
### And plot it. Then zoom at the peak again.
pyLIMA_plots.plot_lightcurves(fspl, my_fit4.fit_results['best_model'])
plt.show()
### You can use the results of a previous good fit as initial guesses
### for the parameters in another fit:
guess_parameters = my_fit4.fit_results['best_model']
print(guess_parameters)
### These parameter guesses can now be used to start an MCMC run, for
### example.  Using MCMC is recommended when you want to explore the
### posterior distribution of the parameters.
### Let's fit again using MCMC. This might take some time ...
my_fit5 = MCMC_fit.MCMCfit(fspl)
my_fit5.model_parameters_guess = guess_parameters
my_fit5.fit()
### Now your MCMC run is complete. Congratulations!
### You can now plot the chains and explore how they evolve per parameter.
### For example, to see how the chains for u0 evolve, do:
plt.plot(my_fit5.fit_results['MCMC_chains'][:, :, 1])
plt.show()
### The first part of the slice [:,:,1] is the iteration number, the
### second the chain number, and the last the parameter number (with the
### likelihood appended at the end).  The parameters are in the same order
### as in my_fit5.fit_parameters.keys().
### You can compare the MCMC distributions with the input values that were
### used to generate the light curve.  For this, let's only consider the
### chains after the 1000th iteration (i.e. after burn-in).
### [:7] at the end is just so only the first 7 digits are printed.
MCMC_results = my_fit5.fit_results['MCMC_chains']
print('Parameters', ' Model', ' Fit', ' Errors')
print('-----------------------------------')
print('t_0:', ' 79.9309 ', str(np.median(MCMC_results[1000:, :, 0]))[:7], '',
      str(np.std(MCMC_results[1000:, :, 0]))[:7])
print('u_0:', ' 0.00826 ', str(np.median(MCMC_results[1000:, :, 1]))[:7], '',
      str(np.std(MCMC_results[1000:, :, 1]))[:7])
print('t_E:', ' 10.1171 ', str(np.median(MCMC_results[1000:, :, 2]))[:7], '',
      str(np.std(MCMC_results[1000:, :, 2]))[:7])
print('rho:', ' 0.02268 ', str(np.median(MCMC_results[1000:, :, 3]))[:7], '',
      str(np.std(MCMC_results[1000:, :, 3]))[:7])
### You can now plot the correlation between any two parameters.
### For example, plot u0 against tE:
plt.hist2d(MCMC_results[1000:, :, 1].ravel(), MCMC_results[1000:, :, 2].ravel(),
           norm=LogNorm(), bins=50)
plt.xlabel('u0')
plt.ylabel('tE')
plt.show()
### Consult the matplotlib.pyplot.hist2d documentation for additional
### arguments.
### This concludes tutorial 1.
|
ebacheletREPO_NAMEpyLIMAPATH_START.@pyLIMA_extracted@pyLIMA-master@examples@pyLIMA_example_1.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/streamtube/lighting/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Import validators eagerly on interpreters without module-level
# __getattr__ (pre-3.7, PEP 562) and under static type checking;
# otherwise register them for lazy, on-demand import.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._vertexnormalsepsilon import VertexnormalsepsilonValidator
    from ._specular import SpecularValidator
    from ._roughness import RoughnessValidator
    from ._fresnel import FresnelValidator
    from ._facenormalsepsilon import FacenormalsepsilonValidator
    from ._diffuse import DiffuseValidator
    from ._ambient import AmbientValidator
else:
    from _plotly_utils.importers import relative_import

    # Dotted paths of every validator exposed by this package; each is
    # resolved only when first accessed.
    _lazy_validators = [
        "._vertexnormalsepsilon.VertexnormalsepsilonValidator",
        "._specular.SpecularValidator",
        "._roughness.RoughnessValidator",
        "._fresnel.FresnelValidator",
        "._facenormalsepsilon.FacenormalsepsilonValidator",
        "._diffuse.DiffuseValidator",
        "._ambient.AmbientValidator",
    ]
    __all__, __getattr__, __dir__ = relative_import(__name__, [], _lazy_validators)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@streamtube@lighting@__init__.py@.PATH_END.py
|
{
"filename": "TestWeights.py",
"repo_name": "dokester/BayesicFitting",
"repo_path": "BayesicFitting_extracted/BayesicFitting-master/BayesicFitting/test/TestWeights.py",
"type": "Python"
}
|
# run with : python3 -m unittest TestWeights
import unittest
import numpy as np
import math
import os
from numpy.testing import assert_array_equal as assertAE
from numpy.testing import assert_array_almost_equal as assertAAE
import matplotlib.pyplot as plt
from BayesicFitting import *
from BayesicFitting import formatter as fmt
class Test( unittest.TestCase ) :
    """
    Check that weights and accuracies are handled consistently across the
    various Fitter classes and NestedSampler.

    Fixes in this revision:
      * testWeights2 printed the wrong banner ("1" instead of "2").
      * testPlot used ``numpy.zeros`` although the file imports numpy as
        ``np`` (NameError when DOPLOT is set).
      * nstdtst called the misspelled ``assertAAe`` (NameError).
    """

    def __init__( self, testname ):
        super( ).__init__( testname )
        # Plots are produced only when the DOPLOT environment variable is "1".
        self.doplot = ( "DOPLOT" in os.environ and os.environ["DOPLOT"] == "1" )

    def testWeights1( self ) :
        """Run the standard weight checks with Fitter."""
        print( "====testWeights 1====================" )
        self.stdtst1( Fitter )

    def testWeights2( self ) :
        """Run the accuracy/weight equivalence checks with Fitter."""
        print( "====testWeights 2====================" )    # was "1": wrong banner
        self.stdtst2( Fitter, verbose=0 )

    def testWeights3( self ) :
        """Run the standard weight checks with LevenbergMarquardtFitter."""
        print( "====testWeights 3====================" )
        self.stdtst1( LevenbergMarquardtFitter )

    def testWeights4( self ) :
        """Run the standard weight checks with QRFitter."""
        print( "====testWeights 4====================" )
        self.stdtst1( QRFitter )

    def testWeights5( self ) :
        """Run the standard weight checks with CurveFitter."""
        print( "====testWeights 5====================" )
        self.stdtst1( CurveFitter )

    ### Test is OK but fails exact outcomes.
    ### because we need order = 1 on AmoebaFitter.
#    def testWeights6( self ) :
#        print( "====testWeights 6====================" )
#
#        self.stdtst1( AmoebaFitter, order=1 )

    def testPlot( self ) :
        """
        Compare the analytic Fitter evidence with a histogram of repeated
        NestedSampler evidence estimates.  Only runs when DOPLOT is set.
        """
        if not self.doplot :
            return
        nn = 5
        x = np.arange( nn, dtype=float )
        y = 2 * ( x % 2 ) - 1
        y[2] = 0.0
        x4 = np.arange( 4 * nn, dtype=float )
        y4 = np.append( y, y )
        y4 = np.append( y4, y4 )
        upr = UniformPrior( limits=[-10,10] )
        pm = PolynomialModel( 0 )
        pm.priors = upr
        bf = Fitter( x, pm )
        par = bf.fit( y )
        evi = bf.evidence
        print( "evidence %8.3f" % evi )
        s = 0
        ns = 10
        # was numpy.zeros: only np is imported, so that raised NameError.
        sam = np.zeros( ns, dtype=float )
        for k in range( ns ):
            evid1, prec1, logL1 = self.nstst( x, y, pm, seed=2345+k*56 )
            print( "%4d NSevid %8.3f +- %.3f" % ( k, evid1, prec1 ) )
            sam[k] = evid1
            s += evid1
        aver = s / ns
        print( "aver %8.3f" % aver )
        nbin = 5
        plt.hist( sam, nbin, facecolor='g', alpha=0.5 )
        plt.plot( [evi,evi], [0, ns/nbin], 'r-' )
        plt.plot( [aver,aver], [0, ns/nbin], 'b-' )
        plt.show()

    def nstdtst( self, x, y, mdl, ftr, wgt=None, acc=None, verbose=0 ) :
        """
        Fit with a plain fitter and with NestedSampler and assert that the
        two agree on parameters, stdevs, logL and (within precision) evidence.

        NOTE: currently unused; the call in stdtst1 is commented out.
        Returns ( par, std, scl, var, chi, swt ) from the plain fit.
        """
        par = ftr.fit( y, weights=wgt, accuracy=acc )
        std = ftr.stdevs
        cov = ftr.covariance
        hes = ftr.hessian
        var = ftr.makeVariance()
        chi = ftr.chisq
        scl = ftr.scale
        swt = ftr.sumwgt
        print( "pars ", par, "stdv ", std, "hess ", hes, "cov ", cov )
        print( "chisq %8.3f scale %8.3f sumwgt %8.3f" % ( chi, scl, swt ) )
        assertAE( par, ftr.parameters )
        assertAE( std, np.sqrt( np.diag( cov ) ) )
        assertAE( scl, np.sqrt( var ) )
        evi = ftr.evidence
        lgl = ftr.logLikelihood
        lgo = ftr.logOccam
        print( "Evid %8.3f logL %8.3f logOcc %8.3f" % ( evi, lgl, lgo ) )
        ns = NestedSampler( x, mdl, y, weights=wgt, accuracy=acc, verbose=verbose,
                            limits=[0.01,10] )
        nevi = ns.sample()
        npar = ns.parameters
        nstd = ns.stdevs
        nlgl = ns.walkers[0].logL
        prec = ns.precision
        print( "NSpars ", ns.parameters, "stdv ", ns.stdevs )
        print( "NSevid %8.3f +- %.3f logL %.3f" % ( nevi, prec, lgl ) )
        assertAAE( par, npar )
        assertAAE( std, nstd )
        # was assertAAe: misspelled name raised NameError.
        assertAAE( lgl, nlgl )
        self.assertTrue( abs( evi - nevi ) < 2 * prec )
        return ( par, std, scl, var, chi, swt )

    def stdtst( self, y, pm, bf, wgt=None, acc=None ) :
        """
        Fit y with fitter bf, print diagnostics and assert internal
        consistency (parameters, stdevs vs covariance, scale vs variance).

        Returns ( par, std, scl, var, chi, swt ).
        """
        par = bf.fit( y, weights=wgt, accuracy=acc )
        std = bf.stdevs
        cov = bf.covariance
        hes = bf.hessian
        var = bf.makeVariance()
        chi = bf.chisq
        scl = bf.scale
        swt = bf.sumwgt
        print( "pars ", par, "stdv ", std, "hess ", hes, "cov ", cov )
        print( "chisq %8.3f scale %8.3f sumwgt %8.3f" % ( chi, scl, swt ) )
        assertAE( par, bf.parameters )
        assertAE( std, np.sqrt( np.diag( cov ) ) )
        assertAE( scl, np.sqrt( var ) )
        if pm.hasPriors() :
            evi = bf.evidence
            print( "Evid %8.3f logL %8.3f logOcc %8.3f" % ( evi, bf.logLikelihood, bf.logOccam ) )
        return ( par, std, scl, var, chi, swt )

    def nstst( self, x, y, mdl, wgt=None, acc=None, verbose=0, seed=2023 ) :
        """Run NestedSampler once; return ( evidence, precision, logL )."""
        ns = NestedSampler( x, mdl, y, weights=wgt, accuracy=acc, verbose=verbose, seed=seed )
        evi = ns.sample()
        print( "NSpars ", ns.parameters, "stdv ", ns.stdevs )
        return ( evi, ns.precision, ns.walkers[0].logL )

    def stdtst2( self, myFitter, verbose=0 ) :
        """
        Check that per-point weights and per-point accuracies that encode
        the same information give identical fit results and evidence.
        """
        nn = 5
        x = np.arange( nn, dtype=float )
        y = 2 * ( x % 2 ) - 1
        y[2] = 0.0
        x4 = np.arange( 4 * nn, dtype=float )
        y4 = np.append( y, y )
        y4 = np.append( y4, y4 )
        upr = UniformPrior( limits=[-10,10] )
        pm = PolynomialModel( 0 )
        pm.priors = upr
        bf = myFitter( x, pm )
        print( "\n %s" % bf )
        wgt = None
        print( "\n======= 5 data points; weights = ", wgt )   # was "weighs": typo
        print( "ydata ", y )
        ( par1, std1, scl1, var1, chi1, swt1 ) = self.stdtst( y, pm, bf, wgt=wgt )
        evi1 = bf.evidence
        evid1, prec1, logL1 = self.nstst( x, y, pm, wgt=wgt, verbose=verbose )
        print( "NSevid %8.3f +- %.3f logL %.3f" % ( evid1, prec1, logL1 ) )
        print( "\n======= 20 data points; weights = ", wgt )
        print( "ydata ", y4 )
        pm = PolynomialModel( 0 )
        pm.priors = upr
        bf = myFitter( x4, pm )
        ( par2, std2, scl2, var2, chi2, swt2 ) = self.stdtst( y4, pm, bf )
        evid2, prec2, logL2 = self.nstst( x4, y4, pm, verbose=verbose )
        print( "NSevid %8.3f +- %.3f logL %.3f" % ( evid2, prec2, logL2 ) )
        sq15 = 1.0 / math.sqrt( 1.5 )
        acc = np.asarray( [2, 0.5, sq15, 2.0, 0.5], dtype=float )
        wgt = 1.0 / ( acc * acc )
        # normalize the weights to nn (=5),
        # making case 3 completely the same as case 1 (also for the evidence)
        wgt *= 5 / np.sum( wgt )
        acc = 1 / np.sqrt( wgt )
        y *= acc
        print( "\n======= 5 data points; weights = ", fmt( wgt, max=None ) )
        print( "ydata ", y )
        pm = PolynomialModel( 0 )
        pm.priors = upr
        bf = myFitter( x, pm )
        ( par3, std3, scl3, var3, chi3, swt3 ) = self.stdtst( y, pm, bf, wgt=wgt )
        evi3 = bf.evidence
        evid3, prec3, logL3 = self.nstst( x, y, pm, wgt=wgt, verbose=verbose )
        print( "NSevid %8.3f +- %.3f logL %.3f" % ( evid3, prec3, logL3 ) )
        print( "\n======= 5 data points; accuracy = ", fmt( acc, max=None ) )
        print( "ydata ", y )
        pm = PolynomialModel( 0 )
        pm.priors = upr
        bf = myFitter( x, pm )
        ( par4, std4, scl4, var4, chi4, swt4 ) = self.stdtst( y, pm, bf, acc=acc )
        evid4, prec4, logL4 = self.nstst( x, y, pm, acc=acc, verbose=verbose )
        print( "NSevid %8.3f +- %.3f logL %.3f" % ( evid4, prec4, logL4 ) )
        ## all parameters are the same
        assertAAE( par1, par2 )
        assertAAE( par3, par4 )
        assertAAE( par1, par4 )
        assertAAE( evi1, evi3 )
        assertAAE( chi1, chi3 )
        assertAAE( chi1, chi4 )
        assertAAE( scl1, scl3 )
        assertAAE( scl1, scl4 )
        assertAAE( swt1, swt3 )
        assertAAE( swt1, swt4 )
        assertAAE( std1, std3 )
        assertAAE( std1, std4 )

    def stdtst1( self, myFitter, order=0 ) :
        """
        Check the standard relations between unweighted, weighted (w=4),
        4x-duplicated and accuracy-based fits for the given fitter class.
        """
        nn = 5
        x = np.arange( nn, dtype=float )
        y = x % 2
        y[2] = 0.5
        x4 = np.arange( 4*nn, dtype=float )
        y4 = x4 % 2
        y4[range(2,20,5)] = 0.5
        upr = UniformPrior( limits=[-10,10] )
#        printclass( upr )
        pm = PolynomialModel( order )
        pm.priors = upr
#        printclass( pm )
        bf = myFitter( x, pm )
        print( "\n %s" % bf )
        print( "\n======= 5 data points; no weights ===" )
        ( par1, std1, scl1, var1, chi1, swt1 ) = self.stdtst( y, pm, bf )
        evid1, prec1, logL1 = self.nstst( x, y, pm )
        print( "NSevid %8.3f +- %.3f logL %.3f" % ( evid1, prec1, logL1 ) )
#        ( par1, std1, scl1, var1, chi1, swt1 ) = self.nstdtst( x, y, pm, bf )
        print( "\n======= 20 data points; no weights ===" )
        pm = PolynomialModel( order )
        pm.priors = upr
        bf = myFitter( x4, pm )
        ( par2, std2, scl2, var2, chi2, swt2 ) = self.stdtst( y4, pm, bf )
        print( "\n======= 5 data points; weights = 4 ===" )
        pm = PolynomialModel( order )
        pm.priors = upr
        bf = myFitter( x, pm )
        w = np.zeros( nn, dtype=float ) + 4
        ( par3, std3, scl3, var3, chi3, swt3 ) = self.stdtst( y, pm, bf, wgt=w )
        print( "\n======= 5 data points; accuracy = 0.5 ===" )
        pm = PolynomialModel( order )
        pm.priors = upr
        bf = myFitter( x, pm )
        acc = np.ones( nn, dtype=float ) / 2
#        acc = np.ones( nn, dtype=float )
        print( "ydata ", y )
        ( par4, std4, scl4, var4, chi4, swt4 ) = self.stdtst( y, pm, bf, acc=acc )
        evid4, prec4, logL4 = self.nstst( x, y, pm, acc=acc )
        print( "NSevid %8.3f +- %.3f logL %.3f" % ( evid4, prec4, logL4 ) )
        ## all parameters are the same
        assertAAE( par1, par2 )
        assertAAE( par3, par4 )
        assertAAE( par1, par4 )
        ## 2 and 3 are completely equal
        assertAAE( std2, std3 )
        assertAAE( chi2, chi3 )
        assertAAE( scl2, scl3 )
        assertAAE( var2, var3 )
        assertAAE( swt2, swt3 )
        assertAAE( std1, std4 )
        assertAAE( scl1 * 2, scl4 )
if __name__ == '__main__':
    # Run the full test suite when executed directly.
    unittest.main( )
|
dokesterREPO_NAMEBayesicFittingPATH_START.@BayesicFitting_extracted@BayesicFitting-master@BayesicFitting@test@TestWeights.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.