metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "access.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/jedi/py2/jedi/evaluate/compiled/access.py",
"type": "Python"
}
|
import inspect
import types
import sys
from textwrap import dedent
import operator as op
from collections import namedtuple
from jedi._compatibility import unicode, is_py3, builtins, \
py_version, force_unicode, print_to_stderr
from jedi.evaluate.compiled.getattr_static import getattr_static
# Type of C-implemented method descriptors such as ``str.replace``.
MethodDescriptorType = type(str.replace)
# These are not considered classes and access is granted even though they have
# a __class__ attribute.
NOT_CLASS_TYPES = (
    types.BuiltinFunctionType,
    types.CodeType,
    types.FrameType,
    types.FunctionType,
    types.GeneratorType,
    types.GetSetDescriptorType,
    types.LambdaType,
    types.MemberDescriptorType,
    types.MethodType,
    types.ModuleType,
    types.TracebackType,
    MethodDescriptorType
)

if is_py3:
    NOT_CLASS_TYPES += (
        types.MappingProxyType,
        types.SimpleNamespace,
        types.DynamicClassAttribute,
    )

# Those types don't exist in typing.
MethodDescriptorType = type(str.replace)  # NOTE(review): duplicate of the assignment above
WrapperDescriptorType = type(set.__iter__)
# `object.__subclasshook__` is an already executed descriptor.
object_class_dict = type.__dict__["__dict__"].__get__(object)
ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])
def _a_generator(foo):
"""Used to have an object to return for generators."""
yield 42
yield foo
# Sentinel distinguishing "no default supplied" from an explicit ``None``.
_sentinel = object()

# Maps Python syntax to the operator module.
COMPARISON_OPERATORS = {
    '==': op.eq,
    '!=': op.ne,
    'is': op.is_,
    'is not': op.is_not,
    '<': op.lt,
    '<=': op.le,
    '>': op.gt,
    '>=': op.ge,
}

# Binary operators that may be executed on compiled objects.
_OPERATORS = {
    '+': op.add,
    '-': op.sub,
}
_OPERATORS.update(COMPARISON_OPERATORS)

# Descriptor types whose execution (via ``getattr``) is considered safe;
# see ``safe_getattr`` below.
ALLOWED_DESCRIPTOR_ACCESS = (
    types.FunctionType,
    types.GetSetDescriptorType,
    types.MemberDescriptorType,
    MethodDescriptorType,
    WrapperDescriptorType,
    ClassMethodDescriptorType,
    staticmethod,
    classmethod,
)
def safe_getattr(obj, name, default=_sentinel):
    """Fetch attribute ``name`` from ``obj`` without running arbitrary descriptors.

    Only descriptors whose type appears in ``ALLOWED_DESCRIPTOR_ACCESS`` are
    executed (via ``getattr``); anything else is returned as the raw object
    found by ``getattr_static``. If the attribute is missing, ``default`` is
    returned when given, otherwise the ``AttributeError`` propagates.
    """
    try:
        attribute, _is_get_descriptor = getattr_static(obj, name)
    except AttributeError:
        if default is not _sentinel:
            return default
        raise
    if type(attribute) in ALLOWED_DESCRIPTOR_ACCESS:
        # Executing these descriptor types is considered side-effect free.
        return getattr(obj, name)
    return attribute
# Lightweight, picklable description of a single signature parameter,
# mirroring the relevant attributes of ``inspect.Parameter``.
SignatureParam = namedtuple(
    'SignatureParam',
    'name has_default default has_annotation annotation kind_name'
)
def compiled_objects_cache(attribute_name):
    """Return a decorator caching results on ``getattr(evaluator, attribute_name)``."""
    def decorator(func):
        """
        This decorator caches just the ids, as opposed to caching the object
        itself. Caching the id has the advantage that an object doesn't need
        to be hashable.
        """
        def wrapper(evaluator, obj, parent_context=None):
            cache = getattr(evaluator, attribute_name)
            # Do a very cheap form of caching here: key on the object's id.
            key = id(obj)
            try:
                # Single dict lookup instead of a probe followed by a fetch.
                return cache[key][0]
            except KeyError:
                # TODO wuaaaarrghhhhhhhh
                if attribute_name == 'mixed_cache':
                    result = func(evaluator, obj, parent_context)
                else:
                    result = func(evaluator, obj)
                # Keep ``obj`` itself alive in the cache entry, otherwise its
                # id could be reused by a different object after collection.
                cache[key] = result, obj, parent_context
                return result
        return wrapper
    return decorator
def create_access(evaluator, obj):
    """Wrap ``obj`` in an access handle managed by the evaluator's subprocess."""
    handle_registry = evaluator.compiled_subprocess
    return handle_registry.get_or_create_access_handle(obj)
def load_module(evaluator, dotted_name, sys_path):
    """Import ``dotted_name`` with ``sys_path`` temporarily installed as
    ``sys.path`` and return an access path for the resulting module, or
    ``None`` if the import fails for any reason."""
    # Swap in the caller's path list; restored in ``finally`` below.
    temp, sys.path = sys.path, sys_path
    try:
        __import__(dotted_name)
    except ImportError:
        # If a module is "corrupt" or not really a Python module or whatever.
        print_to_stderr('Module %s not importable in path %s.' % (dotted_name, sys_path))
        return None
    except Exception:
        # Since __import__ pretty much makes code execution possible, just
        # catch any error here and print it.
        import traceback
        print_to_stderr("Cannot import:\n%s" % traceback.format_exc())
        return None
    finally:
        sys.path = temp

    # Just access the cache after import, because of #59 as well as the very
    # complicated import structure of Python.
    module = sys.modules[dotted_name]
    return create_access_path(evaluator, module)
class AccessPath(object):
    """Picklable sequence of access handles leading to an object."""

    def __init__(self, accesses):
        self.accesses = accesses

    # An explicit pickle protocol is needed here: with the default state
    # dict the attribute could round-trip between Python 2 and 3 as bytes
    # instead of unicode.
    def __getstate__(self):
        return self.accesses

    def __setstate__(self, state):
        self.accesses = state
def create_access_path(evaluator, obj):
    # Build the full (name, access-handle) chain for ``obj`` and wrap it in a
    # picklable AccessPath.
    access = create_access(evaluator, obj)
    return AccessPath(access.get_access_path_tuples())
def _force_unicode_decorator(func):
    # Ensure the wrapped function's (string) return value is always unicode.
    return lambda *args, **kwargs: force_unicode(func(*args, **kwargs))
class DirectObjectAccess(object):
    """Inspection facade over a live (compiled) Python object.

    All information jedi needs about a compiled object is obtained through
    this class, which tries to avoid executing arbitrary user code
    (properties, custom ``__getattr__``/``__getitem__``, ...) wherever
    possible.
    """

    def __init__(self, evaluator, obj):
        self._evaluator = evaluator
        self._obj = obj

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.get_repr())

    def _create_access(self, obj):
        return create_access(self._evaluator, obj)

    def _create_access_path(self, obj):
        return create_access_path(self._evaluator, obj)

    def py__bool__(self):
        return bool(self._obj)

    def py__file__(self):
        try:
            return self._obj.__file__
        except AttributeError:
            return None

    def py__doc__(self, include_call_signature=False):
        # NOTE(review): ``include_call_signature`` is unused here; presumably
        # handled by callers — confirm before removing.
        return force_unicode(inspect.getdoc(self._obj)) or u''

    def py__name__(self):
        if not _is_class_instance(self._obj) or \
                inspect.ismethoddescriptor(self._obj):  # slots
            cls = self._obj
        else:
            try:
                cls = self._obj.__class__
            except AttributeError:
                # happens with numpy.core.umath._UFUNC_API (you get it
                # automatically by doing `import numpy`.
                return None

        try:
            return force_unicode(cls.__name__)
        except AttributeError:
            return None

    def py__mro__accesses(self):
        # Skip the class itself (index 0) and return its base classes only.
        return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:])

    def py__getitem__(self, index):
        if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
            # Get rid of side effects, we won't call custom `__getitem__`s.
            return None
        return self._create_access_path(self._obj[index])

    def py__iter__list(self):
        if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
            # Get rid of side effects, we won't call custom `__getitem__`s.
            return []
        lst = []
        for i, part in enumerate(self._obj):
            if i > 20:
                # Should not go crazy with large iterators
                break
            lst.append(self._create_access_path(part))
        return lst

    def py__class__(self):
        return self._create_access_path(self._obj.__class__)

    def py__bases__(self):
        return [self._create_access_path(base) for base in self._obj.__bases__]

    def py__path__(self):
        return self._obj.__path__

    @_force_unicode_decorator
    def get_repr(self):
        builtins = 'builtins', '__builtin__'

        if inspect.ismodule(self._obj):
            return repr(self._obj)
        # Try to avoid execution of the property.
        if safe_getattr(self._obj, '__module__', default='') in builtins:
            return repr(self._obj)

        type_ = type(self._obj)
        if type_ == type:
            return type.__repr__(self._obj)

        if safe_getattr(type_, '__module__', default='') in builtins:
            # Allow direct execution of repr for builtins.
            return repr(self._obj)
        # Fall back to the side-effect-free default repr.
        return object.__repr__(self._obj)

    def is_class(self):
        return inspect.isclass(self._obj)

    def ismethoddescriptor(self):
        return inspect.ismethoddescriptor(self._obj)

    def dir(self):
        return list(map(force_unicode, dir(self._obj)))

    def has_iter(self):
        try:
            iter(self._obj)
            return True
        except TypeError:
            return False

    def is_allowed_getattr(self, name):
        # TODO this API is ugly.
        # Returns (exists, is_unsafe_descriptor).
        try:
            attr, is_get_descriptor = getattr_static(self._obj, name)
        except AttributeError:
            return False, False
        else:
            if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS:
                # In case of descriptors that have get methods we cannot return
                # it's value, because that would mean code execution.
                return True, True
        return True, False

    def getattr(self, name, default=_sentinel):
        try:
            return self._create_access(getattr(self._obj, name))
        except AttributeError:
            # Happens e.g. in properties of
            # PyQt4.QtGui.QStyleOptionComboBox.currentText
            # -> just set it to None
            if default is _sentinel:
                raise
            return self._create_access(default)

    def get_safe_value(self):
        # Only simple immutable builtin values may cross the process boundary.
        if type(self._obj) in (bool, bytes, float, int, str, unicode, slice):
            return self._obj
        raise ValueError("Object is type %s and not simple" % type(self._obj))

    def get_api_type(self):
        obj = self._obj
        if self.is_class():
            return u'class'
        elif inspect.ismodule(obj):
            return u'module'
        elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \
                or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):
            return u'function'
        # Everything else...
        return u'instance'

    def get_access_path_tuples(self):
        accesses = [create_access(self._evaluator, o) for o in self._get_objects_path()]
        return [(access.py__name__(), access) for access in accesses]

    def _get_objects_path(self):
        # Yields the object, its defining class (if any) and its module,
        # innermost first; the result is reversed to outermost-first order.
        def get():
            obj = self._obj
            yield obj
            try:
                obj = obj.__objclass__
            except AttributeError:
                pass
            else:
                yield obj

            try:
                # Returns a dotted string path.
                imp_plz = obj.__module__
            except AttributeError:
                # Unfortunately in some cases like `int` there's no __module__
                if not inspect.ismodule(obj):
                    yield builtins
            else:
                if imp_plz is None:
                    # Happens for example in `(_ for _ in []).send.__module__`.
                    yield builtins
                else:
                    try:
                        # TODO use sys.modules, __module__ can be faked.
                        yield sys.modules[imp_plz]
                    except KeyError:
                        # __module__ can be something arbitrary that doesn't exist.
                        yield builtins

        return list(reversed(list(get())))

    def execute_operation(self, other_access_handle, operator):
        other_access = other_access_handle.access
        op = _OPERATORS[operator]
        return self._create_access_path(op(self._obj, other_access._obj))

    def needs_type_completions(self):
        return inspect.isclass(self._obj) and self._obj != type

    def get_signature_params(self):
        obj = self._obj
        if py_version < 33:
            raise ValueError("inspect.signature was introduced in 3.3")

        if py_version == 34:
            # In 3.4 inspect.signature are wrong for str and int. This has
            # been fixed in 3.5. The signature of object is returned,
            # because no signature was found for str. Here we imitate 3.5
            # logic and just ignore the signature if the magic methods
            # don't match object.
            # 3.3 doesn't even have the logic and returns nothing for str
            # and classes that inherit from object.
            user_def = inspect._signature_get_user_defined_method
            if (inspect.isclass(obj)
                    and not user_def(type(obj), '__init__')
                    and not user_def(type(obj), '__new__')
                    and (obj.__init__ != object.__init__
                         or obj.__new__ != object.__new__)):
                raise ValueError

        try:
            signature = inspect.signature(obj)
        except (RuntimeError, TypeError):
            # Reading the code of the function in Python 3.6 implies there are
            # at least these errors that might occur if something is wrong with
            # the signature. In that case we just want a simple escape for now.
            raise ValueError
        return [
            SignatureParam(
                name=p.name,
                has_default=p.default is not p.empty,
                default=self._create_access_path(p.default),
                has_annotation=p.annotation is not p.empty,
                annotation=self._create_access_path(p.annotation),
                kind_name=str(p.kind)
            ) for p in signature.parameters.values()
        ]

    def negate(self):
        return self._create_access_path(-self._obj)

    def dict_values(self):
        return [self._create_access_path(v) for v in self._obj.values()]

    def is_super_class(self, exception):
        return issubclass(exception, self._obj)

    def get_dir_infos(self):
        """
        Used to return a couple of infos that are needed when accessing the sub
        objects of an objects
        """
        # TODO is_allowed_getattr might raise an AttributeError
        tuples = dict(
            (force_unicode(name), self.is_allowed_getattr(name))
            for name in self.dir()
        )
        return self.needs_type_completions(), tuples
def _is_class_instance(obj):
    """Like inspect.* methods: True if ``obj`` looks like a plain instance."""
    cls = getattr(obj, '__class__', None)
    if cls is None:
        return False
    return cls != type and not issubclass(cls, NOT_CLASS_TYPES)
# Build sample coroutine / async-generator objects. The ``async`` syntax is
# compiled from a string so that this module still parses on interpreters
# that do not support it.
if py_version >= 35:
    exec(compile(dedent("""
        async def _coroutine(): pass
        _coroutine = _coroutine()
        CoroutineType = type(_coroutine)
        _coroutine.close()  # Prevent ResourceWarning
    """), 'blub', 'exec'))
    _coroutine_wrapper = _coroutine.__await__()
else:
    _coroutine = None
    _coroutine_wrapper = None

if py_version >= 36:
    exec(compile(dedent("""
        async def _async_generator():
            yield
        _async_generator = _async_generator()
        AsyncGeneratorType = type(_async_generator)
    """), 'blub', 'exec'))
else:
    _async_generator = None
class _SPECIAL_OBJECTS(object):
    """Namespace of sample objects looked up by ``get_special_object``."""
    FUNCTION_CLASS = types.FunctionType
    # An arbitrary bound method, used only to obtain the bound-method type.
    BOUND_METHOD_CLASS = type(DirectObjectAccess(None, None).py__bool__)
    MODULE_CLASS = types.ModuleType
    GENERATOR_OBJECT = _a_generator(1.0)
    BUILTINS = builtins
    COROUTINE = _coroutine
    COROUTINE_WRAPPER = _coroutine_wrapper
    ASYNC_GENERATOR = _async_generator
def get_special_object(evaluator, identifier):
    # Look up one of the ``_SPECIAL_OBJECTS`` samples by attribute name and
    # wrap it in an access path.
    obj = getattr(_SPECIAL_OBJECTS, identifier)
    return create_access_path(evaluator, obj)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@jedi@py2@jedi@evaluate@compiled@access.py@.PATH_END.py
|
{
"filename": "json_to_cassandra.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/utility/DP03/json_to_cassandra.py",
"type": "Python"
}
|
import os, sys, json, io, gzip
from cassandra.cluster import Cluster
from gkdbutils.ingesters.cassandra import executeLoad
from confluent_kafka import Producer
import settings
def insert_cassandra(obj, cassandra_session):
    """Insert one alert's records into Cassandra.

    Args:
        obj: dict with keys 'SSObject', 'MPCORB', 'DiaSourceList' and
            'SSSourceList'.
        cassandra_session: open Cassandra session, or falsy to skip loading.

    Returns:
        Number of diaSources in the alert, or None when no session was given.
    """
    # if this is not set, then we are not doing cassandra
    if not cassandra_session:
        return None  # failure of batch

    executeLoad(cassandra_session, 'SSObjects', [obj['SSObject']])
    executeLoad(cassandra_session, 'MPCORBs', [obj['MPCORB']])

    # will be list of real detections, each has a non-null candid
    diaSourceList = obj['DiaSourceList']
    ssSourceList = obj['SSSourceList']
    if len(diaSourceList) > 0:
        executeLoad(cassandra_session, 'diaSources', diaSourceList)
    if len(ssSourceList) > 0:
        executeLoad(cassandra_session, 'ssSources', ssSourceList)
    return len(diaSourceList)
if __name__ == '__main__':
    # Expect the directory of gzipped JSON batch files as the only argument.
    if len(sys.argv) >= 2:
        datadir = sys.argv[1]
    else:
        print('Usage: json_to_cassandra.py <dataset>')
        sys.exit()

    cluster = Cluster(settings.CASSANDRA_HEAD)
    cassandra_session = cluster.connect()
    cassandra_session.set_keyspace('adler')

    n = 0
    objList = None
    print(datadir)
    for file in os.listdir(datadir):
        if not file.endswith('gz'): continue
        # Drop the previous batch before reading the next file to keep the
        # peak memory footprint down.
        del objList
        fin = gzip.open(datadir + '/' + file, 'r')
        json_bytes = fin.read()
        fin.close()
        json_str = json_bytes.decode('utf-8')
        del json_bytes
        objList = json.loads(json_str)
        del json_str
        for obj in objList:
            # there will never be an alert with no detections
            if len(obj['DiaSourceList']) < 1: continue
            insert_cassandra(obj, cassandra_session)
            n += 1
            # Progress indicator every 100 alerts.
            if n % 100 == 0:
                print(n)
    print('%d alerts pushed to cassandra ' % n)
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@utility@DP03@json_to_cassandra.py@.PATH_END.py
|
{
"filename": "manifold_wrapper.py",
"repo_name": "dealii/dealii",
"repo_path": "dealii_extracted/dealii-master/contrib/python-bindings/tests/manifold_wrapper.py",
"type": "Python"
}
|
## ------------------------------------------------------------------------
##
## SPDX-License-Identifier: LGPL-2.1-or-later
## Copyright (C) 2019 - 2023 by the deal.II authors
##
## This file is part of the deal.II library.
##
## Part of the source code is dual licensed under Apache-2.0 WITH
## LLVM-exception OR LGPL-2.1-or-later. Detailed license information
## governing the source code and code contributions can be found in
## LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
##
## ------------------------------------------------------------------------
import math
import os
import unittest
try:
from PyDealII.Debug import *
except ImportError:
from PyDealII.Release import *
class TestManifoldWrapperShell(unittest.TestCase):
    """Polar manifold on a 2-D hyper shell: check the outer circumference."""

    def setUp(self):
        self.triangulation = Triangulation('2D')
        p_center = Point([0, 0])
        self.triangulation.generate_hyper_shell(center = p_center, inner_radius = 0.5, outer_radius = 1., n_cells = 0, colorize = True)
        self.manifold = Manifold(dim = 2, spacedim = 2)
        self.manifold.create_polar(p_center)

    def test_manifold(self):
        self.triangulation.set_manifold(0, self.manifold)
        for cell in self.triangulation.active_cells():
            cell.manifold_id = 0
        self.triangulation.refine_global(3)

        # Sum the measures of boundary faces with boundary id 1 (the outer
        # circle of the colorized shell).
        circumference = 0
        for cell in self.triangulation.active_cells():
            for face in cell.faces():
                if face.at_boundary() and face.boundary_id == 1:
                    circumference += face.measure()

        # The refined polygon should approximate 2*pi to within 1%.
        self.assertTrue(abs(circumference - 2*math.pi)/(2*math.pi) < 1e-2)
class TestManifoldWrapperBall(unittest.TestCase):
    """Spherical manifold on a 3-D hyper ball: check the total volume."""

    def setUp(self):
        self.triangulation = Triangulation('3D')
        p_center = Point([0., 0., 0.])
        self.triangulation.generate_hyper_ball(center = p_center, radius = 1.)
        self.manifold = Manifold(dim = 3, spacedim = 3)
        self.manifold.create_spherical(p_center)

    def test_manifold(self):
        # Attach the spherical manifold to the boundary cells only.
        self.triangulation.reset_manifold(number = 0)
        self.triangulation.set_manifold(number = 0, manifold = self.manifold)
        for cell in self.triangulation.active_cells():
            if cell.at_boundary():
                cell.manifold_id = 0
        self.triangulation.refine_global(3)

        volume = 0
        for cell in self.triangulation.active_cells():
            volume += cell.measure()

        # The refined mesh should approximate the unit-ball volume 4/3*pi
        # to within 2%.
        self.assertTrue(abs(volume - 4./3. * math.pi) / (4./3.*math.pi) < 2e-2)
class TestManifoldWrapperFunction(unittest.TestCase):
    """Function manifolds (string and lambda forms) against a reference grid.

    Both manifolds describe the same mapping — push forward x -> x^2 and
    pull back x -> sqrt(x) componentwise — so both refined grids must match
    the stored reference triangulation cell by cell.
    """

    def setUp(self):
        self.manifold_1 = Manifold(dim = 2, spacedim = 2)
        self.manifold_1.create_function_string("x^2;y^2", "sqrt(x);sqrt(y)")

        self.manifold_2 = Manifold(dim = 2, spacedim = 2)
        self.manifold_2.create_function(lambda p: [p[0]**2., p[1]**2.],
                                        lambda p: [math.sqrt(p[0]), math.sqrt(p[1])])

        self.tria_reference = Triangulation('2D')
        test_directory = os.environ.get('DEAL_II_PYTHON_TESTPATH')
        self.tria_reference.read(test_directory + '/manifold_wrapper.vtk', 'vtk')

    def _check_against_reference(self, manifold):
        # Refine a unit hyper cube under *manifold* and compare every cell
        # measure with the reference triangulation.
        # (The unused ``p_center = Point([0., 0., 0.])`` locals of the
        # original test methods — 3-component points in a 2-D test — have
        # been removed.)
        triangulation = Triangulation('2D')
        triangulation.generate_hyper_cube()
        triangulation.reset_manifold(number = 0)
        triangulation.set_manifold(number = 0, manifold = manifold)
        for cell in triangulation.active_cells():
            cell.set_all_manifold_ids(0)
        triangulation.refine_global(2)

        for cell_ref, cell in zip(self.tria_reference.active_cells(), triangulation.active_cells()):
            self.assertTrue(abs(cell_ref.measure() - cell.measure()) < 1e-8)

    def test_manifold_str(self):
        self._check_against_reference(self.manifold_1)

    def test_manifold_lambda(self):
        self._check_against_reference(self.manifold_2)
# Run all manifold wrapper tests when executed directly.
if __name__ == '__main__':
    unittest.main()
|
dealiiREPO_NAMEdealiiPATH_START.@dealii_extracted@dealii-master@contrib@python-bindings@tests@manifold_wrapper.py@.PATH_END.py
|
{
"filename": "triangle_collection.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/visuals/collections/triangle_collection.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from . raw_triangle_collection import RawTriangleCollection
def TriangleCollection(mode="raw", *args, **kwargs):
    """
    mode: string
    - "raw" (speed: fastest, size: small, output: ugly)
    - "agg" (speed: fast, size: small, output: beautiful)

    NOTE(review): only the "raw" collection is implemented here; ``mode`` is
    currently ignored and a RawTriangleCollection is always returned.
    """
    return RawTriangleCollection(*args, **kwargs)
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@visuals@collections@triangle_collection.py@.PATH_END.py
|
{
"filename": "plot_constraints.py",
"repo_name": "MikeSWang/Harmonia",
"repo_path": "Harmonia_extracted/Harmonia-master/application/pipeline/cosmological_reading/plot_constraints.py",
"type": "Python"
}
|
"""Plot posterior evaluations across different datasets.
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import cumtrapz, simps
# Alpha transparencies for scattered posterior lines, filled 1-sigma areas
# and individual contour fills respectively.
LINE_SCATTER_ALPHA = 1./10.
AREA_FILL_ALPHA = 1./2.
CONTOUR_FILL_ALPHA = 1./25.
# CDF quantiles bounding the central 1-sigma credible interval.
ONE_SIGMA_QUANTILES = [0.158655, 0.841345]
# Cumulative probability-mass levels used for the 2-d contour bands
# (outermost to innermost).
SIGMA_LEVELS = [0.864665, 0.393469, 0.000001]
def plot_1d_constraints(posteriors, x, fig=None, label='', colour=None,
                        x_label=None, x_range=None, aggregation='average',
                        estimation=None, precision=None, scatter_plot=False):
    """Plot 1-d constraints.

    Parameters
    ----------
    posteriors : float, array_like
        (Sequence of) logarithmic posterior evaluations.
    x : float, array_like
        Parameter coordinates.
    fig : :class:`matplotlib.figure.Figure` *or None, optional*
        Existing figure object to plot on (default is `None`).
    label : str, optional
        Constraint label (default is '').
    colour : str or None, optional
        Principal colour (default is `None`).
    x_label : str or None
        Parameter label (default is `None`).
    x_range : sequence or None
        Parameter range as a sequence of length 2 (default is `None`).
    aggregation : {'average', 'combine'}, optional
        How to aggregate posterior evaluations, either by averaging
        or combining (default is 'average').
    estimation : {'median', 'maximum', None}, optional
        Parameter estimate type, if any (default is `None`).
    precision : int or None, optional
        Parameter precision as a number of decimal places (default is
        `None`).
    scatter_plot : bool, optional
        If `True`, Plot individual posteriors evaluations (default is
        `False`).

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure`
        Plotted figure object.
    estimate : tuple of float or None
        Parameter estimate with low and upper uncertainties, or `None`
        when `estimation` is `None`.

    """
    # Set up the plottable grid.
    if x_range:
        x_selector = slice(
            np.argmin(np.abs(x - x_range[0])),
            np.argmin(np.abs(x - x_range[1])) + 1
        )
    else:
        x_selector = slice(None)

    x, posts = np.asarray(x)[x_selector], np.asarray(posteriors)[:, x_selector]

    # Aggregate posteriors and safely exponentiate (subtracting the maximum
    # before exponentiation avoids overflow).
    if aggregation == 'average':
        posterior = np.average(posts, axis=0)
    elif aggregation == 'combine':
        posterior = np.sum(posts, axis=0)
    else:
        raise ValueError(f"Invalid aggregation: {aggregation}.")

    posts = np.exp(posts - np.max(posts, axis=-1, keepdims=True))
    posterior = np.exp(posterior - np.max(posterior))

    # Normalisation with CDF.
    cdfs = np.asarray([cumtrapz(pdf, x, initial=0.) for pdf in posts])
    cum_dist_function = cumtrapz(posterior, x, initial=0.)

    posts /= cdfs[:, [-1]]
    posterior /= cum_dist_function[-1]
    cdfs /= cdfs[:, [-1]]
    cum_dist_function /= cum_dist_function[-1]

    # Plot to specifications.
    if fig is None:
        fig, canvas = plt.subplots()
    else:
        canvas, *_ = fig.axes

    posterior_plot = canvas.plot(x, posterior, c=colour, zorder=3, label=label)
    posterior_colour = posterior_plot[-1].get_color()

    if scatter_plot:
        for pdf in posts:
            canvas.plot(
                x, pdf, c=posterior_colour, zorder=1, alpha=LINE_SCATTER_ALPHA
            )

    # BUG FIX: ``estimate`` was previously left undefined whenever
    # ``estimation`` was falsy, making the final ``return`` raise NameError.
    estimate = None
    if estimation:
        # Obtain estimates and uncertainties.
        if estimation == 'maximum':
            x_fit_idx = np.argmax(posterior)
        elif estimation == 'median':
            x_fit_idx = np.argmin(np.abs(cum_dist_function - 1./2.))

        x_lower_idx = np.argmin(
            np.abs(cum_dist_function - ONE_SIGMA_QUANTILES[0])
        )
        x_upper_idx = np.argmin(
            np.abs(cum_dist_function - ONE_SIGMA_QUANTILES[-1])
        )

        x_fit, x_lower, x_upper = x[x_fit_idx], x[x_lower_idx], x[x_upper_idx]
        dx_lower, dx_upper = x_fit - x_lower, x_upper - x_fit

        estimate = x_fit, dx_lower, dx_upper

        # Trim estimates and uncertainties for display.
        if precision is None:
            x_fit_disp, dx_lower_disp, dx_upper_disp = \
                x_fit, dx_lower, dx_upper
        elif precision == 0:
            x_fit_disp, dx_lower_disp, dx_upper_disp = \
                map(round, (x_fit, dx_lower, dx_upper))
        else:
            x_fit_disp = np.around(x_fit, decimals=precision)
            dx_lower_disp = np.around(dx_lower, decimals=precision)
            dx_upper_disp = np.around(dx_upper, decimals=precision)

        # Mark estimates and uncertainties.
        canvas.vlines(
            x_fit, ymin=0., ymax=posterior[x_fit_idx],
            color=posterior_colour, ls='--', zorder=3,
            label=r"${} {{{}}}_{{-{}}}^{{+{}}}$".format(
                x_label + '=' if x_label else '',
                x_fit_disp, dx_lower_disp, dx_upper_disp
            )
        )
        canvas.fill_between(
            x[x_lower_idx:(x_upper_idx + 1)],
            posterior[x_lower_idx:(x_upper_idx + 1)],
            antialiased=True,
            facecolor=posterior_colour, edgecolor='none',
            alpha=AREA_FILL_ALPHA, label=label, zorder=2
        )

    return fig, estimate
# pylint: disable=unused-argument
def plot_2d_constraints(posteriors, x, y, fig=None, cmap=None, alpha=None,
                        x_label=None, y_label=None, x_range=None, y_range=None,
                        aggregation='average', estimation=None,
                        x_precision=None, y_precision=None,
                        scatter_plot=False, line_style='-'):
    """Plot 2-d constraints.

    Parameters
    ----------
    posteriors : float, array_like
        (Sequence of) logarithmic posterior evaluations.
    x, y : float, array_like
        Parameter coordinates.
    fig : :class:`matplotlib.figure.Figure` *or None*, optional
        Existing figure object to plot on (default is `None`).
    cmap : str or None, optional
        Principal colour map (default is `None`).
    alpha : str or None, optional
        Principal alpha transparency (default is `None`).
    x_label, y_label : str or None
        Parameter label (default is `None`).
    x_range, y_range : sequence or None
        Parameter range as a sequence of length 2 (default is `None`).
    aggregation : {'average', 'combine'}, optional
        How to aggregate posterior evaluations, either by averaging
        or combining (default is 'average').
    estimation : {'median', 'maximum', None}, optional
        Parameter estimate type, if any (default is `None`).
        NOTE(review): the marginal-PDF helper below assumes 'maximum' or
        'median'; passing `None` leads to an undefined fit index — confirm
        callers always supply an estimation type.
    x_precision, y_precision : int or None, optional
        Parameter precision as a number of decimal places (default is
        `None`).
    scatter_plot : bool, optional
        If `True`, plot individual posteriors evaluations (default is
        `False`).
    line_style : str, optional
        Line style for the contours (default is '-').

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure`
        Plotted figure object.
    x_estimate, y_estimate : tuple of float
        Parameter estimates with low and upper uncertainties.
    patch : :class:`matplotlib.patches.Rectangle`
        A colour patch to be used in the legend.

    """
    # Set up the plottable grid.
    if x_range:
        x_selector = slice(
            np.argmin(np.abs(x - x_range[0])),
            np.argmin(np.abs(x - x_range[1])) + 1
        )
    else:
        x_selector = slice(None)

    if y_range:
        y_selector = slice(
            np.argmin(np.abs(y - y_range[0])),
            np.argmin(np.abs(y - y_range[1])) + 1
        )
    else:
        y_selector = slice(None)

    x, y = np.asarray(x)[x_selector], np.asarray(y)[y_selector]
    xx, yy = np.meshgrid(x, y, indexing='ij')

    posts = np.asarray(posteriors)[:, x_selector, y_selector]

    # Aggregate posteriors and safely exponentiate (max subtraction avoids
    # overflow in ``np.exp``).
    if aggregation == 'average':
        posterior = np.average(posts, axis=0)
    elif aggregation == 'combine':
        posterior = np.sum(posts, axis=0)
    else:
        raise ValueError(f"Invalid aggregation: {aggregation}.")

    posts = np.exp(posts - np.max(posts, axis=(-2, -1), keepdims=True))
    posterior = np.exp(posterior - np.max(posterior))

    # Normalisation with probability mass.
    def _prob_mass(hh):
        return simps([simps(hh_xslice, y) for hh_xslice in hh], x)

    posts = [post / _prob_mass(post) for post in posts]
    posterior /= _prob_mass(posterior)

    # Plot to specifications: a main contour panel plus marginal panels on
    # the top (x) and right (y).
    if fig is None:
        fig = plt.figure()
        main_panel = plt.subplot2grid((4, 4), (1, 0), rowspan=3, colspan=3)
        x_panel = plt.subplot2grid(
            (4, 4), (0, 0), colspan=3, sharex=main_panel
        )
        y_panel = plt.subplot2grid(
            (4, 4), (1, 3), rowspan=3, sharey=main_panel
        )
    else:
        main_panel, x_panel, y_panel = fig.axes

    # Likelihood contours.
    def _plot_contours(hh, relative_alpha, zorder=None):
        # Get posterior surface heights on grid and cumulative heights.
        h_flat = np.flip(np.sort(hh.flatten()))
        cum_h = np.cumsum(h_flat)
        cum_h /= cum_h[-1]

        # Determine posterior surface height levels enclosing each
        # sigma-level probability mass.
        h_levels = np.zeros_like(SIGMA_LEVELS)
        for n_sigma, sigma_level in enumerate(SIGMA_LEVELS):
            try:
                h_levels[n_sigma] = h_flat[cum_h <= sigma_level][-1]
            except IndexError:
                h_levels[n_sigma] = h_flat[0]

        # Plot the contour.
        try:
            contour = main_panel.contourf(
                xx, yy, hh, h_levels, cmap=cmap, antialiased=True,
                alpha=relative_alpha*alpha, zorder=zorder or 1
            )
            main_panel.contour(
                contour,
                linestyles=line_style, colors=contour.cmap(contour.cmap.N),
                alpha=min(2*relative_alpha*alpha, 1.), zorder=zorder or 1
            )
        except ValueError as error:
            if str(error) == "Contour levels must be increasing":
                raise ValueError(
                    "Cannot process posterior values into contours."
                ) from error
            raise ValueError from error

        return contour

    main_contour = _plot_contours(posterior, relative_alpha=1., zorder=2)

    # Proxy patch for the legend (contour sets cannot be used directly).
    patch = plt.Rectangle(
        (0., 0.), 2., 1., ls=line_style,
        ec=None, fc=main_contour.cmap(main_contour.cmap.N), alpha=0.8,
        # main_contour.collections[-1].get_facecolor()[0]
    )

    if scatter_plot:
        scattered_contours = []
        for post in posts:
            individual_contour = _plot_contours(
                post, relative_alpha=CONTOUR_FILL_ALPHA
            )
            scattered_contours.append(individual_contour)

    # Likelihood marginal PDFs.
    def _plot_pdfs(hh, relative_alpha, cm, estimate=False, zorder=None):
        # Marginalise over each axis in turn and normalise by the CDF.
        pdf_x = np.asarray([simps(hh_xslice, y) for hh_xslice in hh])
        pdf_y = np.asarray([simps(hh_yslice, x) for hh_yslice in hh.T])
        cdf_x = cumtrapz(pdf_x, x, initial=0.)
        cdf_y = cumtrapz(pdf_y, y, initial=0.)

        pdf_x /= cdf_x[-1]
        pdf_y /= cdf_y[-1]
        cdf_x /= cdf_x[-1]
        cdf_y /= cdf_y[-1]

        x_panel.plot(
            x, pdf_x, c=cm(cm.N), ls=line_style,
            alpha=relative_alpha, zorder=zorder or 1
        )
        y_panel.plot(
            pdf_y, y, c=cm(cm.N), ls=line_style,
            alpha=relative_alpha, zorder=zorder or 1
        )

        if estimate:
            # NOTE(review): assumes ``estimation`` is 'maximum' or 'median'
            # here; otherwise the fit indices below are undefined.
            if estimation == 'maximum':
                x_fit_idx = np.argmax(pdf_x)
                y_fit_idx = np.argmax(pdf_y)
            elif estimation == 'median':
                x_fit_idx = np.argmin(np.abs(cdf_x - 1./2.))
                y_fit_idx = np.argmin(np.abs(cdf_y - 1./2.))

            x_lower_idx = np.argmin(np.abs(cdf_x - ONE_SIGMA_QUANTILES[0]))
            y_lower_idx = np.argmin(np.abs(cdf_y - ONE_SIGMA_QUANTILES[0]))
            x_upper_idx = np.argmin(np.abs(cdf_x - ONE_SIGMA_QUANTILES[-1]))
            y_upper_idx = np.argmin(np.abs(cdf_y - ONE_SIGMA_QUANTILES[-1]))

            x_fit, x_lower, x_upper = x[[x_fit_idx, x_lower_idx, x_upper_idx]]
            y_fit, y_lower, y_upper = y[[y_fit_idx, y_lower_idx, y_upper_idx]]

            # dx_lower, dx_upper = x_fit - x_lower, x_upper - x_fit
            # dy_lower, dy_upper = y_fit - y_lower, y_upper - y_fit

            # if x_precision is None:
            #     x_fit_disp, dx_lower_disp, dx_upper_disp = \
            #         x_fit, dx_lower, dx_upper
            # elif x_precision == 0:
            #     x_fit_disp, dx_lower_disp, dx_upper_disp = \
            #         map(int, map(round, (x_fit, dx_lower, dx_upper)))
            # else:
            #     x_fit_disp = np.around(x_fit, decimals=x_precision)
            #     dx_lower_disp = np.around(dx_lower, decimals=x_precision)
            #     dx_upper_disp = np.around(dx_upper, decimals=x_precision)

            # if y_precision is None:
            #     y_fit_disp, dy_lower_disp, dy_upper_disp = \
            #         y_fit, dy_lower, dy_upper
            # elif y_precision == 0:
            #     y_fit_disp, dy_lower_disp, dy_upper_disp = \
            #         map(int, map(round, (y_fit, dy_lower, dy_upper)))
            # else:
            #     y_fit_disp = np.around(y_fit, decimals=y_precision)
            #     dy_lower_disp = np.around(dy_lower, decimals=y_precision)
            #     dy_upper_disp = np.around(dy_upper, decimals=y_precision)

            # x_panel.vlines(
            #     x_fit, ymin=0., ymax=pdf_x[x_fit_idx],
            #     linestyles='--', colors=[cm(cm.N)], zorder=3,
            #     label=r"${} {{{}}}_{{-{}}}^{{+{}}}$".format(
            #         x_label + '=' if x_label else '',
            #         x_fit_disp, dx_lower_disp, dx_upper_disp
            #     )
            # )
            x_panel.fill_between(
                x[x_lower_idx:(x_upper_idx + 1)],
                pdf_x[x_lower_idx:(x_upper_idx + 1)],
                antialiased=True, facecolor=[cm(cm.N)], edgecolor='none',
                alpha=AREA_FILL_ALPHA, zorder=2
            )
            # y_panel.hlines(
            #     y_fit, xmin=0., xmax=pdf_y[y_fit_idx],
            #     linestyles='--', colors=[cm(cm.N)], zorder=3,
            #     label=r"${} {{{}}}_{{-{}}}^{{+{}}}$".format(
            #         y_label + '=' if y_label else '',
            #         y_fit_disp, dy_lower_disp, dy_upper_disp
            #     )
            # )
            y_panel.fill_betweenx(
                y[y_lower_idx:(y_upper_idx + 1)],
                pdf_y[y_lower_idx:(y_upper_idx + 1)],
                antialiased=True, facecolor=[cm(cm.N)], edgecolor='none',
                alpha=AREA_FILL_ALPHA, zorder=2
            )

            return (x_fit, x_lower, x_upper), (y_fit, y_lower, y_upper)
        return None, None

    x_result, y_result = _plot_pdfs(
        posterior, estimate=True, relative_alpha=1.,
        cm=main_contour.cmap, zorder=3
    )

    if scatter_plot:
        for post, individual_contour in zip(posts, scattered_contours):
            _plot_pdfs(
                post,
                relative_alpha=LINE_SCATTER_ALPHA, cm=individual_contour.cmap
            )

    # if estimation == 'median':
    #     main_panel.scatter(
    #         x_result[0], y_result[0], marker='+', s=40,
    #         c=[main_contour.cmap(main_contour.cmap.N)], zorder=3
    #     )
    # elif estimation == 'maximum':
    #     # Note this provides the joint maximum posterior estimates
    #     # not the marginal maximum posterior estimates.
    #     xy_fit_idx = np.unravel_index(np.argmax(posterior), posterior.shape)
    #     main_panel.scatter(
    #         xx[xy_fit_idx], yy[xy_fit_idx], marker='+', s=40,
    #         c=[main_contour.cmap(main_contour.cmap.N)],
    #         zorder=3
    #     )

    return fig, x_result, y_result, patch
|
MikeSWangREPO_NAMEHarmoniaPATH_START.@Harmonia_extracted@Harmonia-master@application@pipeline@cosmological_reading@plot_constraints.py@.PATH_END.py
|
{
"filename": "_bordercolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/xaxis/rangeselector/_bordercolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``layout.xaxis.rangeselector.bordercolor``."""

    def __init__(
        self,
        plotly_name="bordercolor",
        parent_name="layout.xaxis.rangeselector",
        **kwargs
    ):
        # Pull defaults out of kwargs first so explicit caller overrides win.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "style")
        super(BordercolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@xaxis@rangeselector@_bordercolor.py@.PATH_END.py
|
{
"filename": "doc_model_savemodelresult.py",
"repo_name": "lmfit/lmfit-py",
"repo_path": "lmfit-py_extracted/lmfit-py-master/examples/doc_model_savemodelresult.py",
"type": "Python"
}
|
# <examples/doc_model_savemodelresult.py>
import numpy as np
from lmfit.model import save_modelresult
from lmfit.models import GaussianModel
# Load two-column whitespace-delimited data: column 0 is x, column 1 is y.
data = np.loadtxt('model1d_gauss.dat')
x = data[:, 0]
y = data[:, 1]
# Fit a single Gaussian to the data from rough initial parameter guesses.
gmodel = GaussianModel()
result = gmodel.fit(y, x=x, amplitude=5, center=5, sigma=1)
# Persist the fitted ModelResult to disk so it can be reloaded later
# without re-running the fit.
save_modelresult(result, 'gauss_modelresult.sav')
print(result.fit_report())
# <end examples/doc_model_savemodelresult.py>
|
lmfitREPO_NAMElmfit-pyPATH_START.@lmfit-py_extracted@lmfit-py-master@examples@doc_model_savemodelresult.py@.PATH_END.py
|
{
"filename": "fp.py",
"repo_name": "ChrisBeaumont/brut",
"repo_path": "brut_extracted/brut-master/figures/fp.py",
"type": "Python"
}
|
import json
import numpy as np
import matplotlib.pyplot as plt
from bubbly.extractors import RGBExtractor
from bubbly.dr1 import bubble_params
def hide_axes():
    """Hide both tick axes on the current matplotlib axes."""
    ax = plt.gca()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
def ex(params):
    """Extract a 100x100 RGB cutout for *params*, enlarging the radius 1.5x.

    The last entry of *params* is treated as a size/radius and scaled
    before being handed to the extractor.
    """
    extractor = RGBExtractor()
    extractor.shp = (100, 100)
    enlarged = list(params)
    enlarged[-1] = enlarged[-1] * 1.5
    return extractor.extract(*enlarged)
def closest_bubble(p):
    """Return (params, distance) of the catalog bubble nearest to *p*.

    Distance is the Euclidean separation in the (column 1, column 2)
    coordinates of the bubble parameter table.
    """
    catalog = np.array(bubble_params())
    sep = np.hypot(catalog[:, 1] - p[1], catalog[:, 2] - p[2])
    best = np.argmin(sep)
    return catalog[best], sep[best]
def main():
    """Render a 3x3 mosaic of the highest-scoring false positives.

    Loads benchmark scores, extracts cutouts for the nine highest 'off'
    (non-bubble) scores, tiles them with white separator lines, annotates
    each tile with its score, and saves the figure to fp.eps.
    """
    labels = json.load(open('../models/benchmark_scores.json'))
    # indices of the nine highest-scoring false positives
    ind = np.argsort(labels['off_score'])[::-1]
    scores = [labels['off_score'][i] for i in ind[:9]]
    images = [ex(labels['off'][i]) for i in ind[:9]]
    for i in range(9):
        p = labels['off'][ind[i]]
        b, d = closest_bubble(p)
        # print() with a single parenthesized argument behaves identically
        # under Python 2 (print statement) and Python 3 (print function);
        # the original bare `print "..."` was Python-2-only syntax.
        print("Offset: %0.2f\t radii: %0.2f %0.2f" % (d, p[-1], b[-1]))
    # np.vstack expects a sequence; passing a bare generator is deprecated
    # and rejected by modern numpy, so build a list explicitly.
    ims = np.vstack([np.hstack(images[i:i + 3]) for i in [0, 3, 6]])
    plt.imshow(ims, origin='upper')
    dx = images[0].shape[0]
    kw = {'color': 'white'}
    plt.axhline(dx, **kw)
    plt.axhline(dx * 2, **kw)
    plt.axvline(dx, **kw)
    plt.axvline(dx * 2, **kw)
    for i in range(9):
        # explicit floor division keeps the tile grid arithmetic integral
        # on both Python 2 and Python 3 (plain / would become float on py3)
        x = dx * (i % 3) + dx // 10
        y = dx * (i // 3) + 9.5 * dx / 10
        plt.annotate("%0.2f" % scores[i], xy=(x, y), color='white')
    hide_axes()
    plt.savefig('fp.eps')
if __name__ == "__main__":
    main()
|
ChrisBeaumontREPO_NAMEbrutPATH_START.@brut_extracted@brut-master@figures@fp.py@.PATH_END.py
|
{
"filename": "test_stringdtype.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_core/tests/test_stringdtype.py",
"type": "Python"
}
|
import concurrent.futures
import itertools
import os
import pickle
import string
import sys
import tempfile
import numpy as np
import pytest
from numpy.dtypes import StringDType
from numpy._core.tests._natype import pd_NA
from numpy.testing import assert_array_equal, IS_WASM, IS_PYPY
@pytest.fixture
def string_list():
return ["abc", "def", "ghi" * 10, "AΒ’ββ¬ π" * 100, "Abc" * 1000, "DEF"]
@pytest.fixture
def random_string_list():
chars = list(string.ascii_letters + string.digits)
chars = np.array(chars, dtype="U1")
ret = np.random.choice(chars, size=100 * 10, replace=True)
return ret.view("U100")
@pytest.fixture(params=[True, False])
def coerce(request):
return request.param
@pytest.fixture(
params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"],
ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
)
def na_object(request):
return request.param
def get_dtype(na_object, coerce=True):
# explicit is check for pd_NA because != with pd_NA returns pd_NA
if na_object is pd_NA or na_object != "unset":
return StringDType(na_object=na_object, coerce=coerce)
else:
return StringDType(coerce=coerce)
@pytest.fixture()
def dtype(na_object, coerce):
return get_dtype(na_object, coerce)
# second copy for cast tests to do a cartesian product over dtypes
@pytest.fixture(params=[True, False])
def coerce2(request):
return request.param
@pytest.fixture(
params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"],
ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
)
def na_object2(request):
return request.param
@pytest.fixture()
def dtype2(na_object2, coerce2):
# explicit is check for pd_NA because != with pd_NA returns pd_NA
if na_object2 is pd_NA or na_object2 != "unset":
return StringDType(na_object=na_object2, coerce=coerce2)
else:
return StringDType(coerce=coerce2)
def test_dtype_creation():
    """Each (na_object, coerce) combination is a distinct, hashable dtype."""
    hashes = set()
    # default: no NA object, coercion enabled
    dt = StringDType()
    assert not hasattr(dt, "na_object") and dt.coerce is True
    hashes.add(hash(dt))
    dt = StringDType(na_object=None)
    assert dt.na_object is None and dt.coerce is True
    hashes.add(hash(dt))
    dt = StringDType(coerce=False)
    assert not hasattr(dt, "na_object") and dt.coerce is False
    hashes.add(hash(dt))
    dt = StringDType(na_object=None, coerce=False)
    assert dt.na_object is None and dt.coerce is False
    hashes.add(hash(dt))
    # four distinct configurations -> four distinct hashes
    assert len(hashes) == 4
    # the "T" character code aliases the default StringDType instance
    dt = np.dtype("T")
    assert dt == StringDType()
    assert dt.kind == "T"
    assert dt.char == "T"
    # so its hash must collide with the default dtype already in the set
    hashes.add(hash(dt))
    assert len(hashes) == 4
def test_dtype_equality(dtype):
assert dtype == dtype
for ch in "SU":
assert dtype != np.dtype(ch)
assert dtype != np.dtype(f"{ch}8")
def test_dtype_repr(dtype):
if not hasattr(dtype, "na_object") and dtype.coerce:
assert repr(dtype) == "StringDType()"
elif dtype.coerce:
assert repr(dtype) == f"StringDType(na_object={dtype.na_object!r})"
elif not hasattr(dtype, "na_object"):
assert repr(dtype) == "StringDType(coerce=False)"
else:
assert (
repr(dtype)
== f"StringDType(na_object={dtype.na_object!r}, coerce=False)"
)
def test_create_with_na(dtype):
if not hasattr(dtype, "na_object"):
pytest.skip("does not have an na object")
na_val = dtype.na_object
string_list = ["hello", na_val, "world"]
arr = np.array(string_list, dtype=dtype)
assert str(arr) == "[" + " ".join([repr(s) for s in string_list]) + "]"
assert arr[1] is dtype.na_object
@pytest.mark.parametrize("i", list(range(5)))
def test_set_replace_na(i):
# Test strings of various lengths can be set to NaN and then replaced.
s_empty = ""
s_short = "0123456789"
s_medium = "abcdefghijklmnopqrstuvwxyz"
s_long = "-=+" * 100
strings = [s_medium, s_empty, s_short, s_medium, s_long]
a = np.array(strings, StringDType(na_object=np.nan))
for s in [a[i], s_medium+s_short, s_short, s_empty, s_long]:
a[i] = np.nan
assert np.isnan(a[i])
a[i] = s
assert a[i] == s
assert_array_equal(a, strings[:i] + [s] + strings[i+1:])
def test_null_roundtripping():
data = ["hello\0world", "ABC\0DEF\0\0"]
arr = np.array(data, dtype="T")
assert data[0] == arr[0]
assert data[1] == arr[1]
def test_string_too_large_error():
arr = np.array(["a", "b", "c"], dtype=StringDType())
with pytest.raises(MemoryError):
arr * (2**63 - 2)
@pytest.mark.parametrize(
"data",
[
["abc", "def", "ghi"],
["π€£", "π΅", "π°"],
["π", "π", "πΎ"],
["πΉ", "π ", "π"],
],
)
def test_array_creation_utf8(dtype, data):
arr = np.array(data, dtype=dtype)
assert str(arr) == "[" + " ".join(["'" + str(d) + "'" for d in data]) + "]"
assert arr.dtype == dtype
@pytest.mark.parametrize(
"data",
[
[1, 2, 3],
[b"abc", b"def", b"ghi"],
[object, object, object],
],
)
def test_scalars_string_conversion(data, dtype):
if dtype.coerce:
assert_array_equal(
np.array(data, dtype=dtype),
np.array([str(d) for d in data], dtype=dtype),
)
else:
with pytest.raises(ValueError):
np.array(data, dtype=dtype)
@pytest.mark.parametrize(
("strings"),
[
["this", "is", "an", "array"],
["β¬", "", "π"],
["AΒ’ββ¬ π", " Aββ¬Β’π", "ββ¬π AΒ’", "πβAΒ’ β¬"],
],
)
def test_self_casts(dtype, dtype2, strings):
if hasattr(dtype, "na_object"):
strings = strings + [dtype.na_object]
elif hasattr(dtype2, "na_object"):
strings = strings + [""]
arr = np.array(strings, dtype=dtype)
newarr = arr.astype(dtype2)
if hasattr(dtype, "na_object") and not hasattr(dtype2, "na_object"):
assert newarr[-1] == str(dtype.na_object)
with pytest.raises(TypeError):
arr.astype(dtype2, casting="safe")
elif hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"):
assert newarr[-1] is dtype2.na_object
arr.astype(dtype2, casting="safe")
elif hasattr(dtype2, "na_object"):
assert newarr[-1] == ""
arr.astype(dtype2, casting="safe")
else:
arr.astype(dtype2, casting="safe")
if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"):
na1 = dtype.na_object
na2 = dtype2.na_object
if (na1 is not na2 and
# check for pd_NA first because bool(pd_NA) is an error
((na1 is pd_NA or na2 is pd_NA) or
# the second check is a NaN check, spelled this way
# to avoid errors from math.isnan and np.isnan
(na1 != na2 and not (na1 != na1 and na2 != na2)))):
with pytest.raises(TypeError):
arr[:-1] == newarr[:-1]
return
assert_array_equal(arr[:-1], newarr[:-1])
@pytest.mark.parametrize(
("strings"),
[
["this", "is", "an", "array"],
["β¬", "", "π"],
["AΒ’ββ¬ π", " Aββ¬Β’π", "ββ¬π AΒ’", "πβAΒ’ β¬"],
],
)
class TestStringLikeCasts:
def test_unicode_casts(self, dtype, strings):
arr = np.array(strings, dtype=np.str_).astype(dtype)
expected = np.array(strings, dtype=dtype)
assert_array_equal(arr, expected)
arr_as_U8 = expected.astype("U8")
assert_array_equal(arr_as_U8, np.array(strings, dtype="U8"))
assert_array_equal(arr_as_U8.astype(dtype), arr)
arr_as_U3 = expected.astype("U3")
assert_array_equal(arr_as_U3, np.array(strings, dtype="U3"))
assert_array_equal(
arr_as_U3.astype(dtype),
np.array([s[:3] for s in strings], dtype=dtype),
)
def test_void_casts(self, dtype, strings):
sarr = np.array(strings, dtype=dtype)
utf8_bytes = [s.encode("utf-8") for s in strings]
void_dtype = f"V{max([len(s) for s in utf8_bytes])}"
varr = np.array(utf8_bytes, dtype=void_dtype)
assert_array_equal(varr, sarr.astype(void_dtype))
assert_array_equal(varr.astype(dtype), sarr)
def test_bytes_casts(self, dtype, strings):
sarr = np.array(strings, dtype=dtype)
try:
utf8_bytes = [s.encode("ascii") for s in strings]
bytes_dtype = f"S{max([len(s) for s in utf8_bytes])}"
barr = np.array(utf8_bytes, dtype=bytes_dtype)
assert_array_equal(barr, sarr.astype(bytes_dtype))
assert_array_equal(barr.astype(dtype), sarr)
except UnicodeEncodeError:
with pytest.raises(UnicodeEncodeError):
sarr.astype("S20")
def test_additional_unicode_cast(random_string_list, dtype):
    """Casting to the same dtype and back to unicode preserves the data."""
    arr = np.array(random_string_list, dtype=dtype)
    # test that this short-circuits correctly
    assert_array_equal(arr, arr.astype(arr.dtype))
    # tests the casts via the comparison promoter
    assert_array_equal(arr, arr.astype(random_string_list.dtype))
def test_insert_scalar(dtype, string_list):
"""Test that inserting a scalar works."""
arr = np.array(string_list, dtype=dtype)
scalar_instance = "what"
arr[1] = scalar_instance
assert_array_equal(
arr,
np.array(string_list[:1] + ["what"] + string_list[2:], dtype=dtype),
)
# All six rich-comparison ufuncs; exercised pairwise against str/object
# arrays by test_comparisons below.
comparison_operators = [
    np.equal,
    np.not_equal,
    np.greater,
    np.greater_equal,
    np.less,
    np.less_equal,
]
@pytest.mark.parametrize("op", comparison_operators)
@pytest.mark.parametrize("o_dtype", [np.str_, object, StringDType()])
def test_comparisons(string_list, dtype, op, o_dtype):
sarr = np.array(string_list, dtype=dtype)
oarr = np.array(string_list, dtype=o_dtype)
# test that comparison operators work
res = op(sarr, sarr)
ores = op(oarr, oarr)
# test that promotion works as well
orres = op(sarr, oarr)
olres = op(oarr, sarr)
assert_array_equal(res, ores)
assert_array_equal(res, orres)
assert_array_equal(res, olres)
# test we get the correct answer for unequal length strings
sarr2 = np.array([s + "2" for s in string_list], dtype=dtype)
oarr2 = np.array([s + "2" for s in string_list], dtype=o_dtype)
res = op(sarr, sarr2)
ores = op(oarr, oarr2)
olres = op(oarr, sarr2)
orres = op(sarr, oarr2)
assert_array_equal(res, ores)
assert_array_equal(res, olres)
assert_array_equal(res, orres)
res = op(sarr2, sarr)
ores = op(oarr2, oarr)
olres = op(oarr2, sarr)
orres = op(sarr2, oarr)
assert_array_equal(res, ores)
assert_array_equal(res, olres)
assert_array_equal(res, orres)
def test_isnan(dtype, string_list):
if not hasattr(dtype, "na_object"):
pytest.skip("no na support")
sarr = np.array(string_list + [dtype.na_object], dtype=dtype)
is_nan = isinstance(dtype.na_object, float) and np.isnan(dtype.na_object)
bool_errors = 0
try:
bool(dtype.na_object)
except TypeError:
bool_errors = 1
if is_nan or bool_errors:
# isnan is only true when na_object is a NaN
assert_array_equal(
np.isnan(sarr),
np.array([0] * len(string_list) + [1], dtype=np.bool),
)
else:
assert not np.any(np.isnan(sarr))
def test_pickle(dtype, string_list):
arr = np.array(string_list, dtype=dtype)
with tempfile.NamedTemporaryFile("wb", delete=False) as f:
pickle.dump([arr, dtype], f)
with open(f.name, "rb") as f:
res = pickle.load(f)
assert_array_equal(res[0], arr)
assert res[1] == dtype
os.remove(f.name)
@pytest.mark.parametrize(
"strings",
[
["left", "right", "leftovers", "righty", "up", "down"],
[
"left" * 10,
"right" * 10,
"leftovers" * 10,
"righty" * 10,
"up" * 10,
],
["π€£π€£", "π€£", "π΅", "π°"],
["π", "π", "πΎ"],
["πΉ", "π ", "π"],
["AΒ’ββ¬ π", " Aββ¬Β’π", "ββ¬π AΒ’", "πβAΒ’ β¬"],
],
)
def test_sort(dtype, strings):
"""Test that sorting matches python's internal sorting."""
def test_sort(strings, arr_sorted):
arr = np.array(strings, dtype=dtype)
np.random.default_rng().shuffle(arr)
na_object = getattr(arr.dtype, "na_object", "")
if na_object is None and None in strings:
with pytest.raises(
ValueError,
match="Cannot compare null that is not a nan-like value",
):
arr.sort()
else:
arr.sort()
assert np.array_equal(arr, arr_sorted, equal_nan=True)
# make a copy so we don't mutate the lists in the fixture
strings = strings.copy()
arr_sorted = np.array(sorted(strings), dtype=dtype)
test_sort(strings, arr_sorted)
if not hasattr(dtype, "na_object"):
return
# make sure NAs get sorted to the end of the array and string NAs get
# sorted like normal strings
strings.insert(0, dtype.na_object)
strings.insert(2, dtype.na_object)
# can't use append because doing that with NA converts
# the result to object dtype
if not isinstance(dtype.na_object, str):
arr_sorted = np.array(
arr_sorted.tolist() + [dtype.na_object, dtype.na_object],
dtype=dtype,
)
else:
arr_sorted = np.array(sorted(strings), dtype=dtype)
test_sort(strings, arr_sorted)
@pytest.mark.parametrize(
"strings",
[
["AΒ’ββ¬ π", " Aββ¬Β’π", "ββ¬π AΒ’", "πβAΒ’ β¬"],
["AΒ’ββ¬ π", "", " ", "ο "],
["", "a", "πΈ", "ÑÑðfΓ‘ΓΓ³Γ₯éë"],
],
)
def test_nonzero(strings, na_object):
dtype = get_dtype(na_object)
arr = np.array(strings, dtype=dtype)
is_nonzero = np.array(
[i for i, item in enumerate(strings) if len(item) != 0])
assert_array_equal(arr.nonzero()[0], is_nonzero)
if na_object is not pd_NA and na_object == 'unset':
return
strings_with_na = np.array(strings + [na_object], dtype=dtype)
is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0]
if is_nan:
assert strings_with_na.nonzero()[0][-1] == 4
else:
assert strings_with_na.nonzero()[0][-1] == 3
# check that the casting to bool and nonzero give consistent results
assert_array_equal(strings_with_na[strings_with_na.nonzero()],
strings_with_na[strings_with_na.astype(bool)])
def test_where(string_list, na_object):
    """np.where selects elementwise between two StringDType arrays."""
    dtype = get_dtype(na_object)
    a = np.array(string_list, dtype=dtype)
    b = a[::-1]
    # alternating mask: even indices taken from a, odd indices from b
    # (the string_list fixture has exactly six entries)
    res = np.where([True, False, True, False, True, False], a, b)
    assert_array_equal(res, [a[0], b[1], a[2], b[3], a[4], b[5]])
def test_fancy_indexing(string_list):
sarr = np.array(string_list, dtype="T")
assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])])
inds = [
[True, True],
[0, 1],
...,
np.array([0, 1], dtype='uint8'),
]
lops = [
['a'*25, 'b'*25],
['', ''],
['hello', 'world'],
['hello', 'world'*25],
]
# see gh-27003 and gh-27053
for ind in inds:
for lop in lops:
a = np.array(lop, dtype="T")
assert_array_equal(a[ind], a)
rop = ['d'*25, 'e'*25]
for b in [rop, np.array(rop, dtype="T")]:
a[ind] = b
assert_array_equal(a, b)
assert a[0] == 'd'*25
def test_creation_functions():
assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""])
assert_array_equal(np.empty(3, dtype="T"), ["", "", ""])
assert np.zeros(3, dtype="T")[0] == ""
assert np.empty(3, dtype="T")[0] == ""
def test_concatenate(string_list):
sarr = np.array(string_list, dtype="T")
sarr_cat = np.array(string_list + string_list, dtype="T")
assert_array_equal(np.concatenate([sarr], axis=0), sarr)
def test_resize_method(string_list):
    """In-place resize pads the new trailing elements with empty strings."""
    sarr = np.array(string_list, dtype="T")
    # PyPy cannot reliably prove there are no outstanding references,
    # so the reference check must be disabled there.
    if IS_PYPY:
        sarr.resize(len(string_list)+3, refcheck=False)
    else:
        sarr.resize(len(string_list)+3)
    assert_array_equal(sarr, np.array(string_list + ['']*3, dtype="T"))
def test_create_with_copy_none(string_list):
arr = np.array(string_list, dtype=StringDType())
# create another stringdtype array with an arena that has a different
# in-memory layout than the first array
arr_rev = np.array(string_list[::-1], dtype=StringDType())
# this should create a copy and the resulting array
# shouldn't share an allocator or arena with arr_rev, despite
# explicitly passing arr_rev.dtype
arr_copy = np.array(arr, copy=None, dtype=arr_rev.dtype)
np.testing.assert_array_equal(arr, arr_copy)
assert arr_copy.base is None
with pytest.raises(ValueError, match="Unable to avoid copy"):
np.array(arr, copy=False, dtype=arr_rev.dtype)
# because we're using arr's dtype instance, the view is safe
arr_view = np.array(arr, copy=None, dtype=arr.dtype)
np.testing.assert_array_equal(arr, arr)
np.testing.assert_array_equal(arr_view[::-1], arr_rev)
assert arr_view is arr
def test_astype_copy_false():
orig_dt = StringDType()
arr = np.array(["hello", "world"], dtype=StringDType())
assert not arr.astype(StringDType(coerce=False), copy=False).dtype.coerce
assert arr.astype(orig_dt, copy=False).dtype is orig_dt
@pytest.mark.parametrize(
"strings",
[
["left", "right", "leftovers", "righty", "up", "down"],
["π€£π€£", "π€£", "π΅", "π°"],
["π", "π", "πΎ"],
["πΉ", "π ", "π"],
["AΒ’ββ¬ π", " Aββ¬Β’π", "ββ¬π AΒ’", "πβAΒ’ β¬"],
],
)
def test_argmax(strings):
"""Test that argmax/argmin matches what python calculates."""
arr = np.array(strings, dtype="T")
assert np.argmax(arr) == strings.index(max(strings))
assert np.argmin(arr) == strings.index(min(strings))
@pytest.mark.parametrize(
"arrfunc,expected",
[
[np.sort, None],
[np.nonzero, (np.array([], dtype=np.int_),)],
[np.argmax, 0],
[np.argmin, 0],
],
)
def test_arrfuncs_zeros(arrfunc, expected):
arr = np.zeros(10, dtype="T")
result = arrfunc(arr)
if expected is None:
expected = arr
assert_array_equal(result, expected, strict=True)
@pytest.mark.parametrize(
("strings", "cast_answer", "any_answer", "all_answer"),
[
[["hello", "world"], [True, True], True, True],
[["", ""], [False, False], False, False],
[["hello", ""], [True, False], True, False],
[["", "world"], [False, True], True, False],
],
)
def test_cast_to_bool(strings, cast_answer, any_answer, all_answer):
sarr = np.array(strings, dtype="T")
assert_array_equal(sarr.astype("bool"), cast_answer)
assert np.any(sarr) == any_answer
assert np.all(sarr) == all_answer
@pytest.mark.parametrize(
("strings", "cast_answer"),
[
[[True, True], ["True", "True"]],
[[False, False], ["False", "False"]],
[[True, False], ["True", "False"]],
[[False, True], ["False", "True"]],
],
)
def test_cast_from_bool(strings, cast_answer):
barr = np.array(strings, dtype=bool)
assert_array_equal(barr.astype("T"), np.array(cast_answer, dtype="T"))
@pytest.mark.parametrize("bitsize", [8, 16, 32, 64])
@pytest.mark.parametrize("signed", [True, False])
def test_sized_integer_casts(bitsize, signed):
idtype = f"int{bitsize}"
if signed:
inp = [-(2**p - 1) for p in reversed(range(bitsize - 1))]
inp += [2**p - 1 for p in range(1, bitsize - 1)]
else:
idtype = "u" + idtype
inp = [2**p - 1 for p in range(bitsize)]
ainp = np.array(inp, dtype=idtype)
assert_array_equal(ainp, ainp.astype("T").astype(idtype))
# safe casting works
ainp.astype("T", casting="safe")
with pytest.raises(TypeError):
ainp.astype("T").astype(idtype, casting="safe")
oob = [str(2**bitsize), str(-(2**bitsize))]
with pytest.raises(OverflowError):
np.array(oob, dtype="T").astype(idtype)
with pytest.raises(ValueError):
np.array(["1", np.nan, "3"],
dtype=StringDType(na_object=np.nan)).astype(idtype)
@pytest.mark.parametrize("typename", ["byte", "short", "int", "longlong"])
@pytest.mark.parametrize("signed", ["", "u"])
def test_unsized_integer_casts(typename, signed):
idtype = f"{signed}{typename}"
inp = [1, 2, 3, 4]
ainp = np.array(inp, dtype=idtype)
assert_array_equal(ainp, ainp.astype("T").astype(idtype))
@pytest.mark.parametrize(
"typename",
[
pytest.param(
"longdouble",
marks=pytest.mark.xfail(
np.dtypes.LongDoubleDType() != np.dtypes.Float64DType(),
reason="numpy lacks an ld2a implementation",
strict=True,
),
),
"float64",
"float32",
"float16",
],
)
def test_float_casts(typename):
inp = [1.1, 2.8, -3.2, 2.7e4]
ainp = np.array(inp, dtype=typename)
assert_array_equal(ainp, ainp.astype("T").astype(typename))
inp = [0.1]
sres = np.array(inp, dtype=typename).astype("T")
res = sres.astype(typename)
assert_array_equal(np.array(inp, dtype=typename), res)
assert sres[0] == "0.1"
if typename == "longdouble":
# let's not worry about platform-dependent rounding of longdouble
return
fi = np.finfo(typename)
inp = [1e-324, fi.smallest_subnormal, -1e-324, -fi.smallest_subnormal]
eres = [0, fi.smallest_subnormal, -0, -fi.smallest_subnormal]
res = np.array(inp, dtype=typename).astype("T").astype(typename)
assert_array_equal(eres, res)
inp = [2e308, fi.max, -2e308, fi.min]
eres = [np.inf, fi.max, -np.inf, fi.min]
res = np.array(inp, dtype=typename).astype("T").astype(typename)
assert_array_equal(eres, res)
@pytest.mark.parametrize(
"typename",
[
"csingle",
"cdouble",
pytest.param(
"clongdouble",
marks=pytest.mark.xfail(
np.dtypes.CLongDoubleDType() != np.dtypes.Complex128DType(),
reason="numpy lacks an ld2a implementation",
strict=True,
),
),
],
)
def test_cfloat_casts(typename):
inp = [1.1 + 1.1j, 2.8 + 2.8j, -3.2 - 3.2j, 2.7e4 + 2.7e4j]
ainp = np.array(inp, dtype=typename)
assert_array_equal(ainp, ainp.astype("T").astype(typename))
inp = [0.1 + 0.1j]
sres = np.array(inp, dtype=typename).astype("T")
res = sres.astype(typename)
assert_array_equal(np.array(inp, dtype=typename), res)
assert sres[0] == "(0.1+0.1j)"
def test_take(string_list):
sarr = np.array(string_list, dtype="T")
res = sarr.take(np.arange(len(string_list)))
assert_array_equal(sarr, res)
# make sure it also works for out
out = np.empty(len(string_list), dtype="T")
out[0] = "hello"
res = sarr.take(np.arange(len(string_list)), out=out)
assert res is out
assert_array_equal(sarr, res)
@pytest.mark.parametrize("use_out", [True, False])
@pytest.mark.parametrize(
"ufunc_name,func",
[
("min", min),
("max", max),
],
)
def test_ufuncs_minmax(string_list, ufunc_name, func, use_out):
"""Test that the min/max ufuncs match Python builtin min/max behavior."""
arr = np.array(string_list, dtype="T")
uarr = np.array(string_list, dtype=str)
res = np.array(func(string_list), dtype="T")
assert_array_equal(getattr(arr, ufunc_name)(), res)
ufunc = getattr(np, ufunc_name + "imum")
if use_out:
res = ufunc(arr, arr, out=arr)
else:
res = ufunc(arr, arr)
assert_array_equal(uarr, res)
assert_array_equal(getattr(arr, ufunc_name)(), func(string_list))
def test_max_regression():
arr = np.array(['y', 'y', 'z'], dtype="T")
assert arr.max() == 'z'
@pytest.mark.parametrize("use_out", [True, False])
@pytest.mark.parametrize(
"other_strings",
[
["abc", "def" * 500, "ghi" * 16, "π€£" * 100, "π΅", "π°"],
["π", "π", "πΎ", "πΉ", "π ", "π"],
["π₯¦", "Β¨", "β¨―", "β° ", "β¨ ", "βΆ "],
],
)
def test_ufunc_add(dtype, string_list, other_strings, use_out):
arr1 = np.array(string_list, dtype=dtype)
arr2 = np.array(other_strings, dtype=dtype)
result = np.array([a + b for a, b in zip(arr1, arr2)], dtype=dtype)
if use_out:
res = np.add(arr1, arr2, out=arr1)
else:
res = np.add(arr1, arr2)
assert_array_equal(res, result)
if not hasattr(dtype, "na_object"):
return
is_nan = isinstance(dtype.na_object, float) and np.isnan(dtype.na_object)
is_str = isinstance(dtype.na_object, str)
bool_errors = 0
try:
bool(dtype.na_object)
except TypeError:
bool_errors = 1
arr1 = np.array([dtype.na_object] + string_list, dtype=dtype)
arr2 = np.array(other_strings + [dtype.na_object], dtype=dtype)
if is_nan or bool_errors or is_str:
res = np.add(arr1, arr2)
assert_array_equal(res[1:-1], arr1[1:-1] + arr2[1:-1])
if not is_str:
assert res[0] is dtype.na_object and res[-1] is dtype.na_object
else:
assert res[0] == dtype.na_object + arr2[0]
assert res[-1] == arr1[-1] + dtype.na_object
else:
with pytest.raises(ValueError):
np.add(arr1, arr2)
def test_ufunc_add_reduce(dtype):
values = ["a", "this is a long string", "c"]
arr = np.array(values, dtype=dtype)
out = np.empty((), dtype=dtype)
expected = np.array("".join(values), dtype=dtype)
assert_array_equal(np.add.reduce(arr), expected)
np.add.reduce(arr, out=out)
assert_array_equal(out, expected)
def test_add_promoter(string_list):
arr = np.array(string_list, dtype=StringDType())
lresult = np.array(["hello" + s for s in string_list], dtype=StringDType())
rresult = np.array([s + "hello" for s in string_list], dtype=StringDType())
for op in ["hello", np.str_("hello"), np.array(["hello"])]:
assert_array_equal(op + arr, lresult)
assert_array_equal(arr + op, rresult)
# The promoter should be able to handle things if users pass `dtype=`
res = np.add("hello", string_list, dtype=StringDType)
assert res.dtype == StringDType()
# The promoter should not kick in if users override the input,
# which means arr is cast, this fails because of the unknown length.
with pytest.raises(TypeError, match="cannot cast dtype"):
np.add(arr, "add", signature=("U", "U", None), casting="unsafe")
# But it must simply reject the following:
with pytest.raises(TypeError, match=".*did not contain a loop"):
np.add(arr, "add", signature=(None, "U", None))
with pytest.raises(TypeError, match=".*did not contain a loop"):
np.add("a", "b", signature=("U", "U", StringDType))
def test_add_no_legacy_promote_with_signature():
# Possibly misplaced, but useful to test with string DType. We check that
# if there is clearly no loop found, a stray `dtype=` doesn't break things
# Regression test for the bad error in gh-26735
# (If legacy promotion is gone, this can be deleted...)
with pytest.raises(TypeError, match=".*did not contain a loop"):
np.add("3", 6, dtype=StringDType)
def test_add_promoter_reduce():
# Exact TypeError could change, but ensure StringDtype doesn't match
with pytest.raises(TypeError, match="the resolved dtypes are not"):
np.add.reduce(np.array(["a", "b"], dtype="U"))
# On the other hand, using `dtype=T` in the *ufunc* should work.
np.add.reduce(np.array(["a", "b"], dtype="U"), dtype=np.dtypes.StringDType)
def test_multiply_reduce():
# At the time of writing (NumPy 2.0) this is very limited (and rather
# ridiculous anyway). But it works and actually makes some sense...
# (NumPy does not allow non-scalar initial values)
repeats = np.array([2, 3, 4])
val = "school-π"
res = np.multiply.reduce(repeats, initial=val, dtype=np.dtypes.StringDType)
assert res == val * np.prod(repeats)
def test_multiply_two_string_raises():
arr = np.array(["hello", "world"], dtype="T")
with pytest.raises(np._core._exceptions._UFuncNoLoopError):
np.multiply(arr, arr)
@pytest.mark.parametrize("use_out", [True, False])
@pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]])
@pytest.mark.parametrize(
"other_dtype",
[
None,
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"short",
"int",
"intp",
"long",
"longlong",
"ushort",
"uint",
"uintp",
"ulong",
"ulonglong",
],
)
def test_ufunc_multiply(dtype, string_list, other, other_dtype, use_out):
"""Test the two-argument ufuncs match python builtin behavior."""
arr = np.array(string_list, dtype=dtype)
if other_dtype is not None:
other_dtype = np.dtype(other_dtype)
try:
len(other)
result = [s * o for s, o in zip(string_list, other)]
other = np.array(other)
if other_dtype is not None:
other = other.astype(other_dtype)
except TypeError:
if other_dtype is not None:
other = other_dtype.type(other)
result = [s * other for s in string_list]
if use_out:
arr_cache = arr.copy()
lres = np.multiply(arr, other, out=arr)
assert_array_equal(lres, result)
arr[:] = arr_cache
assert lres is arr
arr *= other
assert_array_equal(arr, result)
arr[:] = arr_cache
rres = np.multiply(other, arr, out=arr)
assert rres is arr
assert_array_equal(rres, result)
else:
lres = arr * other
assert_array_equal(lres, result)
rres = other * arr
assert_array_equal(rres, result)
if not hasattr(dtype, "na_object"):
return
is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0]
is_str = isinstance(dtype.na_object, str)
bool_errors = 0
try:
bool(dtype.na_object)
except TypeError:
bool_errors = 1
arr = np.array(string_list + [dtype.na_object], dtype=dtype)
try:
len(other)
other = np.append(other, 3)
if other_dtype is not None:
other = other.astype(other_dtype)
except TypeError:
pass
if is_nan or bool_errors or is_str:
for res in [arr * other, other * arr]:
assert_array_equal(res[:-1], result)
if not is_str:
assert res[-1] is dtype.na_object
else:
try:
assert res[-1] == dtype.na_object * other[-1]
except (IndexError, TypeError):
assert res[-1] == dtype.na_object * other
else:
with pytest.raises(TypeError):
arr * other
with pytest.raises(TypeError):
other * arr
def test_findlike_promoters():
r = "Wally"
l = "Where's Wally?"
s = np.int32(3)
e = np.int8(13)
for dtypes in [("T", "U"), ("U", "T")]:
for function, answer in [
(np.strings.index, 8),
(np.strings.endswith, True),
]:
assert answer == function(
np.array(l, dtype=dtypes[0]), np.array(r, dtype=dtypes[1]), s, e
)
def test_strip_promoter():
arg = ["Hello!!!!", "Hello??!!"]
strip_char = "!"
answer = ["Hello", "Hello??"]
for dtypes in [("T", "U"), ("U", "T")]:
result = np.strings.strip(
np.array(arg, dtype=dtypes[0]),
np.array(strip_char, dtype=dtypes[1])
)
assert_array_equal(result, answer)
assert result.dtype.char == "T"
def test_replace_promoter():
arg = ["Hello, planet!", "planet, Hello!"]
old = "planet"
new = "world"
answer = ["Hello, world!", "world, Hello!"]
for dtypes in itertools.product("TU", repeat=3):
if dtypes == ("U", "U", "U"):
continue
answer_arr = np.strings.replace(
np.array(arg, dtype=dtypes[0]),
np.array(old, dtype=dtypes[1]),
np.array(new, dtype=dtypes[2]),
)
assert_array_equal(answer_arr, answer)
assert answer_arr.dtype.char == "T"
def test_center_promoter():
arg = ["Hello", "planet!"]
fillchar = "/"
for dtypes in [("T", "U"), ("U", "T")]:
answer = np.strings.center(
np.array(arg, dtype=dtypes[0]), 9, np.array(fillchar, dtype=dtypes[1])
)
assert_array_equal(answer, ["//Hello//", "/planet!/"])
assert answer.dtype.char == "T"
# Shared inputs for the datetime/timedelta <-> string cast tests below;
# index 3 is NaT in both lists so NA handling can be exercised.
DATETIME_INPUT = [
    np.datetime64("1923-04-14T12:43:12"),
    np.datetime64("1994-06-21T14:43:15"),
    np.datetime64("2001-10-15T04:10:32"),
    np.datetime64("NaT"),
    np.datetime64("1995-11-25T16:02:16"),
    np.datetime64("2005-01-04T03:14:12"),
    np.datetime64("2041-12-03T14:05:03"),
]
TIMEDELTA_INPUT = [
    np.timedelta64(12358, "s"),
    np.timedelta64(23, "s"),
    np.timedelta64(74, "s"),
    np.timedelta64("NaT"),
    np.timedelta64(23, "s"),
    np.timedelta64(73, "s"),
    np.timedelta64(7, "s"),
]
@pytest.mark.parametrize(
"input_data, input_dtype",
[
(DATETIME_INPUT, "M8[s]"),
(TIMEDELTA_INPUT, "m8[s]")
]
)
def test_datetime_timedelta_cast(dtype, input_data, input_dtype):
a = np.array(input_data, dtype=input_dtype)
has_na = hasattr(dtype, "na_object")
is_str = isinstance(getattr(dtype, "na_object", None), str)
if not has_na or is_str:
a = np.delete(a, 3)
sa = a.astype(dtype)
ra = sa.astype(a.dtype)
if has_na and not is_str:
assert sa[3] is dtype.na_object
assert np.isnat(ra[3])
assert_array_equal(a, ra)
if has_na and not is_str:
# don't worry about comparing how NaT is converted
sa = np.delete(sa, 3)
a = np.delete(a, 3)
if input_dtype.startswith("M"):
assert_array_equal(sa, a.astype("U"))
else:
# The timedelta to unicode cast produces strings
# that aren't round-trippable and we don't want to
# reproduce that behavior in stringdtype
assert_array_equal(sa, a.astype("int64").astype("U"))
def test_nat_casts():
s = 'nat'
all_nats = itertools.product(*zip(s.upper(), s.lower()))
all_nats = list(map(''.join, all_nats))
NaT_dt = np.datetime64('NaT')
NaT_td = np.timedelta64('NaT')
for na_object in [np._NoValue, None, np.nan, 'nat', '']:
# numpy treats empty string and all case combinations of 'nat' as NaT
dtype = StringDType(na_object=na_object)
arr = np.array([''] + all_nats, dtype=dtype)
dt_array = arr.astype('M8[s]')
td_array = arr.astype('m8[s]')
assert_array_equal(dt_array, NaT_dt)
assert_array_equal(td_array, NaT_td)
if na_object is np._NoValue:
output_object = 'NaT'
else:
output_object = na_object
for arr in [dt_array, td_array]:
assert_array_equal(
arr.astype(dtype),
np.array([output_object]*arr.size, dtype=dtype))
def test_nat_conversion():
    """With coercion disabled, NaT values must not coerce to strings."""
    nats = (np.datetime64("NaT", "s"), np.timedelta64("NaT", "s"))
    no_coerce = StringDType(coerce=False)
    for nat in nats:
        with pytest.raises(ValueError, match="string coercion is disabled"):
            np.array(["a", nat], dtype=no_coerce)
def test_growing_strings(dtype):
# growing a string leads to a heap allocation, this tests to make sure
# we do that bookkeeping correctly for all possible starting cases
data = [
"hello", # a short string
"abcdefghijklmnopqestuvwxyz", # a medium heap-allocated string
"hello" * 200, # a long heap-allocated string
]
arr = np.array(data, dtype=dtype)
uarr = np.array(data, dtype=str)
for _ in range(5):
arr = arr + arr
uarr = uarr + uarr
assert_array_equal(arr, uarr)
@pytest.mark.skipif(IS_WASM, reason="no threading support in wasm")
def test_threaded_access_and_mutation(dtype, random_string_list):
    # this test uses an RNG and may crash or cause deadlocks if there is a
    # threading bug
    # Stress test: hammer one shared array from 8 worker threads with a
    # random mix of item writes, ufunc calls, and bulk re-initialization.
    # There are no result assertions; passing means "no crash or deadlock".
    rng = np.random.default_rng(0x4D3D3D3)

    def func(arr):
        # NOTE(review): `rng` is shared across threads and the randint call
        # below uses the legacy global np.random stream rather than `rng`;
        # apparently acceptable for a stress test, but confirm.
        rnd = rng.random()
        # either write to random locations in the array, compute a ufunc, or
        # re-initialize the array
        if rnd < 0.25:
            num = np.random.randint(0, arr.size)
            arr[num] = arr[num] + "hello"
        elif rnd < 0.5:
            if rnd < 0.375:
                np.add(arr, arr)
            else:
                np.add(arr, arr, out=arr)
        elif rnd < 0.75:
            # BUGFIX: this threshold was 0.875, which is always true for
            # rnd in [0.5, 0.75), so the out= variant below was unreachable.
            # 0.625 is the branch midpoint, mirroring the add branch above.
            if rnd < 0.625:
                np.multiply(arr, np.int64(2))
            else:
                np.multiply(arr, np.int64(2), out=arr)
        else:
            arr[:] = random_string_list

    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe:
        arr = np.array(random_string_list, dtype=dtype)
        futures = [tpe.submit(func, arr) for _ in range(500)]
        for f in futures:
            f.result()
# Inputs shared by the ufunc tests: long ASCII, non-ASCII text, and embedded
# newlines/tabs to exercise multi-byte and control characters.
UFUNC_TEST_DATA = [
    "hello" * 10,
    "AeΒ’ββ¬ π" * 20,
    "entry\nwith\nnewlines",
    "entry\twith\ttabs",
]
@pytest.fixture
def string_array(dtype):
    # The shared test data as a StringDType array (dtype from fixture).
    return np.array(UFUNC_TEST_DATA, dtype=dtype)
@pytest.fixture
def unicode_array():
    # The same data as a fixed-width unicode array, used as the reference.
    return np.array(UFUNC_TEST_DATA, dtype=np.str_)
# Unary functions that return a NaN-like NA argument unchanged
# (see the NA-handling branch of test_unary).
NAN_PRESERVING_FUNCTIONS = [
    "capitalize",
    "expandtabs",
    "lower",
    "lstrip",
    "rstrip",
    "splitlines",
    "strip",
    "swapcase",
    "title",
    "upper",
]
# Unary predicates with boolean results; a NaN-like NA yields False.
BOOL_OUTPUT_FUNCTIONS = [
    "isalnum",
    "isalpha",
    "isdigit",
    "islower",
    "isspace",
    "istitle",
    "isupper",
    "isnumeric",
    "isdecimal",
]
# All unary functions exercised by test_unary. The original list repeated
# "isalnum", "islower", "istitle" and "isupper" at the end, which only
# re-ran identical parametrized cases; the duplicates are dropped here.
UNARY_FUNCTIONS = [
    "str_len",
    "capitalize",
    "expandtabs",
    "isalnum",
    "isalpha",
    "isdigit",
    "islower",
    "isspace",
    "istitle",
    "isupper",
    "lower",
    "lstrip",
    "rstrip",
    "splitlines",
    "strip",
    "swapcase",
    "title",
    "upper",
    "isnumeric",
    "isdecimal",
]
# Functions whose np.char implementation goes through _vec_string,
# which has no NA support (see test_unary).
UNIMPLEMENTED_VEC_STRING_FUNCTIONS = [
    "capitalize",
    "expandtabs",
    "lower",
    "splitlines",
    "swapcase",
    "title",
    "upper",
]
# Functions that exist in np.char but not in np.strings.
ONLY_IN_NP_CHAR = [
    "join",
    "split",
    "rsplit",
    "splitlines"
]
@pytest.mark.parametrize("function_name", UNARY_FUNCTIONS)
def test_unary(string_array, unicode_array, function_name):
if function_name in ONLY_IN_NP_CHAR:
func = getattr(np.char, function_name)
else:
func = getattr(np.strings, function_name)
dtype = string_array.dtype
sres = func(string_array)
ures = func(unicode_array)
if sres.dtype == StringDType():
ures = ures.astype(StringDType())
assert_array_equal(sres, ures)
if not hasattr(dtype, "na_object"):
return
is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0]
is_str = isinstance(dtype.na_object, str)
na_arr = np.insert(string_array, 0, dtype.na_object)
if function_name in UNIMPLEMENTED_VEC_STRING_FUNCTIONS:
if not is_str:
# to avoid these errors we'd need to add NA support to _vec_string
with pytest.raises((ValueError, TypeError)):
func(na_arr)
else:
if function_name == "splitlines":
assert func(na_arr)[0] == func(dtype.na_object)[()]
else:
assert func(na_arr)[0] == func(dtype.na_object)
return
if function_name == "str_len" and not is_str:
# str_len always errors for any non-string null, even NA ones because
# it has an integer result
with pytest.raises(ValueError):
func(na_arr)
return
if function_name in BOOL_OUTPUT_FUNCTIONS:
if is_nan:
assert func(na_arr)[0] is np.False_
elif is_str:
assert func(na_arr)[0] == func(dtype.na_object)
else:
with pytest.raises(ValueError):
func(na_arr)
return
if not (is_nan or is_str):
with pytest.raises(ValueError):
func(na_arr)
return
res = func(na_arr)
if is_nan and function_name in NAN_PRESERVING_FUNCTIONS:
assert res[0] is dtype.na_object
elif is_str:
assert res[0] == func(dtype.na_object)
# Marker for cases hitting the known fixed-width unicode output-width bug.
unicode_bug_fail = pytest.mark.xfail(
    reason="unicode output width is buggy", strict=True
)
# None means that the argument is a string array
# Each entry is (function_name, args_template); call_func substitutes the
# array under test into the None slot(s).
BINARY_FUNCTIONS = [
    ("add", (None, None)),
    ("multiply", (None, 2)),
    ("mod", ("format: %s", None)),
    ("center", (None, 25)),
    ("count", (None, "A")),
    ("encode", (None, "UTF-8")),
    ("endswith", (None, "lo")),
    ("find", (None, "A")),
    ("index", (None, "e")),
    ("join", ("-", None)),
    ("ljust", (None, 12)),
    ("lstrip", (None, "A")),
    ("partition", (None, "A")),
    ("replace", (None, "A", "B")),
    ("rfind", (None, "A")),
    ("rindex", (None, "e")),
    ("rjust", (None, 12)),
    ("rsplit", (None, "A")),
    ("rstrip", (None, "A")),
    ("rpartition", (None, "A")),
    ("split", (None, "A")),
    ("strip", (None, "A")),
    ("startswith", (None, "A")),
    ("zfill", (None, 12)),
]
# Binary functions that propagate a NaN-like NA operand to the result
# (test_binary asserts res[0] is dtype.na_object for these).
PASSES_THROUGH_NAN_NULLS = [
    "add",
    "center",
    "ljust",
    "multiply",
    "replace",
    "rjust",
    "strip",
    "lstrip",
    "rstrip",
    # BUGFIX: a missing comma after a duplicated "replace" entry here used
    # to concatenate into the bogus name "replacezfill", silently dropping
    # "zfill" from null-handling coverage.
    "zfill",
]
# Binary predicates for which a NaN-like NA yields False
# (test_binary asserts res[0] is np.False_).
NULLS_ARE_FALSEY = [
    "startswith",
    "endswith",
]
# Binary functions that raise for any non-string NA
# (test_binary expects ValueError/TypeError).
NULLS_ALWAYS_ERROR = [
    "count",
    "find",
    "rfind",
]
# Union of the categories above: the binary functions whose NA behavior
# test_binary checks at all.
SUPPORTS_NULLS = (
    PASSES_THROUGH_NAN_NULLS +
    NULLS_ARE_FALSEY +
    NULLS_ALWAYS_ERROR
)
def call_func(func, args, array, sanitize=True):
    """Invoke *func*, substituting *array* for the ``None`` slot in *args*.

    Parameters
    ----------
    func : callable
        The string function under test (from ``np.strings`` or ``np.char``).
    args : tuple
        Argument template from ``BINARY_FUNCTIONS``; ``None`` marks where
        the string array goes.
    array : np.ndarray
        The array substituted into the ``None`` slot(s).
    sanitize : bool, optional
        When True, convert str arguments to 0-d arrays of ``array.dtype``
        so the function sees a matching string dtype.
    """
    if args == (None, None):
        return func(array, array)
    if args[0] is None:
        if sanitize:
            san_args = tuple(
                np.array(arg, dtype=array.dtype) if isinstance(arg, str) else
                arg for arg in args[1:]
            )
        else:
            san_args = args[1:]
        return func(array, *san_args)
    if args[1] is None:
        return func(args[0], array)
    # A template without a None slot is malformed. Raise explicitly rather
    # than `assert 0`, which is stripped under `python -O` and would then
    # silently return None.
    raise AssertionError(f"malformed args template: {args!r}")
@pytest.mark.parametrize("function_name, args", BINARY_FUNCTIONS)
def test_binary(string_array, unicode_array, function_name, args):
if function_name in ONLY_IN_NP_CHAR:
func = getattr(np.char, function_name)
else:
func = getattr(np.strings, function_name)
sres = call_func(func, args, string_array)
ures = call_func(func, args, unicode_array, sanitize=False)
if not isinstance(sres, tuple) and sres.dtype == StringDType():
ures = ures.astype(StringDType())
assert_array_equal(sres, ures)
dtype = string_array.dtype
if function_name not in SUPPORTS_NULLS or not hasattr(dtype, "na_object"):
return
na_arr = np.insert(string_array, 0, dtype.na_object)
is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0]
is_str = isinstance(dtype.na_object, str)
should_error = not (is_nan or is_str)
if (
(function_name in NULLS_ALWAYS_ERROR and not is_str)
or (function_name in PASSES_THROUGH_NAN_NULLS and should_error)
or (function_name in NULLS_ARE_FALSEY and should_error)
):
with pytest.raises((ValueError, TypeError)):
call_func(func, args, na_arr)
return
res = call_func(func, args, na_arr)
if is_str:
assert res[0] == call_func(func, args, na_arr[:1])
elif function_name in NULLS_ARE_FALSEY:
assert res[0] is np.False_
elif function_name in PASSES_THROUGH_NAN_NULLS:
assert res[0] is dtype.na_object
else:
# shouldn't ever get here
assert 0
@pytest.mark.parametrize("function, expected", [
(np.strings.find, [[2, -1], [1, -1]]),
(np.strings.startswith, [[False, False], [True, False]])])
@pytest.mark.parametrize("start, stop", [
(1, 4),
(np.int8(1), np.int8(4)),
(np.array([1, 1], dtype='u2'), np.array([4, 4], dtype='u2'))])
def test_non_default_start_stop(function, start, stop, expected):
a = np.array([["--π--", "--π¦--"],
["-π---", "-π¦---"]], "T")
indx = function(a, "π", start, stop)
assert_array_equal(indx, expected)
@pytest.mark.parametrize("count", [2, np.int8(2), np.array([2, 2], 'u2')])
def test_replace_non_default_repeat(count):
a = np.array(["π--", "π¦-π¦-"], "T")
result = np.strings.replace(a, "π¦-", "π¦β ", count)
assert_array_equal(result, np.array(["π--", "π¦β π¦β "], "T"))
def test_strip_ljust_rjust_consistency(string_array, unicode_array):
rjs = np.char.rjust(string_array, 1000)
rju = np.char.rjust(unicode_array, 1000)
ljs = np.char.ljust(string_array, 1000)
lju = np.char.ljust(unicode_array, 1000)
assert_array_equal(
np.char.lstrip(rjs),
np.char.lstrip(rju).astype(StringDType()),
)
assert_array_equal(
np.char.rstrip(ljs),
np.char.rstrip(lju).astype(StringDType()),
)
assert_array_equal(
np.char.strip(ljs),
np.char.strip(lju).astype(StringDType()),
)
assert_array_equal(
np.char.strip(rjs),
np.char.strip(rju).astype(StringDType()),
)
def test_unset_na_coercion():
    """A dtype instance with an unset NA object is compatible with one that
    has the NA object set.

    Exercised via the "add" and "equal" ufuncs, but all ufuncs that accept
    more than one string argument and produce a string should behave this
    way.
    TODO: generalize to more ufuncs
    """
    inp = ["hello", "world"]
    arr = np.array(inp, dtype=StringDType(na_object=None))
    # Concatenation works against plain strings and compatible dtypes.
    for op_dtype in [None, StringDType(), StringDType(coerce=False),
                     StringDType(na_object=None)]:
        operand = "2" if op_dtype is None else np.array("2", dtype=op_dtype)
        assert_array_equal(arr + operand, ["hello2", "world2"])
    # dtype instances with distinct explicitly set NA objects are
    # incompatible for string-producing ufuncs.
    for op_dtype in [StringDType(na_object=pd_NA), StringDType(na_object="")]:
        operand = np.array("2", dtype=op_dtype)
        with pytest.raises(TypeError):
            arr + operand
    # Comparisons only consider the na_object.
    for op_dtype in [None, StringDType(), StringDType(coerce=True),
                     StringDType(na_object=None)]:
        operand = inp if op_dtype is None else np.array(inp, dtype=op_dtype)
        assert_array_equal(arr, operand)
    for op_dtype in [StringDType(na_object=pd_NA),
                     StringDType(na_object=np.nan)]:
        operand = np.array(inp, dtype=op_dtype)
        with pytest.raises(TypeError):
            arr == operand
def test_repeat(string_array):
res = string_array.repeat(1000)
# Create an empty array with expanded dimension, and fill it. Then,
# reshape it to the expected result.
expected = np.empty_like(string_array, shape=string_array.shape + (1000,))
expected[...] = string_array[:, np.newaxis]
expected = expected.reshape(-1)
assert_array_equal(res, expected, strict=True)
@pytest.mark.parametrize("tile", [1, 6, (2, 5)])
def test_accumulation(string_array, tile):
"""Accumulation is odd for StringDType but tests dtypes with references.
"""
# Fill with mostly empty strings to not create absurdly big strings
arr = np.zeros_like(string_array, shape=(100,))
arr[:len(string_array)] = string_array
arr[-len(string_array):] = string_array
# Bloat size a bit (get above thresholds and test >1 ndim).
arr = np.tile(string_array, tile)
res = np.add.accumulate(arr, axis=0)
res_obj = np.add.accumulate(arr.astype(object), axis=0)
assert_array_equal(res, res_obj.astype(arr.dtype), strict=True)
if arr.ndim > 1:
res = np.add.accumulate(arr, axis=-1)
res_obj = np.add.accumulate(arr.astype(object), axis=-1)
assert_array_equal(res, res_obj.astype(arr.dtype), strict=True)
class TestImplementation:
    """Check that strings are stored in the arena when possible.
    This tests implementation details, so should be adjusted if
    the implementation changes.

    The tests view each entry's packed representation (see ``get_view``):
    the top nibble of the final ``size_and_flags`` byte holds the
    MISSING/INITIALIZED/OUTSIDE_ARENA/LONG flag bits, and for short
    strings the bottom nibble holds the length (see ``test_setup``).
    """
    @classmethod
    def setup_class(self):
        # NOTE: decorated with @classmethod, so `self` here is actually the
        # class object and these become class attributes.
        # Flag bits, interpreted by the is_*/in_arena helpers below.
        self.MISSING = 0x80
        self.INITIALIZED = 0x40
        self.OUTSIDE_ARENA = 0x20
        self.LONG = 0x10
        self.dtype = StringDType(na_object=np.nan)
        self.sizeofstr = self.dtype.itemsize
        sp = self.dtype.itemsize // 2 # pointer size = sizeof(size_t)
        # Below, size is not strictly correct, since it really uses
        # 7 (or 3) bytes, but good enough for the tests here.
        self.view_dtype = np.dtype([
            ('offset', f'u{sp}'),
            ('size', f'u{sp // 2}'),
            ('xsiz', f'V{sp // 2 - 1}'),
            ('size_and_flags', 'u1'),
        ] if sys.byteorder == 'little' else [
            ('size_and_flags', 'u1'),
            ('xsiz', f'V{sp // 2 - 1}'),
            ('size', f'u{sp // 2}'),
            ('offset', f'u{sp}'),
        ])
        # One sample per storage case: empty (uninitialized), short
        # (inline), medium (arena), long (arena; see test_setup).
        self.s_empty = ""
        self.s_short = "01234"
        self.s_medium = "abcdefghijklmnopqrstuvwxyz"
        self.s_long = "-=+" * 100
        self.a = np.array(
            [self.s_empty, self.s_short, self.s_medium, self.s_long],
            self.dtype)
    def get_view(self, a):
        # Cannot view a StringDType as anything else directly, since
        # it has references. So, we use a stride trick hack.
        from numpy.lib._stride_tricks_impl import DummyArray
        interface = dict(a.__array_interface__)
        interface['descr'] = self.view_dtype.descr
        interface['typestr'] = self.view_dtype.str
        return np.asarray(DummyArray(interface, base=a))
    def get_flags(self, a):
        # The flag bits live in the top nibble of the final byte.
        return self.get_view(a)['size_and_flags'] & 0xf0
    def is_short(self, a):
        # Short strings: initialized and outside the arena, LONG unset.
        return self.get_flags(a) == self.INITIALIZED | self.OUTSIDE_ARENA
    def is_on_heap(self, a):
        # Heap strings additionally carry the LONG bit.
        return self.get_flags(a) == (self.INITIALIZED
                                     | self.OUTSIDE_ARENA
                                     | self.LONG)
    def is_missing(self, a):
        return self.get_flags(a) & self.MISSING == self.MISSING
    def in_arena(self, a):
        # Arena strings: initialized but OUTSIDE_ARENA unset.
        return (self.get_flags(a) & (self.INITIALIZED | self.OUTSIDE_ARENA)
                == self.INITIALIZED)
    def test_setup(self):
        """Sanity-check the storage classification of the sample strings."""
        is_short = self.is_short(self.a)
        length = np.strings.str_len(self.a)
        # Strings of 1..15 characters are stored as short strings.
        assert_array_equal(is_short, (length > 0) & (length <= 15))
        assert_array_equal(self.in_arena(self.a), [False, False, True, True])
        assert_array_equal(self.is_on_heap(self.a), False)
        assert_array_equal(self.is_missing(self.a), False)
        view = self.get_view(self.a)
        # Short strings keep their length in the low nibble of the flags
        # byte; other strings use the dedicated size field.
        sizes = np.where(is_short, view['size_and_flags'] & 0xf,
                         view['size'])
        assert_array_equal(sizes, np.strings.str_len(self.a))
        assert_array_equal(view['xsiz'][2:],
                           np.void(b'\x00' * (self.sizeofstr // 4 - 1)))
        # Check that the medium string uses only 1 byte for its length
        # in the arena, while the long string takes 8 (or 4).
        offsets = view['offset']
        assert offsets[2] == 1
        assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2
    def test_empty(self):
        """np.empty leaves entries uninitialized (all flags clear)."""
        e = np.empty((3,), self.dtype)
        assert_array_equal(self.get_flags(e), 0)
        assert_array_equal(e, "")
    def test_zeros(self):
        """np.zeros also produces uninitialized, empty entries."""
        z = np.zeros((2,), self.dtype)
        assert_array_equal(self.get_flags(z), 0)
        assert_array_equal(z, "")
    def test_copy(self):
        """A copy preserves flags and arena offsets."""
        c = self.a.copy()
        assert_array_equal(self.get_flags(c), self.get_flags(self.a))
        assert_array_equal(c, self.a)
        offsets = self.get_view(c)['offset']
        assert offsets[2] == 1
        assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2
    def test_arena_use_with_setting(self):
        """Assigning into a fresh array reproduces the source's flags."""
        c = np.zeros_like(self.a)
        assert_array_equal(self.get_flags(c), 0)
        c[:] = self.a
        assert_array_equal(self.get_flags(c), self.get_flags(self.a))
        assert_array_equal(c, self.a)
    def test_arena_reuse_with_setting(self):
        """Re-assigning identical contents keeps the same flags."""
        c = self.a.copy()
        c[:] = self.a
        assert_array_equal(self.get_flags(c), self.get_flags(self.a))
        assert_array_equal(c, self.a)
    def test_arena_reuse_after_missing(self):
        c = self.a.copy()
        c[:] = np.nan
        assert np.all(self.is_missing(c))
        # Replacing with the original strings, the arena should be reused.
        c[:] = self.a
        assert_array_equal(self.get_flags(c), self.get_flags(self.a))
        assert_array_equal(c, self.a)
    def test_arena_reuse_after_empty(self):
        c = self.a.copy()
        c[:] = ""
        assert_array_equal(c, "")
        # Replacing with the original strings, the arena should be reused.
        c[:] = self.a
        assert_array_equal(self.get_flags(c), self.get_flags(self.a))
        assert_array_equal(c, self.a)
    def test_arena_reuse_for_shorter(self):
        c = self.a.copy()
        # A string slightly shorter than the shortest in the arena
        # should be used for all strings in the arena.
        c[:] = self.s_medium[:-1]
        assert_array_equal(c, self.s_medium[:-1])
        # first empty string in original was never initialized, so
        # filling it in now leaves it initialized inside the arena.
        # second string started as a short string so it can never live
        # in the arena.
        in_arena = np.array([True, False, True, True])
        assert_array_equal(self.in_arena(c), in_arena)
        # But when a short string is replaced, it will go on the heap.
        assert_array_equal(self.is_short(c), False)
        assert_array_equal(self.is_on_heap(c), ~in_arena)
        # We can put the originals back, and they'll still fit,
        # and short strings are back as short strings
        c[:] = self.a
        assert_array_equal(c, self.a)
        assert_array_equal(self.in_arena(c), in_arena)
        assert_array_equal(self.is_short(c), self.is_short(self.a))
        assert_array_equal(self.is_on_heap(c), False)
    def test_arena_reuse_if_possible(self):
        c = self.a.copy()
        # A slightly longer string will not fit in the arena for
        # the medium string, but will fit for the longer one.
        c[:] = self.s_medium + "Β±"
        assert_array_equal(c, self.s_medium + "Β±")
        in_arena_exp = np.strings.str_len(self.a) >= len(self.s_medium) + 1
        # first entry started uninitialized and empty, so filling it leaves
        # it in the arena
        in_arena_exp[0] = True
        assert not np.all(in_arena_exp == self.in_arena(self.a))
        assert_array_equal(self.in_arena(c), in_arena_exp)
        assert_array_equal(self.is_short(c), False)
        assert_array_equal(self.is_on_heap(c), ~in_arena_exp)
        # And once outside arena, it stays outside, since offset is lost.
        # But short strings are used again.
        c[:] = self.a
        is_short_exp = self.is_short(self.a)
        assert_array_equal(c, self.a)
        assert_array_equal(self.in_arena(c), in_arena_exp)
        assert_array_equal(self.is_short(c), is_short_exp)
        assert_array_equal(self.is_on_heap(c), ~in_arena_exp & ~is_short_exp)
    def test_arena_no_reuse_after_short(self):
        c = self.a.copy()
        # If we replace a string with a short string, it cannot
        # go into the arena after because the offset is lost.
        c[:] = self.s_short
        assert_array_equal(c, self.s_short)
        assert_array_equal(self.in_arena(c), False)
        c[:] = self.a
        assert_array_equal(c, self.a)
        assert_array_equal(self.in_arena(c), False)
        assert_array_equal(self.is_on_heap(c), self.in_arena(self.a))
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_core@tests@test_stringdtype.py@.PATH_END.py
|
{
"filename": "CODE_OF_CONDUCT.md",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/CODE_OF_CONDUCT.md",
"type": "Markdown"
}
|
# PyAutoFit Code of Conduct
**Table of Contents**
- [The Short Version](#the-short-version)
- [The Longer Version](#the-longer-version)
- [PyAutoFit Diversity Statement](#pyautofit-diversity-statement)
- [PyAutoFit Code of Conduct: Introduction & Scope](#pyautofit-code-of-conduct-introduction--scope)
- [Standards for Behavior](#standards-for-behavior)
- [Unacceptable Behavior](#unacceptable-behavior)
- [Reporting Guidelines](#reporting-guidelines)
- [How to Submit a Report](#how-to-submit-a-report)
- [Person(s) Responsible for Resolving Complaints](#persons-responsible-for-resolving-complaints)
- [Conflicts of Interest](#conflicts-of-interest)
- [What to Include in a Report](#what-to-include-in-a-report)
- [Enforcement: What Happens After a Report is Filed?](#enforcement-what-happens-after-a-report-is-filed)
- [Acknowledgment and Responding to Immediate Needs](#acknowledgment-and-responding-to-immediate-needs)
- [Reviewing the Report](#reviewing-the-report)
- [Contacting the Person Reported](#contacting-the-person-reported)
- [Response and Potential Consequences](#response-and-potential-consequences)
- [Appealing a Decision](#appealing-a-decision)
- [Timeline Summary:](#timeline-summary)
- [Confirming Receipt](#confirming-receipt)
- [Reviewing the Report](#reviewing-the-report-1)
- [Consequences & Resolution](#consequences--resolution)
- [License](#license)
## The Short Version
Be kind to others. Do not insult or put down others. Behave professionally. Remember that harassment and sexist,
racist, or exclusionary jokes are not appropriate for PyAutoFit.
All communication should be appropriate for a professional audience including people of many different backgrounds.
Sexual language and imagery is not appropriate.
PyAutoFit is dedicated to providing a harassment-free community for everyone, regardless of gender, sexual orientation,
gender identity and expression, disability, physical appearance, body size, race, or religion. We do not tolerate
harassment of community members in any form.
Thank you for helping make this a welcoming, friendly community for all.
## The Longer Version
### PyAutoFit Diversity Statement
PyAutoFit welcomes and encourages participation in our community by people of all backgrounds and identities. We
are committed to promoting and sustaining a culture that values mutual respect, tolerance, and learning, and we work
together as a community to help each other live out these values.
We have created this diversity statement because we believe that a diverse community is stronger, more vibrant,
and produces better software and better science. A diverse community where people treat each other with respect has
more potential contributors, more sources for ideas, and fewer shared assumptions that might hinder development
or research.
Although we have phrased the formal diversity statement generically to make it all-inclusive, we recognize that there
are specific identities that are impacted by systemic discrimination and marginalization. We welcome all people to
participate in the PyAutoFit community regardless of their identity or background.
### PyAutoFit Code of Conduct: Introduction & Scope
This code of conduct should be honored by everyone who participates in the PyAutoFit community. It should be
honored in any PyAutoFit-related activities, by anyone claiming affiliation with PyAutoFit, and especially when
someone is representing PyAutoFit in any role (including as an event volunteer or speaker).
This code of conduct applies to all spaces managed by PyAutoFit, including all public and private mailing lists,
issue trackers, wikis, forums, and any other communication channel used by our community. The code of conduct equally
applies at PyAutoFit events and governs standards of behavior for attendees, speakers, volunteers, booth staff,
and event sponsors.
This code is not exhaustive or complete. It serves to distill our understanding of a collaborative, inclusive
community culture. Please try to follow this code in spirit as much as in letter, to create a friendly and
productive environment that enriches the PyAutoFit community.
The PyAutoFit Code of Conduct follows below.
### Standards for Behavior
PyAutoFit is a worldwide community. All communication should be appropriate for a professional audience including
people of many different backgrounds.
**Please always be kind and courteous. There's never a need to be mean or rude or disrespectful.** Thank you for
helping make this a welcoming, friendly community for all.
We strive to:
**Be empathetic, welcoming, friendly, and patient.** We remember that PyAutoFit is crafted by human beings who
deserve to be treated with kindness and empathy. We work together to resolve conflict and assume good intentions.
We may all experience some frustration from time to time, but we do not allow frustration to turn into a personal
attack. A community where people feel uncomfortable or threatened is not a productive one.
**Be collaborative.** Our work depends on the participation of many people, and in turn others depend on our work.
Open source communities depend on effective and friendly collaboration to achieve their goals.
**Be inquisitive.** Nobody knows everything! Asking questions early avoids many problems later, so we encourage
questions, although we may direct them to the appropriate forum. We will try hard to be responsive and helpful.
**Be careful in the words that we choose.** We are careful and respectful in our communication and we take
responsibility for our own speech. Be kind to others. Do not insult or put down other members of the community.
#### Unacceptable Behavior
We are committed to making participation in this community a harassment-free experience.
We will not accept harassment or other exclusionary behaviours, such as:
- The use of sexualized language or imagery
- Excessive profanity (please avoid curse words; people differ greatly in their sensitivity to swearing)
- Posting sexually explicit or violent material
- Violent or intimidating threats or language directed against another person
- Inappropriate physical contact and/or unwelcome sexual attention or sexual comments
- Sexist, racist, or otherwise discriminatory jokes and language
- Trolling or insulting and derogatory comments
- Written or verbal comments which have the effect of excluding people on the basis of membership in a specific group,
including level of experience, gender, gender identity and expression, sexual orientation, disability, neurotype,
personal appearance, body size, race, ethnicity, age, religion, or nationality
- Public or private harassment
- Sharing private content, such as emails sent privately or non-publicly, or direct message history, without the
sender's consent
- Continuing to initiate interaction (such as photography, recording, messaging, or conversation) with someone after
being asked to stop
- Sustained disruption of talks, events, or communications, such as heckling of a speaker
- Publishing (or threatening to post) other people's personally identifying information ("doxing"), such as
physical or electronic addresses, without explicit permission
- Other unethical or unprofessional conduct
- Advocating for, or encouraging, any of the above behaviors
### Reporting Guidelines
If you believe someone is violating the code of conduct, please report this in a timely manner. Code of conduct
violations reduce the value of the community for everyone. The PyAutoFit leadership team takes reports of misconduct
very seriously and is committed to preserving and maintaining the welcoming nature of our community.
**All reports will be kept confidential.**
In some cases we may determine that a public statement will need to be made. If that's the case, the identities of
all involved parties and reporters will remain confidential unless those individuals instruct us otherwise.
All complaints will be reviewed and investigated and will result in a response that is deemed necessary and
appropriate to the circumstances. The PyAutoFit team commits to maintaining confidentiality with regard to the
reporter of an incident.
For possibly unintentional breaches of the code of conduct, you may want to respond to the person and point out
this code of conduct (either in public or in private, whatever is most appropriate). If you would prefer not to do
that, please report the issue to PyAutoFit directly, or ask James Nightingale for advice in confidence. Complete contact
information is below, under "How to Submit a Report."
Take care of each other. Alert PyAutoFit if you notice a dangerous situation, someone in distress, or violations of
this code of conduct, even if they seem inconsequential.
#### How to Submit a Report
**If you feel your safety is in jeopardy or the situation is an emergency, we urge you to contact local law enforcement
before making a report to PyAutoFit.** (In the U.K., dial 999.)
PyAutoFit is committed to promptly addressing any reported issues. If you have experienced or witnessed behavior that
violates the PyAutoFit Code of Conduct, please report it by sending an email to one of the members of the PyAutoFit
CoC Enforcement Team.
#### Person(s) Responsible for Resolving Complaints
All reports of breaches of the code of conduct will be investigated and handled by the **PyAutoFit Code of Conduct Enforcement Team**.
The current PyAutoFit Code of Conduct Enforcement Team consists of:
- James Nightingale
- [*james.w.nightingale@durham.ac.uk*](mailto:james.w.nightingale@durham.ac.uk)
#### Conflicts of Interest
In the event of any conflict of interest, the team member will immediately notify the PyAutoFit Code of Conduct
Enforcement Team and recuse themselves if necessary.
#### What to Include in a Report
Our ability to address any code of conduct breaches in a timely and effective manner is impacted by the amount of
information you can provide, so, **our reporting form asks you to include as much of the following information as you can**:
- **Your contact info** (so we can get in touch with you if we need to follow up). This will be kept confidential.
If you wish to remain anonymous, your information will not be shared beyond the person receiving the initial report.
- The **approximate time and location of the incident** (please be as specific as possible)
- **Identifying information** (e.g. name, nickname, screen name, physical description) of the individual whose
behavior is being reported
- **Description of the behavior** (if reporting harassing language, please be specific about the words
used), **your account of what happened**, and any available **supporting records** (e.g. email, GitHub issue, screenshots, etc.)
- **Description of the circumstances/context** surrounding the incident
- Let us know **if the incident is ongoing**, and/or if this is part of an ongoing pattern of behavior
- Names and contact info, if possible, of **anyone else who witnessed** or was involved in this incident. (Did
anyone else observe the incident?)
- **Any other relevant information** you believe we should have
At PyAutoFit Events: Event staff will attempt to gather and write down the above information from anyone making a
verbal report in-person at an event. Recording the details in writing is exceedingly important in order for us to
effectively respond to reports. If event staff write down a report taken verbally, then the person making the
report will be asked to review the written report for accuracy.
**If urgent action is needed regarding an incident at an in-person event, we strongly encourage you to reach out to the local event staff for immediate assistance.**
### Enforcement: What Happens After a Report is Filed?
What happens after a report is filed?
#### Acknowledgment and Responding to Immediate Needs
PyAutoFit and/or our event staff will attempt to ensure your safety and help with any immediate needs, particularly
at an in-person event. PyAutoFit will make every effort to **acknowledge receipt within 24 hours** (and we'll aim
for much more quickly than that).
<!-- PROJECT SHOULD REVIEW THE RESPONSE SCHEDULE LISTED ABOVE AND BELOW, AND DETERMINE WHETHER IT IS REALISTIC FOR THE PROJECT. -->
#### Reviewing the Report
PyAutoFit will make all efforts to **review the incident within three days** and determine:
- Whether this is an ongoing situation, or if there is a threat to anyone's physical safety
- What happened
- Whether this event constitutes a code of conduct violation
- Who the bad actor was, if any
#### Contacting the Person Reported
After PyAutoFit has had time to review and discuss the report, someone will attempt to contact the person who is the
subject of the report to inform them of what has been reported about them. We will then ask that person for their
account of what happened.
#### Response and Potential Consequences
Once PyAutoFit has completed our investigation of the report, we will make a decision as to how to respond. The
person making a report will not normally be consulted as to the proposed resolution of the issue, except insofar as
we need to understand how to help them feel safe.
Potential consequences for violating the PyAutoFit code of conduct include:
- Nothing (if we determine that no violation occurred)
- Private feedback or reprimand from PyAutoFit to the individual(s) involved
- Warning the person to cease their behavior and that any further reports will result in sanctions
- A public announcement that an incident occurred
- Mediation (only if both reporter and reportee agree)
- An imposed vacation (e.g. asking someone to "take a week off" from a mailing list)
- A permanent or temporary ban from some or all PyAutoFit spaces (mailing lists, GitHub repos, in-person events, etc.)
- Assistance to the complainant with a report to other bodies, for example, institutional offices or appropriate law enforcement agencies
- Removing a person from PyAutoFit membership or other formal affiliation
- Publishing an account of the harassment and calling for the resignation of the alleged harasser from their
responsibilities (usually pursued by people without formal authority: may be called for if the person is the
event leader, or refuses to stand aside from the conflict of interest, or similar)
- Any other response that PyAutoFit deems necessary and appropriate to the situation
At PyAutoFit events, if a participant engages in behavior that violates this code of conduct, the conference
organizers and staff may take any action they deem appropriate.
Potential consequences for violating the PyAutoFit Code of Conduct at an in-person event include:
- Warning the person to cease their behavior and that any further reports will result in sanctions
- Requiring that the person avoid any interaction with, and physical proximity to, the person they are harassing
for the remainder of the event
- Ending a talk that violates the policy early
- Not publishing the video or slides of a talk that violated the policy
- Not allowing a speaker who violated the policy to give (further) talks at the event now or in the future
- Immediately ending any event volunteer responsibilities and privileges the reported person holds
- Requiring that the person not volunteer for future events PyAutoFit runs (either indefinitely or for a certain time period)
- Expelling the person from the event without a refund
- Requiring that the person immediately leave the event and not return
- Banning the person from future events (either indefinitely or for a certain time period)
- Any other response that PyAutoFit deems necessary and appropriate to the situation
No one espousing views or values contrary to the standards of our code of conduct will be permitted to hold any
position representing PyAutoFit, including volunteer positions. PyAutoFit has the right and responsibility to
remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not
aligned with this code of conduct.
We aim to **respond within one week** to the original reporter with either a resolution or an explanation of why the
situation is not yet resolved.
We will contact the person who is the subject of the report to let them know what actions will be taken as a result of
the report, if any.
Our policy is to make sure that everyone aware of the initial incident is also made aware that official action has
been taken, while still respecting the privacy of individuals. PyAutoFit may choose to make a public report of the
incident, while maintaining the anonymity of those involved.
#### Appealing a Decision
To appeal a decision of PyAutoFit, contact James Nightingale via email at
[*james.w.nightingale@durham.ac.uk*](mailto:james.w.nightingale@durham.ac.uk) with your appeal and
the Leadership Team will review the case.
### Timeline Summary:
#### Confirming Receipt
PyAutoFit will make every effort to acknowledge receipt of a report **within 24 hours** (and we'll aim for much more
quickly than that).
#### Reviewing the Report
PyAutoFit will make all efforts to review the incident **within three days**.
#### Consequences & Resolution
We aim to respond **within one week** to the original reporter with either a resolution or an explanation of why
the situation is not yet resolved.
## License
This code of conduct has been adapted from [*NUMFOCUS code of conduct*](https://github.com/numfocus/numfocus/blob/main/manual/numfocus-coc.md#the-short-version),
which is adapted from numerous sources, including the [*Geek Feminism wiki, created by the Ada Initiative and other volunteers, which is under a Creative Commons Zero license*](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy), the [*Contributor Covenant version 1.2.0*](http://contributor-covenant.org/version/1/2/0/), the [*Bokeh Code of Conduct*](https://github.com/bokeh/bokeh/blob/main/CODE_OF_CONDUCT.md), the [*SciPy Code of Conduct*](https://github.com/jupyter/governance/blob/main/conduct/enforcement.md), the [*Carpentries Code of Conduct*](https://docs.carpentries.org/topic_folders/policies/code-of-conduct.html#enforcement-manual), and the [*NeurIPS Code of Conduct*](https://neurips.cc/public/CodeOfConduct).
**PyAutoFit Code of Conduct is licensed under the [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/).**
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@CODE_OF_CONDUCT.md@.PATH_END.py
|
{
"filename": "thermo.ipynb",
"repo_name": "wlxu/RelicClass",
"repo_path": "RelicClass_extracted/RelicClass-master/notebooks/thermo.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# import necessary modules
# uncomment to get plots displayed in notebook
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# classy is the Python wrapper of the CLASS Boltzmann code
from classy import Class
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
import math
```
```python
# esthetic definitions for the plots
# use a larger STIX font everywhere and an 8x6 inch default figure size
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
```
```python
# Cosmological input parameters for CLASS, requesting only temperature Cls
common_settings = {'output' : 'tCl',
                   # LambdaCDM parameters
                   'h':0.67556,
                   'omega_b':0.022032,
                   'omega_cdm':0.12038,
                   'A_s':2.215e-9,
                   'n_s':0.9619,
                   'tau_reio':0.0925,
                   # Take fixed value for primordial Helium (instead of automatic BBN adjustment)
                   'YHe':0.246,
                   'thermodynamics_verbose':1
                   }
##############
#
# call CLASS
#
###############
M = Class()
M.set(common_settings)
M.compute()
derived = M.get_current_derived_parameters(['tau_rec','conformal_age'])
thermo = M.get_thermodynamics()
# Bug fix: the original used Python 2 syntax (`print thermo.viewkeys()`),
# which is a SyntaxError under Python 3 and dict.viewkeys() no longer exists.
# dict.keys() with print-as-a-function works on both Python 2 and 3.
print(thermo.keys())
```
```python
# conformal time and visibility function returned by CLASS
tau = thermo['conf. time [Mpc]']
g = thermo['g [Mpc^-1]']
# to make the reionisation peak visible, rescale g by 100 for late times
g[:500] *= 100
#################
#
# start plotting
#
#################
#
plt.xlim([1.e2,derived['conformal_age']])
plt.xlabel(r'$\tau \,\,\, \mathrm{[Mpc]}$')
plt.ylabel(r'$\mathrm{visibility} \,\,\, g \,\,\, [\mathrm{Mpc}^{-1}]$')
# vertical line at recombination
plt.axvline(x=derived['tau_rec'],color='k')
# The conformal time at reionisation could be extracted from the code.
# But we know it because it is part of the standard output
# when thermodynamics_verbose=1
plt.axvline(x=4255.316282,color='k')
#
# Print functions one by one, saving between each (for slides)
#
# NOTE(review): the legend label says $\psi$ but the plotted curve is the
# visibility function g — confirm the label is intended
plt.semilogx(tau,g,'r',label=r'$\psi$')
```
```python
# save the figure to PDF, cropping surrounding whitespace
plt.savefig('thermo.pdf',bbox_inches='tight')
```
|
wlxuREPO_NAMERelicClassPATH_START.@RelicClass_extracted@RelicClass-master@notebooks@thermo.ipynb@.PATH_END.py
|
{
"filename": "dada_trigger.py",
"repo_name": "loostrum/darc",
"repo_path": "darc_extracted/darc-master/darc/dada_trigger.py",
"type": "Python"
}
|
#!/usr/bin/env python3
#
# dada_dbevent triggers
import os
import threading
import socket
import numpy as np
from astropy.time import Time, TimeDelta
from darc import DARCBase
from darc import util
from darc.definitions import TIME_UNIT
class DADATriggerException(Exception):
    """Base exception for errors raised by the DADATrigger service."""
    pass
class DADATrigger(DARCBase):
    """
    Generate and send dada_dbevent triggers
    """

    def __init__(self, *args, **kwargs):
        """
        Initialise the trigger service; all arguments are forwarded to DARCBase.
        """
        super(DADATrigger, self).__init__(*args, **kwargs)
        # worker threads for regular triggers and automated polcal dumps
        self.thread_trigger = None
        self.thread_polcal = None
        # when False, incoming 'trigger' commands are ignored (polcal mode)
        self.triggers_enabled = True

    def start_observation(self, obs_config, reload=True):
        """
        Start observation: run IQUV dumps automatically if source is polarisation calibrator.
        Else ensure normal FRB candidate I/IQUV dumps are enabled

        :param dict obs_config: Observation config
        :param bool reload: reload service settings (default: True)
        """
        # reload config
        if reload:
            self.load_config()
        # load parset
        parset = self._load_parset(obs_config)
        if parset is None:
            self.logger.warning("No observation parset found; not checking for polcal observation")
            # ensure normal triggers can be received and processed
            self.triggers_enabled = True
            return
        # Override regular IQUV trigger with polcal
        if 'polcal' in parset['task.source.name'] and int(parset['task.source.beam']) == obs_config['beam']:
            self.logger.info("Found polarisation calibrator in this beam")
            # disable regular triggering
            self.triggers_enabled = False
            # do the automated polcal dumps
            self.thread_polcal = threading.Thread(target=self.polcal_dumps, args=[obs_config])
            self.thread_polcal.start()
        else:
            self.logger.info("No polarisation calibrator in this beam, enabling regular triggering")
            # no polcal obs, ensure normal triggers can be received and processed
            self.triggers_enabled = True

    def process_command(self, command):
        """
        Process command received from queue

        :param dict command: command with arguments
        """
        if command['command'] == 'trigger':
            # trigger received, check if triggering is enabled
            if not self.triggers_enabled:
                self.logger.warning("Trigger received but triggering is disabled, ignoring")
                return
            # process trigger in a separate thread so the command queue is not blocked
            self.thread_trigger = threading.Thread(target=self.send_event, args=[command['trigger']])
            self.thread_trigger.start()
        elif command['command'] == 'get_attr':
            self.get_attribute(command)
        else:
            self.logger.error("Unknown command received: {}".format(command['command']))

    def cleanup(self):
        """
        Remove all trigger-sending threads
        """
        # wait for any in-flight trigger/polcal threads to finish
        if self.thread_trigger:
            self.thread_trigger.join()
        if self.thread_polcal:
            self.thread_polcal.join()

    def send_event(self, triggers):
        """
        Send trigger to dada_dbevent

        :param list triggers: list of trigger dictionaries
        """
        self.logger.info("Received {} trigger(s):".format(len(triggers)))
        self.logger.info(triggers)

        # utc start is identical for all triggers of a set
        utc_start = triggers[0]['utc_start'].iso.replace(' ', '-')

        # accumulate the per-trigger event lines separately for stokes I and IQUV
        events_i = ""
        events_iquv = ""
        ntrig_i = 0
        ntrig_iquv = 0
        for trigger in triggers:
            stokes = trigger['stokes']
            if stokes.upper() not in ['I', 'IQUV']:
                self.logger.error("Skipping trigger with unknown stokes mode: {}".format(stokes))
                continue

            # start 2 pages before trigger time
            # 1 page should be enough, but due to a bug in dada_dbevent the start time is rounded up
            # to the next page, instead of down
            shift = 2.048  # 2 pages
            # calculate window size: equal to DM delay, but at least some minimum set in config
            # DM is roughly delay acros band in ms
            # add end delay defined in config and shift
            window_size = max(self.min_window_size, trigger['dm'] / 1000.) + self.delay_end + shift
            event_start_full = Time(trigger['utc_start']) + TimeDelta(trigger['time'], format='sec') - \
                TimeDelta(shift, format='sec')
            # ensure start time is past start time of observation
            if event_start_full < trigger['utc_start']:
                self.logger.info("Event start before start of observation - adapting event start")
                event_start_full = trigger['utc_start']
            event_end_full = event_start_full + TimeDelta(window_size, format='sec')
            # ToDo: ensure end time is before end of observation

            # dada_dbevent wants the integer and fractional seconds as separate fields
            event_start, event_start_frac = event_start_full.iso.split('.')
            # event_start_frac = '.' + event_start_frac
            event_end, event_end_frac = event_end_full.iso.split('.')
            # event_end_frac = '.' + event_end_frac

            # Add utc start/end for event
            trigger['event_start'] = event_start.replace(' ', '-')
            trigger['event_start_frac'] = event_start_frac
            trigger['event_end'] = event_end.replace(' ', '-')
            trigger['event_end_frac'] = event_end_frac

            # Add to the event
            # here already sure that stokes.upper() is either IQUV or I
            if stokes.upper() == 'I':
                ntrig_i += 1
                events_i += ("{event_start} {event_start_frac} {event_end} {event_end_frac} {dm} "
                             "{snr} {width} {beam}\n".format(**trigger))
            else:
                ntrig_iquv += 1
                events_iquv += ("{event_start} {event_start_frac} {event_end} {event_end_frac} {dm} "
                                "{snr} {width} {beam}\n".format(**trigger))

        # send stokes I events
        if ntrig_i > 0:
            info_i = {'num_event': ntrig_i, 'utc_start': utc_start, 'events': events_i}
            event_i = "N_EVENTS {num_event}\n{utc_start}\n{events}".format(**info_i)
            self.send_events(event_i, 'I')
        # send stokes IQUV events
        if ntrig_iquv > 0:
            info_iquv = {'num_event': ntrig_iquv, 'utc_start': utc_start, 'events': events_iquv}
            event_iquv = "N_EVENTS {num_event}\n{utc_start}\n{events}".format(**info_iquv)
            self.send_events(event_iquv, 'IQUV')

    def send_events(self, event, stokes):
        """
        Send stokes I or IQUV events

        :param str event: raw event to send
        :param str stokes: I or IQUV
        :return:
        """
        # open socket; stokes I and IQUV dada_dbevent instances listen on different ports
        if stokes.upper() == 'I':
            port = self.port_i
        else:
            port = self.port_iquv
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(5)
            sock.connect(("localhost", port))
        except socket.error as e:
            self.logger.error("Failed to connect to stokes {} dada_dbevent on port {}: {}".format(stokes,
                                                                                                  port, e))
            return

        # send event
        try:
            sock.sendall(event.encode())
        except socket.timeout:
            self.logger.error("Failed to send events within timeout limit")
            sock.close()
            return
        self.logger.info("Successfully sent events")

        # close socket
        sock.close()

    def _load_parset(self, obs_config):
        """
        Load the observation parset

        :param dict obs_config: Observation config
        :return: parset as dict
        """
        try:
            # encoded parset is already in config on master node
            # decode the parset
            raw_parset = util.decode_parset(obs_config['parset'])
            # convert to dict and store
            parset = util.parse_parset(raw_parset)
        except KeyError:
            self.logger.info("Observation parset not found in input config, looking for master parset")
            # Load the parset from the master parset file
            master_config_file = os.path.join(obs_config['master_dir'], 'parset', 'darc_master.parset')
            try:
                # Read raw config
                with open(master_config_file) as f:
                    master_config = f.read().strip()
                # Convert to dict
                master_config = util.parse_parset(master_config)
                # extract obs parset and decode
                raw_parset = util.decode_parset(master_config['parset'])
                parset = util.parse_parset(raw_parset)
            except Exception as e:
                self.logger.warning(
                    "Failed to load parset from master config file {}, "
                    "setting parset to None: {}".format(master_config_file, e))
                parset = None
        return parset

    def polcal_dumps(self, obs_config):
        """
        Automatically dump IQUV data at regular intervals for polcal calibrator observations

        :param dict obs_config: Observation config
        """
        tstart = Time(obs_config['startpacket'] / TIME_UNIT, format='unix')
        duration = TimeDelta(obs_config['duration'], format='sec')
        tend = tstart + duration

        # round up polcal dump size to nearest 1.024 s
        dump_size = TimeDelta(np.ceil(self.polcal_dump_size / 1.024) * 1.024, format='sec')
        dump_interval = TimeDelta(self.polcal_interval, format='sec')

        # sleep until first trigger time
        util.sleepuntil_utc(tstart + dump_interval, event=self.stop_event)

        # run until trigger would be end past end time
        # add a second to avoid trigger running a little bit over end time
        # also stay below global limit on number of dumps during one obs
        # TODO: also set a total length limit
        ndump = 0
        while Time.now() + dump_interval - TimeDelta(1.0, format='sec') < tend and ndump < self.polcal_max_dumps:
            # generate an IQUV trigger
            params = {'utc_start': tstart.iso.replace(' ', '-')}

            # trigger start time: now, rounded to nearest 1.024s since utc start
            dt = TimeDelta(np.round((Time.now() - tstart).sec / 1.024) * 1.024, format='sec')

            # trigger start/end times
            event_start_full = tstart + dt
            event_end_full = tstart + dt + dump_size

            # convert to dada_dbevent format
            event_start, event_start_frac = event_start_full.iso.split('.')
            event_end, event_end_frac = event_end_full.iso.split('.')

            # store to params
            params['event_start'] = event_start.replace(' ', '-')
            params['event_start_frac'] = event_start_frac
            params['event_end'] = event_end.replace(' ', '-')
            params['event_end_frac'] = event_end_frac

            event = "N_EVENTS 1\n{utc_start}\n{event_start} {event_start_frac} {event_end} {event_end_frac} " \
                    "0 0 0 0".format(**params)  # dm, snr, width, beam are 0
            self.logger.info("Sending automated polcal IQUV dump event: {}".format(params))
            self.send_events(event, 'IQUV')

            # keep track of number of performed dumps
            ndump += 1

            # sleep
            self.stop_event.wait(dump_interval.sec)
|
loostrumREPO_NAMEdarcPATH_START.@darc_extracted@darc-master@darc@dada_trigger.py@.PATH_END.py
|
{
"filename": "mobilenetv2.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/torchvision/models/mobilenetv2.py",
"type": "Python"
}
|
from functools import partial
from typing import Any, Callable, List, Optional
import torch
from torch import nn, Tensor
from ..ops.misc import Conv2dNormActivation
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
__all__ = ["MobileNetV2", "MobileNet_V2_Weights", "mobilenet_v2"]
# necessary for backwards compatibility
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual block: pointwise expand -> depthwise ->
    pointwise-linear project, with a residual connection when stride is 1 and
    the input/output channel counts match.

    Note: the exact layer construction order defines the state-dict keys of
    pretrained checkpoints, so it must not be reordered.
    """

    def __init__(
        self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        self.stride = stride
        if stride not in [1, 2]:
            raise ValueError(f"stride should be 1 or 2 instead of {stride}")

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        # number of channels after the expansion stage
        hidden_dim = int(round(inp * expand_ratio))
        # residual connection only when spatial size and channels are preserved
        self.use_res_connect = self.stride == 1 and inp == oup

        layers: List[nn.Module] = []
        if expand_ratio != 1:
            # pw
            layers.append(
                Conv2dNormActivation(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6)
            )
        layers.extend(
            [
                # dw
                Conv2dNormActivation(
                    hidden_dim,
                    hidden_dim,
                    stride=stride,
                    groups=hidden_dim,
                    norm_layer=norm_layer,
                    activation_layer=nn.ReLU6,
                ),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                norm_layer(oup),
            ]
        )
        self.conv = nn.Sequential(*layers)
        self.out_channels = oup
        self._is_cn = stride > 1

    def forward(self, x: Tensor) -> Tensor:
        # add the skip connection only when configured in __init__
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)
class MobileNetV2(nn.Module):
    def __init__(
        self,
        num_classes: int = 1000,
        width_mult: float = 1.0,
        inverted_residual_setting: Optional[List[List[int]]] = None,
        round_nearest: int = 8,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.2,
    ) -> None:
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
            dropout (float): The droupout probability
        """
        super().__init__()
        _log_api_usage_once(self)

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            # default architecture from the paper:
            # t = expansion factor, c = output channels, n = repeats, s = first-block stride
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError(
                f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
            )

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features: List[nn.Module] = [
            Conv2dNormActivation(3, input_channel, stride=2, norm_layer=norm_layer, activation_layer=nn.ReLU6)
        ]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # only the first block of each stage uses the configured stride
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(
            Conv2dNormActivation(
                input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6
            )
        )
        # make it nn.Sequential
        self.features = nn.Sequential(*features)

        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Linear(self.last_channel, num_classes),
        )

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
# Metadata shared by all MobileNet_V2_Weights entries below.
_COMMON_META = {
    "num_params": 3504872,
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}
class MobileNet_V2_Weights(WeightsEnum):
    """Pretrained weight enum for MobileNetV2; each entry bundles the checkpoint
    URL, the inference transforms, and the published accuracy metadata."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v2-b0353104.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 71.878,
                    "acc@5": 90.286,
                }
            },
            "_ops": 0.301,
            "_file_size": 13.555,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v2-7ebf99e0.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 72.154,
                    "acc@5": 90.822,
                }
            },
            "_ops": 0.301,
            "_file_size": 13.598,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V2_Weights.IMAGENET1K_V1))
def mobilenet_v2(
    *, weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV2:
    """MobileNetV2 architecture from the `MobileNetV2: Inverted Residuals and Linear
    Bottlenecks <https://arxiv.org/abs/1801.04381>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MobileNet_V2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mobilenetv2.MobileNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MobileNet_V2_Weights
        :members:
    """
    weights = MobileNet_V2_Weights.verify(weights)

    # Without pretrained weights, simply build the model from the kwargs.
    if weights is None:
        return MobileNetV2(**kwargs)

    # With pretrained weights, force num_classes to match the checkpoint,
    # then load the state dict into the freshly built model.
    _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
    model = MobileNetV2(**kwargs)
    model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
    return model
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@torchvision@models@mobilenetv2.py@.PATH_END.py
|
{
"filename": "let.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/stereo/let.py",
"type": "Python"
}
|
from .load import load
# This routine was originally in stereo/__init__.py, until being moved to its own file.
# Please refer to __init__.py if you need to see the revision history before it was moved.
def let(trange=['2013-11-5', '2013-11-6'],
        probe='a',
        level='l1',
        suffix='',
        prefix='',
        get_support_data=False,
        varformat=None,
        varnames=[],
        downloadonly=False,
        notplot=False,
        no_update=False,
        time_clip=False):
    """
    This function loads data from the Low Energy Telescope

    Parameters
    ----------
    trange : list of str
        time range of interest [starttime, endtime] with the format
        ['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
        ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
        Default: ['2013-11-5', '2013-11-6']

    probe: str
        Spacecraft probe ('a' for ahead, 'b' for behind)
        Default: 'a'

    level: str
        Data level to load
        Default: 'l1'

    suffix: str
        The tplot variable names will be given this suffix.
        Default: '', no suffix is added.

    prefix: str
        The tplot variable names will be given this prefix.
        Default: '', no prefix is added.

    get_support_data: bool
        Data with an attribute "VAR_TYPE" with a value of "support_data"
        will be loaded into tplot.
        Default: False. Only loads in data with a "VAR_TYPE" attribute of "data".

    varformat: str
        The file variable formats to load into tplot. Wildcard character
        "*" is accepted.
        Default: None. All variables are loaded in.

    varnames: list of str
        List of variable names to load.
        Default: [], if not specified, all data variables are loaded

    downloadonly: bool
        Set this flag to download the CDF files, but not load them into
        tplot variables
        Default: False

    notplot: bool
        Return the data in hash tables instead of creating tplot variables
        Default: False

    no_update: bool
        If set, only load data from your local cache
        Default: False

    time_clip: bool
        Time clip the variables to exactly the range specified in the trange keyword
        Default: False

    Returns
    ----------
    List of tplot variables created.

    Example:
    ----------
    >>> import pyspedas
    >>> from pytplot import tplot
    >>> let_vars = pyspedas.projects.stereo.let(trange=['2013-1-5', '2013-1-6'])
    >>> tplot(let_vars)
    """
    # Thin wrapper: forward every keyword to the shared STEREO loader with
    # the instrument fixed to 'let'.
    return load(instrument='let', trange=trange, probe=probe, suffix=suffix, prefix=prefix, level=level,
                get_support_data=get_support_data, varformat=varformat, varnames=varnames, downloadonly=downloadonly,
                notplot=notplot, time_clip=time_clip, no_update=no_update)
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@stereo@let.py@.PATH_END.py
|
{
"filename": "_y.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/isosurface/caps/_y.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Y(_BaseTraceHierarchyType):
    # Auto-generated plotly hierarchy type for `isosurface.caps.y`;
    # exposes the validated `fill` and `show` properties.

    # class properties
    # --------------------
    _parent_path_str = "isosurface.caps"
    _path_str = "isosurface.caps.y"
    _valid_props = {"fill", "show"}

    # fill
    # ----
    @property
    def fill(self):
        """
        Sets the fill ratio of the `caps`. The default fill value of
        the `caps` is 1 meaning that they are entirely shaded. On the
        other hand Applying a `fill` ratio less than one would allow
        the creation of openings parallel to the edges.

        The 'fill' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self["fill"]

    @fill.setter
    def fill(self, val):
        self["fill"] = val

    # show
    # ----
    @property
    def show(self):
        """
        Sets the fill ratio of the `slices`. The default fill value of
        the y `slices` is 1 meaning that they are entirely shaded. On
        the other hand Applying a `fill` ratio less than one would
        allow the creation of openings parallel to the edges.

        The 'show' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["show"]

    @show.setter
    def show(self, val):
        self["show"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        fill
            Sets the fill ratio of the `caps`. The default fill
            value of the `caps` is 1 meaning that they are entirely
            shaded. On the other hand Applying a `fill` ratio less
            than one would allow the creation of openings parallel
            to the edges.
        show
            Sets the fill ratio of the `slices`. The default fill
            value of the y `slices` is 1 meaning that they are
            entirely shaded. On the other hand Applying a `fill`
            ratio less than one would allow the creation of
            openings parallel to the edges.
        """

    def __init__(self, arg=None, fill=None, show=None, **kwargs):
        """
        Construct a new Y object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.isosurface.caps.Y`
        fill
            Sets the fill ratio of the `caps`. The default fill
            value of the `caps` is 1 meaning that they are entirely
            shaded. On the other hand Applying a `fill` ratio less
            than one would allow the creation of openings parallel
            to the edges.
        show
            Sets the fill ratio of the `slices`. The default fill
            value of the y `slices` is 1 meaning that they are
            entirely shaded. On the other hand Applying a `fill`
            ratio less than one would allow the creation of
            openings parallel to the edges.

        Returns
        -------
        Y
        """
        super(Y, self).__init__("y")

        # internal construction path used when the parent builds this object
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.isosurface.caps.Y
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.caps.Y`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # explicit keyword arguments take precedence over values in `arg`
        _v = arg.pop("fill", None)
        _v = fill if fill is not None else _v
        if _v is not None:
            self["fill"] = _v
        _v = arg.pop("show", None)
        _v = show if show is not None else _v
        if _v is not None:
            self["show"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@isosurface@caps@_y.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/sqlite3/__init__.py",
"type": "Python"
}
|
# pysqlite2/__init__.py: the pysqlite2 package.
#
# Copyright (C) 2005 Gerhard HΓ€ring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
"""
The sqlite3 extension module provides a DB-API 2.0 (PEP 249) compliant
interface to the SQLite library, and requires SQLite 3.7.15 or newer.
To use the module, start by creating a database Connection object:
import sqlite3
cx = sqlite3.connect("test.db") # test.db will be created or opened
The special path name ":memory:" can be provided to connect to a transient
in-memory database:
cx = sqlite3.connect(":memory:") # connect to a database in RAM
Once a connection has been established, create a Cursor object and call
its execute() method to perform SQL queries:
cu = cx.cursor()
# create a table
cu.execute("create table lang(name, first_appeared)")
# insert values into a table
cu.execute("insert into lang values (?, ?)", ("C", 1972))
# execute a query and iterate over the result
for row in cu.execute("select * from lang"):
print(row)
cx.close()
The sqlite3 module is written by Gerhard HΓ€ring <gh@ghaering.de>.
"""
from sqlite3.dbapi2 import *
from sqlite3.dbapi2 import (_deprecated_names,
_deprecated_version_info,
_deprecated_version)
def __getattr__(name):
    """Module-level ``__getattr__`` (PEP 562) serving deprecated attributes.

    Accessing any name listed in ``_deprecated_names`` emits a
    DeprecationWarning and returns the corresponding module-private
    ``_deprecated_*`` object; any other missing attribute raises
    AttributeError, as normal attribute lookup would.
    """
    if name not in _deprecated_names:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    from warnings import warn
    warn(f"{name} is deprecated and will be removed in Python 3.14",
         DeprecationWarning, stacklevel=2)
    return globals()[f"_deprecated_{name}"]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@sqlite3@__init__.py@.PATH_END.py
|
{
"filename": "calculate_sensitivity_from_Crab.ipynb",
"repo_name": "cta-observatory/cta-lstchain",
"repo_path": "cta-lstchain_extracted/cta-lstchain-main/notebooks/calculate_sensitivity_from_Crab.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib as mpl
import numpy as np
import glob
import astropy.units as u
import tables
import gc
import time
from astropy.coordinates import SkyCoord, AltAz
from astropy.coordinates.erfa_astrom import ErfaAstromInterpolator, erfa_astrom
from astropy.time import Time
from lstchain.reco.utils import add_delta_t_key, get_effective_time
from os import path
from scipy.stats import binned_statistic
from pyirf.statistics import li_ma_significance
from lstchain.spectra.crab import crab_magic
from ctapipe.containers import EventType
from pyirf.spectral import CRAB_MAGIC_JHEAP2015
from ctapipe.coordinates import CameraFrame, TelescopeFrame
from gammapy.stats import WStatCountsStatistic
from ctapipe_io_lst import LSTEventSource
from ctapipe.io import read_table
%matplotlib inline
```
```python
#
# This is a notebook to calculate the flux sensitivity using real Crab observations.
#
# NOTE: the inputs of this notebook are DL2 files of Crab observations,
#Β both for source-independent and source-dependent analyses.
#
# The source-independent files must contain the event-wise nominal position
# of Crab (src_x, src_y), in a table called "source_position". This can be
# achieved by processing standard DL2 files with $LSTCHAIN/scripts/lstchain_dl2_add_sourcepos.py
# (this is done like this because it is too costly to compute the position every time we need it)
#
#
# The samples below are those used for the ApJ LST-1 performance (or "Crab") paper:
# NOTE!! The full sample (34.2 h of effective time) takes about 3 hours to process in the IT cluster!
# The time is mostly to read the data and fill the "super-histograms" from which the event statistics
# are derived. One can fill the histograms once, and the run only the (much faster) second part of the
# notebook with different settings for the sensitivity calculation.
#
# source-independent dataset
tag1 = "source-independent"
#dataset1 = glob.glob("/fefs/aswg/workspace/abelardo.moralejo/Crab_performance_paper/data_v0.9.9/DL2/process_with*/dl2_LST-1.Run*.h5")
dataset1 = glob.glob("/fefs/aswg/workspace/abelardo.moralejo/Crab_performance_paper/data_v0.9.9/DL2_sourcepos_with_v010/dl2_LST-1.Run*.h5")
# Source-dependent dataset
# Created with RFs trained using gammas out to 1 deg, AND additional src-indep params:
tag2 = "source-dependent+, ring-wobble-0-1-trained"
dataset2 = glob.glob("/fefs/aswg/workspace/seiya.nozaki/Crab_performance_paper/20221027_v0.9.9_crab_tuned/combined_off_axis_1deg/DL2/data/dl2_LST-1.Run*.h5")
dataset1.sort()
dataset2.sort()
```
```python
#
# Settings for sensitivity calculations:
# (NOTE: one can change these afterwards, to obtain results with different conditions just
# running the second part of the notebook, with no need to re-read all data!!!)
#
alpha = 0.2 # Li & Ma's "alpha", ratio of the ON to the OFF exposure
# This what we *assume* for the calculation, because it is the standard value.
# Note however that for a single LST, and to avoid large systematics, we use only one off
# region to estimate the background (in source-dependent analysis; in source-independent
# analysis it is one region up to 250 GeV and 3 regions for >250 GeV). We then "extrapolate"
# that background to what we would have in case we really had alpha=0.2...
# Assumed (effective) observation time for the calculation (note: it has nothing to do with
# the actual observation time of the sample used for the calculation!):
obs_time = 50 * u.h
# obs_time = 1 * u.min
# obs_time = 5 * u.min
# obs_time = 0.5 * u.h
# obs_time = 0.7 * u.h
# obs_time = 5 * u.h
# The 5-sigma condition is so standard that we do not make it configurable! It is hardcoded.
# Additional sensitivity conditions:
min_n_gammas = 10 # Minimum number of excess events
min_backg_percent = 5 # Excess must be at least this percentage of the background
```
```python
# Binnings of gammaness, Alpha, thetas and energy for all calculations:
# Very fine binnings to allow cut optimization:
gammaness_bins = np.linspace(0, 1, 2001)
alphabins = np.linspace(0, 60, 3001)
theta2bins = np.linspace(0, 0.6, 6001)
logenergy_bins = np.linspace(-2.0, 2.2, 22) # 5 per decade
evt_id_bins = [-0.5, 0.5, 1.5]
# for event_id%2 (to separate odd & even event numbers).
# Even event-id's end up in the first bin; odd event-id's in the second one.
# We separate them so that we can optimize the cuts in one and apply them to
# the other (and vice versa)
# Background estimation:
# For source-independent analysis, we will use one off region up to 250 GeV.
# Three off regions for higher energies:
first_ebin_with_3offs = 7 # i.e. from ~250 GeV onwards
# NOTE! This is just what is used to *estimate* the average background in the on-region.
# For the actual computation of sensitivity, in order to follow the standard definition,
# we use (Li & Ma's) alpha = 0.2 (define in the cell above), i.e. we assume we can compute
#Β the background in a region 5 times larger than the on-region. In practice it is somehow
#Β optimistic for a standalone telescope like LST-1, with its limited angular resolution.
#
#Β Intensity cuts:
min_intensity = 50
max_intensity = 1e10
```
```python
#
# CUTS TO ENSURE THE QUALITY OF THE REAL DATA EXCESS FROM WHICH THE SENSITIVITY WILL BE COMPUTED!
#
# We set here the conditions to ensure that all used histogram bins (defined by Ebin, gammaness cut and
# angular cut) have, in the data sample, a reliable excess which we can use for estimating sensitivity:
#
min_signi = 3 # below this value (significance of the test source, Crab, for the *actual* observation
# time of the sample and obtained with 1 off region) we ignore the corresponding cut
# combination
min_exc = 0.002 # in fraction of off. Below this we ignore the corresponding cut combination.
min_off_events = 10 # minimum number of off events in the actual observation used. Below this we
# ignore the corresponding cut combination.
# Note that min_exc is set to a pretty low value! In the observations published in the LST1 performance
# paper the background at low E is stable to 0.5% (i.e. 0.005)
```
```python
#
# For the calculation of the uncertainty of sensitivity:
backg_syst = 0.01 # relative background systematic uncertainty
backg_normalization = False # generally normalization is not needed, background is uniform enough.
norm_range = np.array([[0.1, 0.16], [20., 59.8]]) # deg2 for theta2, deg for Alpha
```
```python
sa = LSTEventSource.create_subarray(tel_id=1)
focal = sa.tel[1].optics.effective_focal_length
# The source position src_x, src_y (in m), stored in "source_position", is calculated by
# lstchain_dl2_add_sourcepos.py using the effective focal length (29.30565 m), which means
# that it is "consistent" with the reco values reco_src_x, reco_src_y (which are affected
# by the telescope's optical aberration)
#
print(focal)
```
```python
on_events = [None, None]
off_events = [None, None]
# In on_events and off_events (which are basically 5-axis histograms):
# axis 0 has two bins. Indicates analysis type. index=0: source-independent; index=1: source-dependent
#
# axis 1 has two bins. index=0: even event-id events; index=1: odd-event-id events
# axis 2 is the energy axis, see logenergy_bins above
# axis 3 is the gammaness axis, see gammaness_bins above
# axis 4 is the angular axis (theta or Alpha), see theta2bins and alphabins above
```
```python
# --- Fill the ON/OFF "super-histograms" for the SOURCE-INDEPENDENT analysis ---
# For each DL2 file in dataset1: compute theta2 w.r.t. the nominal source
# position (ON) and w.r.t. three rotated positions (OFF), then accumulate 4-D
# histograms with axes (odd/even event_id, log10 E_reco, gammaness, theta2).
tablename = "/dl2/event/telescope/parameters/LST_LSTCam"
livetimes = []  # effective observation times, one entry appended per dataset
livetime = 0
for ifile, file in enumerate(dataset1):
    # Progress report (files can take minutes each):
    print(ifile+1, '/', len(dataset1), ': ', file, time.asctime(time.gmtime()))
    tb = read_table(file, tablename)
    # See above the note on the source_position table!
    tb_extra = read_table(file, "source_position/table")
    lt, _ = get_effective_time(tb)
    livetime += lt
    # Squared angular distance (deg2) of the reconstructed source position
    # to the nominal source position => ON theta2:
    dx = np.rad2deg((tb['reco_src_x']-tb_extra['src_x'])/focal.to_value(u.m))
    dy = np.rad2deg((tb['reco_src_y']-tb_extra['src_y'])/focal.to_value(u.m))
    tb['theta2_on'] = (dx**2 + dy**2).astype('float32')
    # Same w.r.t. the position reflected through the camera centre (180-deg OFF):
    dx = np.rad2deg((tb['reco_src_x']+tb_extra['src_x'])/focal.to_value(u.m))
    dy = np.rad2deg((tb['reco_src_y']+tb_extra['src_y'])/focal.to_value(u.m))
    tb['theta2_off'] = (dx**2 + dy**2).astype('float32')
    # Positions with coordinates swapped & sign-flipped, i.e. rotated by
    # 90 / 270 deg around the camera centre => two additional OFF regions:
    dx = np.rad2deg((tb['reco_src_x']+tb_extra['src_y'])/focal.to_value(u.m))
    dy = np.rad2deg((tb['reco_src_y']-tb_extra['src_x'])/focal.to_value(u.m))
    tb['theta2_off_90'] = (dx**2 + dy**2).astype('float32')
    dx = np.rad2deg((tb['reco_src_x']-tb_extra['src_y'])/focal.to_value(u.m))
    dy = np.rad2deg((tb['reco_src_y']+tb_extra['src_x'])/focal.to_value(u.m))
    tb['theta2_off_270'] = (dx**2 + dy**2).astype('float32')
    # 0. for even event_id's, 1. for odd ones (histogram axis 0):
    tb['odd_or_even'] = (tb['event_id']%2).astype('float32')
    # Filter to select cosmics (=shower events)
    noped = (tb['event_type'] != EventType.SKY_PEDESTAL.value)
    nocal = (tb['event_type'] != EventType.FLATFIELD.value)
    cosmics = noped & nocal
    mask = ((tb['intensity']>min_intensity) &
            (tb['intensity']<max_intensity) &
            cosmics)
    # ON histogram: (odd/even, log10 E_reco, gammaness, theta2_on)
    on, _ = np.histogramdd(np.array([tb['odd_or_even'][mask],
                                     tb['log_reco_energy'][mask].astype('float32'),
                                     tb['gammaness'][mask].astype('float32'),
                                     tb['theta2_on'][mask]]).T,
                           bins=[evt_id_bins, logenergy_bins, gammaness_bins, theta2bins])
    on = on.astype('float32')  # float32 to halve the memory footprint
    if on_events[0] is None:
        on_events[0] = on
    else:
        on_events[0] += on
    # OFF histogram, first filled with the single 180-deg off region:
    off, _ = np.histogramdd(np.array([tb['odd_or_even'][mask],
                                      tb['log_reco_energy'][mask].astype('float32'),
                                      tb['gammaness'][mask].astype('float32'),
                                      tb['theta2_off'][mask]]).T,
                            bins=[evt_id_bins, logenergy_bins, gammaness_bins, theta2bins])
    off = off.astype('float32')
    # For bins >= first_ebin_with_3offs we fill the off with the average of 3 off regions
    off[:,first_ebin_with_3offs:,:,:] *= 1/3.
    high_e_mask = mask & (tb['log_reco_energy'] >= logenergy_bins[first_ebin_with_3offs])
    # Add the 90- and 270-deg off regions (each weighted 1/3) for high energies:
    off += np.histogramdd(np.array([tb['odd_or_even'][high_e_mask],
                                    tb['log_reco_energy'][high_e_mask].astype('float32'),
                                    tb['gammaness'][high_e_mask].astype('float32'),
                                    tb['theta2_off_90'][high_e_mask]]).T,
                          bins=[evt_id_bins, logenergy_bins, gammaness_bins,
                                theta2bins])[0].astype('float32') / 3.
    off += np.histogramdd(np.array([tb['odd_or_even'][high_e_mask],
                                    tb['log_reco_energy'][high_e_mask].astype('float32'),
                                    tb['gammaness'][high_e_mask].astype('float32'),
                                    tb['theta2_off_270'][high_e_mask]]).T,
                          bins=[evt_id_bins, logenergy_bins, gammaness_bins,
                                theta2bins])[0].astype('float32') / 3.
    if off_events[0] is None:
        off_events[0] = off
    else:
        off_events[0] += off
    # Drop per-file objects so the next iteration does not double the peak RSS:
    on = None
    off = None
    tb = None
    tb_extra = None
    gc.collect()  # memory clean-up
print("Live time:", livetime.to(u.h))
livetimes.append(livetime)
```
```python
# Columns to be kept in the case of source-dependent analysis (again, to save memory):
columns_srcdep = ["('on', 'expected_src_x')",
"('on', 'expected_src_y')",
"('on', 'alpha')",
"('off_180', 'alpha')",
"('on', 'gammaness')",
"('off_180', 'gammaness')",
"('on', 'reco_energy')",
"('off_180', 'reco_energy')"]
columns = ['obs_id',
'event_id',
'intensity',
'event_type',
'x', 'y', 'psi']
# --- Fill the ON/OFF "super-histograms" for the SOURCE-DEPENDENT analysis ---
# Same scheme as for dataset1, but the angular variable is Alpha and the
# source-dependent parameters come from a separate table per file.
tablename = "/dl2/event/telescope/parameters/LST_LSTCam"
tablename_srcdep = "/dl2/event/telescope/parameters_src_dependent/LST_LSTCam"
livetime = 0
on_events[1] = None
off_events[1] = None
for ifile, file in enumerate(dataset2):
    # Progress report:
    print(ifile+1, '/', len(dataset2), ': ', file, time.asctime(time.gmtime()))
    tb = read_table(file, tablename)
    # 0. for even event_id's, 1. for odd ones (histogram axis 0):
    tb['odd_or_even'] = (tb['event_id']%2).astype('float32')
    lt, _ = get_effective_time(tb)
    livetime += lt
    tb_srcdep = read_table(file, tablename_srcdep)
    # Rename the tuple-style column names to plain identifiers:
    tb_srcdep.rename_columns(["('on', 'expected_src_x')", "('on', 'expected_src_y')",
                              "('on', 'alpha')", "('off_180', 'alpha')",
                              "('on', 'gammaness')", "('off_180', 'gammaness')",
                              "('on', 'reco_energy')", "('off_180', 'reco_energy')"],
                             ['src_x', 'src_y', 'alpha_on', 'alpha_off',
                              'gammaness_on', 'gammaness_off', 'reco_energy_on', 'reco_energy_off'])
    # Filter to select cosmics (=shower events):
    noped = (tb['event_type'] != EventType.SKY_PEDESTAL.value)
    nocal = (tb['event_type'] != EventType.FLATFIELD.value)
    cosmics = noped & nocal
    mask = ((tb['intensity']>min_intensity) &
            (tb['intensity']<max_intensity) &
            cosmics)
    # ON histogram: (odd/even, log10 E_reco, gammaness, alpha_on)
    on, _ = np.histogramdd(np.array([tb['odd_or_even'][mask],
                                     np.log10(tb_srcdep['reco_energy_on'][mask]),
                                     tb_srcdep['gammaness_on'][mask],
                                     tb_srcdep['alpha_on'][mask]]).T,
                           bins=[evt_id_bins, logenergy_bins, gammaness_bins, alphabins])
    on = on.astype('float32')
    # OFF histogram: single off region at 180 deg:
    off, _ = np.histogramdd(np.array([tb['odd_or_even'][mask],
                                      np.log10(tb_srcdep['reco_energy_off'][mask]),
                                      tb_srcdep['gammaness_off'][mask],
                                      tb_srcdep['alpha_off'][mask]]).T,
                            bins=[evt_id_bins, logenergy_bins, gammaness_bins, alphabins])
    off = off.astype('float32')
    if on_events[1] is None:
        on_events[1] = on
        off_events[1] = off
    else:
        on_events[1] += on
        off_events[1] += off
    # Drop per-file objects to keep peak memory bounded:
    on = None
    off = None
    tb = None
    tb_srcdep = None
    gc.collect()  # memory cleanup
print("Live time:", livetime.to(u.h))
livetimes.append(livetime)
```
```python
# Index 0 refers to the source-independent analysis, index 1 to the source-dependent analysis.
# If the Crab data samples are the same (just different analysis method), the live times should
# be the same:
print(livetimes[0].to(u.h))
print(livetimes[1].to(u.h))
```
```python
# Show the excess:
# plt.errorbar(0.5*(bins[1:]+bins[:-1]), onevts-offevts, yerr=(onevts+offevts)**0.5, fmt='o', markersize=3)
# plt.xlabel('Alpha (deg)')
# plt.ylabel('Excess events')
# plt.grid()
# plt.show()
```
```python
# Prepare arrays to contain the cumulative sums, from each gammaness value to 1, and from 0 to each
# value of theta (or Alpha):
cum_on_events = [np.copy(on_events[0]), np.copy(on_events[1])]
cum_off_events = [np.copy(off_events[0]), np.copy(off_events[1])]
excess_events = [on_events[0]-off_events[0], on_events[1]-off_events[1]]
```
```python
cum_on_events[0].shape
# 0 indicates the source-independent analysis.
# The four axes are odd/even event_id's; energy; gammaness_cut; theta2_cut;
```
```python
# Obtain the cumulative histograms: integrate in gammaness and in theta (or alpha).
# After this loop, entry (g, t) of each histogram holds the counts surviving the
# cut combination "gammaness >= edge g AND angle < edge t": gammaness is summed
# downwards from the last bin, the angular axis upwards from the first bin.
for evtid in range(cum_on_events[0].shape[0]):
    for energyid in range(cum_on_events[0].shape[1]):
        # gammaness_bins and theta / alpha bins arrays are of bin edges...
        # Note that actual number of bins in histograms is that number -1
        # In-place reverse cumulative sum along the gammaness axis
        # (bin i accumulates everything from bin i upwards):
        for i in reversed(range(len(gammaness_bins)-2)):
            cum_on_events[0][evtid, energyid, i,:] += cum_on_events[0][evtid, energyid, i+1,:]
            cum_off_events[0][evtid, energyid, i,:] += cum_off_events[0][evtid, energyid, i+1,:]
            cum_on_events[1][evtid, energyid, i,:] += cum_on_events[1][evtid, energyid, i+1,:]
            cum_off_events[1][evtid, energyid, i,:] += cum_off_events[1][evtid, energyid, i+1,:]
        # Forward cumulative sum along theta2 (source-independent, index 0):
        for j in range(len(theta2bins)-1):
            if j == 0:
                continue
            cum_on_events[0][evtid, energyid, :, j] += cum_on_events[0][evtid, energyid, :, j-1]
            cum_off_events[0][evtid, energyid, :, j] += cum_off_events[0][evtid, energyid, :, j-1]
        # Forward cumulative sum along Alpha (source-dependent, index 1):
        for j in range(len(alphabins)-1):
            if j == 0:
                continue
            cum_on_events[1][evtid, energyid, :, j] += cum_on_events[1][evtid, energyid, :, j-1]
            cum_off_events[1][evtid, energyid, :, j] += cum_off_events[1][evtid, energyid, :, j-1]
```
```python
cum_excess_events = [[],[]]
cum_excess_events[0] = cum_on_events[0] - cum_off_events[0]
cum_excess_events[1] = cum_on_events[1] - cum_off_events[1]
```
```python
#
# Just a plot to illustrate the content of the cumulative histograms, and to check the overall good
# agreement of the on- and off- alpha distributions. Note that it will not be perfect, some gammas
# may be present in the OFF at middle-Alpha values, especially for soft gammaness cuts!
# source-dependent analysis (1), even event_id's (0)
#
Ebin = 6 #Β Energy bin
gammanessbin = 1900 # gammaness cut (in bins)
alpha_rebin = 50 # rebinning for better view
ON = np.diff(cum_excess_events[1][0][Ebin][gammanessbin][::alpha_rebin] +
cum_off_events[1][0][Ebin][gammanessbin][::alpha_rebin])
xx = np.linspace(alphabins.min(), alphabins.max(), len(ON)+1)
plt.plot(xx[:-1], ON, 'o', label='ON')
plt.plot(xx[:-1], np.diff(cum_off_events[1][0][Ebin][gammanessbin][::alpha_rebin]), label='OFF')
plt.ylim(0, 1.1*np.max(ON))
plt.xlabel('Alpha')
plt.ylabel('Events')
plt.grid()
plt.legend()
plt.show()
```
```python
# Just a check plot: even event_id's, source-independent analysis, and some specific gammaness & theta2 cuts
plt.plot(cum_on_events[0][0,:,1400,200], label='ON')
plt.plot(cum_off_events[0][0,:,1400,200], label='OFF')
plt.plot(cum_excess_events[0][0,:,1400,200], label='Excess')
plt.yscale('log')
plt.xlabel('Energy bin')
plt.ylabel('Events')
plt.legend()
plt.show()
```
```python
# Having excess and OFF we do not need ON:
cum_on_events[0] = None
cum_on_events[1] = None
cum_on_events = None
gc.collect() # memory clean-up
```
```python
# Check for size of histograms in GB:
for i, type_str in enumerate(['Source-independent:', 'Source-dependent:']):
print(type_str,
excess_events[i].size*excess_events[i].itemsize/1e9,
cum_excess_events[i].size*cum_excess_events[i].itemsize/1e9,
cum_off_events[i].size*cum_off_events[i].itemsize/1e9,
off_events[i].size*off_events[i].itemsize/1e9,
on_events[i].size*on_events[i].itemsize/1e9)
```
```python
# Look for other large objects in memory (TEST)
# import sys
# sorted([(x, sys.getsizeof(globals().get(x))) for x in dir()], key=lambda x: x[1], reverse=True)
```
```python
# Just a test plot
evtid = 1 # Plot odd events
energyid = 7 # energy bin 7
# Excess events, per bin:
plt.figure(figsize=(16,4))
plt.pcolormesh(theta2bins, gammaness_bins, excess_events[0][evtid][energyid], norm=colors.LogNorm())
plt.colorbar()
plt.xlabel('theta2 (deg2)')
plt.ylabel('gammaness')
plt.show()
#Β Cumulative off events:
plt.figure(figsize=(16,4))
plt.pcolormesh(theta2bins, gammaness_bins, cum_off_events[0][evtid][energyid], norm=colors.LogNorm())
plt.colorbar()
plt.xlabel('theta2 cut (deg2)')
plt.ylabel('gammaness cut')
plt.show()
#Β Cumulative excess events:
plt.figure(figsize=(16,4))
plt.pcolormesh(theta2bins, gammaness_bins, cum_excess_events[0][evtid][energyid], norm=colors.LogNorm())
plt.colorbar()
plt.xlabel('theta2 cut (deg2)')
plt.ylabel('gammaness cut')
plt.show()
```
```python
#################################################################################################
# #
# SECOND PART OF THE NOTEBOOK!!! RUN FROM HERE IF YOU JUST WANT TO RE-RUN THE CUT OPTIMIZATION #
# USING THE SAME DATA SAMPLES! #
# #
##################################################################################################
```
```python
#################################################################################################
# #
# UNCOMMENT BELOW THE SETTINGS YOU WANT TO MODIFY (RELATIVE TO WHAT WAS SET AT THE BEGINNING) #
# #
#################################################################################################
# alpha = 0.2 # Li & Ma's "alpha", ratio of the ON to the OFF exposure
# This what we *assume* for the calculation, because it is the standard value.
# Note however that for a single LST, and to avoid large systematics, we use only one off
# region to estimate the background (in source-dependent analysis; in source-independent
# analysis it is one region up to 250 GeV and 3 regions for >250 GeV). We then "extrapolate"
# that background to what we would have in case we really had alpha=0.2...
# Assumed (effective) observation time for the calculation (note: it has nothing to do with
# the actual observation time of the sample used for the calculation!):
# obs_time = 50 * u.h
# obs_time = 1 * u.min
# obs_time = 5 * u.min
# obs_time = 0.5 * u.h
# obs_time = 0.7 * u.h
# obs_time = 5 * u.h
# The 5-sigma condition is so standard that we do not make it configurable! It is hardcoded.
# Additional sensitivity conditions:
# min_n_gammas = 10 # Minimum number of excess events
# min_backg_percent = 5 # Excess must be at least this percentage of the background
#
```
```python
#################################################################################################
# #
# UNCOMMENT BELOW THE SETTINGS YOU WANT TO MODIFY (RELATIVE TO WHAT WAS SET AT THE BEGINNING) #
# #
#################################################################################################
#
# CUTS TO ENSURE THE QUALITY OF THE REAL DATA EXCESS FROM WHICH THE SENSITIVITY WILL BE COMPUTED!
#
# We set here the conditions to ensure that all used histogram bins (defined by Ebin, gammaness cut and
# angular cut) have, in the data sample, a reliable excess which we can use for estimating sensitivity:
#
# min_signi = 3 # below this value (significance of the test source, Crab, for the *actual* observation
# time of the sample and obtained with 1 off region) we ignore the corresponding cut
# combination
# min_exc = 0.002 # in fraction of off. Below this we ignore the corresponding cut combination.
# min_off_events = 10 # minimum number of off events in the actual observation used. Below this we
# ignore the corresponding cut combination.
# Note that min_exc is set to a pretty low value! In the observations published in the LST1 performance
# paper the background at low E is stable to 0.5% (i.e. 0.005)
```
```python
#
# For the calculation of the uncertainty of sensitivity:
backg_syst = 0.01 # relative background systematic uncertainty
backg_normalization = False # generally normalization is not needed, background is uniform enough.
norm_range = np.array([[0.1, 0.16], [20., 59.8]]) # deg2 for theta2, deg for Alpha
```
```python
#
# Function to compute the fraction of the Crab flux that results in a given significance, for the
# observation time and Li&Ma's alpha set above.
# This is based on the observed excess and background (cumul_excess and cumul_off). This is done for all
# bins of cumul_excess and cumul_off, that is, for all possible gammaness and theta2/Alpha cuts
#
# In order to get more reliable results:
# We can require the *actually observed* excess to be a minimum fraction of the background (min_exc)
# We can require the *actually observed* excess to have a significance of at least min_signi (computed
# assuming just 1 off region)
# By "actually observed" we mean that it is not the excess (or significance) extrapolated for a
# 50h-observation or whatever, but the actually obtained result in the input data sample.
#
def calc_flux_for_N_sigma(N_sigma, cumul_excess, cumul_off,
                          min_signi, min_exc, min_off_events, alpha,
                          target_obs_time, actual_obs_time):
    """
    Compute, per cut combination, the flux yielding N_sigma significance.

    The flux is expressed as a fraction of the flux of the test source (Crab),
    and is computed for every entry of the cumulative histograms, i.e. for
    every (gammaness, theta2/Alpha) cut combination.

    Parameters
    ----------
    N_sigma : target Li & Ma significance (e.g. 5).
    cumul_excess, cumul_off : cumulative excess / off histograms.
    min_signi : minimum observed significance (1 off region) for a cut
        combination to be considered valid.
    min_exc : minimum observed excess, as a fraction of the off counts.
    min_off_events : minimum number of observed off events.
    alpha : assumed Li & Ma on/off exposure ratio for the extrapolation.
    target_obs_time : observation time for which the flux is computed.
    actual_obs_time : actual effective time of the input data sample.

    Returns
    -------
    flux_factor : flux (Crab fraction) giving ~N_sigma; NaN where the cut
        combination fails the validity conditions.
    lima_signi : the significance achieved with the returned flux.
    """
    # Scale from the actual sample's exposure to the target exposure:
    time_factor = target_obs_time.to_value(u.h) / actual_obs_time.to_value(u.h)
    start_flux = 1
    flux_factor = start_flux * np.ones_like(cumul_excess)
    # Keep only cut combinations with a reliable observed excess:
    good_bin_mask = ((cumul_excess > min_exc*cumul_off) &
                     (cumul_off > min_off_events))
    flux_factor = np.where(good_bin_mask, flux_factor, np.nan)
    # First calculate significance (with 1 off) of the excesses in the provided sample, with no scaling.
    # We will only use the cut combinations where we have at least min_signi sigmas to begin with...
    # NOTE!!! float64 precision is essential for the arguments of li_ma_significance!
    lima_signi = li_ma_significance((flux_factor*cumul_excess + cumul_off).astype('float64'),
                                    cumul_off.astype('float64'),
                                    alpha=1)
    # Set nan in bins where we do not reach min_signi:
    flux_factor = np.where(lima_signi > min_signi, flux_factor, np.nan)
    # Now calculate the significance for the target observation time:
    lima_signi = li_ma_significance((time_factor*(flux_factor*cumul_excess +
                                                  cumul_off)).astype('float64'),
                                    (time_factor*cumul_off/alpha).astype('float64'),
                                    alpha=alpha)
    # Iterate to obtain the flux which gives exactly N_sigma
    # (loop variable renamed: "iter" would shadow the builtin):
    for _iteration in range(4):
        tolerance_mask = np.abs(lima_signi-N_sigma)>0.001 # recalculate only what is needed
        flux_factor[tolerance_mask] *= (N_sigma / lima_signi[tolerance_mask])
        # NOTE!!! float64 precision is essential here!!!!
        lima_signi[tolerance_mask] = li_ma_significance((time_factor*(flux_factor[tolerance_mask]*
                                                                      cumul_excess[tolerance_mask]+
                                                                      cumul_off[tolerance_mask])).astype('float64'),
                                                        (time_factor*cumul_off[tolerance_mask]/alpha).astype('float64'),
                                                        alpha=alpha)
    return flux_factor, lima_signi
```
```python
lima_signi = [[], []]
flux_for_5_sigma = [[], []]
# Compute the flux (in Crab fraction), for all cuts, which results in 5 sigma for the sensitivity conditions
# defined above:
for k in range(2):
flux_for_5_sigma[k], lima_signi[k] = calc_flux_for_N_sigma(5, cum_excess_events[k], cum_off_events[k],
min_signi, min_exc, min_off_events, alpha,
obs_time, livetimes[k]*0.5)
# NOTE: livetime is divided by 2 because calculation is done separately for the odd- and even-event_id samples!
```
```python
# Now make sure we only consider bins with valid flux in *both* samples (odd- and even-events).
# This is because we will use the optimal cuts obtained with odd- events to apply them to even- events,
# and vice-versa. If the flux is nan for one of the two, we set it to nan for both.
for k in range(2):
mask = np.isnan(flux_for_5_sigma[k][0]) | np.isnan(flux_for_5_sigma[k][1])
flux_for_5_sigma[k] = np.where(mask, np.nan, flux_for_5_sigma[k])
lima_signi[k] = np.where(mask, np.nan, lima_signi[k])
```
```python
# Check that significances are indeed 5 (or very close - they come from an iterative procedure)
# Each entry in the histograms is a cut combination.
plt.hist(lima_signi[0].flatten(), bins=500, range=(0, 10), log=True)
plt.xlabel('Li & Ma significance')
plt.show()
plt.hist(lima_signi[1].flatten(), bins=500, range=(0, 10), log=True)
plt.xlabel('Li & Ma significance')
plt.show()
```
```python
#
# Function to calculate the flux needed to obtain a given excess, for all cut combinations:
#
def calc_flux_for_N_excess(N_excess, cumul_excess, target_obs_time, actual_obs_time):
    """Flux (as a fraction of the test-source flux) that would produce
    N_excess excess events in target_obs_time, for every cut combination,
    given the excess observed during actual_obs_time."""
    exposure_scale = target_obs_time.to_value(u.h) / actual_obs_time.to_value(u.h)
    scaled_excess = cumul_excess * exposure_scale
    return N_excess / scaled_excess
#
# Do the computation (JUST FOR TEST!!):
#
# flux_for_10_excess = [[], []]
# for k in range(2):
# flux_for_10_excess[k] = calc_flux_for_N_excess(min_n_gammas, cum_excess_events[k], obs_time, livetimes[k]*0.5)
# # livetime divided by 2 because calculation is done separately for the odd- and even-event_id samples!
# flux_for_10_excess = [np.where(np.isnan(flux_for_5_sigma[0]), np.nan, flux_for_10_excess[0]),
# np.where(np.isnan(flux_for_5_sigma[1]), np.nan, flux_for_10_excess[1])]
```
```python
#
# Function to calculate the flux needed to obtain a given percent of the background,
# for all cut combinations:
#
def calc_flux_for_x_percent_backg(percent, cumul_excess, cumul_off):
    """Flux (as a fraction of the test-source flux) whose excess equals
    `percent` % of the cumulative background, per cut combination."""
    off_fraction = percent / 100
    return off_fraction * cumul_off / cumul_excess
#
# Do the computation (JUST FOR TEST!!):
#
# flux_for_5percent_backg = [[], []]
# for k in range(2):
# flux_for_5percent_backg[k] = calc_flux_for_x_percent_backg(min_backg_percent, cum_excess_events[k],
# cum_off_events[k])
# flux_for_5percent_backg = [np.where(np.isnan(flux_for_5_sigma[0]), np.nan, flux_for_5percent_backg[0]),
# np.where(np.isnan(flux_for_5_sigma[1]), np.nan, flux_for_5percent_backg[1])]
```
```python
#
# Function to calculate the flux (for all cut combinations) which fulfills
# all 3 conditions of sensitivity (standard: at least 5-sigma significance,
# at least 10 excess events, and the excess being at least 5% of the background):
#
def calc_flux_3conditions(cumul_excess, cumul_off,
                          min_signi, min_exc, min_off_events, alpha,
                          target_obs_time, actual_obs_time,
                          min_n_gammas, min_backg_percent):
    """Minimum detectable flux fulfilling all three sensitivity conditions
    (5-sigma significance; at least min_n_gammas excess events; excess at
    least min_backg_percent % of the background), per cut combination.
    A non-positive min_n_gammas / min_backg_percent disables that condition."""
    flux_signi, _ = calc_flux_for_N_sigma(5, cumul_excess, cumul_off,
                                          min_signi, min_exc, min_off_events, alpha,
                                          target_obs_time, actual_obs_time)
    flux_excess = (calc_flux_for_N_excess(min_n_gammas, cumul_excess,
                                          target_obs_time, actual_obs_time)
                   if min_n_gammas > 0 else 0)
    flux_backg = (calc_flux_for_x_percent_backg(min_backg_percent,
                                                cumul_excess, cumul_off)
                  if min_backg_percent > 0 else 0)
    # The detectable flux is the most demanding (largest) of the three:
    return np.maximum(np.maximum(flux_signi, flux_excess), flux_backg)
```
```python
#
# Definitions of min_signi, min_exc and min_off_events (to consider a cut combination valid) are above
#
# Do the computation of the detectable flux (for all cut combinations) according to the sensitivity conditions:
#
detectable_flux = [[],[]]
#Minimum flux fulfilling all three conditions:
for k in range(2):
detectable_flux[k] = calc_flux_3conditions(cum_excess_events[k], cum_off_events[k],
min_signi, min_exc, min_off_events, alpha,
obs_time, livetimes[k]*0.5,
min_n_gammas, min_backg_percent)
```
```python
# Just in case, make sure we only consider bins with valid flux in *both* samples (odd- and even-events):
for k in range(2):
mask = np.isnan(detectable_flux[k][0]) | np.isnan(detectable_flux[k][1])
detectable_flux[k] = np.where(mask, np.nan, detectable_flux[k])
```
```python
# Result containers, indexed [analysis_type][odd_or_even], each holding one
# value per energy bin after the loop below:
sensitivity = [[[], []], [[], []]] # [analysis_type, odd_or_even, energy]
reco_energy = [[[], []], [[], []]]
signi = [[[], []], [[], []]]
cut_indices = [[[], []], [[], []]] # [analysis_type, odd_or_even, energy]
# Tweak: sometimes the minimization at low Ereco ends up with very tight alpha cut
# just because of a fluke... We try to avoid it here, by setting minimum "reasonable"
# cut values for the low-E bins:
#
min_angle_cut = [np.zeros(len(logenergy_bins)-1), np.zeros(len(logenergy_bins)-1)]
min_angle_cut[0][:5] = 0.02  # theta2 (deg2), source-independent analysis
min_angle_cut[1][:5] = 5     # Alpha (deg), source-dependent analysis
# Now, in each subsample (odd/even event_id's) we will find which cuts provide the best sensitivity
# (= minimum detectable flux), and will apply those cuts to the *other* subsample.
for analysis_type in range(2): # source-independent and source-dependent analyses
    for even_or_odd in range(2):
        # We optimize the cuts with the sample indicated by even_or_odd,
        # and apply them on the complementary sample, "other_half":
        if even_or_odd == 0:
            other_half = 1
        else:
            other_half = 0
        for iebin in range(len(logenergy_bins)-1):
            # Bin-center energy (geometric mean of the bin edges, in TeV):
            reco_energy[analysis_type][other_half].append(10**(0.5*(logenergy_bins[iebin]+logenergy_bins[iebin+1])))
            # Now find the cuts which provide the minimum detectable flux using the even_or_odd subsample.
            # Except if we have only nan values... :
            if np.sum(~np.isnan(detectable_flux[analysis_type][even_or_odd][iebin])) == 0:
                sensitivity[analysis_type][other_half].append(np.nan)
                signi[analysis_type][other_half].append(np.nan)
                cut_indices[analysis_type][other_half].append([0, 0])
                continue
            # First angular-cut bin allowed by the min_angle_cut tweak above:
            if analysis_type == 0:
                start_bin = np.where(theta2bins>min_angle_cut[0][iebin])[0][0] - 1
            else:
                start_bin = np.where(alphabins>min_angle_cut[1][iebin])[0][0] - 1
            # Flat index of the minimum detectable flux, then back to
            # (gammaness-bin, angular-bin) indices; the angular index is
            # shifted back by start_bin to refer to the full histogram:
            index = np.nanargmin(detectable_flux[analysis_type][even_or_odd][iebin,:,start_bin:])
            indices = list(np.unravel_index(index,
                                            detectable_flux[analysis_type][even_or_odd][iebin, :, start_bin:].shape))
            indices[1] += start_bin
            # Now get & store the minimum detectable flux with these cuts but using the other half of the events
            # Keep also the best-cut indices for later use
            sensitivity[analysis_type][other_half].append(detectable_flux[analysis_type][other_half][iebin, indices[0], indices[1]])
            signi[analysis_type][other_half].append(lima_signi[analysis_type][other_half][iebin, indices[0], indices[1]])
            cut_indices[analysis_type][other_half].append(indices)
        # Convert the per-energy lists to arrays for plotting/arithmetic:
        sensitivity[analysis_type][other_half] = np.array(sensitivity[analysis_type][other_half])
        reco_energy[analysis_type][other_half] = np.array(reco_energy[analysis_type][other_half])
        signi[analysis_type][other_half] = np.array(signi[analysis_type][other_half])
        cut_indices[analysis_type][other_half] = np.array(cut_indices[analysis_type][other_half])
```
```python
#
# MAGIC sensitivity in Crab fraction:
#
def plot_MAGIC_sensitivity_fraction():
    """Overlay the published MAGIC stereo sensitivity, as a fraction of the
    Crab flux, on the currently active matplotlib axes.

    Reads 'magic_sensitivity.txt' (column 0: energy in GeV; column 5: flux
    as a percentage of Crab) from the working directory.
    """
    table = np.loadtxt('magic_sensitivity.txt', skiprows = 1)
    e_tev = (table[:, 0] * u.GeV).to(u.TeV)
    crab_fraction = table[:, 5] / 100.
    plt.plot(e_tev, crab_fraction, '-.',
             label='MAGIC (stereo) [AleksiΔ et al. 2016]', color='tab:green')
```
```python
plt.figure(figsize=(10,4))
plt.scatter(reco_energy[0][1], sensitivity[0][1], marker='o', facecolors='tab:blue', edgecolors='tab:blue',
label=tag1+", odd id")
plt.scatter(reco_energy[0][0], sensitivity[0][0], marker='o', facecolors='none', edgecolors='tab:blue',
label=tag1+", even id")
plt.scatter(reco_energy[1][1], sensitivity[1][1], marker='o', facecolors='tab:orange', edgecolors='tab:orange',
label=tag2+", odd id")
plt.scatter(reco_energy[1][0], sensitivity[1][0], marker='o', facecolors='none', edgecolors='tab:orange',
label=tag2+", even id")
plt.yscale('log')
plt.xscale('log')
plt.ylim(0.005, 10)
plt.xlim(0.01, 200)
plt.ylabel("Fraction of Crab flux")
plt.xlabel("Reconstructed energy / TeV")
plot_MAGIC_sensitivity_fraction()
plt.legend()
plt.grid()
plt.show()
```
```python
plt.figure(figsize=(10,4))
plt.scatter(reco_energy[0][1], 0.5*(sensitivity[0][1]+sensitivity[0][0]),
marker='o', facecolors='tab:blue', edgecolors='tab:blue',
label=tag1)
plt.scatter(reco_energy[1][1], 0.5*(sensitivity[1][1]+sensitivity[1][0]), marker='o', facecolors='tab:orange', edgecolors='tab:orange',
label=tag2)
plt.yscale('log')
plt.xscale('log')
plt.ylim(0.005, 5)
plt.xlim(0.01, 200)
plt.ylabel("Fraction of Crab flux")
plt.xlabel("Reconstructed energy / TeV")
plot_MAGIC_sensitivity_fraction()
plt.legend(loc='lower right')
plt.grid()
plt.show()
```
```python
# Check the significance for the optimal cuts, for all the energy bins:
plt.figure(figsize=(15,6))
plt.scatter(reco_energy[0][1], signi[0][1], label=tag1+", odd id")
plt.scatter(reco_energy[1][1], signi[1][1], label=tag2+", odd id")
plt.scatter(reco_energy[0][1], signi[0][0], label=tag1+", even id")
plt.scatter(reco_energy[1][1], signi[1][0], label=tag2+", even id")
plt.xscale('log')
plt.ylim(2, 8)
plt.xlim(0.01, 100)
plt.legend()
plt.xlabel('Reconstructed energy / TeV')
plt.grid()
plt.show()
```
```python
# Just a test plot to see how the minimum flux is found on even-numbered events, and applied to odd-numbered events
energyid = 2 # for energy bin
analysis_type = 0 # 0 source-independent, 1 source-dependent
print(10**(0.5*(logenergy_bins[energyid]+logenergy_bins[energyid+1])), "TeV")
if analysis_type == 0:
start_bin = np.where(theta2bins>min_angle_cut[0][energyid])[0][0] - 1
else:
start_bin = np.where(alphabins>min_angle_cut[1][energyid])[0][0] - 1
index = np.nanargmin(detectable_flux[analysis_type][0][energyid, :, start_bin:])
indices = list(np.unravel_index(index, detectable_flux[analysis_type][0][energyid, :, start_bin:].shape))
indices[1] += start_bin
# convert to indices in gammaness axis & angle axis
print("Minimum:", np.nanmin(detectable_flux[analysis_type][0][energyid, :, start_bin:]))
print(detectable_flux[analysis_type][0][energyid, indices[0], indices[1]])
print(detectable_flux[analysis_type][0][energyid,
indices[0]-3:indices[0]+4,
indices[1]-3:indices[1]+4])
gammaness_cut = gammaness_bins[indices[0]]
angular_cut = theta2bins[indices[1]+1] # +1 because we want the bin's upper edge
if analysis_type == 1:
angular_cut = alphabins[indices[1]+1]
print()
print("With the sample of even-numbered events:")
print('Minimum flux, gammaness & angular cut bins:', indices)
print('Cut values and minimum detectable flux (in fraction of Crab):',
gammaness_cut, angular_cut, detectable_flux[analysis_type][0][energyid, indices[0], indices[1]])
print('Excess, Off events (for input sample), Li & Ma significance (in target t_obs for detectable flux):')
print(' ', cum_excess_events[analysis_type][0][energyid, indices[0], indices[1]],
cum_off_events[analysis_type][0][energyid, indices[0], indices[1]],
f'{lima_signi[analysis_type][0][energyid, indices[0], indices[1]]:.3f}')
plt.figure(figsize=(16,4))
if analysis_type == 0:
plt.pcolormesh(theta2bins, gammaness_bins, detectable_flux[analysis_type][0][energyid], norm=colors.LogNorm())
else:
plt.pcolormesh(alphabins, gammaness_bins, detectable_flux[analysis_type][0][energyid], norm=colors.LogNorm())
plt.colorbar()
plt.scatter([angular_cut], [gammaness_cut], marker='o', facecolors='none', edgecolors='red')
plt.ylabel('gammaness cut')
plt.xlabel('angular cut')
# plt.xlim(0, 0.5)
# plt.ylim(0.65, 0.75)
plt.show()
plt.figure(figsize=(16,4))
if analysis_type == 0:
plt.pcolormesh(theta2bins, gammaness_bins, detectable_flux[analysis_type][1][energyid], norm=colors.LogNorm())
else:
plt.pcolormesh(alphabins, gammaness_bins, detectable_flux[analysis_type][1][energyid], norm=colors.LogNorm())
plt.colorbar()
plt.scatter([angular_cut], [gammaness_cut], marker='o', facecolors='none', edgecolors='red')
plt.ylabel('gammaness cut')
plt.xlabel('angular cut')
# plt.xlim(0, 0.5)
# plt.ylim(0.65, 0.75)
plt.show()
print('With the sample of odd-numbered events:')
print('Applied cuts (from the other sample) and minimum detectable flux (in fraction of Crab):',
gammaness_cut, angular_cut, detectable_flux[analysis_type][1][energyid, indices[0], indices[1]])
print('Excess, Off events (for input sample), Li & Ma significance (in target t_obs for detectable flux):')
print(' ', cum_excess_events[analysis_type][1][energyid, indices[0], indices[1]],
cum_off_events[analysis_type][1][energyid, indices[0], indices[1]],
f'{lima_signi[analysis_type][1][energyid, indices[0], indices[1]]:.3f}')
```
```python
#
# Function to find the cut indices (i.e. bin indices of the histos) which correspond to certain cuts
#
def find_bin_indices(gcut, angcut, analysis_type):
    """Return the (gammaness, angular) histogram bin indices whose bin edge
    lies closest to the requested cut values.

    analysis_type == 0 uses the theta2 binning (source-independent analysis);
    any other value uses the alpha binning (source-dependent analysis).
    """
    angular_edges = theta2bins if analysis_type == 0 else alphabins
    angcut_index = np.nanargmin(np.abs(angular_edges - angcut)) - 1
    gcut_index = np.nanargmin(np.abs(gammaness_bins - gcut))
    return gcut_index, angcut_index
```
```python
#
# Plot a rebinned histogram (for better visualization)
#
def plot_rebinned(x, y, yerr, rebin, label):
    # Plot `y` vs `x` after merging bins in groups of `rebin`: contents are
    # summed and the errors added in quadrature. Returns the rebinned (x, y).
    #
    # NOTE(review): the x centers use x[i+rebin], so this assumes x holds the
    # bin *edges* (len(x) == len(y) + 1); otherwise the last group can index
    # past the end of x — TODO confirm against callers.
    xx = np.array([0.5*(x[i]+x[i+rebin]) for i in range(0, len(x)-1, rebin)])
    yy = np.array([np.sum(y[i:i+rebin]) for i in range(0, len(y)-1, rebin)])
    yyerr = np.array([(np.sum(yerr[i:i+rebin]**2))**0.5 for i in range(0, len(yerr)-1, rebin)])
    plt.errorbar(xx, yy, yerr=yyerr, fmt='o', markersize=3, label=label)
    return xx, yy
```
```python
#
# Now we recalculate the sensitivities, and also check individual theta2 / Alphaplots:
#
target_obs_time = obs_time # = the same for which the cut optimization was done
# target_obs_time = 0.5 * u.h # CHANGE ONLY IN CASE YOU WANT TO CALCULATE SENSITIVITY
# FOR DIFFERENT T_OBS, BUT *WITHOUT* RE-OPTIMIZING CUTS!!
norm_bins = np.array([[np.where(theta2bins>norm_range[0][0])[0][0],
np.where(theta2bins>norm_range[0][1])[0][0]],
[np.where(alphabins>norm_range[1][0])[0][0],
np.where(alphabins>norm_range[1][1])[0][0]]
])
rebin_factor = np.array((len(logenergy_bins)-1)*[20]) # join bins in groups of rebin_factor, to make plots less noisy.
#rebin_factor = np.array((len(logenergy_bins)-1)*[60]) # join bins in groups of rebin_factor, to make plots less noisy.
rebin_factor[:3] = 120
rebin_factor[3:5] = 60
rebin_factor[14:] = 60
sensitivity = np.empty((2, len(logenergy_bins)-1))
sensitivity[:] = np.nan
delta_sensitivity = np.empty((2, 2, len(logenergy_bins)-1)) # separate upper and lower error bars
delta_sensitivity[:] = np.nan
# For 5-sigma condition only:
# (NOTE! This is with the cuts optimized using all 3 conditions! In order to obtain the best sensitivity
# for just the 5 sigma condition, one has to set min_n_gammas=0 and min_backg_percent=0 BEFORE the cut
#Β optimization!)
sensitivity_5s = np.zeros_like(sensitivity)
delta_sensitivity_5s = np.zeros_like(delta_sensitivity)
sensitivity_5s[:] = np.nan
delta_sensitivity_5s[:] = np.nan
reco_energy = np.zeros_like(sensitivity)
num_excess_events = np.zeros_like(sensitivity)
num_off_events = np.zeros_like(sensitivity)
reco_energy[:] = np.nan
num_excess_events[:] = np.nan
num_off_events[:] = np.nan
angular_cut = np.empty((2, 2, len(logenergy_bins)-1))
gammaness_cut = np.empty((2, 2, len(logenergy_bins)-1)) # analysis_type, odd_or_even, energy
angular_cut[:] = np.nan
gammaness_cut[:] = np.nan
for iebin in range(len(logenergy_bins)-1):
recoE = 10**(0.5*(logenergy_bins[iebin]+logenergy_bins[iebin+1]))
reco_energy[0][iebin] = recoE
reco_energy[1][iebin] = recoE
if (recoE < 0.016) | (recoE > 16):
continue
print(f'Energy: {recoE:.4f} Tev')
fig = plt.figure(figsize=(16, 5))
for analysis_type in range(2):
indices0 = cut_indices[analysis_type][0][iebin]
indices1 = cut_indices[analysis_type][1][iebin]
#Β indices0 and indices1 have 2 elements each: [0] is the gammaness cut, [1] is the angular cut
if (indices0>0).all() and (indices1>0).all():
# Valid gammanness & angular cuts (cut indices ==0 means no valid cuts could be determined!)
nevts_on = (np.sum(on_events[analysis_type][0, iebin, indices0[0]:], axis=0) +
np.sum(on_events[analysis_type][1, iebin, indices1[0]:], axis=0))
nevts_off = (np.sum(off_events[analysis_type][0, iebin, indices0[0]:], axis=0) +
np.sum(off_events[analysis_type][1, iebin, indices1[0]:], axis=0))
else:
nevts_on = None
nevts_off = None
if analysis_type == 0:
fig.add_subplot(1, 2, 1+analysis_type)
if nevts_on is None:
print('No valid cuts found for source-independent analysis!')
continue
else:
print(f'Gammaness cuts: {gammaness_bins[indices0[0]+1]:.4f}, {gammaness_bins[indices1[0]+1]:.4f}')
print(f'Theta2 cuts: {theta2bins[indices0[1]+1]:.4f}, {theta2bins[indices1[1]+1]:.4f}')
xx, yy = plot_rebinned(theta2bins, nevts_on, nevts_on**0.5, rebin_factor[iebin], '')
xxoff, yyoff = plot_rebinned(theta2bins, nevts_off, nevts_off**0.5, rebin_factor[iebin], '')
plt.plot([theta2bins[indices0[1]+1], theta2bins[indices0[1]+1]],
[0, yy[int((indices0[1]+1)/rebin_factor[iebin])]], '--', color='tab:green')
plt.plot([theta2bins[indices1[1]+1], theta2bins[indices1[1]+1]],
[0, yy[int((indices1[1]+1)/rebin_factor[iebin])]], '--', color='tab:green')
plt.xlim(0, 0.2)
plt.ylim(yyoff.min()*0.9, yy.max()*1.1)
plt.xlabel('Theta2 (deg2)')
plt.ylabel('Events')
angular_cut[analysis_type][0][iebin] = theta2bins[indices0[1]+1]
angular_cut[analysis_type][1][iebin] = theta2bins[indices1[1]+1]
else:
fig.add_subplot(1, 2, 1+analysis_type)
if nevts_on is None:
print('No valid cuts found for source-dependent analysis!')
continue
else:
print(f'Alpha cuts: {alphabins[indices0[1]+1]:.2f}, {alphabins[indices1[1]+1]:.2f}')
xx, yy = plot_rebinned(alphabins, nevts_on, nevts_on**0.5, rebin_factor[iebin], '')
xxoff, yyoff = plot_rebinned(alphabins, nevts_off, nevts_off**0.5, rebin_factor[iebin], '')
plt.plot([alphabins[indices0[1]+1], alphabins[indices0[1]+1]],
[0, yy[int((indices0[1]+1)/rebin_factor[iebin])]], '--', color='tab:green')
plt.plot([alphabins[indices1[1]+1], alphabins[indices1[1]+1]],
[0, yy[int((indices1[1]+1)/rebin_factor[iebin])]], '--', color='tab:green')
plt.xlim(0, 60)
plt.ylim(yyoff.min()*0.9, yy.max()*1.1)
plt.xlabel('Alpha (deg)')
plt.ylabel('Events')
angular_cut[analysis_type][0][iebin] = alphabins[indices0[1]+1]
angular_cut[analysis_type][1][iebin] = alphabins[indices1[1]+1]
# Add up the backg numbers (odd and even events) in the normalization region, and the excess:
off_in_norm_region = (cum_off_events[analysis_type][0, iebin, indices0[0], norm_bins[analysis_type][1]] +
cum_off_events[analysis_type][1, iebin, indices1[0], norm_bins[analysis_type][1]] -
cum_off_events[analysis_type][0, iebin, indices0[0], norm_bins[analysis_type][0]] -
cum_off_events[analysis_type][1, iebin, indices1[0], norm_bins[analysis_type][0]]
)
excess_in_norm_region = (cum_excess_events[analysis_type][0, iebin, indices0[0], norm_bins[analysis_type][1]] +
cum_excess_events[analysis_type][1, iebin, indices1[0], norm_bins[analysis_type][1]] -
cum_excess_events[analysis_type][0, iebin, indices0[0], norm_bins[analysis_type][0]] -
cum_excess_events[analysis_type][1, iebin, indices1[0], norm_bins[analysis_type][0]]
)
off_norm_factor = 1
if backg_normalization:
off_norm_factor = (off_in_norm_region + excess_in_norm_region) / off_in_norm_region
print('Off normalization for analysis type', analysis_type, ':', off_norm_factor)
norm_min = (((off_in_norm_region + excess_in_norm_region) -
(off_in_norm_region + excess_in_norm_region)**0.5) /
(off_in_norm_region + off_in_norm_region**0.5))
norm_max = (((off_in_norm_region + excess_in_norm_region) +
(off_in_norm_region + excess_in_norm_region)**0.5) /
(off_in_norm_region - off_in_norm_region**0.5))
print(f' {norm_min:.4f} to {norm_max:.4f}')
gammaness_cut[analysis_type][0][iebin] = gammaness_bins[indices0[0]]
gammaness_cut[analysis_type][1][iebin] = gammaness_bins[indices1[0]]
# Add up the excess (and the off) for odd and even event_id's
nexcess = (cum_excess_events[analysis_type][0, iebin, indices0[0], indices0[1]] +
cum_excess_events[analysis_type][1, iebin, indices1[0], indices1[1]])
noff = (cum_off_events[analysis_type][0, iebin, indices0[0], indices0[1]] +
cum_off_events[analysis_type][1, iebin, indices1[0], indices1[1]])
nexcess = nexcess + noff * (1 - off_norm_factor)
noff = noff * off_norm_factor
flux = calc_flux_3conditions(np.array([nexcess]), np.array([noff]),
min_signi, min_exc, min_off_events, alpha,
target_obs_time, livetimes[analysis_type],
min_n_gammas, min_backg_percent)
if analysis_type == 0:
print(f'Results (source-indep): Nexc={nexcess}, Noff={noff}, Flux/CU={flux[0]:.4f}')
else:
print(f'Results (source-dep): Nexc={nexcess}, Noff={noff}, Flux/CU={flux[0]:.4f}')
sensitivity[analysis_type][iebin] = flux[0]
# Assume background systematics and statistical fluctuation of excess go in same direction:
max_excess = nexcess + backg_syst * noff + (nexcess + 2*noff)**0.5
min_excess = nexcess - backg_syst * noff - (nexcess + 2*noff)**0.5
flux_minus = calc_flux_3conditions(np.array([max_excess]),
np.array([noff]),
0, 0, 0, alpha,
target_obs_time, livetimes[analysis_type],
min_n_gammas, min_backg_percent)
flux_plus = calc_flux_3conditions(np.array([min_excess]),
np.array([noff]),
0, 0, 0, alpha,
target_obs_time, livetimes[analysis_type],
min_n_gammas, min_backg_percent)
delta_sensitivity[analysis_type][1][iebin] = flux_plus[0] - flux[0]
delta_sensitivity[analysis_type][0][iebin] = flux[0] - flux_minus[0]
# Now only with the 5-sigma condition (remove req for 10 excess events & 5% of backg):
flux_5s = calc_flux_3conditions(np.array([nexcess]), np.array([noff]),
min_signi, min_exc, min_off_events, alpha,
target_obs_time, livetimes[analysis_type],
0, 0)
sensitivity_5s[analysis_type][iebin] = flux_5s[0]
flux_5s_minus = calc_flux_3conditions(np.array([max_excess]),
np.array([noff]),
0, 0, 0, alpha,
target_obs_time, livetimes[analysis_type],
0, 0)
flux_5s_plus = calc_flux_3conditions(np.array([min_excess]),
np.array([noff]),
0, 0, 0, alpha,
target_obs_time, livetimes[analysis_type],
0, 0)
delta_sensitivity_5s[analysis_type][1][iebin] = flux_5s_plus[0] - flux_5s[0]
delta_sensitivity_5s[analysis_type][0][iebin] = flux_5s[0] - flux_5s_minus[0]
num_excess_events[analysis_type][iebin] = nexcess
num_off_events[analysis_type][iebin] = noff
# plt.ylim(0, 250)
plt.grid()
# plt.legend()
plt.show()
print()
```
```python
#
# FINAL SENSITIVITY PLOTS
#
plt.figure(figsize=(8,6))
plt.fill_between(reco_energy[0][:-1],
(sensitivity[0]-delta_sensitivity[0][0])[:-1],
(sensitivity[0]+delta_sensitivity[0][1])[:-1], alpha=0.4, color='tab:blue')
plt.fill_between(reco_energy[1][:-1],
(sensitivity[1]-delta_sensitivity[1][0])[:-1],
(sensitivity[1]+delta_sensitivity[1][1])[:-1], alpha=0.4, color='tab:orange')
plt.errorbar(reco_energy[0], sensitivity[0],
yerr=delta_sensitivity[0], marker='o', color='tab:blue', ls='none', markersize=4,
label='LST-1 (source-independent)')
plt.errorbar(reco_energy[1], sensitivity[1],
yerr=delta_sensitivity[1], marker='o', color='tab:orange', ls='none', markersize=4,
label='LST-1 (source-dependent)')
plot_MAGIC_sensitivity_fraction()
plt.xscale('log')
plt.yscale('log')
plt.xlabel('E$_{reco}$ / TeV')
plt.ylabel('Fraction of Crab nebula flux')
plt.ylim(0.01, 50)
plt.xlim(0.02, 12)
plt.legend()
plt.show()
```
```python
# Write out the results:
Output_filename = "Sensitivity_output.csv"
np.savetxt(Output_filename, [reco_energy[0],
sensitivity[0],
delta_sensitivity[0][0],
delta_sensitivity[0][1],
sensitivity[1],
delta_sensitivity[1][0],
delta_sensitivity[1][1],
sensitivity_5s[0],
delta_sensitivity_5s[0][0],
delta_sensitivity_5s[0][1],
sensitivity_5s[1],
delta_sensitivity_5s[1][0],
delta_sensitivity_5s[1][1]],
fmt='%.5e', delimiter=',',
header = 'E(TeV) SI_sensitivity, SI_delta_sensitivity_low, SI_delta_sensitivity_high, '+
'SD_sensitivity, SD_delta_sensitivity_low, SD_delta_sensitivity_high, '+
'SI_sensitivity_5s, SI_delta_sensitivity_5s_low, SI_delta_sensitivity_5s_high, '+
'SD_sensitivity_5s, SD_delta_sensitivity_5s_low, SD_delta_sensitivity_5s_high')
```
```python
# REDO PLOT FROM CSV FILE:
plt.figure(figsize=(8,6))
data = np.loadtxt(Output_filename, delimiter=',')
plt.fill_between(data[0][:-1],
(data[1]-data[2])[:-1],
(data[1]+data[3])[:-1], alpha=0.4, color='tab:blue')
plt.fill_between(data[0][:-1],
(data[4]-data[5])[:-1],
(data[4]+data[6])[:-1], alpha=0.4, color='tab:orange')
plt.errorbar(data[0], data[1],
yerr=[data[2], data[3]], marker='o', color='tab:blue', ls='none', markersize=4,
label='LST-1 (source-independent)')
plt.errorbar(data[0], data[4],
yerr=[data[5], data[6]], marker='o', color='tab:orange', ls='none', markersize=4,
label='LST-1 (source-dependent)')
plot_MAGIC_sensitivity_fraction()
plt.xscale('log')
plt.yscale('log')
plt.xlabel('E$_{reco}$ / TeV')
plt.ylabel('Fraction of Crab nebula flux')
plt.xlim(0.02, 12)
plt.ylim(0.01, 50)
plt.legend()
plt.show()
```
```python
np.nancumsum(num_excess_events[0][::-1])[::-1]
```
```python
# Integral numbers of events
fig = plt.figure(figsize=(16, 6))
fig.add_subplot(1, 2, 1)
plt.plot(10**logenergy_bins[:-1], np.nancumsum(num_excess_events[0][::-1])[::-1])
plt.plot(10**logenergy_bins[:-1], np.nancumsum(num_excess_events[1][::-1])[::-1])
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Ereco / TeV')
plt.ylabel('Number of excess events, integrated above Ereco')
plt.xlim(0.01, 30)
fig.add_subplot(1, 2, 2)
plt.plot(10**logenergy_bins[:-1], np.nancumsum(num_off_events[0][::-1])[::-1])
plt.plot(10**logenergy_bins[:-1], np.nancumsum(num_off_events[1][::-1])[::-1])
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Ereco / TeV')
plt.ylabel('Number of OFF events, integrated above Ereco')
plt.xlim(0.01, 30)
plt.show()
```
```python
#
# Calculate integral sensitivity, using the same optimized cuts in each bin of Ereco.
# NOTE: at low E differential sensitivities may be similar, yet the integral ones differ, because of
# the different rates of excess and background (resulting in different conditions determining the
# flux sensitivity value). Cuts are not re-optimized!
#Β NOTE anyway that only the best integral sensitivity (in terms of Crab fraction) is relevant!
#
integral_sensitivity = np.zeros_like(sensitivity)
integral_sensitivity[:] = np.nan
for analysis_type in range(2):
for iebin in range(len(logenergy_bins)-1):
total_excess = np.nansum(num_excess_events[analysis_type][iebin:])
total_off = np.nansum(num_off_events[analysis_type][iebin:])
flux = calc_flux_3conditions(np.array([total_excess]), np.array([total_off]),
min_signi, min_exc, min_off_events, alpha,
target_obs_time, livetimes[analysis_type],
min_n_gammas, min_backg_percent)
integral_sensitivity[analysis_type][iebin] = flux[0]
```
```python
plt.figure(figsize=(12,6))
plt.plot(10**logenergy_bins[:-1], integral_sensitivity[0], label='Source-independent')
plt.plot(10**logenergy_bins[:-1], integral_sensitivity[1], label='Source-dependent')
plt.xscale('log')
plt.yscale('log')
#plt.ylim(0.01, 0.5)
plt.xlabel('E$_{reco}$ / TeV')
plt.ylabel('Integral sensitivity (fraction of Crab nebula flux)')
plt.grid()
plt.legend()
plt.show()
print(f'Best integral sensitivity, source-indep: {np.nanmin(integral_sensitivity[0]):.4f} C.U.')
print(f'Best integral sensitivity, source-dep: {np.nanmin(integral_sensitivity[1]):.4f} C.U.')
```
```python
np.set_printoptions(precision=4)
print("Energies (TeV):", np.array(reco_energy[0]))
print()
for analysis_type in range(2):
if analysis_type == 0:
print("Values for source-independent analysis:")
else:
print("Values for source-dependent analysis:")
print("Sensitivity:", sensitivity[analysis_type])
print()
print("Nexcess:", num_excess_events[analysis_type])
print()
print("Noff:", num_off_events[analysis_type])
print()
print("Gammaness cut (even):", np.where(np.isnan(sensitivity[analysis_type]), np.nan,
gammaness_cut[analysis_type][0]))
print()
print("Gammaness cut (odd):", np.where(np.isnan(sensitivity[analysis_type]), np.nan,
gammaness_cut[analysis_type][1]))
print()
print("Angular cut (even):", np.where(np.isnan(sensitivity[analysis_type]), np.nan,
angular_cut[analysis_type][0]))
print()
print("Angular cut (odd):", np.where(np.isnan(sensitivity[analysis_type]), np.nan,
angular_cut[analysis_type][1]))
print('\n'*3)
```
```python
[angular_cut[0][1], angular_cut[1][1]]
```
```python
[gammaness_cut[0][1], gammaness_cut[1][1]]
```
```python
plt.plot(reco_energy[0], angular_cut[0][0], label='even')
plt.plot(reco_energy[1], angular_cut[0][1], label='odd')
plt.xscale('log')
plt.grid()
plt.ylabel('theta2 cut / deg2')
plt.xlabel('E / TeV')
plt.legend()
plt.show()
plt.plot(reco_energy[0], angular_cut[1][0], label='even')
plt.plot(reco_energy[1], angular_cut[1][1], label='odd')
plt.xscale('log')
plt.grid()
plt.ylabel('Alpha cut / deg')
plt.xlabel('E / TeV')
plt.legend()
plt.show()
```
```python
plt.plot(reco_energy[0], num_excess_events[0], label='source-independent')
plt.plot(reco_energy[1], num_excess_events[1], label='source-dependent')
plt.xscale('log')
plt.yscale('log')
plt.grid()
plt.ylabel('Excess events')
plt.xlabel('E / TeV')
plt.legend()
plt.show()
plt.plot(reco_energy[0], num_off_events[0], label='source-independent')
plt.plot(reco_energy[1], num_off_events[1], label='source-dependent')
plt.xscale('log')
plt.yscale('log')
plt.grid()
plt.ylabel('Off events')
plt.xlabel('E / TeV')
plt.legend()
plt.show()
```
```python
num_off_events[0]
```
```python
num_excess_events[0]
```
```python
plt.plot(reco_energy[0], gammaness_cut[0][0], label='even')
plt.plot(reco_energy[0], gammaness_cut[0][1], label='odd')
plt.xscale('log')
plt.grid()
plt.ylabel('Gammaness')
plt.xlabel('E / TeV')
plt.legend()
plt.show()
plt.plot(reco_energy[1], gammaness_cut[1][0], label='even')
plt.plot(reco_energy[1], gammaness_cut[1][1], label='odd')
plt.xscale('log')
plt.grid()
plt.ylabel('Gammaness')
plt.xlabel('E / TeV')
#plt.ylim(0.9, 1.05)
plt.legend()
plt.show()
```
```python
import scipy.integrate as integrate
def dfde(x):
    # Differential flux dF/dE at energy x (in TeV) from the
    # CRAB_MAGIC_JHEAP2015 parametrization (defined elsewhere in the
    # notebook), returned as a plain float in units of 1/(TeV s cm^2).
    return CRAB_MAGIC_JHEAP2015(x*u.TeV).to_value(1/(u.TeV*u.s*u.cm**2))
```
```python
dfde(1.)
```
```python
etev = reco_energy[0] * u.TeV
plt.plot(etev, CRAB_MAGIC_JHEAP2015(etev))
plt.xscale('log')
plt.yscale('log')
plt.show()
logenergy_bins[iebin]+logenergy_bins[iebin+1]
rate_per_s_m2 = 1e4*np.array([integrate.quad(dfde, 10**a, 10**b)[0]
for a, b in zip(logenergy_bins[:-1], logenergy_bins[1:])])
rate_per_s_m2
```
```python
plt.plot(reco_energy[0], num_excess_events[0]/livetimes[0]/rate_per_s_m2, label='source-independent')
plt.plot(reco_energy[1], num_excess_events[1]/livetimes[1]/rate_per_s_m2, label='source-dependent')
plt.xscale('log')
plt.yscale('log')
plt.grid()
plt.ylabel('Aeff(m2)')
plt.xlabel('E / TeV')
plt.legend()
plt.show()
```
```python
```
|
cta-observatoryREPO_NAMEcta-lstchainPATH_START.@cta-lstchain_extracted@cta-lstchain-main@notebooks@calculate_sensitivity_from_Crab.ipynb@.PATH_END.py
|
{
"filename": "computeOccurrenceUncertainty-checkpoint.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/GKbaseline/.ipynb_checkpoints/computeOccurrenceUncertainty-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# Standard library
import os
import os.path
import time
from os import path

# cStringIO exists only on Python 2; fall back to io.StringIO on Python 3,
# keeping the same `StringIO` name for the rest of the notebook.
try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO

# Third-party
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from scipy.stats import gamma
from scipy.optimize import minimize
from scipy.interpolate import RectBivariateSpline
from astropy.io import fits
import emcee
import corner
from ipywidgets import FloatProgress
from IPython.display import display
```
```python
stellarCatalog = "../stellarCatalogs/dr25_stellar_berger2019_clean_GK.txt"
pcCatalog = "koiCatalogs/dr25_GK_PCs.csv"
period_rng = (50, 400)
n_period = 57
rp_rng = (0.75, 2.5)
n_rp = 61
# for quick tests
# nWalkers = 6
# nBurnin = 200
# nMcmc = 1000
# for production runs
nWalkers = 16
nBurnin = 1000
nMcmc = 5000
model = "dualPowerLaw"
whichRadii = "corrected"
```
```python
def rateModel(x, y, xRange, yRange, theta, model):
    """Occurrence-rate density evaluated at period x and radius y.

    The base model is a product of power laws in x and y, normalized so that
    the integral of the gap-free term over xRange * yRange equals f0 = theta[0].
    The "Gap" variants subtract a Gaussian valley in radius whose center
    follows a power law in period: 10**(gapSlope*log10(x) + gapOffset).

    theta layout per model:
      dualPowerLaw / dualPowerLawFixedValley: (f0, alpha, beta)
      dualPowerLawGap:                        (f0, alpha, beta, gd, gw, gapOffset, gapSlope)
      dualPowerLawGapFixedSlope:              (f0, alpha, beta, gd, gw, gapOffset)

    Raises ValueError for an unknown model name.
    """
    if model == "dualPowerLaw":
        f0, alpha, beta = theta
        ap1 = alpha+1;
        bp1 = beta+1;
        # Normalized product of power laws: integrates to f0 over the ranges.
        r = f0*(ap1*(x**alpha)/(xRange[1]**ap1-xRange[0]**ap1))*(bp1*(y**beta)/(yRange[1]**bp1-yRange[0]**bp1))
    elif model == "dualPowerLawGap":
        f0, alpha, beta, gd, gw, gapOffset, gapSlope = theta
        ap1 = alpha+1;
        bp1 = beta+1;
        # van Eylen fit, really only good for p<100 days
        # gapSlope = -0.13
        # gapOffset = 0.41
        # constant-radius valley, to match radius marginals
        # gapSlope = 0
        # gapOffset = 0.26
        # Valley center (in radius) as a power law in period; gd/gw are the
        # Gaussian valley depth and width.
        gapModel = 10**(gapSlope*np.log10(x) + gapOffset)
        gapDist2 = (gapModel - y)**2
        r = f0*(ap1*(x**alpha)/(xRange[1]**ap1-xRange[0]**ap1))*(bp1*(y**beta)/(yRange[1]**bp1-yRange[0]**bp1)
            - gd*np.exp(-gapDist2/(2*gw*gw)))
    elif model == "dualPowerLawGapFixedSlope":
        f0, alpha, beta, gd, gw, gapOffset = theta
        ap1 = alpha+1;
        bp1 = beta+1;
        # van Eylen fit, really only good for p<100 days
        # gapSlope = -0.13
        # gapOffset = 0.41
        # constant-radius valley, to match radius marginals
        # Valley slope frozen at 0 (constant-radius valley); only the offset fits.
        gapSlope = 0
        # gapOffset = 0.26
        gapModel = 10**(gapSlope*np.log10(x) + gapOffset)
        gapDist2 = (gapModel - y)**2
        r = f0*(ap1*(x**alpha)/(xRange[1]**ap1-xRange[0]**ap1))*(bp1*(y**beta)/(yRange[1]**bp1-yRange[0]**bp1)
            - gd*np.exp(-gapDist2/(2*gw*gw)))
    elif model == "dualPowerLawFixedValley":
        f0, alpha, beta = theta
        ap1 = alpha+1;
        bp1 = beta+1;
        # van Eylen fit, really only good for p<100 days
        # gapSlope = -0.13
        # gapOffset = 0.41
        # constant-radius valley, to match radius marginals
        # All four valley parameters frozen at previously fitted values;
        # only the power-law parameters remain free.
        gd = 0.29297043
        gw = 0.14683756
        gapSlope = 0
        gapOffset = 0.29125824
        gapModel = 10**(gapSlope*np.log10(x) + gapOffset)
        gapDist2 = (gapModel - y)**2
        r = f0*(ap1*(x**alpha)/(xRange[1]**ap1-xRange[0]**ap1))*(bp1*(y**beta)/(yRange[1]**bp1-yRange[0]**bp1)
            - gd*np.exp(-gapDist2/(2*gw*gw)))
    else:
        raise ValueError('Bad model name');
    return r
def getModelLabels(model):
    """Return the LaTeX parameter labels (e.g. for corner plots) of `model`.

    Raises ValueError for an unknown model name.

    NOTE(review): labels 2 and 3 are beta then alpha, while rateModel unpacks
    theta as (f0, alpha, beta) — confirm the intended label ordering.
    """
    base = [r"$F_0$", r"$\beta$", r"$\alpha$"]
    label_table = {
        "dualPowerLaw": list(base),
        "dualPowerLawGap": base + [r"$d_g$", r"$w_g$", r"$o_g$", r"$s_g$"],
        "dualPowerLawGapFixedSlope": base + [r"$d_g$", r"$w_g$", r"$o_g$"],
        "dualPowerLawFixedValley": list(base),
    }
    try:
        return label_table[model]
    except KeyError:
        raise ValueError('Bad model name')
def initRateModel(model):
    """Return a starting-point parameter vector theta for the given rate model.

    Raises ValueError for an unknown model name.
    """
    # (f0, alpha, beta) for the plain power-law models.
    power_law_start = [0.75, -0.53218, -0.5]
    # (f0, alpha, beta, gd, gw, go) shared by the gap models.
    gap_start = [0.75, -0.69, -0.1, 0.22, 0.1, 0.26]
    if model in ("dualPowerLaw", "dualPowerLawFixedValley"):
        return list(power_law_start)
    if model == "dualPowerLawGap":
        return gap_start + [0.0]  # free gap slope gs, starting flat
    if model == "dualPowerLawGapFixedSlope":
        return list(gap_start)
    raise ValueError('Bad model name')
def lnPoisprior(theta, model):
    """Log-prior for the rate-model parameters: flat inside a rectangular box.

    Returns 1.0 inside the box and -inf outside. (The constant is 1.0 rather
    than 0.0; lnprob only tests the prior for finiteness, so the offset is
    inconsequential.) Raises ValueError for an unknown model name.
    """
    # The first three parameters use closed intervals [lo, hi]; the extra
    # gap parameters use half-open intervals [lo, hi).
    closed = [(0.0, 5.0), (-5.0, 5.0), (-5.0, 5.0)]
    if model in ("dualPowerLaw", "dualPowerLawFixedValley"):
        half_open = []
    elif model == "dualPowerLawGap":
        half_open = [(0, 5), (0.1, 0.3), (0.2, 0.4), (-0.0, 0.05)]
    elif model == "dualPowerLawGapFixedSlope":
        half_open = [(0, 0.6), (0.1, 0.3), (0.2, 0.4)]
    else:
        raise ValueError('Bad model name')
    inside = all(lo <= theta[i] <= hi for i, (lo, hi) in enumerate(closed))
    inside = inside and all(lo <= theta[len(closed) + j] < hi
                            for j, (lo, hi) in enumerate(half_open))
    return 1.0 if inside else -np.inf
```
```python
def medianAndErrorbars(data):
    """Summarize samples by median and asymmetric 1-sigma-like errors.

    Uses the 16th/50th/84th percentiles. A 1D array yields the list
    [median, upper_err, lower_err]; a 2D array is summarized column-wise
    (axis 0), yielding a list of (median, upper_err, lower_err) tuples.
    """
    lo, med, hi = np.percentile(data, [16, 50, 84], axis=0)
    if data.ndim > 1:
        return [(m, h - m, m - l) for l, m, h in zip(lo, med, hi)]
    return [med, hi - med, med - lo]
def printMedianAndErrorbars(data):
    """Format a 1D sample's median and errors as LaTeX, e.g. 1.234^{+0.1}_{-0.2}.

    Prints a warning and returns None for arrays with more than one dimension.
    """
    stats = medianAndErrorbars(data)
    if data.ndim > 1:
        print("printMedianAndErrorbars only works for 1D arrays")
        return None
    med, plus, minus = stats
    return ("{:.3f}".format(med) + "^{+" + "{:.3f}".format(plus) +
            "}_{-" + "{:.3f}".format(minus) + "}")
```
```python
```
```python
```
```python
from scipy.integrate import romb
def integrate2DGrid(g, dx, dy):
    """Integrate a function sampled on a uniform 2D grid via Romberg quadrature.

    `dx` and `dy` are the sample spacings along the two axes. Each side must
    have an odd number of points (romb itself further requires 2**k + 1
    samples per axis). Raises ValueError when a side has an even length.
    """
    n_rows, n_cols = g.shape
    if n_rows % 2 == 0 or n_cols % 2 == 0:
        raise ValueError('integrate2DGrid requires a grid with odd number of points on a side')
    inner = romb(g, dx)   # collapse the last axis first
    return romb(inner, dy)
def integrateRateModel(periodRange, rpRange, theta, model):
    # Integrate the rate model over periodRange x rpRange with Romberg quadrature.
    # NOTE: rateModel is normalized with the global period_rng/rp_rng, not with
    # the (possibly smaller) integration ranges, so this can return partial
    # integrals of the fully normalized model.
    nPts = 2**5+1 # must be 2**n + 1 (required by romb via integrate2DGrid)
    pGrid, rGrid = np.meshgrid(np.linspace(periodRange[0], periodRange[1], nPts),
                               np.linspace(rpRange[0], rpRange[1], nPts),
                               indexing="ij")
    dp = (pGrid[1,0]-pGrid[0,0])
    dr = (rGrid[0,1]-rGrid[0,0])
    if theta.ndim == 1:
        # Single parameter vector: one scalar integral.
        y = rateModel(pGrid, rGrid, period_rng, rp_rng, theta, model)
        return integrate2DGrid(y, dp, dr)
    else: # assume first dimension is array of thetas (e.g. MCMC samples)
        ret = np.zeros(theta.shape[0])
        if len(ret) > 100:
            # Show a notebook progress bar only for long sample chains.
            f = FloatProgress(min=0, max=len(ret))
            display(f)
        for i in range(len(ret)):
            y = rateModel(pGrid, rGrid, period_rng, rp_rng, theta[i,:], model)
            ret[i] = integrate2DGrid(y, dp, dr)
            if len(ret) > 100:
                f.value += 1
        return ret
def integratePopTimesComp(periodRange, rpRange, theta, model, compGrid):
    # Integrate (rate model x completeness) over periodRange x rpRange.
    # The integration grid resolution is taken from compGrid's shape so the
    # model is evaluated exactly on the completeness grid points; compGrid
    # must therefore satisfy integrate2DGrid's odd / 2**k+1 size requirement.
    nP = compGrid.shape[0]
    nR = compGrid.shape[1]
    pGrid, rGrid = np.meshgrid(np.linspace(periodRange[0], periodRange[1], nP),
                               np.linspace(rpRange[0], rpRange[1], nR),
                               indexing="ij")
    dp = (pGrid[1,0]-pGrid[0,0])
    dr = (rGrid[0,1]-rGrid[0,0])
    # NOTE: as in integrateRateModel, the model normalization uses the global
    # period_rng/rp_rng rather than the integration ranges.
    y = rateModel(pGrid, rGrid, period_rng, rp_rng, theta, model)*compGrid
    return integrate2DGrid(y, dp, dr)
```
```python
# population inference functions
def lnlike(theta):
    # Inhomogeneous-Poisson log-likelihood of the detected planet catalog:
    # sum of log rate at each detection minus the expected number of
    # detections (rate x completeness integrated over the grid).
    # Relies on notebook globals: period_grid, rp_grid, period_rng, rp_rng,
    # model, summedCompleteness, vol, koi_periods, koi_rps.
    pop = rateModel(period_grid, rp_grid, period_rng, rp_rng, theta, model) * summedCompleteness
    # Cell-average the grid values (trapezoid-like) before summing over cells.
    pop = 0.5 * (pop[:-1, :-1] + pop[1:, 1:])
    norm = np.sum(pop * vol)
    ll = np.sum(np.log(rateModel(koi_periods, koi_rps, period_rng, rp_rng, theta, model))) - norm
    # Guard against NaN/inf (e.g. log of a non-positive rate).
    return ll if np.isfinite(ll) else -np.inf
# The ln-probability function is just proportional to the ln-likelihood
# since we're assuming uniform priors (the prior only bounds the box).
def lnprob(theta):
    lp = lnPoisprior(theta, model)
    if not np.isfinite(lp):
        return -np.inf
    return lnlike(theta)
# The negative ln-likelihood is useful for optimization.
# Optimizers want to *minimize* your function.
def nll(theta):
    ll = lnlike(theta)
    # Large finite penalty instead of inf keeps scipy optimizers stable.
    return -ll if np.isfinite(ll) else 1e15
```
```python
# population analysis functions
# We'll reuse these functions to plot all of our results.
def make_plot(pop_comp, x0, x, y, ax):
    """Plot the rate marginalized over y with 68%/95% credible bands on ax.

    pop_comp: samples of the rate on a grid; axis 0 indexes posterior
    samples and axis 1 corresponds to the y grid (it is integrated out
    below).  x0 supplies the plot abscissa; x is assumed uniformly spaced
    (only its first spacing is used).
    """
    # trapezoidal rule in y: average adjacent y-nodes ...
    pop = 0.5 * (pop_comp[:, 1:] + pop_comp[:, :-1])
    # ... then sum weighted by the y spacings to marginalize over y
    pop = np.sum(pop * np.diff(y)[None, :, None], axis=1)
    # scale by the (uniform) x bin width, then take the
    # 95% / 68% / median percentiles across posterior samples
    a, b, c, d, e = np.percentile(pop * np.diff(x)[0], [2.5, 16, 50, 84, 97.5], axis=0)
    ax.fill_between(x0, a, e, color="k", alpha=0.1, edgecolor="none")  # 95% band
    ax.fill_between(x0, b, d, color="k", alpha=0.3, edgecolor="none")  # 68% band
    ax.plot(x0, c, "k", lw=1)  # median
def plot_results(samples):
    """Return Gamma_earth for each posterior sample.

    Gamma_earth is the rate model evaluated at P = 365.25 days and
    Rp = 1 Earth radius, scaled by 365 (per-day rate to per-year).
    """
    samples = np.atleast_2d(samples)
    gamma_earth = np.empty(len(samples))
    for idx, thetaSample in enumerate(samples):
        gamma_earth[idx] = 365. * rateModel(365.25, 1.0, period_rng, rp_rng, thetaSample, model)
    return gamma_earth
```
```python
def getRadii(catalog):
    """Return the planet-radius column of catalog selected by the global whichRadii.

    Supported whichRadii values:
      - "corrected":               catalog.corrected_prad
      - "corrected Minus 1Sigma":  corrected radius shifted down by the
                                   1-sigma error (err1) to probe
                                   systematically smaller radii
      - "kic":                     catalog.koi_prad

    Raises:
        ValueError: for any other whichRadii value.
    """
    # use a single consistent if/elif chain (original mixed a bare `if`
    # into the chain) and drop the stray trailing semicolon
    if whichRadii == "corrected":
        return catalog.corrected_prad
    elif whichRadii == "corrected Minus 1Sigma":
        return catalog.corrected_prad - catalog.corrected_prad_err1
    elif whichRadii == "kic":
        return catalog.koi_prad
    else:
        raise ValueError('Bad whichRadii string')
```
```python
stellarTargets = pd.read_csv(stellarCatalog)
base_kois = pd.read_csv(pcCatalog)
m = (period_rng[0] <= base_kois.koi_period) & (base_kois.koi_period <= period_rng[1])
thisRadii = getRadii(base_kois)
m &= np.isfinite(thisRadii) & (rp_rng[0] <= thisRadii) & (thisRadii <= rp_rng[1])
kois = pd.DataFrame(base_kois[m])
allKois = kois
```
```python
plt.hist(base_kois.corrected_prad[base_kois.corrected_prad<10], 100);
```

```python
fig, ax = plt.subplots(figsize=(15,10));
ax.errorbar(kois.koi_period, kois.koi_prad,
yerr = [-kois.koi_prad_err2, kois.koi_prad_err1],
fmt="k.", alpha = 0.5);
ax.errorbar(kois.koi_period, kois.corrected_prad,
yerr = [-kois.corrected_prad_err2, kois.corrected_prad_err1],
fmt="r.", alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Radius Change");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
```
(50, 400)

```python
period = np.linspace(period_rng[0], period_rng[1], n_period)
rp = np.linspace(rp_rng[0], rp_rng[1], n_rp)
period_grid, rp_grid = np.meshgrid(period, rp, indexing="ij")
periodShape = period_grid.shape
```
```python
inputgrid = "../completenessContours/out_sc0_GK_baseline.fits.gz"
hdulist = fits.open(inputgrid)
cumulative_array = hdulist[0].data
kiclist = np.asarray(hdulist[1].data, dtype=np.int32)
probdet = np.transpose(cumulative_array[0])
probtot = np.transpose(cumulative_array[1])
prihdr = hdulist[0].header
min_comp_period = prihdr["MINPER"]
max_comp_period = prihdr["MAXPER"]
n_comp_period = prihdr["NPER"]
min_comp_rp = prihdr["MINRP"]
max_comp_rp = prihdr["MAXRP"]
n_comp_rp = prihdr["NRP"]
# print "KIC list length" + '{:6d}'.format(kiclist.size)
period_want = np.linspace(min_comp_period, max_comp_period, n_comp_period)
rp_want = np.linspace(min_comp_rp, max_comp_rp, n_comp_rp)
period_want2d, rp_want2d = np.meshgrid(period_want, rp_want)
# interpolate the numerical grids onto the period_grid, rp_grid space
#print("size probtot = " + str(np.shape(probtot)))
#print("size period_want = " + str(np.shape(period_want)))
#print("size rp_want = " + str(np.shape(rp_want)))
numCompVeInterp = RectBivariateSpline(period_want, rp_want, probtot)
numProbDetInterp = RectBivariateSpline(period_want, rp_want, probdet)
```
```python
```
```python
summedCompleteness = numCompVeInterp(period, rp)
summedProbDet = numProbDetInterp(period, rp)
```
```python
# contourLevels = np.arange(1e-2, 1, 5e-2)
contourLevels = [0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1.0]
fig, ax = plt.subplots(figsize=(15,10));
plt.pcolor(period_grid, rp_grid, summedProbDet, cmap="BuGn")
c = plt.contour(period_grid, rp_grid, summedProbDet / kiclist.size, contourLevels,
colors="k", alpha=0.8)
scf = plt.scatter(kois.koi_period, getRadii(kois), cmap="plasma",
c=kois.reliability, edgecolors='k', s=100*kois.totalReliability, alpha = 1.0)
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Instrumental FP Reliability");
#c = plt.contour(period_grid, rp_grid, numCompVe / kiclist.size,
# colors="k", alpha=0.8)
plt.ylim(0.5, 2.5)
plt.xlim(50, 400)
plt.clabel(c, fontsize=12, inline=1, fmt="%.3f")
plt.title("Summed detection*vetting efficiency, " + whichRadii + " radii", fontsize = 18)
plt.xlabel("period [days]", fontsize = 18)
plt.ylabel("$R_p \, [R_\oplus]$", fontsize = 18);
plt.plot([200, 200], [1, 2], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle='--', linewidth=1)
plt.savefig("summedCompleteness.pdf",bbox_inches='tight')
```

```python
# contourLevels = np.arange(1e-2, 1, 5e-2)
contourLevels = np.arange(1e-3, 1e-2, 1e-3)
contourLevels = np.insert(contourLevels, 0, [1e-4, 5e-4])
fig, ax = plt.subplots(figsize=(15,10));
plt.pcolor(period_grid, rp_grid, summedCompleteness, cmap="BuGn")
c = plt.contour(period_grid, rp_grid, summedCompleteness / kiclist.size, contourLevels,
colors="k", alpha=0.8)
scf = plt.scatter(kois.koi_period, getRadii(kois), cmap="plasma",
c=kois.totalReliability, edgecolors='k', s=100*kois.totalReliability, alpha = 1.0)
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability", fontsize = 24);
#c = plt.contour(period_grid, rp_grid, numCompVe / kiclist.size,
# colors="k", alpha=0.8)
plt.ylim(0.75, 2.5)
plt.xlim(50, 400)
plt.clabel(c, fontsize=12, inline=1, fmt="%.4f")
# plt.title("DR25 PC Average detection*vetting efficiency", fontsize = 18)
plt.tick_params(labelsize = 18)
plt.xlabel("period [days]", fontsize = 24)
plt.ylabel("$R_p \, [R_\oplus]$", fontsize = 24);
plt.plot([200, 200], [1, 2], color='k', linestyle=':', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle=':', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle=':', linewidth=1)
plt.plot([0.8*365, 0.8*365], [0.8, 1.2], color='k', linestyle='--', linewidth=1)
plt.plot([1.2*365, 1.2*365], [0.8, 1.2], color='k', linestyle='--', linewidth=1)
plt.plot([0.8*365, 1.2*365], [0.8, 0.8], color='k', linestyle='--', linewidth=1)
plt.plot([0.8*365, 1.2*365], [1.2, 1.2], color='k', linestyle='--', linewidth=1)
plt.savefig("summedCompleteness.pdf",bbox_inches='tight')
```

```python
1.2*365
```
438.0
```python
```
Compute a basic occurrence rate without reliability
```python
kois = allKois
if model == "dualPowerLaw":
bounds = [(0, 5), (-5, 5), (-5, 5)]
elif model == "dualPowerLawGap":
bounds = [(0, 5), (-5, 5), (-5, 5), (0, 5), (0.0, 0.3), (0.2, 0.4), (-0.2, 0.2)]
elif model == "dualPowerLawGapFixedSlope":
bounds = [(0, 5), (-5, 5), (-5, 5), (0, 5), (0.0, 0.3), (0.2, 0.4)]
elif model == "dualPowerLawFixedValley":
bounds = [(0, 5), (-5, 5), (-5, 5)]
# The ln-likelihood function given at the top of this post.
koi_periods = np.array(kois.koi_period)
koi_rps = np.array(getRadii(kois))
# koi_rps = getRadii(kois)
vol = np.diff(period_grid, axis=0)[:, :-1] * np.diff(rp_grid, axis=1)[:-1, :]
theta_0 = initRateModel(model)
r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
print(r.x)
ge = plot_results(r.x);
```
[ 0.59812315 -0.56349886 0.33545565]
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: divide by zero encountered in log
import sys
```python
rateModel(365.25, 1.0, period_rng, rp_rng, r.x, model)*365
```
0.20648611095875508
```python
samples_noreliability = np.load("occurenceRatePosteriors/occurenceRatePosteriors_noreliability.npy")
```
```python
```
```python
```
```python
```
```python
```
```python
postName = "occurenceRatePosteriors/occurenceRatePosteriors_radiusUncertainty.npy"
# postName = "occurenceRatePosteriors/occurenceRatePosteriors_FAReliability.npy"
if path.exists(postName):
samplesRadiiSigma = np.load(postName)
ndim = samplesRadiiSigma.shape[1]
else:
nTrials = 1000
f = FloatProgress(min=0, max=nTrials)
display(f)
koiRadii0 = getRadii(base_kois)
periodSelect = (period_rng[0] <= base_kois.koi_period) & (base_kois.koi_period <= period_rng[1])
for mCount in range(nTrials):
# randomly change the koi radius
# randomly choose half to be modified w/ upper error, half w/ lower error
errorSelect = np.random.rand(len(base_kois)) < 0.5
# add the sampled error to the radii
# upper error bar
thisRadii = np.zeros(koiRadii0.shape)
thisRadii[errorSelect] = koiRadii0[errorSelect] \
+ np.abs(np.random.normal(0.0, base_kois['corrected_prad_err1'][errorSelect], sum(errorSelect)))
# lower error bar
thisRadii[~errorSelect] = koiRadii0[~errorSelect] \
- np.abs(np.random.normal(0.0, -base_kois['corrected_prad_err2'][~errorSelect], sum(~errorSelect)))
# select on period and radius in range
m = periodSelect & np.isfinite(thisRadii) & (rp_rng[0] <= thisRadii) & (thisRadii <= rp_rng[1])
koi_periods = np.array(base_kois[m].koi_period)
koi_rps = np.array(thisRadii[m])
theta_0 = initRateModel(model)
r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
##################################################################
ndim, nwalkers = len(r.x), 2*len(r.x)
pos = [r.x + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Burn in.
pos, _, _ = sampler.run_mcmc(pos, 400)
sampler.reset()
# Production.
pos, _, _ = sampler.run_mcmc(pos, 2000)
samples = sampler.flatchain
if mCount == 0:
samplesRadiiSigma = samples[:-1:10,:]
allRadii = koi_rps;
allRadii0 = koiRadii0[m];
else:
samplesRadiiSigma = np.concatenate((samplesRadiiSigma, samples[:-1:10,:]))
allRadii = np.concatenate((allRadii, koi_rps))
allRadii0 = np.concatenate((allRadii0, koiRadii0[m]))
f.value += 1
np.save(postName, samplesRadiiSigma)
```
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-21-793be5e33def> in <module>()
1 postName = "occurenceRatePosteriors/occurenceRatePosteriors_radiusUncertainty.npy"
2 # postName = "occurenceRatePosteriors/occurenceRatePosteriors_FAReliability.npy"
----> 3 if path.exists(postName):
4 samplesRadiiSigma = np.load(postName)
5 ndim = samplesRadiiSigma.shape[1]
NameError: name 'path' is not defined
```python
plt.hist(koi_periods, 20);
```
```python
plt.hist(allRadii, 20);
len(allRadii)
```
```python
plt.hist(allRadii - allRadii0, 100);
```
```python
corner.corner(samplesRadiiSigma, labels=getModelLabels(model), label_kwargs = {"fontsize": 32});
gamma_earth_samplesRadiiSigma = plot_results(samplesRadiiSigma)
```
```python
plt.hist(np.log10(gamma_earth_samplesRadiiSigma), 50, histtype="step", color="k", density=True)
plt.hist(np.log10(gamma_earth_no_reliability), 50, histtype="step", color="b", density=True)
plt.gca().set_yticklabels([])
plt.title("the rate of Earth analogs: " + str(10**np.mean(np.log10(gamma_earth_samplesRadiiSigma))))
plt.xlabel(r"$\log_{10}\Gamma_\oplus = \left. \log_{10}\mathrm{d}N / \mathrm{d}\ln P \, \mathrm{d}\ln R_p \right |_\oplus$");
print("Mean Gamma_Earth = {0}".format(10**np.mean(np.log10(gamma_earth_no_reliability))))
print("Gamma at p=365 days, r=1Re without reliability, no uncertainty = " + printMedianAndErrorbars(gamma_earth_no_reliability))
print("Mean Gamma_Earth = {0}".format(10**np.mean(np.log10(gamma_earth_samplesRadiiSigma))))
print("Gamma at p=365 days, r=1Re without reliability, rp uncertainty = " + printMedianAndErrorbars(gamma_earth_samplesRadiiSigma))
```
```python
modelLabels = getModelLabels(model)
for i in range(0,ndim):
print("MCMC with uncertainty " + modelLabels[i] + "=" + printMedianAndErrorbars(samplesRadiiSigma[:,i]))
for i in range(0,ndim):
print("MCMC no reliability " + modelLabels[i] + "=" + printMedianAndErrorbars(samples_noreliability[:,i]))
```
```python
plt.figure(figsize=(15,10));
plt.hist(gamma_earth_no_reliability, 50, histtype="step", color="k", density=True)
plt.hist(gamma_earth_samplesRadiiSigma, 50, histtype="step", color="b", density=True)
plt.gca().set_yticklabels([])
plt.xlabel(r"$\Gamma_\oplus$", fontSize = 36);
plt.tick_params(labelsize = 24)
plt.savefig("gammaDist_rp_sigma.pdf",bbox_inches='tight')
```
```python
F1Dist_nr = integrateRateModel([50.,200.], [1., 2.], samples_noreliability, model)
print("1-2Re, 50-200 Days without reliability = " + printMedianAndErrorbars(F1Dist_nr))
F1Dist_nr_sigma = integrateRateModel([50.,200.], [1., 2.], samplesRadiiSigma[:-1:10,:], model)
print("1-2Re, 50-200 Days without reliability with rp uncertainty = " + printMedianAndErrorbars(F1Dist_nr_sigma))
```
```python
print("1-2Re, 50-200 Days without reliability = " + printMedianAndErrorbars(F1Dist_nr))
print("1-2Re, 50-200 Days without reliability with rp uncertainty = " + printMedianAndErrorbars(F1Dist_nr_sigma))
```
```python
plt.figure(figsize=(15,10));
greyLevel = "0.7"
plt.hist(F1Dist_nr, 50, histtype="step", color="k", density=True);
plt.hist(F1Dist_nr_sigma, 50, histtype="step", color="b", density=True);
plt.gca().set_yticklabels([])
plt.tick_params(labelsize = 24)
plt.xlabel(r"$F_1$", fontSize = 36);
plt.savefig("f1Dist_rp_sigma.pdf",bbox_inches='tight')
plt.title("Distribution for 50-200 days, 1-2 $R_\oplus$", fontsize=18);
```
```python
zetaDist_nr = integrateRateModel([.8*365.25,1.2*365.25],
[0.8,1.2], samples_noreliability, model)
print("zeta Earth without reliability = " + printMedianAndErrorbars(zetaDist_nr))
zetaDist_nr_sigma = integrateRateModel([.8*365.25,1.2*365.25],
[0.8,1.2], samplesRadiiSigma[:-1:10,:], model)
print("zeta Earth without reliability with rp uncertainty = " + printMedianAndErrorbars(zetaDist_nr_sigma))
```
```python
print("zeta Earth without reliability = " + printMedianAndErrorbars(zetaDist_nr))
print("zeta Earth without reliability with rp uncertainty = " + printMedianAndErrorbars(zetaDist_nr_sigma))
```
```python
plt.figure(figsize=(15,10));
plt.hist(zetaDist_nr, 50, histtype="step", color="k", density=True);
plt.hist(zetaDist_nr_sigma, 50, histtype="step", color="b", density=True);
plt.gca().set_yticklabels([])
plt.tick_params(labelsize = 24)
plt.xlabel(r"$\zeta_{\oplus}$", fontSize = 36);
plt.savefig("zetaEarthDist_rp_sigma.pdf",bbox_inches='tight')
plt.title("Distribution of $\zeta_\oplus$", fontsize=18);
```
Compute an occurrence rate with reliability
```python
import sys
sys.path.insert(0, '..')
import dr25Models as funcModels
postName = "occurenceRatePosteriors/occurenceRatePosteriors_reliabilityUncertainty.npy"
# postName = "occurenceRatePosteriors/occurenceRatePosteriors_FAReliability.npy"
if path.exists(postName):
samplesReliabilitySigma = np.load(postName)
ndim = samplesReliabilitySigma.shape[1]
else:
nTrials = 1000
f = FloatProgress(min=0, max=nTrials)
display(f)
# start over
base_kois = pd.read_csv(pcCatalog)
m = (period_rng[0] <= base_kois.koi_period) & (base_kois.koi_period <= period_rng[1])
thisRadii = getRadii(base_kois)
m &= np.isfinite(thisRadii) & (rp_rng[0] <= thisRadii) & (thisRadii <= rp_rng[1])
kois = pd.DataFrame(base_kois[m])
allKois = kois
# read the posteriors
# set the effectiveness model
fpEffModel = "rotatedLogisticX0"
# set the obs FP rate model
obsModel = "rotatedLogisticX0"
eSamples = np.load("binEffPosteriors_" + str(fpEffModel) + ".npy");
oSamples = np.load("binObsPosteriors_" + str(obsModel) + ".npy");
# read in the model parameters
tt = pd.read_pickle("fpEffectivenessTable.pkl")
tm = tt[tt.Model == fpEffModel]
fpEffXRange = tm.periodRange.values[0]
fpEffYRange = tm.mesRange.values[0]
fpEffTheta = tm.medianMCMCTheta.values[0]
tt = pd.read_pickle("obsFpTable.pkl")
tm = tt[tt.Model == obsModel]
obsXRange = tm.periodRange.values[0]
obsYRange = tm.mesRange.values[0]
obsTheta = tm.medianMCMCTheta.values[0]
for mCount in range(nTrials):
# randomly select kois
# recompute the reliability with draw from posteriors
fpEffectiveness = funcModels.evaluateModel(allKois.koi_period,
allKois.koi_max_mult_ev, eSamples[np.random.randint(len(eSamples)),:],
fpEffXRange, fpEffYRange, fpEffModel)
obsFpRate = funcModels.evaluateModel(allKois.koi_period,
allKois.koi_max_mult_ev, oSamples[np.random.randint(len(eSamples)),:],
obsXRange, obsYRange, obsModel)
faReliability = 1-(obsFpRate/(1-obsFpRate))*(1-fpEffectiveness)/fpEffectiveness
faReliability[faReliability < 0.] = 0.
totalReliability = faReliability * (1 - allKois.fpp_prob_use)
koiSelect = (np.random.rand(len(allKois)) < totalReliability)
kois = allKois[koiSelect]
# print(str(mCount) + " of " + str(nTrials) + ", selected " + str(len(kois))
# + " kois out of " + str(len(allKois)) + " after reliability cut")
koi_periods = np.array(kois.koi_period)
koi_rps = np.array(getRadii(kois))
theta_0 = initRateModel(model)
r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
##################################################################
ndim, nwalkers = len(r.x), 2*len(r.x)
pos = [r.x + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Burn in.
pos, _, _ = sampler.run_mcmc(pos, 400)
sampler.reset()
# Production.
pos, _, _ = sampler.run_mcmc(pos, 2000)
samples = sampler.flatchain
if mCount == 0:
samplesReliabilitySigma = samples[:-1:10,:]
allReliability = totalReliability;
else:
samplesReliabilitySigma = np.concatenate((samplesReliabilitySigma, samples[:-1:10,:]))
allReliability = np.vstack((allReliability, totalReliability))
f.value += 1
np.save(postName, samplesReliabilitySigma)
```
```python
```
```python
ii = (allKois["reliability"]<0.9)
ar = pd.DataFrame(allReliability[:,ii])
ax = ar.plot.kde(legend = False, fontsize = 18, figsize = (15,8))
plt.ylim(0,11)
plt.xlim(0,1)
plt.gca().set_yticklabels([])
plt.xlabel("False Alarm Reliability", fontsize = 24)
plt.ylabel("")
plt.savefig("reliabilityUncertaintyDist.pdf",bbox_inches='tight')
```
```python
allKois["reliability"].iloc[0]
```
```python
corner.corner(samplesReliabilitySigma, labels=getModelLabels(model), label_kwargs = {"fontsize": 32});
```
```python
modelLabels = getModelLabels(model)
for i in range(0,ndim):
print("MCMC with reliability " + modelLabels[i] + "=" + printMedianAndErrorbars(samplesReliabilitySigma[:,i]))
```
```python
gamma_earth = plot_results(samplesReliabilitySigma)
```
```python
```
```python
```
```python
gamma_earth_no_sigma = np.load("gammaDistReliability.npy", )
F1Dist_no_sigma = np.load("f1DistReliability.npy")
zetaDist_no_sigma = np.load("zetaDistReliability.npy")
```
```python
plt.figure(figsize=(15,10));
plt.hist(gamma_earth_no_sigma, 50, histtype="step", color="k", density=True)
plt.hist(gamma_earth, 50, histtype="step", color="b", density=True)
plt.gca().set_yticklabels([])
plt.xlabel(r"$\Gamma_\oplus$", fontSize = 36);
plt.tick_params(labelsize = 24)
plt.savefig("gammaDist_reliabilityUncertainty.pdf",bbox_inches='tight')
plt.title("the rate of Earth analogs: " + str(round(np.median(gamma_earth), 3))
+ "/" + str(round(np.median(gamma_earth_no_reliability), 3)))
```
```python
print("Gamma at p=365 days, r=1Re = " + printMedianAndErrorbars(gamma_earth))
```
```python
F1Dist = integrateRateModel([50.,200.], [1., 2.], samplesReliabilitySigma[:-1:10,:], model)
```
```python
plt.figure(figsize=(15,10));
greyLevel = "0.7"
plt.hist(F1Dist_no_sigma, 50, histtype="step", color="k", density=True);
plt.hist(F1Dist, 50, histtype="step", color="b", density=True);
plt.gca().set_yticklabels([])
plt.tick_params(labelsize = 24)
plt.xlabel(r"$F_1$", fontSize = 36);
plt.savefig("f1Dist_reliabilityUncertainty.pdf",bbox_inches='tight')
plt.title("Distribution for 50-200 days, 1-2 $R_\oplus$", fontsize=18);
```
```python
print("1-2Re, 50-200 Days = " + printMedianAndErrorbars(F1Dist))
```
```python
zetaDist = integrateRateModel([.8*365.25,1.2*365.25],
[0.8,1.2], samplesReliabilitySigma[:-1:10,:], model)
```
```python
plt.figure(figsize=(15,10));
plt.hist(zetaDist_no_sigma, 50, histtype="step", color="k", density=True);
plt.hist(zetaDist, 50, histtype="step", color="b", density=True);
plt.gca().set_yticklabels([])
plt.tick_params(labelsize = 24)
plt.xlabel(r"$\zeta_{\oplus}$", fontSize = 36);
plt.savefig("zetaEarthDist_reliabilityUncertainty.pdf",bbox_inches='tight')
plt.title("Distribution of $\zeta_\oplus$", fontsize=18);
```
```python
print("zeta-Earth = " + printMedianAndErrorbars(zetaDist))
```
```python
```
```python
np.save("samples_noreliability.npy", samples_noreliability)
np.save("samplesRadiiSigma.npy", samplesRadiiSigma)
np.save("samplesReliabilitySigma.npy", samplesReliabilitySigma)
```
```python
```
```python
```
```python
```
```python
```
```python
```
```javascript
%%javascript
IPython.notebook.save_notebook()
```
```bash
%%bash -s "$model"
jupyter nbconvert --to html computeOccurrenceUncertainty.ipynb
# nbconvert names its output after the input notebook, so the file to
# archive is computeOccurrenceUncertainty.html (the original mv used a
# doubled "...UncertaintyUncertainty.html" name that never exists)
mv computeOccurrenceUncertainty.html htmlArchive/computeOccurrenceUncertainty_$1.html
```
```python
```
```python
errorSelect = np.random.rand(len(allKois)) < 0.5
```
```python
sum(errorSelect)
```
```python
sum(~errorSelect)
```
```python
sum(errorSelect)+sum(~errorSelect)
```
```python
len(gamma_earth_no_reliability)
```
```python
samples_noreliability.shape
```
```python
```
|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@GKbaseline@.ipynb_checkpoints@computeOccurrenceUncertainty-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "LOFAR_preprocess.py",
"repo_name": "revoltek/LiLF",
"repo_path": "LiLF_extracted/LiLF-master/pipelines/LOFAR_preprocess.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os, re, glob, time
import numpy as np
import casacore.tables as pt
from astropy.time import Time
##########################################
from LiLF import lib_ms, lib_util, lib_log
logger_obj = lib_log.Logger('pipeline-preprocess')
logger = lib_log.logger
s = lib_util.Scheduler(log_dir = logger_obj.log_dir, dry = False)
w = lib_util.Walker('pipeline-preprocess.walker')
# parse parset
parset = lib_util.getParset()
logger.info('Parset: '+str(dict(parset['LOFAR_preprocess'])))
parset_dir = parset.get('LOFAR_preprocess','parset_dir')
fix_table = parset.getboolean('LOFAR_preprocess','fix_table')
renameavg = parset.getboolean('LOFAR_preprocess','renameavg')
keep_IS = parset.getboolean('LOFAR_preprocess','keep_IS')
backup_full_res = parset.getboolean('LOFAR_preprocess','backup_full_res')
###########################################
if os.path.exists('html.txt'):
download_file = 'html.txt'
else:
download_file = None # just renaming
def getName(ms):
    """
    Get new MS name based on obs name and time.

    Builds a path of the form 'mss/id<obsid>_-_<code>/<code>_SB###.MS',
    where <code> is the lower-cased pointing/target name, <obsid> the
    LOFAR observation id, and ### the subband number derived from the
    reference frequency via lib_util.lofar_nu2num.  The destination
    directory is created if it does not exist.
    """
    # get pointing name: prefer FIELD/CODE, fall back to LOFAR_TARGET
    with pt.table(ms+'/FIELD', readonly=True, ack=False) as t:
        code = t.getcell('CODE',0)
    if code == '':
        with pt.table(ms+'/OBSERVATION', readonly=True, ack=False) as t:
            code = t.getcell('LOFAR_TARGET',0)[0]
    code = code.lower().replace(' ','_')
    # get obsid
    with pt.table(ms+'/OBSERVATION', readonly=True, ack=False) as t:
        obsid = t.getcell('LOFAR_OBSERVATION_ID',0)
    # get freq
    with pt.table(ms+'/SPECTRAL_WINDOW', readonly=True, ack=False) as t:
        freq = t.getcell('REF_FREQUENCY',0)
    # get time (saved in ms as MJD in seconds)
    #with pt.table(ms+'/OBSERVATION', readonly=True, ack=False) as t:
    #    time = Time(t.getcell('TIME_RANGE',0)[0]/(24*3600.), format='mjd')
    #    time = time.iso.replace('-','').replace(' ','').replace(':','')[0:12]
    #pattern = re.compile("^c[0-9][0-9]-.*$")
    # is survey?
    #if pattern.match(code):
    #    cycle_obs, sou = code.split('_')
    #    if not os.path.exists(cycle_obs+'/'+sou): os.makedirs(cycle_obs+'/'+sou)
    #    return cycle_obs+'/'+sou+'/'+sou+'_t'+time+'_SB'+str(lib_util.lofar_nu2num(freq/1.e6))+'.MS'
    #else:
    if not os.path.exists('mss/id'+obsid+'_-_'+code): os.makedirs('mss/id'+obsid+'_-_'+code)
    return 'mss/id'+obsid+'_-_'+code+'/'+code+'_SB%03i.MS' % lib_util.lofar_nu2num(freq/1.e6)
########################################
if not download_file is None:
with w.if_todo('download'):
with open(download_file,'r') as df:
logger.info('Downloading...')
downloaded = glob.glob('*MS')
# add renamed files
if os.path.exists('renamed.txt'):
with open('renamed.txt','r') as flog:
downloaded += [line.rstrip('\n') for line in flog]
for i, line in enumerate(df):
ms = re.findall(r'L[0-9]*.*_SB[0-9]*_uv', line)[0]
if ms+'.MS' in downloaded or ms+'.dppp.MS' in downloaded: continue
if ms+'.MS' in glob.glob('*MS') or ms+'.dppp.MS' in glob.glob('*MS'): continue
s.add('wget -nv "'+line[:-1]+'" -O - | tar -x', log=ms+'_download.log', commandType='general')
# print 'wget -nv "'+line[:-1]+'" -O - | tar -x'
logger.debug('Queue download of: '+line[:-1])
s.run(check=True, maxThreads=4)
MSs = lib_ms.AllMSs(glob.glob('*MS'), s, check_flags=False)
if len(MSs.getListStr()) == 0:
logger.info('Done.')
sys.exit(0)
######################################
# Read the observation start time of the first MS as an integer YYYYMMDD
# (TIME_RANGE is stored as MJD in seconds).
with pt.table(MSs.getListStr()[0]+'/OBSERVATION', readonly=True, ack=False) as obs:
    t = Time(obs.getcell('TIME_RANGE',0)[0]/(24*3600.), format='mjd')
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int() is the documented replacement and yields the same value
    time = int(t.iso.replace('-','')[0:8])
if fix_table:
with w.if_todo('fix_table'):
#logger.info('Fix MS table...')
#MSs.run('fixMS_TabRef.py $pathMS', log='$nameMS_fixms.log', commandType='python')
# only ms created in range (2/2013->2/2014)
if time > 20130200 and time < 20140300:
logger.info('Fix beam table...')
MSs.run('/home/fdg/scripts/fixinfo/fixbeaminfo $pathMS', log='$nameMS_fixbeam.log', commandType='python')
# Rescale visibilities by 1e3 if before 2014-03-19 (old correlator), and by 1e-2 otherwise
with w.if_todo('rescale_flux'):
logger.info('Rescaling flux...')
if time < 20140319:
rescale_factor = 1e6
else:
rescale_factor = 1e-4
for MS in MSs.getListStr():
with pt.table(MS+'/HISTORY', readonly=False, ack=False) as hist:
if "Flux rescaled" not in hist.getcol('MESSAGE'):
s.add('taql "update %s set DATA = %f*DATA" && taql "insert into %s/HISTORY (TIME,MESSAGE) values (mjd(), \'Flux rescaled\')"' % (MS,rescale_factor,MS), \
log='taql.log', commandType='general')
s.run(check=True)
######################################
# Avg to 4 chan and 2 sec
# Remove internationals
if renameavg:
with w.if_todo('renameavg'):
logger.info('Renaming/averaging...')
with open('renamed.txt','a') as flog:
MSs = lib_ms.AllMSs([MS for MS in glob.glob('*MS') if not os.path.exists(getName(MS))], s, check_flags=False)
minfreq = np.min(MSs.getFreqs())
logger.info('Min freq: %.2f MHz' % (minfreq/1e6))
for MS in MSs.getListObj():
if np.all(MS.getFreqs() > 168.3e6):
logger.warning(f'Skipping HBA above 168 MHz: deleting {MS.pathMS}')
lib_util.check_rm(MS.pathMS)
continue
# get avg time/freq values
nchan = MS.getNchan()
timeint = MS.getTimeInt()
if nchan == 1:
avg_factor_f = 1
elif nchan % 2 == 0 and MSs.isHBA: # case HBA
avg_factor_f = int(nchan / 4) # to 2 ch/SB
elif nchan % 8 == 0 and minfreq < 40e6:
avg_factor_f = int(nchan / 8) # to 8 ch/SB
elif nchan % 8 == 0 and 'SPARSE' in MS.getAntennaSet():
avg_factor_f = int(nchan / 8) # to 8 ch/SB
elif nchan % 4 == 0:
avg_factor_f = int(nchan / 4) # to 4 ch/SB
elif nchan % 5 == 0:
avg_factor_f = int(nchan / 5) # to 5 ch/SB
else:
logger.error('Channels should be a multiple of 4 or 5.')
sys.exit(1)
if keep_IS:
avg_factor_f = int(nchan / 16) if MSs.isHBA else int(nchan / 16) # to have the full FoV in LBA we need 32 ch/SB
if avg_factor_f < 1: avg_factor_f = 1
avg_factor_t = int(np.round(2/timeint)) if keep_IS else int(np.round(4/timeint)) # to 4 sec (2 for IS)
if avg_factor_t < 1: avg_factor_t = 1
MSout = getName(MS.pathMS)
if avg_factor_f != 1 or avg_factor_t != 1:
logger.info('%s->%s: Average in freq (factor of %i) and time (factor of %i)...' % (MS.nameMS, MSout, avg_factor_f, avg_factor_t))
if keep_IS:
s.add('DP3 '+parset_dir+'/DP3-avg.parset msin='+MS.pathMS+' msout='+MSout+' msin.datacolumn=DATA \
avg.timestep='+str(avg_factor_t)+' avg.freqstep='+str(avg_factor_f), \
log=MS.nameMS+'_avg.log', commandType='DP3')
else: # remove IS
s.add('DP3 '+parset_dir+'/DP3-avg.parset msin='+MS.pathMS+' msout='+MSout+' msin.datacolumn=DATA \
msin.baseline="[CR]S*&" \
avg.timestep='+str(avg_factor_t)+' avg.freqstep='+str(avg_factor_f), \
log=MS.nameMS+'_avg.log', commandType='DP3')
s.run(check=True, maxThreads=1) # limit threads to prevent I/O isssues
if backup_full_res:
logger.info('Backup full resolution data...')
if not os.path.exists('data-bkp'):
os.makedirs('data-bkp')
for MS in MSs.getListObj():
MS.move('data-bkp/' + MS.nameMS + '.MS', keepOrig=False, overwrite=False)
else:
lib_util.check_rm(MS.pathMS)
flog.write(MS.nameMS+'.MS\n') # after averaging to be sure no log is written if an error occurs
else:
logger.info('%s->%s: Move data - no averaging...' % (MS.nameMS, MSout))
flog.write(MS.nameMS+'.MS\n') # before move or the filenmae is changed
MS.move(MSout)
logger.info("Done.")
|
revoltekREPO_NAMELiLFPATH_START.@LiLF_extracted@LiLF-master@pipelines@LOFAR_preprocess.py@.PATH_END.py
|
{
"filename": "constants.py",
"repo_name": "Mauropieroni/GW_response",
"repo_path": "GW_response_extracted/GW_response-main/gw_response/constants.py",
"type": "Python"
}
|
import jax
import jax.numpy as jnp
import chex
from dataclasses import field
jax.config.update("jax_enable_x64", True)
@chex.dataclass(frozen=True)
class PhysicalConstants:
    """
    A data class for storing physical constants used in unit conversions within
    astronomical computations.

    This class provides a convenient way to access commonly used physical
    constants, ensuring consistency and clarity across different parts of the
    code. The constants are set as class attributes with predefined values.

    Attributes:
        light_speed (float): The speed of light in meters per second (m/s).
            Default is 299792458.0.
        hour (float): The duration of an hour in seconds. Default is 3600
            seconds.
        day (float): The duration of a day in seconds, calculated as 24 hours.
            Default is 86400 seconds.
        yr (float): The duration of a year in seconds, accounting for leap
            years. Default is 365.25 days.
        Hubble_over_h (float): Hubble constant divided by the dimensionless
            Hubble parameter 'h'. Units are 1/second (1/s).
        AU (float): The astronomical unit in meters. Default is
            1.495978707e11.
        cmb_dipole (jnp.array): Cartesian direction of the CMB dipole,
            built via a default_factory (a mutable array cannot be a plain
            dataclass default).

    The class is frozen using chex.dataclass, meaning its instances are
    immutable and cannot be modified after creation.
    """

    light_speed: float = 299792458.0  # speed of light in m/s
    hour: float = 3600.0
    day: float = 24 * hour
    yr: float = 365.25 * day  # year in s
    Hubble_over_h: float = 3.24e-18  # H0 divided by h in units of 1/s
    AU: float = 1.495978707e11  # Astronomical unit in meters
    cmb_dipole: jnp.array = field(
        default_factory=lambda: jnp.array([-0.972, 0.137, -0.191])
    )  # Direction of the CMB dipole
@chex.dataclass
class BasisTransformations:
    """
    A data class for managing basis transformations in astronomical
    computations.

    This class provides a predefined transformation matrix for converting
    coordinates from the XYZ basis to the AET basis used in the context of
    LISA (Laser Interferometer Space Antenna) and similar studies.
    NOTE(review): the original documentation expanded AET as "Arm,
    Ecliptic, Transverse"; in the LISA TDI literature A, E, T usually
    denote the noise-orthogonal channel combinations — confirm the
    intended meaning with the rest of the package.

    Attributes:
        XYZ_to_AET (jnp.array): The transformation matrix from the XYZ
            basis to the AET basis:
            [
                [-1 / sqrt(2), 0, 1 / sqrt(2)],
                [1 / sqrt(6), -2 / sqrt(6), 1 / sqrt(6)],
                [1 / sqrt(3), 1 / sqrt(3), 1 / sqrt(3)]
            ]
            Built via a default_factory because a mutable array cannot be
            a plain dataclass default.

    This class facilitates the conversion of coordinates between different
    systems, which is crucial for accurate representation and analysis in
    astronomical models.
    """

    XYZ_to_AET: jnp.array = field(
        default_factory=lambda: jnp.array(
            [
                [-1 / jnp.sqrt(2), 0, 1 / jnp.sqrt(2)],
                [1 / jnp.sqrt(6), -2 / jnp.sqrt(6), 1 / jnp.sqrt(6)],
                [1 / jnp.sqrt(3), 1 / jnp.sqrt(3), 1 / jnp.sqrt(3)],
            ]
        )
    )
|
MauropieroniREPO_NAMEGW_responsePATH_START.@GW_response_extracted@GW_response-main@gw_response@constants.py@.PATH_END.py
|
{
"filename": "avalanche.py",
"repo_name": "anastasia-tsvetkova/lc_pulse_avalanche",
"repo_path": "lc_pulse_avalanche_extracted/lc_pulse_avalanche-master/lc_pulse_avalanche/avalanche.py",
"type": "Python"
}
|
import inspect
import sys
import math
from math import exp, log
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import exponential, lognormal, normal, uniform
from scipy.stats import loguniform
import os, h5py
class LC(object):
    """
    A class to generate gamma-ray burst light curves (GRB LCs) using a pulse
    avalanche model ('chain reaction') proposed by Stern & Svensson, ApJ, 469: L109 (1996).
    The 7 free parameters of such model are:
    - mu
    - mu0
    - alpha
    - delta1
    - delta2
    - tau_min
    - tau_max
    Parameters of BATSE:
    - res = 0.064 [s]
    - eff_area = 3600 [cm2]
    - bg_level = 10.67 [cnt/cm2/s]
    :mu: average number of baby pulses
    :mu0: average number of spontaneous (initial) pulses
    :alpha: delay parameter
    :delta1: lower boundary of log-normal probability distribution of tau (time constant of baby pulse)
    :delta2: upper boundary of log-normal probability distribution of tau
    :tau_min: lower boundary of log-normal probability distribution of tau_0 (time constant of
              spontaneous pulse); should be smaller than res
    :tau_max: upper boundary of log-normal probability distribution of tau_0
    :t_min: GRB LC start time (s)
    :t_max: GRB LC stop time (s)
    :res: GRB LC time resolution (s)
    :eff_area: effective area of instrument (cm2)
    :bg_level: background level (cnt/cm2/s)
    :min_photon_rate: left boundary of -3/2 log N - log S distribution (ph/cm2/s)
    :max_photon_rate: right boundary of -3/2 log N - log S distribution (ph/cm2/s)
    :sigma: signal above background level
    :n_cut: maximum number of pulses in avalanche (useful to speed up the simulations but at odds
            with the "classic" approach)
    :with_bg: boolean flag for keeping or removing the background level at the end of the generation
    :use_poisson: boolean flag for using the Poisson or the (rounded) exponential for sampling
                  the number of initial pulses and childs (requires scipy.stats.poisson)
    """
    def __init__(self, mu=1.2, mu0=1, alpha=4, delta1=-0.5, delta2=0, tau_min=0.2, tau_max=26,
                 t_min=-10, t_max=1000, res=0.256, eff_area=3600, bg_level=10.67, min_photon_rate=1.3,
                 max_photon_rate=1300, sigma=5, n_cut=None, with_bg=True, use_poisson=False, verbose=False):
        self._mu = mu       # mu~1 => critical runaway regime
        self._mu0 = mu0     # average number of spontaneous pulses per GRB
        self._alpha = alpha # delay parameter
        self._delta1 = delta1
        self._delta2 = delta2
        if tau_min > res and not isinstance(self, Restored_LC):
            # BUGFIX: the message is now a single formatted string instead of a
            # (string, value) tuple passed to ValueError.
            raise ValueError(f"tau_min should be smaller than res = {res}")
        self._tau_min = tau_min
        self._tau_max = tau_max
        self._eff_area = eff_area
        self._bg = bg_level * self._eff_area  # cnts/s
        self._min_photon_rate = min_photon_rate
        self._max_photon_rate = max_photon_rate
        self._verbose = verbose
        self._res = res  # s
        self._n = int(np.ceil((t_max - t_min)/self._res)) + 1  # time steps
        self._t_min = t_min  # s
        # Snap t_max to the grid so that (t_max - t_min) is a multiple of res.
        self._t_max = (self._n - 1) * self._res + self._t_min  # s
        self._times, self._step = np.linspace(self._t_min, self._t_max, self._n, retstep=True)
        self._rates = np.zeros(len(self._times))        # child-pulse count rates
        self._sp_pulse = np.zeros(len(self._times))     # spontaneous-pulse count rates
        self._total_rates = np.zeros(len(self._times))
        self._lc_params = list()  # one dict (norm, t_delay, tau, tau_r) per pulse
        self._sigma = sigma
        self._n_cut = n_cut
        self._n_pulses = 0
        self._with_bg = with_bg
        self._use_poisson = use_poisson
        if self._verbose:
            print("Time resolution: ", self._step)
    def norris_pulse(self, norm, tp, tau, tau_r):
        """
        Computes a single pulse according to Norris et al., ApJ, 459, 393 (1996):
        a Gaussian rise (width tau_r) up to the peak time, then an exponential
        decay (time constant tau).
        :norm: pulse amplitude, scalar
        :tp: pulse peak time, scalar
        :tau: pulse width (decay time), scalar
        :tau_r: rise time, scalar
        :returns: an array of count rates on the self._times grid
        """
        self._n_pulses += 1
        if self._verbose:
            print("Generating a new pulse with tau={:0.3f}".format(tau))
        t = self._times
        _tp = np.ones(len(t)) * tp
        if tau_r == 0 or tau == 0:
            # Degenerate pulse: avoid division by zero, contribute nothing.
            return np.zeros(len(t))
        return np.append(norm * np.exp(-(t[t<=tp]-_tp[t<=tp])**2/tau_r**2),
                         norm * np.exp(-(t[t>tp]-_tp[t>tp])/tau))
    def _rec_gen_pulse(self, tau1, t_shift):
        """
        Recursively generates baby pulses from the Norris function.
        :tau1: parent pulse width (decay time), scalar
        :t_shift: time delay relative to the parent pulse
        :returns: the accumulated array of count rates (self._rates)
        """
        # number of baby pulses: p2(mu_b) = exp(-mu_b/mu)/mu, mu - the average, mu_b - number of baby pulses
        if self._use_poisson:
            mu_b = poisson.rvs(mu=self._mu,
                               size=1,
                               random_state=None)
            mu_b = mu_b[0]
        else:
            mu_b = round(exponential(scale=self._mu))
        if self._verbose:
            print("Number of pulses:", mu_b)
            print("--------------------------------------------------------------------------")
        for i in range(mu_b):
            # time const of the baby pulse: p4(tau/tau1) = 1/(delta2 - delta1), tau1 - time const of the parent pulse
            tau = tau1 * exp(uniform(low=self._delta1, high=self._delta2))
            tau_r = 0.5 * tau
            # time delay of baby pulse: p3(delta_t) = exp(-delta_t/(alpha*tau))/(alpha*tau) with respect to the parent pulse,
            # alpha - delay parameter, tau - time const of the baby pulse
            delta_t = exponential(scale=self._alpha*tau) + t_shift
            # pulse amplitude: p1(A) = 1 in [0, 1]
            norm = uniform(low=0.0, high=1)
            self._rates += self.norris_pulse(norm, delta_t, tau, tau_r)
            self._lc_params.append(dict(norm=norm, t_delay=delta_t, tau=tau, tau_r=tau_r))
            if self._verbose:
                print("Pulse amplitude: {:0.3f}".format(norm))
                print("Pulse shift: {:0.3f}".format(delta_t))
                print("Time constant (the decay time): {0:0.3f}".format(tau))
                print("Rise time: {:0.3f}".format(tau_r))
                print("--------------------------------------------------------------------------")
            # Recurse while the pulse is still resolvable; n_cut (if given)
            # caps the total number of pulses in the avalanche.
            if tau > self._res:
                if self._n_cut is None:
                    self._rec_gen_pulse(tau, delta_t)
                else:
                    if self._n_pulses < self._n_cut:
                        self._rec_gen_pulse(tau, delta_t)
        return self._rates
    def generate_avalanche(self, seed=12345, return_array=False):
        """
        Generates a pulse avalanche.
        :seed: random seed
        :return_array: if True returns arrays of parameters, if False - a dict with parameters for each pulse
        :returns: set of parameters for the generated avalanche
        """
        # set seed for random draw (the same as for the avalanche generation)
        np.random.seed(seed)
        if self._verbose:
            inspect.getdoc(self.generate_avalanche)
        # --- Starting pulse avalanche ---
        # number of spontaneous primary pulses: p5(mu_s) = exp(-mu_s/mu0)/mu0
        if self._use_poisson:
            mu_s = 0
            while (mu_s == 0):
                mu_s = poisson.rvs(mu=self._mu0,
                                   size=1,
                                   random_state=None)
                mu_s = mu_s[0]
        else:
            mu_s = round(exponential(scale=self._mu0))
            if (mu_s == 0):
                mu_s = 1
        if self._verbose:
            print("Number of spontaneous pulses:", mu_s)
            print("--------------------------------------------------------------------------")
        for i in range(mu_s):
            # time constant of spontaneous pulses: p6(log tau0) = 1/(log tau_max - log tau_min)
            # decay time
            # BUGFIX: low/high were swapped. np.random.uniform silently accepts
            # low > high (same uniform distribution over the interval), but the
            # arguments now match their documented meaning.
            tau0 = exp(uniform(low=log(self._tau_min), high=log(self._tau_max)))
            # rise time
            tau_r = 0.5 * tau0
            # time delay of spontaneous primary pulses: p7(t) = exp(-t/(alpha*tau0))/(alpha*tau0)
            t_delay = exponential(scale=self._alpha*tau0)
            # pulse amplitude: p1(A) = 1 in [0, 1]
            norm = uniform(low=0.0, high=1)
            if self._verbose:
                print("Spontaneous pulse amplitude: {:0.3f}".format(norm))
                print("Spontaneous pulse shift: {:0.3f}".format(t_delay))
                print("Time constant (the decay time) of spontaneous pulse: {0:0.3f}".format(tau0))
                print("Rise time of spontaneous pulse: {:0.3f}".format(tau_r))
                print("--------------------------------------------------------------------------")
            self._sp_pulse += self.norris_pulse(norm, t_delay, tau0, tau_r)
            self._lc_params.append(dict(norm=norm, t_delay=t_delay, tau=tau0, tau_r=tau_r))
            self._rec_gen_pulse(tau0, t_delay)
        # lc directly from the avalanche
        self._raw_lc = self._sp_pulse + self._rates
        self._max_raw_pcr = self._raw_lc.max()
        # Draw the peak photon rate from the -3/2 log N - log S distribution
        # and rescale the raw (dimensionless) LC accordingly.
        population = np.geomspace(self._min_photon_rate, self._max_photon_rate, 1000)
        weights = list(map(lambda x: x**(-3/2), population))
        weights = weights / np.sum(weights)
        ampl = np.random.choice(population, p=weights) / self._max_raw_pcr
        self._ampl = ampl
        self._peak_value = self._max_raw_pcr * self._ampl
        # lc from avalanche scaled + Poissonian bg added.
        # Here, contrary to what happens in the method `_restore_lc()` and thus
        # in the method `plot_lc` of the object LC, the variable `_plot_lc`
        # contains the COUNTS (and not the count RATES!)
        if self._with_bg:
            self._plot_lc = (self._raw_lc * self._ampl * self._eff_area) + self._bg  # total count rates (signal+bkg)
            self._plot_lc = np.random.poisson(self._res * self._plot_lc)             # total count (signal+bkg) with Poisson
            self._err_lc = np.sqrt(self._plot_lc)
        else:  # background-subtracted
            self._plot_lc = (self._raw_lc * self._ampl * self._eff_area) + self._bg  # total count rates (signal+bkg)
            self._plot_lc = np.random.poisson(self._res * self._plot_lc)             # total count (signal+bkg) with Poisson
            self._err_lc = np.sqrt(self._plot_lc)
            self._plot_lc = self._plot_lc - (self._bg*self._res)                     # total count (signal) with Poisson
        self._get_lc_properties()
        # Propagate the global rescaling into the stored per-pulse amplitudes.
        for p in self._lc_params:
            p['norm'] *= self._ampl
        norms = np.empty((0,))
        t_delays = np.empty((0,))
        taus = np.empty((0,))
        tau_rs = np.empty((0,))
        if return_array:
            for p in self._lc_params:
                norms = np.append(norms, p['norm'])
                t_delays = np.append(t_delays, p['t_delay'])
                taus = np.append(taus, p['tau'])
                tau_rs = np.append(tau_rs, p['tau_r'])
            return norms, t_delays, taus, tau_rs, self._peak_value
        else:
            return self._lc_params
    def plot_lc(self, rescale=True, save=True, name="./plot_lc.pdf", show_duration=False):
        """
        Plots the GRB light curve (count rates vs time).
        :rescale: to rescale the x-axis plotting only lc around T100
        :save: to save the plot to file
        :name: filename (including path) to save the plot
        :show_duration: overplot T100 (blue) and T90 (red) boundaries
        """
        plt.xlabel('T-T0 (s)')
        plt.ylabel('Count rate (cnt/s)')
        self._restore_lc()
        plt.step(self._times, self._plot_lc, where='post')
        plt.plot(np.linspace(self._t_min, self._t_max, num=2, endpoint=True), [self._bg, self._bg], 'r--')
        if rescale:
            t_i = max(self._t_start - 0.5*self._t100, self._t_min)
            t_f = self._t_stop + 0.5*self._t100
            plt.xlim([t_i, t_f])
        if show_duration:
            plt.axvline(x=self._t_start, color='blue')
            plt.axvline(x=self._t_stop, color='blue')
            plt.axvline(x=self._t90_i, color='red')
            plt.axvline(x=self._t90_f, color='red')
        if save:
            plt.savefig(name)
        plt.show()
    def _get_lc_properties(self):
        """
        Calculates T90 and T100 durations along with their start and stop times,
        total number of counts per T100, mean, max, and background count rates.
        """
        # T100 region: bins where the raw LC is above 1e-4 of its maximum.
        self._aux_index = np.where(self._raw_lc > self._raw_lc.max()*1e-4)
        self._max_snr = ((self._plot_lc - self._bg) * self._res / (self._bg * self._res)**0.5).max()
        self._aux_times = self._times[self._aux_index[0][0]:self._aux_index[0][-1]]
        self._aux_lc = self._plot_lc[self._aux_index[0][0]:self._aux_index[0][-1]]
        self._t_start = self._times[self._aux_index[0][0]]
        self._t_stop = self._times[self._aux_index[0][-1]]
        self._t100 = self._t_stop - self._t_start
        self._total_cnts = np.sum(self._aux_lc - self._bg*np.ones(len(self._aux_lc))) * self._res
        try:
            # T90: strip 5% of the background-subtracted counts from each side.
            sum_cnt = 0
            i = 0
            while sum_cnt < 0.05 * self._total_cnts:
                sum_cnt += (self._aux_lc[i] - self._bg) * self._res
                i += 1
            self._t90_i = self._aux_times[i]
            sum_cnt = 0
            j = -1
            while sum_cnt < 0.05 * self._total_cnts:
                sum_cnt += (self._aux_lc[j] - self._bg) * self._res
                j += -1
            self._t90_f = self._aux_times[j]
            self._t90 = self._t90_f - self._t90_i
            self._t90_cnts = np.sum(self._aux_lc[i:j+1] - self._bg) * self._res
            assert self._t90_i < self._t90_f
        except (IndexError, AssertionError):
            # BUGFIX: was a bare `except:`. For degenerate/noisy LCs the 5%
            # walk can run off the array or invert the bounds; fall back to
            # the T100 estimates in those cases only.
            self._t90 = self._t100
            self._t90_i = self._t_start
            self._t90_f = self._t_stop
            self._t90_cnts = self._total_cnts
    @property
    def T90(self):
        return "{:0.3f}".format(self._t90), "{:0.3f}".format(self._t90_i), "{:0.3f}".format(self._t90_f)
    @property
    def T100(self):
        return "{:0.3f}".format(self._t100), "{:0.3f}".format(self._t_start), "{:0.3f}".format(self._t_stop)
    @property
    def total_counts(self):
        return "{:0.2f}".format(self._total_cnts)
    @property
    def max_rate(self):
        return "{:0.2f}".format(self._aux_lc.max())
    @property
    def mean_rate(self):
        return "{:0.2f}".format(np.mean(self._aux_lc))
    @property
    def bg_rate(self):
        return "{:0.2f}".format(self._bg)
    @property
    def max_snr(self):
        return "{:0.2f}".format(self._max_snr)
    def _restore_lc(self):
        """Restores the GRB LC from the avalanche parameters.
        Here we are plotting the count RATES, not the counts!"""
        self._raw_lc = np.zeros(len(self._times))
        for par in self._lc_params:
            norm = par['norm']
            t_delay = par['t_delay']
            tau = par['tau']
            tau_r = par['tau_r']
            self._raw_lc += self.norris_pulse(norm, t_delay, tau, tau_r)
        if self._with_bg:
            self._plot_lc = (self._raw_lc * self._ampl * self._eff_area) + self._bg  # total count rates (signal+bkg)
            self._plot_lc = np.random.poisson(self._res * self._plot_lc)             # total count (signal+bkg) with Poisson
            self._plot_lc = self._plot_lc / self._res                                # total count rates (signal+bkg) with Poisson
        else:
            self._plot_lc = (self._raw_lc * self._ampl * self._eff_area) + self._bg  # total count rates (signal+bkg)
            self._plot_lc = np.random.poisson(self._res * self._plot_lc)             # total count (signal+bkg) with Poisson
            self._plot_lc = self._plot_lc / self._res                                # total count rates (signal+bkg) with Poisson
            self._plot_lc = self._plot_lc - self._bg                                 # total count rates (signal) with Poisson
        self._get_lc_properties()
    def hdf5_lc_generation(self, outfile, overwrite=False, seed=12345):
        """
        Generates new avalanches and writes them to an hdf5 file.
        :outfile: file name
        :overwrite: overwrite existing file
        :seed: random seed for the avalanche generation, int or list (one avalanche per seed)
        """
        if overwrite == False:
            # BUGFIX: the assertion was inverted - it required the file to
            # already exist; we must refuse to clobber an existing file.
            assert not os.path.isfile(outfile), 'ERROR: file already exists!'
        self._f = h5py.File(outfile, 'w')
        self._f.create_group('GRB_PARAMETERS')
        self._f['GRB_PARAMETERS'].attrs['PARAMETER_ORDER'] = '[K, t_start, t_rise, t_decay]'
        self._grb_counter = 1
        if isinstance(seed, list):
            for sd in seed:
                self.aux_hdf5(seed=sd)
        else:
            self.aux_hdf5(seed=seed)
        self._f.close()
    def aux_hdf5(self, seed):
        """Generates one avalanche and appends it as a dataset to self._f."""
        norms, t_delays, taus, tau_rs, peak_value = self.generate_avalanche(seed=seed, return_array=True)
        n_pulses = norms.size
        # Column order must match the PARAMETER_ORDER attribute:
        # [K, t_start, t_rise, t_decay]
        grb_array = np.concatenate((
            norms.reshape(n_pulses, 1),
            t_delays.reshape(n_pulses, 1),
            tau_rs.reshape(n_pulses, 1),
            taus.reshape(n_pulses, 1)),
            axis=1
        )
        self._f.create_dataset(f'GRB_PARAMETERS/GRB_{self._grb_counter}', data=grb_array)
        self._f[f'GRB_PARAMETERS/GRB_{self._grb_counter}'].attrs['PEAK_VALUE'] = peak_value
        self._f[f'GRB_PARAMETERS/GRB_{self._grb_counter}'].attrs['N_PULSES'] = n_pulses
        self._grb_counter += 1
class Restored_LC(LC):
    """
    Rebuild a light curve from a previously saved list of pulse parameters
    (e.g. read back from a yaml file).

    :par_list: list of dicts with keys 'norm', 't_delay', 'tau', 'tau_r'
    :res: GRB LC time resolution
    """
    def __init__(self, par_list, res=0.256, t_min=-10, t_max=1000, sigma=5):
        super().__init__(res=res, t_min=t_min, t_max=t_max, sigma=sigma)
        # Guard clauses: same exceptions and messages as the validation chain
        # they replace.
        if not par_list:
            raise TypeError("Avalanche parameters should be given")
        if not isinstance(par_list, list):
            raise TypeError("The avalanche parameters should be a list of dictionaries")
        self._lc_params = par_list
        self._restore_lc()
|
anastasia-tsvetkovaREPO_NAMElc_pulse_avalanchePATH_START.@lc_pulse_avalanche_extracted@lc_pulse_avalanche-master@lc_pulse_avalanche@avalanche.py@.PATH_END.py
|
{
"filename": "interpcc.py",
"repo_name": "mpeel/fastcc",
"repo_path": "fastcc_extracted/fastcc-master/interpcc.py",
"type": "Python"
}
|
# 2021-03-9
# 2021-03-10 CLC I start a new code to compute all cases.
# 2021-03-11 MP Reformat into functions
# 2021-03-16 MP Add IRAS, Planck HFIs
# 2021-03-18 MP If using Planck, halve the array to reduce CPU time
# 2021-03-19 MP Add regular grid interpolation to speed things up
import numpy as np
from astropy.io import fits
from scipy import interpolate
def interpcc_setup(infile, band, td_limit=40, method=2):
    """
    Build a colour-correction interpolator from a table of precomputed values.

    :infile: FITS file holding band names, Td grid, beta grid and cc maps
    :band: band name; 'DB' / 'P' prefixes are stripped ('P' selects the Planck
        data layout), 'I...' names are mapped to IRAS band indices
    :td_limit: keep only dust temperatures Td <= td_limit (reduces CPU time)
    :method: 1 = interp2d, 2 = Rbf (default), otherwise RegularGridInterpolator
    :returns: a callable interpolator over (Td, beta)
    """
    # Read in the fits file with precomputed values.
    hdulist = fits.open(infile)
    record = hdulist[1].data[0]
    bands = record[0]
    td = record[1]
    beta = record[2]
    # Normalise the requested band name; the Planck layout has one extra
    # nesting level in the colour-correction map.
    doing_planck = False
    if band.startswith('DB'):
        band = band.split('DB')[1]
    elif band.startswith('P'):
        band = band.split('P')[1]
        doing_planck = True
    elif band.startswith('I'):
        iras_bands = {'I100': '4', 'I60': '3', 'I25': '2', 'I12': '1'}
        band = iras_bands.get(band, 0)
    idx_band = [ii for ii, bb in enumerate(bands) if band == bb]
    if not idx_band:
        raise ValueError(f"Invalid band name '{band}'")
    if doing_planck:
        map_cc = record[3][1][idx_band[0]]
    else:
        map_cc = record[3][idx_band[0]]
    # Limit the dust temperature grid.
    sel_td = (td <= td_limit)
    X, Y = np.meshgrid(beta, td[sel_td])
    Z = map_cc[:, sel_td].T
    # Interpolation
    if method == 1:
        # Method 1: bicubic interp2d
        # NOTE(review): interp2d is deprecated/removed in recent SciPy - confirm
        return interpolate.interp2d(X, Y, Z, kind='cubic')
    if method == 2:
        # Method 2: radial basis functions
        return interpolate.Rbf(X, Y, Z, function='cubic')
    # Method 3: regular-grid linear interpolation (fastest)
    return interpolate.RegularGridInterpolator((td[sel_td], beta), Z, method='linear')
def interpcc(interp, td, bd):
    """
    Evaluate a colour-correction interpolator at dust temperature *td* and
    spectral index *bd*, rounded to 4 decimal places.

    Supports the two call conventions produced by interpcc_setup:
    interp2d/Rbf objects take (beta, td) as separate arguments, while
    RegularGridInterpolator takes a single (td, beta) point.

    :interp: interpolator returned by interpcc_setup
    :td: dust temperature
    :bd: spectral index beta
    :returns: colour correction (scalar)
    """
    try:
        # interp2d / Rbf signature: f(beta, td)
        return np.around(interp(bd, td)[()], 4)
    except (TypeError, ValueError):
        # BUGFIX: was a bare `except:` that swallowed every error (including
        # KeyboardInterrupt and genuine bugs). Only the signature mismatch
        # raised by RegularGridInterpolator is handled by the fallback:
        # f([(td, beta)]) -> length-1 array.
        return np.around(interp([td, bd])[()], 4)[0]
|
mpeelREPO_NAMEfastccPATH_START.@fastcc_extracted@fastcc-master@interpcc.py@.PATH_END.py
|
{
"filename": "adopt.py",
"repo_name": "OpenAccess-AI-Collective/axolotl",
"repo_path": "axolotl_extracted/axolotl-main/src/axolotl/utils/optimizers/adopt.py",
"type": "Python"
}
|
"""
Copied from https://github.com/iShohei220/adopt
ADOPT: Modified Adam Can Converge with Any Ξ²2 with the Optimal Rate (2024)
Taniguchi, Shohei and Harada, Keno and Minegishi, Gouki and Oshima, Yuta and Jeong, Seong Cheol and Nagahara, Go and Iiyama, Tomoshi and Suzuki, Masahiro and Iwasawa, Yusuke and Matsuo, Yutaka
"""
# mypy: ignore-errors
# pylint: skip-file
# flake8: noqa
# mypy: allow-untyped-decorators
# mypy: allow-untyped-defs
from typing import Callable, List, Optional, Tuple, Union, cast
import torch
from torch import Tensor
from torch.optim.optimizer import ( # DeviceDict,; _capturable_doc,; _differentiable_doc,; _foreach_doc,; _fused_doc,; _maximize_doc,; _stack_if_compiling,
DeviceDict,
Optimizer,
ParamsT,
_capturable_doc,
_default_to_fused_or_foreach,
_device_dtype_check_for_fused,
_differentiable_doc,
_disable_dynamo_if_unsupported,
_foreach_doc,
_fused_doc,
_get_capturable_supported_devices,
_get_scalar_dtype,
_get_value,
_maximize_doc,
_stack_if_compiling,
_use_grad_for_differentiable,
_view_as_real,
)
__all__ = ["ADOPT", "adopt"]
class ADOPT(Optimizer):
def __init__(
self,
params: ParamsT,
lr: Union[float, Tensor] = 1e-3,
betas: Tuple[float, float] = (0.9, 0.9999),
eps: float = 1e-6,
clip_lambda: Optional[Callable[[int], float]] = lambda step: step**0.25,
weight_decay: float = 0.0,
decouple: bool = False,
*,
foreach: Optional[bool] = None,
maximize: bool = False,
capturable: bool = False,
differentiable: bool = False,
fused: Optional[bool] = None,
):
if isinstance(lr, Tensor):
if foreach and not capturable:
raise ValueError(
"lr as a Tensor is not supported for capturable=False and foreach=True"
)
if lr.numel() != 1:
raise ValueError("Tensor lr must be 1-element")
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
if not 0.0 <= weight_decay:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
self.clip_lambda = clip_lambda
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
decouple=decouple,
maximize=maximize,
foreach=foreach,
capturable=capturable,
differentiable=differentiable,
fused=fused,
)
super().__init__(params, defaults)
if fused:
# TODO: support fused
raise RuntimeError("`fused` is not currently supported")
if differentiable:
raise RuntimeError("`fused` does not support `differentiable`")
self._step_supports_amp_scaling = True
# TODO(crcrpar): [low prec params & their higher prec copy]
# Support AMP with FP16/BF16 model params which would need
# higher prec copy of params to do update math in higher prec to
# alleviate the loss of information.
if foreach:
raise RuntimeError("`fused` and `foreach` cannot be `True` together.")
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("maximize", False)
group.setdefault("foreach", None)
group.setdefault("capturable", False)
group.setdefault("differentiable", False)
fused = group.setdefault("fused", None)
for p in group["params"]:
p_state = self.state.get(p, [])
if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
p_state["step"] = (
torch.tensor(
step_val,
dtype=_get_scalar_dtype(is_fused=fused),
device=p.device,
)
if group["capturable"] or group["fused"]
else torch.tensor(step_val, dtype=_get_scalar_dtype())
)
def _init_group(
self,
group,
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
):
has_complex = False
for p in group["params"]:
if p.grad is not None:
has_complex |= torch.is_complex(p)
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError("ADOPT does not support sparse gradients")
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
if group["fused"]:
_device_dtype_check_for_fused(p)
# note(crcrpar): [special device hosting for step]
# Deliberately host `step` on CPU if both capturable and fused are off.
# This is because kernel launches are costly on CUDA and XLA.
state["step"] = (
torch.zeros(
(),
dtype=_get_scalar_dtype(is_fused=group["fused"]),
device=p.device,
)
if group["capturable"] or group["fused"]
else torch.tensor(0.0, dtype=_get_scalar_dtype())
)
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
if group["differentiable"] and state["step"].requires_grad:
raise RuntimeError(
"`requires_grad` is not supported for `step` in differentiable mode"
)
# Foreach without capturable does not support a tensor lr
if (
group["foreach"]
and torch.is_tensor(group["lr"])
and not group["capturable"]
):
raise RuntimeError(
"lr as a Tensor is not supported for capturable=False and foreach=True"
)
state_steps.append(state["step"])
return has_complex
@_use_grad_for_differentiable
def step(self, closure=None):
"""Perform a single optimization step.
Args:
closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad: List[Tensor] = []
grads: List[Tensor] = []
exp_avgs: List[Tensor] = []
exp_avg_sqs: List[Tensor] = []
state_steps: List[Tensor] = []
beta1, beta2 = group["betas"]
has_complex = self._init_group(
group,
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
)
adopt(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
has_complex=has_complex,
beta1=beta1,
beta2=beta2,
lr=group["lr"],
clip_lambda=self.clip_lambda,
weight_decay=group["weight_decay"],
decouple=group["decouple"],
eps=group["eps"],
maximize=group["maximize"],
foreach=group["foreach"],
capturable=group["capturable"],
differentiable=group["differentiable"],
fused=group["fused"],
grad_scale=getattr(self, "grad_scale", None),
found_inf=getattr(self, "found_inf", None),
)
return loss
def _single_tensor_adopt(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    has_complex: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    clip_lambda: Optional[Callable[[int], float]],
    weight_decay: float,
    decouple: bool,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
):
    """Reference (non-foreach) ADOPT update, applied parameter by parameter.

    Updates params/exp_avgs/exp_avg_sqs/state_steps in place. grad_scale and
    found_inf belong to the fused path, which is not supported here.
    """
    assert grad_scale is None and found_inf is None
    if torch.jit.is_scripting():
        # this assert is due to JIT being dumb and not realizing that the ops below
        # have overloads to handle both float and Tensor lrs, so we just assert it's
        # a float since most people using JIT are using floats
        assert isinstance(lr, float)
    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]
        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
        # Keep `step` as a tensor when a graph (capturable/differentiable) must
        # trace through it; otherwise unwrap to a plain number.
        step = step_t if capturable or differentiable else _get_value(step_t)
        # Coupled (L2) weight decay folds into the gradient before the update.
        if weight_decay != 0 and not decouple:
            grad = grad.add(param, alpha=weight_decay)
        # Complex tensors are processed as real views of shape (..., 2).
        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            if exp_avg is not None:
                exp_avg = torch.view_as_real(exp_avg)
            if exp_avg_sq is not None:
                exp_avg_sq = torch.view_as_real(exp_avg_sq)
            param = torch.view_as_real(param)
        # First step only seeds the second-moment estimate with grad*conj(grad);
        # no parameter update is performed.
        if step == 0:
            exp_avg_sq.addcmul_(grad, grad.conj())
            # update step
            step_t += 1
            continue
        # Decoupled weight decay: param <- param * (1 - lr * weight_decay).
        if weight_decay != 0 and decouple:
            param.add_(param, alpha=-lr * weight_decay)
        # Normalize the gradient by the *previous* second moment, floored at eps.
        denom = torch.clamp(exp_avg_sq.sqrt(), eps)
        normed_grad = grad.div(denom)
        # Optional clipping of the normalized gradient (bounds early updates).
        if clip_lambda is not None:
            clip = clip_lambda(step)
            normed_grad.clamp_(-clip, clip)
        exp_avg.lerp_(normed_grad, 1 - beta1)
        param.add_(exp_avg, alpha=-lr)
        # Second moment is updated only after it was used for normalization.
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
        # update step
        step_t += 1
def _multi_tensor_adopt(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    has_complex: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    clip_lambda: Optional[Callable[[int], float]],
    weight_decay: float,
    decouple: bool,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
):
    """Foreach (horizontally fused) ADOPT update.

    Mirrors _single_tensor_adopt but batches the math with torch._foreach_*
    ops per (device, dtype) bucket. All state lists are updated in place.
    """
    if len(params) == 0:
        return
    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError(
            "lr as a Tensor is not supported for capturable=False and foreach=True"
        )
    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
    assert grad_scale is None and found_inf is None
    assert not differentiable, "_foreach ops don't support autograd"
    # Bucket tensors so each _foreach_* call sees a homogeneous device/dtype.
    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, state_steps]  # type: ignore[list-item]
    )
    for (
        device_params_,
        device_grads_,
        device_exp_avgs_,
        device_exp_avg_sqs_,
        device_state_steps_,
    ), _ in grouped_tensors.values():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(List[Tensor], device_state_steps_)
        # Handle complex parameters
        if has_complex:
            _view_as_real(
                device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
            )
        if maximize:
            device_grads = torch._foreach_neg(device_grads)  # type: ignore[assignment]
        # Coupled (L2) weight decay folds into the gradients.
        if weight_decay != 0 and not decouple:
            # Re-use the intermediate memory (device_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(  # type: ignore[assignment]
                    device_grads, device_params, alpha=weight_decay
                )
        # First step: only seed the second-moment estimates, no param update.
        # (All steps in a bucket advance together, so checking [0] suffices.)
        if device_state_steps[0] == 0:
            torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads)
            # Update steps
            # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
            # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
            # wrapped it once now. The alpha is required to assure we go to the right overload.
            if not torch._utils.is_compiling() and device_state_steps[0].is_cpu:
                torch._foreach_add_(
                    device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
                )
            else:
                torch._foreach_add_(device_state_steps, 1)
            continue
        # Decoupled weight decay: param <- param * (1 - lr * weight_decay).
        if weight_decay != 0 and decouple:
            torch._foreach_add_(device_params, device_params, alpha=-lr * weight_decay)
        # Normalize gradients by the *previous* second moment, floored at eps.
        exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)
        torch._foreach_maximum_(exp_avg_sq_sqrt, eps)
        normed_grad = torch._foreach_div(device_grads, exp_avg_sq_sqrt)
        # Optional clipping of the normalized gradients (bounds early updates).
        if clip_lambda is not None:
            clip = clip_lambda(device_state_steps[0])
            torch._foreach_maximum_(normed_grad, -clip)
            torch._foreach_minimum_(normed_grad, clip)
        torch._foreach_lerp_(device_exp_avgs, normed_grad, 1 - beta1)
        torch._foreach_add_(device_params, device_exp_avgs, alpha=-lr)
        # Second moment is updated only after it was used for normalization.
        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(
            device_exp_avg_sqs, device_grads, device_grads, value=1 - beta2
        )
        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if not torch._utils.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adopt)
def adopt(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    clip_lambda: Optional[Callable[[int], float]],
    weight_decay: float,
    decouple: bool,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs ADOPT algorithm computation.

    Resolves the foreach/fused flags, validates ``state_steps``, then
    dispatches to :func:`_multi_tensor_adopt` or
    :func:`_single_tensor_adopt`.  All state lists are mutated in place.
    """
    # Respect when the user inputs False/True for foreach or fused. We only want to change
    # the default when neither have been user-specified. Note that we default to foreach
    # and pass False to use_fused. This is not a mistake--we want to give the fused impl
    # bake-in time before making it the default, even if it is typically faster.
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )
        # Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False
    # this check is slow during compilation, so we skip it
    # if it's strictly needed we can add this check back in dynamo
    if not torch._utils.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )
    # TorchScript cannot trace through either batched implementation selector.
    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")
    # Fused kernel not implemented yet; only foreach vs single-tensor dispatch.
    # if fused and not torch.jit.is_scripting():
    #     func = _fused_adopt
    # elif foreach and not torch.jit.is_scripting():
    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adopt
    else:
        func = _single_tensor_adopt
    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        state_steps,
        has_complex=has_complex,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        clip_lambda=clip_lambda,
        weight_decay=weight_decay,
        decouple=decouple,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        grad_scale=grad_scale,
        found_inf=found_inf,
    )
|
OpenAccess-AI-CollectiveREPO_NAMEaxolotlPATH_START.@axolotl_extracted@axolotl-main@src@axolotl@utils@optimizers@adopt.py@.PATH_END.py
|
{
"filename": "_sizemode.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergl/marker/_sizemode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``sizemode`` property of ``scattergl.marker``."""

    def __init__(
        self, plotly_name="sizemode", parent_name="scattergl.marker", **kwargs
    ):
        # Pull the defaults out of kwargs first so explicit caller values win.
        options = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "values": kwargs.pop("values", ["diameter", "area"]),
        }
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **options,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergl@marker@_sizemode.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/ohlc/hoverlabel/font/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``size`` property of ``ohlc.hoverlabel.font``."""

    def __init__(
        self, plotly_name="size", parent_name="ohlc.hoverlabel.font", **kwargs
    ):
        # Pull the defaults out of kwargs first so explicit caller values win.
        options = {
            "array_ok": kwargs.pop("array_ok", True),
            "edit_type": kwargs.pop("edit_type", "none"),
            "min": kwargs.pop("min", 1),
        }
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **options,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@ohlc@hoverlabel@font@_size.py@.PATH_END.py
|
{
"filename": "script_example_constaining_line_ratios.ipynb",
"repo_name": "thomasorb/orcs",
"repo_path": "orcs_extracted/orcs-master/docs/_build/html/script_example_constaining_line_ratios.ipynb",
"type": "Jupyter Notebook"
}
|
# Constraining line ratios
Some line doublets (e.g. [NII]6548,6583, [OIII]4959,5007) have known line ratios which should therefore be constrained to inject more information and reduce the uncertainty on the best-fit parameters by minimizing the number of free parameters (especially if the SNR is low and small variations of the ratio could not be measured anyway).
```python
# import base class for the manipulation of a SITELLE spectral cube: HDFCube
from orcs.process import SpectralCube
import pylab as pl
# load spectral cube
cube = SpectralCube('/home/thomas/M31_SN3.merged.cm1.1.0.hdf5')
```
[0mmaster.03e73|INFO| Cube is level 3[0m
[0mmaster.03e73|INFO| shape: (2048, 2064, 840)[0m
[0mmaster.03e73|INFO| wavenumber calibration: True[0m
[0mmaster.03e73|INFO| flux calibration: True[0m
[0mmaster.03e73|INFO| wcs calibration: True[0m
## Fit with no amplitude constraint
```python
spectrum = cube.get_spectrum(998, 1198, 2)
fit = spectrum.fit(['[NII]6548', 'Halpha', '[NII]6583'],
fmodel='gaussian',
pos_cov=-350,
pos_def=['1'],
fwhm_def='fixed')
print(fit)
print('[NII] flux ratio', fit['flux'][2]/fit['flux'][0])
pl.figure(figsize=(10,6))
spectrum.plot(ls=':', c='black')
fit.get_spectrum().plot(ls='-', c='orange')
pl.xlim(14500, 15500)
```
=== Fit results ===
lines: ['[NII]6548', 'H3', '[NII]6584'], fmodel: gaussian
iterations: 67, fit time: 8.44e-02 s
Velocity (km/s): [-295.6(1.1) -295.6(1.1) -295.6(1.1)]
Flux: [4.62(57)e-15 1.410(57)e-14 1.561(57)e-14]
Broadening (km/s): [nan +- nan nan +- nan nan +- nan]
[NII] flux ratio 3.3796131687016837
(14500, 15500)

## Fit with naive amplitude ratio constraint
Only the amplitude parameter can be constrained, not the flux (the flux is a function of fwhm, broadening and amplitude). However, the natural axis of interferometric spectra is in wavenumber, so their fwhm (instrumental line shape) is constant in wavenumber. It means that, in wavelength (which is the axis on which the flux is evaluated when the flux units are erg/cm2/s/A), for a given amplitude the flux will also depend on the wavelength, i.e. for a given amplitude ratio of 3 between two lines, the flux ratio will be slightly different from 3.
This problem will be addressed next. For the moment we will see how we can constrain the amplitude ratio between two lines. When two lines are covarying in amplitude, the covariance parameter will be multiplicative over the initial guess of the lines' amplitudes. We can thus set one line to a guessed amplitude of 1 and the other to 3, and their amplitude ratio will always be 3. The free covarying parameter will multiply both lines by e.g. 3.2e-17, so the first line will have a final amplitude of 3.2e-17 while the second one will have a final amplitude of 9.6e-17.
```python
spectrum = cube.get_spectrum(998, 1198, 2)
fit = spectrum.fit(['[NII]6548', 'Halpha', '[NII]6583'],
fmodel='gaussian',
pos_cov=-350,
pos_def=['1'],
fwhm_def='fixed',
amp_def=['1', '2', '1'],
amp_guess=[1, 1, 3])
print(fit)
print('[NII] amplitude ratio', fit['lines_params'][2,1]/fit['lines_params'][0,1])
print('[NII] flux ratio', fit['flux'][2]/fit['flux'][0])
pl.figure(figsize=(10,6))
spectrum.plot(ls=':', c='black')
fit.get_spectrum().plot(ls='-', c='orange')
pl.xlim(14500, 15500)
```
=== Fit results ===
lines: ['[NII]6548', 'H3', '[NII]6584'], fmodel: gaussian
iterations: 61, fit time: 7.80e-02 s
Velocity (km/s): [-295.6(1.1) -295.6(1.1) -295.6(1.1)]
Flux: [5.10(18)e-15 1.410(57)e-14 1.546(54)e-14]
Broadening (km/s): [nan +- nan nan +- nan nan +- nan]
[NII] amplitude ratio 3.0
[NII] flux ratio 3.0325248184327562
(14500, 15500)

## Fit with a flux ratio constraint
In the preceding example you can see that the amplitude ratio is 3 but the flux ratio (which is generally the parameter we want to constrain) is not exactly 3. To compute the amplitude ratio you must set in order to obtain a flux ratio of 3, you can use the method ``cube.get_amp_ratio_from_flux_ratio`` and use its output to set the correct amplitude ratio.
```python
amp_ratio = cube.get_amp_ratio_from_flux_ratio('[NII]6583', '[NII]6548', 3)
amp_ratio
```
2.967824020860382
```python
spectrum = cube.get_spectrum(998, 1198, 2)
fit = spectrum.fit(['[NII]6548', 'Halpha', '[NII]6583'],
fmodel='gaussian',
pos_cov=-350,
pos_def=['1'],
fwhm_def='fixed',
amp_def=['1', '2', '1'],
amp_guess=[1, 1, amp_ratio])
print(fit)
print('[NII] amplitude ratio', fit['lines_params'][2,1]/fit['lines_params'][0,1])
print('[NII] flux ratio', fit['flux'][2]/fit['flux'][0])
pl.figure(figsize=(10,6))
spectrum.plot(ls=':', c='black')
fit.get_spectrum().plot(ls='-', c='orange')
pl.xlim(14500, 15500)
```
=== Fit results ===
lines: ['[NII]6548', 'H3', '[NII]6584'], fmodel: gaussian
iterations: 61, fit time: 7.91e-02 s
Velocity (km/s): [-295.6(1.1) -295.6(1.1) -295.6(1.1)]
Flux: [5.15(18)e-15 1.410(57)e-14 1.544(54)e-14]
Broadening (km/s): [nan +- nan nan +- nan nan +- nan]
[NII] amplitude ratio 2.967824020860382
[NII] flux ratio 3.0
(14500, 15500)

```python
```
|
thomasorbREPO_NAMEorcsPATH_START.@orcs_extracted@orcs-master@docs@_build@html@script_example_constaining_line_ratios.ipynb@.PATH_END.py
|
{
"filename": "LogTestComponent.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/Benchmark/components/src/perftestImpl/LogTestComponent.py",
"type": "Python"
}
|
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
#------------------------------------------------------------------------------
'''
TODO:
- All!!!
'''
__version__ = "$Id: LogTestComponent.py,v 1.5 2005/05/20 21:47:56 dfugate Exp $"
#--REGULAR IMPORTS-------------------------------------------------------------
from os import environ
#--CORBA STUBS-----------------------------------------------------------------
#--ACS Imports-----------------------------------------------------------------
from perftestImpl.BasePerfComp import BasePerfComp
from Acspy.Util.Profiler import Profiler
#--GLOBALS---------------------------------------------------------------------
#------------------------------------------------------------------------------
class LogTestComponent(BasePerfComp):
    '''
    Benchmark component that profiles the cost of ACS log calls of a
    configurable size (self.size bytes) repeated self.count times.
    '''
    #--------------------------------------------------------------------------
    def __init__(self):
        '''
        Constructor. Delegates all setup to BasePerfComp.
        '''
        BasePerfComp.__init__(self)
    #------------------------------------------------------------------------------
    def method(self):
        '''
        void method();

        Logs a '*'-filled string of self.size characters self.count times,
        timing each log call with a Profiler, then reports the results
        (including whether ACS_LOG_STDOUT was set in the environment).
        '''
        profiler = Profiler()
        # Build the payload in one step; repeated '+' concatenation in a
        # loop is O(n^2).
        tString = "*" * self.size
        for i in range(0, self.count):
            profiler.start()
            self.getLogger().logInfo(tString)
            profiler.stop()
            # Pace the log calls; presumably avoids flooding the logging
            # system -- see BasePerfComp.waitAwhile.
            self.waitAwhile()
        # dict.has_key() was removed in Python 3; environ.get works in
        # both Python 2 and 3 and collapses the if/else into one call.
        profiler.addData("ACS_LOG_STDOUT", environ.get("ACS_LOG_STDOUT", "None"))
        profiler.fullDescription("ACS Log of Size '" + str(self.size) + "' Bytes from within a CharacteristicComponent")
        return
    #------------------------------------------------------------------------------
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@Benchmark@components@src@perftestImpl@LogTestComponent.py@.PATH_END.py
|
{
"filename": "hexbin_marginals.py",
"repo_name": "mwaskom/seaborn",
"repo_path": "seaborn_extracted/seaborn-master/examples/hexbin_marginals.py",
"type": "Python"
}
|
"""
Hexbin plot with marginal distributions
=======================================
_thumb: .45, .4
"""
import numpy as np
import seaborn as sns
sns.set_theme(style="ticks")

# Draw a correlated sample: gamma-distributed x with Gaussian noise on a
# linear trend for y (fixed seed for reproducibility).
rng = np.random.RandomState(11)
x = rng.gamma(2, size=1000)
y = rng.normal(size=1000) - .5 * x

sns.jointplot(x=x, y=y, kind="hex", color="#4CB391")
|
mwaskomREPO_NAMEseabornPATH_START.@seaborn_extracted@seaborn-master@examples@hexbin_marginals.py@.PATH_END.py
|
{
"filename": "test_enterprise_extensions.py",
"repo_name": "nanograv/enterprise_extensions",
"repo_path": "enterprise_extensions_extracted/enterprise_extensions-master/tests/test_enterprise_extensions.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `enterprise_extensions` package."""
import pytest
@pytest.fixture
def response():
    """Sample pytest fixture.

    Placeholder from the cookiecutter template: currently returns None.
    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # Uncomment to turn this into a real HTTP fixture:
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
    """Sample pytest test function with the pytest fixture as an argument.

    Placeholder from the cookiecutter template: performs no assertions yet.
    """
    # Uncomment (with the fixture above) to assert on real page content:
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string
|
nanogravREPO_NAMEenterprise_extensionsPATH_START.@enterprise_extensions_extracted@enterprise_extensions-master@tests@test_enterprise_extensions.py@.PATH_END.py
|
{
"filename": "arbor.py",
"repo_name": "ytree-project/ytree",
"repo_path": "ytree_extracted/ytree-main/ytree/frontends/lhalotree/arbor.py",
"type": "Python"
}
|
"""
LHaloTreeArbor class and member functions
"""
#-----------------------------------------------------------------------------
# Copyright (c) ytree development team. All rights reserved.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
import glob
from yt.funcs import \
get_pbar
from unyt.exceptions import \
UnitParseError
from ytree.data_structures.arbor import \
Arbor
from ytree.frontends.lhalotree.fields import \
LHaloTreeFieldInfo
from ytree.frontends.lhalotree.io import \
LHaloTreeTreeFieldIO, LHaloTreeRootFieldIO
from ytree.frontends.lhalotree.utils import \
LHaloTreeReader
from ytree.utilities.logger import \
ytreeLogger
class LHaloTreeArbor(Arbor):
    """
    Arbors for LHaloTree data.
    """

    _field_info_class = LHaloTreeFieldInfo
    _tree_field_io_class = LHaloTreeTreeFieldIO
    _root_field_io_class = LHaloTreeRootFieldIO
    # LHaloTree stores most on-disk quantities as 32-bit floats.
    _default_dtype = np.float32
    _node_io_attrs = ('_lht', '_index_in_lht')

    def __init__(self, *args, **kwargs):
        r"""Added reader class to allow fast access of header info."""
        # Keywords consumed by LHaloTreeReader rather than the base Arbor;
        # pop them before delegating to super().__init__.
        reader_keys = ['parameters', 'parameter_file',
                       'scale_factors', 'scale_factor_file',
                       'header_size', 'nhalos_per_tree', 'read_header_func',
                       'item_dtype']
        reader_kwargs = dict()
        for k in reader_keys:
            if k in kwargs:
                reader_kwargs[k] = kwargs.pop(k)
        self._lht0 = LHaloTreeReader(args[0], **reader_kwargs)
        super().__init__(*args, **kwargs)
        # Restore the popped keys. NOTE(review): this mutates the caller's
        # kwargs dict after use — confirm any caller relies on it.
        kwargs.update(**reader_kwargs)
        lht0 = self._lht0
        # One reader per file matching the first file's pattern, stored
        # indexed by each reader's file index.
        files = sorted(glob.glob(lht0.filepattern))
        self._lhtfiles = [None for _ in files]
        self._lhtfiles[lht0.fileindex] = lht0
        if len(files) > 1:
            if ((('header_size' in reader_kwargs) or  # pragma: no cover
                 ('nhalos_per_tree' in reader_kwargs))):
                raise RuntimeError("Cannot use 'header_size' or 'nhalos_per_tree' " +
                                   "for trees split across multiple files. Use " +
                                   "'read_header_func' instead.")
            # Reuse the header info already parsed from the first file so
            # the remaining files are opened without re-reading headers.
            reader_kwargs.update(parameters=lht0.parameters,
                                 scale_factors=lht0.scale_factors,
                                 item_dtype=lht0.item_dtype,
                                 silent=True)
            for f in files:
                if f == lht0.filename:
                    continue
                ilht = LHaloTreeReader(f, **reader_kwargs)
                self._lhtfiles[ilht.fileindex] = ilht
        # Assert files are there
        for f in self._lhtfiles:
            if f is None:  # pragma: no cover
                raise RuntimeError("Not all files were read.")

    # NOTE: LHaloTree is currently using np.memmap so it doesn't need
    # fd to be passed. If this causes memory issues for larger trees,
    # the below functions allow a file descriptor to be checked and updated
    # as needed to prevent unnecessary open/close operations when accessing
    # nodes in the same file. Alternatively, see io.py for an option that
    # caches the file descriptor any time a field is read.
    # def _func_update_file(self, node, *args, **kwargs):
    #     """Call a file making sure that the correct file is open."""
    #     func = kwargs.pop("_func", None)
    #     if func is None:
    #         raise RuntimeError("No function passed.")
    #     fd = self._node_io_fd
    #     if fd is None or (node._lht.filename != fd.name):
    #         if fd is not None:
    #             fd.close()
    #         self._node_io_fd = open(node._lht.filename, 'rb')
    #     kwargs["f"] = self._node_io_fd
    #     return func(node, *args, **kwargs)
    # def _node_io_loop(self, func, *args, **kwargs):
    #     """
    #     Since LHaloTrees can be split across multiple files, this
    #     optimization only works if the nodes are all in the same file.
    #     It's a small optimization that keeps the file open
    #     when doing io for multiple trees in the same file.
    #     """
    #     self._node_io_fd = None
    #     kwargs["_func"] = func
    #     super()._node_io_loop(
    #         self._func_update_file, *args, **kwargs)
    #     if self._node_io_fd is not None:
    #         self._node_io_fd.close()

    def _parse_parameter_file(self):
        """
        Parse the file header, get things like:
        - cosmological parameters
        - box size
        - list of fields
        """
        # Mirror the reader's unit strings as attributes on the arbor.
        for u in ['mass', 'vel', 'len']:
            setattr(self, '_lht_units_' + u,
                    getattr(self._lht0, 'units_' + u))
            # v, s = getattr(self._lht0, 'units_' + u).split()
            # setattr(self, '_lht_units_' + u, self.quan(float(v), s))
        self.hubble_constant = self._lht0.hubble_constant
        self.omega_matter = self._lht0.omega_matter
        self.omega_lambda = self._lht0.omega_lambda
        self.box_size = self.quan(self._lht0.box_size, self._lht_units_len)
        # self.box_size = self._lht0.box_size * self._lht_units_len
        # a list of all fields on disk
        fields = self._lht0.fields
        # a dictionary of information for each field
        # this can have specialized information for reading the field
        fi = {}
        # for example:
        # fi["mass"] = {"column": 4, "units": "Msun/h", ...}
        # Fields grouped by the physical unit they carry on disk.
        none_keys = ['Descendant', 'FirstProgenitor', 'NextProgenitor',
                     'FirstHaloInFOFgroup', 'NextHaloInFOFgroup',
                     'Len', 'MostBoundID',
                     'SnapNum', 'FileNr', 'SubhaloIndex',
                     'uid', 'desc_uid', 'scale_factor',
                     'Jx', 'Jy', 'Jz']
        mass_keys = ['M_Mean200', 'Mvir', 'M_TopHat', 'SubHalfMass']
        dist_keys = ['x', 'y', 'z']
        velo_keys = ['VelDisp', 'Vmax', 'vx', 'vy', 'vz']
        all_keys = [none_keys, mass_keys, dist_keys, velo_keys]
        all_units = ['', self._lht_units_mass, self._lht_units_len,
                     self._lht_units_vel]
        for keylist, unit in zip(all_keys, all_units):
            # Fall back to dimensionless if a unit string fails to parse.
            try:
                self.quan(1, unit)
                punit = unit
            except UnitParseError:  # pragma: no cover
                ytreeLogger.warning(f"Could not parse unit: {unit}")
                punit = ''
            for k in keylist:
                fi[k] = {'units': punit}
        self.field_list = fields
        self.field_info.update(fi)

    def _plant_trees(self):
        """
        This is where we figure out how many trees there are,
        create the array to hold them, and instantiate a root
        tree_node for each tree.
        """
        if self.is_planted:
            return
        # open file, count trees
        ntrees_tot = 0
        for lht in self._lhtfiles:
            ntrees_tot += lht.ntrees
        self._size = ntrees_tot
        pbar = get_pbar("Loading tree roots", ntrees_tot)
        self._node_info['_lht'] = np.empty(ntrees_tot, dtype=object)
        # Record, for every root, its uid plus which file/index it lives at.
        itot = 0
        for ifile, lht in enumerate(self._lhtfiles):
            ntrees = lht.ntrees
            root_uids = lht.all_uids[lht.nhalos_before_tree]
            for i in range(ntrees):
                self._node_info['uid'][itot] = root_uids[i]
                self._node_info['_lht'][itot] = lht
                self._node_info['_index_in_lht'][itot] = i
                itot += 1
                pbar.update(itot)
        pbar.finish()

    @classmethod
    def _is_valid(cls, *args, **kwargs):
        """
        Return True if we are able to initialize a reader.

        (First parameter renamed self -> cls: this is a classmethod, PEP 8.)
        """
        try:
            # NOTE(review): mutates the caller's kwargs dict — confirm
            # callers pass throwaway kwargs before refactoring.
            kwargs.update(silent=True, validate=True)
            LHaloTreeReader(*args, **kwargs)
        except (IOError, TypeError):
            return False
        return True
|
ytree-projectREPO_NAMEytreePATH_START.@ytree_extracted@ytree-main@ytree@frontends@lhalotree@arbor.py@.PATH_END.py
|
{
"filename": "blending.py",
"repo_name": "danielacarrasco/GLACiAR",
"repo_path": "GLACiAR_extracted/GLACiAR-master/blending.py",
"type": "Python"
}
|
import numpy as np
import pickle
import dropouts
import os
from astropy.io import fits
def main(path_to_cat, niter, detection_band, cat, m1, redshift, xpos, ypos,
         xpos_oc, ypos_oc, segm_science, m_oc, f_oc, id_oc, input_mag, zp,
         bands, min_sn, dp, fw, fw2):
    """
    Uses the information from the new and old catalogs and segmentation
    maps to find the new sources and label them according to their
    identification and blending statuses.
    Args:
        path_to_cat (string) = Path to the folder with the science images.
                               Given in the parameters file.
        niter (integer) = iteration number.
        detection_band (string) = Name of the detection band given in the
                                  parameters file.
        cat (string) = Name of the field for which the simulation is run.
        m1 (float) = Initial input magnitude for the simulated galaxy in the
                     detection band.
        redshift (float) = Redshift for the simulated galaxy.
        xpos (int array) = Position of the simulated galaxy in the x axis.
        ypos (int array) = Position of the simulated galaxy in the y axis.
        xpos_oc (float array) = Array with the position in the x axis of the
                                centre for all the sources identified in the
                                original science image.
        ypos_oc (float array) = Array with the position in the y axis of the
                                centre for all the sources identified in the
                                original science image.
        segm_science (array) = Segmentation map produced by SExtractor for the
                               identified sources from the science image.
        m_oc (float array) = Array with the AB AUTO magnitude for the sources
                             identified by SExtractor in the science image.
        f_oc (float array) = Array with the ISO flux for the sources
                             identified by SExtractor in the science image.
        id_oc (int array) = Array with the ID assigned by SExtractor for each
                            one of the sources identified in the science image.
        input_mag (float) = Expected magnitude of the artificial galaxies in
                            the detection band.
        zp (float array) = Zeropoint values for all bands as given in the
                           input parameters file.
        bands (string array) = Name of the bands in which the artificial
                               galaxies will be simulated. Given in the input
                               parameters file.
        min_sn (float) = Minimum S/N ratio in the detection band for an object
                         to be considered detected by SExtractor. Given in the
                         input parameters file.
        dp (boolean) = Boolean that indicates whether the user requires to run
                       a dropout selection. Given in the input parameters file.
                       If True, the dropouts.py module will be used.
        fw (text file) = File in which the information about the artificial
                         sources will be saved. 'RecoveredGalaxies_cat_z#.cat'
        fw2 (text file) = File in which the information about the artificial
                          sources obtained by SExtractor will be saved.
    Returns:
        identified (int) = Number of artificial galaxies from the current
                           iteration that are detected by SExtractor and that
                           are isolated.
        blended_b (int) = Number of artificial galaxies from the current
                          iteration that are detected by SExtractor and are
                          blended with previously detected brighter sources.
        blended_f (int) = Number of artificial galaxies from the current
                          iteration that are detected by SExtractor and are
                          blended with previously detected fainter sources.
        not_indentified_sn (int) = Number of artificial galaxies from the
                                   current iteration that are detected by
                                   SExtractor but are considered not identified
                                   because their S/N is below min_sn.
        not_indentified (int) = Number of artificial galaxies from the current
                                iteration that are not detected by SExtractor.
        drops (int) = Number of artificial galaxies from the current iteration
                      that passed the redshift selection criteria from
                      'dropouts.py'. If drops is set to False the value is 0.
    """
    # NOTE(review): this module uses Python 2 idioms (xrange, pickle.dump to
    # an already-open handle); it will not run unmodified under Python 3.
    # Open segmentation maps from simulated images, save the data,
    # and delete the fits file.
    segm_new_cat = fits.open(path_to_cat + 'Results/SegmentationMaps/'
                             'Segmentation_maps_i' + str(niter) + '_' +
                             detection_band+'.fits', ignore_missing_end=True)
    segm_sim = segm_new_cat[0].data  # Data from new images.
    segm_new_cat.close()
    os.remove(path_to_cat + 'Results/SegmentationMaps/Segmentation_maps_i' +
              str(niter) + '_' + detection_band + '.fits')
    # Catalog with the identified sources from the simulated images.
    f = open(path_to_cat + 'Results/Dropouts/source_' + cat + '_mag' +
             str(m1) + '_z' + str(redshift) + '_i' + str(niter) + '_' +
             detection_band + '.cat')
    k = f.readlines()
    f.close()
    # Information from SExtractor for the new sources (science image +
    # simulated galaxies). The first 27 lines are assumed to be the
    # SExtractor header; column indices below match its output layout.
    id_mgal = [int(line.split()[0]) for line in k[27:]]  # ID
    f_gal = [float(line.split()[1]) for line in k[27:]]  # Isophotal flux
    ef_gal = [float(line.split()[2]) for line in k[27:]]  # RMS error for flux
    m_gal = [float(line.split()[27]) for line in k[27:]]  # AUTO magnitude
    sn_mgal = np.array(np.array(f_gal)/np.array(ef_gal))  # S/N = flux / RMS error
    # NOTE(review): columns 32/31 feed xpos_nc/ypos_nc respectively —
    # confirm against the SExtractor parameter file that the x/y labels match.
    xpos_nc = [float(line.split()[32]) for line in k[27:]]  # Position in x
    ypos_nc = [float(line.split()[31]) for line in k[27:]]  # Position in y
    radius = [float(line.split()[42]) for line in k[27:]]  # Radii
    # Convert the previous arrays into np.arrays
    xpos = np.array(xpos).astype(int)
    ypos = np.array(ypos).astype(int)
    xpos_oc = np.array(xpos_oc)
    ypos_oc = np.array(ypos_oc)
    xpos_nc = np.array(xpos_nc)
    ypos_nc = np.array(ypos_nc)
    m_oc = np.array(m_oc)
    f_oc = np.array(f_oc)
    id_oc = np.array(id_oc)
    id_mgal = np.array(id_mgal)
    m_gal = np.array(m_gal)
    # Initialise counters. These will contain information about the number of
    # artificial galaxies and their detection status after running SExtractor.
    identified = 0  # Identified artificial sources.
    blended_b = 0  # Artificial sources blended with a brighter object.
    blended_f = 0  # Artificial sources blended with a fainter object.
    not_indentified_sn = 0  # Artificial sources not detected due to low S/N.
    not_indentified = 0  # Artificial sources not detected by Sextractor.
    drops = 0.  # Artificial sources classified as a dropout.
    margin = 3  # Number of pixels from centre in which search is performed.
    # Array with only 100 as values, which will be replaced by the status code.
    status = np.zeros(len(xpos)) + 100
    # Array with only zeroes as values, which will be filled out by the ID of
    # the artificial galaxy.
    id_nmbr = np.zeros(len(xpos))
    i_mag = np.zeros(len(xpos)) + input_mag  # array with input mag value.
    # Open a text file (.reg) with the formatting to be used as region file on
    # DS9. It shows location where the artificial source was originally placed.
    g = open(path_to_cat + 'Results/SegmentationMaps/region_' + cat + '_mag' +
             str(m1) + '_z' + str(redshift) + '_i' + str(niter) + '.reg', "w")
    # Loop for each artificial source.
    for i in xrange(len(xpos)):
        # The IF below searches for any value different than zero in the
        # segmentation maps of the new images (science + artificial sources)
        # within a square centered in the input position for the artificial
        # galaxy and a side of 2*margin. This is done by evaluating the sum of
        # the pixels' values. It enters the IF if the value is != 0.
        if np.sum(segm_sim[xpos[i]-margin:xpos[i]+margin,
                  ypos[i]-margin:ypos[i]+margin]) != 0:
            # array with square where search is performed.
            id_mgi_aux2 = segm_sim[xpos[i]-margin:xpos[i]+margin,
                                   ypos[i]-margin:ypos[i]+margin]
            # ID of the source in the search region is recorded.
            id_nmbr[i] = np.max(id_mgi_aux2)
            # ID value for pixels from the science image in the position of
            # the newly found source.
            id_mgi2 = segm_science[segm_sim == id_nmbr[i]]
            idsimgal = len(id_mgi2)  # Number of pixels the source encompasses.
            # ID of source previously in the position of newly found source.
            id_mgi = np.max(id_mgi2)
            # Number of pixels of the simulated galaxy that are overlapped
            # with the old source.
            w2 = np.where(id_mgi2 != 0)[0]
            # If the S/N of the newly identified source is above the required
            # threshold, enter the IF below.
            if sn_mgal[id_mgal == id_nmbr[i]] >= min_sn:
                # If there wasn't any source on the pixels where the new source
                # was found, enter the IF below.
                # These are objects detected and isolated.
                if np.sum(id_mgi2) == 0:
                    status[i] = 0  # Status for detected and isolated sources.
                    # Line with information of the artificial source.
                    line = ('%6s' % str(m1) + '\t%6s' % str(int(niter)) +
                            '\t%9s' % str(int(id_nmbr[i])) + '\t%9s' %
                            str(float(input_mag)) + '\t%9s' %
                            str(float(m_gal[id_mgal == id_nmbr[i]])) +
                            '\t%9s' % str(int(0)) + '\n')
                    # Write line in the file 'RecoveredGalaxies_cat_z#.cat'
                    fw.writelines(line)
                    # Write the region file for the artificial sources with
                    # status=0 in colour green. The position is the input one.
                    g.write("circle %s %s 11 #color=green width=2\n" %
                            (ypos[i], xpos[i]))
                    # Counter for identified and isolated galaxies.
                    identified = identified + 1
                # If there was a source previously on the pixels where the new
                # source was found, enter the IF below.
                # These are objects detected and blended.
                else:
                    id_blended = id_mgi  # ID of the source previously there.
                    # Find the magnitude of the old source and compare it to
                    # the input magnitude. If the source is brighter, enter
                    # the IF below.
                    if m_oc[id_blended == id_oc] <= input_mag:
                        # Enter the IF below If the old source's flux is
                        # smaller than 75% the input flux of the artificial
                        # source, AND if the number of pixels of the newly
                        # identified source that were previously occupied by
                        # another source is 25% or less the total number of
                        # pixels of said source (the overlap of the new source
                        # is 25% its original size). We consider this as if the
                        # artificial source was blended with a fainter object.
                        if ((f_oc[id_blended == id_oc] <=
                                0.75*10**((zp[0]-input_mag)/2.5)) and
                                (len(w2) <= 0.25*idsimgal)):
                            # Status code for blended with fainter object.
                            status[i] = 2
                            line = ('%6s' % str(m1) + '\t%6s' %
                                    str(int(niter)) + '\t%9s' %
                                    str(int(id_nmbr[i])) + '\t%9s' %
                                    str(float(input_mag)) + '\t%9s' %
                                    str(float(m_gal[id_mgal == id_nmbr[i]])) +
                                    '\t%9s' % str(int(2)) + '\n')
                            # Write in the file 'RecoveredGalaxies_cat_z#.cat'
                            fw.writelines(line)
                            # Write the region file for the artificial sources
                            # with status=2 in colour blue.
                            g.write("circle %s %s 11 #color=blue width=2\n" %
                                    (ypos[i], xpos[i]))
                            # Counter for sources identified and blended with
                            # fainter galaxies.
                            blended_f = blended_f + 1
                        # If the flux or overlap conditions aren't true, do the
                        # following
                        else:
                            # Status code for blended with brighter object.
                            status[i] = -1
                            line = ('%6s' % str(m1) + '\t%6s' %
                                    str(int(niter)) + '\t%9s' %
                                    str(int(id_nmbr[i])) + '\t%9s' %
                                    str(float(input_mag)) + '\t%9s' %
                                    str(float(m_gal[id_mgal == id_nmbr[i]])) +
                                    '\t%9s' % str(int(-1)) + '\n')
                            # Write in the file 'RecoveredGalaxies_cat_z#.cat'
                            fw.writelines(line)
                            # Write the region file for the artificial sources
                            # with status=-1 in colour red.
                            g.write("circle %s %s 11 #color=red width=2\n" %
                                    (ypos[i], xpos[i]))
                            # Counter for sources identified and blended with
                            # brighter galaxies.
                            blended_b = blended_b + 1
                    # if the magnitude of the old source is fainter than the
                    # input magnitude, enter the IF below.
                    else:
                        # Status code for blended with fainter object.
                        # Different from 2 as it purely compares magnitudes.
                        status[i] = 1
                        line = ('%6s' % str(m1) + '\t%6s' % str(int(niter)) +
                                '\t%9s' % str(int(id_nmbr[i])) + '\t%9s' %
                                str(float(input_mag)) + '\t%9s' %
                                str(float(m_gal[id_mgal == id_nmbr[i]])) +
                                '\t%9s' % str(int(1)) + '\n')
                        # Write in the file 'RecoveredGalaxies_cat_z#.cat'
                        fw.writelines(line)
                        # Write the region file for the artificial sources
                        # with status=1 in colour blue.
                        g.write("circle %s %s 11 #color=blue width=2\n" %
                                (ypos[i], xpos[i]))
                        # Counter for sources identified and blended with
                        # fainter galaxies.
                        blended_f = blended_f + 1
            # If the S/N of the newly identified source is BELOW the required
            # threshold, enter the ELSE below.
            else:
                # Status for detected sources with S/N below required min_sn.
                status[i] = -2
                line = ('%6s' % str(m1) + '\t%6s' % str(int(niter)) +
                        '\t%9s' % str(int(id_nmbr[i])) + '\t%9s' %
                        str(float(input_mag)) + '\t%9s' %
                        str(float(m_gal[id_mgal == id_nmbr[i]])) +
                        '\t%9s' % str(int(-2)) + '\n')
                # Write in the file 'RecoveredGalaxies_cat_z#.cat'
                fw.writelines(line)
                # Write the region file for the artificial sources with
                # status=-2 in colour red.
                g.write("circle %s %s 11 #color=red width=2\n" %
                        (ypos[i], xpos[i]))
                # Counter for sources detected by SExtractor but that did not
                # meet the required threshold.
                not_indentified_sn = not_indentified_sn + 1
        # If all values of the new segmentation map within the search grid are
        # zero, the object has not been detected by SExtractor.
        else:
            status[i] = -3  # Status for sources not detected by SExtractor.
            line = ('%6s' % str(m1) + '\t%6s' % str(int(niter)) +
                    '\t%9s' % str(int(0)) + '\t%9s' % str(float(input_mag)) +
                    '\t%9s' % str(float(-99.000)) + '\t%9s' % str(int(-3)) +
                    '\n')
            # Write in the file 'RecoveredGalaxies_cat_z#.cat'
            fw.writelines(line)
            # Write the region file for the artificial sources with status=-3
            # in colour red.
            g.write("circle %s %s 11 #color=red width=2\n" %
                    (ypos[i], xpos[i]))
            # Counter for sources not detected by SExtractor.
            not_indentified = not_indentified + 1
    # Close the .reg file.
    g.close()
    # Initialise array for ISO magnitudes, AUTO magnitudes, and S/N in all
    # bands measured by SExtractor for the new sources.
    mag_iso = np.zeros((len(id_mgal), len(bands)))
    mag_auto = np.zeros((len(id_mgal), len(bands)))
    sn = np.zeros((len(id_mgal), len(bands)))
    # Loop for the number of bands used in the simulation.
    for j in xrange(len(bands)):
        # Open the catalog with the identified sources from the simulated
        # images (science + artificial sources) for each band.
        f1 = open(path_to_cat + 'Results/Dropouts/source_' + cat + '_mag' +
                  str(m1) + '_z' + str(redshift) + '_i' + str(niter) + '_' +
                  bands[j] + '.cat')
        k1 = f1.readlines()
        f1.close()
        # Save the information on the ISO mag, AUTO mag, and S/N for each band.
        mag_iso[:, j] = [float(line.split()[3]) for line in k1[27:]]
        mag_auto[:, j] = [float(line.split()[27]) for line in k1[27:]]
        sn[:, j] = [float(line.split()[1])/float(line.split()[2]) for
                    line in k1[27:]]
    # Save the data only for sources identified as artificial sources.
    # (id_nmbr holds 1-based SExtractor IDs, hence the -1 index shift;
    # sources with id_nmbr == 0 map to index -1, the last catalog row.)
    mag_iso2 = mag_iso[id_nmbr.astype(int)-1, :]
    mag_auto2 = mag_auto[id_nmbr.astype(int)-1, :]
    sn2 = sn[id_nmbr.astype(int)-1, :]
    # Initialise drops, which stays as zero if dropouts is False
    drops = 0
    # Run dropout module if dropout parameter is set to True.
    if dp is True:
        drops, drops_array = dropouts.main(mag_iso2, mag_auto2, sn2, status)
    # NOTE(review): drops_array is only bound when dp is True; the np.c_ line
    # below raises NameError when dp is False — confirm intended behavior.
    # Save stacked array with the dimensions needed according to the number
    # of bands. It contains the information of the new sources by SExtractor
    # plus their detection status and input magnitude.
    final_array_aux = np.c_[id_nmbr, i_mag, status, drops_array,
                            mag_iso2, mag_auto2, sn2]
    final_array = np.matrix.transpose(final_array_aux)
    # Write a pickled representation of the data of the new sources to the
    # file.
    # WARNING: this is not readable data.
    pickle.dump(final_array, fw2)
    # Return number of sources for each of the following categories.
    return identified, blended_b, blended_f, not_indentified_sn,\
        not_indentified, drops
# NOTE(review): main() requires 21 positional arguments, so this bare call
# will always raise TypeError; confirm whether a CLI wrapper was intended.
if __name__ == "__main__":
    main()
|
danielacarrascoREPO_NAMEGLACiARPATH_START.@GLACiAR_extracted@GLACiAR-master@blending.py@.PATH_END.py
|
{
"filename": "embedding_in_wx5_sgskip.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/user_interfaces/embedding_in_wx5_sgskip.py",
"type": "Python"
}
|
"""
==============
Embed in wx #5
==============
"""
import wx
import wx.lib.agw.aui as aui
import wx.lib.mixins.inspection as wit
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import \
NavigationToolbar2WxAgg as NavigationToolbar
from matplotlib.figure import Figure
class Plot(wx.Panel):
    """A wx.Panel hosting one Matplotlib figure plus its navigation toolbar."""

    def __init__(self, parent, id=-1, dpi=None, **kwargs):
        super().__init__(parent, id=id, **kwargs)
        # Build the figure, its wx canvas, and the standard toolbar.
        self.figure = Figure(dpi=dpi, figsize=(2, 2))
        self.canvas = FigureCanvas(self, -1, self.figure)
        self.toolbar = NavigationToolbar(self.canvas)
        self.toolbar.Realize()
        # Canvas stretches to fill the panel; the toolbar sits beneath it.
        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(self.canvas, 1, wx.EXPAND)
        layout.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
        self.SetSizer(layout)
class PlotNotebook(wx.Panel):
    """A wx.Panel wrapping an AUI notebook whose pages are Plot panels."""

    def __init__(self, parent, id=-1):
        super().__init__(parent, id=id)
        self.nb = aui.AuiNotebook(self)
        box = wx.BoxSizer()
        box.Add(self.nb, 1, wx.EXPAND)
        self.SetSizer(box)

    def add(self, name="plot"):
        """Append a new plot page titled *name* and return its Figure."""
        page = Plot(self.nb)
        self.nb.AddPage(page, name)
        return page.figure
def demo():
    """Open an inspectable wx application showing two example figures."""
    # Alternatively you could use:
    #     app = wx.App()
    # InspectableApp is a great debug tool, see:
    # http://wiki.wxpython.org/Widget%20Inspection%20Tool
    app = wit.InspectableApp()
    frame = wx.Frame(None, -1, 'Plotter')
    notebook = PlotNotebook(frame)
    ax = notebook.add('figure 1').add_subplot()
    ax.plot([1, 2, 3], [2, 1, 4])
    ax = notebook.add('figure 2').add_subplot()
    ax.plot([1, 2, 3, 4, 5], [2, 1, 4, 2, 3])
    frame.Show()
    app.MainLoop()
# Launch the demo application when run as a script.
if __name__ == "__main__":
    demo()
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@user_interfaces@embedding_in_wx5_sgskip.py@.PATH_END.py
|
{
"filename": "one_time.py",
"repo_name": "misharash/class_public",
"repo_path": "class_public_extracted/class_public-master/scripts/one_time.py",
"type": "Python"
}
|
# coding: utf-8
# In[1]:
# Script (exported from a notebook) that computes CMB transfer functions at
# recombination with CLASS and plots the individual contributions to the
# temperature power spectrum.
# import necessary modules
# uncomment to get plots displayed in notebook
#%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
import math
# In[2]:
# esthetic definitions for the plots
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
# NOTE(review): `matplotlib.mathtext.rcParams` is a legacy alias -- confirm it
# still exists in the matplotlib version targeted by this script.
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
# In[3]:
#############################################
#
# Cosmological parameters and other CLASS parameters
#
common_settings = {# which output? ClTT, transfer functions delta_i and theta_i
                   'output':'tCl,mTk,vTk',
                   # LambdaCDM parameters
                   'h':0.67556,
                   'omega_b':0.022032,
                   'omega_cdm':0.12038,
                   'A_s':2.215e-9,
                   'n_s':0.9619,
                   'tau_reio':0.0925,
                   # Take fixed value for primordial Helium (instead of automatic BBN adjustment)
                   'YHe':0.246,
                   # other output and precision parameters
                   'l_max_scalars':5000,
                   'P_k_max_1/Mpc':10.0,
                   'gauge':'newtonian'}
###############
#
# call CLASS a first time just to compute z_rec (will compute transfer functions at default: z=0)
#
M = Class()
M.set(common_settings)
M.compute()
derived = M.get_current_derived_parameters(['z_rec','tau_rec','conformal_age'])
#print(derived.keys())
z_rec = derived['z_rec']
z_rec = int(1000.*z_rec)/1000.  # truncate to 3 digits after the comma
M.struct_cleanup()  # clean output
M.empty()           # clean input
#
# call CLASS again (will compute transfer functions at input value z_rec)
#
M = Class()
M.set(common_settings)
M.set({'z_pk':z_rec})
M.compute()
#
# load transfer functions at recombination
#
one_time = M.get_transfer(z_rec)
# (fixed: was the Python-2-only statement `print one_time.viewkeys()`;
# `print` is a function and dict views are spelled .keys() in Python 3)
print(one_time.keys())
k = one_time['k (h/Mpc)']
Theta0 = 0.25*one_time['d_g']   # photon temperature monopole Theta_0 = delta_g/4
phi = one_time['phi']
psi = one_time['psi']
theta_b = one_time['t_b']
# compute related quantities
R = 3./4.*M.Omega_b()/M.Omega_g()/(1+z_rec) # R = 3/4 * (rho_b/rho_gamma) at z_rec
zero_point = -(1.+R)*psi # zero point of oscillations: -(1.+R)*psi
#
# get Theta0 oscillation amplitude (for vertical scale of plot)
#
Theta0_amp = max(Theta0.max(),-Theta0.min())
#
# use table of background quantities to find the wavenumbers corresponding to
# Hubble crossing (k = 2 pi a H), sound horizon crossing (k = 2pi / rs)
#
background = M.get_background() # load background table
#print(background.keys())
#
background_tau = background['conf. time [Mpc]'] # read conformal times in background table
background_z = background['z'] # read redshift
background_kh = 2.*math.pi*background['H [1/Mpc]']/(1.+background['z'])/M.h() # read kh = 2pi aH = 2pi H/(1+z) converted to [h/Mpc]
background_ks = 2.*math.pi/background['comov.snd.hrz.']/M.h() # read ks = 2pi/rs converted to [h/Mpc]
#
# define interpolation functions; we want the value of tau when the argument is equal to 2pi
#
kh_at_tau = interp1d(background_tau,background_kh)
ks_at_tau = interp1d(background_tau,background_ks)
#
# finally get these scales
#
tau_rec = derived['tau_rec']
kh = kh_at_tau(tau_rec)
ks = ks_at_tau(tau_rec)
#
#################
#
# start plotting
#
#################
#
# Three vertically stacked panels sharing the wavenumber axis.
fig, (ax_Tk, ax_Tk2, ax_Cl) = plt.subplots(3,sharex=True,figsize=(8,12))
fig.subplots_adjust(hspace=0)
##################
#
# first figure with transfer functions
#
##################
# NOTE(review): tick_params flags are given as 'on'/'off' strings; recent
# matplotlib expects booleans -- confirm against the targeted version.
ax_Tk.set_xlim([3.e-4,0.5])
ax_Tk.set_ylim([-1.1*Theta0_amp,1.1*Theta0_amp])
ax_Tk.tick_params(axis='x',which='both',bottom='off',top='on',labelbottom='off',labeltop='on')
ax_Tk.set_xlabel(r'$\mathrm{k} \,\,\, \mathrm{[h/Mpc]}$')
ax_Tk.set_ylabel(r'$\mathrm{Transfer}(\tau_\mathrm{dec},k)$')
ax_Tk.xaxis.set_label_position('top')
ax_Tk.grid()
#
# mark the Hubble-crossing and sound-horizon-crossing wavenumbers
ax_Tk.axvline(x=kh,color='r')
ax_Tk.axvline(x=ks,color='y')
#
ax_Tk.annotate(r'Hubble cross.',
               xy=(kh,0.8*Theta0_amp),
               xytext=(0.15*kh,0.9*Theta0_amp),
               arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
ax_Tk.annotate(r'sound hor. cross.',
               xy=(ks,0.8*Theta0_amp),
               xytext=(1.3*ks,0.9*Theta0_amp),
               arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
#
ax_Tk.semilogx(k,psi,'y-',label=r'$\psi$')
ax_Tk.semilogx(k,phi,'r-',label=r'$\phi$')
ax_Tk.semilogx(k,zero_point,'k:',label=r'$-(1+R)\psi$')
ax_Tk.semilogx(k,Theta0,'b-',label=r'$\Theta_0$')
ax_Tk.semilogx(k,(Theta0+psi),'c',label=r'$\Theta_0+\psi$')
ax_Tk.semilogx(k,theta_b,'g-',label=r'$\theta_b$')
#
ax_Tk.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
#######################
#
# second figure with transfer functions squared
#
#######################
ax_Tk2.set_xlim([3.e-4,0.5])
ax_Tk2.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off',labeltop='off')
ax_Tk2.set_ylabel(r'$\mathrm{Transfer}(\tau_\mathrm{dec},k)^2$')
ax_Tk2.grid()
#
ax_Tk2.semilogx(k,(Theta0+psi)**2,'c',label=r'$(\Theta_0+\psi)^2$')
#
ax_Tk2.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
########################
#
# third figure with all contributions to Cls
#
# For that we will need to call CLASS again for each contribution (TSW, earlyISW, lateISW, Doppler, total)
# Note that there is another contribution from polarisation: we don't plot it individually because it is
# too small to be seen, however it is included by default in the total.
#
# After each step we will save the figure (to get intermediate figures for the slides)
#
#########################
# presentation settings
ax_Cl.set_xlim([3.e-4,0.5])
ax_Cl.set_ylim([0.,8.])
ax_Cl.set_xlabel(r'$\ell/(\tau_0-\tau_{rec}) \,\,\, \mathrm{[h/Mpc]}$')
ax_Cl.set_ylabel(r'$\ell (\ell+1) C_l^{TT} / 2 \pi \,\,\, [\times 10^{10}]$')
ax_Cl.tick_params(axis='x',which='both',bottom='on',top='off',labelbottom='on',labeltop='off')
ax_Cl.grid()
#
# the x-axis will show l/(tau_0-tau_rec), so we need (tau_0-tau_rec) in units of [Mpc/h]
#
tau_0_minus_tau_rec_hMpc = (derived['conformal_age']-derived['tau_rec'])*M.h()
#
# save the total Cl's (we will plot them in the last step)
#
cl_tot = M.raw_cl(5000)
#
# call CLASS with TSW, then plot and save
# (each contribution requires a fresh CLASS run: clean, re-set, re-compute)
#
M.struct_cleanup() # clean output
M.empty() # clean input
M.set(common_settings) # new input
M.set({'temperature contributions':'tsw'})
M.compute()
cl = M.raw_cl(5000)
#
ax_Cl.semilogx(cl['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl['ell']*(cl['ell']+1.)*cl['tt']/2./math.pi,'c-',label=r'$\mathrm{T+SW}$')
#
ax_Cl.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
fig.savefig('one_time_with_cl_1.pdf',bbox_inches='tight')
#
# call CLASS with early ISW, plot; call CLASS with late ISW, plot; then save
#
M.struct_cleanup()
M.empty()
M.set(common_settings)
M.set({'temperature contributions':'eisw'})
M.compute()
cl = M.raw_cl(5000)
#
ax_Cl.semilogx(cl['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl['ell']*(cl['ell']+1.)*cl['tt']/2./math.pi,'r-',label=r'$\mathrm{early} \,\, \mathrm{ISW}$')
#
M.struct_cleanup()
M.empty()
M.set(common_settings)
M.set({'temperature contributions':'lisw'})
M.compute()
cl = M.raw_cl(5000)
#
ax_Cl.semilogx(cl['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl['ell']*(cl['ell']+1.)*cl['tt']/2./math.pi,'y-',label=r'$\mathrm{late} \,\, \mathrm{ISW}$')
#
ax_Cl.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
fig.savefig('one_time_with_cl_2.pdf',bbox_inches='tight')
#
# call CLASS with Doppler, then plot and save
#
M.struct_cleanup()
M.empty()
M.set(common_settings)
M.set({'temperature contributions':'dop'})
M.compute()
cl = M.raw_cl(5000)
#
ax_Cl.semilogx(cl['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl['ell']*(cl['ell']+1.)*cl['tt']/2./math.pi,'g-',label=r'$\mathrm{Doppler}$')
#
ax_Cl.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
fig.savefig('one_time_with_cl_3.pdf',bbox_inches='tight')
#
# plot the total Cls that had been stored, and save
#
ax_Cl.semilogx(cl_tot['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl_tot['ell']*(cl_tot['ell']+1.)*cl_tot['tt']/2./math.pi,'k-',label=r'$\mathrm{Total}$')
#
ax_Cl.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
fig.savefig('one_time_with_cl_tot.pdf',bbox_inches='tight')
|
misharashREPO_NAMEclass_publicPATH_START.@class_public_extracted@class_public-master@scripts@one_time.py@.PATH_END.py
|
{
"filename": "_variantsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sankey/link/hoverlabel/font/_variantsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `variantsrc` property of sankey.link.hoverlabel.font."""

    def __init__(
        self,
        plotly_name="variantsrc",
        parent_name="sankey.link.hoverlabel.font",
        **kwargs,
    ):
        # Default edit type is "none" unless the caller supplies one.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sankey@link@hoverlabel@font@_variantsrc.py@.PATH_END.py
|
{
"filename": "doctest.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/doctest.py",
"type": "Python"
}
|
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'

# Names exported by `from doctest import *`, grouped by the numbered
# sections used throughout this module.
__all__ = [
    # 0, Option Flags
    'register_optionflag',
    'DONT_ACCEPT_TRUE_FOR_1',
    'DONT_ACCEPT_BLANKLINE',
    'NORMALIZE_WHITESPACE',
    'ELLIPSIS',
    'SKIP',
    'IGNORE_EXCEPTION_DETAIL',
    'COMPARISON_FLAGS',
    'REPORT_UDIFF',
    'REPORT_CDIFF',
    'REPORT_NDIFF',
    'REPORT_ONLY_FIRST_FAILURE',
    'REPORTING_FLAGS',
    'FAIL_FAST',
    # 1. Utility Functions
    # 2. Example & DocTest
    'Example',
    'DocTest',
    # 3. Doctest Parser
    'DocTestParser',
    # 4. Doctest Finder
    'DocTestFinder',
    # 5. Doctest Runner
    'DocTestRunner',
    'OutputChecker',
    'DocTestFailure',
    'UnexpectedException',
    'DebugRunner',
    # 6. Test Functions
    'testmod',
    'testfile',
    'run_docstring_examples',
    # 7. Unittest Support
    'DocTestSuite',
    'DocFileSuite',
    'set_unittest_reportflags',
    # 8. Debugging Support
    'script_from_examples',
    'testsource',
    'debug_src',
    'debug',
]
import __future__
import difflib
import inspect
import linecache
import os
import pdb
import re
import sys
import traceback
import unittest
from io import StringIO, IncrementalNewlineDecoder
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.

# Registry mapping option-flag names to their integer bit values.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    # Create a new flag unless `name` is already known.
    # Each newly registered flag receives the next free bit (1, 2, 4, ...).
    return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
# Flags that control how example output is *compared*.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    SKIP |
                    IGNORE_EXCEPTION_DETAIL)

# Flags that control how failures are *reported*.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
FAIL_FAST = register_optionflag('FAIL_FAST')

REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE |
                   FAIL_FAST)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Unittest Support
# 8. Debugging Support
# 9. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
    """
    Return the compiler-flags associated with the future features that
    have been imported into the given namespace (globs).
    """
    flags = 0
    for feature_name in __future__.all_feature_names:
        # Only count names bound to the genuine __future__ feature object.
        if globs.get(feature_name) is getattr(__future__, feature_name):
            flags |= globs[feature_name].compiler_flag
    return flags
def _normalize_module(module, depth=2):
    """
    Return the module specified by `module`.  In particular:
      - If `module` is a module, then return module.
      - If `module` is a string, then import and return the
        module with that name.
      - If `module` is None, then return the calling module.
        The calling module is assumed to be the module of
        the stack frame at the given depth in the call stack.
    """
    if inspect.ismodule(module):
        return module
    elif isinstance(module, str):
        return __import__(module, globals(), locals(), ["*"])
    elif module is None:
        try:
            try:
                # sys._getframemodulename avoids materializing a frame object.
                return sys.modules[sys._getframemodulename(depth)]
            except AttributeError:
                # Fallback for interpreters without _getframemodulename.
                return sys.modules[sys._getframe(depth).f_globals['__name__']]
        except KeyError:
            # Calling module not found in sys.modules: fall through and
            # implicitly return None.
            pass
    else:
        raise TypeError("Expected a module, string, or None")
def _newline_convert(data):
    """Translate CR and CRLF line endings in *data* to LF."""
    # The IO module provides a handy decoder for universal newline conversion.
    decoder = IncrementalNewlineDecoder(None, True)
    return decoder.decode(data, True)
def _load_testfile(filename, package, module_relative, encoding):
    # Resolve and read a doctest file, returning (contents, filename).
    # Newlines are normalized so tests behave identically regardless of the
    # platform line endings the file was saved with.
    if module_relative:
        package = _normalize_module(package, 3)
        filename = _module_relative_path(package, filename)
    # Locate a loader that can supply the file's bytes via get_data().
    if (loader := getattr(package, '__loader__', None)) is None:
        try:
            loader = package.__spec__.loader
        except AttributeError:
            pass
    if hasattr(loader, 'get_data'):
        file_contents = loader.get_data(filename)
        file_contents = file_contents.decode(encoding)
        # get_data() opens files as 'rb', so one must do the equivalent
        # conversion as universal newlines would do.
        return _newline_convert(file_contents), filename
    # No suitable loader: read straight from the filesystem.
    with open(filename, encoding=encoding) as f:
        return f.read(), filename
def _indent(s, indent=4):
    """
    Add the given number of space characters to the beginning of
    every non-blank line in `s`, and return the result.
    """
    pad = ' ' * indent
    # Prefix every non-empty line; empty lines are left untouched.
    return '\n'.join(pad + line if line else line
                     for line in s.split('\n'))
def _exception_traceback(exc_info):
    """
    Return a string containing a traceback message for the given
    exc_info tuple (as returned by sys.exc_info()).
    """
    # Render the traceback into an in-memory buffer.
    buf = StringIO()
    etype, value, tb = exc_info
    traceback.print_exception(etype, value, tb, file=buf)
    return buf.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """StringIO variant whose getvalue() always ends with a newline."""

    def getvalue(self):
        out = StringIO.getvalue(self)
        # Expected doctest output has no way to indicate a missing trailing
        # newline, so normalize any non-empty capture to end with one.
        if out and not out.endswith("\n"):
            out += "\n"
        return out

    def truncate(self, size=None):
        # Move to the requested position first, then cut the buffer there.
        self.seek(size)
        StringIO.truncate(self)
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    # Fast path: without an ellipsis marker this is plain string equality.
    if ELLIPSIS_MARKER not in want:
        return want == got

    # Find "the real" strings.
    ws = want.split(ELLIPSIS_MARKER)
    assert len(ws) >= 2

    # Deal with exact matches possibly needed at one or both ends.
    startpos, endpos = 0, len(got)
    w = ws[0]
    if w:   # starts with exact match
        if got.startswith(w):
            startpos = len(w)
            del ws[0]
        else:
            return False
    w = ws[-1]
    if w:   # ends with exact match
        if got.endswith(w):
            endpos -= len(w)
            del ws[-1]
        else:
            return False

    if startpos > endpos:
        # Exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False

    # For the rest, we only need to find the leftmost non-overlapping
    # match for each piece.  If there's no overall match that way alone,
    # there's no overall match period.
    for w in ws:
        # w may be '' at times, if there are consecutive ellipses, or
        # due to an ellipsis at the start or end of `want`.  That's OK.
        # Search for an empty string succeeds, and doesn't change startpos.
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)

    return True
def _comment_line(line):
    """Return a commented form of the given line."""
    stripped = line.rstrip()
    # An all-whitespace line becomes a bare '#'.
    return '# ' + stripped if stripped else '#'
def _strip_exception_details(msg):
    # Support for IGNORE_EXCEPTION_DETAIL.
    # Reduce the message to the bare exception name: drop any dotted
    # module-path prefix and any ": detail" suffix.
    # E.g., given
    #    "foo.bar.MyError: la di da"
    # return "MyError"
    # Or for "abc.def" or "abc.def:\n" return "def".
    end = len(msg)
    # The exception name must appear on the first line.
    newline = msg.find("\n")
    if newline >= 0:
        end = newline
    # Keep only what precedes the first colon (if any).
    colon = msg.find(':', 0, end)
    if colon >= 0:
        end = colon
    # Keep only what follows the last dot (if any).
    dot = msg.rfind('.', 0, end)
    start = dot + 1 if dot >= 0 else 0
    return msg[start:end]
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # Stream that receives the debugger's own interactive output.
        self.__out = out
        # Becomes True once set_trace() has been entered at least once.
        self.__debugger_used = False
        # do not play signal games in the pdb
        pdb.Pdb.__init__(self, stdout=out, nosigint=True)
        # still use input() to get user input
        self.use_rawinput = 1

    def set_trace(self, frame=None):
        self.__debugger_used = True
        if frame is None:
            # Default to the caller's frame.
            frame = sys._getframe().f_back
        pdb.Pdb.set_trace(self, frame)

    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)

    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            # Always restore the real stdout, even if dispatch raises.
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, test_path):
    """Convert a '/'-separated *test_path* relative to *module* into an OS path."""
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if test_path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')

    # Normalize the path. On Windows, replace "/" with "\".
    test_path = os.path.join(*(test_path.split('/')))

    # Find the base directory for the path.
    if hasattr(module, '__file__'):
        # A normal module/package
        basedir = os.path.split(module.__file__)[0]
    elif module.__name__ == '__main__':
        # An interactive session.
        if len(sys.argv)>0 and sys.argv[0] != '':
            basedir = os.path.split(sys.argv[0])[0]
        else:
            basedir = os.curdir
    else:
        if hasattr(module, '__path__'):
            # Module has no __file__ but does have a search path:
            # try each __path__ entry in turn.
            for directory in module.__path__:
                fullpath = os.path.join(directory, test_path)
                if os.path.exists(fullpath):
                    return fullpath

        # A module w/o __file__ (this includes builtins)
        raise ValueError("Can't resolve paths relative to the module "
                         "%r (it has no __file__)"
                         % module.__name__)

    # Combine the base directory and the test path.
    return os.path.join(basedir, test_path)
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example, consisting of source code and expected
    output.  `Example` defines the following attributes:

      - source: A single Python statement, always ending with a newline.
        The constructor adds a newline if needed.

      - want: The expected output from running the source code (either
        from stdout, or a traceback in case of exception).  `want` ends
        with a newline unless it's empty, in which case it's an empty
        string.  The constructor adds a newline if needed.

      - exc_msg: The exception message generated by the example, if
        the example is expected to generate an exception; or `None` if
        it is not expected to generate an exception.  This exception
        message is compared against the return value of
        `traceback.format_exception_only()`.  `exc_msg` ends with a
        newline unless it's `None`.  The constructor adds a newline
        if needed.

      - lineno: The line number within the DocTest string containing
        this Example where the Example begins.  This line number is
        zero-based, with respect to the beginning of the DocTest.

      - indent: The example's indentation in the DocTest string.
        I.e., the number of space characters that precede the
        example's first prompt.

      - options: A dictionary mapping from option flags to True or
        False, which is used to override default options for this
        example.  Any option flags not contained in this dictionary
        are left at their default value (as specified by the
        DocTestRunner's optionflags).  By default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize inputs: source/want/exc_msg always end with a newline.
        if not source.endswith('\n'):
            source += '\n'
        if want and not want.endswith('\n'):
            want += '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg += '\n'
        # Store properties.
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        if options is None: options = {}
        self.options = options
        self.exc_msg = exc_msg

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented

        return self.source == other.source and \
               self.want == other.want and \
               self.lineno == other.lineno and \
               self.indent == other.indent and \
               self.options == other.options and \
               self.exc_msg == other.exc_msg

    def __hash__(self):
        # `options` (a dict) is unhashable and is deliberately left out;
        # equal Examples still hash equal.
        return hash((self.source, self.want, self.lineno, self.indent,
                     self.exc_msg))
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:

      - examples: the list of examples.

      - globs: The namespace (aka globals) that the examples should
        be run in.

      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).

      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.

      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.

      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, str), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # Copy so that running examples can't mutate the caller's dict.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        # Summarize as "<DocTest name from file:line (N examples)>".
        if len(self.examples) == 0:
            examples = 'no examples'
        elif len(self.examples) == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % len(self.examples)
        return ('<%s %s from %s:%s (%s)>' %
                (self.__class__.__name__,
                 self.name, self.filename, self.lineno, examples))

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented

        return self.examples == other.examples and \
               self.docstring == other.docstring and \
               self.globs == other.globs and \
               self.name == other.name and \
               self.filename == other.filename and \
               self.lineno == other.lineno

    def __hash__(self):
        # `examples` and `globs` are mutable and are left out of the hash.
        return hash((self.docstring, self.name, self.filename, self.lineno))

    # This lets us sort tests by name:
    def __lt__(self, other):
        if not isinstance(other, DocTest):
            return NotImplemented
        # An unknown line number sorts before line 0; id() serves as a
        # final tie-breaker so the ordering is total.
        self_lno = self.lineno if self.lineno is not None else -1
        other_lno = other.lineno if other.lineno is not None else -1
        return ((self.name, self.filename, self_lno, id(self))
                <
                (other.name, other.filename, other_lno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.+$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
    """
    Extract all doctest examples from the given string, and return
    them as a list of `Example` objects.  Line numbers are
    0-based, because it's most common in doctests that nothing
    interesting appears on the same line as opening triple-quote,
    and so the first interesting line is called \"line 1\" then.

    The optional argument `name` is a name identifying this
    string, and is only used for error messages.
    """
    # `parse` yields alternating plain-text strings and Example
    # objects; keep only the Examples.
    return [piece for piece in self.parse(string, name)
            if isinstance(piece, Example)]
def _parse_example(self, m, name, lineno):
    """
    Given a regular expression match from `_EXAMPLE_RE` (`m`),
    return a tuple `(source, options, want, exc_msg)`, where
    `source` is the matched example's source code (with prompts and
    indentation stripped); `options` is a dict of option-flag
    overrides; `want` is the example's expected output (with
    indentation stripped); and `exc_msg` is the expected exception
    message, or None if no traceback is expected.

    `name` is the string's name, and `lineno` is the line number
    where the example starts; both are used for error messages.
    """
    # Get the example's indentation level.
    indent = len(m.group('indent'))
    # Divide source into lines; check that they're properly
    # indented; and then strip their indentation & prompts.
    source_lines = m.group('source').split('\n')
    self._check_prompt_blank(source_lines, indent, name, lineno)
    self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
    # indent+4 skips the indentation plus the 4-character prompt
    # ('>>> ' or '... ').
    source = '\n'.join([sl[indent+4:] for sl in source_lines])
    # Divide want into lines; check that it's properly indented; and
    # then strip the indentation.  Spaces before the last newline should
    # be preserved, so plain rstrip() isn't good enough.
    want = m.group('want')
    want_lines = want.split('\n')
    if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
        del want_lines[-1]  # forget final newline & spaces after it
    self._check_prefix(want_lines, ' '*indent, name,
                       lineno + len(source_lines))
    want = '\n'.join([wl[indent:] for wl in want_lines])
    # If `want` contains a traceback message, then extract it.
    m = self._EXCEPTION_RE.match(want)
    if m:
        exc_msg = m.group('msg')
    else:
        exc_msg = None
    # Extract options from the source.
    options = self._find_options(source, name, lineno)
    return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example.  Option directives are comments
# starting with "doctest:".  Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:".  Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                  re.MULTILINE)

def _find_options(self, source, name, lineno):
    """
    Return a dictionary containing option overrides extracted from
    option directives in the given source string.  Keys are option
    flags; values are True for '+' directives and False for '-'.

    `name` is the string's name, and `lineno` is the line number
    where the example starts; both are used for error messages.
    Raises ValueError for an unknown/malformed option, or for a
    directive on a line with no actual example code.
    """
    options = {}
    # (note: with the current regexp, this will match at most once:)
    for m in self._OPTION_DIRECTIVE_RE.finditer(source):
        # Directives may be separated by commas and/or whitespace.
        option_strings = m.group(1).replace(',', ' ').split()
        for option in option_strings:
            if (option[0] not in '+-' or
                option[1:] not in OPTIONFLAGS_BY_NAME):
                raise ValueError('line %r of the doctest for %s '
                                 'has an invalid option: %r' %
                                 (lineno+1, name, option))
            flag = OPTIONFLAGS_BY_NAME[option[1:]]
            options[flag] = (option[0] == '+')
    # An option directive on a blank/comment-only "example" is an error:
    # there is no example for the directive to apply to.
    if options and self._IS_BLANK_OR_COMMENT(source):
        raise ValueError('line %r of the doctest for %s has an option '
                         'directive on a line with no example: %r' %
                         (lineno, name, source))
    return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)

def _min_indent(self, s):
    """Return the minimum indentation of any non-blank line in `s`.

    Blank lines are ignored; if there are no non-blank lines at
    all, return 0.
    """
    widths = [len(prefix) for prefix in self._INDENT_RE.findall(s)]
    return min(widths) if widths else 0
def _check_prompt_blank(self, lines, indent, name, lineno):
    """
    Given the lines of a source string (including prompts and
    leading indentation), check to make sure that every prompt is
    followed by a space character.  If any line is not followed by
    a space character, then raise ValueError.
    """
    for offset, line in enumerate(lines):
        # Lines shorter than the prompt, and lines whose prompt is
        # followed by a space, are fine.
        if len(line) < indent+4 or line[indent+3] == ' ':
            continue
        raise ValueError('line %r of the docstring for %s '
                         'lacks blank after %s: %r' %
                         (lineno+offset+1, name,
                          line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
    """
    Check that every line in the given list starts with the given
    prefix; if any line does not, then raise a ValueError.
    Empty lines are exempt from the check.
    """
    for offset, line in enumerate(lines):
        if not line:
            continue
        if not line.startswith(prefix):
            raise ValueError('line %r of the docstring for %s has '
                             'inconsistent leading whitespace: %r' %
                             (lineno+offset+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """

    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty

    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj)
        except TypeError:
            source_lines = None
        else:
            if not file:
                # Check to see if it's one of our special internal "files"
                # (see __patched_linecache_getlines).  Those look like
                # '<doctest ...[N]>', i.e. start with '<' and end with ']>'.
                file = inspect.getfile(obj)
                if not file[0]+file[-2:] == '<]>': file = None
            if file is None:
                source_lines = None
            else:
                if module is not None:
                    # Supply the module globals in case the module was
                    # originally loaded via a PEP 302 loader and
                    # file is not a valid filesystem path
                    source_lines = linecache.getlines(file, module.__dict__)
                else:
                    # No access to a loader, so assume it's a normal
                    # filesystem path
                    source_lines = linecache.getlines(file)
                if not source_lines:
                    source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        if '__name__' not in globs:
            globs['__name__'] = '__main__'  # provide a default module name

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output.  This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
        # 2.4.4 and 2.5.
        tests.sort()
        return tests

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif inspect.isfunction(object):
            # A function's defining module owns its __globals__ dict.
            return module.__dict__ is object.__globals__
        elif (inspect.ismethoddescriptor(object) or
              inspect.ismethodwrapper(object)):
            if hasattr(object, '__objclass__') and hasattr(object.__objclass__, '__module__'):
                obj_mod = object.__objclass__.__module__
            elif hasattr(object, '__module__'):
                obj_mod = object.__module__
            else:
                return True  # [XX] no easy way to tell otherwise
            return module.__name__ == obj_mod
        elif inspect.isclass(object):
            try:
                return module.__name__ == object.__module__
            except:
                # __module__ access can raise for exotic objects; err
                # on the side of including the object.
                return True
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True  # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")

    def _is_routine(self, obj):
        """
        Safely unwrap objects and determine if they are functions.
        """
        maybe_routine = obj
        try:
            maybe_routine = inspect.unwrap(maybe_routine)
        except ValueError:
            # unwrap() raises ValueError on a __wrapped__ cycle; fall
            # back to testing the original object.
            pass
        return inspect.isroutine(maybe_routine)

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.  `seen` maps id(obj) -> 1 to avoid
        re-processing (and infinite recursion on) shared objects.
        """
        if self._verbose:
            print('Finding tests in %s' % name)

        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)

                # Recurse to functions & classes.
                if ((self._is_routine(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, str):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isroutine(val) or inspect.isclass(val) or
                        inspect.ismodule(val) or isinstance(val, str)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod.
                if isinstance(val, (staticmethod, classmethod)):
                    val = val.__func__

                # Recurse to methods, properties, and nested classes.
                if ((inspect.isroutine(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, str):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, str):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''

        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)

        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            # __file__ can be None for namespace packages.
            filename = getattr(module, '__file__', None) or module.__name__
            if filename[-4:] == ".pyc":
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)

    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.
        Line numbers are 0-based.

        Returns `None` if the given object does not have a docstring.
        """
        lineno = None
        docstring = getattr(obj, '__doc__', None)

        # Find the line number for modules.
        if inspect.ismodule(obj) and docstring is not None:
            lineno = 0

        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj) and docstring is not None:
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             re.escape(getattr(obj, '__name__', '-')))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break

        # Find the line number for functions & methods.
        if inspect.ismethod(obj): obj = obj.__func__
        if isinstance(obj, property):
            obj = obj.fget
        if inspect.isfunction(obj) and getattr(obj, '__doc__', None):
            # We don't use `docstring` var here, because `obj` can be changed.
            obj = inspect.unwrap(obj)
            try:
                obj = obj.__code__
            except AttributeError:
                # Functions implemented in C don't necessarily
                # have a __code__ attribute.
                # If there's no code, there's no lineno
                return None
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = obj.co_firstlineno - 1

        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

        >>> tests = DocTestFinder().find(_TestClass)
        >>> runner = DocTestRunner(verbose=False)
        >>> tests.sort(key = lambda test: test.name)
        >>> for test in tests:
        ...     print(test.name, '->', runner.run(test))
        _TestClass -> TestResults(failed=0, attempted=2)
        _TestClass.__init__ -> TestResults(failed=0, attempted=2)
        _TestClass.get -> TestResults(failed=0, attempted=2)
        _TestClass.square -> TestResults(failed=0, attempted=1)

    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:

        >>> runner.summarize(verbose=1)
        4 items passed all tests:
           2 tests in _TestClass
           2 tests in _TestClass.__init__
           2 tests in _TestClass.get
           1 tests in _TestClass.square
        7 tests in 4 items.
        7 passed and 0 failed.
        Test passed.
        TestResults(failed=0, attempted=7)

    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:

        >>> runner.tries
        7
        >>> runner.failures
        0

    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.

    The test runner's display output can be controlled in two ways.
    First, an output function (`out) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (failures, tries) accumulated so far.
        self._name2ft = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        # Build the standard "File ..., line ..., in ..." header that
        # precedes every failure report.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.

        NOTE: relies on state set up by `run()`: `self.debugger` and
        the stdout/linecache patching.  Do not call directly.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec(compile(example.source, filename, "single",
                             compileflags, True), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====

            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception: check if it was expected.
            else:
                formatted_ex = traceback.format_exception_only(*exception[:2])
                if issubclass(exception[0], SyntaxError):
                    # SyntaxError / IndentationError is special:
                    # we don't care about the carets / suggestions / etc
                    # We only care about the error message and notes.
                    # They start with `SyntaxError:` (or any other class name)
                    exception_line_prefixes = (
                        f"{exception[0].__qualname__}:",
                        f"{exception[0].__module__}.{exception[0].__qualname__}:",
                    )
                    exc_msg_index = next(
                        index
                        for index, line in enumerate(formatted_ex)
                        if line.startswith(exception_line_prefixes)
                    )
                    formatted_ex = formatted_ex[exc_msg_index:]

                exc_msg = "".join(formatted_ex)
                if not quiet:
                    got += _exception_traceback(exception)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    if check(_strip_exception_details(example.exc_msg),
                             _strip_exception_details(exc_msg),
                             self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exception)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

            if failures and self.optionflags & FAIL_FAST:
                break

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return TestResults(failures, tries)

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t

    # Matches the special filenames generated in __run, e.g.
    # '<doctest some.test.name[3]>'.
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>.+)'
                                         r'\[(?P<examplenum>\d+)\]>$')

    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Serve example source for our synthetic '<doctest ...>' filenames;
        # delegate everything else to the saved linecache.getlines.
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(keepends=True)
        else:
            return self.save_linecache_getlines(filename, module_globals)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            encoding = save_stdout.encoding
            if encoding is None or encoding.lower() == 'utf-8':
                out = save_stdout.write
            else:
                # Use backslashreplace error handling on write
                def out(s):
                    s = str(s.encode(encoding, 'backslashreplace'), encoding)
                    save_stdout.write(s)
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_trace = sys.gettrace()
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        # Make sure sys.displayhook just prints the value to stdout
        save_displayhook = sys.displayhook
        sys.displayhook = sys.__displayhook__

        try:
            return self.__run(test, compileflags, out)
        finally:
            # Restore everything we patched, in reverse of the order
            # it was saved, regardless of how __run exited.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            sys.settrace(save_trace)
            linecache.getlines = self.save_linecache_getlines
            sys.displayhook = save_displayhook
            if clear_globs:
                test.globs.clear()
                import builtins
                builtins._ = None

    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print(len(notests), "items had no tests:")
                notests.sort()
                for thing in notests:
                    print(" ", thing)
            if passed:
                print(len(passed), "items passed all tests:")
                passed.sort()
                for thing, count in passed:
                    print(" %3d tests in %s" % (count, thing))
        if failed:
            print(self.DIVIDER)
            print(len(failed), "items had failures:")
            failed.sort()
            for thing, (f, t) in failed:
                print(" %3d of %3d in %s" % (f, t, thing))
        if verbose:
            print(totalt, "tests in", len(self._name2ft), "items.")
            print(totalt - totalf, "passed and", totalf, "failed.")
        if totalf:
            print("***Test Failed***", totalf, "failures.")
        elif verbose:
            print("Test passed.")
        return TestResults(totalf, totalt)

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        # Fold another runner's per-test (failures, tries) counts into
        # this runner's records, summing counts for shared test names.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                # Don't print here by default, since doing
                # so breaks some of the buildbots
                #print("*** DocTestRunner.merge: '" + name + "' in both" \
                #    " testers; summing outcomes.")
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
"""
A class used to check the whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def _toAscii(self, s):
    """
    Convert string to hex-escaped ASCII string.
    """
    encoded = s.encode('ASCII', 'backslashreplace')
    return str(encoded, "ASCII")
def check_output(self, want, got, optionflags):
    """
    Return True iff the actual output from an example (`got`)
    matches the expected output (`want`).  These strings are
    always considered to match if they are identical; but
    depending on what option flags the test runner is using,
    several non-exact match types are also possible.  See the
    documentation for `TestRunner` for more information about
    option flags.
    """

    # If `want` contains hex-escaped character such as "\u1234",
    # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]).
    # On the other hand, `got` could be another sequence of
    # characters such as [\u1234], so `want` and `got` should
    # be folded to hex-escaped ASCII string to compare.
    got = self._toAscii(got)
    want = self._toAscii(want)

    # Handle the common case first, for efficiency:
    # if they're string-identical, always return true.
    if got == want:
        return True

    # The values True and False replaced 1 and 0 as the return
    # value for boolean comparisons in Python 2.3.
    if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
        if (got,want) == ("True\n", "1\n"):
            return True
        if (got,want) == ("False\n", "0\n"):
            return True

    # <BLANKLINE> can be used as a special sequence to signify a
    # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
    if not (optionflags & DONT_ACCEPT_BLANKLINE):
        # Replace <BLANKLINE> in want with a blank line.
        want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                      '', want)
        # If a line in got contains only spaces, then remove the
        # spaces.
        got = re.sub(r'(?m)^[^\S\n]+$', '', got)
        if got == want:
            return True

    # This flag causes doctest to ignore any differences in the
    # contents of whitespace strings.  Note that this can be used
    # in conjunction with the ELLIPSIS flag.
    if optionflags & NORMALIZE_WHITESPACE:
        got = ' '.join(got.split())
        want = ' '.join(want.split())
        if got == want:
            return True

    # The ELLIPSIS flag says to let the sequence "..." in `want`
    # match any substring in `got`.
    if optionflags & ELLIPSIS:
        if _ellipsis_match(want, got):
            return True

    # We didn't find any match; return false.
    return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
    """Return True iff a difflib-based diff should be shown for this
    failure, based on the REPORT_* option flags and the size of the
    outputs."""
    # Not unless they asked for a fancy diff.
    fancy = REPORT_UDIFF | REPORT_CDIFF | REPORT_NDIFF
    if not optionflags & fancy:
        return False

    # If expected output uses ellipsis, a meaningful fancy diff is
    # too hard ... or maybe not.  In two real-life failures Tim saw,
    # a diff was a major help anyway, so this is commented out.
    # [todo] _ellipsis_match() knows which pieces do and don't match,
    # and could be the basis for a kick-ass diff in this case.
    ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
    ##    return False

    # ndiff does intraline difference marking, so can be useful even
    # for 1-line differences.
    if optionflags & REPORT_NDIFF:
        return True

    # The other diff types need at least a few lines to be helpful.
    return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(keepends=True)
got_lines = got.splitlines(keepends=True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised (in debugging mode) when a doctest example's actual output
    does not match its expected output.

    Attributes:
      test:    the DocTest object being run
      example: the Example that failed
      got:     the output the example actually produced
    """

    def __init__(self, test, example, got):
        self.test, self.example, self.got = test, example, got

    def __str__(self):
        return str(self.test)
class UnexpectedException(Exception):
    """Raised when a doctest example triggers an exception that was not
    part of its expected output.

    Attributes:
      test:     the DocTest object being run
      example:  the Example that failed
      exc_info: the (type, value, traceback) triple of the exception
    """

    def __init__(self, test, example, exc_info):
        self.test, self.example, self.exc_info = test, example, exc_info

    def __str__(self):
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
    >>> runner = DebugRunner(verbose=False)
    >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    ... {}, 'foo', 'foo.py', 0)
    >>> try:
    ... runner.run(test)
    ... except UnexpectedException as f:
    ... failure = f
    >>> failure.test is test
    True
    >>> failure.example.want
    '42\n'
    >>> exc_info = failure.exc_info
    >>> raise exc_info[1] # Already has the traceback
    Traceback (most recent call last):
    ...
    KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 1
    ... >>> x
    ... 2
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> try:
    ... runner.run(test)
    ... except DocTestFailure as f:
    ... failure = f
    DocTestFailure objects provide access to the test:
    >>> failure.test is test
    True
    As well as to the example:
    >>> failure.example.want
    '2\n'
    and the actual output:
    >>> failure.got
    '1\n'
    If a failure or error occurs, the globals are left intact:
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 1}
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 2
    ... >>> raise KeyError
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    Traceback (most recent call last):
    ...
    doctest.UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 2}
    But the globals are cleared if there is no error:
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 2
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    TestResults(failed=0, attempted=1)
    >>> test.globs
    {}
    """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Ask the base class NOT to clear globals: if an example raises,
        # we want them intact for post-mortem inspection.  Clear them
        # ourselves only after a fully successful run.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately, carrying the full exception context.
        raise UnexpectedException(test, example, exc_info)

    def report_failure(self, out, test, example, got):
        # Abort the run immediately on the first output mismatch.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.

# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
# It accumulates results across successive testmod()/testfile() calls so
# that master.summarize() can report a grand total at the end.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See help(doctest) for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Test examples in the given file.  Return (#failures, #tests).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:

      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path.  By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package.  To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").

      - If "module_relative" is False, then "filename" specifies an
        os-specific path.  The path may be absolute or relative (to
        the current working directory).

    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.

    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify "package" if "module_relative" is False.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Optional keyword arg "encoding" specifies an encoding that should
    be used to convert the file to unicode.

    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    # "package" only makes sense for module-relative lookups.
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path
    text, filename = _load_testfile(filename, package, module_relative,
                                    encoding or "utf-8")

    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'

    # raise_on_error selects the fail-fast DebugRunner over the
    # summarizing DocTestRunner.
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    # Read the file, convert it to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Fold this run into the module-wide `master` accumulator (see testmod).
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Test examples in the given object's docstring (`f`), using `globs`
    as globals.  Optional argument `name` is used in failure messages.
    If the optional argument `verbose` is true, then generate output
    even if there are no failures.

    `compileflags` gives the set of flags that should be used by the
    Python compiler when running the examples.  If not specified, then
    it will default to the set of future-import flags that apply to
    `globs`.

    Optional keyword arg `optionflags` specifies options for the
    testing and output.  See the documentation for `testmod` for more
    information.
    """
    # Find, parse, and run all tests in the given module.
    # recurse=False: only f's own docstring is searched, not the
    # docstrings of objects contained in f.
    finder = DocTestFinder(verbose=verbose, recurse=False)
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for test in finder.find(f, name, globs=globs):
        runner.run(test, compileflags=compileflags)
######################################################################
## 7. Unittest Support
######################################################################

# Module-wide default reporting flags.  DocTestCase.runTest ORs these in
# when a case was created with no reporting flags of its own; set via
# set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> import doctest
    >>> old = doctest._unittest_reportflags
    >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
    ... REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ... REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> doctest.set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
    ... REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags

    # Reject anything that is not purely a reporting flag.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)

    previous = _unittest_reportflags
    _unittest_reportflags = flags
    return previous
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
self._dt_globs = test.globs.copy()
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
# restore the original globs
test.globs.clear()
test.globs.update(self._dt_globs)
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException as f:
... failure = f
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[1] # Already has the traceback
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure as f:
... failure = f
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
def id(self):
return self._dt_test.name
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._dt_test == other._dt_test and \
self._dt_optionflags == other._dt_optionflags and \
self._dt_setUp == other._dt_setUp and \
self._dt_tearDown == other._dt_tearDown and \
self._dt_checker == other._dt_checker
def __hash__(self):
return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
self._dt_checker))
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = object.__str__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
    # Placeholder case used when doctests cannot run at all (e.g. under
    # -O2, where docstrings are stripped); it reports a skip instead of
    # silently producing an empty suite.

    def __init__(self, module):
        # The module whose doctests are being skipped.
        self.module = module
        DocTestCase.__init__(self, None)

    def setUp(self):
        # Always skip: running would be meaningless without docstrings.
        self.skipTest("DocTestSuite will not work with -O2 and above")

    def test_skip(self):
        pass

    def shortDescription(self):
        return "Skipping tests from %s" % self.module.__name__

    __str__ = shortDescription
class _DocTestSuite(unittest.TestSuite):
    # TestSuite that never discards its cases after a run, so doctest
    # suites can be re-run.

    def _removeTestAtIndex(self, index):
        # Overridden to do nothing: keep every test referenced.
        pass
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.

    This converts each documentation string in a module that
    contains doctest tests to a unittest test case.  If any of the
    tests in a doc string fail, then the test case fails.  An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.

    The `module` argument provides the module to be tested.  The argument
    can be either a module or a module name.

    If no argument is given, the calling module is used.

    A number of options may be provided as keyword arguments:

    setUp
      A set-up function.  This is called before running the
      tests in each file.  The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
       A set of doctest option flags expressed as an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)

    if not tests and sys.flags.optimize >= 2:
        # Skip doctests when running with -O2
        suite = _DocTestSuite()
        suite.addTest(SkipDocTestCase(module))
        return suite

    suite = _DocTestSuite()
    for test in sorted(tests):
        if not test.examples:
            continue
        if not test.filename:
            # Fall back on the module's file, mapping .pyc back to .py.
            filename = module.__file__
            if filename.endswith(".pyc"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))
    return suite
class DocFileCase(DocTestCase):
    # DocTestCase variant for doctests loaded from standalone text files.

    def id(self):
        return '_'.join(self._dt_test.name.split('.'))

    def __repr__(self):
        return self._dt_test.filename

    def format_failure(self, err):
        test = self._dt_test
        return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
                % (test.name, test.filename, err)
                )
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    # Build a DocFileCase from a single doctest text file.
    globs = {} if globs is None else globs.copy()

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path.
    doc, path = _load_testfile(path, package, module_relative,
                               encoding or "utf-8")

    globs.setdefault("__file__", path)

    # Find the file and read it.
    name = os.path.basename(path)

    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.

    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".

    A number of options may be provided as keyword arguments:

    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths.  By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package.  To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").

      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths.  These paths may be absolute
      or relative (to the current working directory).

    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames.  It is an error to specify "package" if
      "module_relative" is False.

    setUp
      A set-up function.  This is called before running the
      tests in each file.  The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
      A set of doctest option flags expressed as an integer.

    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.

    encoding
      An encoding that will be used to convert the files to unicode.
    """
    suite = _DocTestSuite()

    # We do this here so that _normalize_module is called at the right
    # level.  If it were called in DocFileTest, then this function
    # would be the caller and we might guess the package incorrectly.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    suite.addTests(DocFileTest(path, **kw) for path in paths)
    return suite
######################################################################
## 8. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script.  Example input is
    converted to regular code.  Example output and all other words
    are converted to comments:
    >>> text = '''
    ... Here are examples of simple math.
    ...
    ... Python has super accurate integer addition
    ...
    ... >>> 2 + 2
    ... 5
    ...
    ... And very friendly error messages:
    ...
    ... >>> 1/0
    ... To Infinity
    ... And
    ... Beyond
    ...
    ... You can use logic if you want:
    ...
    ... >>> if 0:
    ... ... blah
    ... ... blah
    ... ...
    ...
    ... Ho hum
    ... '''
    >>> print(script_from_examples(text))
    # Here are examples of simple math.
    #
    # Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    # And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    # You can use logic if you want:
    #
    if 0:
    blah
    blah
    #
    # Ho hum
    <BLANKLINE>
    """
    lines = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Example source code, minus its trailing newline.
            lines.append(piece.source[:-1])
            want = piece.want
            if want:
                # Expected output becomes double-commented text.
                lines.append('# Expected:')
                lines.extend('## ' + w for w in want.split('\n')[:-1])
        else:
            # Non-example prose becomes single-commented text.
            lines.extend(_comment_line(w) for w in piece.split('\n')[:-1])

    # Trim bare '#' junk from both ends.
    while lines and lines[-1] == '#':
        lines.pop()
    while lines and lines[0] == '#':
        lines.pop(0)

    # Combine the output, and return it.
    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
    return '\n'.join(lines) + '\n'
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the doc string with tests to be debugged.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring, in argument `src`."""
    # Convert the docstring's examples into a plain script, then hand it
    # to debug_script (post-mortem if `pm` is true).
    testsrc = script_from_examples(src)
    debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
exec(src, globs, globs)
except:
print(sys.exc_info()[1])
p = pdb.Pdb(nosigint=True)
p.reset()
p.interaction(None, sys.exc_info()[2])
else:
pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the docstring with tests to be debugged.
    """
    module = _normalize_module(module)
    debug_script(testsource(module, name), pm, module.__dict__)
######################################################################
## 9. Example Usage
######################################################################
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.
    Methods:
    square()
    get()
    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """

    def __init__(self, val):
        """val -> _TestClass object with associated value val.
        >>> t = _TestClass(123)
        >>> print(t.get())
        123
        """
        self.val = val

    def square(self):
        """square() -> square TestClass's associated value
        >>> _TestClass(13).square().get()
        169
        """
        # Square in place and return self so calls can be chained.
        self.val **= 2
        return self

    def get(self):
        """get() -> return TestClass's associated value.
        >>> x = _TestClass(-42)
        >>> print(x.get())
        -42
        """
        return self.val
# Self-test corpus: testmod() picks this mapping up automatically.  Keys
# name the sub-tests; values are either objects (whose docstrings are
# searched for examples) or raw strings (searched as-is).
__test__ = {"_TestClass": _TestClass,
            "string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
            "bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
            "blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print('foo\n\nbar\n')
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
            "ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print(list(range(1000))) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
            "whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
            }
def _test():
    # Command-line entry point: run the doctests contained in the files
    # (or .py modules) given as arguments.  Returns a process exit
    # status: 0 on success, 1 on the first file with failures.
    import argparse

    parser = argparse.ArgumentParser(description="doctest runner")
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='print very verbose output for all tests')
    parser.add_argument('-o', '--option', action='append',
                        choices=OPTIONFLAGS_BY_NAME.keys(), default=[],
                        help=('specify a doctest option flag to apply'
                              ' to the test run; may be specified more'
                              ' than once to apply multiple options'))
    parser.add_argument('-f', '--fail-fast', action='store_true',
                        help=('stop running tests after first failure (this'
                              ' is a shorthand for -o FAIL_FAST, and is'
                              ' in addition to any other -o options)'))
    parser.add_argument('file', nargs='+',
                        help='file containing the tests to run')
    args = parser.parse_args()

    # Verbose used to be handled by the "inspect argv" magic in DocTestRunner,
    # but since we are using argparse we are passing it manually now.
    verbose = args.verbose

    # Fold the named option flags (plus fail-fast) into one bitmask.
    options = 0
    for option in args.option:
        options |= OPTIONFLAGS_BY_NAME[option]
    if args.fail_fast:
        options |= FAIL_FAST

    for filename in args.file:
        if filename.endswith(".py"):
            # It is a module -- insert its dir into sys.path and try to
            # import it.  If it is part of a package, that possibly
            # won't work because of package imports.
            dirname, modname = os.path.split(filename)
            sys.path.insert(0, dirname)
            m = __import__(modname[:-3])
            del sys.path[0]
            failures, _ = testmod(m, verbose=verbose, optionflags=options)
        else:
            failures, _ = testfile(filename, module_relative=False,
                                   verbose=verbose, optionflags=options)
        if failures:
            return 1
    return 0
if __name__ == "__main__":
    # Allow "python doctest.py <file> ..." to run the CLI driver.
    sys.exit(_test())
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@doctest.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "brinckmann/montepython_public",
"repo_path": "montepython_public_extracted/montepython_public-master/montepython/likelihoods/polarbear/__init__.py",
"type": "Python"
}
|
import os
from math import pi
import numpy as np
from montepython.likelihood_class import Likelihood
class polarbear(Likelihood):
    """POLARBEAR CMB B-mode bandpower likelihood.

    Convolves the lensed Cl^BB spectrum from CLASS with the experiment's
    bandpower window functions and compares the result with the measured
    bandpowers through a Gaussian chi-square.
    """

    def __init__(self, path, data, command_line):
        # Generic likelihood set-up; the .data file read here is expected to
        # define data_directory, bandpower_file and bpwf_file (used below).
        Likelihood.__init__(self, path, data, command_line)
        # Read the four data points from the bandpower file
        self.bandpowers = np.loadtxt(os.path.join(
            self.data_directory, self.bandpower_file))
        # Read the band power window function (bpwf hereafter... yes, but
        # sometimes, explicit is too much)
        self.bpwf = self.load_bandpower_window_function(os.path.join(
            self.data_directory, self.bpwf_file))
        # l_max is now read from the bandpower window functions
        # (last ell entry of the first window).
        self.l_max = int(self.bpwf[0][-1, 0])
        # Require polarization from class
        arguments = {
            'output': 'tCl pCl lCl',
            'lensing': 'yes',
            'l_max_scalars': self.l_max}
        self.need_cosmo_arguments(data, arguments)

    def load_bandpower_window_function(self, path):
        """
        Read n^th blocks in the bpwf_file

        Blocks are delimited by comment ('#') lines: the first comment line
        after a run of data lines starts a new block.  Returns a list of
        numpy arrays, one per bandpower, with ell in column 0 and the
        window weight in column 1 (as consumed by loglkl below).
        """
        size = len(self.bandpowers)
        empty_lines = 0
        blocks = []
        with open(path, 'r') as bpfw_file:
            for line in bpfw_file:
                # Check for comments
                # NOTE(review): `not line` is never true while iterating a
                # file object (lines keep their trailing newline), so only
                # '#' lines open a new block; blank lines fall through both
                # branches without resetting `empty_lines`.
                if not line or line.startswith('#'):
                    # If it is the first one: new block
                    if empty_lines == 0:
                        blocks.append([])
                    empty_lines += 1
                # Non empty line: add line in current(last) block
                elif line.strip():
                    empty_lines = 0
                    clean_line = line.strip()
                    blocks[-1].append(
                        [float(e) for e in clean_line.split()])
        # Convert everything to numpy arrays
        blocks = [np.array(block) for block in blocks]
        # Check that sufficiently many blocks were read
        assert len(blocks) == size
        return blocks

    def loglkl(self, cosmo, data):
        """Return the log-likelihood (-chi2/2) for the current cosmology."""
        # Recover the Cl_BB from CLASS
        cls = self.get_cl(cosmo, self.l_max)
        ell = cls['ell']
        # Convert to the conventional D_l = l(l+1) C_l / (2 pi) scaling.
        cls_bb = cls['bb']*ell*(ell+1.)/(2.*pi)
        # Recover the predicted Cl_BB for each of the four bandpowers
        BB_th = []
        for block in self.bpwf:
            # each block contains a window function; weight the spectrum at
            # the tabulated ell values (column 0) by the window (column 1)
            integrand = np.array(
                [block[index, 1]*cls_bb[int(e)]
                 for index, e in enumerate(block[:, 0])])
            # Trapezoidal integration of window * spectrum over ell.
            convolution = 0.5*((integrand[1:]+integrand[:-1])*(
                block[1:, 0]-block[:-1, 0])).sum()
            BB_th.append(convolution)
        # Measured bandpowers and their errors, taken from columns 3 and 4
        # of the bandpower file (TODO confirm column convention vs. data).
        BB_exp = self.bandpowers[:, 3]
        Delta_BB = self.bandpowers[:, 4]
        # Gaussian chi-square assuming uncorrelated bandpowers.
        chi2 = ((BB_th - BB_exp)**2/(Delta_BB)**2).sum()
        return -0.5*chi2
|
brinckmannREPO_NAMEmontepython_publicPATH_START.@montepython_public_extracted@montepython_public-master@montepython@likelihoods@polarbear@__init__.py@.PATH_END.py
|
{
"filename": "GalaxiesFaintStars.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/EXOSIMS/BackgroundSources/GalaxiesFaintStars.py",
"type": "Python"
}
|
from EXOSIMS.Prototypes.BackgroundSources import BackgroundSources
import os
import inspect
import numpy as np
import astropy.units as u
from scipy.interpolate import griddata
from EXOSIMS.util._numpy_compat import copy_if_needed
class GalaxiesFaintStars(BackgroundSources):
    """
    GalaxiesFaintStars class

    Computes the total surface density of background sources -- faint
    stars plus galaxies -- in counts per square arcminute.
    """

    def __init__(self, **specs):
        """Constructor; all set-up is delegated to the prototype."""
        BackgroundSources.__init__(self, **specs)

    def dNbackground(self, coords, intDepths):
        """
        Return total number counts per square arcmin.

        Args:
            coords (astropy SkyCoord array):
                SkyCoord object containing right ascension, declination, and
                distance to star of the planets of interest in units of deg,
                deg and pc.
            intDepths (float ndarray):
                Integration depths equal to the planet magnitude (Vmag+dMag),
                i.e. the V magnitude of the dark hole to be produced for each
                target. Must be of same length as coords.

        Returns:
            dN (astropy Quantity array):
                Number densities of background sources for given targets in
                units of 1/arcmin2. Same length as inputs.
        """
        # Input validation is delegated to the prototype implementation.
        mags = np.array(intDepths, ndmin=1, copy=copy_if_needed)
        super(GalaxiesFaintStars, self).dNbackground(coords, mags)
        # Clamp magnitudes to the tabulated range [15, 25].
        mags = np.clip(mags, 15.0, 25.0)
        # Absolute galactic latitude of each target, in degrees.
        gal_lat = np.abs(coords.galactic.b.degree)
        # Stellar background counts tabulated in stellar_cnts.txt (from
        # Allen's Astrophysical Quantities), sampled on a latitude x V-mag
        # grid; table values are log10 counts per square degree.
        module_dir = os.path.split(inspect.getfile(self.__class__))[0]
        cnt_table = np.loadtxt(os.path.join(module_dir, "stellar_cnts.txt"))
        lat_grid = np.array([0.0, 5, 10, 20, 30, 60, 90])  # deg
        mag_grid = np.array([15.0, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25])
        mag_mesh, lat_mesh = np.meshgrid(mag_grid, lat_grid)
        sample_pts = np.column_stack((lat_mesh.ravel(), mag_mesh.ravel()))
        sample_vals = cnt_table.ravel()
        # 2D interpolation of the log counts at each target's (lat, mag).
        log_counts = griddata(sample_pts, sample_vals,
                              np.column_stack((gal_lat, mags)))
        # De-log and convert from per square degree to per square arcmin.
        counts_st = 10**log_counts / 3600
        # Galaxy count per square arcmin, from Windhorst et al 2011,
        # who derived numbers based on Deep Field HST data.
        counts_gal = 2 * 2.1 ** (mags - 12.5) / 3600
        # Total background density.
        return (counts_st + counts_gal) / u.arcmin**2
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@EXOSIMS@BackgroundSources@GalaxiesFaintStars.py@.PATH_END.py
|
{
"filename": "magnitude_to_radius.py",
"repo_name": "sarahbrands/Kiwi-GA",
"repo_path": "Kiwi-GA_extracted/Kiwi-GA-master/magnitude_to_radius.py",
"type": "Python"
}
|
# Estimate the stellar radius given an effective temperature
# and magnitude. Requires filter transmissions and zero points.
# Created by Sarah Brands on 20 Dec 2019 (s.a.brands@uva.nl)
import __future__
import numpy as np
import sys
from scipy.interpolate import interp1d
def magnitude_to_radius(teff, band, obsmag, zp_system, Tfrac=0.9,
                        filterdir='filter_transmissions/'):
    '''Estimate the radius of the star given a temperature,
    photometric filter and observed (dereddened) absolute
    magnitude.

    Input:
    - teff: model effective temperature in K (float)
    - band: name of the photometric band (string), see section
          'Available photometric bands' at the start of this
          function for which ones are included, and the
          description below on how to add more.
    - obsmag: the observed absolute magnitude in the given
          band (float)
    - zp_system: choose from 'vega', 'AB', 'ST' (string)
    - Tfrac: fraction of the effective temperature that is used
          for calculating the 'theoretical SED', i.e. a black
          body curve (float)
    - filterdir: specify (relative) path to the directory
          where the filter information is stored (string)

    Output:
    - Estimated stellar radius in solar units (float)

    NOTE ON ADDING NEW FILTERS
    A useful resource for filter information is the
    SVO Filter Profile Service:
    http://svo2.cab.inta-csic.es/theory/fps/
    When adding a new filter, please do the following:
     1. Place an ascii file with wavelengths and transmissions in
        the filter directory (specified in the parameter
        'filterdir'). In this file, lines with column names or
        headers should start with a '#'. Wavelengths can be in
        Angstrom or nm (see next point).
     2. Add an 'elif' statement in the code below under 'Available
        photometric bands', in which you give the filter a clear
        and descriptive name, and point to the transmission file.
        Specify the wavelength unit used in the data file ('nm'
        or 'angstrom') in the parameter 'waveunit' in the elif
        statement.
     3. Add zero points to the file 'zero_points.dat' in the
        filter directory. In the first column give the name of
        the filter: use the same name as in point 2.

    NOTE ON THE 'THEORETICAL SED'
    The "theoretical SED" on which the radius estimate is based
    is a Planck function evaluated at Tfrac*teff. The default
    Tfrac=0.9 is as done in Mokiem 2005, who follows Markova 2004.
    #FIXME it would be interesting to check whether this
        chosen value for Tfrac from Markova 2004 gives the best
        approximation by comparing for calculated models the
        real SEDs with the 0.9*teff black body spectrum, and see
        how those radii compare
    '''

    ##########################################################
    ###          Available photometric bands               ###
    ##########################################################

    if band == 'SPHERE_Ks':
        filterfile = 'SPHERE_IRDIS_B_Ks.dat'
        waveunit = 'nm'
    elif band == 'HST_555w':
        filterfile = 'HST_ACS_HRC.F555W.dat'
        waveunit = 'angstrom'
    elif band == '2MASS_Ks':
        filterfile = '2MASS_Ks.dat'
        waveunit = 'angstrom'
    elif band == 'VISTA_Ks':
        filterfile = 'Paranal_VISTA.Ks.dat'
        waveunit = 'angstrom'
    elif band == 'Johnson_V':
        filterfile = 'GCPD_Johnson.V.dat'
        waveunit = 'angstrom'
    elif band == "Johnson_J":
        filterfile = "Generic_Johnson.J.dat"
        waveunit = 'angstrom'
    else:
        print('Unknown value for <band>, exiting')
        sys.exit()

    ##########################################################
    ###            Computation starts here                 ###
    ##########################################################

    # Read transmission profile and convert units if necessary
    filterfile = filterdir + filterfile
    wave, trans = np.genfromtxt(filterfile, comments='#').T
    if waveunit == 'nm':
        nm_to_Angstrom = 10
        wave = wave * nm_to_Angstrom
    elif waveunit == 'angstrom':
        pass
    else:
        print('Unknown value for <waveunit>, exiting')
        # BUG FIX: previously execution fell through after the print,
        # continuing with wavelengths in an unknown unit.
        sys.exit()

    # Get filter zero point for the requested magnitude system.
    zpfile = filterdir + 'zero_points.dat'
    zp_values = np.genfromtxt(zpfile, comments='#', dtype=str)
    the_zero_point = None
    for afilter in zp_values:
        if afilter[0] == band:
            if zp_system == 'vega':
                the_zero_point = float(afilter[1])
            elif zp_system == 'AB':
                the_zero_point = float(afilter[2])
            elif zp_system == 'ST':
                the_zero_point = float(afilter[3])
            else:
                print('Unknown value for <zp_system>, exiting')
                sys.exit()
    if the_zero_point is None:
        print('Zero point for band ' + band + ' not found, exiting')
        sys.exit()

    # Black body temperature used for the 'theoretical SED'.
    tBB = teff * Tfrac
    # Integration over angles results in the factor of pi
    F_lambda = np.pi*planck_wavelength(wave, tBB)

    rsun = 6.96e10  # solar radius in cm
    parsec_cm = 3.08567758e18  # parsec in cm

    # Filter-weighted mean model flux at the stellar surface.
    filtered_flux = np.trapz(trans*F_lambda, wave)/np.trapz(trans, wave)
    # Observed flux at 10 pc (definition of absolute magnitude).
    obsflux = magnitude_to_flux(obsmag, the_zero_point)
    # Inverse-square dilution between the stellar surface and 10 pc
    # yields (R/Rsun)^2.
    bolflux_10pc = obsflux/filtered_flux
    radius_sq_rsun = bolflux_10pc * (10*parsec_cm / rsun)**2
    radius_rsun = radius_sq_rsun**0.5

    return radius_rsun
def planck_wavelength(wave_angstrom, temp):
    '''Planck spectral radiance B_lambda(T) in cgs units.

    Input is the wavelength in Angstrom (scalar or array) and the
    temperature in K; the returned radiance is per Angstrom.
    '''
    # Physical constants, cgs.
    hh = 6.6260755e-27   # Planck constant
    cc = 2.99792458e10   # speed of light in a vacuum
    kk = 1.380658e-16    # Boltzmann constant

    angstrom_to_cm = 1e-8
    wave_cm = wave_angstrom * angstrom_to_cm

    prefactor = 2.0 * hh * cc**2 / (wave_cm**5)
    exponent = (hh * cc / kk) / (wave_cm * temp)
    blambda_per_cm = prefactor * (1.0 / (np.exp(exponent) - 1))
    # Convert from per cm to per Angstrom before returning.
    return blambda_per_cm * angstrom_to_cm
def magnitude_to_flux(magnitude, zpflux):
    '''Convert a magnitude to a flux given the zero-point flux of the
    photometric system: flux = zpflux * 10^(-magnitude/2.5).'''
    return zpflux * 10**(-magnitude/2.5)
def magnitude_to_radius_SED(sed_wave, sed_flam, band, obsmag, zp_system,
                            filterdir='filter_transmissions/'):
    '''Estimate the radius of the star given a model SED, a
    photometric filter and an observed (dereddened) absolute
    magnitude.

    Input:
    - sed_wave: wavelengths of the model SED in Angstrom (array);
          must span the full wavelength range of the filter.
    - sed_flam: model SED surface flux per Angstrom at the
          wavelengths in sed_wave (array)
    - band: name of the photometric band (string), see section
          'Available photometric bands' at the start of this
          function for which ones are included, and the
          description below on how to add more.
    - obsmag: the observed absolute magnitude in the given
          band (float)
    - zp_system: choose from 'vega', 'AB', 'ST' (string)
    - filterdir: specify (relative) path to the directory
          where the filter information is stored (string)

    Output:
    - Estimated stellar radius in solar units (float)

    NOTE ON ADDING NEW FILTERS
    A useful resource for filter information is the
    SVO Filter Profile Service:
    http://svo2.cab.inta-csic.es/theory/fps/
    When adding a new filter, please do the following:
     1. Place an ascii file with wavelengths and transmissions in
        the filter directory (specified in the parameter
        'filterdir'). In this file, lines with column names or
        headers should start with a '#'. Wavelengths can be in
        Angstrom or nm (see next point).
     2. Add an 'elif' statement in the code below under 'Available
        photometric bands', in which you give the filter a clear
        and descriptive name, and point to the transmission file.
        Specify the wavelength unit used in the data file ('nm'
        or 'angstrom') in the parameter 'waveunit' in the elif
        statement.
     3. Add zero points to the file 'zero_points.dat' in the
        filter directory. In the first column give the name of
        the filter: use the same name as in point 2.
    '''

    ##########################################################
    ###          Available photometric bands               ###
    ##########################################################

    if band == 'SPHERE_Ks':
        filterfile = 'SPHERE_IRDIS_B_Ks.dat'
        waveunit = 'nm'
    elif band == 'HST_555w':
        filterfile = 'HST_ACS_HRC.F555W.dat'
        waveunit = 'angstrom'
    elif band == '2MASS_Ks':
        filterfile = '2MASS_Ks.dat'
        waveunit = 'angstrom'
    elif band == 'VISTA_Ks':
        filterfile = 'Paranal_VISTA.Ks.dat'
        waveunit = 'angstrom'
    elif band == 'Johnson_V':
        filterfile = 'GCPD_Johnson.V.dat'
        waveunit = 'angstrom'
    elif band == "Johnson_J":
        filterfile = "Generic_Johnson.J.dat"
        waveunit = 'angstrom'
    else:
        print('Unknown value for <band>, exiting')
        sys.exit()

    ##########################################################
    ###            Computation starts here                 ###
    ##########################################################

    # Read transmission profile and convert units if necessary
    filterfile = filterdir + filterfile
    wave, trans = np.genfromtxt(filterfile, comments='#').T
    if waveunit == 'nm':
        nm_to_Angstrom = 10
        wave = wave * nm_to_Angstrom
    elif waveunit == 'angstrom':
        pass
    else:
        print('Unknown value for <waveunit>, exiting')
        # BUG FIX: previously execution fell through after the print,
        # continuing with wavelengths in an unknown unit.
        sys.exit()

    # Get filter zero point for the requested magnitude system.
    zpfile = filterdir + 'zero_points.dat'
    zp_values = np.genfromtxt(zpfile, comments='#', dtype=str)
    the_zero_point = None
    for afilter in zp_values:
        if afilter[0] == band:
            if zp_system == 'vega':
                the_zero_point = float(afilter[1])
            elif zp_system == 'AB':
                the_zero_point = float(afilter[2])
            elif zp_system == 'ST':
                the_zero_point = float(afilter[3])
            else:
                print('Unknown value for <zp_system>, exiting')
                sys.exit()
    if the_zero_point is None:
        print('Zero point for band ' + band + ' not found, exiting')
        sys.exit()

    # Interpolate the model SED onto the filter's wavelength grid.
    sed_ip = interp1d(sed_wave, sed_flam)
    F_lambda = sed_ip(wave)

    rsun = 6.96e10  # solar radius in cm
    parsec_cm = 3.08567758e18  # parsec in cm

    # Filter-weighted mean model flux at the stellar surface.
    filtered_flux = np.trapz(trans*F_lambda, wave)/np.trapz(trans, wave)
    # Observed flux at 10 pc (definition of absolute magnitude).
    obsflux = magnitude_to_flux(obsmag, the_zero_point)
    # Inverse-square dilution between the stellar surface and 10 pc
    # yields (R/Rsun)^2.
    bolflux_10pc = obsflux/filtered_flux
    radius_sq_rsun = bolflux_10pc * (10*parsec_cm / rsun)**2
    radius_rsun = radius_sq_rsun**0.5

    return radius_rsun
|
sarahbrandsREPO_NAMEKiwi-GAPATH_START.@Kiwi-GA_extracted@Kiwi-GA-master@magnitude_to_radius.py@.PATH_END.py
|
{
"filename": "generator_data_adapter.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/trainers/data_adapters/generator_data_adapter.py",
"type": "Python"
}
|
import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
    """Adapter for Python generators."""

    def __init__(self, generator):
        """Wrap *generator* for use as training data.

        Args:
            generator: a Python generator yielding batch tuples of the
                form `(inputs,)`, `(inputs, targets)` or
                `(inputs, targets, sample_weights)`.

        Raises:
            ValueError: if the first peeked batch is not a tuple.
        """
        # Peek at the first batches (used later to derive the tf output
        # signature) while keeping the full stream replayable.
        first_batches, generator = peek_and_restore(generator)
        # `generator` is now a zero-argument callable that replays the
        # peeked batches followed by the untouched remainder.
        self.generator = generator
        self._first_batches = first_batches
        self._output_signature = None  # built lazily in get_tf_dataset()
        if not isinstance(first_batches[0], tuple):
            raise ValueError(
                "When passing a Python generator to a Keras model, "
                "the generator must return a tuple, either "
                "(input,) or (inputs, targets) or "
                "(inputs, targets, sample_weights). "
                f"Received: {first_batches[0]}"
            )

    def get_numpy_iterator(self):
        """Delegate to data_adapter_utils.get_numpy_iterator over the stream."""
        return data_adapter_utils.get_numpy_iterator(self.generator())

    def get_jax_iterator(self):
        """Delegate to data_adapter_utils.get_jax_iterator over the stream."""
        return data_adapter_utils.get_jax_iterator(self.generator())

    def get_tf_dataset(self):
        """Build a tf.data.Dataset that re-yields the generator's batches."""
        from keras.src.utils.module_utils import tensorflow as tf

        def convert_to_tf(x, spec):
            # SciPy/JAX sparse elements must become tf sparse tensors.
            if data_adapter_utils.is_scipy_sparse(x):
                x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
            elif data_adapter_utils.is_jax_sparse(x):
                x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
            # Guard against batches whose shape contradicts the signature
            # that was inferred from the first peeked batches.
            if not spec.shape.is_compatible_with(x.shape):
                raise TypeError(
                    f"Generator yielded an element of shape {x.shape} where "
                    f"an element of shape {spec.shape} was expected. Your "
                    "generator provides tensors with variable input "
                    "dimensions other than the batch size. Make sure that the "
                    "generator's first two batches do not have the same "
                    "dimension value wherever there is a variable input "
                    "dimension."
                )
            return x

        def get_tf_iterator():
            for batch in self.generator():
                batch = tree.map_structure(
                    convert_to_tf, batch, self._output_signature
                )
                yield batch

        # Compute the signature once, from the batches peeked in __init__.
        if self._output_signature is None:
            self._output_signature = data_adapter_utils.get_tensor_spec(
                self._first_batches
            )
        ds = tf.data.Dataset.from_generator(
            get_tf_iterator,
            output_signature=self._output_signature,
        )
        ds = ds.prefetch(tf.data.AUTOTUNE)
        return ds

    def get_torch_dataloader(self):
        """Delegate to data_adapter_utils.get_torch_dataloader over the stream."""
        return data_adapter_utils.get_torch_dataloader(self.generator())

    @property
    def num_batches(self):
        # Unknown for a generic generator.
        return None

    @property
    def batch_size(self):
        # Unknown for a generic generator.
        return None
def peek_and_restore(generator):
    """Pull the first NUM_BATCHES_FOR_TENSOR_SPEC batches off *generator*.

    Returns the peeked batches together with a zero-argument callable
    that replays them followed by the untouched remainder of the stream.
    """
    spec_count = data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
    first_batches = [b for b in itertools.islice(generator, spec_count)]

    def restored():
        return itertools.chain(first_batches, generator)

    return first_batches, restored
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@trainers@data_adapters@generator_data_adapter.py@.PATH_END.py
|
{
"filename": "crackfortran.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py3/numpy/f2py/crackfortran.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
Copyright 2011 -- present NumPy Developers.
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
`postlist=crackfortran(files)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule' |
'abstract interface'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind','f2py_len']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which current variable depends on
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
D['typespec>']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'], f2py_len=C['f2py_len'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
import sys
import string
import fileinput
import re
import os
import copy
import platform
import codecs
from pathlib import Path
try:
import charset_normalizer
except ImportError:
charset_normalizer = None
from . import __version__
# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
from . import symbolic
f2py_version = __version__.version

# Global flags (set from command-line processing elsewhere in f2py):
strictf77 = 1  # Ignore `!' comments unless line[0]=='!'
sourcecodeform = 'fix'  # 'fix','free'
quiet = 0  # Be verbose if 0 (Obsolete: not used any more)
verbose = 1  # Be quiet if 0, extra verbose if > 1.
tabchar = 4 * ' '  # expansion used for tab characters
pyffilename = ''
f77modulename = ''
skipemptyends = 0  # for old F77 programs without 'program' statement
ignorecontains = 1
dolowercase = 1
debug = []

# Global parser state (all reset to these defaults by
# reset_global_f2py_vars() below):
beginpattern = ''
currentfilename = ''
expectbegin = 1
f90modulevars = {}
filepositiontext = ''
gotnextfile = 1
groupcache = None
groupcounter = 0
grouplist = {groupcounter: []}
groupname = ''
include_paths = []
neededmodule = -1
onlyfuncs = []
previous_context = None
skipblocksuntil = -1
skipfuncs = []
skipfunctions = []
usermodules = []
def reset_global_f2py_vars():
    """Restore every module-level crackfortran flag and piece of parser
    state to its default value (mirrors the module-level definitions)."""
    global groupcounter, grouplist, neededmodule, expectbegin
    global skipblocksuntil, usermodules, f90modulevars, gotnextfile
    global filepositiontext, currentfilename, skipfunctions, skipfuncs
    global onlyfuncs, include_paths, previous_context
    global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename
    global f77modulename, skipemptyends, ignorecontains, dolowercase, debug

    # Command-line flags.
    strictf77, sourcecodeform = 1, 'fix'
    quiet, verbose = 0, 1
    tabchar = ' ' * 4
    pyffilename = ''
    f77modulename = ''
    skipemptyends = 0
    ignorecontains = 1
    dolowercase = 1
    debug = []
    # Parser state.
    groupcounter = 0
    grouplist = {groupcounter: []}
    neededmodule = -1
    expectbegin = 1
    skipblocksuntil = -1
    usermodules, f90modulevars = [], {}
    gotnextfile = 1
    filepositiontext = ''
    currentfilename = ''
    skipfunctions, skipfuncs = [], []
    onlyfuncs, include_paths = [], []
    previous_context = None
def outmess(line, flag=1):
    """Write *line* to stdout, prefixed with the current file-position
    text when *flag* is truthy.  Silent when the module-level `verbose`
    flag is falsy or `quiet` is truthy."""
    global filepositiontext

    if not verbose:
        return
    if quiet:
        return
    if flag:
        sys.stdout.write(filepositiontext)
    sys.stdout.write(line)
# Bound the regex module's pattern cache; crackfortran compiles many
# patterns.  NOTE(review): _MAXCACHE is a private CPython attribute --
# confirm it still exists on the supported Python versions.
re._MAXCACHE = 50

# Fortran's default implicit typing: names starting with i-n are INTEGER,
# all other letters (plus '$' and '_') are REAL.
defaultimplicitrules = {}
for c in "abcdefghopqrstuvwxyz$_":
    defaultimplicitrules[c] = {'typespec': 'real'}
for c in "ijklmn":
    defaultimplicitrules[c] = {'typespec': 'integer'}

# Fortran identifiers that would collide with C keywords or names used by
# the generated wrapper code get a '_bn' suffix appended (badnames);
# invbadnames maps the mangled names back to the originals.
badnames = {}
invbadnames = {}
for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
          'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
          'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
          'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
          'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
          'max', 'min',
          'flen', 'fshape',
          'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
          'type', 'default']:
    badnames[n] = n + '_bn'
    invbadnames[n + '_bn'] = n
def rmbadname1(name):
    """Return the '_bn'-mangled replacement for *name* if it is a reserved
    name (per the module-level `badnames` table); otherwise return *name*
    unchanged.  Logs every replacement via errmess."""
    replacement = badnames.get(name)
    if replacement is None:
        return name
    errmess('rmbadname1: Replacing "%s" with "%s".\n' %
            (name, replacement))
    return replacement
def rmbadname(names):
    """Apply rmbadname1 to every name in *names*, returning a new list."""
    return list(map(rmbadname1, names))
def undo_rmbadname1(name):
    """Invert rmbadname1: map a '_bn'-mangled name (per the module-level
    `invbadnames` table) back to the original identifier; names that were
    never mangled pass through unchanged.  Logs every replacement."""
    original = invbadnames.get(name)
    if original is None:
        return name
    errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'
            % (name, original))
    return original
def undo_rmbadname(names):
    """Apply undo_rmbadname1 to every name in *names*, returning a new list."""
    return list(map(undo_rmbadname1, names))
# Emacs-style mode cookies ('-*- fortran -*-' etc.) in a file's first line
# override source-form detection in is_free_format()/readfortrancode().
_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search
_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search
_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search
# Matches line prefixes that look like free-form code: first character is
# not a 'c'/'*' fixed-form comment marker, followed by a non-blank,
# non-digit statement character.
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match

# File extensions conventionally associated with each Fortran source form.
COMMON_FREE_EXTENSIONS = ['.f90', '.f95', '.f03', '.f08']
COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f']
def openhook(filename, mode):
    """Open *filename* with an encoding inferred from its content.

    When the charset_normalizer package is available it is used for full
    encoding detection over the whole file (gh-22871).  Otherwise only a
    UTF BOM is sniffed from the first bytes, with ASCII as the fallback.
    """
    if charset_normalizer is not None:
        # Robust detection over the entire file content.
        encoding = charset_normalizer.from_path(filename).best().encoding
        return open(filename, mode, encoding=encoding)
    # Fallback without charset_normalizer: a BOM, if present, sits in the
    # first few bytes -- no need to read the whole file.
    # hint: install charset_normalizer for correct encoding handling
    nbytes = min(32, os.path.getsize(filename))
    with open(filename, 'rb') as fhandle:
        raw = fhandle.read(nbytes)
    if raw.startswith(codecs.BOM_UTF8):
        encoding = 'UTF-8-SIG'
    elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
        # UTF-32 BOMs must be tested before UTF-16: they share a prefix.
        encoding = 'UTF-32'
    elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)):
        encoding = 'UTF-16'
    else:
        encoding = 'ascii'
    return open(filename, mode, encoding=encoding)
def is_free_format(fname):
    """Check if file is in free format Fortran."""
    # f90 allows both fixed and free format, assuming fixed unless
    # signs of free format are detected.
    result = False
    # A free-form file extension alone already decides the default.
    if Path(fname).suffix.lower() in COMMON_FREE_EXTENSIONS:
        result = True
    with openhook(fname, 'r') as fhandle:
        line = fhandle.readline()
        n = 15  # the number of non-comment lines to scan for hints
        if _has_f_header(line):
            # '-*- fortran -*-' cookie on line 1: fixed form, skip the scan.
            n = 0
        elif _has_f90_header(line):
            # '-*- f90 -*-' cookie on line 1: free form, skip the scan.
            n = 0
            result = True
        while n > 0 and line:
            # Only lines that are not '!'-comments and not blank count
            # toward the scan budget.
            if line[0] != '!' and line.strip():
                n -= 1
                # Free-form giveaways: code starting in columns 1-5 that is
                # not tab-indented, or a trailing '&' continuation marker.
                if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
                    result = True
                    break
            line = fhandle.readline()
    return result
# Read fortran (77,90) code
def readfortrancode(ffile, dowithline=show, istop=1):
    """
    Read fortran codes from files and
     1) Get rid of comments, line continuations, and empty lines; lower cases.
     2) Call dowithline(line) on every line.
     3) Recursively call itself when statement "include '<filename>'" is met.

    Parameters
    ----------
    ffile : list or str
        File name(s) passed to fileinput.FileInput.
    dowithline : callable
        Called with each assembled statement line; also called with
        ('', -1) before and ('', 1) after a top-level run.
    istop : int
        Non-zero for the top-level call; zero for recursive include calls,
        in which case the parser globals are saved and restored around
        the recursion.

    NOTE(review): the bracketed references like [3.3.2.4^1] in the comments
    below presumably cite sections of the Fortran standard — not verifiable
    from this file alone.
    """
    global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
    global beginpattern, quiet, verbose, dolowercase, include_paths
    # Recursive (include-file) calls must not clobber the parser state of
    # the caller; snapshot it and restore at the end.
    if not istop:
        saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
            beginpattern, quiet, verbose, dolowercase
    if ffile == []:
        return
    localdolowercase = dolowercase
    # cont: set to True when the content of the last line read
    # indicates statement continuation
    cont = False
    finalline = ''
    ll = ''
    # Matches: include 'name' or include "name".
    includeline = re.compile(
        r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
    # cont1: line ending in '&' (free-form continuation marker).
    cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
    # cont2: strips an optional leading '&' from a continued line.
    cont2 = re.compile(r'(\s*&|)(?P<line>.*)')
    mline_mark = re.compile(r".*?'''")
    if istop:
        dowithline('', -1)
    ll, l1 = '', ''
    # Valid first-column characters for strict fixed-form (F77) source.
    spacedigits = [' '] + [str(_m) for _m in range(10)]
    filepositiontext = ''
    fin = fileinput.FileInput(ffile, openhook=openhook)
    while True:
        try:
            l = fin.readline()
        except UnicodeDecodeError as msg:
            raise Exception(
                f'readfortrancode: reading {fin.filename()}#{fin.lineno()}'
                f' failed with\n{msg}.\nIt is likely that installing charset_normalizer'
                ' package will help f2py determine the input file encoding'
                ' correctly.')
        if not l:
            break
        if fin.isfirstline():
            # New file started: detect its source form (fixed/free/strict F77).
            filepositiontext = ''
            currentfilename = fin.filename()
            gotnextfile = 1
            l1 = l
            strictf77 = 0
            sourcecodeform = 'fix'
            ext = os.path.splitext(currentfilename)[1]
            if Path(currentfilename).suffix.lower() in COMMON_FIXED_EXTENSIONS and \
                    not (_has_f90_header(l) or _has_fix_header(l)):
                strictf77 = 1
            elif is_free_format(currentfilename) and not _has_fix_header(l):
                sourcecodeform = 'free'
            if strictf77:
                beginpattern = beginpattern77
            else:
                beginpattern = beginpattern90
            outmess('\tReading file %s (format:%s%s)\n'
                    % (repr(currentfilename), sourcecodeform,
                       strictf77 and ',strict' or ''))
        l = l.expandtabs().replace('\xa0', ' ')
        # Get rid of newline characters
        while not l == '':
            if l[-1] not in "\n\r\f":
                break
            l = l[:-1]
        if not strictf77:
            # Split off a trailing '!' comment, but re-attach '!f2py'
            # directives as regular code.
            (l, rl) = split_by_unquoted(l, '!')
            l += ' '
            if rl[:5].lower() == '!f2py':  # f2py directive
                l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
        if l.strip() == '':  # Skip empty line
            if sourcecodeform == 'free':
                # In free form, a statement continues in the next line
                # that is not a comment line [3.3.2.4^1], lines with
                # blanks are comment lines [3.3.2.3^1]. Hence, the
                # line continuation flag must retain its state.
                pass
            else:
                # In fixed form, statement continuation is determined
                # by a non-blank character at the 6-th position. Empty
                # line indicates a start of a new statement
                # [3.3.3.3^1]. Hence, the line continuation flag must
                # be reset.
                cont = False
            continue
        if sourcecodeform == 'fix':
            if l[0] in ['*', 'c', '!', 'C', '#']:
                if l[1:5].lower() == 'f2py':  # f2py directive
                    l = ' ' + l[5:]
                else:  # Skip comment line
                    cont = False
                    continue
            elif strictf77:
                if len(l) > 72:
                    # Strict F77 ignores everything past column 72.
                    l = l[:72]
                if not (l[0] in spacedigits):
                    raise Exception('readfortrancode: Found non-(space,digit) char '
                                    'in the first column.\n\tAre you sure that '
                                    'this code is in fix form?\n\tline=%s' % repr(l))
            if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '):
                # Continuation of a previous line
                ll = ll + l[6:]
                finalline = ''
                origfinalline = ''
            else:
                if not strictf77:
                    # F90 continuation
                    r = cont1.match(l)
                    if r:
                        l = r.group('line')  # Continuation follows ..
                    if cont:
                        ll = ll + cont2.match(l).group('line')
                        finalline = ''
                        origfinalline = ''
                    else:
                        # clean up line beginning from possible digits.
                        l = ' ' + l[5:]
                        if localdolowercase:
                            finalline = ll.lower()
                        else:
                            finalline = ll
                        origfinalline = ll
                    ll = l
                    cont = (r is not None)
                else:
                    # clean up line beginning from possible digits.
                    l = ' ' + l[5:]
                    if localdolowercase:
                        finalline = ll.lower()
                    else:
                        finalline = ll
                    origfinalline = ll
                    ll = l
        elif sourcecodeform == 'free':
            if not cont and ext == '.pyf' and mline_mark.match(l):
                # .pyf multiline block delimited by ''' ... ''': slurp it
                # whole so the delimiters stay on one logical line.
                l = l + '\n'
                while True:
                    lc = fin.readline()
                    if not lc:
                        errmess(
                            'Unexpected end of file when reading multiline\n')
                        break
                    l = l + lc
                    if mline_mark.match(lc):
                        break
                l = l.rstrip()
            r = cont1.match(l)
            if r:
                l = r.group('line')  # Continuation follows ..
            if cont:
                ll = ll + cont2.match(l).group('line')
                finalline = ''
                origfinalline = ''
            else:
                if localdolowercase:
                    finalline = ll.lower()
                else:
                    finalline = ll
                origfinalline = ll
            ll = l
            cont = (r is not None)
        else:
            raise ValueError(
                "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform))
        filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
            fin.filelineno() - 1, currentfilename, l1)
        # Recurse into include files; otherwise emit the assembled statement.
        m = includeline.match(origfinalline)
        if m:
            fn = m.group('name')
            if os.path.isfile(fn):
                readfortrancode(fn, dowithline=dowithline, istop=0)
            else:
                include_dirs = [
                    os.path.dirname(currentfilename)] + include_paths
                foundfile = 0
                for inc_dir in include_dirs:
                    fn1 = os.path.join(inc_dir, fn)
                    if os.path.isfile(fn1):
                        foundfile = 1
                        readfortrancode(fn1, dowithline=dowithline, istop=0)
                        break
                if not foundfile:
                    outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
                        repr(fn), os.pathsep.join(include_dirs)))
        else:
            dowithline(finalline)
        l1 = ll
    # Flush the last buffered statement after EOF (same include handling
    # as inside the loop).
    if localdolowercase:
        finalline = ll.lower()
    else:
        finalline = ll
    origfinalline = ll
    filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
        fin.filelineno() - 1, currentfilename, l1)
    m = includeline.match(origfinalline)
    if m:
        fn = m.group('name')
        if os.path.isfile(fn):
            readfortrancode(fn, dowithline=dowithline, istop=0)
        else:
            include_dirs = [os.path.dirname(currentfilename)] + include_paths
            foundfile = 0
            for inc_dir in include_dirs:
                fn1 = os.path.join(inc_dir, fn)
                if os.path.isfile(fn1):
                    foundfile = 1
                    readfortrancode(fn1, dowithline=dowithline, istop=0)
                    break
            if not foundfile:
                outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
                    repr(fn), os.pathsep.join(include_dirs)))
    else:
        dowithline(finalline)
    filepositiontext = ''
    fin.close()
    if istop:
        dowithline('', 1)
    else:
        # Restore the caller's parser state (saved at function entry).
        gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
            beginpattern, quiet, verbose, dolowercase = saveglobals
# Crack line
# Template for statement-recognition regexes: before / this / after groups.
# The four %s slots are (prefix-pattern, keyword-lookahead, keyword, rest).
# Each *pattern below is a (compiled_regex, tag) pair; crackline() dispatches
# on the tag.
beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
                  r'\s*(?P<this>(\b(%s)\b))' + \
                  r'\s*(?P<after>%s)\s*\Z'
##
# Alternation of recognized Fortran type keywords.
fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern = re.compile(
    beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
# Extended type alternation used when parsing IMPLICIT statements.
typespattern4implicit = re.compile(beforethisafter % (
    '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
#
functionpattern = re.compile(beforethisafter % (
    r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
subroutinepattern = re.compile(beforethisafter % (
    r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
# Block-opening keywords: F77 subset vs. the F90 superset.
groupbegins77 = r'program|block\s*data'
beginpattern77 = re.compile(
    beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
groupbegins90 = groupbegins77 + \
    r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|' + \
    r'type(?!\s*\()'
beginpattern90 = re.compile(
    beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
             r'endinterface|endsubroutine|endfunction')
endpattern = re.compile(
    beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end'
# block, the Fortran 2008 construct needs special handling in the rest of the file
# 'end <construct>' statements that close control constructs, not blocks.
endifs = r'end\s*(if|do|where|select|while|forall|associate|' + \
         r'critical|enum|team)'
endifpattern = re.compile(
    beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif'
#
moduleprocedures = r'module\s*procedure'
moduleprocedurepattern = re.compile(
    beforethisafter % ('', moduleprocedures, moduleprocedures, '.*'), re.I), \
    'moduleprocedure'
# Declaration / attribute statements.
implicitpattern = re.compile(
    beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
dimensionpattern = re.compile(beforethisafter % (
    '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
externalpattern = re.compile(
    beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
optionalpattern = re.compile(
    beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
requiredpattern = re.compile(
    beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
publicpattern = re.compile(
    beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
privatepattern = re.compile(
    beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
intrinsicpattern = re.compile(
    beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
intentpattern = re.compile(beforethisafter % (
    '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
parameterpattern = re.compile(
    beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
datapattern = re.compile(
    beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
# Executable / structural statements.
callpattern = re.compile(
    beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
entrypattern = re.compile(
    beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
callfunpattern = re.compile(
    beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
commonpattern = re.compile(
    beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
usepattern = re.compile(
    beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
containspattern = re.compile(
    beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
formatpattern = re.compile(
    beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
# Non-fortran and f2py-specific statements
f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
                                                        'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
multilinepattern = re.compile(
    r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
def split_by_unquoted(line, characters):
    """
    Splits the line into (line[:i], line[i:]),
    where i is the index of first occurrence of one of the characters
    not within quotes, or len(line) if no such index exists
    """
    assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
    escaped = re.escape(characters)
    single_quoted = r"('([^'\\]|(\\.))*')"
    double_quoted = r'("([^"\\]|(\\.))*")'
    not_quoted = "[^\"'%s]" % escaped
    # before: any run of quoted strings / harmless characters;
    # after: starts at the first unquoted split character.
    splitter = re.compile(
        r"\A(?P<before>(%s|%s|%s)*)(?P<after>[%s].*)\Z"
        % (single_quoted, double_quoted, not_quoted, escaped))
    m = splitter.match(line)
    if m is None:
        return (line, "")
    return (m.group("before"), m.group("after"))
def _simplifyargs(argsline):
    """Normalize an argument list: inside each top-level argument,
    parentheses and commas are replaced by underscores."""
    table = str.maketrans('(),', '___')
    return ','.join(part.translate(table)
                    for part in markoutercomma(argsline).split('@,@'))
# Matches 'name = ...' and captures the assigned-to identifier as 'result'.
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+\w*\b)\s*=.*', re.I)
# Same shape, capturing the identifier as 'bind'.
crackline_bind_1 = re.compile(r'\s*(?P<bind>\b[a-z]+\w*\b)\s*=.*', re.I)
# Matches 'bind(<lang>, name="<lang_name>")' capturing both parts.
crackline_bindlang = re.compile(r'\s*bind\(\s*(?P<lang>[^,]+)\s*,\s*name\s*=\s*"(?P<lang_name>[^"]+)"\s*\)', re.I)
def crackline(line, reset=0):
    """
    Parse a single assembled Fortran statement line and update the global
    block tree (groupcache/grouplist).

    reset=-1 --- initialize
    reset=0 --- crack the line
    reset=1 --- final check if mismatch of blocks occurred

    Cracked data is saved in grouplist[0].
    """
    global beginpattern, groupcounter, groupname, groupcache, grouplist
    global filepositiontext, currentfilename, neededmodule, expectbegin
    global skipblocksuntil, skipemptyends, previous_context, gotnextfile
    # Lines containing unquoted ';' hold several statements: crack each
    # piece separately (except for f2py-enhancement / multiline blocks,
    # where ';' is part of the payload).
    _, has_semicolon = split_by_unquoted(line, ";")
    if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
                              multilinepattern[0].match(line)):
        # XXX: non-zero reset values need testing
        assert reset == 0, repr(reset)
        # split line on unquoted semicolons
        line, semicolon_line = split_by_unquoted(line, ";")
        while semicolon_line:
            crackline(line, reset)
            line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
        crackline(line, reset)
        return
    if reset < 0:
        # Initialize the parse state: one empty root group.
        groupcounter = 0
        groupname = {groupcounter: ''}
        groupcache = {groupcounter: {}}
        grouplist = {groupcounter: []}
        groupcache[groupcounter]['body'] = []
        groupcache[groupcounter]['vars'] = {}
        groupcache[groupcounter]['block'] = ''
        groupcache[groupcounter]['name'] = ''
        neededmodule = -1
        skipblocksuntil = -1
        return
    if reset > 0:
        # Final pass: close any blocks left open (missing 'end' statements).
        fl = 0
        if f77modulename and neededmodule == groupcounter:
            fl = 2
        while groupcounter > fl:
            outmess('crackline: groupcounter=%s groupname=%s\n' %
                    (repr(groupcounter), repr(groupname)))
            outmess(
                'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1
        if f77modulename and neededmodule == groupcounter:
            # Close the implicitly created interface and module wrappers.
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1  # end interface
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1  # end module
            neededmodule = -1
        return
    if line == '':
        return
    flag = 0
    # Try the statement patterns in order; 'pat' keeps the first match.
    for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
                requiredpattern,
                parameterpattern, datapattern, publicpattern, privatepattern,
                intrinsicpattern,
                endifpattern, endpattern,
                formatpattern,
                beginpattern, functionpattern, subroutinepattern,
                implicitpattern, typespattern, commonpattern,
                callpattern, usepattern, containspattern,
                entrypattern,
                f2pyenhancementspattern,
                multilinepattern,
                moduleprocedurepattern
                ]:
        m = pat[0].match(line)
        if m:
            break
        flag = flag + 1
    if not m:
        # No statement pattern matched: maybe it is a call of a known
        # external function, possibly with an assigned result.
        re_1 = crackline_re_1
        if 0 <= skipblocksuntil <= groupcounter:
            return
        if 'externals' in groupcache[groupcounter]:
            for name in groupcache[groupcounter]['externals']:
                if name in invbadnames:
                    name = invbadnames[name]
                if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
                    continue
                m1 = re.match(
                    r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
                if m1:
                    m2 = re_1.match(m1.group('before'))
                    a = _simplifyargs(m1.group('args'))
                    if m2:
                        # 'res = f(...)': synthesize a callfun statement
                        # carrying the result name.
                        line = 'callfun %s(%s) result (%s)' % (
                            name, a, m2.group('result'))
                    else:
                        line = 'callfun %s(%s)' % (name, a)
                    m = callfunpattern[0].match(line)
                    if not m:
                        outmess(
                            'crackline: could not resolve function call for line=%s.\n' % repr(line))
                        return
                    analyzeline(m, 'callfun', line)
                    return
        if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
            previous_context = None
            outmess('crackline:%d: No pattern for line\n' % (groupcounter))
        return
    elif pat[1] == 'end':
        if 0 <= skipblocksuntil < groupcounter:
            groupcounter = groupcounter - 1
            if skipblocksuntil <= groupcounter:
                return
        if groupcounter <= 0:
            raise Exception('crackline: groupcounter(=%s) is nonpositive. '
                            'Check the blocks.'
                            % (groupcounter))
        # Verify 'end <keyword>' matches the block being closed.
        m1 = beginpattern[0].match((line))
        if (m1) and (not m1.group('this') == groupname[groupcounter]):
            raise Exception('crackline: End group %s does not match with '
                            'previous Begin group %s\n\t%s' %
                            (repr(m1.group('this')), repr(groupname[groupcounter]),
                             filepositiontext)
                            )
        if skipblocksuntil == groupcounter:
            skipblocksuntil = -1
        # Fold the finished group into its parent.
        grouplist[groupcounter - 1].append(groupcache[groupcounter])
        grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
        del grouplist[groupcounter]
        groupcounter = groupcounter - 1
        if not skipemptyends:
            expectbegin = 1
    elif pat[1] == 'begin':
        if 0 <= skipblocksuntil <= groupcounter:
            # Inside a skipped (contains) region: only track nesting depth.
            groupcounter = groupcounter + 1
            return
        gotnextfile = 0
        analyzeline(m, pat[1], line)
        expectbegin = 0
    elif pat[1] == 'endif':
        pass
    elif pat[1] == 'moduleprocedure':
        analyzeline(m, pat[1], line)
    elif pat[1] == 'contains':
        if ignorecontains:
            return
        if 0 <= skipblocksuntil <= groupcounter:
            return
        # Skip everything until the containing block is closed.
        skipblocksuntil = groupcounter
    else:
        if 0 <= skipblocksuntil <= groupcounter:
            return
        analyzeline(m, pat[1], line)
def markouterparen(line):
    """Mark outermost (depth-1) parentheses in *line* as '@(@' / '@)@',
    leaving nested parentheses untouched."""
    out = []
    depth = 0
    for ch in line:
        if ch == '(':
            depth += 1
            if depth == 1:
                out.append('@(@')
                continue
        elif ch == ')':
            depth -= 1
            if depth == 0:
                out.append('@)@')
                continue
        out.append(ch)
    return ''.join(out)
def markoutercomma(line, comma=','):
    """Mark occurrences of *comma* at parenthesis depth 0 as '@<comma>@',
    skipping commas inside quoted strings and inside parentheses."""
    pieces = []
    depth = 0
    before, after = split_by_unquoted(line, comma + '()')
    pieces.append(before)
    while after:
        token = after[0]
        if token == comma and depth == 0:
            pieces.append('@' + comma + '@')
        else:
            pieces.append(token)
        # Depth bookkeeping happens regardless of which branch appended,
        # mirroring the case where *comma* is itself a parenthesis.
        if token == '(':
            depth += 1
        elif token == ')':
            depth -= 1
        before, after = split_by_unquoted(after[1:], comma + '()')
        pieces.append(before)
    result = ''.join(pieces)
    assert not depth, repr((depth, line, result))
    return result
def unmarkouterparen(line):
    """Inverse of markouterparen: restore '@(@'/'@)@' markers to parentheses."""
    for marker, paren in (('@(@', '('), ('@)@', ')')):
        line = line.replace(marker, paren)
    return line
def appenddecl(decl, decl2, force=1):
    """Merge declaration dict *decl2* into *decl* and return the result.

    With *force* true, values from *decl2* overwrite existing entries;
    otherwise only missing keys are filled in.  Selector and attribute
    keys are merged through their dedicated set* helpers.
    """
    decl = decl or {}
    if not decl2 or decl is decl2:
        return decl
    for key in list(decl2.keys()):
        if key in ('typespec', '=', 'typename'):
            if force or key not in decl:
                decl[key] = decl2[key]
        elif key == 'attrspec':
            for attr in decl2[key]:
                decl = setattrspec(decl, attr, force)
        elif key == 'kindselector':
            decl = setkindselector(decl, decl2[key], force)
        elif key == 'charselector':
            decl = setcharselector(decl, decl2[key], force)
        elif key == 'note':
            pass
        elif key in ('intent', 'check', 'dimension', 'optional',
                     'required', 'depend'):
            errmess('appenddecl: "%s" not implemented.\n' % key)
        else:
            raise Exception('appenddecl: Unknown variable definition key: ' +
                            str(key))
    return decl
# Matches a kind/length selector ('@(@..@)@', '*<n>' or '*@(@..@)@') followed
# by the remainder of the declaration.
selectpattern = re.compile(
    r'\s*(?P<this>(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
# Matches the tail of a 'type[,attrs][::] name[(params)]' definition.
typedefpattern = re.compile(
    r'(?:,(?P<attributes>[\w(),]+))?(::)?(?P<name>\b[a-z$_][\w$]*\b)'
    r'(?:\((?P<params>[\w,]*)\))?\Z', re.I)
# Matches 'name[(args)] [result(res)] [bind(...)]' in marked-paren form.
nameargspattern = re.compile(
    r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>(?:(?!@\)@).)*)\s*@\)@))*\s*\Z', re.I)
# Matches 'operator(..)' / 'assignment(..)' interface names.
operatorpattern = re.compile(
    r'\s*(?P<scheme>(operator|assignment))'
    r'@\(@\s*(?P<name>[^)]+)\s*@\)@\s*\Z', re.I)
# Matches 'name(args)' call syntax in marked-paren form.
callnameargspattern = re.compile(
    r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
# Fortran double-precision literal with 'd' exponent (e.g. 1.5d0).
real16pattern = re.compile(
    r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
# Fortran single/default real literal with 'e' exponent or plain decimal.
real8pattern = re.compile(
    r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')

# Matches 'intent(...callback...)' attribute specifications.
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
def _is_intent_callback(vdecl):
for a in vdecl.get('attrspec', []):
if _intentcallbackpattern.match(a):
return 1
return 0
def _resolvetypedefpattern(line):
    """Parse a derived-type definition tail.

    Returns (name, attributes, params): the type name, its attributes as a
    lower-cased list, and the optional parameter list; or (None, [], None)
    when the line does not match typedefpattern.
    """
    line = ''.join(line.split())  # removes whitespace
    m1 = typedefpattern.match(line)
    # Removed leftover debug `print(line, m1)` that wrote to stdout on
    # every type statement.
    if m1:
        attrs = m1.group('attributes')
        attrs = [a.lower() for a in attrs.split(',')] if attrs else []
        return m1.group('name'), attrs, m1.group('params')
    return None, [], None
# Compiled once at import time: matches 'bind(<lang>[, name="<name>"])'.
# Previously this pattern was recompiled on every call, and this function
# runs for every routine header that is cracked.
_bind_name_pattern = re.compile(
    r'bind\(\s*(?P<lang>[^,]+)(?:\s*,\s*name\s*=\s*["\'](?P<name>[^"\']+)["\']\s*)?\)',
    re.I)


def parse_name_for_bind(line):
    """Extract a bind(...) construct from *line*.

    Returns
    -------
    (line, bind_statement) : tuple
        *line* with the bind construct removed (unchanged when absent),
        and the matched bind statement text, or None when not present.
    """
    match = _bind_name_pattern.search(line)
    bind_statement = None
    if match:
        bind_statement = match.group(0)
        # Remove the 'bind' construct from the line.
        line = line[:match.start()] + line[match.end():]
    return line, bind_statement
def _resolvenameargspattern(line):
    """Resolve (name, args, result, bind) from a routine header line.

    Tries, in order: name(args)[result(...)], operator/assignment
    interfaces, and plain call syntax.  Unmatched lines yield
    (None, [], None, None).
    """
    line, bind_cname = parse_name_for_bind(line)
    marked = markouterparen(line)
    m = nameargspattern.match(marked)
    if m:
        return m.group('name'), m.group('args'), m.group('result'), bind_cname
    m = operatorpattern.match(marked)
    if m:
        combined = m.group('scheme') + '(' + m.group('name') + ')'
        return combined, [], None, None
    m = callnameargspattern.match(marked)
    if m:
        return m.group('name'), m.group('args'), None, None
    return None, [], None, None
def analyzeline(m, case, line):
"""
Reads each line in the input file in sequence and updates global vars.
Effectively reads and collects information from the input file to the
global variable groupcache, a dictionary containing info about each part
of the fortran module.
At the end of analyzeline, information is filtered into the correct dict
keys, but parameter values and dimensions are not yet interpreted.
"""
global groupcounter, groupname, groupcache, grouplist, filepositiontext
global currentfilename, f77modulename, neededinterface, neededmodule
global expectbegin, gotnextfile, previous_context
block = m.group('this')
if case != 'multiline':
previous_context = None
if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
and not skipemptyends and groupcounter < 1:
newname = os.path.basename(currentfilename).split('.')[0]
outmess(
'analyzeline: no group yet. Creating program group with name "%s".\n' % newname)
gotnextfile = 0
groupcounter = groupcounter + 1
groupname[groupcounter] = 'program'
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = 'program'
groupcache[groupcounter]['name'] = newname
groupcache[groupcounter]['from'] = 'fromsky'
expectbegin = 0
if case in ['begin', 'call', 'callfun']:
# Crack line => block,name,args,result
block = block.lower()
if re.match(r'block\s*data', block, re.I):
block = 'block data'
elif re.match(r'python\s*module', block, re.I):
block = 'python module'
elif re.match(r'abstract\s*interface', block, re.I):
block = 'abstract interface'
if block == 'type':
name, attrs, _ = _resolvetypedefpattern(m.group('after'))
groupcache[groupcounter]['vars'][name] = dict(attrspec = attrs)
args = []
result = None
else:
name, args, result, bindcline = _resolvenameargspattern(m.group('after'))
if name is None:
if block == 'block data':
name = '_BLOCK_DATA_'
else:
name = ''
if block not in ['interface', 'block data', 'abstract interface']:
outmess('analyzeline: No name/args pattern found for line.\n')
previous_context = (block, name, groupcounter)
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
if '' in args:
while '' in args:
args.remove('')
outmess(
'analyzeline: argument list is malformed (missing argument).\n')
# end of crack line => block,name,args,result
needmodule = 0
needinterface = 0
if case in ['call', 'callfun']:
needinterface = 1
if 'args' not in groupcache[groupcounter]:
return
if name not in groupcache[groupcounter]['args']:
return
for it in grouplist[groupcounter]:
if it['name'] == name:
return
if name in groupcache[groupcounter]['interfaced']:
return
block = {'call': 'subroutine', 'callfun': 'function'}[case]
if f77modulename and neededmodule == -1 and groupcounter <= 1:
neededmodule = groupcounter + 2
needmodule = 1
if block not in ['interface', 'abstract interface']:
needinterface = 1
# Create new block(s)
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needmodule:
if verbose > 1:
outmess('analyzeline: Creating module block %s\n' %
repr(f77modulename), 0)
groupname[groupcounter] = 'module'
groupcache[groupcounter]['block'] = 'python module'
groupcache[groupcounter]['name'] = f77modulename
groupcache[groupcounter]['from'] = ''
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needinterface:
if verbose > 1:
outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (
groupcounter), 0)
groupname[groupcounter] = 'interface'
groupcache[groupcounter]['block'] = 'interface'
groupcache[groupcounter]['name'] = 'unknown_interface'
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupname[groupcounter] = block
groupcache[groupcounter]['block'] = block
if not name:
name = 'unknown_' + block.replace(' ', '_')
groupcache[groupcounter]['prefix'] = m.group('before')
groupcache[groupcounter]['name'] = rmbadname1(name)
groupcache[groupcounter]['result'] = result
if groupcounter == 1:
groupcache[groupcounter]['from'] = currentfilename
else:
if f77modulename and groupcounter == 3:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], currentfilename)
else:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
for k in list(groupcache[groupcounter].keys()):
if not groupcache[groupcounter][k]:
del groupcache[groupcounter][k]
groupcache[groupcounter]['args'] = args
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['entry'] = {}
# end of creation
if block == 'type':
groupcache[groupcounter]['varnames'] = []
if case in ['call', 'callfun']: # set parents variables
if name not in groupcache[groupcounter - 2]['externals']:
groupcache[groupcounter - 2]['externals'].append(name)
groupcache[groupcounter]['vars'] = copy.deepcopy(
groupcache[groupcounter - 2]['vars'])
try:
del groupcache[groupcounter]['vars'][name][
groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
except Exception:
pass
if block in ['function', 'subroutine']: # set global attributes
# name is fortran name
if bindcline:
bindcdat = re.search(crackline_bindlang, bindcline)
if bindcdat:
groupcache[groupcounter]['bindlang'] = {name : {}}
groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang')
if bindcdat.group('lang_name'):
groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name')
try:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
except Exception:
pass
if case == 'callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
if not name == result:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
# if groupcounter>1: # name is interfaced
try:
groupcache[groupcounter - 2]['interfaced'].append(name)
except Exception:
pass
if block == 'function':
t = typespattern[0].match(m.group('before') + ' ' + name)
if t:
typespec, selector, attr, edecl = cracktypespec0(
t.group('this'), t.group('after'))
updatevars(typespec, selector, attr, edecl)
if case in ['call', 'callfun']:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end routine
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
elif case == 'entry':
name, args, result, _= _resolvenameargspattern(m.group('after'))
if name is not None:
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
assert result is None, repr(result)
groupcache[groupcounter]['entry'][name] = args
previous_context = ('entry', name, groupcounter)
elif case == 'type':
typespec, selector, attr, edecl = cracktypespec0(
block, m.group('after'))
last_name = updatevars(typespec, selector, attr, edecl)
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']:
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()
i = ll.find('::')
if i < 0 and case == 'intent':
i = markouterparen(ll).find('@)@') - 2
ll = ll[:i + 1] + '::' + ll[i + 1:]
i = ll.find('::')
if ll[i:] == '::' and 'args' in groupcache[groupcounter]:
outmess('All arguments will have attribute %s%s\n' %
(m.group('this'), ll[:i]))
ll = ll + ','.join(groupcache[groupcounter]['args'])
if i < 0:
i = 0
pl = ''
else:
pl = ll[:i].strip()
ll = ll[i + 2:]
ch = markoutercomma(pl).split('@,@')
if len(ch) > 1:
pl = ch[0]
outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (
','.join(ch[1:])))
last_name = None
for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
m1 = namepattern.match(e)
if not m1:
if case in ['public', 'private']:
k = ''
else:
print(m.groupdict())
outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % (
case, repr(e)))
continue
else:
k = rmbadname1(m1.group('name'))
if case in ['public', 'private'] and \
(k == 'operator' or k == 'assignment'):
k += m1.group('after')
if k not in edecl:
edecl[k] = {}
if case == 'dimension':
ap = case + m1.group('after')
if case == 'intent':
ap = m.group('this') + pl
if _intentcallbackpattern.match(ap):
if k not in groupcache[groupcounter]['args']:
if groupcounter > 1:
if '__user__' not in groupcache[groupcounter - 2]['name']:
outmess(
'analyzeline: missing __user__ module (could be nothing)\n')
# fixes ticket 1693
if k != groupcache[groupcounter]['name']:
outmess('analyzeline: appending intent(callback) %s'
' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
groupcache[groupcounter]['args'].append(k)
else:
errmess(
'analyzeline: intent(callback) %s is ignored\n' % (k))
else:
errmess('analyzeline: intent(callback) %s is already'
' in argument list\n' % (k))
if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']:
ap = case
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append(ap)
else:
edecl[k]['attrspec'] = [ap]
if case == 'external':
if groupcache[groupcounter]['block'] == 'program':
outmess('analyzeline: ignoring program arguments\n')
continue
if k not in groupcache[groupcounter]['args']:
continue
if 'externals' not in groupcache[groupcounter]:
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['externals'].append(k)
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'moduleprocedure':
groupcache[groupcounter]['implementedby'] = \
[x.strip() for x in m.group('after').split(',')]
elif case == 'parameter':
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()[1:-1]
last_name = None
for e in markoutercomma(ll).split('@,@'):
try:
k, initexpr = [x.strip() for x in e.split('=')]
except Exception:
outmess(
'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
continue
params = get_parameters(edecl)
k = rmbadname1(k)
if k not in edecl:
edecl[k] = {}
if '=' in edecl[k] and (not edecl[k]['='] == initexpr):
outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % (
k, edecl[k]['='], initexpr))
t = determineexprtype(initexpr, params)
if t:
if t.get('typespec') == 'real':
tt = list(initexpr)
for m in real16pattern.finditer(initexpr):
tt[m.start():m.end()] = list(
initexpr[m.start():m.end()].lower().replace('d', 'e'))
initexpr = ''.join(tt)
elif t.get('typespec') == 'complex':
initexpr = initexpr[1:].lower().replace('d', 'e').\
replace(',', '+1j*(')
try:
v = eval(initexpr, {}, params)
except (SyntaxError, NameError, TypeError) as msg:
errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'
% (initexpr, msg))
continue
edecl[k]['='] = repr(v)
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append('parameter')
else:
edecl[k]['attrspec'] = ['parameter']
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'implicit':
if m.group('after').strip().lower() == 'none':
groupcache[groupcounter]['implicit'] = None
elif m.group('after'):
if 'implicit' in groupcache[groupcounter]:
impl = groupcache[groupcounter]['implicit']
else:
impl = {}
if impl is None:
outmess(
'analyzeline: Overwriting earlier "implicit none" statement.\n')
impl = {}
for e in markoutercomma(m.group('after')).split('@,@'):
decl = {}
m1 = re.match(
r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
if not m1:
outmess(
'analyzeline: could not extract info of implicit statement part "%s"\n' % (e))
continue
m2 = typespattern4implicit.match(m1.group('this'))
if not m2:
outmess(
'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e))
continue
typespec, selector, attr, edecl = cracktypespec0(
m2.group('this'), m2.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
decl['typespec'] = typespec
decl['kindselector'] = kindselect
decl['charselector'] = charselect
decl['typename'] = typename
for k in list(decl.keys()):
if not decl[k]:
del decl[k]
for r in markoutercomma(m1.group('after')).split('@,@'):
if '-' in r:
try:
begc, endc = [x.strip() for x in r.split('-')]
except Exception:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
continue
else:
begc = endc = r.strip()
if not len(begc) == len(endc) == 1:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r)
continue
for o in range(ord(begc), ord(endc) + 1):
impl[chr(o)] = decl
groupcache[groupcounter]['implicit'] = impl
elif case == 'data':
ll = []
dl = ''
il = ''
f = 0
fc = 1
inp = 0
for c in m.group('after'):
if not inp:
if c == "'":
fc = not fc
if c == '/' and fc:
f = f + 1
continue
if c == '(':
inp = inp + 1
elif c == ')':
inp = inp - 1
if f == 0:
dl = dl + c
elif f == 1:
il = il + c
elif f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
dl = c
il = ''
f = 0
if f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
vars = groupcache[groupcounter].get('vars', {})
last_name = None
for l in ll:
l[0], l[1] = l[0].strip(), l[1].strip()
if l[0].startswith(','):
l[0] = l[0][1:]
if l[0].startswith('('):
outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0])
continue
for idx, v in enumerate(rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')])):
if v.startswith('('):
outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v)
# XXX: subsequent init expressions may get wrong values.
# Ignoring since data statements are irrelevant for
# wrapping.
continue
if '!' in l[1]:
# Fixes gh-24746 pyf generation
# XXX: This essentially ignores the value for generating the pyf which is fine:
# integer dimension(3) :: mytab
# common /mycom/ mytab
# Since in any case it is initialized in the Fortran code
outmess('Comment line in declaration "%s" is not supported. Skipping.\n' % l[1])
continue
vars.setdefault(v, {})
vtype = vars[v].get('typespec')
vdim = getdimension(vars[v])
matches = re.findall(r"\(.*?\)", l[1]) if vtype == 'complex' else l[1].split(',')
try:
new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx]
except IndexError:
# gh-24746
# Runs only if above code fails. Fixes the line
# DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /4*0,0.0D0/
# by expanding to ['0', '0', '0', '0', '0.0d0']
if any("*" in m for m in matches):
expanded_list = []
for match in matches:
if "*" in match:
try:
multiplier, value = match.split("*")
expanded_list.extend([value.strip()] * int(multiplier))
except ValueError: # if int(multiplier) fails
expanded_list.append(match.strip())
else:
expanded_list.append(match.strip())
matches = expanded_list
new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx]
current_val = vars[v].get('=')
if current_val and (current_val != new_val):
outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (v, current_val, new_val))
vars[v]['='] = new_val
last_name = v
groupcache[groupcounter]['vars'] = vars
if last_name:
previous_context = ('variable', last_name, groupcounter)
elif case == 'common':
line = m.group('after').strip()
if not line[0] == '/':
line = '//' + line
cl = []
f = 0
bn = ''
ol = ''
for c in line:
if c == '/':
f = f + 1
continue
if f >= 3:
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
f = f - 2
bn = ''
ol = ''
if f % 2:
bn = bn + c
else:
ol = ol + c
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
commonkey = {}
if 'common' in groupcache[groupcounter]:
commonkey = groupcache[groupcounter]['common']
for c in cl:
if c[0] not in commonkey:
commonkey[c[0]] = []
for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
if i:
commonkey[c[0]].append(i)
groupcache[groupcounter]['common'] = commonkey
previous_context = ('common', bn, groupcounter)
elif case == 'use':
m1 = re.match(
r'\A\s*(?P<name>\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
if m1:
mm = m1.groupdict()
if 'use' not in groupcache[groupcounter]:
groupcache[groupcounter]['use'] = {}
name = m1.group('name')
groupcache[groupcounter]['use'][name] = {}
isonly = 0
if 'list' in mm and mm['list'] is not None:
if 'notonly' in mm and mm['notonly'] is None:
isonly = 1
groupcache[groupcounter]['use'][name]['only'] = isonly
ll = [x.strip() for x in mm['list'].split(',')]
rl = {}
for l in ll:
if '=' in l:
m2 = re.match(
r'\A\s*(?P<local>\b\w+\b)\s*=\s*>\s*(?P<use>\b\w+\b)\s*\Z', l, re.I)
if m2:
rl[m2.group('local').strip()] = m2.group(
'use').strip()
else:
outmess(
'analyzeline: Not local=>use pattern found in %s\n' % repr(l))
else:
rl[l] = l
groupcache[groupcounter]['use'][name]['map'] = rl
else:
pass
else:
print(m.groupdict())
outmess('analyzeline: Could not crack the use statement.\n')
elif case in ['f2pyenhancements']:
if 'f2pyenhancements' not in groupcache[groupcounter]:
groupcache[groupcounter]['f2pyenhancements'] = {}
d = groupcache[groupcounter]['f2pyenhancements']
if m.group('this') == 'usercode' and 'usercode' in d:
if isinstance(d['usercode'], str):
d['usercode'] = [d['usercode']]
d['usercode'].append(m.group('after'))
else:
d[m.group('this')] = m.group('after')
elif case == 'multiline':
if previous_context is None:
if verbose:
outmess('analyzeline: No context for multiline block.\n')
return
gc = groupcounter
appendmultiline(groupcache[gc],
previous_context[:2],
m.group('this'))
else:
if verbose > 1:
print(m.groupdict())
outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
    """Record the multiline block *ml* under *context_name* in *group*.

    Entries accumulate in ``group['f2pymultilines']``, a dict mapping a
    context key to the list of multiline strings seen for that context.
    """
    store = group.setdefault('f2pymultilines', {})
    store.setdefault(context_name, []).append(ml)
def cracktypespec0(typespec, ll):
    """Split the head of a type declaration line.

    Returns ``(typespec, selector, attr, ll)`` where *typespec* is the
    normalized type keyword, *selector* the kind/char selector text (for
    the types that take one), *attr* the attribute list before a ``::``
    (if any), and *ll* the remaining entity-declaration text.
    Returns None if no kind/char selector pattern matches.
    """
    selector = None
    attr = None
    # Normalize the type keyword; the two 'double ...' forms may contain
    # arbitrary internal whitespace.
    if re.match(r'double\s*complex', typespec, re.I):
        typespec = 'double complex'
    elif re.match(r'double\s*precision', typespec, re.I):
        typespec = 'double precision'
    else:
        typespec = typespec.strip().lower()
    m1 = selectpattern.match(markouterparen(ll))
    if not m1:
        outmess(
            'cracktypespec0: no kind/char_selector pattern found for line.\n')
        return
    d = m1.groupdict()
    for key in list(d.keys()):
        d[key] = unmarkouterparen(d[key])
    if typespec in ('complex', 'integer', 'logical', 'real', 'character', 'type'):
        selector = d['this']
        ll = d['after']
    # Split off the attribute list at the first '::', when present.
    before, sep, rest = ll.partition('::')
    if sep:
        attr = before.strip()
        ll = rest
    return typespec, selector, attr, ll
#####
# Patterns used by updatevars/cracktypespec to dissect entity declarations.
# Note: several of them operate on text preprocessed by markoutercomma /
# markouterparen, hence the literal '@,@', '@(@' and '@)@' markers.

# '<name> <rest-of-declaration>'
namepattern = re.compile(r'\s*(?P<name>\b\w+\b)\s*(?P<after>.*)\s*\Z', re.I)
# '(kind=<kind>)' or '(<kind>)' or '*<kind2>' kind selectors
kindselector = re.compile(
    r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|\*\s*(?P<kind2>.*?))\s*\Z', re.I)
# '(<lenkind>)' or '*<charlen>' character selectors
charselector = re.compile(
    r'\s*(\((?P<lenkind>.*)\)|\*\s*(?P<charlen>.*))\s*\Z', re.I)
# interior of a character selector: 'kind=..', 'len=..', bare len, 'f2py_len=..'
lenkindpattern = re.compile(
    r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)'
    r'|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)'
    r'|(f2py_len\s*=\s*(?P<f2py_len>.*))|))\s*\Z', re.I)
# entity tail: optional '(<array>)' dims, '*<len>' length, '=<init>' or '/<init>/'
lenarraypattern = re.compile(
    r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*\*\s*(?P<len>.*?)|(\*\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
def removespaces(expr):
    """Drop spaces that sit next to an operator, bracket or another space.

    Interior spaces between two ordinary tokens (e.g. ``'a b'``) are kept;
    only a space adjacent to one of ``()[]{}=+-/*`` or a space is removed.
    The expression is stripped of leading/trailing whitespace first.
    """
    expr = expr.strip()
    if len(expr) <= 1:
        return expr
    separators = "()[]{}=+-/* "
    kept = [expr[0]]
    for pos in range(1, len(expr) - 1):
        ch = expr[pos]
        # Skip a space whose neighbour (either side) is a separator.
        if ch == ' ' and (expr[pos + 1] in separators or
                          expr[pos - 1] in separators):
            continue
        kept.append(ch)
    kept.append(expr[-1])
    return ''.join(kept)
def markinnerspaces(line):
    """Replace spaces inside quoted strings with the triplet ``@_@``.

    For instance, for the input ``"a 'b c'"`` the function returns
    ``"a 'b@_@c'"``.  Backslash-escaped quote characters do not toggle
    the in-string state.

    Parameters
    ----------
    line : str

    Returns
    -------
    str
    """
    out = []
    in_string = False
    quote_char = None
    prev = ''
    for ch in line:
        if prev == '\\' and ch in ('\\', "'", '"'):
            # Escaped backslash or quote: copy verbatim, do not toggle state.
            out.append(ch)
            prev = ch
            continue
        if not in_string and ch in ("'", '"'):
            quote_char = ch
        if ch == quote_char:
            in_string = not in_string
        elif ch == ' ' and in_string:
            out.append('@_@')
            continue
        out.append(ch)
        prev = ch
    return ''.join(out)
def updatevars(typespec, selector, attrspec, entitydecl):
    """
    Returns last_name, the variable name without special chars, parenthesis
    or dimension specifiers.
    Alters groupcache to add the name, typespec, attrspec (and possibly value)
    of current variable.

    One declaration line may declare several entities; each is merged into
    ``groupcache[groupcounter]['vars']``, warning (and ignoring) on any
    attempt to change an already-recorded type/kind/char/init setting.
    """
    global groupcache, groupcounter
    last_name = None
    kindselect, charselect, typename = cracktypespec(typespec, selector)
    # Clean up outer commas, whitespace and undesired chars from attrspec
    if attrspec:
        attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
        l = []
        c = re.compile(r'(?P<start>[a-zA-Z]+)')
        for a in attrspec:
            if not a:
                continue
            m = c.match(a)
            if m:
                # Lowercase only the leading keyword (e.g. 'DIMENSION(n)').
                s = m.group('start').lower()
                a = s + a[len(s):]
            l.append(a)
        attrspec = l
    # Split the entity declaration list on commas that are outside parens,
    # then further on spaces (spaces inside quotes were protected as '@_@').
    el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
    el1 = []
    for e in el:
        for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
            if e1:
                el1.append(e1.replace('@_@', ' '))
    for e in el1:
        m = namepattern.match(e)
        if not m:
            outmess(
                'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e)))
            continue
        ename = rmbadname1(m.group('name'))
        edecl = {}
        if ename in groupcache[groupcounter]['vars']:
            # Merge into the existing declaration; conflicting settings are
            # reported and ignored rather than overwritten.
            edecl = groupcache[groupcounter]['vars'][ename].copy()
            not_has_typespec = 'typespec' not in edecl
            if not_has_typespec:
                edecl['typespec'] = typespec
            elif typespec and (not typespec == edecl['typespec']):
                outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (
                    ename, edecl['typespec'], typespec))
            if 'kindselector' not in edecl:
                edecl['kindselector'] = copy.copy(kindselect)
            elif kindselect:
                for k in list(kindselect.keys()):
                    if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]):
                        outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
                            k, ename, edecl['kindselector'][k], kindselect[k]))
                    else:
                        edecl['kindselector'][k] = copy.copy(kindselect[k])
            if 'charselector' not in edecl and charselect:
                if not_has_typespec:
                    edecl['charselector'] = charselect
                else:
                    errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n'
                            % (ename, charselect))
            elif charselect:
                for k in list(charselect.keys()):
                    if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]):
                        outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
                            k, ename, edecl['charselector'][k], charselect[k]))
                    else:
                        edecl['charselector'][k] = copy.copy(charselect[k])
            if 'typename' not in edecl:
                edecl['typename'] = typename
            elif typename and (not edecl['typename'] == typename):
                outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (
                    ename, edecl['typename'], typename))
            if 'attrspec' not in edecl:
                edecl['attrspec'] = copy.copy(attrspec)
            elif attrspec:
                for a in attrspec:
                    if a not in edecl['attrspec']:
                        edecl['attrspec'].append(a)
        else:
            # First time this entity is seen: record everything as given.
            edecl['typespec'] = copy.copy(typespec)
            edecl['kindselector'] = copy.copy(kindselect)
            edecl['charselector'] = copy.copy(charselect)
            edecl['typename'] = typename
            edecl['attrspec'] = copy.copy(attrspec)
        # An 'external' argument is also registered in the group's externals.
        if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']:
            if 'externals' not in groupcache[groupcounter]:
                groupcache[groupcounter]['externals'] = []
            groupcache[groupcounter]['externals'].append(e)
        if m.group('after'):
            # Parse the entity tail: dimensions, '*len', and/or initializer.
            m1 = lenarraypattern.match(markouterparen(m.group('after')))
            if m1:
                d1 = m1.groupdict()
                # Fold the alternate capture groups ('len2' etc.) into the
                # primary keys, then drop unmatched groups.
                for lk in ['len', 'array', 'init']:
                    if d1[lk + '2'] is not None:
                        d1[lk] = d1[lk + '2']
                        del d1[lk + '2']
                for k in list(d1.keys()):
                    if d1[k] is not None:
                        d1[k] = unmarkouterparen(d1[k])
                    else:
                        del d1[k]
                if 'len' in d1 and 'array' in d1:
                    if d1['len'] == '':
                        # 'name(dims)*' with empty len: dims were the length.
                        d1['len'] = d1['array']
                        del d1['array']
                    elif typespec == 'character':
                        if ('charselector' not in edecl) or (not edecl['charselector']):
                            edecl['charselector'] = {}
                        if 'len' in edecl['charselector']:
                            del edecl['charselector']['len']
                        edecl['charselector']['*'] = d1['len']
                        del d1['len']
                    else:
                        # Non-character 'name(dims)*len': treat len as an
                        # extra dimension.
                        d1['array'] = d1['array'] + ',' + d1['len']
                        del d1['len']
                        errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % (
                            typespec, e, typespec, ename, d1['array']))
                if 'len' in d1:
                    if typespec in ['complex', 'integer', 'logical', 'real']:
                        if ('kindselector' not in edecl) or (not edecl['kindselector']):
                            edecl['kindselector'] = {}
                        edecl['kindselector']['*'] = d1['len']
                        del d1['len']
                    elif typespec == 'character':
                        if ('charselector' not in edecl) or (not edecl['charselector']):
                            edecl['charselector'] = {}
                        if 'len' in edecl['charselector']:
                            del edecl['charselector']['len']
                        edecl['charselector']['*'] = d1['len']
                        del d1['len']
                if 'init' in d1:
                    if '=' in edecl and (not edecl['='] == d1['init']):
                        outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (
                            ename, edecl['='], d1['init']))
                    else:
                        edecl['='] = d1['init']
                if 'array' in d1:
                    dm = 'dimension(%s)' % d1['array']
                    if 'attrspec' not in edecl or (not edecl['attrspec']):
                        edecl['attrspec'] = [dm]
                    else:
                        edecl['attrspec'].append(dm)
                        # Reject a dimension spec conflicting with an earlier one.
                        for dm1 in edecl['attrspec']:
                            if dm1[:9] == 'dimension' and dm1 != dm:
                                del edecl['attrspec'][-1]
                                errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n'
                                        % (ename, dm1, dm))
                                break
            else:
                outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % (
                    ename + m.group('after')))
        # Drop empty entries before storing the declaration.
        for k in list(edecl.keys()):
            if not edecl[k]:
                del edecl[k]
        groupcache[groupcounter]['vars'][ename] = edecl
        if 'varnames' in groupcache[groupcounter]:
            groupcache[groupcounter]['varnames'].append(ename)
        last_name = ename
    return last_name
def cracktypespec(typespec, selector):
    """Parse the kind/char selector text of a type declaration.

    Returns ``(kindselect, charselect, typename)``:
    a kind-selector dict for numeric/logical types, a char-selector dict
    for character types, or the derived-type name for ``type(<name>)``.
    Returns None (not a tuple) when the selector fails to parse for a
    numeric or character type.
    """
    kindselect = None
    charselect = None
    typename = None
    if selector:
        if typespec in ['complex', 'integer', 'logical', 'real']:
            kindselect = kindselector.match(selector)
            if not kindselect:
                outmess(
                    'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector)))
                return
            kindselect = kindselect.groupdict()
            # '*<kind>' form was captured as 'kind2'; store it under '*'.
            kindselect['*'] = kindselect['kind2']
            del kindselect['kind2']
            for k in list(kindselect.keys()):
                if not kindselect[k]:
                    del kindselect[k]
            for k, i in list(kindselect.items()):
                kindselect[k] = rmbadname1(i)
        elif typespec == 'character':
            charselect = charselector.match(selector)
            if not charselect:
                outmess(
                    'cracktypespec: no charselector pattern found for %s\n' % (repr(selector)))
                return
            charselect = charselect.groupdict()
            # '*<len>' form was captured as 'charlen'; store it under '*'.
            charselect['*'] = charselect['charlen']
            del charselect['charlen']
            if charselect['lenkind']:
                # Parenthesized selector: split into len= and kind= parts.
                lenkind = lenkindpattern.match(
                    markoutercomma(charselect['lenkind']))
                lenkind = lenkind.groupdict()
                for lk in ['len', 'kind']:
                    if lenkind[lk + '2']:
                        lenkind[lk] = lenkind[lk + '2']
                    charselect[lk] = lenkind[lk]
                    del lenkind[lk + '2']
                if lenkind['f2py_len'] is not None:
                    # used to specify the length of assumed length strings
                    charselect['f2py_len'] = lenkind['f2py_len']
            del charselect['lenkind']
            for k in list(charselect.keys()):
                if not charselect[k]:
                    del charselect[k]
            for k, i in list(charselect.items()):
                charselect[k] = rmbadname1(i)
        elif typespec == 'type':
            typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
            if typename:
                typename = typename.group('name')
            else:
                outmess('cracktypespec: no typename found in %s\n' %
                        (repr(typespec + selector)))
        else:
            outmess('cracktypespec: no selector used for %s\n' %
                    (repr(selector)))
    return kindselect, charselect, typename
######
def setattrspec(decl, attr, force=0):
    """Add *attr* to ``decl['attrspec']`` and return *decl*.

    Without *force*, duplicates are skipped, and the mutually exclusive
    pairs static/automatic and public/private are respected: the attribute
    is only added when its counterpart is absent.  With *force*, the
    attribute is appended unconditionally.
    """
    if not decl:
        decl = {}
    if not attr:
        return decl
    if 'attrspec' not in decl:
        decl['attrspec'] = [attr]
        return decl
    attrs = decl['attrspec']
    if force:
        attrs.append(attr)
    if attr in attrs:
        return decl
    # Each attribute in this table may not coexist with its counterpart.
    exclusive = {'static': 'automatic', 'automatic': 'static',
                 'public': 'private', 'private': 'public'}
    rival = exclusive.get(attr)
    if rival is None or rival not in attrs:
        attrs.append(attr)
    return decl
def setkindselector(decl, sel, force=0):
    """Merge kind-selector entries *sel* into ``decl['kindselector']``.

    Existing keys are preserved unless *force* is true.  Returns *decl*.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    if 'kindselector' not in decl:
        decl['kindselector'] = sel
        return decl
    target = decl['kindselector']
    for key, value in list(sel.items()):
        if force or key not in target:
            target[key] = value
    return decl
def setcharselector(decl, sel, force=0):
    """Merge char-selector entries *sel* into ``decl['charselector']``.

    Existing keys are preserved unless *force* is true.  Returns *decl*.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    if 'charselector' not in decl:
        decl['charselector'] = sel
        return decl
    target = decl['charselector']
    for key, value in list(sel.items()):
        if force or key not in target:
            target[key] = value
    return decl
def getblockname(block, unknown='unknown'):
    """Return the block's name, or *unknown* when it has none."""
    return block.get('name', unknown)
# post processing
def setmesstext(block):
    """Point the global diagnostic banner at *block*'s source position.

    Silently leaves the banner unchanged when *block* lacks the
    'from'/'name' entries (or is not subscriptable).
    """
    global filepositiontext
    try:
        position = 'In: %s:%s\n' % (block['from'], block['name'])
    except Exception:
        return
    filepositiontext = position
def get_usedict(block):
    """Gather USE-statement info from *block* and its ancestors.

    Ancestor entries are collected first so that this block's own ``use``
    entries win on key conflicts.
    """
    usedict = {}
    if 'parent_block' in block:
        usedict.update(get_usedict(block['parent_block']))
    usedict.update(block.get('use', {}))
    return usedict
def get_useparameters(block, param_map=None):
    """Collect parameter values from the Fortran 90 modules used by *block*.

    Walks the block's (and ancestors') USE statements, looks each module up
    in the global ``f90modulevars``, and merges that module's parameters
    into *param_map* (later modules override earlier entries, with a
    warning).  Rename mappings ('local => use') are not implemented.
    """
    global f90modulevars
    if param_map is None:
        param_map = {}
    usedict = get_usedict(block)
    if not usedict:
        return param_map
    for usename, mapping in list(usedict.items()):
        usename = usename.lower()
        if usename not in f90modulevars:
            outmess('get_useparameters: no module %s info used by %s\n' %
                    (usename, block.get('name')))
            continue
        mvars = f90modulevars[usename]
        params = get_parameters(mvars)
        if not params:
            continue
        # XXX: apply mapping
        if mapping:
            errmess('get_useparameters: mapping for %s not impl.\n' % (mapping))
        for k, v in list(params.items()):
            if k in param_map:
                outmess('get_useparameters: overriding parameter %s with'
                        ' value from module %s\n' % (repr(k), repr(usename)))
            param_map[k] = v
    return param_map
def postcrack2(block, tab='', param_map=None):
    """Second post-processing pass: resolve kind parameters from used modules.

    Recursively replaces a variable's ``kindselector['kind']`` value with
    the parameter value found in *param_map* (built from USE'd modules).
    No-op unless the global ``f90modulevars`` is non-empty.  Accepts a
    single block dict or a list of blocks.
    """
    global f90modulevars
    if not f90modulevars:
        return block
    if isinstance(block, list):
        ret = [postcrack2(g, tab=tab + '\t', param_map=param_map)
               for g in block]
        return ret
    setmesstext(block)
    outmess('%sBlock: %s\n' % (tab, block['name']), 0)
    if param_map is None:
        param_map = get_useparameters(block)
    if param_map is not None and 'vars' in block:
        vars = block['vars']
        for n in list(vars.keys()):
            var = vars[n]
            if 'kindselector' in var:
                kind = var['kindselector']
                if 'kind' in kind:
                    val = kind['kind']
                    # Replace a symbolic kind with its parameter value.
                    if val in param_map:
                        kind['kind'] = param_map[val]
    new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map)
                for b in block['body']]
    block['body'] = new_body
    return block
def postcrack(block, args=None, tab=''):
    """
    TODO:
          function return values
          determine expression types if in argument list

    Main post-processing pass: analyzes args, commons, variables and the
    body of *block*, and when the block declares externals, synthesizes a
    ``<name>__user__routines`` python module holding their interfaces
    (appended to the global ``usermodules``).  Accepts a block dict or a
    list of blocks; for a list, ``__user__`` modules are sorted first.
    """
    global usermodules, onlyfunctions
    if isinstance(block, list):
        gret = []
        uret = []
        for g in block:
            setmesstext(g)
            g = postcrack(g, tab=tab + '\t')
            # sort user routines to appear first
            if 'name' in g and '__user__' in g['name']:
                uret.append(g)
            else:
                gret.append(g)
        return uret + gret
    setmesstext(block)
    if not isinstance(block, dict) and 'block' not in block:
        raise Exception('postcrack: Expected block dictionary instead of ' +
                        str(block))
    if 'name' in block and not block['name'] == 'unknown_interface':
        outmess('%sBlock: %s\n' % (tab, block['name']), 0)
    # Analysis passes: arguments, common blocks, then variables.
    block = analyzeargs(block)
    block = analyzecommon(block)
    block['vars'] = analyzevars(block)
    block['sortvars'] = sortvarnames(block['vars'])
    if 'args' in block and block['args']:
        args = block['args']
    block['body'] = analyzebody(block, args, tab=tab)

    userisdefined = []
    if 'use' in block:
        useblock = block['use']
        for k in list(useblock.keys()):
            if '__user__' in k:
                userisdefined.append(k)
    else:
        useblock = {}
    name = ''
    if 'name' in block:
        name = block['name']
    # and not userisdefined: # Build a __user__ module
    if 'externals' in block and block['externals']:
        interfaced = []
        if 'interfaced' in block:
            interfaced = block['interfaced']
        mvars = copy.copy(block['vars'])
        if name:
            mname = name + '__user__routines'
        else:
            mname = 'unknown__user__routines'
        # Ensure the generated module name is unique among defined ones.
        if mname in userisdefined:
            i = 1
            while '%s_%i' % (mname, i) in userisdefined:
                i = i + 1
            mname = '%s_%i' % (mname, i)
        interface = {'block': 'interface', 'body': [],
                     'vars': {}, 'name': name + '_user_interface'}
        for e in block['externals']:
            if e in interfaced:
                # Move the already-cracked interface body of this external
                # out of the block and into the synthesized interface.
                edef = []
                j = -1
                for b in block['body']:
                    j = j + 1
                    if b['block'] == 'interface':
                        i = -1
                        for bb in b['body']:
                            i = i + 1
                            if 'name' in bb and bb['name'] == e:
                                edef = copy.copy(bb)
                                del b['body'][i]
                                break
                        if edef:
                            if not b['body']:
                                del block['body'][j]
                            del interfaced[interfaced.index(e)]
                            break
                interface['body'].append(edef)
            else:
                if e in mvars and not isexternal(mvars[e]):
                    interface['vars'][e] = mvars[e]
        if interface['vars'] or interface['body']:
            block['interfaced'] = interfaced
            mblock = {'block': 'python module', 'body': [
                interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']}
            useblock[mname] = {}
            usermodules.append(mblock)
    if useblock:
        block['use'] = useblock
    return block
def sortvarnames(vars):
    """Order variable names so that dependencies precede dependents.

    Names without a 'depend' entry come first (in original order);
    dependent names are repeatedly rotated until each one's dependencies
    have been emitted.  On a dependency cycle an error is reported and the
    remaining names are appended unsorted.
    """
    free = []      # names with all dependencies satisfied, in output order
    pending = []   # names still waiting on other pending names
    for name in list(vars.keys()):
        if vars[name].get('depend'):
            pending.append(name)
        else:
            free.append(name)
    total = len(pending)
    rotations = 0
    while pending:  # XXX: How to catch dependence cycles correctly?
        candidate = pending[0]
        blocked = False
        for other in pending[1:]:
            if other in vars[candidate]['depend']:
                blocked = True
                break
        if blocked:
            # Rotate the blocked name to the back and try the next one.
            pending = pending[1:] + [candidate]
            rotations += 1
            if rotations > total:
                errmess('sortvarnames: failed to compute dependencies because'
                        ' of cyclic dependencies between '
                        + ', '.join(pending) + '\n')
                free = free + pending
                break
        else:
            free.append(candidate)
            pending = pending[1:]
            total = len(pending)
            rotations = 0
    return free
def analyzecommon(block):
    """Resolve the members of each COMMON block declared in *block*.

    Each entry '<name>[(<dims>)]' is reduced to its bare name; dimension
    info is folded into the variable's attrspec, and previously unknown
    names are added to ``block['vars']``.  Member names are accumulated in
    ``block['commonvars']``.
    """
    if not hascommon(block):
        return block
    commonvars = []
    for k in list(block['common'].keys()):
        comvars = []
        for e in block['common'][k]:
            m = re.match(
                r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
            if m:
                dims = []
                if m.group('dims'):
                    dims = [x.strip()
                            for x in markoutercomma(m.group('dims')).split('@,@')]
                n = rmbadname1(m.group('name').strip())
                if n in block['vars']:
                    # Known variable: attach the dimension attribute.
                    if 'attrspec' in block['vars'][n]:
                        block['vars'][n]['attrspec'].append(
                            'dimension(%s)' % (','.join(dims)))
                    else:
                        block['vars'][n]['attrspec'] = [
                            'dimension(%s)' % (','.join(dims))]
                else:
                    # New variable known only through the COMMON statement.
                    if dims:
                        block['vars'][n] = {
                            'attrspec': ['dimension(%s)' % (','.join(dims))]}
                    else:
                        block['vars'][n] = {}
                if n not in commonvars:
                    commonvars.append(n)
            else:
                n = e
                errmess(
                    'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k))
            comvars.append(n)
        block['common'][k] = comvars
    if 'commonvars' not in block:
        block['commonvars'] = commonvars
    else:
        block['commonvars'] = block['commonvars'] + commonvars
    return block
def analyzebody(block, args, tab=''):
    """Post-process each sub-block in ``block['body']``.

    Recurses via ``postcrack``; skips functions/subroutines that are
    filtered out by *args*, the global ``skipfuncs``/``onlyfuncs`` lists,
    or private visibility.  Module variable tables are registered in the
    global ``f90modulevars``, and 'python module' blocks are collected in
    ``usermodules``.  Returns the new body list.
    """
    global usermodules, skipfuncs, onlyfuncs, f90modulevars
    setmesstext(block)
    # Names that are not explicitly 'public' in this block's vars.
    maybe_private = {
        key: value
        for key, value in block['vars'].items()
        if 'attrspec' not in value or 'public' not in value['attrspec']
    }
    body = []
    for b in block['body']:
        b['parent_block'] = block
        if b['block'] in ['function', 'subroutine']:
            if args is not None and b['name'] not in args:
                continue
            else:
                as_ = b['args']
                # Add private members to skipfuncs for gh-23879
                if b['name'] in maybe_private.keys():
                    skipfuncs.append(b['name'])
            if b['name'] in skipfuncs:
                continue
            if onlyfuncs and b['name'] not in onlyfuncs:
                continue
            # Keep the original interface text for later code generation.
            b['saved_interface'] = crack2fortrangen(
                b, '\n' + ' ' * 6, as_interface=True)

        else:
            as_ = args
        b = postcrack(b, as_, tab=tab + '\t')
        if b['block'] in ['interface', 'abstract interface'] and \
           not b['body'] and not b.get('implementedby'):
            if 'f2pyenhancements' not in b:
                continue
        if b['block'].replace(' ', '') == 'pythonmodule':
            usermodules.append(b)
        else:
            if b['block'] == 'module':
                f90modulevars[b['name']] = b['vars']
            body.append(b)
    return body
def buildimplicitrules(block):
    """Build the implicit typing rules in effect for *block*.

    Returns ``(implicitrules, attrrules)`` where *implicitrules* maps a
    first letter to its implicit type declaration (or is None when the
    block contains ``implicit none``), and *attrrules* maps a first letter
    to a 'static'/'automatic' attribute given via an implicit statement.
    """
    setmesstext(block)
    implicitrules = defaultimplicitrules
    attrrules = {}
    if 'implicit' in block:
        if block['implicit'] is None:
            implicitrules = None
            if verbose > 1:
                outmess(
                    'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name']))
        else:
            # Work on a copy: assigning defaultimplicitrules directly and
            # then overwriting entries below would mutate the shared
            # module-level default, leaking this routine's implicit rules
            # into every subsequently analyzed block.
            implicitrules = dict(defaultimplicitrules)
            for k in list(block['implicit'].keys()):
                if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
                    implicitrules[k] = block['implicit'][k]
                else:
                    attrrules[k] = block['implicit'][k]['typespec']
    return implicitrules, attrrules
def myeval(e, g=None, l=None):
    """Like `eval`, but only int and float results are accepted.

    Raises ValueError for any other result type.
    """
    result = eval(e, g, l)
    if type(result) is int or type(result) is float:
        return result
    raise ValueError('r=%r' % (result))
# Matches an expression that is exactly one bare identifier.
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)


def getlincoef(e, xset):  # e = a*x+b ; x in xset
    """
    Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in
    xset.

    >>> getlincoef('2*x + 1', {'x'})
    (2, 1, 'x')
    >>> getlincoef('3*x + x*2 + 2 + 1', {'x'})
    (5, 3, 'x')
    >>> getlincoef('0', {'x'})
    (0, 0, None)
    >>> getlincoef('0*x', {'x'})
    (0, 0, 'x')
    >>> getlincoef('x*x', {'x'})
    (None, None, None)

    This can be tricked by sufficiently complex expressions

    >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'})
    (2.0, 3.0, 'x')
    """
    # Constant expression: a=0, b=value.
    try:
        c = int(myeval(e, {}, {}))
        return 0, c, None
    except Exception:
        pass
    # Bare symbol: a=1, b=0.
    if getlincoef_re_1.match(e):
        return 1, 0, e
    len_e = len(e)
    for x in xset:
        if len(x) > len_e:
            continue
        if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
            # skip function calls having x as an argument, e.g max(1, x)
            continue
        re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
        m = re_1.match(e)
        if m:
            # Probe linearity numerically: substitute x=0, 1, 0.5, 1.5 (each
            # while-loop replaces every occurrence of x) and evaluate.
            try:
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 0, m1.group('after'))
                    m1 = re_1.match(ee)
                b = myeval(ee, {}, {})
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 1, m1.group('after'))
                    m1 = re_1.match(ee)
                a = myeval(ee, {}, {}) - b
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 0.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c = myeval(ee, {}, {})
                # computing another point to be sure that expression is linear
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 1.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c2 = myeval(ee, {}, {})
                # Accept only if both probe points agree with a*x + b.
                if (a * 0.5 + b == c and a * 1.5 + b == c2):
                    return a, b, x
            except Exception:
                pass
            break
    return None, None, None
# Matches identifier-like words (used to find names inside init expressions).
word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)


def _get_depend_dict(name, vars, deps):
    """Compute the transitive dependency list of *name* and cache it in *deps*.

    NOTE: when ``vars[name]['depend']`` exists, ``words`` aliases that very
    list, so newly discovered dependencies are appended into *vars* as a
    side effect.
    """
    if name in vars:
        words = vars[name].get('depend', [])

        if '=' in vars[name] and not isstring(vars[name]):
            # Names referenced by the init expression are dependencies too.
            for word in word_pattern.findall(vars[name]['=']):
                # The word_pattern may return values that are not
                # only variables, they can be string content for instance
                if word not in words and word in vars and word != name:
                    words.append(word)
        # Expand transitively via already-cached or recursively computed deps.
        for word in words[:]:
            for w in deps.get(word, []) \
                    or _get_depend_dict(word, vars, deps):
                if w not in words:
                    words.append(w)
    else:
        outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
        words = []
    deps[name] = words
    return words
def _calc_depend_dict(vars):
    """Map every variable name to its full (transitive) dependency list."""
    dep = {}
    for name in list(vars.keys()):
        _get_depend_dict(name, vars, dep)
    return dep
def get_sorted_names(vars):
    """Return the names in *vars* ordered so dependencies come first."""
    depend_dict = _calc_depend_dict(vars)
    ordered = []
    # First emit every name that has no dependencies at all.
    for name in list(depend_dict.keys()):
        if not depend_dict[name]:
            ordered.append(name)
            del depend_dict[name]
    # Then repeatedly emit names whose remaining dependencies are resolved.
    while depend_dict:
        for name, lst in list(depend_dict.items()):
            remaining = [n for n in lst if n in depend_dict]
            if remaining:
                depend_dict[name] = remaining
            else:
                ordered.append(name)
                del depend_dict[name]
    return [name for name in ordered if name in vars]
def _kind_func(string):
    # XXX: return something sensible.
    # Strip surrounding quotes, then map the common real literal forms to
    # their byte sizes; otherwise leave the expression symbolic.
    literal = string
    if literal[0] in "'\"":
        literal = literal[1:-1]
    if real16pattern.match(literal):
        return 8
    if real8pattern.match(literal):
        return 4
    return 'kind(' + literal + ')'
def _selected_int_kind_func(r):
# XXX: This should be processor dependent
m = 10 ** r
if m <= 2 ** 8:
return 1
if m <= 2 ** 16:
return 2
if m <= 2 ** 32:
return 4
if m <= 2 ** 63:
return 8
if m <= 2 ** 128:
return 16
return -1
def _selected_real_kind_func(p, r=0, radix=0):
# XXX: This should be processor dependent
# This is only verified for 0 <= p <= 20, possibly good for p <= 33 and above
if p < 7:
return 4
if p < 16:
return 8
machine = platform.machine().lower()
if machine.startswith(('aarch64', 'alpha', 'arm64', 'loongarch', 'mips', 'power', 'ppc', 'riscv', 's390x', 'sparc')):
if p <= 33:
return 16
else:
if p < 19:
return 10
elif p <= 33:
return 16
return -1
def get_parameters(vars, global_params={}):
    """Evaluate all PARAMETER values found in *vars*.

    Returns a dict mapping parameter names (and their lowercase forms) to
    evaluated values, seeded from *global_params*.  Fortran logical
    literals, kind() / selected_*_kind() calls and kind-specifier suffixes
    are rewritten into evaluable Python before calling ``param_eval``.
    (Note: *global_params* is a mutable default but is only copied, never
    mutated.)
    """
    params = copy.copy(global_params)
    g_params = copy.copy(global_params)
    # Provide Python stand-ins for the Fortran kind inquiry intrinsics.
    for name, func in [('kind', _kind_func),
                       ('selected_int_kind', _selected_int_kind_func),
                       ('selected_real_kind', _selected_real_kind_func), ]:
        if name not in g_params:
            g_params[name] = func
    param_names = []
    for n in get_sorted_names(vars):
        if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
            param_names.append(n)
    kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
    selected_int_kind_re = re.compile(
        r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
    selected_kind_re = re.compile(
        r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
    for n in param_names:
        if '=' in vars[n]:
            v = vars[n]['=']
            if islogical(vars[n]):
                v = v.lower()
                for repl in [
                        ('.false.', 'False'),
                        ('.true.', 'True'),
                        # TODO: test .eq., .neq., etc replacements.
                ]:
                    v = v.replace(*repl)
            v = kind_re.sub(r'kind("\1")', v)
            v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)

            # We need to act according to the data.
            # The easy case is if the data has a kind-specifier,
            # then we may easily remove those specifiers.
            # However, it may be that the user uses other specifiers...(!)
            is_replaced = False

            if 'kindselector' in vars[n]:
                # Remove kind specifier (including those defined
                # by parameters)
                if 'kind' in vars[n]['kindselector']:
                    orig_v_len = len(v)
                    v = v.replace('_' + vars[n]['kindselector']['kind'], '')
                    # Again, this will be true if even a single specifier
                    # has been replaced, see comment above.
                    is_replaced = len(v) < orig_v_len

            if not is_replaced:
                if not selected_kind_re.match(v):
                    v_ = v.split('_')
                    # In case there are additive parameters
                    if len(v_) > 1:
                        v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '')

            # Currently this will not work for complex numbers.
            # There is missing code for extracting a complex number,
            # which may be defined in either of these:
            #  a) (Re, Im)
            #  b) cmplx(Re, Im)
            #  c) dcmplx(Re, Im)
            #  d) cmplx(Re, Im, <prec>)

            if isdouble(vars[n]):
                # Normalize the d-exponent to e (e.g. 1.0d0 -> 1.0e0).
                tt = list(v)
                for m in real16pattern.finditer(v):
                    tt[m.start():m.end()] = list(
                        v[m.start():m.end()].lower().replace('d', 'e'))
                v = ''.join(tt)

            elif iscomplex(vars[n]):
                outmess(f'get_parameters[TODO]: '
                        f'implement evaluation of complex expression {v}\n')

            dimspec = ([s.lstrip('dimension').strip()
                        for s in vars[n]['attrspec']
                        if s.startswith('dimension')] or [None])[0]

            # Handle _dp for gh-6624
            # Also fixes gh-20460
            if real16pattern.search(v):
                v = 8
            elif real8pattern.search(v):
                v = 4
            try:
                params[n] = param_eval(v, g_params, params, dimspec=dimspec)
            except Exception as msg:
                params[n] = v
                outmess(f'get_parameters: got "{msg}" on {n!r}\n')

            if isstring(vars[n]) and isinstance(params[n], int):
                params[n] = chr(params[n])
            nl = n.lower()
            if nl != n:
                params[nl] = params[n]
        else:
            print(vars[n])
            outmess(f'get_parameters:parameter {n!r} does not have value?!\n')
    return params
def _eval_length(length, params):
    """Evaluate a character-length spec; assumed lengths collapse to '(*)'."""
    if length in ('(:)', '(*)', '*'):
        return '(*)'
    return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
# TODO: use symbolic from PR #19805
value = eval(value, {}, params)
value = (repr if isinstance(value, str) else str)(value)
except (NameError, SyntaxError, TypeError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '
'(available names: %s)\n'
% (msg, value, list(params.keys())))
return value
def analyzevars(block):
    """
    Sets correct dimension information for each variable/parameter.

    Works on a copy of ``block['vars']``: applies implicit typing rules,
    evaluates kind/len selectors against previously collected parameters,
    parses ``attrspec`` entries (dimension/intent/depend/check/note) into
    dedicated keys, derives shape-based checks and initialization
    expressions for array arguments, and finally prunes variables that
    are not needed for the block.  Returns the processed vars dict.
    """
    global f90modulevars
    setmesstext(block)
    implicitrules, attrrules = buildimplicitrules(block)
    vars = copy.copy(block['vars'])
    if block['block'] == 'function' and block['name'] not in vars:
        vars[block['name']] = {}
    # The '' pseudo-variable carries group-wide attributes (public/private);
    # propagate them to every variable and body block, then drop it.
    if '' in block['vars']:
        del vars['']
        if 'attrspec' in block['vars']['']:
            gen = block['vars']['']['attrspec']
            for n in set(vars) | set(b['name'] for b in block['body']):
                for k in ['public', 'private']:
                    if k in gen:
                        vars[n] = setattrspec(vars.get(n, {}), k)
    # Process arguments first (in declaration order), then the rest.
    svars = []
    args = block['args']
    for a in args:
        try:
            vars[a]
            svars.append(a)
        except KeyError:
            pass
    for n in list(vars.keys()):
        if n not in args:
            svars.append(n)
    params = get_parameters(vars, get_useparameters(block))
    # At this point, params are read and interpreted, but
    # the params used to define vars are not yet parsed
    # Pre-build one regex matcher per leading identifier, used later to
    # detect which variables an '=' initializer depends on.
    dep_matches = {}
    name_match = re.compile(r'[A-Za-z][\w$]*').match
    for v in list(vars.keys()):
        m = name_match(v)
        if m:
            n = v[m.start():m.end()]
            try:
                dep_matches[n]
            except KeyError:
                dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
    for n in svars:
        if n[0] in list(attrrules.keys()):
            vars[n] = setattrspec(vars[n], attrrules[n[0]])
        # Apply Fortran implicit typing when no explicit typespec exists
        # (unless the variable is declared external).
        if 'typespec' not in vars[n]:
            if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
                if implicitrules:
                    ln0 = n[0].lower()
                    for k in list(implicitrules[ln0].keys()):
                        if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
                            continue
                        if k not in vars[n]:
                            vars[n][k] = implicitrules[ln0][k]
                        elif k == 'attrspec':
                            for l in implicitrules[ln0][k]:
                                vars[n] = setattrspec(vars[n], l)
                elif n in block['args']:
                    outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % (
                        repr(n), block['name']))
        # Evaluate len/kind selector expressions against known parameters.
        if 'charselector' in vars[n]:
            if 'len' in vars[n]['charselector']:
                l = vars[n]['charselector']['len']
                try:
                    l = str(eval(l, {}, params))
                except Exception:
                    pass
                vars[n]['charselector']['len'] = l
        if 'kindselector' in vars[n]:
            if 'kind' in vars[n]['kindselector']:
                l = vars[n]['kindselector']['kind']
                try:
                    l = str(eval(l, {}, params))
                except Exception:
                    pass
                vars[n]['kindselector']['kind'] = l
        # Parse attrspec entries into dedicated keys; dimension_exprs maps
        # a dimension expression to per-variable (solver, deps) info.
        dimension_exprs = {}
        if 'attrspec' in vars[n]:
            attr = vars[n]['attrspec']
            attr.reverse()
            vars[n]['attrspec'] = []
            dim, intent, depend, check, note = None, None, None, None, None
            for a in attr:
                if a[:9] == 'dimension':
                    dim = (a[9:].strip())[1:-1]
                elif a[:6] == 'intent':
                    intent = (a[6:].strip())[1:-1]
                elif a[:6] == 'depend':
                    depend = (a[6:].strip())[1:-1]
                elif a[:5] == 'check':
                    check = (a[5:].strip())[1:-1]
                elif a[:4] == 'note':
                    note = (a[4:].strip())[1:-1]
                else:
                    vars[n] = setattrspec(vars[n], a)
                if intent:
                    if 'intent' not in vars[n]:
                        vars[n]['intent'] = []
                    for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
                        # Remove spaces so that 'in out' becomes 'inout'
                        tmp = c.replace(' ', '')
                        if tmp not in vars[n]['intent']:
                            vars[n]['intent'].append(tmp)
                    intent = None
                if note:
                    note = note.replace('\\n\\n', '\n\n')
                    note = note.replace('\\n ', '\n')
                    if 'note' not in vars[n]:
                        vars[n]['note'] = [note]
                    else:
                        vars[n]['note'].append(note)
                    note = None
                if depend is not None:
                    if 'depend' not in vars[n]:
                        vars[n]['depend'] = []
                    for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
                        if c not in vars[n]['depend']:
                            vars[n]['depend'].append(c)
                    depend = None
                if check is not None:
                    if 'check' not in vars[n]:
                        vars[n]['check'] = []
                    for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
                        if c not in vars[n]['check']:
                            vars[n]['check'].append(c)
                    check = None
            if dim and 'dimension' not in vars[n]:
                vars[n]['dimension'] = []
                for d in rmbadname(
                        [x.strip() for x in markoutercomma(dim).split('@,@')]
                ):
                    # d is the expression inside the dimension declaration
                    # Evaluate `d` with respect to params
                    try:
                        # the dimension for this variable depends on a
                        # previously defined parameter
                        d = param_parse(d, params)
                    except (ValueError, IndexError, KeyError):
                        outmess(
                            ('analyzevars: could not parse dimension for '
                             f'variable {d!r}\n')
                        )
                    dim_char = ':' if d == ':' else '*'
                    if d == dim_char:
                        dl = [dim_char]
                    else:
                        dl = markoutercomma(d, ':').split('@:@')
                    if len(dl) == 2 and '*' in dl:  # e.g. dimension(5:*)
                        dl = ['*']
                        d = '*'
                    if len(dl) == 1 and dl[0] != dim_char:
                        dl = ['1', dl[0]]
                    if len(dl) == 2:
                        d1, d2 = map(symbolic.Expr.parse, dl)
                        dsize = d2 - d1 + 1
                        d = dsize.tostring(language=symbolic.Language.C)
                        # find variables v that define d as a linear
                        # function, `d == a * v + b`, and store
                        # coefficients a and b for further analysis.
                        solver_and_deps = {}
                        for v in block['vars']:
                            s = symbolic.as_symbol(v)
                            if dsize.contains(s):
                                try:
                                    a, b = dsize.linear_solve(s)
                                    def solve_v(s, a=a, b=b):
                                        return (s - b) / a
                                    all_symbols = set(a.symbols())
                                    all_symbols.update(b.symbols())
                                except RuntimeError as msg:
                                    # d is not a linear function of v,
                                    # however, if v can be determined
                                    # from d using other means,
                                    # implement the corresponding
                                    # solve_v function here.
                                    solve_v = None
                                    all_symbols = set(dsize.symbols())
                                v_deps = set(
                                    s.data for s in all_symbols
                                    if s.data in vars)
                                solver_and_deps[v] = solve_v, list(v_deps)
                        # Note that dsize may contain symbols that are
                        # not defined in block['vars']. Here we assume
                        # these correspond to Fortran/C intrinsic
                        # functions or that are defined by other
                        # means. We'll let the compiler validate the
                        # definiteness of such symbols.
                        dimension_exprs[d] = solver_and_deps
                    vars[n]['dimension'].append(d)
        if 'check' not in vars[n] and 'args' in block and n in block['args']:
            # n is an argument that has no checks defined. Here we
            # generate some consistency checks for n, and when n is an
            # array, generate checks for its dimensions and construct
            # initialization expressions.
            n_deps = vars[n].get('depend', [])
            n_checks = []
            n_is_input = l_or(isintent_in, isintent_inout,
                              isintent_inplace)(vars[n])
            if isarray(vars[n]):  # n is array
                for i, d in enumerate(vars[n]['dimension']):
                    coeffs_and_deps = dimension_exprs.get(d)
                    if coeffs_and_deps is None:
                        # d is `:` or `*` or a constant expression
                        pass
                    elif n_is_input:
                        # n is an input array argument and its shape
                        # may define variables used in dimension
                        # specifications.
                        for v, (solver, deps) in coeffs_and_deps.items():
                            def compute_deps(v, deps):
                                for v1 in coeffs_and_deps.get(v, [None, []])[1]:
                                    if v1 not in deps:
                                        deps.add(v1)
                                        compute_deps(v1, deps)
                            all_deps = set()
                            compute_deps(v, all_deps)
                            if ((v in n_deps
                                 or '=' in vars[v]
                                 or 'depend' in vars[v])):
                                # Skip a variable that
                                # - n depends on
                                # - has user-defined initialization expression
                                # - has user-defined dependencies
                                continue
                            if solver is not None and v not in all_deps:
                                # v can be solved from d, hence, we
                                # make it an optional argument with
                                # initialization expression:
                                is_required = False
                                init = solver(symbolic.as_symbol(
                                    f'shape({n}, {i})'))
                                init = init.tostring(
                                    language=symbolic.Language.C)
                                vars[v]['='] = init
                                # n needs to be initialized before v. So,
                                # making v dependent on n and on any
                                # variables in solver or d.
                                vars[v]['depend'] = [n] + deps
                                if 'check' not in vars[v]:
                                    # add check only when no
                                    # user-specified checks exist
                                    vars[v]['check'] = [
                                        f'shape({n}, {i}) == {d}']
                            else:
                                # d is a non-linear function on v,
                                # hence, v must be a required input
                                # argument that n will depend on
                                is_required = True
                                if 'intent' not in vars[v]:
                                    vars[v]['intent'] = []
                                if 'in' not in vars[v]['intent']:
                                    vars[v]['intent'].append('in')
                                # v needs to be initialized before n
                                n_deps.append(v)
                                n_checks.append(
                                    f'shape({n}, {i}) == {d}')
                            v_attr = vars[v].get('attrspec', [])
                            if not ('optional' in v_attr
                                    or 'required' in v_attr):
                                v_attr.append(
                                    'required' if is_required else 'optional')
                            if v_attr:
                                vars[v]['attrspec'] = v_attr
                    if coeffs_and_deps is not None:
                        # extend v dependencies with ones specified in attrspec
                        for v, (solver, deps) in coeffs_and_deps.items():
                            v_deps = vars[v].get('depend', [])
                            for aa in vars[v].get('attrspec', []):
                                if aa.startswith('depend'):
                                    aa = ''.join(aa.split())
                                    v_deps.extend(aa[7:-1].split(','))
                            if v_deps:
                                vars[v]['depend'] = list(set(v_deps))
                            if n not in v_deps:
                                n_deps.append(v)
            elif isstring(vars[n]):
                # Normalize character-length selectors to the '*' key.
                if 'charselector' in vars[n]:
                    if '*' in vars[n]['charselector']:
                        length = _eval_length(vars[n]['charselector']['*'],
                                              params)
                        vars[n]['charselector']['*'] = length
                    elif 'len' in vars[n]['charselector']:
                        length = _eval_length(vars[n]['charselector']['len'],
                                              params)
                        del vars[n]['charselector']['len']
                        vars[n]['charselector']['*'] = length
            if n_checks:
                vars[n]['check'] = n_checks
            if n_deps:
                vars[n]['depend'] = list(set(n_deps))
        # Variables with an initializer become optional and their
        # dependencies are inferred from names in the expression.
        if '=' in vars[n]:
            if 'attrspec' not in vars[n]:
                vars[n]['attrspec'] = []
            if ('optional' not in vars[n]['attrspec']) and \
               ('required' not in vars[n]['attrspec']):
                vars[n]['attrspec'].append('optional')
            if 'depend' not in vars[n]:
                vars[n]['depend'] = []
                for v, m in list(dep_matches.items()):
                    if m(vars[n]['=']):
                        vars[n]['depend'].append(v)
                if not vars[n]['depend']:
                    del vars[n]['depend']
            if isscalar(vars[n]):
                vars[n]['='] = _eval_scalar(vars[n]['='], params)
    # Handle the variable that names the block itself (function result,
    # prefix-declared type, pure/recursive attributes).
    for n in list(vars.keys()):
        if n == block['name']:  # n is block name
            if 'note' in vars[n]:
                block['note'] = vars[n]['note']
            if block['block'] == 'function':
                if 'result' in block and block['result'] in vars:
                    vars[n] = appenddecl(vars[n], vars[block['result']])
                if 'prefix' in block:
                    pr = block['prefix']
                    pr1 = pr.replace('pure', '')
                    ispure = (not pr == pr1)
                    pr = pr1.replace('recursive', '')
                    isrec = (not pr == pr1)
                    m = typespattern[0].match(pr)
                    if m:
                        typespec, selector, attr, edecl = cracktypespec0(
                            m.group('this'), m.group('after'))
                        kindselect, charselect, typename = cracktypespec(
                            typespec, selector)
                        vars[n]['typespec'] = typespec
                        try:
                            if block['result']:
                                vars[block['result']]['typespec'] = typespec
                        except Exception:
                            pass
                        if kindselect:
                            if 'kind' in kindselect:
                                try:
                                    kindselect['kind'] = eval(
                                        kindselect['kind'], {}, params)
                                except Exception:
                                    pass
                            vars[n]['kindselector'] = kindselect
                        if charselect:
                            vars[n]['charselector'] = charselect
                        if typename:
                            vars[n]['typename'] = typename
                        if ispure:
                            vars[n] = setattrspec(vars[n], 'pure')
                        if isrec:
                            vars[n] = setattrspec(vars[n], 'recursive')
                    else:
                        outmess(
                            'analyzevars: prefix (%s) were not used\n' % repr(block['prefix']))
    # For routine-like blocks, prune variables that are neither arguments,
    # common/entry variables, callbacks, aux variables nor the result.
    if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
        if 'commonvars' in block:
            neededvars = copy.copy(block['args'] + block['commonvars'])
        else:
            neededvars = copy.copy(block['args'])
        for n in list(vars.keys()):
            if l_or(isintent_callback, isintent_aux)(vars[n]):
                neededvars.append(n)
        if 'entry' in block:
            neededvars.extend(list(block['entry'].keys()))
            for k in list(block['entry'].keys()):
                for n in block['entry'][k]:
                    if n not in neededvars:
                        neededvars.append(n)
        if block['block'] == 'function':
            if 'result' in block:
                neededvars.append(block['result'])
            else:
                neededvars.append(block['name'])
        if block['block'] in ['subroutine', 'function']:
            name = block['name']
            if name in vars and 'intent' in vars[name]:
                block['intent'] = vars[name]['intent']
        if block['block'] == 'type':
            neededvars.extend(list(vars.keys()))
        for n in list(vars.keys()):
            if n not in neededvars:
                del vars[n]
    return vars
# Matches a plain (case-insensitive) Fortran identifier: a letter followed
# by letters, digits, '_' or '$'.  expr2name() uses it to tell simple
# argument names apart from argument expressions.
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)
def param_eval(v, g_params, params, dimspec=None):
    """
    Creates a dictionary of indices and values for each parameter in a
    parameter array to be evaluated later.

    For a scalar parameter (``dimspec is None``) the expression *v* is
    evaluated directly against ``g_params``/``params`` and the result
    (or *v* itself on failure) is returned.

    WARNING: It is not possible to initialize multidimensional array
    parameters e.g. dimension(-3:1, 4, 3:5) at this point. This is because in
    Fortran initialization through array constructor requires the RESHAPE
    intrinsic function. Since the right-hand side of the parameter declaration
    is not executed in f2py, but rather at the compiled c/fortran extension,
    later, it is not possible to execute a reshape of a parameter array.
    One issue remains: if the user wants to access the array parameter from
    python, we should either
    1) allow them to access the parameter array using python standard indexing
       (which is often incompatible with the original fortran indexing)
    2) allow the parameter array to be accessed in python as a dictionary with
       fortran indices as keys
    We are choosing 2 for now.
    """
    if dimspec is None:
        # Scalar parameter: evaluate the right-hand side directly.
        try:
            p = eval(v, g_params, params)
        except Exception as msg:
            p = v
            outmess(f'param_eval: got "{msg}" on {v!r}\n')
        return p
    # This is an array parameter.
    # First, we parse the dimension information
    if len(dimspec) < 2 or dimspec[::len(dimspec)-1] != "()":
        raise ValueError(f'param_eval: dimension {dimspec} can\'t be parsed')
    dimrange = dimspec[1:-1].split(',')
    if len(dimrange) == 1:
        # e.g. dimension(2) or dimension(-1:1)
        dimrange = dimrange[0].split(':')
        # now, dimrange is a list of 1 or 2 elements
        if len(dimrange) == 1:
            # Upper bound only: Fortran indices run from 1 to bound.
            bound = param_parse(dimrange[0], params)
            dimrange = range(1, int(bound)+1)
        else:
            lbound = param_parse(dimrange[0], params)
            ubound = param_parse(dimrange[1], params)
            dimrange = range(int(lbound), int(ubound)+1)
    else:
        # BUG FIX: the second literal was missing the f prefix, so the
        # message used to contain the text "{dimspec}" verbatim instead
        # of the actual dimension specification.
        raise ValueError('param_eval: multidimensional array parameters '
                         f'{dimspec} not supported')
    # Parse parameter value: strip an array-constructor '(/ ... /)' wrapper
    # and evaluate each element; elements that fail to evaluate are kept
    # as their original source text.
    v = (v[2:-2] if v.startswith('(/') else v).split(',')
    v_eval = []
    for item in v:
        try:
            item = eval(item, g_params, params)
        except Exception as msg:
            outmess(f'param_eval: got "{msg}" on {item!r}\n')
        v_eval.append(item)
    p = dict(zip(dimrange, v_eval))
    return p
def param_parse(d, params):
    """Recursively parse an array-dimension expression.

    Resolves the expression *d* from an array ``dimension`` declaration
    against previously parsed parameters.

    Parameters
    ----------
    d : str
        Fortran expression describing the dimension of an array.
    params : dict
        Previously parsed parameters declared in the Fortran source file.

    Returns
    -------
    out : str
        Parsed dimension expression (always a string).

    Examples: with ``params = {'pa': 3}``, ``param_parse('pa', params)``
    gives ``'3'``; with ``params = {'pa': {1: 3, 2: 5}}``,
    ``param_parse('pa(1)', params)`` gives ``'3'``; a plain constant such
    as ``'2'`` is returned unchanged.
    """
    lparen = d.find("(")
    if lparen != -1:
        # Indexed reference to a previously parsed array parameter,
        # e.g. 'pa(1)': resolve the index recursively and look it up.
        dname = d[:lparen]
        ddims = d[lparen + 1:d.rfind(")")]
        index = int(param_parse(ddims, params))
        return str(params[dname][index])
    if d in params:
        # The whole expression is a known parameter name.
        return str(params[d])
    # Otherwise substitute every embedded parameter name by its value,
    # repeating until no further occurrence is found.
    for p in params:
        re_1 = re.compile(
            r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I
        )
        m = re_1.match(d)
        while m:
            d = m.group('before') + str(params[p]) + m.group('after')
            m = re_1.match(d)
    return d
def expr2name(a, block, args=[]):
    """Map an argument *a* (name or expression) to a usable variable name.

    Expressions are replaced by a synthetic name ``e_<sanitized>_e`` (or
    ``..._e``), clashes with existing vars/args are resolved by appending
    'r' and a counter, and ``block['vars']`` is updated with the type or
    declaration info for the chosen name.  Returns the final name.
    """
    orig_a = a
    a_is_expr = not analyzeargs_re_1.match(a)
    if a_is_expr:  # `a` is an expression
        implicitrules, attrrules = buildimplicitrules(block)
        at = determineexprtype(a, block['vars'], implicitrules)
        # Derive a synthetic name from the lower-cased expression,
        # replacing every non-alphanumeric character by '_'.
        allowed = string.ascii_lowercase + string.digits
        na = 'e_' + ''.join(
            c if c in allowed else '_' for c in a.lower())
        na += 'e' if na.endswith('_') else '_e'
        a = na
    # Avoid clashes with existing variable and argument names.
    while a in block['vars'] or a in block['args']:
        a += 'r'
    if a in args:
        k = 1
        while a + str(k) in args:
            k += 1
        a += str(k)
    if a_is_expr:
        block['vars'][a] = at
    else:
        if a not in block['vars']:
            # Alias the original declaration when one exists.
            block['vars'][a] = block['vars'].get(orig_a, {})
        if 'externals' in block and orig_a in block['externals'] + block['interfaced']:
            block['vars'][a] = setattrspec(block['vars'][a], 'external')
    return a
def analyzeargs(block):
    """Normalize a block's argument list in place.

    Rewrites argument expressions to names via expr2name, ensures vars
    entries exist for entry-point arguments and the result variable, and
    records inner routines named among the arguments as externals.
    Returns the (mutated) block.
    """
    setmesstext(block)
    implicitrules, _ = buildimplicitrules(block)
    block.setdefault('args', [])
    normalized = []
    for arg in block['args']:
        # expr2name consults the partially built list to avoid clashes.
        normalized.append(expr2name(arg, block, normalized))
    block['args'] = normalized
    # Every entry-point argument must have a vars entry.
    if 'entry' in block:
        for entry_args in list(block['entry'].values()):
            for arg in entry_args:
                block['vars'].setdefault(arg, {})
    # Inner routines whose name appears among the arguments are externals.
    for body_block in block['body']:
        inner_name = body_block['name']
        if inner_name in normalized:
            externals = block.setdefault('externals', [])
            if inner_name not in externals:
                externals.append(inner_name)
    if 'result' in block and block['result'] not in block['vars']:
        block['vars'][block['result']] = {}
    return block
# Expression-shape patterns used by determineexprtype():
# re_1: '(<re>,<im>)' pair            -> complex constant
# re_2: integer literal, optional kind suffix, e.g. '123' or '123_8'
# re_3: real literal (d/e exponent forms), optional kind suffix
# re_4: any fully parenthesized expression
# re_5: call-like form '<name>(...)'
determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>\w+)|)\Z', re.I)
determineexprtype_re_3 = re.compile(
    r'\A[+-]?[\d.]+[-\d+de.]*(_(?P<name>\w+)|)\Z', re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec': 'integer'}
if isinstance(r, float):
return {'typespec': 'real'}
if isinstance(r, complex):
return {'typespec': 'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r))
def determineexprtype(expr, vars, rules={}):
    """Infer the typespec dictionary of a Fortran expression.

    Tries, in order: direct lookup of ``expr`` in ``vars``; literal
    patterns (complex pair, integer, real); the type of any top-level
    operand of +,-,*,/ that is a known variable; recursion into a
    parenthesized or call-like form; implicit typing ``rules`` keyed by
    the first letter of the referenced name; string literals.  Returns
    an empty dict when nothing matches.
    NOTE(review): the shared default ``rules={}`` is only read here,
    never mutated — looks safe; confirm if rules handling changes.
    """
    if expr in vars:
        return _ensure_exprdict(vars[expr])
    expr = expr.strip()
    # '(re,im)' literal -> complex constant.
    if determineexprtype_re_1.match(expr):
        return {'typespec': 'complex'}
    m = determineexprtype_re_2.match(expr)
    if m:
        if 'name' in m.groupdict() and m.group('name'):
            outmess(
                'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
        return {'typespec': 'integer'}
    m = determineexprtype_re_3.match(expr)
    if m:
        if 'name' in m.groupdict() and m.group('name'):
            outmess(
                'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
        return {'typespec': 'real'}
    # If any top-level operand of an arithmetic expression is a known
    # variable, adopt its type.
    for op in ['+', '-', '*', '/']:
        for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]:
            if e in vars:
                return _ensure_exprdict(vars[e])
    t = {}
    if determineexprtype_re_4.match(expr):  # in parenthesis
        t = determineexprtype(expr[1:-1], vars, rules)
    else:
        m = determineexprtype_re_5.match(expr)
        if m:
            rn = m.group('name')
            t = determineexprtype(m.group('name'), vars, rules)
            if t and 'attrspec' in t:
                del t['attrspec']
            if not t:
                # Fall back to implicit typing by first letter.
                if rn[0] in rules:
                    return _ensure_exprdict(rules[rn[0]])
        if expr[0] in '\'"':
            return {'typespec': 'character', 'charselector': {'*': '*'}}
    if not t:
        outmess(
            'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr)))
    return t
######
def crack2fortrangen(block, tab='\n', as_interface=False):
    """Render a cracked block (or list of blocks) back to Fortran text.

    Recursively assembles the block header, f2py enhancements, use/common
    statements, variable declarations, body and entry statements into one
    string.  ``tab`` is the line prefix (newline plus indentation) and
    grows by ``tabchar`` per nesting level.
    NOTE(review): ``skipfuncs``, ``onlyfuncs`` and ``tabchar`` are
    module-level names defined outside this view — confirm.
    """
    global skipfuncs, onlyfuncs
    setmesstext(block)
    ret = ''
    if isinstance(block, list):
        # A list of blocks: render each, honoring skip/only filters for
        # functions and subroutines.
        for g in block:
            if g and g['block'] in ['function', 'subroutine']:
                if g['name'] in skipfuncs:
                    continue
                if onlyfuncs and g['name'] not in onlyfuncs:
                    continue
            ret = ret + crack2fortrangen(g, tab, as_interface=as_interface)
        return ret
    prefix = ''
    name = ''
    args = ''
    blocktype = block['block']
    if blocktype == 'program':
        return ''
    argsl = []
    if 'name' in block:
        name = block['name']
    if 'args' in block:
        vars = block['vars']
        # Callback arguments are kept out of the printed argument list.
        for a in block['args']:
            a = expr2name(a, block, argsl)
            if not isintent_callback(vars[a]):
                argsl.append(a)
        if block['block'] == 'function' or argsl:
            args = '(%s)' % ','.join(argsl)
    f2pyenhancements = ''
    if 'f2pyenhancements' in block:
        for k in list(block['f2pyenhancements'].keys()):
            f2pyenhancements = '%s%s%s %s' % (
                f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k])
    intent_lst = block.get('intent', [])[:]
    if blocktype == 'function' and 'callback' in intent_lst:
        intent_lst.remove('callback')
    if intent_lst:
        f2pyenhancements = '%s%sintent(%s) %s' %\
                           (f2pyenhancements, tab + tabchar,
                            ','.join(intent_lst), name)
    use = ''
    if 'use' in block:
        use = use2fortran(block['use'], tab + tabchar)
    common = ''
    if 'common' in block:
        common = common2fortran(block['common'], tab + tabchar)
    if name == 'unknown_interface':
        name = ''
    result = ''
    if 'result' in block:
        result = ' result (%s)' % block['result']
        if block['result'] not in argsl:
            argsl.append(block['result'])
    # Render nested body blocks and variable declarations one level deeper.
    body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface)
    vars = vars2fortran(
        block, block['vars'], argsl, tab + tabchar, as_interface=as_interface)
    mess = ''
    if 'from' in block and not as_interface:
        mess = '! in %s' % block['from']
    if 'entry' in block:
        entry_stmts = ''
        for k, i in list(block['entry'].items()):
            entry_stmts = '%s%sentry %s(%s)' \
                          % (entry_stmts, tab + tabchar, k, ','.join(i))
        body = body + entry_stmts
    if blocktype == 'block data' and name == '_BLOCK_DATA_':
        name = ''
    ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % (
        tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
    return ret
def common2fortran(common, tab=''):
    """Render a ``common`` mapping back to Fortran COMMON statements.

    The '_BLNK_' key denotes the blank (unnamed) common block; any other
    key produces a named ``common /name/ ...`` statement.  ``tab`` is
    prepended to every statement.
    """
    pieces = []
    for blockname in list(common.keys()):
        members = ','.join(common[blockname])
        if blockname == '_BLNK_':
            pieces.append('%scommon %s' % (tab, members))
        else:
            pieces.append('%scommon /%s/ %s' % (tab, blockname, members))
    return ''.join(pieces)
def use2fortran(use, tab=''):
    """Render a ``use`` mapping back to Fortran USE statements.

    Each module entry may carry an ``only`` flag and a ``map`` of
    local-name -> remote-name renamings; identical names are printed
    plainly, different ones as ``local=>remote``.
    """
    ret = ''
    for mod in list(use.keys()):
        spec = use[mod]
        ret += '%suse %s,' % (tab, mod)
        if spec == {}:
            # Bare `use module`: drop the pending comma and move on.
            if ret.endswith(','):
                ret = ret[:-1]
            continue
        if spec.get('only'):
            ret += ' only:'
        if spec.get('map'):
            sep = ' '
            for local in list(spec['map'].keys()):
                remote = spec['map'][local]
                if local == remote:
                    ret += '%s%s' % (sep, local)
                else:
                    ret += '%s%s=>%s' % (sep, local, remote)
                sep = ','
        # Strip a trailing comma left by an empty renaming list.
        if ret.endswith(','):
            ret = ret[:-1]
    return ret
def true_intent_list(var):
    """Return the intents in ``var['intent']`` whose ``isintent_<name>``
    predicate (looked up in module globals) exists and holds for *var*."""
    ret = []
    for intent in var['intent']:
        checker = globals().get('isintent_%s' % intent)
        if checker is not None and checker(var):
            ret.append(intent)
    return ret
def vars2fortran(block, vars, args, tab='', as_interface=False):
    """Render the variable declarations of a block back to Fortran text.

    Emits, for every relevant variable: external/optional statements,
    the type specification with kind/len selectors, and f2py attributes
    (dimension, intent, check, depend) plus any '=' initializer.
    ``tab`` is the per-line prefix; ``args`` selects and orders the
    argument variables to declare first.

    TODO: public sub-routines/functions when as_interface=True
    """
    setmesstext(block)
    ret = ''
    # Build the ordered list of variables to declare: arguments first,
    # then common variables, recorded varnames, and (unless rendering an
    # interface) everything else.
    nout = []
    for a in args:
        if a in block['vars']:
            nout.append(a)
    if 'commonvars' in block:
        for a in block['commonvars']:
            if a in vars:
                if a not in nout:
                    nout.append(a)
            else:
                errmess(
                    'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a)
    if 'varnames' in block:
        nout.extend(block['varnames'])
    if not as_interface:
        for a in list(vars.keys()):
            if a not in nout:
                nout.append(a)
    for a in nout:
        # Warn about mutual depend() cycles between two variables.
        if 'depend' in vars[a]:
            for d in vars[a]['depend']:
                if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
                    errmess(
                        'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d))
        if 'externals' in block and a in block['externals']:
            if isintent_callback(vars[a]):
                ret = '%s%sintent(callback) %s' % (ret, tab, a)
            ret = '%s%sexternal %s' % (ret, tab, a)
            if isoptional(vars[a]):
                ret = '%s%soptional %s' % (ret, tab, a)
            if a in vars and 'typespec' not in vars[a]:
                continue
            # Externals also defined as inner functions still get a type
            # declaration below; all others are done.
            cont = 1
            for b in block['body']:
                if a == b['name'] and b['block'] == 'function':
                    cont = 0
                    break
            if cont:
                continue
        if a not in vars:
            show(vars)
            outmess('vars2fortran: No definition for argument "%s".\n' % a)
            continue
        if a == block['name']:
            if block['block'] != 'function' or block.get('result'):
                # 1) skip declaring a variable that name matches with
                #    subroutine name
                # 2) skip declaring function when its type is
                #    declared via `result` construction
                continue
        if 'typespec' not in vars[a]:
            if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
                if a in args:
                    ret = '%s%sexternal %s' % (ret, tab, a)
                continue
            show(vars[a])
            outmess('vars2fortran: No typespec for argument "%s".\n' % a)
            continue
        # Assemble the declaration: typespec, kind/len selector, then
        # comma-separated attributes; `c` tracks the pending separator.
        vardef = vars[a]['typespec']
        if vardef == 'type' and 'typename' in vars[a]:
            vardef = '%s(%s)' % (vardef, vars[a]['typename'])
        selector = {}
        if 'kindselector' in vars[a]:
            selector = vars[a]['kindselector']
        elif 'charselector' in vars[a]:
            selector = vars[a]['charselector']
        if '*' in selector:
            if selector['*'] in ['*', ':']:
                vardef = '%s*(%s)' % (vardef, selector['*'])
            else:
                vardef = '%s*%s' % (vardef, selector['*'])
        else:
            if 'len' in selector:
                vardef = '%s(len=%s' % (vardef, selector['len'])
                if 'kind' in selector:
                    vardef = '%s,kind=%s)' % (vardef, selector['kind'])
                else:
                    vardef = '%s)' % (vardef)
            elif 'kind' in selector:
                vardef = '%s(kind=%s)' % (vardef, selector['kind'])
        c = ' '
        if 'attrspec' in vars[a]:
            attr = [l for l in vars[a]['attrspec']
                    if l not in ['external']]
            if as_interface and 'intent(in)' in attr and 'intent(out)' in attr:
                # In Fortran, intent(in, out) are conflicting while
                # intent(in, out) can be specified only via
                # `!f2py intent(out) ..`.
                # So, for the Fortran interface, we'll drop
                # intent(out) to resolve the conflict.
                attr.remove('intent(out)')
            if attr:
                vardef = '%s, %s' % (vardef, ','.join(attr))
                c = ','
        if 'dimension' in vars[a]:
            vardef = '%s%sdimension(%s)' % (
                vardef, c, ','.join(vars[a]['dimension']))
            c = ','
        if 'intent' in vars[a]:
            lst = true_intent_list(vars[a])
            if lst:
                vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst))
            c = ','
        if 'check' in vars[a]:
            vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check']))
            c = ','
        if 'depend' in vars[a]:
            vardef = '%s%sdepend(%s)' % (
                vardef, c, ','.join(vars[a]['depend']))
            c = ','
        if '=' in vars[a]:
            v = vars[a]['=']
            # Complex initializers are printed as '(re,im)' pairs.
            if vars[a]['typespec'] in ['complex', 'double complex']:
                try:
                    v = eval(v)
                    v = '(%s,%s)' % (v.real, v.imag)
                except Exception:
                    pass
            vardef = '%s :: %s=%s' % (vardef, a, v)
        else:
            vardef = '%s :: %s' % (vardef, a)
        ret = '%s%s%s' % (ret, tab, vardef)
    return ret
######
# We expose post_processing_hooks as a module-level global so that user
# libraries can register their own hooks with f2py; crackfortran() applies
# each hook to the cracked block tree via traverse().
post_processing_hooks = []
def crackfortran(files):
    """Read, crack and post-process the given Fortran source *files*.

    Returns the collected user modules followed by the post-processed
    list of cracked blocks.
    """
    global usermodules, post_processing_hooks
    outmess('Reading fortran codes...\n', 0)
    readfortrancode(files, crackline)
    outmess('Post-processing...\n', 0)
    usermodules = []
    blocks = postcrack(grouplist[0])
    outmess('Applying post-processing hooks...\n', 0)
    for hook in post_processing_hooks:
        outmess(f'  {hook.__name__}\n', 0)
        blocks = traverse(blocks, hook)
    outmess('Post-processing (stage 2)...\n', 0)
    blocks = postcrack2(blocks)
    return usermodules + blocks
def crack2fortran(block):
    """Render cracked block data to pyf text with the standard f2py
    header and auto-generation footer."""
    global f2py_version
    header = """! -*- f90 -*-
! Note: the context of this file is case sensitive.
"""
    footer = f"""
! This file was auto-generated with f2py (version:{f2py_version}).
! See:
! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e
"""
    return header + crack2fortrangen(block) + '\n' + footer
def _is_visit_pair(obj):
return (isinstance(obj, tuple)
and len(obj) == 2
and isinstance(obj[0], (int, str)))
def traverse(obj, visit, parents=[], result=None, *args, **kwargs):
    '''Traverse f2py data structure with the following visit function:
    def visit(item, parents, result, *args, **kwargs):
        """
        parents is a list of key-"f2py data structure" pairs from which
        items are taken from.
        result is a f2py data structure that is filled with the
        return value of the visit function.
        item is 2-tuple (index, value) if parents[-1][1] is a list
        item is 2-tuple (key, value) if parents[-1][1] is a dict
        The return value of visit must be None, or of the same kind as
        item, that is, if parents[-1] is a list, the return value must
        be 2-tuple (new_index, new_value), or if parents[-1] is a
        dict, the return value must be 2-tuple (new_key, new_value).
        If new_index or new_value is None, the return value of visit
        is ignored, that is, it will not be added to the result.
        If the return value is None, the content of obj will be
        traversed, otherwise not.
        """
    '''
    # NOTE(review): the mutable default parents=[] is shared across calls
    # but is only read here (parents + [parent] builds a new list each
    # recursion) — looks safe; confirm before mutating it.
    if _is_visit_pair(obj):
        if obj[0] == 'parent_block':
            # avoid infinite recursion
            return obj
        # Give the visit function first say; a non-None return replaces
        # the pair wholesale and stops descent.
        new_result = visit(obj, parents, result, *args, **kwargs)
        if new_result is not None:
            assert _is_visit_pair(new_result)
            return new_result
        parent = obj
        result_key, obj = obj
    else:
        parent = (None, obj)
        result_key = None
    # Rebuild containers from the (possibly transformed) children;
    # children mapped to a None key/index are dropped.
    if isinstance(obj, list):
        new_result = []
        for index, value in enumerate(obj):
            new_index, new_item = traverse((index, value), visit,
                                           parents=parents + [parent],
                                           result=result, *args, **kwargs)
            if new_index is not None:
                new_result.append(new_item)
    elif isinstance(obj, dict):
        new_result = dict()
        for key, value in obj.items():
            new_key, new_value = traverse((key, value), visit,
                                          parents=parents + [parent],
                                          result=result, *args, **kwargs)
            if new_key is not None:
                new_result[new_key] = new_value
    else:
        new_result = obj
    if result_key is None:
        return new_result
    return result_key, new_result
def character_backward_compatibility_hook(item, parents, result,
                                          *args, **kwargs):
    """Previously, Fortran character was incorrectly treated as
    character*1. This hook fixes the usage of the corresponding
    variables in `check`, `dimension`, `=`, and `callstatement`
    expressions.
    The usage of `char*` in `callprotoargument` expression can be left
    unchanged because C `character` is C typedef of `char`, although,
    new implementations should use `character*` in the corresponding
    expressions.
    See https://github.com/numpy/numpy/pull/19388 for more information.
    """
    parent_key, parent_value = parents[-1]
    key, value = item
    def fix_usage(varname, value):
        # Rewrite '*varname' and 'varname[0]' to a plain 'varname'.
        value = re.sub(r'[*]\s*\b' + varname + r'\b', varname, value)
        value = re.sub(r'\b' + varname + r'\b\s*[\[]\s*0\s*[\]]',
                       varname, value)
        return value
    # Locate the vars dict of the enclosing routine; the distance up the
    # parents chain depends on which expression kind we are visiting.
    if parent_key in ['dimension', 'check']:
        assert parents[-3][0] == 'vars'
        vars_dict = parents[-3][1]
    elif key == '=':
        assert parents[-2][0] == 'vars'
        vars_dict = parents[-2][1]
    else:
        vars_dict = None
    new_value = None
    if vars_dict is not None:
        new_value = value
        for varname, vd in vars_dict.items():
            if ischaracter(vd):
                new_value = fix_usage(varname, new_value)
    elif key == 'callstatement':
        vars_dict = parents[-2][1]['vars']
        new_value = value
        for varname, vd in vars_dict.items():
            if ischaracter(vd):
                # replace all occurrences of `<varname>` with
                # `&<varname>` in argument passing
                new_value = re.sub(
                    r'(?<![&])\b' + varname + r'\b', '&' + varname, new_value)
    if new_value is not None:
        if new_value != value:
            # We report the replacements here so that downstream
            # software could update their source codes
            # accordingly. However, such updates are recommended only
            # when BC with numpy 1.21 or older is not required.
            outmess(f'character_bc_hook[{parent_key}.{key}]:'
                    f' replaced `{value}` -> `{new_value}`\n', 1)
    return (key, new_value)
# Register the default hook so the character*1 backward-compatibility fix
# is always applied by crackfortran().
post_processing_hooks.append(character_backward_compatibility_hook)
if __name__ == "__main__":
files = []
funcs = []
f = 1
f2 = 0
f3 = 0
showblocklist = 0
for l in sys.argv[1:]:
if l == '':
pass
elif l[0] == ':':
f = 0
elif l == '-quiet':
quiet = 1
verbose = 0
elif l == '-verbose':
verbose = 2
quiet = 0
elif l == '-fix':
if strictf77:
outmess(
'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
skipemptyends = 1
sourcecodeform = 'fix'
elif l == '-skipemptyends':
skipemptyends = 1
elif l == '--ignore-contains':
ignorecontains = 1
elif l == '-f77':
strictf77 = 1
sourcecodeform = 'fix'
elif l == '-f90':
strictf77 = 0
sourcecodeform = 'free'
skipemptyends = 1
elif l == '-h':
f2 = 1
elif l == '-show':
showblocklist = 1
elif l == '-m':
f3 = 1
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
elif f2:
f2 = 0
pyffilename = l
elif f3:
f3 = 0
f77modulename = l
elif f:
try:
open(l).close()
files.append(l)
except OSError as detail:
errmess(f'OSError: {detail!s}\n')
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
outmess("""\
Warning: You have specified module name for non Fortran 77 code that
should not need one (expect if you are scanning F90 code for non
module blocks but then you should use flag -skipemptyends and also
be sure that the files do not contain programs without program
statement).
""", 0)
postlist = crackfortran(files)
if pyffilename:
outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
pyf = crack2fortran(postlist)
with open(pyffilename, 'w') as f:
f.write(pyf)
if showblocklist:
show(postlist)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py3@numpy@f2py@crackfortran.py@.PATH_END.py
|
{
"filename": "cmd_train.py",
"repo_name": "i4Ds/sdo-cli",
"repo_path": "sdo-cli_extracted/sdo-cli-main/src/sdo/cmd/sood/ae/cmd_train.py",
"type": "Python"
}
|
from sdo.cli import pass_environment
from sdo.sood.algorithms.ae_2d import main
import click
@click.command("train", short_help="Trains an AE model")
@click.option("--target-size", type=click.IntRange(1, 512, clamp=True), default=128)
@click.option("--batch-size", type=click.IntRange(1, 512, clamp=True), default=16)
@click.option("--n-epochs", type=int, default=20)
@click.option("--lr", type=float, default=1e-4)
@click.option("--z-dim", type=int, default=128)
@click.option("-fm", "--fmap-sizes", type=int, multiple=True, default=[16, 64, 256, 1024])
@click.option("--print-every-iter", type=int, default=100)
@click.option("-l", "--load-path", type=click.Path(exists=True), required=False, default=None)
@click.option("-o", "--log-dir", type=click.Path(exists=True, writable=True), required=False, default=None)
@click.option("-d", "--data-dir", type=click.Path(exists=True), required=True, default=None)
@pass_environment
def train(ctx,
          target_size=128,
          batch_size=16,
          n_epochs=20,
          lr=1e-4,
          z_dim=128,
          fmap_sizes=(16, 64, 256, 1024),
          print_every_iter=100,
          load_path=None,
          log_dir=None,
          data_dir=None):
    """Train an autoencoder model on SDO images.

    Thin CLI wrapper: forwards every option unchanged to
    ``sdo.sood.algorithms.ae_2d.main`` with ``run="train"``.  ``ctx`` is the
    environment object injected by ``@pass_environment``; it is not used here.
    Note the signature defaults duplicate the click option defaults; click
    always supplies the values, so the signature defaults are effectively
    documentation only.
    """
    main(run="train",
         target_size=target_size,
         batch_size=batch_size,
         n_epochs=n_epochs,
         lr=lr,
         z_dim=z_dim,
         fmap_sizes=fmap_sizes,
         print_every_iter=print_every_iter,
         load_path=load_path,
         log_dir=log_dir,
         data_dir=data_dir)
|
i4DsREPO_NAMEsdo-cliPATH_START.@sdo-cli_extracted@sdo-cli-main@src@sdo@cmd@sood@ae@cmd_train.py@.PATH_END.py
|
{
"filename": "atlas.py",
"repo_name": "snad-space/ztf-viewer",
"repo_path": "ztf-viewer_extracted/ztf-viewer-master/ztf_viewer/catalogs/conesearch/atlas.py",
"type": "Python"
}
|
from ztf_viewer.catalogs.conesearch._base import _BaseVizierQuery
class AtlasQuery(_BaseVizierQuery):
    """Cone-search query against the ATLAS catalog via VizieR.

    Purely declarative: all behavior comes from ``_BaseVizierQuery``; this
    subclass only selects the VizieR table and maps its columns.  The catalog
    ID ``J/AJ/156/241/table4`` is presumably the ATLAS variable-star catalog
    (Heinze et al. 2018, AJ 156, 241) -- confirm against VizieR.
    """

    # VizieR column holding the object identifier.
    id_column = "ATOID"
    # VizieR column holding the object classification.
    type_column = "Class"
    # VizieR column holding the variability period.
    period_column = "fp-LSper"
    # Mapping from result-table column names to human-readable labels shown in
    # the viewer ("__link" and "separation" are synthesized by the base class).
    columns = {
        "__link": "Name",
        "separation": "Separation, arcsec",
        "fp-LSper": "Period, days",
        "Class": "Class",
    }
    # Columns explicitly requested from VizieR.
    _vizier_columns = ["ATOID", "fp-LSper", "Class"]
    # VizieR catalog identifier to query.
    _vizier_catalog = "J/AJ/156/241/table4"
|
snad-spaceREPO_NAMEztf-viewerPATH_START.@ztf-viewer_extracted@ztf-viewer-master@ztf_viewer@catalogs@conesearch@atlas.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/workers/__init__.py",
"type": "Python"
}
|
from .process import ProcessWorker
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@workers@__init__.py@.PATH_END.py
|
{
"filename": "test_neo4j.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/unit_tests/vectorstores/test_neo4j.py",
"type": "Python"
}
|
"""Test Neo4j functionality."""
from langchain_community.vectorstores.neo4j_vector import (
dict_to_yaml_str,
remove_lucene_chars,
)
def test_escaping_lucene() -> None:
    """Test escaping lucene characters"""
    shelter_in = "It is the end of the world. Take shelter"
    shelter_out = "It is the end of the world. Take shelter"
    # (raw input, expected cleaned output) pairs, checked in order.
    cases = [
        ("Hello+World", "Hello World"),
        ("Hello World\\", "Hello World"),
        (shelter_in + "!", shelter_out),
        (shelter_in + "&&", shelter_out),
        ("Bill&&Melinda Gates Foundation", "Bill Melinda Gates Foundation"),
        (shelter_in + "(&&)", shelter_out),
        (shelter_in + "??", shelter_out),
        (shelter_in + "^", shelter_out),
        (shelter_in + "+", shelter_out),
        (shelter_in + "-", shelter_out),
        (shelter_in + "~", shelter_out),
    ]
    for raw, cleaned in cases:
        assert remove_lucene_chars(raw) == cleaned
def test_converting_to_yaml() -> None:
    """dict_to_yaml_str should render nested dicts and lists in YAML form."""
    payload = {
        "name": "John Doe",
        "age": 30,
        "skills": ["Python", "Data Analysis", "Machine Learning"],
        "location": {"city": "Ljubljana", "country": "Slovenia"},
    }
    expected = (
        "name: John Doe\nage: 30\nskills:\n- Python\n- "
        "Data Analysis\n- Machine Learning\nlocation:\n  city: Ljubljana\n"
        "  country: Slovenia\n"
    )
    assert dict_to_yaml_str(payload) == expected
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@unit_tests@vectorstores@test_neo4j.py@.PATH_END.py
|
{
"filename": "multitrace.py",
"repo_name": "ricardoclandim/NIRVANA",
"repo_path": "NIRVANA_extracted/NIRVANA-master/nirvana/models/multitrace.py",
"type": "Python"
}
|
"""
Module with a class that fits multiple tracers to a single disk.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import warnings
from IPython import embed
import numpy as np
from scipy import optimize
from matplotlib import pyplot, rc, patches, ticker, colors
from astropy.io import fits
from .geometry import projected_polar, disk_ellipse
from ..data.util import select_kinematic_axis, bin_stats, growth_lim, atleast_one_decade
from .util import cov_err
from ..util import plot, fileio
from . import thindisk
from . import axisym
#warnings.simplefilter('error', RuntimeWarning)
class MultiTracerDisk:
    """
    Define a class that enables multiple kinematic datasets to be simultaneously
    fit with ThinDisk models.

    .. todo::
        internals ...

    Args:
        disk (array-like):
            The :class:`~nirvana.models.thindisk.ThinDisk` models to use for
            each dataset; i.e., there must be one disk model per dataset even if
            the models are identical.  Importantly, these must be separate
            object instances regardless of whether or not they are identical
            models.  The :class:`~nirvana.models.thindisk.ThinDisk` classes are
            used to perform a number of operations that are specific to each
            dataset that expedite the fit; the results are saved to the
            internals of the class meaning that there must be one instance per
            kinematic database.
        tie_base (array-like, optional):
            A 5-element vector indicating which geometric projection parameters
            should tied between the disk models.  The parameters are the
            dynamical center coordinates (x, y), the position angle, the
            inclination, and the systemic velocity.
        tie_disk (array-like, optional):
            A vector indicating which model-specific parameters should be tied
            between disks.  Models to be tied must have the same type and the
            same number of parameters.  The length of this vector must match the
            number of disk model parameters *excluding* the base parameters.
    """

    gbm = thindisk.ThinDiskGlobalBitMask()
    """
    Global bitmask.
    """

    mbm = thindisk.ThinDiskFitBitMask()
    """
    Measurement-specific bitmask.
    """

    pbm = thindisk.ThinDiskParBitMask()
    """
    Parameter-specific bitmask.
    """

    def __init__(self, disk, tie_base=None, tie_disk=None):
        self.disk = np.atleast_1d(disk)
        self.ntracer = self.disk.size
        if self.ntracer == 1:
            raise ValueError('You are only fitting one dataset. Use the disk class directly!')

        # Parameter checks: all disks must share the same number of base
        # (geometric projection) parameters.
        if np.any([self.disk[i+1].nbp != self.disk[0].nbp for i in range(self.ntracer-1)]):
            raise ValueError('All of the disks should have the same number of base parameters.')
        self.nbp = self.disk[0].nbp
        self.np = np.sum([d.np for d in self.disk])

        # Setup the ties between base and disk parameters
        self.tie_base = None  # Vector indicating which base parameters to tie between disks
        self.tie_disk = None  # Vector indicating which disk parameters to tie
        self.update_tie_base(tie_base)  # Set the base tying strategy
        self.update_tie_disk(tie_disk)  # Set the disk tying strategy

        # Setup the parameters
        self.par = None       # Holds the *unique* (untied) parameters
        self.par_err = None   # Holds any estimated parameter errors
        self.par_mask = None  # Holds any parameter-specific masking
        # TODO: Set this to the bitmask dtype
        self.global_mask = 0     # Global bitmask value
        self.fit_status = None   # Detailed fit status
        self.fit_success = None  # Simple fit success flag

        # Workspace
        self.disk_fom = None  # Functions used to calculate the minimized figure-of-merit
        self.disk_jac = None  # Functions used to calculate the fit Jacobian
        self._wrkspc_parslc = None  # Fit workspace: parameter slices
        self._wrkspc_ndata = None   # ... Number of data points per dataset
        self._wrkspc_sdata = None   # ... Starting indices of each dataset in the concatenation
        self._wrkspc_jac = None     # ... Full fit Jacobian

    def __getitem__(self, item):
        """Provide direct read access to the individual disk models."""
        return self.disk[item]

    def __setitem__(self, item, value):
        """Direct assignment of disk elements is prohibited."""
        raise ValueError('Cannot change disk elements using direct access.')

    def update_tie_base(self, tie_base):
        """
        Setup parameter tying between the base geometric projection parameters.

        Function sets :attr:`tie_base` and calls :func:`retie`.

        Args:
            tie_base (array-like):
                Boolean array indicating which base parameters should be tied
                between disks.
        """
        # Set the internal
        self.tie_base = np.zeros(self.nbp, dtype=bool) if tie_base is None \
                            else np.atleast_1d(tie_base)
        # Check the result
        if self.tie_base.size != self.nbp:
            # BUG FIX: previously referenced the misspelled name ``selt``,
            # which raised a NameError instead of the intended ValueError.
            raise ValueError('Number of parameters in base tying vector incorrect! Expected '
                             f'{self.nbp}, found {self.tie_base.size}.')
        if not np.issubdtype(self.tie_base.dtype, bool):
            raise TypeError('Base tying vector should hold booleans!')
        # Reset the tying vectors
        self.retie()

    @property
    def can_tie_disks(self):
        """
        Check if the disks can be tied.

        Current criteria are:
            - disks must have same class type
            - disks must have the same number of parameters

        Returns:
            :obj:`bool`: Flag that disk parameters can be tied.
        """
        types = np.array([d.__class__.__name__ for d in self.disk])
        npar = np.array([d.np for d in self.disk])
        return np.all(types == types[0]) and np.all(npar == npar[0])

    def update_tie_disk(self, tie_disk):
        """
        Setup parameter tying between the disk kinematic parameters

        Function sets :attr:`tie_disk` and calls :func:`retie`.

        Args:
            tie_disk (array-like):
                Boolean array indicating which kinematic parameters should be
                tied between disks.
        """
        # Set the internal; the tying vector excludes the base parameters.
        ndp = self.disk[0].np - self.nbp
        self.tie_disk = np.zeros(ndp, dtype=bool) if tie_disk is None else np.atleast_1d(tie_disk)
        # Check that the disks *can* be tied
        if np.any(self.tie_disk) and not self.can_tie_disks:
            raise ValueError('Disk parameters cannot be tied together!')
        # Check the tying flags
        if self.tie_disk.size != ndp:
            raise ValueError('Number of parameters in kinematics tying vector incorrect! '
                             f'Expected {self.disk[0].np - self.nbp}, found {self.tie_disk.size}.')
        if not np.issubdtype(self.tie_disk.dtype, bool):
            raise TypeError('Kinematics tying vector should hold booleans!')
        # Reset the tying vectors
        self.retie()

    def retie(self):
        """
        Construct the tying and untying vectors.

        Given the current :attr:`tie_base` and :attr:`tie_disk` arrays, this
        sets the :attr:`tie` and :attr:`untie` internals.

        If ``full_par`` provides the complete set of parameters, including
        redundant parameters that are tied and ``tied_par`` par only contains
        the unique, untied parameters, one can change between them using the
        :attr:`tie` and :attr:`untie` internal arrays as follows:

        .. code-block:: python

            full_par = tied_par[disk.untie]
            tied_par = full_par[disk.tie]

        where ``disk`` is an instance of this class.
        """
        # Find the starting index for the parameters of each disk
        s = [0 if i == 0 else np.sum([d.np for d in self.disk[:i]]) for i in range(self.ntracer)]
        # Instantiate as all parameters being untied
        self.untie = np.arange(self.np)
        # Tie base parameters by replacing parameter indices in disks with the
        # ones from the first disk.
        indx = np.where(self.tie_base)[0]
        if indx.size > 0:
            for i in range(self.ntracer-1):
                self.untie[indx + s[i+1]] = indx
        # Tie disk parameters by replacing parameter indices in disks with the
        # ones from the first disk.
        # NOTE: This works when self.tie_disk is None, so there's no need to test for it
        indx = np.where(self.tie_disk)[0]
        if indx.size > 0:
            for i in range(self.ntracer-1):
                self.untie[indx + s[i+1] + self.nbp] = indx + self.nbp
        # The tying vector simply uses the unique indices in the untying vector
        self.tie, self.untie = np.unique(self.untie, return_inverse=True)
        # Set the number of untied, unique parameters
        self.nup = self.tie.size

    def guess_par(self, full=False):
        """
        Return a set of guess parameters based on the guess parameters for each disk.

        Args:
            full (:obj:`bool`, optional):
                Flag to return the full set of parameters, not just the unique,
                untied ones.

        Returns:
            `numpy.ndarray`_: Guess parameters
        """
        gp = np.concatenate([d.guess_par() for d in self.disk])
        return gp if full else gp[self.tie]

    def _init_par(self, p0, fix):
        """
        Initialize the parameter vectors.

        This includes the parameter vector itself, :attr:`par`, and the boolean
        vector selecting the free parameters, :attr:`free`.  This also sets the
        number of free parameters, :attr:`nfree`.

        Args:
            p0 (`numpy.ndarray`_):
                The parameters to use.  The length can be either the total
                number of parameters (:attr:`np`) or the total number of
                unique/untied parameters (:attr:`nup`).  If None, the parameters
                are set by :func:`guess_par`.
            fix (`numpy.ndarray`_):
                Boolean flags to fix the parameters.  If None or if ``p0`` is
                None, all parameters are assumed to be free.  If not None, the
                length *must* match ``p0``.
        """
        # If the parameters are not provided, use the defaults
        if p0 is None:
            if fix is not None:
                warnings.warn('To fix parameter, must provide the full set of initial guess '
                              'parameters. Ignoring the fixed parameters provided and fitting '
                              'all parameters.')
            self.par = self.guess_par()
            self.free = np.ones(self.nup, dtype=bool)
            self.nfree = self.nup
            return
        _p0 = np.atleast_1d(p0)
        _free = np.ones(_p0.size, dtype=bool) if fix is None else np.logical_not(fix)
        if _p0.size not in [self.np, self.nup]:
            raise ValueError('Incorrect number of model parameters. Must be either '
                             f'{self.np} (full) or {self.nup} (unique).')
        if _free.size != _p0.size:
            raise ValueError('Vector selecting fixed parameters has different length from '
                             f'parameter vector: {_free.size} instead of {_p0.size}.')
        self.par = _p0.copy()
        self.par_err = None
        self.free = _free.copy()
        if self.par.size == self.np:
            # Full-length input: keep only the unique (untied) parameters.
            self.par = self.par[self.tie]
            self.free = self.free[self.tie]
        self.nfree = np.sum(self.free)

    def _set_par(self, par):
        """
        Set the full parameter vector, accounting for any fixed and/or tied
        parameters.

        Args:
            par (`numpy.ndarray`_, optional):
                The list of parameters to use.  Length must be either the total
                number of *untied* parameters (:attr:`nup`) or the total number
                of free parameters (:attr:`nfree`).
        """
        if par.ndim != 1:
            raise ValueError('Parameter array must be a 1D vector.')
        if par.size == self.nup:
            self.par = par.copy()
            return
        if par.size != self.nfree:
            raise ValueError('Must provide {0} or {1} parameters.'.format(self.nup, self.nfree))
        self.par[self.free] = par.copy()

    def disk_slice(self, index):
        """
        Return the slice selecting parameters for the specified disk from the
        *full* parameter vector.

        Args:
            index (:obj:`int`):
                The index of the disk to select.

        Returns:
            :obj:`slice`: Slice selected the relevant parameters.
        """
        s = 0 if index == 0 else np.sum([d.np for d in self.disk[:index]])
        return slice(s, s + self.disk[index].np)

    def lsq_fit(self, kin, sb_wgt=False, p0=None, fix=None, lb=None, ub=None, scatter=None,
                verbose=0, assume_posdef_covar=False, ignore_covar=True, analytic_jac=True,
                maxiter=5):
        """
        Use non-linear least-squares minimization to simultaneously fit all
        datasets.

        For all input parameter vectors (``p0``, ``fix``, ``lb``, and ``ub``),
        the length of all vectors must be the same, but they can be either the
        total number of parameters (:attr:`np`) or the number of unique (untied)
        parameters (:attr:`nup`).  If the former, note that only the values of
        the tied parameters in the first disk will be used *regardless of
        whether or not the parameters are fixed*.

        .. warning::

            Currently, this class *does not construct a model of the
            surface-brightness distribution*.  Instead, any weighting of the
            model during convolution with the beam profile uses the as-observed
            surface-brightness distribution, instead of a model of the intrinsic
            surface brightness distribution.  See ``sb_wgt``.

        Args:
            kin (array-like):
                A list of :class:`~nirvana.data.kinematics.Kinematics` objects
                to fit.  The number of datasets *must* match the number of disks
                (:attr:`ntracer`).
            sb_wgt (:obj:`bool`, optional):
                Flag to use the surface-brightness data provided by each dataset
                to weight the model when applying the beam-smearing.  **See the
                warning above**.
            p0 (`numpy.ndarray`_, optional):
                The initial parameters for the model.  See function description
                for the allowed shapes.  If None, the default guess parameters
                are used; see :func:`guess_par`.
            fix (`numpy.ndarray`_, optional):
                A boolean array selecting the parameters that should be fixed
                during the model fit.  See function description for the allowed
                shapes.  If None, all parameters are free.
            lb (`numpy.ndarray`_, optional):
                The lower bounds for the parameters.  See function description
                for the allowed shapes.  If None, the defaults are used (see
                :func:`par_bounds`).
            ub (`numpy.ndarray`_, optional):
                The upper bounds for the parameters.  See function description
                for the allowed shapes.  If None, the defaults are used (see
                :func:`par_bounds`).
            scatter (:obj:`float`, array-like, optional):
                Introduce a fixed intrinsic-scatter term into the model.  The
                scatter is added in quadrature to all measurement errors in the
                calculation of the merit function.  If no errors are available,
                this has the effect of renormalizing the unweighted merit
                function by 1/scatter.  Can be None, which means no intrinsic
                scatter is added.  The number of provided values can be either:

                    - 1 (i.e., a single float) to apply the same scatter to all
                      kinematic moments and datasets,
                    - the same as the number of disks to apply a separate
                      scatter to both kinematic moments for each dataset
                    - twice the number of disks to apply a separate scatter to
                      all the kinematic moments and datasets.  In this case, the
                      order is velocity scatter for disk 1, velocity dispersion
                      scatter for disk 1, velocity scatter for disk 2, etc.

            verbose (:obj:`int`, optional):
                Verbosity level to pass to `scipy.optimize.least_squares`_.
            assume_posdef_covar (:obj:`bool`, optional):
                If any of the :class:`~nirvana.data.kinematics.Kinematics`
                datasets include covariance matrices, this forces the code to
                proceed assuming the matrices are positive definite.
            ignore_covar (:obj:`bool`, optional):
                If any of the :class:`~nirvana.data.kinematics.Kinematics`
                datasets include covariance matrices, ignore them and just use
                the inverse variance.
            analytic_jac (:obj:`bool`, optional):
                Use the analytic calculation of the Jacobian matrix during the
                fit optimization.  If False, the Jacobian is calculated using
                finite-differencing methods provided by
                `scipy.optimize.least_squares`_.
            maxiter (:obj:`int`, optional):
                The call to `scipy.optimize.least_squares`_ is repeated when it
                returns best-fit parameters that are *identical* to the input
                parameters (to within a small tolerance).  This parameter sets
                the maximum number of times the fit will be repeated.  Set this
                to 1 to ignore these occurrences; ``maxiter`` cannot be None.
        """
        # Check the input
        self.kin = np.atleast_1d(kin)
        if self.kin.size != self.ntracer:
            raise ValueError('Must provide the same number of kinematic databases as disks '
                             f'({self.ntracer}).')
        if scatter is not None:
            _scatter = np.atleast_1d(scatter)
            if _scatter.size not in [1, self.ntracer, 2*self.ntracer]:
                raise ValueError(f'Number of scatter terms must be 1, {self.ntracer}, or '
                                 f'{2*self.ntracer}; found {_scatter.size}.')
            if _scatter.size == 1:
                # Same scatter applied to all moments and all datasets.
                _scatter = np.repeat(_scatter, 2*self.ntracer).reshape(self.ntracer,-1)
            elif _scatter.size == 2:
                # NOTE(review): this branch appears to assume exactly two
                # tracers; for ntracer != 2 the per-dataset case described in
                # the docstring falls through to the else clause below.
                # Confirm the intended behavior for ntracer > 2.
                _scatter = np.tile(_scatter, (self.ntracer,1))
            else:
                # BUG FIX: reshape the validated array (``_scatter``); the raw
                # ``scatter`` argument may be a list, which has no reshape.
                _scatter = _scatter.reshape(self.ntracer,-1)

        # Initialize the parameters.  This checks that the parameters have the
        # correct length.
        self._init_par(p0, fix)

        # Prepare the disks for fitting
        self.disk_fom = [None]*self.ntracer
        self.disk_jac = [None]*self.ntracer
        self._wrkspc_parslc = [self.disk_slice(i) for i in range(self.ntracer)]
        for i in range(self.ntracer):
            self.disk[i]._init_par(self.par[self.untie][self._wrkspc_parslc[i]], None)
            self.disk[i]._init_model(None, self.kin[i].grid_x, self.kin[i].grid_y,
                                     self.kin[i].grid_sb if sb_wgt else None,
                                     self.kin[i].beam_fft, True, None, False)
            self.disk[i]._init_data(self.kin[i], None if scatter is None else _scatter[i],
                                    assume_posdef_covar, ignore_covar)
            self.disk_fom[i] = self.disk[i]._get_fom()
            self.disk_jac[i] = self.disk[i]._get_jac()

        if analytic_jac:
            # Set the least_squares keywords
            jac_kwargs = {'jac': self.jac}
            # Set up the workspace for dealing with tied parameters
            self._wrkspc_ndata = [np.sum(self.disk[i].vel_gpm) + (0 if self.disk[i].dc is None
                                        else np.sum(self.disk[i].sig_gpm))
                                  for i in range(self.ntracer)]
            self._wrkspc_sdata = np.append([0],np.cumsum(self._wrkspc_ndata)[:-1])
            self._wrkspc_jac = np.zeros((np.sum(self._wrkspc_ndata), self.nup), dtype=float)
        else:
            jac_kwargs = {'diff_step': np.full(self.nup, 0.01, dtype=float)[self.free]}

        # Parameter boundaries
        if lb is None or ub is None:
            _lb, _ub = self.par_bounds()
        if lb is not None:
            _lb = lb[self.tie] if lb.size == self.np else lb.copy()
        if ub is not None:
            _ub = ub[self.tie] if ub.size == self.np else ub.copy()
        if len(_lb) != self.nup or len(_ub) != self.nup:
            raise ValueError('Length of one or both of the bound vectors is incorrect.')

        # Setup to iteratively fit, where the iterations are meant to ensure
        # that the least-squares fit actually optimizes the parameters.
        # - Set the random number generator with a fixed seed so that the
        #   result is deterministic.
        rng = np.random.default_rng(seed=909)
        # - Set the free parameters.  These are saved to a new vector so that the
        #   initial parameters can be tracked for each iteration without losing
        #   the original input.
        _p0 = self.par[self.free]
        p = _p0.copy()
        # - Reset any parameter errors
        pe = None
        # - Start counting the iterations
        niter = 0
        while niter < maxiter:
            # Run the optimization
            result = optimize.least_squares(self.fom, p, x_scale='jac', method='trf',
                                            xtol=1e-12, bounds=(_lb[self.free], _ub[self.free]),
                                            verbose=max(verbose,0), **jac_kwargs)
            # Attempt to calculate the errors
            try:
                pe = np.sqrt(np.diag(cov_err(result.jac)))
            except Exception:
                warnings.warn('Unable to compute parameter errors from precision matrix.')
                pe = None
            # The fit should change the input parameters.
            if np.all(np.absolute(p-result.x) > 1e-3):
                break
            warnings.warn('Parameters unchanged after fit. Retrying...')
            # If it doesn't, something likely went wrong with the fit.  Perturb
            # the input guesses a bit and retry.
            # BUG FIX: the perturbation scale previously used the ``p0``
            # argument, which may be None (TypeError) or have the wrong length;
            # use the local free-parameter vector ``_p0`` instead.
            p = _p0 + rng.normal(size=self.nfree)*(pe if pe is not None else 0.1*_p0)
            p = np.clip(p, _lb[self.free], _ub[self.free])
            niter += 1
        if niter == maxiter and np.all(np.absolute(p-result.x) > 1e-3):
            warnings.warn('Parameters unchanged after fit. Abandoning iterations...')
            # TODO: Save this to the status somehow

        # TODO: Add something to the fit status/success flags that tests if
        # niter == maxiter and/or if the input parameters are identical to the
        # final best-fit parameters?  Note that the input parameters, p0, may not
        # be identical to the output parameters because of the iterations mean
        # that p != p0 !

        # Save the fit status
        self.fit_status = result.status
        self.fit_success = result.success

        # Save the best-fitting parameters
        self._set_par(result.x)
        if pe is None:
            self.par_err = None
        else:
            self.par_err = np.zeros(self.nup, dtype=float)
            self.par_err[self.free] = pe

        # Initialize the mask
        self.par_mask = self.pbm.init_mask_array(self.nup)
        # Check if any parameters are "at" the boundary
        pm = self.par_mask[self.free]
        for v, flg in zip([-1, 1], ['LOWERBOUND', 'UPPERBOUND']):
            indx = result.active_mask == v
            if np.any(indx):
                pm[indx] = self.pbm.turn_on(pm[indx], flg)
        # Check if any parameters are within 1-sigma of the boundary
        indx = self.par[self.free] - self.par_err[self.free] < _lb[self.free]
        if np.any(indx):
            pm[indx] = self.pbm.turn_on(pm[indx], 'LOWERERR')
        indx = self.par[self.free] + self.par_err[self.free] > _ub[self.free]
        if np.any(indx):
            pm[indx] = self.pbm.turn_on(pm[indx], 'UPPERERR')
        # Flag the fixed parameters
        self.par_mask[self.free] = pm
        indx = np.logical_not(self.free)
        if np.any(indx):
            self.par_mask[indx] = self.pbm.turn_on(self.par_mask[indx], 'FIXED')

        # Print the report
        if verbose > -1:
            self.report(fit_message=result.message)

    def par_bounds(self, base_lb=None, base_ub=None):
        """
        Return the lower and upper bounds for the unique, untied parameters.

        Returns:
            :obj:`tuple`: A two-tuple of `numpy.ndarray`_ objects with the lower
            and upper parameter boundaries.
        """
        lb, ub = np.array([list(d.par_bounds(base_lb=base_lb, base_ub=base_ub))
                           for d in self.disk]).transpose(1,0,2).reshape(2,-1)
        return lb[self.tie], ub[self.tie]

    def fom(self, par):
        """
        Calculate the figure-of-merit for a model by concatenating the result
        from the figure-of-merit calculations provided for each disk.  This is
        the function used by the least-squares optimization in the simultaneous
        fitting process.

        Args:
            par (`numpy.ndarray`_):
                The model parameters; see :func:`_set_par`.

        Returns:
            `numpy.ndarray`_: The vector with the model residuals for each
            fitted data point.
        """
        # Get the tied parameters
        self._set_par(par)
        # Untie them to get the full set
        full_par = self.par[self.untie]
        return np.concatenate([self.disk_fom[i](full_par[self._wrkspc_parslc[i]])
                               for i in range(self.ntracer)])

    def jac(self, par):
        """
        Compute the model Jacobian using the analytic derivatives provided by
        each disk independently.

        Args:
            par (`numpy.ndarray`_):
                The model parameters; see :func:`_set_par`.

        Returns:
            `numpy.ndarray`_: The array with the model Jacobian.
        """
        # Get the tied parameters
        self._set_par(par)
        # Untie them to get the full set
        full_par = self.par[self.untie]
        _jac = [self.disk_jac[i](full_par[self._wrkspc_parslc[i]]) for i in range(self.ntracer)]
        self._wrkspc_jac[...] = 0.
        assert np.array_equal(self._wrkspc_ndata, [j.shape[0] for j in _jac]), \
                'Jacobians have incorrect shape!'
        # Accumulate each disk's Jacobian into the rows for its dataset and the
        # columns of its (untied) parameters; tied parameters share columns.
        for i in range(self.ntracer):
            sec = np.ix_(np.arange(self._wrkspc_ndata[i])+self._wrkspc_sdata[i],
                         self.untie[self._wrkspc_parslc[i]])
            self._wrkspc_jac[sec] += _jac[i]
        return self._wrkspc_jac[:,self.free]

    def distribute_par(self):
        """
        Distribute the current parameter set to the disk components.
        """
        full_par = self.par[self.untie]
        full_par_err = None if self.par_err is None else self.par_err[self.untie]
        full_free = self.free[self.untie]
        # Indices of the redundant (tied) copies of parameters; these are never
        # free in the individual disks.
        tied = np.setdiff1d(np.arange(self.np), self.tie)
        full_free[tied] = False
        if self.par_mask is None:
            full_par_mask = None
        else:
            full_par_mask = self.par_mask[self.untie]
            full_par_mask[tied] = self.pbm.turn_on(full_par_mask[tied], 'TIED')
        for i in range(self.ntracer):
            slc = self.disk_slice(i)
            self.disk[i].par = full_par[slc]
            self.disk[i].free = full_free[slc]
            self.disk[i].par_err = None if full_par_err is None else full_par_err[slc]
            self.disk[i].par_mask = None if full_par_mask is None else full_par_mask[slc]

    def report(self, fit_message=None):
        """
        Report the current parameters of the model to the screen.

        Args:
            fit_message (:obj:`str`, optional):
                The status message returned by the fit optimization.
        """
        if self.par is None:
            print('No parameters to report.')
            return

        print('-'*70)
        print(f'{"Fit Result":^70}')
        print('-'*70)
        if fit_message is not None:
            print(f'Fit status message: {fit_message}')
        if self.fit_status is not None:
            print(f'Fit status: {self.fit_status}')
        # BUG FIX: previously tested ``self.fit_status`` (an integer status
        # code) instead of the boolean ``self.fit_success`` flag.
        print(f'Fit success: {"True" if self.fit_success else "False"}')
        print('-'*50)
        # Indicate which parameters are tied
        if np.any(self.tie_base) or np.any(self.tie_disk):
            disk0_parn = self.disk[0].par_names()
            tied = self.tie_base if not np.any(self.tie_disk) \
                        else np.append(self.tie_base, self.tie_disk)
            print('Tied Parameters:')
            for i in range(tied.size):
                if not tied[i]:
                    continue
                print(f'{disk0_parn[i]:>30}')
        else:
            print('No tied parameters')
        # Print the results for each disk
        self.distribute_par()
        for i in range(self.ntracer):
            print('-'*50)
            print(f'{f"Disk {i+1}":^50}')
            print('-'*50)
            self.disk[i].report(component=True)
        print('-'*50)
        resid = self.fom(self.par)
        chisqr = np.sum(resid**2) / (resid.size - self.nfree)
        print(f'Total reduced chi-square: {chisqr}')
        print('-'*70)
# TODO:
# - Add keyword for radial sampling for 1D model RCs and dispersion profiles
# - This is MaNGA-specific and needs to be abstracted
# - Allow the plot to be constructed from the fits file written by
# axisym_fit_data
def asymdrift_fit_plot(galmeta, kin, disk, par=None, par_err=None, fix=None, ofile=None):
    """
    Construct the QA plot for the result of fitting an
    :class:`~nirvana.model.axisym.AxisymmetricDisk` model to a galaxy.

    Args:
        galmeta (:class:`~nirvana.data.meta.GlobalPar`):
            Object with metadata for the galaxy to be fit.
        kin (:class:`~nirvana.data.kinematics.Kinematics`):
            Object with the data to be fit
        disk (:class:`~nirvana.models.axisym.AxisymmetricDisk`):
            Object that performed the fit and has the best-fitting parameters.
        par (`numpy.ndarray`_, optional):
            The parameters of the model. If None are provided, the parameters
            in ``disk`` are used.
        par_err (`numpy.ndarray`_, optional):
            The errors in the model parameters. If None are provided, the
            parameter errors in ``disk`` are used.
        fix (`numpy.ndarray`_, optional):
            Flags indicating the parameters that were fixed during the fit. If
            None, all parameters are assumed to have been free.
        ofile (:obj:`str`, optional):
            Output filename for the plot. If None, the plot is shown to the
            screen.

    Raises:
        ValueError:
            Raised if no model parameters are available, or if the provided
            parameter, parameter-error, or fixed-flag arrays do not match the
            number of (untied) disk parameters.
    """
    logformatter = plot.get_logformatter()

    # Change the style
    rc('font', size=8)

    if disk.par is None and par is None:
        raise ValueError('No model parameters available. Provide directly or via disk argument.')
    # Untie the parameters so the full per-tracer parameter vector is used
    _par = disk.par[disk.untie] if par is None else par
    if disk.par_err is None and par_err is None:
        # No errors available; -1 acts as a placeholder
        _par_err = np.full(_par.size, -1., dtype=float)
    else:
        _par_err = disk.par_err[disk.untie] if par_err is None else par_err
    if fix is None:
        _fix = np.logical_not(disk.free)[disk.untie]
        # Parameters that are tied (i.e., not independent) are flagged as fixed
        _fix[np.setdiff1d(np.arange(disk.np), disk.tie)] = True
    else:
        _fix = fix

    if _par.size != disk.np:
        raise ValueError('Number of provided parameters has the incorrect size.')
    if _par_err.size != disk.np:
        raise ValueError('Number of provided parameter errors has the incorrect size.')
    if _fix.size != disk.np:
        raise ValueError('Number of provided parameter fixing flags has the incorrect size.')

    # Mean of the two (untied) systemic velocities; par[4] of each disk
    mean_vsys = (_par[4] + _par[disk.disk[0].np+4])/2

    # TODO: Move these to arguments for the function?
    fwhm = galmeta.psf_fwhm[1]
    oversample = 1.5
    maj_wedge = 30.
    rstep = fwhm/oversample

    # Build the AD data
    gv_map, gv_ivar_map, gv_mod_map, gv_mod_intr_map, \
        sv_map, sv_ivar_map, sv_mod_map, sv_mod_intr_map, \
        sd_map, sd_ivar_map, sd_mod_map, sd_mod_intr_map, \
        ad_map, ad_ivar_map, ad_bc_map, ad_bc_ivar_map, ad_mod_map, ad_mod_bc_map, ad_mask_map, \
        ados_map, ados_ivar_map, ados_bc_map, ados_bc_ivar_map, ados_mod_map, ados_mod_bc_map, \
        ados_mask_map, \
        spax_ad_r, spax_ad, _, spax_ad_mask, \
        ad_binr, \
        ad_ewmean, ad_ewsdev, ad_mod_ewmean, ad_mod_ewsdev, ad_nbin, \
        ad_bc_ewmean, ad_bc_ewsdev, ad_mod_bc_ewmean, ad_mod_bc_ewsdev, ad_bc_nbin, \
        ados_ewmean, ados_ewsdev, ados_mod_ewmean, ados_mod_ewsdev, ados_nbin, \
        ados_bc_ewmean, ados_bc_ewsdev, ados_mod_bc_ewmean, ados_mod_bc_ewsdev, ados_bc_nbin \
            = asymdrift_fit_maps(kin, disk, rstep, maj_wedge=maj_wedge)

    # Surface brightness maps
    gs_map = kin[0].remap('sb')
    ss_map = kin[1].remap('sb')

    # Get the projected spaxel data and the binned radial profiles for the
    # kinematic data.
    spax_vrot_r = [None]*2
    spax_vrot = [None]*2
    spax_smaj_r = [None]*2
    spax_smaj = [None]*2
    bin_r = [None]*2
    bin_vrot = [None]*2
    bin_vrote = [None]*2
    bin_vrotn = [None]*2
    bin_smaj = [None]*2
    bin_smaje = [None]*2
    bin_smajn = [None]*2
    for i in range(2):
        # Get the projected rotational velocity
        #   - Disk-plane coordinates
        r, th = projected_polar(kin[i].x - disk.disk[i].par[0], kin[i].y - disk.disk[i].par[1],
                                *np.radians(disk.disk[i].par[2:4]))
        #   - Mask for data along the major axis
        major_gpm = select_kinematic_axis(r, th, which='major', r_range='all', wedge=maj_wedge)
        #   - Projected rotation velocities
        indx = major_gpm & np.logical_not(kin[i].vel_mask)
        spax_vrot_r[i] = r[indx]
        spax_vrot[i] = (kin[i].vel[indx] - disk.disk[i].par[4])/np.cos(th[indx])
        #   - Major axis velocity dispersions
        indx = major_gpm & np.logical_not(kin[i].sig_mask) & (kin[i].sig_phys2 > 0)
        spax_smaj_r[i] = r[indx]
        spax_smaj[i] = np.sqrt(kin[i].sig_phys2[indx])

        bin_r[i], bin_vrot[i], bin_vrote[i], _, bin_vrotn[i], _, _, _, _, _, _, _, _, \
            _, _, _, _, _, _, bin_smaj[i], bin_smaje[i], _, bin_smajn[i], _, _, _, _, _, _, _, _, \
                = kin[i].radial_profiles(rstep, xc=disk.disk[i].par[0], yc=disk.disk[i].par[1],
                                         pa=disk.disk[i].par[2], inc=disk.disk[i].par[3],
                                         vsys=disk.disk[i].par[4], maj_wedge=maj_wedge)

    # Get the 1D model profiles
    # NOTE: This catches cases when there is no data, which makes the plot
    # useless, but it avoids a fault: np.amax raises on a zero-size array.
    # BUGFIX: the previous guard tested len() of the two-element lists
    # (spax_vrot_r and spax_smaj_r are always length 2), so it could never
    # trigger; test the number of collected measurements instead.
    _all_r = spax_vrot_r + spax_smaj_r
    maxr = rstep if all(r.size == 0 for r in _all_r) \
            else np.amax(np.concatenate(_all_r))
    modelr = np.arange(0, maxr, 0.1)
    vrotm = [None]*2
    smajm = [None]*2
    for i in range(2):
        vrotm[i] = disk.disk[i].rc.sample(modelr, par=disk.disk[i].rc_par())
        smajm[i] = disk.disk[i].dc.sample(modelr, par=disk.disk[i].dc_par())

    # Construct an ellipse that has a constant disk radius and is at the
    # best-fit center, position angle, and inclination. Set the radius to the
    # maximum of the valid binned rotation curve measurements, selecting the
    # larger value between the gas and stars.
    vrot_indx = [vrn > 5 for vrn in bin_vrotn]
    for i in range(2):
        if not np.any(vrot_indx[i]):
            vrot_indx[i] = bin_vrotn[i] > 0
    if not np.any(np.append(*vrot_indx)):
        de_x, de_y = None, None
    else:
        # NOTE: Assumes geometric parameters are tied!
        de_r = np.amax(np.append(bin_r[0][vrot_indx[0]], bin_r[1][vrot_indx[1]]))
        de_x, de_y = disk_ellipse(de_r, *np.radians(disk[0].par[2:4]), xc=disk[0].par[0],
                                  yc=disk[0].par[1])

    # Reduced chi-square of the best-fit model (recover the tied parameter
    # vector expected by disk.fom)
    resid = disk.fom(_par[disk.tie])
    rchisqr = np.sum(resid**2) / (resid.size - disk.nfree)

    # Keep only unmasked, positive sigma^2_a measurements and convert to sigma
    gpm = np.logical_not(spax_ad_mask) & (spax_ad > 0)
    spax_ad_r = spax_ad_r[gpm]
    spax_ad = np.sqrt(spax_ad[gpm])
    bin_ad = np.ma.sqrt(ad_ewmean).filled(0.0)
    # Propagate the sigma^2 scatter to an error in sigma
    bin_ade = np.ma.divide(ad_ewsdev, 2*bin_ad).filled(0.0)

    # Set the radius limits for the radial plots
    r_lim = [0.0, maxr * 1.05]
    rc_lim = growth_lim(np.concatenate(bin_vrot+vrotm), 0.99, 1.3)

    smaj_lim = growth_lim(np.ma.log10(np.concatenate(smajm + [bin_ad])).compressed(), 0.9, 1.5)
    smaj_lim = atleast_one_decade(np.power(10.0, smaj_lim))

    # TODO: Extent may need to be adjusted by 0.25 arcsec! extent is from the
    # edge of the pixel, not from its center.
    # Set the extent for the 2D maps
    extent = [np.amax(kin[0].grid_x), np.amin(kin[0].grid_x),
              np.amin(kin[0].grid_y), np.amax(kin[0].grid_y)]
    Dx = max(extent[0]-extent[1], extent[3]-extent[2]) # *1.01
    skylim = np.array([ (extent[0]+extent[1] - Dx)/2., 0.0 ])
    skylim[1] = skylim[0] + Dx

    sb_lim = [growth_lim(np.ma.log10(gs_map).compressed(), 0.90, 1.05),
              growth_lim(np.ma.log10(ss_map).compressed(), 0.90, 1.05)]
    sb_lim = [atleast_one_decade(np.power(10.0, sb_lim[0])),
              atleast_one_decade(np.power(10.0, sb_lim[1]))]

    vel_lim = growth_lim(np.ma.concatenate([gv_map, sv_map, gv_mod_map, sv_mod_map]).compressed(),
                         0.90, 1.05, midpoint=mean_vsys)

    ad_lim = growth_lim(np.ma.log10(np.ma.append(ad_map, ad_mod_map)).compressed(), 0.70, 1.05)
    ad_lim = atleast_one_decade(np.power(10.0, ad_lim))

    sig_map_lim = growth_lim(np.ma.log10(np.ma.append(sd_map, sd_mod_map)).compressed(), 0.70, 1.05)
    sig_map_lim = atleast_one_decade(np.power(10., sig_map_lim))

#    ados_lim = np.power(10.0, growth_lim(np.ma.log10(np.ma.append(ados_map, adosmod_map).compressed(),
#                                         0.80, 1.05)))
#    ados_lim = atleast_one_decade(sig_lim)
    ados_lim = growth_lim(np.ma.append(ados_map, ados_mod_map).compressed(), 0.80, 1.05)

    # Create the plot
    w,h = pyplot.figaspect(1)
    fig = pyplot.figure(figsize=(2*w,2*h))

    #-------------------------------------------------------------------
    # Gas velocity field
    ax = plot.init_ax(fig, [0.02, 0.775, 0.19, 0.19])
    cax = fig.add_axes([0.05, 0.97, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    plot.rotate_y_ticks(ax, 90, 'center')
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    # Mark the beam size (the circle has a radius of FWHM/2 in axes units)
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(gv_map, origin='lower', interpolation='nearest', cmap='RdBu_r',
                   extent=extent, vmin=vel_lim[0], vmax=vel_lim[1], zorder=4)
    # Mark the fitted dynamical center
    ax.scatter(disk.disk[0].par[0], disk.disk[0].par[1],
               marker='+', color='k', s=40, lw=1, zorder=5)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal')
    cb.ax.xaxis.set_ticks_position('top')
    cb.ax.xaxis.set_label_position('top')
    ax.text(0.05, 0.90, r'$V_g$', ha='left', va='center', transform=ax.transAxes)

    #-------------------------------------------------------------------
    # Gas velocity field model
    ax = plot.init_ax(fig, [0.02, 0.580, 0.19, 0.19])
    cax = fig.add_axes([0.05, 0.57, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    plot.rotate_y_ticks(ax, 90, 'center')
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(gv_mod_map, origin='lower', interpolation='nearest', cmap='RdBu_r',
                   extent=extent, vmin=vel_lim[0], vmax=vel_lim[1], zorder=4)
    # Mark the fitted dynamical center
    ax.scatter(disk.disk[0].par[0], disk.disk[0].par[1],
               marker='+', color='k', s=40, lw=1, zorder=5)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal')
    ax.text(0.05, 0.90, r'$V_{g,m}$', ha='left', va='center', transform=ax.transAxes)

    #-------------------------------------------------------------------
    # Stellar velocity field
    ax = plot.init_ax(fig, [0.215, 0.775, 0.19, 0.19])
    cax = fig.add_axes([0.245, 0.97, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(sv_map, origin='lower', interpolation='nearest', cmap='RdBu_r',
                   extent=extent, vmin=vel_lim[0], vmax=vel_lim[1], zorder=4)
    # Mark the fitted dynamical center
    ax.scatter(disk.disk[1].par[0], disk.disk[1].par[1],
               marker='+', color='k', s=40, lw=1, zorder=5)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal')
    cb.ax.xaxis.set_ticks_position('top')
    cb.ax.xaxis.set_label_position('top')
    ax.text(0.05, 0.90, r'$V_\ast$', ha='left', va='center', transform=ax.transAxes)

    #-------------------------------------------------------------------
    # Stellar velocity field model
    ax = plot.init_ax(fig, [0.215, 0.580, 0.19, 0.19])
    cax = fig.add_axes([0.245, 0.57, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(sv_mod_map, origin='lower', interpolation='nearest', cmap='RdBu_r',
                   extent=extent, vmin=vel_lim[0], vmax=vel_lim[1], zorder=4)
    # Mark the fitted dynamical center
    ax.scatter(disk.disk[1].par[0], disk.disk[1].par[1],
               marker='+', color='k', s=40, lw=1, zorder=5)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal')
    ax.text(0.05, 0.90, r'$V_{\ast,m}$', ha='left', va='center', transform=ax.transAxes)

    #-------------------------------------------------------------------
    # Measured AD
    ax = plot.init_ax(fig, [0.410, 0.775, 0.19, 0.19])
    cax = fig.add_axes([0.440, 0.97, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(ad_map, origin='lower', interpolation='nearest', cmap='viridis',
                   extent=extent, norm=colors.LogNorm(vmin=ad_lim[0], vmax=ad_lim[1]), zorder=4)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal', format=logformatter)
    cb.ax.xaxis.set_ticks_position('top')
    cb.ax.xaxis.set_label_position('top')
    ax.text(0.05, 0.90, r'$\sigma^2_a$', ha='left', va='center', transform=ax.transAxes)

    #-------------------------------------------------------------------
    # AD Model
    ax = plot.init_ax(fig, [0.410, 0.580, 0.19, 0.19])
    cax = fig.add_axes([0.440, 0.57, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(ad_mod_map, origin='lower', interpolation='nearest', cmap='viridis',
                   extent=extent, norm=colors.LogNorm(vmin=ad_lim[0], vmax=ad_lim[1]), zorder=4)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal', format=logformatter)
    ax.text(0.05, 0.90, r'$\sigma^2_{a,m}$', ha='left', va='center', transform=ax.transAxes)

    #-------------------------------------------------------------------
    # Velocity Dispersion
    ax = plot.init_ax(fig, [0.605, 0.775, 0.19, 0.19])
    cax = fig.add_axes([0.635, 0.97, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(sd_map, origin='lower', interpolation='nearest', cmap='viridis',
                   extent=extent, norm=colors.LogNorm(vmin=sig_map_lim[0], vmax=sig_map_lim[1]),
                   zorder=4)
    # Mark the fitted dynamical center
    ax.scatter(disk.disk[1].par[0], disk.disk[1].par[1],
               marker='+', color='k', s=40, lw=1, zorder=5)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal', format=logformatter)
    cb.ax.xaxis.set_ticks_position('top')
    cb.ax.xaxis.set_label_position('top')
    ax.text(0.05, 0.90, r'$\sigma^2_\ast$', ha='left', va='center', transform=ax.transAxes)

    #-------------------------------------------------------------------
    # Velocity Dispersion Model
    ax = plot.init_ax(fig, [0.605, 0.580, 0.19, 0.19])
    cax = fig.add_axes([0.635, 0.57, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(sd_mod_map, origin='lower', interpolation='nearest', cmap='viridis',
                   extent=extent, norm=colors.LogNorm(vmin=sig_map_lim[0], vmax=sig_map_lim[1]),
                   zorder=4)
    # Mark the fitted dynamical center
    ax.scatter(disk.disk[1].par[0], disk.disk[1].par[1],
               marker='+', color='k', s=40, lw=1, zorder=5)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal', format=logformatter)
    ax.text(0.05, 0.90, r'$\sigma^2_{\ast,m}$', ha='left', va='center', transform=ax.transAxes)

    #-------------------------------------------------------------------
    # AD ratio
    ax = plot.init_ax(fig, [0.800, 0.775, 0.19, 0.19])
    cax = fig.add_axes([0.830, 0.97, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(ados_map, origin='lower', interpolation='nearest', cmap='viridis',
                   extent=extent, vmin=ados_lim[0], vmax=ados_lim[1], zorder=4)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal')
    cb.ax.xaxis.set_ticks_position('top')
    cb.ax.xaxis.set_label_position('top')
    ax.text(0.05, 0.90, r'$\sigma^2_a/\sigma^2_\ast$',
            ha='left', va='center', transform=ax.transAxes)

    #-------------------------------------------------------------------
    # Model AD ratio
    ax = plot.init_ax(fig, [0.800, 0.580, 0.19, 0.19])
    cax = fig.add_axes([0.830, 0.57, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(ados_mod_map, origin='lower', interpolation='nearest', cmap='viridis',
                   extent=extent, vmin=ados_lim[0], vmax=ados_lim[1], zorder=4)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal')
    ax.text(0.05, 0.90, r'$\sigma^2_{a,m}/\sigma^2_{\ast,m}$', ha='left', va='center',
            transform=ax.transAxes)

    #-------------------------------------------------------------------
    # H-alpha surface-brightness
    ax = plot.init_ax(fig, [0.800, 0.305, 0.19, 0.19])
    cax = fig.add_axes([0.830, 0.50, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(gs_map, origin='lower', interpolation='nearest', cmap='inferno',
                   extent=extent, norm=colors.LogNorm(vmin=sb_lim[0][0], vmax=sb_lim[0][1]),
                   zorder=4)
    # Mark the fitted dynamical center
    ax.scatter(disk.disk[0].par[0], disk.disk[0].par[1],
               marker='+', color='k', s=40, lw=1, zorder=5)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    # TODO: For some reason, the combination of the use of a masked array and
    # setting the formatter to logformatter leads to weird behavior in the map.
    # Use something like the "pallete" object described here?
    # https://matplotlib.org/stable/gallery/images_contours_and_fields/image_masked.html
    cb = fig.colorbar(im, cax=cax, orientation='horizontal', format=logformatter)
    cb.ax.xaxis.set_ticks_position('top')
    cb.ax.xaxis.set_label_position('top')
    ax.text(0.05, 0.90, r'$\mu_g$', ha='left', va='center', transform=ax.transAxes)
#    ax.text(0.5, 1.2, 'Intrinsic Model', ha='center', va='center', transform=ax.transAxes,
#            fontsize=10)

    #-------------------------------------------------------------------
    # Continuum surface brightness
    ax = plot.init_ax(fig, [0.800, 0.110, 0.19, 0.19])
    cax = fig.add_axes([0.830, 0.10, 0.15, 0.005])
    cax.tick_params(which='both', direction='in')
    ax.set_xlim(skylim[::-1])
    ax.set_ylim(skylim)
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    ax.add_patch(patches.Circle((0.1, 0.1), fwhm/np.diff(skylim)[0]/2, transform=ax.transAxes,
                                facecolor='0.7', edgecolor='k', zorder=4))
    im = ax.imshow(ss_map, origin='lower', interpolation='nearest', cmap='inferno',
                   extent=extent, norm=colors.LogNorm(vmin=sb_lim[1][0], vmax=sb_lim[1][1]),
                   zorder=4)
    # Mark the fitted dynamical center
    ax.scatter(disk.disk[1].par[0], disk.disk[1].par[1],
               marker='+', color='k', s=40, lw=1, zorder=5)
    # Plot the ellipse with constant disk radius
    if de_x is not None:
        ax.plot(de_x, de_y, color='w', lw=2, zorder=6, alpha=0.5)
    cb = fig.colorbar(im, cax=cax, orientation='horizontal', format=logformatter)
    ax.text(0.05, 0.90, r'$\mu_\ast$', ha='left', va='center', transform=ax.transAxes)

#    #-------------------------------------------------------------------
#    # Annotate with the intrinsic scatter included
#    ax.text(0.00, -0.2, r'V scatter, $\epsilon_v$:', ha='left', va='center',
#            transform=ax.transAxes, fontsize=10)
#    ax.text(1.00, -0.2, f'{vsct:.1f}', ha='right', va='center', transform=ax.transAxes,
#            fontsize=10)
#    if disk.dc is not None:
#        ax.text(0.00, -0.3, r'$\sigma^2$ scatter, $\epsilon_{\sigma^2}$:', ha='left', va='center',
#                transform=ax.transAxes, fontsize=10)
#        ax.text(1.00, -0.3, f'{ssct:.1f}', ha='right', va='center', transform=ax.transAxes,
#                fontsize=10)

    #-------------------------------------------------------------------
    # SDSS image
    ax = fig.add_axes([0.01, 0.29, 0.23, 0.23])
    if kin[0].image is not None:
        ax.imshow(kin[0].image)
    else:
        ax.text(0.5, 0.5, 'No Image', ha='center', va='center', transform=ax.transAxes,
                fontsize=20)
    ax.text(0.5, 1.05, 'SDSS gri Composite', ha='center', va='center', transform=ax.transAxes,
            fontsize=10)
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)

    # Survey-sample designation for the annotation panel
    if galmeta.primaryplus:
        sample='Primary+'
    elif galmeta.secondary:
        sample='Secondary'
    elif galmeta.ancillary:
        sample='Ancillary'
    else:
        sample='Filler'

    # Assume center, PA, and inclination are tied, and that the systemic velocities are not.

    # MaNGA ID
    ax.text(0.00, -0.05, 'MaNGA ID:', ha='left', va='center', transform=ax.transAxes, fontsize=10)
    ax.text(1.01, -0.05, f'{galmeta.mangaid}', ha='right', va='center', transform=ax.transAxes,
            fontsize=10)
    # Observation
    ax.text(0.00, -0.13, 'Observation:', ha='left', va='center', transform=ax.transAxes,
            fontsize=10)
    ax.text(1.01, -0.13, galmeta.plateifu, ha='right', va='center',
            transform=ax.transAxes, fontsize=10)
    # Sample selection
    ax.text(0.00, -0.21, 'Sample:', ha='left', va='center', transform=ax.transAxes, fontsize=10)
    ax.text(1.01, -0.21, f'{sample}', ha='right', va='center', transform=ax.transAxes, fontsize=10)
    # Redshift
    ax.text(0.00, -0.29, 'Redshift:', ha='left', va='center', transform=ax.transAxes, fontsize=10)
    ax.text(1.01, -0.29, '{0:.4f}'.format(galmeta.z), ha='right', va='center',
            transform=ax.transAxes, fontsize=10)
    # Mag
    ax.text(0.00, -0.37, 'Mag (N,r,i):', ha='left', va='center', transform=ax.transAxes,
            fontsize=10)
    if galmeta.mag is None:
        ax.text(1.01, -0.37, 'Unavailable', ha='right', va='center',
                transform=ax.transAxes, fontsize=10)
    else:
        ax.text(1.01, -0.37, '{0:.1f}/{1:.1f}/{2:.1f}'.format(*galmeta.mag), ha='right',
                va='center', transform=ax.transAxes, fontsize=10)
    # PSF FWHM
    ax.text(0.00, -0.45, 'FWHM (g,r):', ha='left', va='center', transform=ax.transAxes,
            fontsize=10)
    ax.text(1.01, -0.45, '{0:.2f}, {1:.2f}'.format(*galmeta.psf_fwhm[:2]), ha='right', va='center',
            transform=ax.transAxes, fontsize=10)
    # Sersic n
    ax.text(0.00, -0.53, r'Sersic $n$:', ha='left', va='center', transform=ax.transAxes,
            fontsize=10)
    ax.text(1.01, -0.53, '{0:.2f}'.format(galmeta.sersic_n), ha='right', va='center',
            transform=ax.transAxes, fontsize=10)
    # Stellar Mass
    ax.text(0.00, -0.61, r'$\log(\mathcal{M}_\ast/\mathcal{M}_\odot$):', ha='left', va='center',
            transform=ax.transAxes, fontsize=10)
    ax.text(1.01, -0.61, '{0:.2f}'.format(np.log10(galmeta.mass)), ha='right', va='center',
            transform=ax.transAxes, fontsize=10)
    # Phot Inclination
    ax.text(0.00, -0.69, r'$i_{\rm phot}$ [deg]', ha='left', va='center', transform=ax.transAxes,
            fontsize=10)
    ax.text(1.01, -0.69, '{0:.1f}'.format(galmeta.guess_inclination(lb=1., ub=89.)),
            ha='right', va='center', transform=ax.transAxes, fontsize=10)
    # Fitted center (fixed parameters are colored; free ones show errors)
    ax.text(0.00, -0.77, r'$x_0$ [arcsec]', ha='left', va='center', transform=ax.transAxes,
            fontsize=10, color='C3' if _fix[0] else 'k')
    xstr = r'{0:.2f}'.format(_par[0]) if _fix[0] \
                else r'{0:.2f} $\pm$ {1:.2f}'.format(_par[0], _par_err[0])
    ax.text(1.01, -0.77, xstr,
            ha='right', va='center', transform=ax.transAxes, fontsize=10,
            color='C3' if _fix[0] else 'k')
    ax.text(0.00, -0.85, r'$y_0$ [arcsec]', ha='left', va='center', transform=ax.transAxes,
            fontsize=10, color='C3' if _fix[1] else 'k')
    ystr = r'{0:.2f}'.format(_par[1]) if _fix[1] \
                else r'{0:.2f} $\pm$ {1:.2f}'.format(_par[1], _par_err[1])
    ax.text(1.01, -0.85, ystr,
            ha='right', va='center', transform=ax.transAxes, fontsize=10,
            color='C3' if _fix[1] else 'k')
    # Position angle
    ax.text(0.00, -0.93, r'$\phi_0$ [deg]', ha='left', va='center', transform=ax.transAxes,
            fontsize=10, color='C3' if _fix[2] else 'k')
    pastr = r'{0:.1f}'.format(_par[2]) if _fix[2] \
                else r'{0:.1f} $\pm$ {1:.1f}'.format(_par[2], _par_err[2])
    ax.text(1.01, -0.93, pastr,
            ha='right', va='center', transform=ax.transAxes, fontsize=10,
            color='C3' if _fix[2] else 'k')
    # Kinematic Inclination
    ax.text(0.00, -1.01, r'$i_{\rm kin}$ [deg]', ha='left', va='center', transform=ax.transAxes,
            fontsize=10, color='C3' if _fix[3] else 'k')
    incstr = r'{0:.1f}'.format(_par[3]) if _fix[3] \
                else r'{0:.1f} $\pm$ {1:.1f}'.format(_par[3], _par_err[3])
    ax.text(1.01, -1.01, incstr,
            ha='right', va='center', transform=ax.transAxes, fontsize=10,
            color='C3' if _fix[3] else 'k')
    # Systemic velocity
    ax.text(0.00, -1.09, r'$\langle V_{\rm sys}\rangle$ [km/s]',
            ha='left', va='center', transform=ax.transAxes,
            fontsize=10, color='k')
    vsysstr = r'{0:.1f}'.format(mean_vsys)
    ax.text(1.01, -1.09, vsysstr,
            ha='right', va='center', transform=ax.transAxes, fontsize=10, color='k')
    # Reduced chi-square
    ax.text(0.00, -1.17, r'$\chi^2_\nu$', ha='left', va='center', transform=ax.transAxes,
            fontsize=10)
    ax.text(1.01, -1.17, f'{rchisqr:.2f}', ha='right', va='center', transform=ax.transAxes,
            fontsize=10)

    #-------------------------------------------------------------------
    # Radial plot radius limits
    # Select bins with sufficient data
    vrot_indx = [vrn > 5 for vrn in bin_vrotn]
    smaj_indx = [smn > 5 for smn in bin_smajn]
    for i in range(2):
        if not np.any(vrot_indx[i]):
            vrot_indx[i] = bin_vrotn[i] > 0
        if not np.any(smaj_indx[i]):
            smaj_indx[i] = bin_smajn[i] > 0
    ad_indx = ad_nbin > 5
    if not np.any(ad_indx):
        ad_indx = ad_nbin > 0

    #-------------------------------------------------------------------
    # Rotation curves
    reff_lines = np.arange(galmeta.reff, r_lim[1], galmeta.reff) if galmeta.reff > 1 else None

    ax = plot.init_ax(fig, [0.27, 0.27, 0.51, 0.23], facecolor='0.9', top=False, right=False)
    ax.set_xlim(r_lim)
    ax.set_ylim(rc_lim)
    plot.rotate_y_ticks(ax, 90, 'center')
    ax.xaxis.set_major_formatter(ticker.NullFormatter())
    # Gas
    _c = tuple([(1-x)*0.2+x for x in colors.to_rgb('C3')])
    ax.scatter(spax_vrot_r[0], spax_vrot[0],
               marker='.', color=_c, s=30, lw=0, alpha=0.6, zorder=1)
    if np.any(vrot_indx[0]):
        ax.scatter(bin_r[0][vrot_indx[0]], bin_vrot[0][vrot_indx[0]],
                   marker='o', s=110, alpha=1.0, color='white', zorder=3)
        ax.scatter(bin_r[0][vrot_indx[0]], bin_vrot[0][vrot_indx[0]],
                   marker='o', s=90, alpha=1.0, color='C3', zorder=4)
        ax.errorbar(bin_r[0][vrot_indx[0]], bin_vrot[0][vrot_indx[0]],
                    yerr=bin_vrote[0][vrot_indx[0]], color='C3', capsize=0,
                    linestyle='', linewidth=1, alpha=1.0, zorder=2)
    ax.plot(modelr, vrotm[0], color='C3', zorder=5, lw=1)
    # Stars
    _c = tuple([(1-x)*0.2+x for x in colors.to_rgb('C0')])
    ax.scatter(spax_vrot_r[1], spax_vrot[1],
               marker='.', color=_c, s=30, lw=0, alpha=0.6, zorder=1)
    if np.any(vrot_indx[1]):
        ax.scatter(bin_r[1][vrot_indx[1]], bin_vrot[1][vrot_indx[1]],
                   marker='o', s=110, alpha=1.0, color='white', zorder=3)
        ax.scatter(bin_r[1][vrot_indx[1]], bin_vrot[1][vrot_indx[1]],
                   marker='o', s=90, alpha=1.0, color='C0', zorder=4)
        ax.errorbar(bin_r[1][vrot_indx[1]], bin_vrot[1][vrot_indx[1]],
                    yerr=bin_vrote[1][vrot_indx[1]], color='C0', capsize=0,
                    linestyle='', linewidth=1, alpha=1.0, zorder=2)
    ax.plot(modelr, vrotm[1], color='C0', zorder=5, lw=1)
    # Mark multiples of the effective radius
    if reff_lines is not None:
        for l in reff_lines:
            ax.axvline(x=l, linestyle='--', lw=0.5, zorder=1, color='k')

    asec2kpc = galmeta.kpc_per_arcsec()
    if asec2kpc > 0:
        axt = plot.get_twin(ax, 'x')
        axt.set_xlim(np.array(r_lim) * galmeta.kpc_per_arcsec())
        axt.set_ylim(rc_lim)
        ax.text(0.5, 1.14, r'$R$ [$h^{-1}$ kpc]', ha='center', va='center', transform=ax.transAxes,
                fontsize=10)
    else:
        ax.text(0.5, 1.05, 'kpc conversion unavailable', ha='center', va='center',
                transform=ax.transAxes, fontsize=10)

#    kin_inc = disk.par[3]
#    axt = plot.get_twin(ax, 'y')
#    axt.set_xlim(r_lim)
#    axt.set_ylim(np.array(rc_lim)/np.sin(np.radians(kin_inc)))
#    plot.rotate_y_ticks(axt, 90, 'center')
#    axt.spines['right'].set_color('0.4')
#    axt.tick_params(which='both', axis='y', colors='0.4')
#    axt.yaxis.label.set_color('0.4')

    ax.add_patch(patches.Rectangle((0.79,0.45), 0.19, 0.09, facecolor='w', lw=0, edgecolor='none',
                                   zorder=5, alpha=0.7, transform=ax.transAxes))
    ax.text(0.97, 0.451, r'$V\ \sin i$ [km/s]', ha='right', va='bottom',
            transform=ax.transAxes, fontsize=10, zorder=6)
#    ax.text(0.97, 0.56, r'$V$ [km/s; right axis]', ha='right', va='bottom', color='0.4',
#            transform=ax.transAxes, fontsize=10, zorder=6)

    #-------------------------------------------------------------------
    # Velocity Dispersion profile
    ax = plot.init_ax(fig, [0.27, 0.04, 0.51, 0.23], facecolor='0.9')
    ax.set_xlim(r_lim)
    ax.set_ylim(smaj_lim)#[10,275])
    ax.set_yscale('log')
    ax.yaxis.set_major_formatter(logformatter)
    plot.rotate_y_ticks(ax, 90, 'center')
    # Gas
    _c = tuple([(1-x)*0.2+x for x in colors.to_rgb('C3')])
    ax.scatter(spax_smaj_r[0], spax_smaj[0],
               marker='.', color=_c, s=30, lw=0, alpha=0.6, zorder=1)
    if np.any(smaj_indx[0]):
        ax.scatter(bin_r[0][smaj_indx[0]], bin_smaj[0][smaj_indx[0]],
                   marker='o', s=110, alpha=1.0, color='white', zorder=3)
        ax.scatter(bin_r[0][smaj_indx[0]], bin_smaj[0][smaj_indx[0]],
                   marker='o', s=90, alpha=1.0, color='C3', zorder=4)
        ax.errorbar(bin_r[0][smaj_indx[0]], bin_smaj[0][smaj_indx[0]],
                    yerr=bin_smaje[0][smaj_indx[0]], color='C3', capsize=0,
                    linestyle='', linewidth=1, alpha=1.0, zorder=2)
    ax.plot(modelr, smajm[0], color='C3', zorder=5, lw=1)
    # Stars
    _c = tuple([(1-x)*0.2+x for x in colors.to_rgb('C0')])
    ax.scatter(spax_smaj_r[1], spax_smaj[1],
               marker='.', color=_c, s=30, lw=0, alpha=0.6, zorder=1)
    if np.any(smaj_indx[1]):
        ax.scatter(bin_r[1][smaj_indx[1]], bin_smaj[1][smaj_indx[1]],
                   marker='o', s=110, alpha=1.0, color='white', zorder=3)
        ax.scatter(bin_r[1][smaj_indx[1]], bin_smaj[1][smaj_indx[1]],
                   marker='o', s=90, alpha=1.0, color='C0', zorder=4)
        ax.errorbar(bin_r[1][smaj_indx[1]], bin_smaj[1][smaj_indx[1]],
                    yerr=bin_smaje[1][smaj_indx[1]], color='C0', capsize=0,
                    linestyle='', linewidth=1, alpha=1.0, zorder=2)
    ax.plot(modelr, smajm[1], color='C0', zorder=5, lw=1)
    # Sigma AD
    _c = tuple([(1-x)*0.2+x for x in colors.to_rgb('k')])
    ax.scatter(spax_ad_r, spax_ad,
               marker='.', color=_c, s=30, lw=0, alpha=0.6, zorder=1)
    if np.any(ad_indx):
        ax.scatter(ad_binr[ad_indx], bin_ad[ad_indx],
                   marker='o', s=110, alpha=1.0, color='white', zorder=3)
        ax.scatter(ad_binr[ad_indx], bin_ad[ad_indx],
                   marker='o', s=90, alpha=1.0, color='k', zorder=4)
        ax.errorbar(ad_binr[ad_indx], bin_ad[ad_indx], yerr=bin_ade[ad_indx],
                    color='k', capsize=0, linestyle='', linewidth=1, alpha=1.0, zorder=2)
    # Model sigma_a = sqrt(V_g^2 - V_*^2); masked (filled with 0) where negative
    ax.plot(modelr, np.ma.sqrt(vrotm[0]**2 - vrotm[1]**2).filled(0.0), color='k', zorder=5, lw=1)
    if reff_lines is not None:
        for l in reff_lines:
            ax.axvline(x=l, linestyle='--', lw=0.5, zorder=1, color='k')
    ax.text(0.5, -0.13, r'$R$ [arcsec]', ha='center', va='center', transform=ax.transAxes,
            fontsize=10)
    ax.add_patch(patches.Rectangle((0.81,0.86), 0.17, 0.09, facecolor='w', lw=0,
                                   edgecolor='none', zorder=5, alpha=0.7,
                                   transform=ax.transAxes))
    ax.text(0.97, 0.861, r'$\sigma_{\rm maj}$ [km/s]', ha='right', va='bottom',
            transform=ax.transAxes, fontsize=10, zorder=6)

    # TODO:
    #   - Add errors (if available)?
    #   - Surface brightness units?

    if ofile is None:
        pyplot.show()
    else:
        fig.canvas.print_figure(ofile, bbox_inches='tight')
    fig.clear()
    pyplot.close(fig)

    # Reset to default style
    pyplot.rcdefaults()
# TODO: Figure out what's causing:
# - UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedArray.
def asymdrift_fit_maps(kin, disk, rstep, par=None, maj_wedge=30.):
    """
    Construct azimuthally averaged radial profiles of the kinematics.

    Builds 2D maps and major-axis radial profiles of the asymmetric drift
    (AD) for a two-tracer (gas + stars) disk fit.  The gas data are rebinned
    to the stellar binning so that the AD can be computed bin-by-bin, both
    with and without beam-smearing corrections.

    Args:
        kin: Two-element sequence of kinematics datasets; first gas, second
            stars.  Both must have the same spatial shape and grid sampling.
        disk: Two-tracer disk model whose base geometry (xc, yc, pa, inc) is
            tied between the tracers.
        rstep (scalar): Radial bin step, in the same units as the on-sky
            coordinates (presumably arcsec -- confirm against caller).
        par (array-like, optional): Full (untied) model parameter vector.
            If None, taken from ``disk.par[disk.untie]``.
        maj_wedge (scalar, optional): Half-opening angle (deg) of the wedge
            around the major axis used to select AD data.

    Returns:
        tuple: A long tuple of maps, masks, per-bin AD data, and binned
        radial profiles; see the return statement for the item-by-item
        ordering (data/model, beam-corrected and not, for AD and AD over
        sigma^2).

    Raises:
        ValueError: if no parameters are available or the parameter vector
            has the wrong size.
        NotImplementedError: if the inputs are not a matched two-tracer set
            with tied base geometry.
    """
    # Check input
    if disk.par is None and par is None:
        raise ValueError('No model parameters available. Provide directly or via disk argument.')
    if disk.ntracer != 2:
        raise NotImplementedError('Must provide two disks, first gas, second stars.')
    if len(kin) != 2:
        raise NotImplementedError('Must provide two kinematic datasets, first gas, second stars.')
    if kin[0].spatial_shape != kin[1].spatial_shape:
        raise NotImplementedError('Kinematic datasets must have the same spatial shape.')
    if not np.allclose(kin[0].grid_x, kin[1].grid_x) \
            or not np.allclose(kin[0].grid_y, kin[1].grid_y):
        raise NotImplementedError('Kinematics datasets must have the same grid sampling!')
    if not np.all(disk.tie_base[:4]):
        raise NotImplementedError('Disk must have tied the xc, yc, pa, and inc for both tracers.')
    # Set the parameters and confirm it has the correct size
    _par = disk.par[disk.untie] if par is None else par
    if _par.size != disk.np:
        raise ValueError('Number of provided parameters has the incorrect size.')
    # Get the maps of gas and stellar velocities
    gv_mask = np.logical_not(disk[0].vel_gpm) | kin[0].vel_mask
    gv_map = kin[0].remap('vel', mask=gv_mask)
    sv_mask = np.logical_not(disk[1].vel_gpm) | kin[1].vel_mask
    sv_map = kin[1].remap('vel', mask=sv_mask)
    sv_ivar_map = kin[1].remap('vel_ivar', mask=sv_mask)
    # Use masks to determine the spaxels/bins that both have valid gas and
    # stellar velocity data
    ad_mask_map = np.ma.getmaskarray(gv_map) | np.ma.getmaskarray(sv_map)
    sv_map[ad_mask_map] = np.ma.masked
    sv_ivar_map[ad_mask_map] = np.ma.masked
    # Get the mask for the binned data
    # NOTE: This approach should mean that the entire bin is masked if any
    # spaxel within it is masked.  This is conservative, but it saves me having
    # to account for individual masked gas spaxels within the stellar bins!
    ad_mask = kin[1].bin(ad_mask_map.astype(int)) > 0.
    # Bin the gas velocities identically to the stars and reconstruct the map
    # using the stellar binning
    gv = kin[1].bin_moments(kin[0].grid_sb, gv_map.filled(0.0), None)[1]
    gv[ad_mask] = 0.
    gv_map = kin[1].remap(gv, mask=ad_mask)
    # Use simple error propagation to get the errors
    _msk = np.logical_not(disk[0].vel_gpm) | kin[0].vel_mask
    gv_var_map = np.ma.power(kin[0].remap('vel_ivar', mask=_msk), -1)
    gv_ivar = np.ma.power(kin[1].bin_moments(kin[0].grid_sb, gv_var_map.filled(0.0), None)[1], -1)
    gv_ivar_map = kin[1].remap(gv_ivar.filled(0.0), mask=ad_mask)
    # Get the binned stellar velocities and update the mapped properties to use
    # the new mask
    sv = np.ma.MaskedArray(kin[1].vel, mask=ad_mask)
    sv_ivar = np.ma.MaskedArray(kin[1].vel_ivar, mask=ad_mask)
    sv_map = kin[1].remap(sv)
    sv_ivar_map = kin[1].remap(sv_ivar)
    # Stellar velocity dispersion (squared) data
    sd = np.ma.MaskedArray(kin[1].sig_phys2.copy())
    sd_mask = np.logical_not(disk[1].sig_gpm) | kin[1].sig_mask \
                | np.logical_not(kin[1].sig_phys2_ivar > 0)
    sd[sd_mask] = np.ma.masked
    sd_map = kin[1].remap(sd, mask=sd_mask)
    sd_ivar = np.ma.MaskedArray(kin[1].sig_phys2_ivar, mask=sd_mask)
    sd_ivar_map = kin[1].remap(sd_ivar, mask=sd_mask)
    # Create the model data
    # - Gas kinematics
    slc = disk.disk_slice(0)
    disk[0].par = _par[slc]
    models = disk[0].model()
    intr_models = disk[0].model(ignore_beam=True)
    gv_mod = kin[1].bin_moments(kin[0].grid_sb,
                                models if disk[0].dc is None else models[0], None)[1]
    gv_mod_map = kin[1].remap(gv_mod, mask=ad_mask)
    gv_mod_intr = kin[1].bin_moments(kin[0].grid_sb,
                                     intr_models if disk[0].dc is None else intr_models[0],
                                     None)[1]
    gv_mod_intr_map = kin[1].remap(gv_mod_intr, mask=ad_mask)
    # - Stellar kinematics
    slc = disk.disk_slice(1)
    sv_mod, sd_mod = disk[1].binned_model(_par[slc])
    sv_mod_intr, sd_mod_intr = disk[1].binned_model(_par[slc], ignore_beam=True)
    sv_mod_map = kin[1].remap(sv_mod, mask=ad_mask)
    sv_mod_intr_map = kin[1].remap(sv_mod_intr, mask=ad_mask)
    if sd_mod is None:
        sd_mod = np.ones(sv_mod.shape, dtype=float)
        sd_mod_intr = np.ones(sv_mod.shape, dtype=float)
    # Work with dispersion squared throughout
    sd_mod = sd_mod**2
    sd_mod_intr = sd_mod_intr**2
    sd_mod_map = kin[1].remap(sd_mod, mask=sd_mask)
    # BUGFIX: previously remapped sd_mod here, so the "intrinsic" map
    # duplicated the beam-convolved one; remap sd_mod_intr instead.
    sd_mod_intr_map = kin[1].remap(sd_mod_intr, mask=sd_mask)
    # Get the beam-smearing corrections
    gv_bc = gv_mod - gv_mod_intr
    sv_bc = sv_mod - sv_mod_intr
    sd_bc = sd_mod - sd_mod_intr
    # Get the coordinates of the bins in the disk plane
    r, th = projected_polar(kin[1].x - _par[0], kin[1].y - _par[1], *np.radians(_par[2:4]))
    # Get the AD data for each bin, both beam-corrected and not.  The
    # divisions by cos(th) project the line-of-sight velocities to rotation
    # speeds (valid near the major axis; off-axis data are masked below).
    grot = np.ma.divide(gv - _par[4], np.cos(th))
    grot_bc = np.ma.divide(gv - gv_bc - _par[4], np.cos(th))
    grot_var = np.ma.power(np.cos(th)**2 * gv_ivar, -1)
    srot = np.ma.divide(sv - _par[disk[0].np+4], np.cos(th))
    srot_bc = np.ma.divide(sv - sv_bc - _par[disk[0].np+4], np.cos(th))
    srot_var = np.ma.power(np.cos(th)**2 * sv_ivar, -1)
    ad = grot**2 - srot**2
    ad_var = (2*grot)**2 * grot_var + (2*srot)**2 * srot_var
    ad_ivar = np.ma.power(ad_var, -1)
    ad_bc = grot_bc**2 - srot_bc**2
    ad_bc_var = (2*grot_bc)**2 * grot_var + (2*srot_bc)**2 * srot_var
    ad_bc_ivar = np.ma.power(ad_bc_var, -1)
    # AD normalized by the stellar dispersion squared
    ados = np.ma.divide(ad, sd)
    ados_ivar = np.ma.divide(np.ma.power(np.ma.power(ad_ivar * ad**2,-1)
                                            + np.ma.power(sd_ivar * sd**2, -1), -1), ados**2)
    ados_mask = sd_mask | ad_mask | np.ma.getmaskarray(ados_ivar)
    ados[ados_mask] = np.ma.masked
    ados_ivar[ados_mask] = np.ma.masked
    ados_bc = np.ma.divide(ad_bc, sd - sd_bc)
    ados_bc_ivar = np.ma.divide(np.ma.power(np.ma.power(ad_bc_ivar * ad_bc**2,-1)
                                            + np.ma.power(sd_ivar * (sd-sd_bc)**2, -1), -1),
                                ados_bc**2)
    ados_bc_mask = sd_mask | ad_mask | np.ma.getmaskarray(ados_bc_ivar)
    ados_bc[ados_bc_mask] = np.ma.masked
    ados_bc_ivar[ados_bc_mask] = np.ma.masked
    # Update the masking
    if np.any((np.ma.getmaskarray(ad) | np.ma.getmaskarray(ad_ivar)) & np.logical_not(ad_mask)):
        raise ValueError('check mask')
    if np.any((np.ma.getmaskarray(ad_bc) | np.ma.getmaskarray(ad_bc_ivar))
                & np.logical_not(ad_mask)):
        raise ValueError('check bc mask')
    # Create the maps
    ad_map = kin[1].remap(ad.filled(0.0), mask=ad_mask)
    ad_ivar_map = kin[1].remap(ad_ivar.filled(0.0), mask=ad_mask)
    ad_mask_map = kin[1].remap(ad_mask.astype(int), mask=ad_mask).filled(1).astype(bool)
    ad_bc_map = kin[1].remap(ad_bc.filled(0.0), mask=ad_mask)
    ad_bc_ivar_map = kin[1].remap(ad_bc_ivar.filled(0.0), mask=ad_mask)
    ados_map = kin[1].remap(ados.filled(0.0), mask=ados_mask)
    ados_ivar_map = kin[1].remap(ados_ivar.filled(0.0), mask=ados_mask)
    ados_mask_map = kin[1].remap(ados_mask.astype(int), mask=ados_mask).filled(1).astype(bool)
    ados_bc_map = kin[1].remap(ados_bc.filled(0.0), mask=ados_mask)
    ados_bc_ivar_map = kin[1].remap(ados_bc_ivar.filled(0.0), mask=ados_mask)
    # Get the model AD data for each bin, both beam-corrected and not
    grot_mod = np.ma.divide(gv_mod - _par[4], np.cos(th))
    grot_mod_bc = np.ma.divide(gv_mod - gv_bc - _par[4], np.cos(th))
    srot_mod = np.ma.divide(sv_mod - _par[disk[0].np+4], np.cos(th))
    srot_mod_bc = np.ma.divide(sv_mod - sv_bc - _par[disk[0].np+4], np.cos(th))
    ad_mod = grot_mod**2 - srot_mod**2
    ad_mod_bc = grot_mod_bc**2 - srot_mod_bc**2
    ados_mod = np.ma.divide(ad_mod, sd_mod)
    ados_mod_bc = np.ma.divide(ad_mod_bc, sd_mod - sd_bc)
    # Create the model maps
    ad_mod_map = kin[1].remap(ad_mod.filled(0.0), mask=ad_mask)
    ad_mod_bc_map = kin[1].remap(ad_mod_bc.filled(0.0), mask=ad_mask)
    ados_mod_map = kin[1].remap(ados_mod.filled(0.0), mask=ados_mask)
    ados_mod_bc_map = kin[1].remap(ados_mod_bc.filled(0.0), mask=ados_mask)
    # Mask data away from the major axes
    major_gpm = select_kinematic_axis(r, th, which='major', r_range='all', wedge=maj_wedge)
    ad_indx = major_gpm & np.logical_not(ad_mask)
    ados_indx = major_gpm & np.logical_not(ados_mask)
    # Set the radial bins
    # NOTE: ad hoc maximum radius is meant to mitigate effect of minor axis
    # points on number radial bins.  This will limit the number of off-axis
    # points included in galaxies with inclinations > 75 deg.
    maxr = min(4*np.amax(r[ad_indx]), np.amax(r)) if np.any(ad_indx) else np.amax(r)
    binr = np.array([rstep/2]) if maxr < rstep/2 else np.arange(rstep/2, maxr, rstep)
    binw = np.full(binr.size, rstep, dtype=float)
    # Bin the data
    _, _, _, _, _, _, _, ad_ewmean, ad_ewsdev, _, _, ad_nbin, _ \
            = bin_stats(r[ad_indx], ad.data[ad_indx], binr, binw, wgts=ad_ivar.data[ad_indx],
                        fill_value=0.0)
    _, _, _, _, _, _, _, ad_bc_ewmean, ad_bc_ewsdev, _, _, ad_bc_nbin, _ \
            = bin_stats(r[ad_indx], ad_bc.data[ad_indx], binr, binw,
                        wgts=ad_bc_ivar.data[ad_indx], fill_value=0.0)
    _, _, _, _, _, _, _, ados_ewmean, ados_ewsdev, _, _, ados_nbin, _ \
            = bin_stats(r[ados_indx], ados.data[ados_indx], binr, binw,
                        wgts=ados_ivar.data[ados_indx], fill_value=0.0)
    _, _, _, _, _, _, _, ados_bc_ewmean, ados_bc_ewsdev, _, _, ados_bc_nbin, _ \
            = bin_stats(r[ados_indx], ados_bc.data[ados_indx], binr, binw,
                        wgts=ados_bc_ivar.data[ados_indx], fill_value=0.0)
    # Bin the model identically to the data
    _, _, _, _, _, _, _, ad_mod_ewmean, ad_mod_ewsdev, _, _, _, _ \
            = bin_stats(r[ad_indx], ad_mod.data[ad_indx], binr, binw,
                        wgts=ad_ivar.data[ad_indx], fill_value=0.0)
    _, _, _, _, _, _, _, ad_mod_bc_ewmean, ad_mod_bc_ewsdev, _, _, _, _ \
            = bin_stats(r[ad_indx], ad_mod_bc.data[ad_indx], binr, binw,
                        wgts=ad_bc_ivar.data[ad_indx], fill_value=0.0)
    _, _, _, _, _, _, _, ados_mod_ewmean, ados_mod_ewsdev, _, _, _, _ \
            = bin_stats(r[ados_indx], ados_mod.data[ados_indx], binr, binw,
                        wgts=ados_ivar.data[ados_indx], fill_value=0.0)
    _, _, _, _, _, _, _, ados_mod_bc_ewmean, ados_mod_bc_ewsdev, _, _, _, _ \
            = bin_stats(r[ados_indx], ados_mod_bc.data[ados_indx], binr, binw,
                        wgts=ados_bc_ivar.data[ados_indx], fill_value=0.0)
    # Return the data
    return gv_map, gv_ivar_map, gv_mod_map, gv_mod_intr_map, \
                sv_map, sv_ivar_map, sv_mod_map, sv_mod_intr_map, \
                sd_map, sd_ivar_map, sd_mod_map, sd_mod_intr_map, \
                ad_map, ad_ivar_map, ad_bc_map, ad_bc_ivar_map, ad_mod_map, ad_mod_bc_map, \
                ad_mask_map, \
                ados_map, ados_ivar_map, ados_bc_map, ados_bc_ivar_map, ados_mod_map, \
                ados_mod_bc_map, \
                ados_mask_map, \
                r[ad_indx], ad[ad_indx].filled(0.0), ad_ivar[ad_indx].filled(0.0), \
                np.ma.getmaskarray(ad[ad_indx]), \
                binr, \
                ad_ewmean, ad_ewsdev, ad_mod_ewmean, ad_mod_ewsdev, ad_nbin, \
                ad_bc_ewmean, ad_bc_ewsdev, ad_mod_bc_ewmean, ad_mod_bc_ewsdev, ad_bc_nbin, \
                ados_ewmean, ados_ewsdev, ados_mod_ewmean, ados_mod_ewsdev, ados_nbin, \
                ados_bc_ewmean, ados_bc_ewsdev, ados_mod_bc_ewmean, ados_mod_bc_ewsdev, \
                ados_bc_nbin
def asymdrift_radial_profile(disk, kin, rstep, maj_wedge=30.):
    """
    Construct azimuthally averaged radial profiles of the kinematics.

    Computes the asymmetric drift (gas rotation squared minus stellar
    rotation squared) for major-axis bins and returns both the per-bin
    values and their error-weighted radial profile.
    """
    # Enforce the expected two-tracer, matched-grid input.
    if disk.ntracer != 2:
        raise NotImplementedError('Must provide two disks, first gas, second stars.')
    if len(kin) != 2:
        raise NotImplementedError('Must provide two kinematic datasets, first gas, second stars.')
    if kin[0].spatial_shape != kin[1].spatial_shape:
        raise NotImplementedError('Kinematic datasets must have the same spatial shape.')
    if not np.allclose(kin[0].grid_x, kin[1].grid_x) \
            or not np.allclose(kin[0].grid_y, kin[1].grid_y):
        raise NotImplementedError('Kinematics datasets must have the same grid sampling!')
    if not np.all(disk.tie_base[:4]):
        raise NotImplementedError('Disk must have tied the xc, yc, pa, and inc for both tracers.')
    gas, stars = kin[0], kin[1]
    # Full (untied) parameter vector
    par = disk.par[disk.untie]
    # Map the valid gas and stellar velocities; a spaxel contributes to the
    # AD only when both tracers are unmasked there.
    gmap = gas.remap('vel', mask=np.logical_not(disk[0].vel_gpm) | gas.vel_mask)
    smap = stars.remap('vel', mask=np.logical_not(disk[1].vel_gpm) | stars.vel_mask)
    joint_mask_map = np.ma.getmaskarray(gmap) | np.ma.getmaskarray(smap)
    # Conservative: a stellar bin is masked if *any* of its spaxels is masked.
    ad_mask = stars.bin(joint_mask_map.astype(int)) > 0.
    # Rebin the gas data using the stellar binning
    gsig_map = np.ma.sqrt(gas.remap('sig_phys2',
                                    mask=np.logical_not(disk[0].sig_gpm) | gas.sig_mask))
    _, gvel, gsig = stars.bin_moments(gas.grid_sb, gmap.filled(0.0), gsig_map.filled(0.0))
    # Propagate the gas velocity errors through the rebinning
    gvel_var_map = np.ma.power(gas.remap('vel_ivar',
                                         mask=np.logical_not(disk[0].vel_gpm) | gas.vel_mask),
                               -1)
    gvel_ivar = np.ma.power(stars.bin_moments(gas.grid_sb, gvel_var_map.filled(0.0), None)[1],
                            -1)
    # In-plane polar coordinates of the stellar bins
    r, th = projected_polar(stars.x - par[0], stars.y - par[1], *np.radians(par[2:4]))
    # Keep only bins inside the major-axis wedge that have valid AD data
    indx = select_kinematic_axis(r, th, which='major', r_range='all', wedge=maj_wedge) \
                & np.logical_not(ad_mask)
    # Radial bins
    binr = np.arange(rstep/2, np.amax(r), rstep)
    binw = np.full(binr.size, rstep, dtype=float)
    # Projected rotation speeds and their inverse variances
    costh = np.cos(th[indx])
    ad_r = r[indx]
    gas_vrot = (gvel[indx] - par[4])/costh
    gas_vrot_ivar = gvel_ivar.data[indx]*costh**2
    str_vrot = (stars.vel[indx] - par[disk[0].np+4])/costh
    str_vrot_ivar = stars.vel_ivar[indx]*costh**2
    # Asymmetric drift and its inverse-variance weights
    ad = gas_vrot**2 - str_vrot**2
    ad_wgt = 1 / 2 / (gas_vrot**2/gas_vrot_ivar + str_vrot**2/str_vrot_ivar)
    _, _, _, _, _, _, _, ad_ewmean, ad_ewsdev, _, _, ad_nbin, _ \
            = bin_stats(ad_r, ad, binr, binw, wgts=ad_wgt, fill_value=0.0)
    return ad_r, ad, binr, ad_ewmean, ad_ewsdev, ad_nbin
def _rej_iters(rej):
_rej = None if rej is None else list(rej)
if _rej is not None and len(_rej) == 1:
_rej *= 4
if _rej is not None and len(_rej) != 4:
raise ValueError('Must provide 1 or 4 sigma rejection levels.')
return _rej
def asymdrift_iter_fit(galmeta, gas_kin, str_kin, gas_disk, str_disk, gas_vel_mask=None,
gas_sig_mask=None, str_vel_mask=None, str_sig_mask=None, ignore_covar=True,
assume_posdef_covar=True, gas_vel_sigma_rej=[15,10,10,10],
gas_sig_sigma_rej=[15,10,10,10], str_vel_sigma_rej=[15,10,10,10],
str_sig_sigma_rej=[15,10,10,10], fix_cen=False, fix_inc=False, low_inc=None,
min_unmasked=None, analytic_jac=True, fit_scatter=True, verbose=0):
r"""
Iteratively fit a two-component disk to measure asymmetric drift.
The constraints and iterations closely mirror the approach used by
:func:`~nirvana.models.axisym.axisym_iter_fit`.
The initial guess parameters are set using the provided ``gas_disk`` and
``str_disk`` objects. If the ``par`` attributes of either of these objects
are None, the guess parameters are set by
:func:`~nirvana.models.thindisk.ThinDisk.guess_par` function of the derived
class (e.g., :func:`~nirvana.models.axisym.AxisymmetricDisk.guess_par`).
The initial guess for the geometric parameters are simply the mean of the
two disk objects, except that the guess for the inclination is always set to
the photometric inclination.
Constraints are as follows:
#. The center is constrained to be in the middle third of the available
range in x and y.
#. The center, position angle, and inclination are forced to be the same
for both disks. The systemic velocities are, however, allowed to be
different.
The iterations are as follows:
#. Fit all data but fix the inclination to the value returned by
:func:`~nirvana.data.meta.GlobalPar.guess_inclination` and fix the
center to the initial guess value. The initial guess will either be
:math:`(x,y) = (0,0)` or the mean of the centers provided by the
``gas_disk`` and ``str_disk`` arguments. If available, covariance is
ignored.
#. Reject outliers for all 4 kinematic measurements (gas v, gas sigma,
stellar v, stellar sigma) using
:func:`~nirvana.models.thindisk.ThinDisk.reject`. The rejection
sigma used is the *first* element in the provided lists. Then refit
the data, starting again from the initial guess parameters. The
intrinsic scatter estimates provided by
:func:`~nirvana.models.thindisk.ThinDisk.reject` are
*not* included in the fit and, if available, covariance is ignored.
#. Reject outliers for all 4 kinematic measurements (gas v, gas sigma,
stellar v, stellar sigma) using
:func:`~nirvana.models.thindisk.ThinDisk.reject`. The rejection
sigma used is the *second* element in the provided lists. Then refit
the data using the parameters from the previous fit as the starting
point. This iteration also uses the intrinsic scatter estimates
provided by :func:`~nirvana.models.thindisk.ThinDisk.reject`;
however, covariance is still ignored.
#. Recover all fit rejections (i.e., keep any masks in place that are
tied to the data quality, but remove any masks associated with fit
quality). Then use :func:`~nirvana.models.thindisk.ThinDisk.reject`
to perform a fresh rejection based on the most recent model; the
rejection sigma is the
*second* element in the provided lists. The resetting of the
fit-outliers and re-rejection is done on the off chance that
rejections from the first few iterations were driven by a bad model.
Refit the data as in the previous iteration, using the parameters
from the previous fit as the starting point and use the intrinsic
scatter estimates provided by
:func:`~nirvana.models.thindisk.ThinDisk.reject`. Covariance is
still ignored.
#. Reject outliers for all 4 kinematic measurements (gas v, gas sigma,
stellar v, stellar sigma) using
:func:`~nirvana.models.thindisk.ThinDisk.reject`. The rejection
sigma used is the *third* element in the provided lists. Then refit
the data, but fix or free the center and inclination based on the
provided keywords (``fix_cen`` and ``fix_inc``). Also, as in all
previous iterations, the covariance is ignored in the outlier
rejection and intrinsic scatter determination; however, the
covariance *is* used by the fit, as available and if ``ignore_covar``
is False.
#. Redo the previous iteration in exactly the same way, except outlier
rejection and intrinsic-scatter determination now use the covariance,
as available and if ``ignore_covar`` is False. The rejection sigma
used is the *fourth* element in the provided lists.
#. If a lower inclination threshold is set (see ``low_inc``) and the
best-fitting inclination is below this value (assuming the
inclination is freely fit), a final iteration refits the data by
fixing the inclination at the value set by
:func:`~nirvana.data.meta.GlobalPar.guess_inclination`. The code
issues a warning, and the global fit-quality bit is set to include
the ``LOWINC`` flag.
Args:
galmeta (:class:`~nirvana.data.meta.GlobalPar`):
Object with metadata for the galaxy to be fit.
gas_kin (:class:`~nirvana.data.kinematics.Kinematics`):
Object with the gas data to be fit
str_kin (:class:`~nirvana.data.kinematics.Kinematics`):
Object with the stellar data to be fit
gas_disk (:class:`~nirvana.models.thindisk.ThinDisk`):
Thin disk object used to model and set the initial guess parameters
for the gas disk (see above).
str_disk (:class:`~nirvana.models.thindisk.ThinDisk`):
Thin disk object used to model and set the initial guess parameters
for the stellar disk (see above).
gas_vel_mask (`numpy.ndarray`_, optional):
Initial array with the mask bits for gas velocities. If None,
initialization uses
:func:`~nirvana.data.kinematics.Kinematics.init_fitting_masks` for
the gas data.
gas_sig_mask (`numpy.ndarray`_, optional):
Initial array with the mask bits for gas velocity dispersions. If
None, initialization uses
:func:`~nirvana.data.kinematics.Kinematics.init_fitting_masks` for
the gas data.
str_vel_mask (`numpy.ndarray`_, optional):
Initial array with the mask bits for stellar velocities. If None,
initialization uses
:func:`~nirvana.data.kinematics.Kinematics.init_fitting_masks` for
the stellar data.
gas_sig_mask (`numpy.ndarray`_, optional):
Initial array with the mask bits for stellar velocity dispersions.
If None, initialization uses
:func:`~nirvana.data.kinematics.Kinematics.init_fitting_masks` for
the stellar data.
ignore_covar (:obj:`bool`, optional):
If ``kin`` provides the covariance between measurements, ignore it
and fit the data assuming there is no covariance.
assume_posdef_covar (:obj:`bool`, optional):
If ``kin`` provides the covariance between measurements, assume the
covariance matrices are positive definite.
gas_vel_sigma_rej (:obj:`float`, :obj:`list`, optional):
Sigma values used for rejection of the gas velocity measurements.
Must be a single float or a *four-element* list. If None, no
rejections are performed. The description above provides which
value is used in each iteration.
gas_sig_sigma_rej (:obj:`float`, :obj:`list`, optional):
Sigma values used for rejection of gas dispersion measurements; cf.
``gas_vel_sigma_rej``.
str_vel_sigma_rej (:obj:`float`, :obj:`list`, optional):
Sigma values used for rejection of the stellar velocity
measurements; cf. ``gas_vel_sigma_rej``.
str_sig_sigma_rej (:obj:`float`, :obj:`list`, optional):
Sigma values used for rejection of stellar dispersion measurements;
cf. ``gas_vel_sigma_rej``.
fix_cen (:obj:`bool`, optional):
Fix the dynamical center of the fit to 0,0 in the final fit
iteration.
fix_inc (:obj:`bool`, optional):
Fix the kinematic inclination of the fit to estimate provided by the
:func:`~nirvana.data.meta.GlobalPar.guess_inclination` method of
``galmeta``.
low_inc (scalar-like, optional):
If the inclination is free and the best-fitting inclination from the
final fit iteration is below this value, flag the global bitmask of
the fit as having a low inclination and refit the data using a fixed
inclination set by
:func:`~nirvana.data.meta.GlobalPar.guess_inclination` (i.e., this
is the same as when setting ``fix_inc`` to True). If None, no
minimum is set on the viable inclination (apart from the fit
boundaries).
min_unmasked (:obj:`int`, optional):
The minimum of velocity measurements (and velocity dispersion
measurements, if they are available and being fit) required to
proceed with the fit, after applying all masking. This is applied
independently to both the gas and stellar data.
analytic_jac (:obj:`bool`, optional):
Use the analytic calculation of the Jacobian matrix during the fit
optimization. If False, the Jacobian is calculated using
finite-differencing methods provided by
`scipy.optimize.least_squares`_.
fit_scatter (:obj:`bool`, optional):
Model the intrinsic scatter in the data about the model during the
fit optimization. The scatter is modeled independently for all 4
kinematic measurements (gas velocity and dispersion, stellar
velocity and dispersion).
verbose (:obj:`int`, optional):
Verbosity level: 0=only status output written to terminal; 1=show
fit result QA plot; 2=full output
Returns:
:obj:`tuple`: Returns 9 objects: (1) the
:class:`~nirvana.models.multitrace.MultiTracerDisk` instance used during
the fit, (2) a `numpy.ndarray`_ with the input guess parameters, (3,4)
`numpy.ndarray`_ objects with the lower and upper bounds imposed on the
best-fit parameters, (5) a boolean `numpy.ndarray`_ selecting the
parameters that were fixed during the fit, (6,7) `numpy.ndarray`_
objects with the bad-pixel masks for the gas velocity and dispersion
measurements used in the fit, and (8,9) `numpy.ndarray`_ objects with
the bad-pixel masks for the stellar velocity and dispersion measurements
used in the fit.
"""
# Running in "debug" mode
debug = verbose > 1
# Check input
_gas_vel_sigma_rej = _rej_iters(gas_vel_sigma_rej)
_gas_sig_sigma_rej = _rej_iters(gas_sig_sigma_rej)
_str_vel_sigma_rej = _rej_iters(str_vel_sigma_rej)
_str_sig_sigma_rej = _rej_iters(str_sig_sigma_rej)
#---------------------------------------------------------------------------
# Initialize the fitting object and set the guess parameters
disk = MultiTracerDisk([gas_disk, str_disk])
if gas_disk.par is None:
gas_disk.par = gas_disk.guess_par()
if str_disk.par is None:
str_disk.par = str_disk.guess_par()
p0 = np.append(gas_disk.par, str_disk.par)
p0[:disk.nbp] = (gas_disk.par[:disk.nbp] + str_disk.par[:disk.nbp])/2.
p0[gas_disk.np:gas_disk.np+disk.nbp] = p0[:disk.nbp]
# Force the inclination to be the photometric inclination
p0[3] = p0[gas_disk.np+3] = galmeta.guess_inclination(lb=1., ub=89.)
#---------------------------------------------------------------------------
# Define the fitting object
# Constrain the center to be in the middle third of the map relative to the
# photometric center. The mean in the calculation is to mitigate that some
# galaxies can be off center, but the detail here and how well it works
# hasn't been well tested.
# TODO: Should this use grid_x instead, so that it's more uniform for all
# IFUs? Or should this be set as a fraction of Reff?
_x = np.append(gas_kin.x, str_kin.x)
_y = np.append(gas_kin.y, str_kin.y)
dx = np.mean([abs(np.amin(_x)), abs(np.amax(_x))])
dy = np.mean([abs(np.amin(_y)), abs(np.amax(_y))])
lb, ub = disk.par_bounds(base_lb=np.array([-dx/3, -dy/3, -350., 1., -500.]),
base_ub=np.array([dx/3, dy/3, 350., 89., 500.]))
print(f'If free, center constrained within +/- {dx/3:.1f} in X and +/- {dy/3:.1f} in Y.')
# TODO: Handle these issues instead of faulting
if np.any(np.less(p0, lb)):
raise ValueError('Parameter lower bounds cannot accommodate initial guess value!')
if np.any(np.greater(p0, ub)):
raise ValueError('Parameter upper bounds cannot accommodate initial guess value!')
#---------------------------------------------------------------------------
# Setup the masks
print('Initializing data masking')
if gas_vel_mask is None or gas_sig_mask is None:
_gas_vel_mask, _gas_sig_mask = gas_kin.init_fitting_masks(bitmask=disk.mbm, verbose=True)
else:
_gas_vel_mask = gas_vel_mask.copy()
_gas_sig_mask = gas_sig_mask.copy()
# Make sure there are sufficient data to fit!
if min_unmasked is None:
if np.all(_gas_vel_mask > 0):
raise ValueError('All gas velocity measurements masked!')
if _gas_sig_mask is not None and np.all(_gas_sig_mask > 0):
raise ValueError('All gas velocity dispersion measurements masked!')
else:
if np.sum(np.logical_not(_gas_vel_mask > 0)) < min_unmasked:
raise ValueError('Insufficient valid gas velocity measurements to continue!')
if _gas_sig_mask is not None and np.sum(np.logical_not(_gas_sig_mask > 0)) < min_unmasked:
raise ValueError('Insufficient valid gas dispersion measurements to continue!')
if str_vel_mask is None or str_sig_mask is None:
_str_vel_mask, _str_sig_mask = str_kin.init_fitting_masks(bitmask=disk.mbm, verbose=True)
else:
_str_vel_mask = str_vel_mask.copy()
_str_sig_mask = str_sig_mask.copy()
# Make sure there are sufficient data to fit!
if min_unmasked is None:
if np.all(_str_vel_mask > 0):
raise ValueError('All stellar velocity measurements masked!')
if _str_sig_mask is not None and np.all(_str_sig_mask > 0):
raise ValueError('All stellar dispersion measurements masked!')
else:
if np.sum(np.logical_not(_str_vel_mask > 0)) < min_unmasked:
raise ValueError('Insufficient valid stellar velocity measurements to continue!')
if _str_sig_mask is not None and np.sum(np.logical_not(_str_sig_mask > 0)) < min_unmasked:
raise ValueError('Insufficient valid stellar dispersion measurements to continue!')
#---------------------------------------------------------------------------
# Perform the fit iterations
#---------------------------------------------------------------------------
# Tie all the geometric projection parameters, but leave the systemic
# velocities to be independent for each dataset.
disk.update_tie_base([True, True, True, True, False])
# Fit iteration 1: Fit all data but fix the inclination and center
# x0 y0 pa inc vsys
fix = np.append([True, True, False, True, False], np.zeros(p0.size-5, dtype=bool))
print('Running fit iteration 1')
# TODO: sb_wgt is always true throughout. Make this a command-line
# parameter?
disk.lsq_fit([gas_kin, str_kin], sb_wgt=True, p0=p0, fix=fix, lb=lb, ub=ub,
ignore_covar=True, assume_posdef_covar=assume_posdef_covar,
analytic_jac=analytic_jac, verbose=verbose)
# Show
if verbose > 0:
asymdrift_fit_plot(galmeta, [gas_kin, str_kin], disk, fix=fix)
#---------------------------------------------------------------------------
# Fit iteration 2:
# - Reject very large outliers. This is aimed at finding data that is
# so descrepant from the model that it's reasonable to expect the
# measurements are bogus.
print('Running rejection iterations')
gas_vel_rej, gas_vel_sig, gas_sig_rej, gas_sig_sig \
= disk.disk[0].reject(vel_sigma_rej=_gas_vel_sigma_rej[0], show_vel=debug,
sig_sigma_rej=_gas_sig_sigma_rej[0], show_sig=debug,
verbose=verbose > 1)
if np.any(gas_vel_rej):
print(f'{np.sum(gas_vel_rej)} gas velocity measurements rejected as unreliable.')
gas_vel_mask[gas_vel_rej] = disk.mbm.turn_on(gas_vel_mask[gas_vel_rej], 'REJ_UNR')
if gas_sig_rej is not None and np.any(gas_sig_rej):
print(f'{np.sum(gas_sig_rej)} gas dispersion measurements rejected as unreliable.')
gas_sig_mask[gas_sig_rej] = disk.mbm.turn_on(gas_sig_mask[gas_sig_rej], 'REJ_UNR')
gas_kin.reject(vel_rej=gas_vel_rej, sig_rej=gas_sig_rej)
str_vel_rej, str_vel_sig, str_sig_rej, str_sig_sig \
= disk.disk[1].reject(vel_sigma_rej=_str_vel_sigma_rej[0], show_vel=debug,
sig_sigma_rej=_str_sig_sigma_rej[0], show_sig=debug,
verbose=verbose > 1)
if np.any(str_vel_rej):
print(f'{np.sum(str_vel_rej)} stellar velocity measurements rejected as unreliable.')
str_vel_mask[str_vel_rej] = disk.mbm.turn_on(str_vel_mask[str_vel_rej], 'REJ_UNR')
if str_sig_rej is not None and np.any(str_sig_rej):
print(f'{np.sum(str_sig_rej)} stellar dispersion measurements rejected as unreliable.')
str_sig_mask[str_sig_rej] = disk.mbm.turn_on(str_sig_mask[str_sig_rej], 'REJ_UNR')
str_kin.reject(vel_rej=str_vel_rej, sig_rej=str_sig_rej)
# - Refit, again with the inclination and center fixed. However, do not
# use the parameters from the previous fit as the starting point, and
# ignore the estimated intrinsic scatter.
print('Running fit iteration 2')
disk.lsq_fit([gas_kin, str_kin], sb_wgt=True, p0=p0, fix=fix, lb=lb, ub=ub,
ignore_covar=True, assume_posdef_covar=assume_posdef_covar,
analytic_jac=analytic_jac, verbose=verbose)
# Show
if verbose > 0:
asymdrift_fit_plot(galmeta, [gas_kin, str_kin], disk, fix=fix)
#---------------------------------------------------------------------------
# Fit iteration 3:
# - Perform a more restricted rejection
print('Running rejection iterations')
gas_vel_rej, gas_vel_sig, gas_sig_rej, gas_sig_sig \
= disk.disk[0].reject(vel_sigma_rej=_gas_vel_sigma_rej[1], show_vel=debug,
sig_sigma_rej=_gas_sig_sigma_rej[1], show_sig=debug,
verbose=verbose > 1)
if np.any(gas_vel_rej):
print(f'{np.sum(gas_vel_rej)} gas velocity measurements rejected as unreliable.')
gas_vel_mask[gas_vel_rej] = disk.mbm.turn_on(gas_vel_mask[gas_vel_rej], 'REJ_RESID')
if gas_sig_rej is not None and np.any(gas_sig_rej):
print(f'{np.sum(gas_sig_rej)} gas dispersion measurements rejected as unreliable.')
gas_sig_mask[gas_sig_rej] = disk.mbm.turn_on(gas_sig_mask[gas_sig_rej], 'REJ_RESID')
gas_kin.reject(vel_rej=gas_vel_rej, sig_rej=gas_sig_rej)
str_vel_rej, str_vel_sig, str_sig_rej, str_sig_sig \
= disk.disk[1].reject(vel_sigma_rej=_str_vel_sigma_rej[1], show_vel=debug,
sig_sigma_rej=_str_sig_sigma_rej[1], show_sig=debug,
verbose=verbose > 1)
if np.any(str_vel_rej):
print(f'{np.sum(str_vel_rej)} stellar velocity measurements rejected as unreliable.')
str_vel_mask[str_vel_rej] = disk.mbm.turn_on(str_vel_mask[str_vel_rej], 'REJ_RESID')
if str_sig_rej is not None and np.any(str_sig_rej):
print(f'{np.sum(str_sig_rej)} stellar dispersion measurements rejected as unreliable.')
str_sig_mask[str_sig_rej] = disk.mbm.turn_on(str_sig_mask[str_sig_rej], 'REJ_RESID')
str_kin.reject(vel_rej=str_vel_rej, sig_rej=str_sig_rej)
# - Refit again with the inclination and center fixed, but use the
# previous fit as the starting point and include the estimated
# intrinsic scatter.
print('Running fit iteration 3')
scatter = np.array([gas_vel_sig, gas_sig_sig, str_vel_sig, str_sig_sig]) \
if fit_scatter else None
disk.lsq_fit([gas_kin, str_kin], sb_wgt=True, p0=disk.par[disk.untie], fix=fix, lb=lb, ub=ub,
ignore_covar=True, assume_posdef_covar=assume_posdef_covar, scatter=scatter,
analytic_jac=analytic_jac, verbose=verbose)
# Show
if verbose > 0:
asymdrift_fit_plot(galmeta, [gas_kin, str_kin], disk, fix=fix)
#---------------------------------------------------------------------------
# Fit iteration 4:
# - Recover data from the restricted rejection
disk.mbm.reset_to_base_flags(gas_kin, gas_vel_mask, gas_sig_mask)
disk.mbm.reset_to_base_flags(str_kin, str_vel_mask, str_sig_mask)
# - Reject again based on the new fit parameters
print('Running rejection iterations')
gas_vel_rej, gas_vel_sig, gas_sig_rej, gas_sig_sig \
= disk.disk[0].reject(vel_sigma_rej=_gas_vel_sigma_rej[1], show_vel=debug,
sig_sigma_rej=_gas_sig_sigma_rej[1], show_sig=debug,
verbose=verbose > 1)
if np.any(gas_vel_rej):
print(f'{np.sum(gas_vel_rej)} gas velocity measurements rejected as unreliable.')
gas_vel_mask[gas_vel_rej] = disk.mbm.turn_on(gas_vel_mask[gas_vel_rej], 'REJ_RESID')
if gas_sig_rej is not None and np.any(gas_sig_rej):
print(f'{np.sum(gas_sig_rej)} gas dispersion measurements rejected as unreliable.')
gas_sig_mask[gas_sig_rej] = disk.mbm.turn_on(gas_sig_mask[gas_sig_rej], 'REJ_RESID')
gas_kin.reject(vel_rej=gas_vel_rej, sig_rej=gas_sig_rej)
str_vel_rej, str_vel_sig, str_sig_rej, str_sig_sig \
= disk.disk[1].reject(vel_sigma_rej=_str_vel_sigma_rej[1], show_vel=debug,
sig_sigma_rej=_str_sig_sigma_rej[1], show_sig=debug,
verbose=verbose > 1)
if np.any(str_vel_rej):
print(f'{np.sum(str_vel_rej)} stellar velocity measurements rejected as unreliable.')
str_vel_mask[str_vel_rej] = disk.mbm.turn_on(str_vel_mask[str_vel_rej], 'REJ_RESID')
if str_sig_rej is not None and np.any(str_sig_rej):
print(f'{np.sum(str_sig_rej)} stellar dispersion measurements rejected as unreliable.')
str_sig_mask[str_sig_rej] = disk.mbm.turn_on(str_sig_mask[str_sig_rej], 'REJ_RESID')
str_kin.reject(vel_rej=str_vel_rej, sig_rej=str_sig_rej)
# - Refit again with the inclination and center fixed, but use the
# previous fit as the starting point and include the estimated
# intrinsic scatter.
print('Running fit iteration 4')
scatter = np.array([gas_vel_sig, gas_sig_sig, str_vel_sig, str_sig_sig]) \
if fit_scatter else None
disk.lsq_fit([gas_kin, str_kin], sb_wgt=True, p0=disk.par[disk.untie], fix=fix, lb=lb, ub=ub,
ignore_covar=True, assume_posdef_covar=assume_posdef_covar, scatter=scatter,
analytic_jac=analytic_jac, verbose=verbose)
# Show
if verbose > 0:
asymdrift_fit_plot(galmeta, [gas_kin, str_kin], disk, fix=fix)
#---------------------------------------------------------------------------
# Fit iteration 5:
# - Recover data from the restricted rejection
disk.mbm.reset_to_base_flags(gas_kin, gas_vel_mask, gas_sig_mask)
disk.mbm.reset_to_base_flags(str_kin, str_vel_mask, str_sig_mask)
# - Reject again based on the new fit parameters
print('Running rejection iterations')
gas_vel_rej, gas_vel_sig, gas_sig_rej, gas_sig_sig \
= disk.disk[0].reject(vel_sigma_rej=_gas_vel_sigma_rej[2], show_vel=debug,
sig_sigma_rej=_gas_sig_sigma_rej[2], show_sig=debug,
verbose=verbose > 1)
if np.any(gas_vel_rej):
print(f'{np.sum(gas_vel_rej)} gas velocity measurements rejected as unreliable.')
gas_vel_mask[gas_vel_rej] = disk.mbm.turn_on(gas_vel_mask[gas_vel_rej], 'REJ_RESID')
if gas_sig_rej is not None and np.any(gas_sig_rej):
print(f'{np.sum(gas_sig_rej)} gas dispersion measurements rejected as unreliable.')
gas_sig_mask[gas_sig_rej] = disk.mbm.turn_on(gas_sig_mask[gas_sig_rej], 'REJ_RESID')
gas_kin.reject(vel_rej=gas_vel_rej, sig_rej=gas_sig_rej)
str_vel_rej, str_vel_sig, str_sig_rej, str_sig_sig \
= disk.disk[1].reject(vel_sigma_rej=_str_vel_sigma_rej[2], show_vel=debug,
sig_sigma_rej=_str_sig_sigma_rej[2], show_sig=debug,
verbose=verbose > 1)
if np.any(str_vel_rej):
print(f'{np.sum(str_vel_rej)} stellar velocity measurements rejected as unreliable.')
str_vel_mask[str_vel_rej] = disk.mbm.turn_on(str_vel_mask[str_vel_rej], 'REJ_RESID')
if str_sig_rej is not None and np.any(str_sig_rej):
print(f'{np.sum(str_sig_rej)} stellar dispersion measurements rejected as unreliable.')
str_sig_mask[str_sig_rej] = disk.mbm.turn_on(str_sig_mask[str_sig_rej], 'REJ_RESID')
str_kin.reject(vel_rej=str_vel_rej, sig_rej=str_sig_rej)
# - Now fit as requested by the user, freeing one or both of the
# inclination and center. Use the previous fit as the starting point
# and include the estimated intrinsic scatter and the covariance.
# x0 y0 pa inc vsys
base_fix = np.array([False, False, False, False, False])
if fix_cen:
base_fix[:2] = True
if fix_inc:
base_fix[3] = True
fix = np.append(base_fix, np.zeros(p0.size-5, dtype=bool))
print('Running fit iteration 5')
scatter = np.array([gas_vel_sig, gas_sig_sig, str_vel_sig, str_sig_sig]) \
if fit_scatter else None
disk.lsq_fit([gas_kin, str_kin], sb_wgt=True, p0=disk.par[disk.untie], fix=fix, lb=lb, ub=ub,
ignore_covar=ignore_covar, assume_posdef_covar=assume_posdef_covar,
scatter=scatter, analytic_jac=analytic_jac, verbose=verbose)
# Show
if verbose > 0:
asymdrift_fit_plot(galmeta, [gas_kin, str_kin], disk, fix=fix)
#---------------------------------------------------------------------------
# Fit iteration 6:
# - Recover data from the restricted rejection
disk.mbm.reset_to_base_flags(gas_kin, gas_vel_mask, gas_sig_mask)
disk.mbm.reset_to_base_flags(str_kin, str_vel_mask, str_sig_mask)
# - Reject again based on the new fit parameters
print('Running rejection iterations')
gas_vel_rej, gas_vel_sig, gas_sig_rej, gas_sig_sig \
= disk.disk[0].reject(vel_sigma_rej=_gas_vel_sigma_rej[3], show_vel=debug,
sig_sigma_rej=_gas_sig_sigma_rej[3], show_sig=debug,
verbose=verbose > 1)
if np.any(gas_vel_rej):
print(f'{np.sum(gas_vel_rej)} gas velocity measurements rejected as unreliable.')
gas_vel_mask[gas_vel_rej] = disk.mbm.turn_on(gas_vel_mask[gas_vel_rej], 'REJ_RESID')
if gas_sig_rej is not None and np.any(gas_sig_rej):
print(f'{np.sum(gas_sig_rej)} gas dispersion measurements rejected as unreliable.')
gas_sig_mask[gas_sig_rej] = disk.mbm.turn_on(gas_sig_mask[gas_sig_rej], 'REJ_RESID')
gas_kin.reject(vel_rej=gas_vel_rej, sig_rej=gas_sig_rej)
str_vel_rej, str_vel_sig, str_sig_rej, str_sig_sig \
= disk.disk[1].reject(vel_sigma_rej=_str_vel_sigma_rej[3], show_vel=debug,
sig_sigma_rej=_str_sig_sigma_rej[3], show_sig=debug,
verbose=verbose > 1)
if np.any(str_vel_rej):
print(f'{np.sum(str_vel_rej)} stellar velocity measurements rejected as unreliable.')
str_vel_mask[str_vel_rej] = disk.mbm.turn_on(str_vel_mask[str_vel_rej], 'REJ_RESID')
if str_sig_rej is not None and np.any(str_sig_rej):
print(f'{np.sum(str_sig_rej)} stellar dispersion measurements rejected as unreliable.')
str_sig_mask[str_sig_rej] = disk.mbm.turn_on(str_sig_mask[str_sig_rej], 'REJ_RESID')
str_kin.reject(vel_rej=str_vel_rej, sig_rej=str_sig_rej)
# - Redo previous fit
print('Running fit iteration 6')
scatter = np.array([gas_vel_sig, gas_sig_sig, str_vel_sig, str_sig_sig]) \
if fit_scatter else None
disk.lsq_fit([gas_kin, str_kin], sb_wgt=True, p0=disk.par[disk.untie], fix=fix, lb=lb, ub=ub,
ignore_covar=ignore_covar, assume_posdef_covar=assume_posdef_covar,
scatter=scatter, analytic_jac=analytic_jac, verbose=verbose)
# Show
if verbose > 0:
asymdrift_fit_plot(galmeta, [gas_kin, str_kin], disk, fix=fix)
if fix_inc or low_inc is None or disk.par[3] > low_inc:
# Inclination is valid, so return
return disk, p0, lb, ub, fix, gas_vel_mask, gas_sig_mask, str_vel_mask, str_sig_mask
#---------------------------------------------------------------------------
# Fit iteration 7:
# - The best-fitting inclination is below the viable value. Flag it.
disk.global_mask = disk.gbm.turn_on(disk.global_mask, 'LOWINC')
# - Refit the data, but fix the inclination to the guess value.
# x0 y0 pa inc vsys
base_fix = np.array([False, False, False, True, False])
if fix_cen:
# Fix the center, if requested
base_fix[:2] = True
fix = np.append(base_fix, np.zeros(p0.size-5, dtype=bool))
# NOTE: This assumes the inclination is tied!!
disk.par[3] = galmeta.guess_inclination(lb=1., ub=89.)
warnings.warn(f'Best-fitting inclination is below {low_inc:.1f} degrees. Running a final '
f'fit fixing the inclination to {disk.par[3]:.1f}')
print('Running fit iteration 7')
disk.lsq_fit([gas_kin, str_kin], sb_wgt=True, p0=disk.par[disk.untie], fix=fix, lb=lb, ub=ub,
ignore_covar=ignore_covar, assume_posdef_covar=assume_posdef_covar,
scatter=scatter, analytic_jac=analytic_jac, verbose=verbose)
# Show
if verbose > 0:
asymdrift_fit_plot(galmeta, [gas_kin, str_kin], disk, fix=fix)
return disk, p0, lb, ub, fix, gas_vel_mask, gas_sig_mask, str_vel_mask, str_sig_mask
# TODO:
# - This is MaNGA-specific and needs to be abstracted
# - Copy over the DataTable class from the DAP, or use an astropy.table.Table?
def _ad_meta_dtype(nr):
    """
    Build the numpy record dtype for the asymmetric-drift radial profiles.

    Parameters
    ----------
    nr : int
        Number of radial bins; every profile column has shape ``(nr,)``.

    Returns
    -------
    list
        List of ``(name, type[, shape])`` tuples suitable for constructing a
        numpy record array.
    """
    shape = (nr,)
    # Identification columns, then the radial-bin centers.
    dtype = [('MANGAID', '<U30'),
             ('PLATEIFU', '<U12'),
             # Azimuthally binned radial profiles
             ('BINR', float, shape)]
    # Each quantity (AD, beam-corrected AD, AD-over-sigma, beam-corrected
    # AD-over-sigma) carries the same five columns: measured value, its
    # standard deviation, the model value, its standard deviation, and the
    # number of measurements used per bin.
    for prefix in ('AD', 'AD_BC', 'ADOS', 'ADOS_BC'):
        dtype += [(prefix, float, shape),
                  (f'{prefix}_SDEV', float, shape),
                  (f'{prefix}_MOD', float, shape),
                  (f'{prefix}_MOD_SDEV', float, shape),
                  (f'{prefix}_NUSE', int, shape)]
    return dtype
def asymdrift_fit_data(galmeta, kin, disk, p0, lb, ub, gas_vel_mask, gas_sig_mask,
                       str_vel_mask, str_sig_mask, ofile=None):
    """
    Assemble the asymmetric-drift fit results into a fits HDUList.

    Parameters
    ----------
    galmeta : object
        Galaxy metadata; ``mangaid``, ``plateifu``, and ``psf_fwhm`` are read
        here (presumably a MaNGA metadata object -- see module TODO).
    kin : list
        Two-element list of kinematics objects, ordered [gas, stars].
    disk : object
        Multi-tracer disk object holding the best-fit parameters and masks.
    p0, lb, ub : numpy ndarray
        Initial-guess parameters and their lower/upper bounds; sliced per
        tracer before being passed to the per-tracer output builder.
    gas_vel_mask, gas_sig_mask, str_vel_mask, str_sig_mask : numpy ndarray
        Bitmask maps for the gas/stellar velocity and dispersion data.
    ofile : str, optional
        Output file name.  If it ends in '.gz', the file is written
        uncompressed first and then compressed.  If None, nothing is written.

    Returns
    -------
    `astropy.io.fits.HDUList`
        The constructed (and possibly written) HDU list.
    """
    # Create the output data file
    # - Ensure the best-fitting parameters have been distributed to the disks
    disk.distribute_par()
    # - Propagate the global fit assessments/flags to each tracer-specific disk
    #   object
    disk[0].global_mask = disk.global_mask
    disk[0].fit_status = disk.fit_status
    disk[0].fit_success = disk.fit_success
    disk[1].global_mask = disk.global_mask
    disk[1].fit_status = disk.fit_status
    disk[1].fit_success = disk.fit_success
    # - Get the output data for the gas
    gas_slice = disk.disk_slice(0)
    gas_hdu = axisym.axisym_fit_data(galmeta, kin[0], p0[gas_slice], lb[gas_slice], ub[gas_slice],
                                     disk[0], gas_vel_mask, gas_sig_mask)
    # - Get the output data for the stars
    str_slice = disk.disk_slice(1)
    str_hdu = axisym.axisym_fit_data(galmeta, kin[1], p0[str_slice], lb[str_slice], ub[str_slice],
                                     disk[1], str_vel_mask, str_sig_mask)
    # Get the asymmetric drift data.  The radial step oversamples the PSF
    # FWHM by a factor of 1.5, and the profiles are computed in a 30-degree
    # wedge around the major axis.
    fwhm = galmeta.psf_fwhm[1]
    oversample = 1.5
    rstep = fwhm/oversample
    maj_wedge = 30.
    gv_map, gv_ivar_map, gv_mod_map, gv_mod_intr_map, \
        sv_map, sv_ivar_map, sv_mod_map, sv_mod_intr_map, \
        sd_map, sd_ivar_map, sd_mod_map, sd_mod_intr_map, \
        ad_map, ad_ivar_map, ad_bc_map, ad_bc_ivar_map, ad_mod_map, ad_mod_bc_map, ad_mask_map, \
        ados_map, ados_ivar_map, ados_bc_map, ados_bc_ivar_map, ados_mod_map, ados_mod_bc_map, \
        ados_mask_map, \
        ad_spx_r, ad_spx, ad_spx_ivar, ad_spx_mask, \
        binr, \
        ad_ewmean, ad_ewsdev, ad_mod_ewmean, ad_mod_ewsdev, ad_nbin, \
        ad_bc_ewmean, ad_bc_ewsdev, ad_mod_bc_ewmean, ad_mod_bc_ewsdev, ad_bc_nbin, \
        ados_ewmean, ados_ewsdev, ados_mod_ewmean, ados_mod_ewsdev, ados_nbin, \
        ados_bc_ewmean, ados_bc_ewsdev, ados_mod_bc_ewmean, ados_mod_bc_ewsdev, ados_bc_nbin \
            = asymdrift_fit_maps(kin, disk, rstep, maj_wedge=maj_wedge)
    # Fill the single-row radial-profile table.
    adprof = fileio.init_record_array(1, _ad_meta_dtype(binr.size))
    adprof['MANGAID'] = galmeta.mangaid
    adprof['PLATEIFU'] = galmeta.plateifu
    adprof['BINR'] = binr
    adprof['AD'] = ad_ewmean
    adprof['AD_SDEV'] = ad_ewsdev
    adprof['AD_MOD'] = ad_mod_ewmean
    adprof['AD_MOD_SDEV'] = ad_mod_ewsdev
    adprof['AD_NUSE'] = ad_nbin
    adprof['AD_BC'] = ad_bc_ewmean
    adprof['AD_BC_SDEV'] = ad_bc_ewsdev
    adprof['AD_BC_MOD'] = ad_mod_bc_ewmean
    adprof['AD_BC_MOD_SDEV'] = ad_mod_bc_ewsdev
    adprof['AD_BC_NUSE'] = ad_bc_nbin
    adprof['ADOS'] = ados_ewmean
    adprof['ADOS_SDEV'] = ados_ewsdev
    adprof['ADOS_MOD'] = ados_mod_ewmean
    adprof['ADOS_MOD_SDEV'] = ados_mod_ewsdev
    adprof['ADOS_NUSE'] = ados_nbin
    adprof['ADOS_BC'] = ados_bc_ewmean
    adprof['ADOS_BC_SDEV'] = ados_bc_ewsdev
    adprof['ADOS_BC_MOD'] = ados_mod_bc_ewmean
    adprof['ADOS_BC_MOD_SDEV'] = ados_mod_bc_ewsdev
    adprof['ADOS_BC_NUSE'] = ados_bc_nbin
    # - Combine the data into a single fits file.  The per-tracer model
    #   keywords are renamed with G/S prefixes so both fit in one header.
    prihdr = gas_hdu[0].header.copy()
    prihdr.remove('MODELTYP')
    prihdr.remove('RCMODEL')
    prihdr.remove('DCMODEL')
    prihdr['GMODTYP'] = gas_hdu[0].header['MODELTYP']
    prihdr['GRCMOD'] = gas_hdu[0].header['RCMODEL']
    if 'DCMODEL' in gas_hdu[0].header:
        prihdr['GDCMOD'] = gas_hdu[0].header['DCMODEL']
    prihdr['SMODTYP'] = str_hdu[0].header['MODELTYP']
    prihdr['SRCMOD'] = str_hdu[0].header['RCMODEL']
    if 'DCMODEL' in str_hdu[0].header:
        prihdr['SDCMOD'] = str_hdu[0].header['DCMODEL']
    prihdr['QUAL'] = disk.global_mask
    # Global figure-of-merit entries for the joint fit.
    resid = disk.fom(disk.par)
    prihdr['CHI2'] = (np.sum(resid**2), 'Total chi-square')
    prihdr['RCHI2'] = (prihdr['CHI2']/(resid.size - disk.nfree), 'Total reduced chi-square')
    prihdr['ADWEDGE'] = (maj_wedge, 'Major axis wedge for AD')
    maphdr = fileio.add_wcs(prihdr, kin[0])
    mapmaskhdr = maphdr.copy()
    disk.mbm.to_header(mapmaskhdr)
    # Prefix the per-tracer extensions so their names do not collide.
    for h in gas_hdu[1:]:
        h.name = 'GAS_'+h.name
    for h in str_hdu[1:]:
        h.name = 'STR_'+h.name
    hdus = [fits.PrimaryHDU(header=prihdr)] + gas_hdu[1:] + str_hdu[1:] \
            + [fits.ImageHDU(data=gv_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'BIN_GAS_VEL', bunit='km/s',
                                                           err=True),
                             name='BIN_GAS_VEL'),
               fits.ImageHDU(data=gv_ivar_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'BIN_GAS_VEL',
                                                           bunit='(km/s)^{-2}', hduclas2='ERROR'),
                             name='BIN_GAS_VEL_IVAR'),
               fits.ImageHDU(data=gv_mod_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'BIN_GAS_VEL_MOD',
                                                           bunit='km/s'),
                             name='BIN_GAS_VEL_MOD'),
               fits.ImageHDU(data=gv_mod_intr_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'BIN_GAS_VEL_MODI',
                                                           bunit='km/s'),
                             name='BIN_GAS_VEL_MODI'),
               fits.ImageHDU(data=ad_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'AD', bunit='(km/s)^2',
                                                           err=True, qual=True),
                             name='AD'),
               fits.ImageHDU(data=ad_ivar_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'AD', bunit='(km/s)^{-4}',
                                                           hduclas2='ERROR', qual=True),
                             name='AD_IVAR'),
               fits.ImageHDU(data=ad_mask_map.astype(np.int16),
                             header=fileio.finalize_header(mapmaskhdr, 'AD', hduclas2='QUALITY',
                                                           err=True, bit_type=bool),
                             name='AD_MASK'),
               fits.ImageHDU(data=ad_bc_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'AD_BC', bunit='(km/s)^2',
                                                           err=True),
                             name='AD_BC'),
               fits.ImageHDU(data=ad_bc_ivar_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'AD_BC', bunit='(km/s)^{-4}',
                                                           hduclas2='ERROR'),
                             name='AD_BC_IVAR'),
               fits.ImageHDU(data=ad_mod_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'AD_MOD', bunit='(km/s)^2'),
                             name='AD_MOD'),
               fits.ImageHDU(data=ad_mod_bc_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'AD_MODI', bunit='(km/s)^2'),
                             name='AD_MODI'),
               fits.ImageHDU(data=ados_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'ADOS', err=True, qual=True),
                             name='ADOS'),
               fits.ImageHDU(data=ados_ivar_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'ADOS', hduclas2='ERROR',
                                                           qual=True),
                             name='ADOS_IVAR'),
               fits.ImageHDU(data=ados_mask_map.astype(np.int16),
                             header=fileio.finalize_header(mapmaskhdr, 'ADOS', hduclas2='QUALITY',
                                                           err=True, bit_type=bool),
                             name='ADOS_MASK'),
               fits.ImageHDU(data=ados_bc_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'ADOS_BC', err=True),
                             name='ADOS_BC'),
               fits.ImageHDU(data=ados_bc_ivar_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'ADOS_BC', hduclas2='ERROR'),
                             name='ADOS_BC_IVAR'),
               fits.ImageHDU(data=ados_mod_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'ADOS_MOD'),
                             name='ADOS_MOD'),
               fits.ImageHDU(data=ados_mod_bc_map.filled(0.0),
                             header=fileio.finalize_header(maphdr, 'ADOS_MODI'),
                             name='ADOS_MODI'),
               fits.BinTableHDU.from_columns([fits.Column(name=n,
                                                          format=fileio.rec_to_fits_type(adprof[n]),
                                                          dim=fileio.rec_to_fits_col_dim(adprof[n]),
                                                          array=adprof[n])
                                              for n in adprof.dtype.names],
                                             name='ADPROF')]
    # Construct the HDUList, write it if requested, and return
    hdu = fits.HDUList(hdus)
    if ofile is not None:
        if ofile.split('.')[-1] == 'gz':
            # Write the uncompressed file first, then compress it below.
            _ofile = ofile[:ofile.rfind('.')]
            compress = True
        else:
            _ofile = ofile
            # BUGFIX: ``compress`` was previously left undefined on this
            # branch, raising UnboundLocalError for any non-.gz output file.
            compress = False
        hdu.writeto(_ofile, overwrite=True, checksum=True)
        if compress:
            fileio.compress_file(_ofile, overwrite=True, rm_original=True)
    return hdu
|
ricardoclandimREPO_NAMENIRVANAPATH_START.@NIRVANA_extracted@NIRVANA-master@nirvana@models@multitrace.py@.PATH_END.py
|
{
"filename": "fits.py",
"repo_name": "vortex-exoplanet/VIP",
"repo_path": "VIP_extracted/VIP-master/vip_hci/fits/fits.py",
"type": "Python"
}
|
#! /usr/bin/env python
"""
Module with various fits handling functions.
"""
__author__ = "C. A. Gomez Gonzalez, T. BΓ©drine, V. Christiaens, I. Hammond"
__all__ = ["open_fits", "info_fits", "write_fits", "verify_fits",
"byteswap_array"]
from os.path import isfile, exists
from os import remove
import numpy as np
from astropy.io.fits.convenience import writeto
from astropy.io.fits.hdu.hdulist import fitsopen, HDUList
from astropy.io.fits.hdu.image import ImageHDU
from ..config.paramenum import ALL_FITS
def open_fits(fitsfilename, n=0, header=False, ignore_missing_end=False,
              precision=np.float32, return_memmap=False, verbose=True,
              **kwargs):
    """
    Load a fits file into memory as numpy array.

    Parameters
    ----------
    fitsfilename : string or pathlib.Path
        Name of the fits file or ``pathlib.Path`` object.  A missing
        ``.fits`` suffix is appended automatically.
    n : int, optional
        Which HDU to open (first by default).  If equal to -2, opens and
        returns all extensions.
    header : bool, optional
        Whether to return the header along with the data or not.
    ignore_missing_end : bool optional
        Allows to open fits files with a header missing END card.
    precision : numpy dtype, optional
        Float precision, by default np.float32 or single precision float.
    return_memmap : bool, optional
        If True, return the mmap-backed HDU(s) instead of loading the data
        into memory; useful for arrays too large for physical memory.
    verbose : bool, optional
        If True prints message of completion.
    **kwargs: optional
        Optional arguments to the astropy.io.fits.open() function. E.g.
        "output_verify" can be set to ignore, in case of non-standard header.

    Returns
    -------
    hdulist : HDU or HDUList
        [memmap=True] FITS file ``n`` hdulist.  If n equals -2, returns the
        whole hdulist.
    data : numpy ndarray or list of numpy ndarrays
        [memmap=False] Array containing the frames of the fits-cube.  If n
        equals -2, returns a list of all arrays.
    header : dict or list of dict
        [memmap=False, header=True] Dictionary containing the fits header.
        If n equals -2, returns a list of all dictionaries.
    """
    fitsfilename = str(fitsfilename)
    if not isfile(fitsfilename):
        fitsfilename += ".fits"

    try:
        hdulist = fitsopen(fitsfilename, ignore_missing_end=ignore_missing_end,
                           memmap=True, **kwargs)
    except ValueError:
        # BZERO/BSCALE/BLANK header keywords prevent loading the HDU as a
        # memmap; fall back to fully reading it into memory.
        hdulist = fitsopen(fitsfilename, ignore_missing_end=ignore_missing_end,
                           memmap=False, **kwargs)

    # Opening only a specified extension
    if n != ALL_FITS:
        if return_memmap:
            return hdulist[n]
        data, head = _return_data_fits(hdulist=hdulist, index=n, header=header,
                                       precision=precision, verbose=verbose)
        hdulist.close()
        return (data, head) if header else data

    # Opening all extensions in a MEF
    if return_memmap:
        return hdulist
    data_list = []
    header_list = []
    for index in range(len(hdulist)):
        data, head = _return_data_fits(hdulist=hdulist, index=index,
                                       header=header, precision=precision,
                                       verbose=verbose)
        data_list.append(data)
        header_list.append(head)
    hdulist.close()
    if header:
        if verbose:
            msg = f"All {len(hdulist)} FITS HDU data and headers "
            msg += "successfully loaded."
            print(msg)
        return data_list, header_list
    if verbose:
        print(f"All {len(hdulist)} FITS HDU data successfully loaded.")
    return data_list
def _return_data_fits(hdulist: HDUList,
                      index: int,
                      header: bool = False,
                      precision=np.float32,
                      verbose: bool = True):
    """
    Subfunction used to return data (and header) from a given index.

    Parameters
    ----------
    hdulist : HDUList
        List of FITS cubes with their headers.
    index : int
        The wanted index to extract.
    header : bool, optional
        Only affects the message printed below; the header is returned to
        the caller in either case.
    precision : numpy dtype, optional
        Dtype the HDU data is cast to (np.float32 by default).
    verbose : bool, optional
        If True, print a message stating what was loaded and its shape.

    Returns
    -------
    data : numpy ndarray
        HDU data cast to ``precision``.
    head : Header
        The HDU header object.
    """
    data = hdulist[index].data
    # Casting also makes an in-memory copy, detaching the array from any
    # memmap backing the HDUList.
    data = np.array(data, dtype=precision)
    head = hdulist[index].header
    if verbose:
        if header:
            print(f"FITS HDU-{index} data and header successfully loaded. "
                  f"Data shape: {data.shape}")
        else:
            print(f"FITS HDU-{index} data successfully loaded. "
                  f"Data shape: {data.shape}")
    return data, head
def byteswap_array(array):
    """FITS files are stored in big-endian byte order. All modern CPUs are
    little-endian byte order, so at some point you have to byteswap the data.
    Some FITS readers (cfitsio, the fitsio python module) do the byteswap when
    reading the data from disk to memory, so we get numpy arrays in native
    (little-endian) byte order. Unfortunately, astropy.io.fits does not byteswap
    for us, and we get numpy arrays in non-native byte order. However, most of
    the time we never notice this because when you do any numpy operations on
    such arrays, numpy uses an intermediate buffer to byteswap the array behind
    the scenes and returns the result as a native byte order array. Some
    operations require the data to be byteswaped before and will complain about
    it. This function will help in those cases.

    Parameters
    ----------
    array : numpy ndarray
        2d input array.

    Returns
    -------
    array_out : numpy ndarray
        2d resulting array after the byteswap operation.

    Note
    ----
    More info about byteswapping here:
    https://numpy.org/doc/stable/user/basics.byteswapping.html
    """
    # ``ndarray.newbyteorder`` was removed in NumPy 2.0; reinterpreting the
    # swapped buffer through a dtype with flipped byte order is the
    # recommended replacement and works on NumPy 1.x as well.
    array_out = array.byteswap().view(array.dtype.newbyteorder())
    return array_out
def info_fits(fitsfilename, **kwargs):
    """
    Print the information about a fits file.

    Parameters
    ----------
    fitsfilename : str
        Path to the fits file.
    **kwargs: optional
        Optional arguments to the astropy.io.fits.open() function. E.g.
        "output_verify" can be set to ignore, in case of non-standard header.
    """
    hdulist = fitsopen(fitsfilename, memmap=True, **kwargs)
    try:
        hdulist.info()
    finally:
        # Always release the file handle, even if info() raises.
        hdulist.close()
def verify_fits(fitsfilename):
    """
    Verify "the FITS standard" of a fits file or list of fits.

    Parameters
    ----------
    fitsfilename : string or list
        Path to the fits file or list with fits filename paths.
    """
    # Normalize the single-path case to a one-element list so a single loop
    # handles both input forms.
    filenames = fitsfilename if isinstance(fitsfilename, list) else [fitsfilename]
    for name in filenames:
        with fitsopen(name) as hdul:
            hdul.verify()
def write_fits(fitsfilename, array, header=None, output_verify="exception",
               precision=np.float32, verbose=True):
    """
    Write array and header into FITS file.

    If there is a previous file with the same filename then it's replaced.

    Parameters
    ----------
    fitsfilename : string
        Full path of the fits file to be written.
    array : numpy ndarray or tuple of numpy ndarray
        Array(s) to be written into a fits file. If a tuple of several arrays,
        the fits file will be written as a multiple extension fits file
    header : numpy ndarray, or tuple of headers, optional
        Header dictionary, or tuple of headers for a multiple extension fits
        file.
    output_verify : str, optional
        {"fix", "silentfix", "ignore", "warn", "exception"}
        Verification options:
        https://docs.astropy.org/en/stable/io/fits/api/verification.html
    precision : numpy dtype, optional
        Float precision, by default np.float32 or single precision float.
    verbose : bool, optional
        If True prints message.
    """
    if not fitsfilename.endswith(".fits"):
        fitsfilename += ".fits"

    # Remove any pre-existing file so the write always succeeds, and record
    # which message to print afterwards.
    res = "saved"
    if exists(fitsfilename):
        remove(fitsfilename)
        res = "overwritten"

    if isinstance(array, tuple):
        # Multiple-extension file: normalize ``header`` to one entry per
        # array, then append one ImageHDU per (array, header) pair.
        if header is None:
            header = [None] * len(array)
        elif not isinstance(header, tuple):
            header = [header] * len(array)
        elif len(header) != len(array):
            msg = "If input header is a tuple, it should have the same length "
            msg += "as tuple of arrays."
            raise ValueError(msg)
        new_hdul = HDUList()
        for arr, hdr in zip(array, header):
            new_hdul.append(ImageHDU(arr.astype(precision, copy=False),
                                     header=hdr))
        new_hdul.writeto(fitsfilename, output_verify=output_verify)
    else:
        writeto(fitsfilename, array.astype(precision, copy=False), header,
                output_verify)

    if verbose:
        print(f"FITS file successfully {res}")
|
vortex-exoplanetREPO_NAMEVIPPATH_START.@VIP_extracted@VIP-master@vip_hci@fits@fits.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "spacetelescope/calcos",
"repo_path": "calcos_extracted/calcos-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
from setuptools import setup, Extension
from numpy import get_include as numpy_includes
from pathlib import Path
def c_sources(parent: str) -> list[str]:
    """Collect every ``*.c`` file directly under *parent* as a path string."""
    matches = Path(parent).glob("*.c")
    return [str(match) for match in matches]
def c_includes(parent: str, depth: int = 1):
    """Return *parent* plus its immediate subdirectories whose path depth
    (number of path components minus one) does not exceed *depth*.

    NOTE(review): ``iterdir`` only lists direct children, so the depth test
    only has an effect when *parent* itself is more than one component
    deep -- confirm this filter is intentional.
    """
    includes = [parent]
    for child in Path(parent).iterdir():
        if child.is_dir() and len(child.parts) - 1 <= depth:
            includes.append(str(child))
    return includes
# Distribution name; also the package under which the compiled extension lives.
PACKAGENAME = "calcos"
# All C sources live flat in ``src``.
SOURCES = c_sources("src")
# Header search paths: ``src`` (and its immediate subdirectories) plus numpy's.
INCLUDES = c_includes("src") + [numpy_includes()]

# Build the compiled helper module as ``calcos.ccos``; all other packaging
# metadata is expected to come from the project's configuration files.
setup(
    ext_modules=[
        Extension(
            PACKAGENAME + ".ccos",
            sources=SOURCES,
            include_dirs=INCLUDES,
        ),
    ],
)
|
spacetelescopeREPO_NAMEcalcosPATH_START.@calcos_extracted@calcos-master@setup.py@.PATH_END.py
|
{
"filename": "text_dataset_utils_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/utils/text_dataset_utils_test.py",
"type": "Python"
}
|
import os
import random
import string
from keras.src import testing
from keras.src.utils import text_dataset_utils
class TextDatasetFromDirectoryTest(testing.TestCase):
def _prepare_directory(
self, num_classes=2, nested_dirs=False, count=16, length=20
):
# Get a unique temp directory
temp_dir = self.get_temp_dir()
# Generate paths to class subdirectories
paths = []
for class_index in range(num_classes):
class_directory = f"class_{class_index}"
if nested_dirs:
class_paths = [
class_directory,
os.path.join(class_directory, "subfolder_1"),
os.path.join(class_directory, "subfolder_2"),
os.path.join(
class_directory, "subfolder_1", "sub-subfolder"
),
]
else:
class_paths = [class_directory]
for path in class_paths:
os.mkdir(os.path.join(temp_dir, path))
paths += class_paths
for i in range(count):
path = paths[i % len(paths)]
filename = os.path.join(path, f"text_{i}.txt")
with open(os.path.join(temp_dir, filename), "w") as f:
text = "".join(
[random.choice(string.printable) for _ in range(length)]
)
f.write(text)
return temp_dir
    def test_text_dataset_from_directory_standalone(self):
        """Files are picked up from the root dir and subdirs when labels are
        disabled: 7 files in class dirs + 3 extra in the root = 10 samples."""
        # Test retrieving txt files without labels from a directory and its
        # subdirs. Save a few extra files in the parent directory.
        directory = self._prepare_directory(count=7, num_classes=2)
        for i in range(3):
            filename = f"text_{i}.txt"
            with open(os.path.join(directory, filename), "w") as f:
                text = "".join(
                    [random.choice(string.printable) for _ in range(20)]
                )
                f.write(text)
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=5, label_mode=None, max_length=10
        )
        batch = next(iter(dataset))
        # We just return the texts, no labels
        self.assertEqual(batch.shape, (5,))
        self.assertEqual(batch.dtype.name, "string")
        # Count samples: 10 files at batch_size=5 -> exactly 2 batches.
        batch_count = 0
        sample_count = 0
        for batch in dataset:
            batch_count += 1
            sample_count += batch.shape[0]
        self.assertEqual(batch_count, 2)
        self.assertEqual(sample_count, 10)
    def test_text_dataset_from_directory_binary(self):
        """Two-class dataset: check batch structure for each label_mode."""
        directory = self._prepare_directory(num_classes=2)
        # label_mode="int": scalar int32 labels; also check max_length clips.
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode="int", max_length=10
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(len(batch[0].numpy()[0]), 10)  # Test max_length
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")
        # label_mode="binary": float32 labels of shape (batch, 1).
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode="binary"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(batch[1].shape, (8, 1))
        self.assertEqual(batch[1].dtype.name, "float32")
        # label_mode="categorical": one-hot float32 labels, one column per class.
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode="categorical"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(batch[1].shape, (8, 2))
        self.assertEqual(batch[1].dtype.name, "float32")
def test_sample_count(self):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode=None
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 15)
    def test_text_dataset_from_directory_multiclass(self):
        """Four-class dataset: batch shapes and sample counts per label_mode."""
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode=None
        )
        batch = next(iter(dataset))
        self.assertEqual(batch.shape, (8,))
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode=None
        )
        sample_count = 0
        # NOTE(review): this advances a second, independent iterator in step
        # with the for-loop; both yield the same number of batches (8 + 7),
        # so the total is still 15 -- presumably checking re-iterability.
        iterator = iter(dataset)
        for batch in dataset:
            sample_count += next(iterator).shape[0]
        self.assertEqual(sample_count, 15)
        # label_mode="int": scalar int32 labels.
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode="int"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")
        # label_mode="categorical": one-hot labels with 4 columns.
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory, batch_size=8, label_mode="categorical"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        self.assertEqual(batch[0].dtype.name, "string")
        self.assertEqual(batch[1].shape, (8, 4))
        self.assertEqual(batch[1].dtype.name, "float32")
    def test_text_dataset_from_directory_validation_split(self):
        """A 0.2 split of 10 files yields 8 training and 2 validation samples,
        whether the subsets are requested separately or via subset="both"."""
        directory = self._prepare_directory(num_classes=2, count=10)
        # Training subset: 80% of 10 files = 8 samples.
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=10,
            validation_split=0.2,
            subset="training",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        # Validation subset: the remaining 2 samples (same seed -> same split).
        dataset = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=10,
            validation_split=0.2,
            subset="validation",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (2,))
        # subset="both" returns the (train, validation) pair in one call.
        (
            train_dataset,
            val_dataset,
        ) = text_dataset_utils.text_dataset_from_directory(
            directory,
            batch_size=10,
            validation_split=0.2,
            subset="both",
            seed=1337,
        )
        batch = next(iter(train_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (8,))
        batch = next(iter(val_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (2,))
def test_text_dataset_from_directory_manual_labels(self):
directory = self._prepare_directory(num_classes=2, count=2)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, labels=[0, 1], shuffle=False
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertAllClose(batch[1], [0, 1])
def test_text_dataset_from_directory_follow_links(self):
directory = self._prepare_directory(
num_classes=2, count=25, nested_dirs=True
)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode=None, follow_links=True
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 25)
def test_text_dataset_from_directory_no_files(self):
directory = self._prepare_directory(num_classes=2, count=0)
with self.assertRaisesRegex(ValueError, "No text files found"):
_ = text_dataset_utils.text_dataset_from_directory(directory)
def test_text_dataset_from_directory_errors(self):
    """Invalid argument combinations raise ValueError with clear messages."""
    directory = self._prepare_directory(num_classes=3, count=5)
    # Pairs of (expected error-message pattern, offending kwargs).
    error_cases = [
        ("`labels` argument should be", {"labels": "other"}),
        ("`label_mode` argument must be", {"label_mode": "other"}),
        (
            'only pass `class_names` if `labels="inferred"`',
            {
                "labels": [0, 0, 1, 1, 1],
                "class_names": ["class_0", "class_1", "class_2"],
            },
        ),
        (
            "Expected the lengths of `labels` to match the number of files",
            {"labels": [0, 0, 1, 1]},
        ),
        (
            "`class_names` passed did not match",
            {"class_names": ["class_0", "wrong_class"]},
        ),
        ("there must be exactly 2", {"label_mode": "binary"}),
        (
            "`validation_split` must be between 0 and 1",
            {"validation_split": 2},
        ),
        (
            '`subset` must be either "training", "validation" or "both"',
            {"validation_split": 0.2, "subset": "other"},
        ),
        (
            "`validation_split` must be set",
            {"validation_split": 0.0, "subset": "training"},
        ),
        (
            "must provide a `seed`",
            {"validation_split": 0.2, "subset": "training"},
        ),
    ]
    for pattern, kwargs in error_cases:
        with self.assertRaisesRegex(ValueError, pattern):
            text_dataset_utils.text_dataset_from_directory(
                directory, **kwargs
            )
def test_text_dataset_from_directory_not_batched(self):
    """batch_size=None yields individual rank-0 (scalar string) samples."""
    directory = self._prepare_directory()
    ds = text_dataset_utils.text_dataset_from_directory(
        directory, batch_size=None, label_mode=None, follow_links=True
    )
    first_sample = next(iter(ds))
    self.assertEqual(len(first_sample.shape), 0)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@utils@text_dataset_utils_test.py@.PATH_END.py
|
{
"filename": "_showcoastlines.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/geo/_showcoastlines.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowcoastlinesValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the `layout.geo.showcoastlines` property."""

    def __init__(
        self, plotly_name="showcoastlines", parent_name="layout.geo", **kwargs
    ):
        # The default edit type applies only when the caller did not set one.
        edit_type = kwargs.pop("edit_type", "plot")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@geo@_showcoastlines.py@.PATH_END.py
|
{
"filename": "_colorssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/pie/marker/_colorssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-array validator for the `pie.marker.colorssrc` property."""

    def __init__(self, plotly_name="colorssrc", parent_name="pie.marker", **kwargs):
        # Defaults apply only when the caller did not override them.
        # (Two-argument super() form kept for Python 2 compatibility.)
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(ColorssrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@pie@marker@_colorssrc.py@.PATH_END.py
|
{
"filename": "test_simulator.py",
"repo_name": "peregrine-gw/saqqara",
"repo_path": "saqqara_extracted/saqqara-main/testing/test_simulator.py",
"type": "Python"
}
|
import unittest
import saqqara
import os
import numpy as np
import swyft
TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), "test_data/")
class TestSimulator(unittest.TestCase):
    """End-to-end checks of the SaqqaraSim simulator built from defaults."""

    def test_simulator(self):
        """Build a simulator from the default config and verify its prior.

        Checks prior sample shape, parameter bounds/names, the sample
        transform, graph nodes, reported shapes/dtypes, and a joint sample.
        """
        config_path = (
            os.path.join(os.path.dirname(saqqara.__file__), "defaults")
            + "/default_config.yaml"
        )
        config = saqqara.load_settings(config_path)
        settings = saqqara.get_settings(config)
        saqqara_sim = saqqara.SaqqaraSim(settings)

        prior_sample = saqqara_sim.sample_prior(10000)
        self.assertEqual(prior_sample.shape, (10000, 4))

        # Compute the bounds comparison once instead of duplicating the
        # whole expression in both the assertion and the debug print.
        expected_bounds = np.array(
            [[-20.0, -5.0], [-5.0, 5.0], [0.0, 6.0], [0.0, 30.0]]
        )
        bounds_ok = np.all(saqqara_sim.prior.bounds - expected_bounds == 0.0)
        self.assertTrue(bounds_ok)
        print(bounds_ok)

        self.assertListEqual(
            saqqara_sim.prior.parnames, ["amp", "tilt", "TM", "OMS"]
        )
        self.assertEqual(saqqara_sim.prior.name, "prior")
        self.assertEqual(saqqara_sim.prior.transform_samples, swyft.to_numpy32)
        self.assertTrue("z" in saqqara_sim.graph.nodes)

        shapes, dtypes = saqqara_sim.get_shapes_and_dtypes()
        self.assertEqual(shapes, {"z": (4,)})
        self.assertEqual(dtypes, {"z": np.float32})

        sample = saqqara_sim.sample(10)
        self.assertEqual(sample["z"].shape, (10, 4))
if __name__ == "__main__":
    # Allow running this test module directly; verbosity=2 prints each test name.
    unittest.main(verbosity=2)
|
peregrine-gwREPO_NAMEsaqqaraPATH_START.@saqqara_extracted@saqqara-main@testing@test_simulator.py@.PATH_END.py
|
{
"filename": "red_blue.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/emcee_ES/moves/red_blue.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import numpy as np
from ..state import State
from .move import Move
__all__ = ["RedBlueMove"]
class RedBlueMove(Move):
    """
    An abstract red-blue ensemble move with parallelization as described in
    `Foreman-Mackey et al. (2013) <https://arxiv.org/abs/1202.3665>`_.

    Args:
        nsplits (Optional[int]): The number of sub-ensembles to use. Each
            sub-ensemble is updated in parallel using the other sets as the
            complementary ensemble. The default value is ``2`` and you
            probably won't need to change that.
        randomize_split (Optional[bool]): Randomly shuffle walkers between
            sub-ensembles. The same number of walkers will be assigned to
            each sub-ensemble on each iteration. By default, this is ``True``.
        live_dangerously (Optional[bool]): By default, an update will fail with
            a ``RuntimeError`` if the number of walkers is smaller than twice
            the dimension of the problem because the walkers would then be
            stuck on a low dimensional subspace. This can be avoided by
            switching between the stretch move and, for example, a
            Metropolis-Hastings step. If you want to do this and suppress the
            error, set ``live_dangerously = True``. Thanks goes (once again)
            to @dstndstn for this wonderful terminology.
    """

    def __init__(
        self, nsplits=2, randomize_split=True, live_dangerously=False
    ):
        self.nsplits = int(nsplits)
        self.live_dangerously = live_dangerously
        self.randomize_split = randomize_split

    def setup(self, coords):
        # Hook for subclasses: called once per proposal with the current
        # walker coordinates before the sub-ensemble loop.
        pass

    def get_proposal(self, sample, complement, random):
        # Subclasses must return (proposed_coords, log_proposal_factors).
        raise NotImplementedError(
            "The proposal must be implemented by " "subclasses"
        )

    def propose(self, model, state):
        """Use the move to generate a proposal and compute the acceptance

        Args:
            coords: The initial coordinates of the walkers.
            log_probs: The initial log probabilities of the walkers.
            log_prob_fn: A function that computes the log probabilities for a
                subset of walkers.
            random: A numpy-compatible random number state.
        """
        # Check that the dimensions are compatible.
        nwalkers, ndim = state.coords.shape
        if nwalkers < 2 * ndim and not self.live_dangerously:
            raise RuntimeError(
                "It is unadvisable to use a red-blue move "
                "with fewer walkers than twice the number of "
                "dimensions."
            )

        # Run any move-specific setup.
        self.setup(state.coords)

        # Split the ensemble in half and iterate over these two halves.
        accepted = np.zeros(nwalkers, dtype=bool)
        all_inds = np.arange(nwalkers)
        # Assign walker i to sub-ensemble (i mod nsplits); optionally shuffle
        # the assignment so the split differs between iterations.
        inds = all_inds % self.nsplits
        if self.randomize_split:
            model.random.shuffle(inds)

        for split in range(self.nsplits):
            S1 = inds == split

            # Get the two halves of the ensemble.
            sets = [state.coords[inds == j] for j in range(self.nsplits)]
            s = sets[split]
            # The complement is every sub-ensemble except the current one.
            c = sets[:split] + sets[split + 1 :]

            # Get the move-specific proposal.
            q, factors = self.get_proposal(s, c, model.random)

            # Compute the lnprobs of the proposed position.
            new_log_probs, new_blobs = model.compute_log_prob_fn(q)

            # Loop over the walkers and update them accordingly.
            # Metropolis-Hastings acceptance: log proposal factor plus the
            # log-probability difference, compared against log(uniform).
            for i, (j, f, nlp) in enumerate(
                zip(all_inds[S1], factors, new_log_probs)
            ):
                lnpdiff = f + nlp - state.log_prob[j]
                if lnpdiff > np.log(model.random.rand()):
                    accepted[j] = True

            new_state = State(q, log_prob=new_log_probs, blobs=new_blobs)
            # Merge accepted walkers of this sub-ensemble into the state
            # before proposing for the next sub-ensemble.
            state = self.update(state, new_state, accepted, S1)

        return state, accepted
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@emcee_ES@moves@red_blue.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "kbarbary/extinction",
"repo_path": "extinction_extracted/extinction-main/testdata/README.md",
"type": "Markdown"
}
|
Test data
---------
This directory contains comparison files used in the tests.
These are outputs from running the IDL procedure `FM_UNRED` from the
IDL Astro library: http://idlastro.gsfc.nasa.gov/ftp/pro/astro/
(`FM_UNRED` also requires the `CSPLINE` procedure.)
The script used to generate the output is included here.
|
kbarbaryREPO_NAMEextinctionPATH_START.@extinction_extracted@extinction-main@testdata@README.md@.PATH_END.py
|
{
"filename": "var_exts_framework.py",
"repo_name": "Keck-DataReductionPipelines/KPF-Pipeline",
"repo_path": "KPF-Pipeline_extracted/KPF-Pipeline-master/modules/var_exts/src/var_exts_framework.py",
"type": "Python"
}
|
import os
from os.path import exists
import numpy as np
import configparser as cp
import psycopg2
import ast
# Pipeline dependencies
from kpfpipe.logger import *
from kpfpipe.models.level0 import KPF0
from kpfpipe.primitives.level0 import KPF0_Primitive
from kpfpipe.pipelines.fits_primitives import to_fits
from keckdrpframework.models.arguments import Arguments
# Global read-only variables
DEFAULT_CFG_PATH = 'modules/var_exts/configs/default.cfg'
debug = 1
class VarExtsFramework(KPF0_Primitive):
    """
    Description:
        Input L0 filename and database primary key rId for the L0Files database table.
        Select the record from the ReadNoise database table, and square for the read-noise variances.
        Gather all the other variances, sum them all, and write the resulting total variance images
        to FITS extensions ['GREEN_VAR','RED_VAR'] in the associated 2D FITS file.

    Arguments:
        data_type (str): Type of data (e.g., KPF).
        l0_filename (str): Full path and filename of L0 FITS file within container.
        masterbias_path (str): Input master bias.
        masterdark_path (str): Input master dark.
        masterflat_path (str): Input master flat.
        rId (int): Primary database key of L0 FITS file in L0Files database record.
    """

    def __init__(self, action, context):
        KPF0_Primitive.__init__(self, action, context)

        # Positional action arguments; see the class docstring for meanings.
        self.data_type = self.action.args[0]
        self.l0_filename = self.action.args[1]
        self.masterbias_path = self.action.args[2]
        self.masterdark_path = self.action.args[3]
        self.masterflat_path = self.action.args[4]
        self.rId = self.action.args[5]

        # Fall back to the module default config when the pipeline context
        # does not supply one.
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        try:
            self.module_config_path = context.config_path['var_exts']
            print("--->",self.__class__.__name__,": self.module_config_path =",self.module_config_path)
        except:
            self.module_config_path = DEFAULT_CFG_PATH
        print("{} class: self.module_config_path = {}".format(self.__class__.__name__,self.module_config_path))

        print("Starting logger...")
        self.logger = start_logger(self.__class__.__name__, self.module_config_path)

        if self.logger is not None:
            print("--->self.logger is not None...")
        else:
            print("--->self.logger is None...")

        self.logger.info('Started {}'.format(self.__class__.__name__))
        self.logger.debug('module_config_path = {}'.format(self.module_config_path))

        # Read module parameters from the [PARAM] section of the config file.
        module_config_obj = cp.ConfigParser()
        res = module_config_obj.read(self.module_config_path)
        if res == []:
            raise IOError('failed to read {}'.format(self.module_config_path))
        module_param_cfg = module_config_obj['PARAM']

        # rn_flag controls whether read noise comes from the database (0)
        # or fixed defaults (nonzero); see _perform.
        rn_flag_cfg_str = module_param_cfg.get('rn_flag')
        self.rn_flag_cfg = ast.literal_eval(rn_flag_cfg_str)

        self.logger.info('self.data_type = {}'.format(self.data_type))
        self.logger.info('self.l0_filename = {}'.format(self.l0_filename))
        self.logger.info('self.masterbias_path = {}'.format(self.masterbias_path))
        self.logger.info('self.masterdark_path = {}'.format(self.masterdark_path))
        self.logger.info('self.masterflat_path = {}'.format(self.masterflat_path))
        self.logger.info('self.rId = {}'.format(self.rId))
        self.logger.info('self.rn_flag_cfg = {}'.format(self.rn_flag_cfg))
        self.logger.info('Type of self.rn_flag_cfg = {}'.format(type(self.rn_flag_cfg)))

    def select_read_noise(self,input_rid):
        """Query the ReadNoise table for per-amplifier read-noise values.

        Returns a 9-tuple (exit_code, rngreen1..4, rnred1..4) on success,
        a bare int exit code on query failure (66/67), or Arguments(64) when
        the database connection fails.
        NOTE(review): those three return types are inconsistent — confirm the
        caller (_perform) handles the non-tuple error paths.
        """
        var_exts_exit_code = 0

        # Get database connection parameters from environment.
        dbport = os.getenv('DBPORT')
        dbname = os.getenv('DBNAME')
        dbuser = os.getenv('DBUSER')
        dbpass = os.getenv('DBPASS')
        dbserver = os.getenv('DBSERVER')

        # Connect to database
        try:
            conn = psycopg2.connect(host=dbserver,database=dbname,port=dbport,user=dbuser,password=dbpass)
        except:
            self.logger.info('Could not connect to database...')
            var_exts_exit_code = 64
            return Arguments(var_exts_exit_code)

        # Open database cursor.
        cur = conn.cursor()

        # Select database version.
        q1 = 'SELECT version();'
        self.logger.info('q1 = {}'.format(q1))
        cur.execute(q1)
        db_version = cur.fetchone()
        self.logger.info('PostgreSQL database version = {}'.format(db_version))

        # Check database current_user.
        q2 = 'SELECT current_user;'
        self.logger.info('q2 = {}'.format(q2))
        cur.execute(q2)
        for record in cur:
            self.logger.info('record = {}'.format(record))
            pass

        ###########################################################################
        ###########################################################################

        # Execute query.
        # NOTE(review): the query uses self.rId rather than the input_rid
        # parameter, which is otherwise unused — confirm that is intended.
        query = "SELECT rngreen1,rngreen2,rngreen3,rngreen4,rnred1,rnred2,rnred3,rnred4 from ReadNoise where rId = " +\
            str(self.rId) + ";"
        self.logger.info('query = {}'.format(query))
        try:
            cur.execute(query)
            record = cur.fetchone()

            if record is not None:
                rngreen1 = record[0]
                rngreen2 = record[1]
                rngreen3 = record[2]
                rngreen4 = record[3]
                rnred1 = record[4]
                rnred2 = record[5]
                rnred3 = record[6]
                rnred4 = record[7]
                self.logger.info(record)
            else:
                self.logger.info("Database record not found; skipping...")
                var_exts_exit_code = 66
                return var_exts_exit_code
        except (Exception, psycopg2.DatabaseError) as error:
            self.logger.info('*** Error selecting record ({}); skipping...'.format(error))
            var_exts_exit_code = 67
            return var_exts_exit_code

        ###########################################################################
        ###########################################################################

        # Close database cursor and then connection.
        try:
            cur.close()
        except (Exception, psycopg2.DatabaseError) as error:
            self.logger.info('*** Error closing database connection ({}); skipping...'.format(error))
            var_exts_exit_code = 2
        finally:
            if conn is not None:
                conn.close()

        self.logger.info('rngreen1 = {}'.format(rngreen1))
        self.logger.info('rngreen2 = {}'.format(rngreen2))
        self.logger.info('rngreen3 = {}'.format(rngreen3))
        self.logger.info('rngreen4 = {}'.format(rngreen4))
        self.logger.info('rnred1 = {}'.format(rnred1))
        self.logger.info('rnred2 = {}'.format(rnred2))
        self.logger.info('rnred3 = {}'.format(rnred3))
        self.logger.info('rnred4 = {}'.format(rnred4))

        return var_exts_exit_code,rngreen1,rngreen2,rngreen3,rngreen4,rnred1,rnred2,rnred3,rnred4

    def assemble_read_noise_var_images(self,rngreen1,rngreen2,rngreen3,rngreen4,rnred1,rnred2,rnred3,rnred4):
        """Build constant read-noise variance images for the GREEN/RED CCDs.

        Returns (exp_time, rngreenvarimg, rnredvarimg); returns None early
        when the associated 2D FITS file does not exist.
        """
        # A missing amp-3 value means the chip was read out with 2 amplifiers.
        if rngreen3 is None:
            num_amps_green = 2
        else:
            num_amps_green = 4
        if rnred3 is None:
            num_amps_red = 2
        else:
            num_amps_red = 4

        # Read image data object from 2D FITS file.
        fits_filename = self.l0_filename
        fits_filename = fits_filename.replace('L0', '2D')
        fits_filename = fits_filename.replace('.fits', '_2D.fits')
        fits_filename_exists = exists(fits_filename)
        if not fits_filename_exists:
            self.logger.info('*** 2D file does not exist ({}); skipping...'.format(fits_filename))
            return

        hdul_input = KPF0.from_fits(fits_filename,self.data_type)
        exp_time = float(hdul_input.header['PRIMARY']['EXPTIME'])

        if debug == 1:
            print("exp_time = {}".format(exp_time))

        exts = ['GREEN_CCD','RED_CCD']
        rngreenvarimg = None
        rnredvarimg = None
        for ext in exts:
            # Skip chips whose dimensions are absent from the header.
            try:
                naxis1 = hdul_input.header[ext]["NAXIS1"]
            except:
                continue
            try:
                naxis2 = hdul_input.header[ext]["NAXIS2"]
            except:
                continue
            if debug == 1:
                print("ext,naxis1,naxis2 = {},{},{}".\
                    format(ext,naxis1,naxis2))
            if 'GREEN' in ext:
                num_amps = num_amps_green
                rn1 = rngreen1
                rn2 = rngreen2
                rn3 = rngreen3
                rn4 = rngreen4
            else:
                num_amps = num_amps_red
                rn1 = rnred1
                rn2 = rnred2
                rn3 = rnred3
                rn4 = rnred4
            if num_amps == 2:
                # Two amps: image split left/right; variance = read noise squared.
                ny = naxis2
                nx = int(naxis1 / 2)
                var1 = rn1 * rn1
                var2 = rn2 * rn2
                amp1 = np.full((ny,nx),var1,dtype=float)
                amp2 = np.full((ny,nx),var2,dtype=float)
                var_img = np.concatenate((amp1, amp2), axis=1)
            else:
                # Four amps: image split into quadrants (top and bottom halves).
                ny = int(naxis2 / 2)
                nx = int(naxis1 / 2)
                var1 = rn1 * rn1
                var2 = rn2 * rn2
                var3 = rn3 * rn3
                var4 = rn4 * rn4
                amp1 = np.full((ny,nx),var1,dtype=float)
                amp2 = np.full((ny,nx),var2,dtype=float)
                amp3 = np.full((ny,nx),var3,dtype=float)
                amp4 = np.full((ny,nx),var4,dtype=float)
                img_top = np.concatenate((amp1, amp2), axis=1)
                img_bot = np.concatenate((amp3, amp4), axis=1)
                var_img = np.concatenate((img_top, img_bot), axis=0)
            if 'GREEN' in ext:
                rngreenvarimg = var_img
            else:
                rnredvarimg = var_img

        return exp_time,rngreenvarimg,rnredvarimg

    def assemble_var_images(self, fits_filename):
        """Square the GREEN/RED uncertainty extensions of a master file.

        Returns (greenvarimg, redvarimg); an entry is None when the
        corresponding *_CCD_UNC extension is missing.  Returns None early
        when the master file does not exist.
        """
        # Read image data object from master file.
        fits_filename_exists = exists(fits_filename)
        if not fits_filename_exists:
            self.logger.info('*** Master file does not exist ({}); skipping...'.format(fits_filename))
            return

        hdul_input = KPF0.from_fits(fits_filename,self.data_type)

        exts = ['GREEN_CCD_UNC','RED_CCD_UNC']
        greenvarimg = None
        redvarimg = None
        for ext in exts:
            try:
                unc_img = np.array(hdul_input[ext])
            except:
                continue
            # Variance = uncertainty squared.
            var_img = unc_img * unc_img
            if 'GREEN' in ext:
                greenvarimg = var_img
            else:
                redvarimg = var_img

        return greenvarimg,redvarimg

    def assemble_ccd_images(self):
        """Load the GREEN/RED CCD images from the associated 2D FITS file.

        Negative pixel values are clipped to zero so they can be used as
        photon-noise variance terms.  Returns (greenccdimg, redccdimg);
        returns None early when the 2D file does not exist.
        """
        # Read image data object from 2D FITS file.
        fits_filename = self.l0_filename
        fits_filename = fits_filename.replace('L0', '2D')
        fits_filename = fits_filename.replace('.fits', '_2D.fits')
        fits_filename_exists = exists(fits_filename)
        if not fits_filename_exists:
            self.logger.info('*** 2D file does not exist ({}); skipping...'.format(fits_filename))
            return

        hdul_input = KPF0.from_fits(fits_filename,self.data_type)

        exts = ['GREEN_CCD','RED_CCD']
        greenccdimg = None
        redccdimg = None
        for ext in exts:
            try:
                ccd_img = np.array(hdul_input[ext])
            except:
                continue
            ccd_img = np.where(ccd_img >= 0.0, ccd_img, 0.0) # Ensure the photon noise is positive.
            if 'GREEN' in ext:
                greenccdimg = ccd_img
            else:
                redccdimg = ccd_img

        return greenccdimg,redccdimg

    def write_var_exts(self,greenvarimg,redvarimg):
        """Write GREEN_VAR/RED_VAR variance images into the 2D FITS file.

        Skips either extension whose image is None; rewrites the file in
        place after removing the empty AMP extensions.
        """
        fits_filename = self.l0_filename
        fits_filename = fits_filename.replace('L0', '2D')
        fits_filename = fits_filename.replace('.fits', '_2D.fits')
        fits_filename_exists = exists(fits_filename)
        if not fits_filename_exists:
            self.logger.info('*** 2D File does not exist ({}); skipping...'.format(fits_filename))
            return

        fits_obj = KPF0.from_fits(fits_filename,self.data_type)

        exts = ['GREEN_VAR','RED_VAR']
        for ext in exts:
            if 'GREEN' in ext:
                if greenvarimg is None:
                    continue
                else:
                    img = np.array(greenvarimg)
            else:
                if redvarimg is None:
                    continue
                else:
                    img = np.array(redvarimg)
            img_shape = np.shape(img)
            self.logger.info('--->ext,img_shape = {},{}'.format(ext,img_shape))
            fits_obj[ext] = img.astype(np.float32)
            fits_obj.header[ext]['BUNIT'] = ('electrons','Units of variance')

        # Remove any AMP extensions (which are automatically re-added as empty extensions for L0 FITS objects).
        del_ext_list = ['GREEN_AMP1','GREEN_AMP2','GREEN_AMP3','GREEN_AMP4','RED_AMP1','RED_AMP2','RED_AMP3','RED_AMP4']
        for ext in del_ext_list:
            try:
                fits_obj.del_extension(ext)
            except:
                pass

        fits_obj.to_fits(fits_filename)

        return

    def _perform(self):
        """
        Perform the following steps:
        1. Connect to pipeline-operations database
        2. Perform calculations for record(s):
           a. if self.rn_flag_cfg == 0, select record from ReadNoise database table
              for given rId.
           b. if self.rn_flag_cfg == 1, skip this step.
        3. Disconnect from database.

        Returns exitcode:
             0 = Normal
             2 = Exception raised closing database connection
            64 = Cannot connect to database
            65 = Input L0 file does not exist
            66 = Database record for rId not found
            67 = Could not select database record
            68 = Input master bias does not exist
            69 = Input master dark does not exist
            70 = Input master flat does not exist
        """
        var_exts_exit_code = 0

        # See if input L0 file exists.
        isExist = os.path.exists(self.l0_filename)
        self.logger.info('File existence = {}'.format(isExist))
        if isExist is False:
            self.logger.info('Input L0 file does not exist ({})...'.format(self.l0_filename))
            var_exts_exit_code = 65
            return var_exts_exit_code

        # See if input master files exist.
        isExist1 = os.path.exists(self.masterbias_path)
        self.logger.info('File existence = {}'.format(isExist1))
        if isExist1 is False:
            self.logger.info('Input master file does not exist ({})...'.format(self.masterbias_path))
            var_exts_exit_code = 68
            return var_exts_exit_code
        isExist2 = os.path.exists(self.masterdark_path)
        self.logger.info('File existence = {}'.format(isExist2))
        if isExist2 is False:
            self.logger.info('Input master file does not exist ({})...'.format(self.masterdark_path))
            var_exts_exit_code = 69
            return var_exts_exit_code
        isExist3 = os.path.exists(self.masterflat_path)
        self.logger.info('File existence = {}'.format(isExist3))
        if isExist3 is False:
            self.logger.info('Input master file does not exist ({})...'.format(self.masterflat_path))
            var_exts_exit_code = 70
            return var_exts_exit_code

        ###########################################################################
        # Perform calculation for read noise.
        ###########################################################################

        if self.rn_flag_cfg == 0:
            # Select read noise for a single L0 FITS file via database query.
            # NOTE(review): select_read_noise returns a bare int on error
            # paths (64/66/67), which would make this 9-way unpack raise —
            # confirm that is acceptable upstream.
            var_exts_exit_code,rngreen1,rngreen2,rngreen3,rngreen4,rnred1,rnred2,rnred3,rnred4 =\
                self.select_read_noise(self.rId)
        else:
            # Fixed fallback read-noise values (electrons) when the database
            # lookup is disabled via the rn_flag config parameter.
            rngreen1 = 4.0
            rngreen2 = 4.0
            rngreen3 = 4.0
            rngreen4 = 4.0
            rnred1 = 4.0
            rnred2 = 4.0
            rnred3 = 4.0
            rnred4 = 4.0

        # Assemble CCD images.
        greenccdimg,redccdimg = self.assemble_ccd_images()

        # Assemble read-noise variance images.
        exp_time,rn_greenvarimg,rn_redvarimg = \
            self.assemble_read_noise_var_images(rngreen1,rngreen2,rngreen3,rngreen4,rnred1,rnred2,rnred3,rnred4)

        # Assemble master-file variance images.
        bias_greenvarimg,bias_redvarimg = self.assemble_var_images(self.masterbias_path)
        dark_greenvarimg,dark_redvarimg = self.assemble_var_images(self.masterdark_path)
        flat_greenvarimg,flat_redvarimg = self.assemble_var_images(self.masterflat_path)

        # Sum the variances for GREEN and RED chips, after converting all terms to electrons.
        # The terms in the following formulas are, respectively:
        # 1. Read-noise variance
        # 2. Master-bias variance
        # 3. Master-dark variance
        # 4. Master-flat variance
        # 5. Photon-noise variance

        # GREEN
        try:
            greenvarimg = rn_greenvarimg +\
                bias_greenvarimg +\
                dark_greenvarimg * exp_time +\
                flat_greenvarimg * greenccdimg +\
                greenccdimg
        except Exception as e:
            print("Exception raised [",e,"]; continuing...")
            greenvarimg = None

        # RED
        try:
            redvarimg = rn_redvarimg +\
                bias_redvarimg +\
                dark_redvarimg * exp_time +\
                flat_redvarimg * redccdimg +\
                redccdimg
        except Exception as e:
            print("Exception raised [",e,"]; continuing...")
            redvarimg = None

        # Write variance FITS-extensions.
        if (greenvarimg is not None) or (redvarimg is not None):
            self.write_var_exts(greenvarimg,redvarimg)

        self.logger.info('Finished {}'.format(self.__class__.__name__))

        return Arguments(var_exts_exit_code)
|
Keck-DataReductionPipelinesREPO_NAMEKPF-PipelinePATH_START.@KPF-Pipeline_extracted@KPF-Pipeline-master@modules@var_exts@src@var_exts_framework.py@.PATH_END.py
|
{
"filename": "wenss.py",
"repo_name": "HERA-Team/aipy",
"repo_path": "aipy_extracted/aipy-main/aipy/_src/wenss.py",
"type": "Python"
}
|
"""
The WENSS Catalog.
Data files are in tab-separated format from Vizier.
To download in the correct format, open a catalog online in Vizier,
select'Tab-Separated-Values' as the Output layout in the drop-down box, set
the maximum entries to 'unlimited', and click 'Sexagesimal' under the box
for 'Target Name or Position'. Submit the query, and copy the output to a
txt file. Copy this file to "wenss.txt" in the _src directory of your AIPY
installation.
"""
from __future__ import print_function, division, absolute_import
try:
import aipy as a
except ImportError:
import aipy as a
import numpy as np, os
class WenssCatalog(a.fit.SrcCatalog):
    """Catalog of WENSS sources parsed from a Vizier tab-separated export."""

    def fromfile(self, filename):
        """Populate the catalog from *filename* (Vizier TSV export).

        Comment lines, short lines, and lines whose first field is not
        numeric are skipped.  Fluxes are converted from mJy to Jy and each
        source is created at a reference frequency of 330 MHz.
        """
        addsrcs = []
        # Use a context manager so the file handle is always closed
        # (the previous implementation never closed it).
        with open(filename) as f:
            for L in [L for L in f.readlines() if not L.startswith('#')]:
                text = L.split('\t')
                if len(text) <= 4: continue
                # Data rows start with a digit; skip column headers etc.
                try: int(text[0][0])
                except(ValueError): continue
                ra = text[4].replace(' ',':')
                dec = text[5].replace(' ',':')
                name = text[2].strip()
                jys = float(text[9])/1000  # mJy -> Jy
                addsrcs.append(a.fit.RadioFixedBody(ra, dec, name=name,
                    jys=jys, index=0, mfreq=0.330))
        self.add_srcs(addsrcs)
# Default location of the Vizier-exported WENSS table (see module docstring).
WENSSFILE = os.path.join(os.path.dirname(__file__), 'wenss.txt')
# Lazily populated module-level catalog cache; filled on first get_srcs() call.
_wensscat = None
def get_srcs(srcs=None, cutoff=None):
    """Return a list of WENSS sources, loading the catalog on first use.

    If *srcs* is None, all sources are returned, optionally restricted to
    those brighter than ``cut`` Jy at frequency ``fq`` when
    ``cutoff=(cut, fq)`` is given.  Unknown source names are ignored.
    """
    global _wensscat
    # Parse the catalog file once and cache it at module level.
    if _wensscat is None:
        _wensscat = WenssCatalog()
        _wensscat.fromfile(WENSSFILE)
    if srcs is None:
        if cutoff is None:
            srcs = _wensscat.keys()
        else:
            cut, fq = cutoff
            fq = np.array([fq])
            for name in _wensscat.keys():
                _wensscat[name].update_jys(fq)
            srcs = [
                name
                for name in _wensscat.keys()
                if _wensscat[name].jys[0] > cut
            ]
    srclist = []
    for name in srcs:
        try:
            srclist.append(_wensscat[name])
        except(KeyError):
            pass
    return srclist
|
HERA-TeamREPO_NAMEaipyPATH_START.@aipy_extracted@aipy-main@aipy@_src@wenss.py@.PATH_END.py
|
{
"filename": "llamacpp.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/embeddings/llamacpp.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.embeddings import LlamaCppEmbeddings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each deprecated attribute name to the module that now provides it.
DEPRECATED_LOOKUP = {"LlamaCppEmbeddings": "langchain_community.embeddings"}

# Importer that emits a deprecation warning and resolves the new location.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically (PEP 562 module-level __getattr__)."""
    return _import_attribute(name)


__all__ = [
    "LlamaCppEmbeddings",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@embeddings@llamacpp.py@.PATH_END.py
|
{
"filename": "ReadParamsReaclib.py",
"repo_name": "mmicromegas/ransX",
"repo_path": "ransX_extracted/ransX-master/UTILS/REACLIB/ReadParamsReaclib.py",
"type": "Python"
}
|
import re # python regular expressions
class ReadParamsReaclib:
def __init__(self, filename):
file = open(filename, 'r')
next(file) # skip header line
next(file) # skip header line
input = []
for line in file:
prsvalue = re.search(r'\[(.*)\]', line).group(1) # parse out values from square brackets
input.append(prsvalue)
file.close()
self.input = input
def getForProp(self, param):
match = [s for s in self.input if param in s] # choose only lists identified by param
reaclib = match[0].split(",")[2]
eht_data = match[1].split(",")[2]
plabel = match[2].split(",")[2]
prefix = match[3].split(",")[2]
ieos = match[4].split(",")[2]
fext = match[5].split(",")[2]
ig = int(match[6].split(",")[2])
nsdim = int(match[7].split(",")[2])
intc = int(match[8].split(",")[2])
laxis = int(match[9].split(",")[2])
tnuc = int(match[10].split(",")[2])
xbl = float(match[11].split(",")[2])
xbr = float(match[12].split(",")[2])
return {'reaclib': reaclib, 'eht_data': eht_data, 'plabel': plabel, 'prefix': prefix, 'ig': ig, 'ieos': ieos, 'intc': intc,
'laxis': laxis, 'fext': fext, 'tnuc': tnuc, 'xbl': xbl, 'xbr': xbr, 'nsdim': nsdim}
def getForEqs(self, param):
match = [s for s in self.input if param in s] # choose only lists identified by param
# print(param,match)
match_split = match[0].split(",")
# equation = match_split[0]
plotMee = match_split[1]
xbl = float(match_split[2])
xbr = float(match_split[3])
ybu = float(match_split[4])
ybd = float(match_split[5])
ilg = int(match_split[6])
return {'plotMee': plotMee, 'xbl': xbl, 'xbr': xbr, 'ybu': ybu, 'ybd': ybd, 'ilg': ilg}
def getForEqsBar(self, param):
match = [s for s in self.input if param in s] # choose only lists identified by param
match_split = match[0].split(",")
# equation = match_split[0]
plotMee = match_split[1]
xbl = float(match_split[2])
xbr = float(match_split[3])
ybu = float(match_split[4])
ybd = float(match_split[5])
return {'plotMee': plotMee, 'xbl': xbl, 'xbr': xbr, 'ybu': ybu, 'ybd': ybd}
def getNetwork(self):
match = [s for s in self.input if 'network' in s]
match_split = match[0].split(",")
return match_split
def getInuc(self, network, element):
inuc_tmp = int(network.index(element))
if inuc_tmp < 10:
inuc = '000' + str(inuc_tmp)
if inuc_tmp >= 10 and inuc_tmp < 100:
inuc = '00' + str(inuc_tmp)
if inuc_tmp >= 100 and inuc_tmp < 1000:
inuc = '0' + str(inuc_tmp)
return inuc
|
mmicromegasREPO_NAMEransXPATH_START.@ransX_extracted@ransX-master@UTILS@REACLIB@ReadParamsReaclib.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/densitymapbox/colorbar/title/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Eagerly import the validators on old Pythons (and for static type checkers);
# otherwise defer to plotly's lazy relative-import machinery so the module
# loads quickly and attributes resolve on first access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._text import TextValidator
    from ._side import SideValidator
    from ._font import FontValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._text.TextValidator", "._side.SideValidator", "._font.FontValidator"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@densitymapbox@colorbar@title@__init__.py@.PATH_END.py
|
{
"filename": "_tstutils.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/optimize/_tstutils.py",
"type": "Python"
}
|
r"""
Parameters used in test and benchmark methods.
Collections of test cases suitable for testing 1-D root-finders
'original': The original benchmarking functions.
Real-valued functions of real-valued inputs on an interval
with a zero.
f1, .., f3 are continuous and infinitely differentiable
f4 has a left- and right- discontinuity at the root
f5 has a root at 1 replacing a 1st order pole
f6 is randomly positive on one side of the root,
randomly negative on the other.
f4 - f6 are not continuous at the root.
'aps': The test problems in the 1995 paper
TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions"
by Alefeld, Potra and Shi. Real-valued functions of
real-valued inputs on an interval with a zero.
Suitable for methods which start with an enclosing interval, and
derivatives up to 2nd order.
'complex': Some complex-valued functions of complex-valued inputs.
No enclosing bracket is provided.
Suitable for methods which use one or more starting values, and
derivatives up to 2nd order.
The test cases are provided as a list of dictionaries. The dictionary
keys will be a subset of:
["f", "fprime", "fprime2", "args", "bracket", "smoothness",
"a", "b", "x0", "x1", "root", "ID"]
"""
# Sources:
# [1] Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
# "Algorithm 748: Enclosing Zeros of Continuous Functions",
# ACM Trans. Math. Softw. Volume 221(1995)
# doi = {10.1145/210089.210111},
# [2] Chandrupatla, Tirupathi R. "A new hybrid quadratic/bisection algorithm
# for finding the zero of a nonlinear function without using derivatives."
# Advances in Engineering Software 28.3 (1997): 145-149.
from random import random
import numpy as np
from scipy.optimize import _zeros_py as cc
# "description" refers to the original functions
description = """
f2 is a symmetric parabola, x**2 - 1
f3 is a quartic polynomial with large hump in interval
f4 is step function with a discontinuity at 1
f5 is a hyperbola with vertical asymptote at 1
f6 has random values positive to left of 1, negative to right
Of course, these are not real problems. They just test how the
'good' solvers behave in bad circumstances where bisection is
really the best. A good solver should not be much worse than
bisection in such circumstance, while being faster for smooth
monotone sorts of functions.
"""
def f1(x):
    r"""f1 is a quadratic with roots at 0 and 1"""
    return x * (x - 1.)
def f1_fp(x):
    # First derivative of f1.
    return 2 * x - 1
def f1_fpp(x):
    # Second derivative of f1 (constant).
    return 2
def f2(x):
    r"""f2 is a symmetric parabola, x**2 - 1"""
    return x**2 - 1
def f2_fp(x):
    # First derivative of f2.
    return 2 * x
def f2_fpp(x):
    # Second derivative of f2 (constant).
    return 2
def f3(x):
    r"""A quartic with roots at 0, 1, 2 and 3"""
    return x * (x - 1.) * (x - 2.) * (x - 3.)  # x**4 - 6x**3 + 11x**2 - 6x
def f3_fp(x):
    # First derivative of the expanded quartic x**4 - 6x**3 + 11x**2 - 6x.
    return 4 * x**3 - 18 * x**2 + 22 * x - 6
def f3_fpp(x):
    # Second derivative of f3.
    return 12 * x**2 - 36 * x + 22
def f4(x):
    r"""Piecewise linear, left- and right- discontinuous at x=1, the root."""
    # Jumps from about -0.9 to about 1.1 across x=1; only the exact point
    # x=1 evaluates to 0, which defeats interpolation-based steps.
    if x > 1:
        return 1.0 + .1 * x
    if x < 1:
        return -1.0 + .1 * x
    return 0
def f5(x):
    r"""Hyperbola with a pole at x=1, but pole replaced with 0. Not continuous at root."""
    # 1/(1-x) diverges as x -> 1; the singular point itself is patched to 0
    # so that x=1 is (formally) the root.
    if x != 1:
        return 1.0 / (1. - x)
    return 0
# f6(x) returns a random value unless x has been seen before. Memoizing in
# _f6_cache makes repeated evaluation at the same x reproducible within a
# run, so f6 behaves like a (badly-behaved) function rather than a random
# process: positive for x > 1, negative for x < 1, and exactly 0 at x = 1.
_f6_cache = {}
def f6(x):
    if x not in _f6_cache:
        if x > 1:
            _f6_cache[x] = random()
        elif x < 1:
            _f6_cache[x] = -random()
        else:
            _f6_cache[x] = 0
    return _f6_cache[x]
# Each Original test case has
# - a function and its two derivatives,
# - additional arguments,
# - a bracket enclosing a root,
# - the order of differentiability (smoothness) on this interval
# - a starting value for methods which don't require a bracket
# - the root (inside the bracket)
# - an Identifier of the test case
_ORIGINAL_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID"]
_ORIGINAL_TESTS = [
    [f1, f1_fp, f1_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.01.00"],
    [f2, f2_fp, f2_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.02.00"],
    [f3, f3_fp, f3_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.03.00"],
    [f4, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.04.00"],
    [f5, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.05.00"],
    # BUG FIX: this ID was duplicated as "original.05.00"; every test case
    # needs a unique identifier, and the f6 case is the 6th function.
    [f6, None, None, (), [0.5, np.sqrt(3)], -np.inf, 0.6, 1.0, "original.06.00"]
]
_ORIGINAL_TESTS_DICTS = [dict(zip(_ORIGINAL_TESTS_KEYS, testcase)) for testcase in _ORIGINAL_TESTS]
# ##################
# "APS" test cases
# Functions and test cases that appear in [1]
def aps01_f(x):
    r"""Straightforward sum of trigonometric function and polynomial"""
    return np.sin(x) - x / 2
def aps01_fp(x):
    # First derivative of aps01_f.
    return np.cos(x) - 1.0 / 2
def aps01_fpp(x):
    # Second derivative of aps01_f.
    return -np.sin(x)
def aps02_f(x):
    r"""poles at x=n**2, 1st and 2nd derivatives at root are also close to 0"""
    # Sum of third-order poles located at x = 1, 4, 9, ..., 400.
    ii = np.arange(1, 21)
    return -2 * np.sum((2 * ii - 5)**2 / (x - ii**2)**3)
def aps02_fp(x):
    # First derivative: d/dx[-2*c*(x-a)**-3] = 6*c*(x-a)**-4.
    ii = np.arange(1, 21)
    return 6 * np.sum((2 * ii - 5)**2 / (x - ii**2)**4)
def aps02_fpp(x):
    # Second derivative: d/dx[6*c*(x-a)**-4] = -24*c*(x-a)**-5.
    # BUG FIX: the coefficient must be -24, not +24 (the sign was wrong,
    # which would mislead Halley-type methods using fprime2).
    ii = np.arange(1, 21)
    return -24 * np.sum((2 * ii - 5)**2 / (x - ii**2)**5)
def aps03_f(x, a, b):
    r"""Rapidly changing at the root"""
    return a * x * np.exp(b * x)
def aps03_fp(x, a, b):
    # First derivative of aps03_f (product rule).
    return a * (b * x + 1) * np.exp(b * x)
def aps03_fpp(x, a, b):
    # Second derivative of aps03_f.
    return a * (b * (b * x + 1) + b) * np.exp(b * x)
def aps04_f(x, n, a):
    r"""Medium-degree polynomial"""
    return x**n - a
def aps04_fp(x, n, a):
    # First derivative of aps04_f (a drops out).
    return n * x**(n - 1)
def aps04_fpp(x, n, a):
    # Second derivative of aps04_f.
    return n * (n - 1) * x**(n - 2)
def aps05_f(x):
    r"""Simple Trigonometric function"""
    return np.sin(x) - 1.0 / 2
def aps05_fp(x):
    # First derivative of aps05_f.
    return np.cos(x)
def aps05_fpp(x):
    # Second derivative of aps05_f.
    return -np.sin(x)
def aps06_f(x, n):
    r"""Exponential rapidly changing from -1 to 1 at x=0"""
    return 2 * x * np.exp(-n) - 2 * np.exp(-n * x) + 1
def aps06_fp(x, n):
    # First derivative of aps06_f.
    return 2 * np.exp(-n) + 2 * n * np.exp(-n * x)
def aps06_fpp(x, n):
    # Second derivative of aps06_f.
    return -2 * n * n * np.exp(-n * x)
def aps07_f(x, n):
    r"""Upside down parabola with parametrizable height"""
    return (1 + (1 - n)**2) * x - (1 - n * x)**2
def aps07_fp(x, n):
    # First derivative of aps07_f.
    return (1 + (1 - n)**2) + 2 * n * (1 - n * x)
def aps07_fpp(x, n):
    # Second derivative of aps07_f (constant in x).
    return -2 * n * n
def aps08_f(x, n):
    r"""Degree n polynomial"""
    return x * x - (1 - x)**n
def aps08_fp(x, n):
    # First derivative of aps08_f.
    return 2 * x + n * (1 - x)**(n - 1)
def aps08_fpp(x, n):
    # Second derivative of aps08_f.
    return 2 - n * (n - 1) * (1 - x)**(n - 2)
def aps09_f(x, n):
    r"""Upside down quartic with parametrizable height"""
    return (1 + (1 - n)**4) * x - (1 - n * x)**4
def aps09_fp(x, n):
    # First derivative: d/dx[-(1 - n*x)**4] = 4*n*(1 - n*x)**3.
    return (1 + (1 - n)**4) + 4 * n * (1 - n * x)**3
def aps09_fpp(x, n):
    # Second derivative: d/dx[4*n*(1 - n*x)**3] = -12*n**2*(1 - n*x)**2.
    # BUG FIX: the prefactor is n**2, not n (chain rule brings down a
    # second factor of -n).
    return -12 * n * n * (1 - n * x)**2
def aps10_f(x, n):
    r"""Exponential plus a polynomial"""
    return np.exp(-n * x) * (x - 1) + x**n
def aps10_fp(x, n):
    # First derivative (product rule on the exponential term).
    return np.exp(-n * x) * (-n * (x - 1) + 1) + n * x**(n - 1)
def aps10_fpp(x, n):
    # Second derivative:
    # d/dx[exp(-n*x)*(1 - n*(x-1))] = exp(-n*x)*(-n*(1 - n*(x-1)) - n).
    # BUG FIX: the trailing term inside the parentheses is -n (derivative
    # of 1 - n*(x-1)), not -n*x as previously written.
    return np.exp(-n * x) * (-n * (-n * (x - 1) + 1) - n) + n * (n - 1) * x**(n - 2)
def aps11_f(x, n):
    r"""Rational function with a zero at x=1/n and a pole at x=0"""
    return (n * x - 1) / ((n - 1) * x)
def aps11_fp(x, n):
    # First derivative; the function equals n/(n-1) - 1/((n-1)*x).
    return 1 / (n - 1) / x**2
def aps11_fpp(x, n):
    # Second derivative of aps11_f.
    return -2 / (n - 1) / x**3
def aps12_f(x, n):
    r"""nth root of x, with a zero at x=n"""
    return np.power(x, 1.0 / n) - np.power(n, 1.0 / n)
def aps12_fp(x, n):
    # First derivative: (1/n) * x**((1-n)/n).
    return np.power(x, (1.0 - n) / n) / n
def aps12_fpp(x, n):
    # Second derivative of aps12_f.
    return np.power(x, (1.0 - 2 * n) / n) * (1.0 / n) * (1.0 - n) / n
# Largest y for which exp(y) does not overflow a double; used to guard the
# exp(1/x**2) evaluations near x = 0 below.
_MAX_EXPABLE = np.log(np.finfo(float).max)
def aps13_f(x):
    r"""Function with *all* derivatives 0 at the root"""
    # x * exp(-1/x**2), extended continuously by 0 at x = 0.
    if x == 0:
        return 0
    # x2 = 1.0/x**2
    # if x2 > 708:
    #     return 0
    y = 1 / x**2
    if y > _MAX_EXPABLE:
        # exp(y) would overflow; the true value underflows to 0 anyway.
        return 0
    return x / np.exp(y)
def aps13_fp(x):
    # First derivative of aps13_f, with the same overflow guard.
    if x == 0:
        return 0
    y = 1 / x**2
    if y > _MAX_EXPABLE:
        return 0
    return (1 + 2 / x**2) / np.exp(y)
def aps13_fpp(x):
    # Second derivative of aps13_f, with the same overflow guard.
    if x == 0:
        return 0
    y = 1 / x**2
    if y > _MAX_EXPABLE:
        return 0
    return 2 * (2 - x**2) / x**5 / np.exp(y)
def aps14_f(x, n):
    r"""0 for negative x-values, trigonometric+linear for x positive"""
    # NOTE(review): despite the docstring, the function is the constant
    # -n/20 (not 0) for x <= 0; the root lies in the trig+linear branch.
    if x <= 0:
        return -n / 20.0
    return n / 20.0 * (x / 1.5 + np.sin(x) - 1)
def aps14_fp(x, n):
    # First derivative of aps14_f (0 on the constant branch).
    if x <= 0:
        return 0
    return n / 20.0 * (1.0 / 1.5 + np.cos(x))
def aps14_fpp(x, n):
    # Second derivative of aps14_f.
    if x <= 0:
        return 0
    return -n / 20.0 * (np.sin(x))
def aps15_f(x, n):
    r"""piecewise linear, constant outside of [0, 0.002/(1+n)]"""
    # Constant plateaus on either side of a very thin exponential
    # transition region of width 0.002/(1+n).
    if x < 0:
        return -0.859
    if x > 2 * 1e-3 / (1 + n):
        return np.e - 1.859
    return np.exp((n + 1) * x / 2 * 1000) - 1.859
def aps15_fp(x, n):
    # Derivative of the exponential transition region.
    # NOTE(review): outside [0, 0.002/(1+n)] this returns np.e - 1.859
    # rather than 0 (the true derivative of the constant branches) --
    # confirm whether this is intentional before changing.
    if not 0 <= x <= 2 * 1e-3 / (1 + n):
        return np.e - 1.859
    return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000
def aps15_fpp(x, n):
    # Second derivative; same out-of-range convention as aps15_fp.
    if not 0 <= x <= 2 * 1e-3 / (1 + n):
        return np.e - 1.859
    return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 * (n + 1) / 2 * 1000
# Each APS test case has
# - a function and its two derivatives,
# - additional arguments,
# - a bracket enclosing a root,
# - the order of differentiability of the function on this interval
# - a starting value for methods which don't require a bracket
# - the root (inside the bracket)
# - an Identifier of the test case
#
# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided
# in [1] for each test case. Newton and Halley methods need a single
# starting point x0, which was chosen to be near the middle of the interval,
# unless that would have made the problem too easy.
_APS_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID"]
_APS_TESTS = [
[aps01_f, aps01_fp, aps01_fpp, (), [np.pi / 2, np.pi], np.inf, 3, 1.89549426703398094e+00, "aps.01.00"],
[aps02_f, aps02_fp, aps02_fpp, (), [1 + 1e-9, 4 - 1e-9], np.inf, 2, 3.02291534727305677e+00, "aps.02.00"],
[aps02_f, aps02_fp, aps02_fpp, (), [4 + 1e-9, 9 - 1e-9], np.inf, 5, 6.68375356080807848e+00, "aps.02.01"],
[aps02_f, aps02_fp, aps02_fpp, (), [9 + 1e-9, 16 - 1e-9], np.inf, 10, 1.12387016550022114e+01, "aps.02.02"],
[aps02_f, aps02_fp, aps02_fpp, (), [16 + 1e-9, 25 - 1e-9], np.inf, 17, 1.96760000806234103e+01, "aps.02.03"],
[aps02_f, aps02_fp, aps02_fpp, (), [25 + 1e-9, 36 - 1e-9], np.inf, 26, 2.98282273265047557e+01, "aps.02.04"],
[aps02_f, aps02_fp, aps02_fpp, (), [36 + 1e-9, 49 - 1e-9], np.inf, 37, 4.19061161952894139e+01, "aps.02.05"],
[aps02_f, aps02_fp, aps02_fpp, (), [49 + 1e-9, 64 - 1e-9], np.inf, 50, 5.59535958001430913e+01, "aps.02.06"],
[aps02_f, aps02_fp, aps02_fpp, (), [64 + 1e-9, 81 - 1e-9], np.inf, 65, 7.19856655865877997e+01, "aps.02.07"],
[aps02_f, aps02_fp, aps02_fpp, (), [81 + 1e-9, 100 - 1e-9], np.inf, 82, 9.00088685391666701e+01, "aps.02.08"],
[aps02_f, aps02_fp, aps02_fpp, (), [100 + 1e-9, 121 - 1e-9], np.inf, 101, 1.10026532748330197e+02, "aps.02.09"],
[aps03_f, aps03_fp, aps03_fpp, (-40, -1), [-9, 31], np.inf, -2, 0, "aps.03.00"],
[aps03_f, aps03_fp, aps03_fpp, (-100, -2), [-9, 31], np.inf, -2, 0, "aps.03.01"],
[aps03_f, aps03_fp, aps03_fpp, (-200, -3), [-9, 31], np.inf, -2, 0, "aps.03.02"],
[aps04_f, aps04_fp, aps04_fpp, (4, 0.2), [0, 5], np.inf, 2.5, 6.68740304976422006e-01, "aps.04.00"],
[aps04_f, aps04_fp, aps04_fpp, (6, 0.2), [0, 5], np.inf, 2.5, 7.64724491331730039e-01, "aps.04.01"],
[aps04_f, aps04_fp, aps04_fpp, (8, 0.2), [0, 5], np.inf, 2.5, 8.17765433957942545e-01, "aps.04.02"],
[aps04_f, aps04_fp, aps04_fpp, (10, 0.2), [0, 5], np.inf, 2.5, 8.51339922520784609e-01, "aps.04.03"],
[aps04_f, aps04_fp, aps04_fpp, (12, 0.2), [0, 5], np.inf, 2.5, 8.74485272221167897e-01, "aps.04.04"],
[aps04_f, aps04_fp, aps04_fpp, (4, 1), [0, 5], np.inf, 2.5, 1, "aps.04.05"],
[aps04_f, aps04_fp, aps04_fpp, (6, 1), [0, 5], np.inf, 2.5, 1, "aps.04.06"],
[aps04_f, aps04_fp, aps04_fpp, (8, 1), [0, 5], np.inf, 2.5, 1, "aps.04.07"],
[aps04_f, aps04_fp, aps04_fpp, (10, 1), [0, 5], np.inf, 2.5, 1, "aps.04.08"],
[aps04_f, aps04_fp, aps04_fpp, (12, 1), [0, 5], np.inf, 2.5, 1, "aps.04.09"],
[aps04_f, aps04_fp, aps04_fpp, (8, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.10"],
[aps04_f, aps04_fp, aps04_fpp, (10, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.11"],
[aps04_f, aps04_fp, aps04_fpp, (12, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.12"],
[aps04_f, aps04_fp, aps04_fpp, (14, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.13"],
[aps05_f, aps05_fp, aps05_fpp, (), [0, 1.5], np.inf, 1.3, np.pi / 6, "aps.05.00"],
[aps06_f, aps06_fp, aps06_fpp, (1,), [0, 1], np.inf, 0.5, 4.22477709641236709e-01, "aps.06.00"],
[aps06_f, aps06_fp, aps06_fpp, (2,), [0, 1], np.inf, 0.5, 3.06699410483203705e-01, "aps.06.01"],
[aps06_f, aps06_fp, aps06_fpp, (3,), [0, 1], np.inf, 0.5, 2.23705457654662959e-01, "aps.06.02"],
[aps06_f, aps06_fp, aps06_fpp, (4,), [0, 1], np.inf, 0.5, 1.71719147519508369e-01, "aps.06.03"],
[aps06_f, aps06_fp, aps06_fpp, (5,), [0, 1], np.inf, 0.4, 1.38257155056824066e-01, "aps.06.04"],
[aps06_f, aps06_fp, aps06_fpp, (20,), [0, 1], np.inf, 0.1, 3.46573590208538521e-02, "aps.06.05"],
[aps06_f, aps06_fp, aps06_fpp, (40,), [0, 1], np.inf, 5e-02, 1.73286795139986315e-02, "aps.06.06"],
[aps06_f, aps06_fp, aps06_fpp, (60,), [0, 1], np.inf, 1.0 / 30, 1.15524530093324210e-02, "aps.06.07"],
[aps06_f, aps06_fp, aps06_fpp, (80,), [0, 1], np.inf, 2.5e-02, 8.66433975699931573e-03, "aps.06.08"],
[aps06_f, aps06_fp, aps06_fpp, (100,), [0, 1], np.inf, 2e-02, 6.93147180559945415e-03, "aps.06.09"],
[aps07_f, aps07_fp, aps07_fpp, (5,), [0, 1], np.inf, 0.4, 3.84025518406218985e-02, "aps.07.00"],
[aps07_f, aps07_fp, aps07_fpp, (10,), [0, 1], np.inf, 0.4, 9.90000999800049949e-03, "aps.07.01"],
[aps07_f, aps07_fp, aps07_fpp, (20,), [0, 1], np.inf, 0.4, 2.49375003906201174e-03, "aps.07.02"],
[aps08_f, aps08_fp, aps08_fpp, (2,), [0, 1], np.inf, 0.9, 0.5, "aps.08.00"],
[aps08_f, aps08_fp, aps08_fpp, (5,), [0, 1], np.inf, 0.9, 3.45954815848242059e-01, "aps.08.01"],
[aps08_f, aps08_fp, aps08_fpp, (10,), [0, 1], np.inf, 0.9, 2.45122333753307220e-01, "aps.08.02"],
[aps08_f, aps08_fp, aps08_fpp, (15,), [0, 1], np.inf, 0.9, 1.95547623536565629e-01, "aps.08.03"],
[aps08_f, aps08_fp, aps08_fpp, (20,), [0, 1], np.inf, 0.9, 1.64920957276440960e-01, "aps.08.04"],
[aps09_f, aps09_fp, aps09_fpp, (1,), [0, 1], np.inf, 0.5, 2.75508040999484394e-01, "aps.09.00"],
[aps09_f, aps09_fp, aps09_fpp, (2,), [0, 1], np.inf, 0.5, 1.37754020499742197e-01, "aps.09.01"],
[aps09_f, aps09_fp, aps09_fpp, (4,), [0, 1], np.inf, 0.5, 1.03052837781564422e-02, "aps.09.02"],
[aps09_f, aps09_fp, aps09_fpp, (5,), [0, 1], np.inf, 0.5, 3.61710817890406339e-03, "aps.09.03"],
[aps09_f, aps09_fp, aps09_fpp, (8,), [0, 1], np.inf, 0.5, 4.10872918496395375e-04, "aps.09.04"],
[aps09_f, aps09_fp, aps09_fpp, (15,), [0, 1], np.inf, 0.5, 2.59895758929076292e-05, "aps.09.05"],
[aps09_f, aps09_fp, aps09_fpp, (20,), [0, 1], np.inf, 0.5, 7.66859512218533719e-06, "aps.09.06"],
[aps10_f, aps10_fp, aps10_fpp, (1,), [0, 1], np.inf, 0.9, 4.01058137541547011e-01, "aps.10.00"],
[aps10_f, aps10_fp, aps10_fpp, (5,), [0, 1], np.inf, 0.9, 5.16153518757933583e-01, "aps.10.01"],
[aps10_f, aps10_fp, aps10_fpp, (10,), [0, 1], np.inf, 0.9, 5.39522226908415781e-01, "aps.10.02"],
[aps10_f, aps10_fp, aps10_fpp, (15,), [0, 1], np.inf, 0.9, 5.48182294340655241e-01, "aps.10.03"],
[aps10_f, aps10_fp, aps10_fpp, (20,), [0, 1], np.inf, 0.9, 5.52704666678487833e-01, "aps.10.04"],
[aps11_f, aps11_fp, aps11_fpp, (2,), [0.01, 1], np.inf, 1e-02, 1.0 / 2, "aps.11.00"],
[aps11_f, aps11_fp, aps11_fpp, (5,), [0.01, 1], np.inf, 1e-02, 1.0 / 5, "aps.11.01"],
[aps11_f, aps11_fp, aps11_fpp, (15,), [0.01, 1], np.inf, 1e-02, 1.0 / 15, "aps.11.02"],
[aps11_f, aps11_fp, aps11_fpp, (20,), [0.01, 1], np.inf, 1e-02, 1.0 / 20, "aps.11.03"],
[aps12_f, aps12_fp, aps12_fpp, (2,), [1, 100], np.inf, 1.1, 2, "aps.12.00"],
[aps12_f, aps12_fp, aps12_fpp, (3,), [1, 100], np.inf, 1.1, 3, "aps.12.01"],
[aps12_f, aps12_fp, aps12_fpp, (4,), [1, 100], np.inf, 1.1, 4, "aps.12.02"],
[aps12_f, aps12_fp, aps12_fpp, (5,), [1, 100], np.inf, 1.1, 5, "aps.12.03"],
[aps12_f, aps12_fp, aps12_fpp, (6,), [1, 100], np.inf, 1.1, 6, "aps.12.04"],
[aps12_f, aps12_fp, aps12_fpp, (7,), [1, 100], np.inf, 1.1, 7, "aps.12.05"],
[aps12_f, aps12_fp, aps12_fpp, (9,), [1, 100], np.inf, 1.1, 9, "aps.12.06"],
[aps12_f, aps12_fp, aps12_fpp, (11,), [1, 100], np.inf, 1.1, 11, "aps.12.07"],
[aps12_f, aps12_fp, aps12_fpp, (13,), [1, 100], np.inf, 1.1, 13, "aps.12.08"],
[aps12_f, aps12_fp, aps12_fpp, (15,), [1, 100], np.inf, 1.1, 15, "aps.12.09"],
[aps12_f, aps12_fp, aps12_fpp, (17,), [1, 100], np.inf, 1.1, 17, "aps.12.10"],
[aps12_f, aps12_fp, aps12_fpp, (19,), [1, 100], np.inf, 1.1, 19, "aps.12.11"],
[aps12_f, aps12_fp, aps12_fpp, (21,), [1, 100], np.inf, 1.1, 21, "aps.12.12"],
[aps12_f, aps12_fp, aps12_fpp, (23,), [1, 100], np.inf, 1.1, 23, "aps.12.13"],
[aps12_f, aps12_fp, aps12_fpp, (25,), [1, 100], np.inf, 1.1, 25, "aps.12.14"],
[aps12_f, aps12_fp, aps12_fpp, (27,), [1, 100], np.inf, 1.1, 27, "aps.12.15"],
[aps12_f, aps12_fp, aps12_fpp, (29,), [1, 100], np.inf, 1.1, 29, "aps.12.16"],
[aps12_f, aps12_fp, aps12_fpp, (31,), [1, 100], np.inf, 1.1, 31, "aps.12.17"],
[aps12_f, aps12_fp, aps12_fpp, (33,), [1, 100], np.inf, 1.1, 33, "aps.12.18"],
[aps13_f, aps13_fp, aps13_fpp, (), [-1, 4], np.inf, 1.5, 0, "aps.13.00"],
[aps14_f, aps14_fp, aps14_fpp, (1,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.00"],
[aps14_f, aps14_fp, aps14_fpp, (2,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.01"],
[aps14_f, aps14_fp, aps14_fpp, (3,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.02"],
[aps14_f, aps14_fp, aps14_fpp, (4,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.03"],
[aps14_f, aps14_fp, aps14_fpp, (5,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.04"],
[aps14_f, aps14_fp, aps14_fpp, (6,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.05"],
[aps14_f, aps14_fp, aps14_fpp, (7,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.06"],
[aps14_f, aps14_fp, aps14_fpp, (8,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.07"],
[aps14_f, aps14_fp, aps14_fpp, (9,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.08"],
[aps14_f, aps14_fp, aps14_fpp, (10,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.09"],
[aps14_f, aps14_fp, aps14_fpp, (11,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.10"],
[aps14_f, aps14_fp, aps14_fpp, (12,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.11"],
[aps14_f, aps14_fp, aps14_fpp, (13,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.12"],
[aps14_f, aps14_fp, aps14_fpp, (14,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.13"],
[aps14_f, aps14_fp, aps14_fpp, (15,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.14"],
[aps14_f, aps14_fp, aps14_fpp, (16,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.15"],
[aps14_f, aps14_fp, aps14_fpp, (17,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.16"],
[aps14_f, aps14_fp, aps14_fpp, (18,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.17"],
[aps14_f, aps14_fp, aps14_fpp, (19,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.18"],
[aps14_f, aps14_fp, aps14_fpp, (20,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.19"],
[aps14_f, aps14_fp, aps14_fpp, (21,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.20"],
[aps14_f, aps14_fp, aps14_fpp, (22,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.21"],
[aps14_f, aps14_fp, aps14_fpp, (23,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.22"],
[aps14_f, aps14_fp, aps14_fpp, (24,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.23"],
[aps14_f, aps14_fp, aps14_fpp, (25,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.24"],
[aps14_f, aps14_fp, aps14_fpp, (26,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.25"],
[aps14_f, aps14_fp, aps14_fpp, (27,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.26"],
[aps14_f, aps14_fp, aps14_fpp, (28,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.27"],
[aps14_f, aps14_fp, aps14_fpp, (29,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.28"],
[aps14_f, aps14_fp, aps14_fpp, (30,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.29"],
[aps14_f, aps14_fp, aps14_fpp, (31,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.30"],
[aps14_f, aps14_fp, aps14_fpp, (32,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.31"],
[aps14_f, aps14_fp, aps14_fpp, (33,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.32"],
[aps14_f, aps14_fp, aps14_fpp, (34,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.33"],
[aps14_f, aps14_fp, aps14_fpp, (35,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.34"],
[aps14_f, aps14_fp, aps14_fpp, (36,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.35"],
[aps14_f, aps14_fp, aps14_fpp, (37,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.36"],
[aps14_f, aps14_fp, aps14_fpp, (38,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.37"],
[aps14_f, aps14_fp, aps14_fpp, (39,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.38"],
[aps14_f, aps14_fp, aps14_fpp, (40,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.39"],
[aps15_f, aps15_fp, aps15_fpp, (20,), [-1000, 1e-4], 0, -2, 5.90513055942197166e-05, "aps.15.00"],
[aps15_f, aps15_fp, aps15_fpp, (21,), [-1000, 1e-4], 0, -2, 5.63671553399369967e-05, "aps.15.01"],
[aps15_f, aps15_fp, aps15_fpp, (22,), [-1000, 1e-4], 0, -2, 5.39164094555919196e-05, "aps.15.02"],
[aps15_f, aps15_fp, aps15_fpp, (23,), [-1000, 1e-4], 0, -2, 5.16698923949422470e-05, "aps.15.03"],
[aps15_f, aps15_fp, aps15_fpp, (24,), [-1000, 1e-4], 0, -2, 4.96030966991445609e-05, "aps.15.04"],
[aps15_f, aps15_fp, aps15_fpp, (25,), [-1000, 1e-4], 0, -2, 4.76952852876389951e-05, "aps.15.05"],
[aps15_f, aps15_fp, aps15_fpp, (26,), [-1000, 1e-4], 0, -2, 4.59287932399486662e-05, "aps.15.06"],
[aps15_f, aps15_fp, aps15_fpp, (27,), [-1000, 1e-4], 0, -2, 4.42884791956647841e-05, "aps.15.07"],
[aps15_f, aps15_fp, aps15_fpp, (28,), [-1000, 1e-4], 0, -2, 4.27612902578832391e-05, "aps.15.08"],
[aps15_f, aps15_fp, aps15_fpp, (29,), [-1000, 1e-4], 0, -2, 4.13359139159538030e-05, "aps.15.09"],
[aps15_f, aps15_fp, aps15_fpp, (30,), [-1000, 1e-4], 0, -2, 4.00024973380198076e-05, "aps.15.10"],
[aps15_f, aps15_fp, aps15_fpp, (31,), [-1000, 1e-4], 0, -2, 3.87524192962066869e-05, "aps.15.11"],
[aps15_f, aps15_fp, aps15_fpp, (32,), [-1000, 1e-4], 0, -2, 3.75781035599579910e-05, "aps.15.12"],
[aps15_f, aps15_fp, aps15_fpp, (33,), [-1000, 1e-4], 0, -2, 3.64728652199592355e-05, "aps.15.13"],
[aps15_f, aps15_fp, aps15_fpp, (34,), [-1000, 1e-4], 0, -2, 3.54307833565318273e-05, "aps.15.14"],
[aps15_f, aps15_fp, aps15_fpp, (35,), [-1000, 1e-4], 0, -2, 3.44465949299614980e-05, "aps.15.15"],
[aps15_f, aps15_fp, aps15_fpp, (36,), [-1000, 1e-4], 0, -2, 3.35156058778003705e-05, "aps.15.16"],
[aps15_f, aps15_fp, aps15_fpp, (37,), [-1000, 1e-4], 0, -2, 3.26336162494372125e-05, "aps.15.17"],
[aps15_f, aps15_fp, aps15_fpp, (38,), [-1000, 1e-4], 0, -2, 3.17968568584260013e-05, "aps.15.18"],
[aps15_f, aps15_fp, aps15_fpp, (39,), [-1000, 1e-4], 0, -2, 3.10019354369653455e-05, "aps.15.19"],
[aps15_f, aps15_fp, aps15_fpp, (40,), [-1000, 1e-4], 0, -2, 3.02457906702100968e-05, "aps.15.20"],
[aps15_f, aps15_fp, aps15_fpp, (100,), [-1000, 1e-4], 0, -2, 1.22779942324615231e-05, "aps.15.21"],
[aps15_f, aps15_fp, aps15_fpp, (200,), [-1000, 1e-4], 0, -2, 6.16953939044086617e-06, "aps.15.22"],
[aps15_f, aps15_fp, aps15_fpp, (300,), [-1000, 1e-4], 0, -2, 4.11985852982928163e-06, "aps.15.23"],
[aps15_f, aps15_fp, aps15_fpp, (400,), [-1000, 1e-4], 0, -2, 3.09246238772721682e-06, "aps.15.24"],
[aps15_f, aps15_fp, aps15_fpp, (500,), [-1000, 1e-4], 0, -2, 2.47520442610501789e-06, "aps.15.25"],
[aps15_f, aps15_fp, aps15_fpp, (600,), [-1000, 1e-4], 0, -2, 2.06335676785127107e-06, "aps.15.26"],
[aps15_f, aps15_fp, aps15_fpp, (700,), [-1000, 1e-4], 0, -2, 1.76901200781542651e-06, "aps.15.27"],
[aps15_f, aps15_fp, aps15_fpp, (800,), [-1000, 1e-4], 0, -2, 1.54816156988591016e-06, "aps.15.28"],
[aps15_f, aps15_fp, aps15_fpp, (900,), [-1000, 1e-4], 0, -2, 1.37633453660223511e-06, "aps.15.29"],
[aps15_f, aps15_fp, aps15_fpp, (1000,), [-1000, 1e-4], 0, -2, 1.23883857889971403e-06, "aps.15.30"]
]
_APS_TESTS_DICTS = [dict(zip(_APS_TESTS_KEYS, testcase)) for testcase in _APS_TESTS]
# ##################
# "complex" test cases
# A few simple, complex-valued, functions, defined on the complex plane.
def cplx01_f(z, n, a):
    r"""z**n-a: Use to find the nth root of a"""
    return z**n - a
def cplx01_fp(z, n, a):
    # First derivative of cplx01_f.
    return n * z**(n - 1)
def cplx01_fpp(z, n, a):
    # Second derivative of cplx01_f.
    return n * (n - 1) * z**(n - 2)
def cplx02_f(z, a):
    r"""e**z - a: Use to find the log of a"""
    return np.exp(z) - a
def cplx02_fp(z, a):
    # First derivative of cplx02_f.
    return np.exp(z)
def cplx02_fpp(z, a):
    # Second derivative of cplx02_f.
    return np.exp(z)
# Each "complex" test case has
# - a function and its two derivatives,
# - additional arguments,
# - the order of differentiability of the function on this interval
# - two starting values x0 and x1
# - the root
# - an Identifier of the test case
#
# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided
# in [1] for each test case. Newton and Halley need a single starting point
# x0, which was chosen to be near the middle of the interval, unless that
# would make the problem too easy.
_COMPLEX_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "smoothness", "x0", "x1", "root", "ID"]
_COMPLEX_TESTS = [
[cplx01_f, cplx01_fp, cplx01_fpp, (2, -1), np.inf, (1 + 1j), (0.5 + 0.5j), 1j, "complex.01.00"],
[cplx01_f, cplx01_fp, cplx01_fpp, (3, 1), np.inf, (-1 + 1j), (-0.5 + 2.0j), (-0.5 + np.sqrt(3) / 2 * 1.0j),
"complex.01.01"],
[cplx01_f, cplx01_fp, cplx01_fpp, (3, -1), np.inf, 1j, (0.5 + 0.5j), (0.5 + np.sqrt(3) / 2 * 1.0j),
"complex.01.02"],
[cplx01_f, cplx01_fp, cplx01_fpp, (3, 8), np.inf, 5, 4, 2, "complex.01.03"],
[cplx02_f, cplx02_fp, cplx02_fpp, (-1,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 1.0j, "complex.02.00"],
[cplx02_f, cplx02_fp, cplx02_fpp, (1j,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 0.5j, "complex.02.01"],
]
_COMPLEX_TESTS_DICTS = [dict(zip(_COMPLEX_TESTS_KEYS, testcase)) for testcase in _COMPLEX_TESTS]
def _add_a_b(tests):
r"""Add "a" and "b" keys to each test from the "bracket" value"""
for d in tests:
for k, v in zip(['a', 'b'], d.get('bracket', [])):
d[k] = v
_add_a_b(_ORIGINAL_TESTS_DICTS)
_add_a_b(_APS_TESTS_DICTS)
_add_a_b(_COMPLEX_TESTS_DICTS)
def get_tests(collection='original', smoothness=None):
    r"""Return the requested collection of test cases, as an array of dicts with subset-specific keys

    Allowed values of collection:

    'original': The original benchmarking functions.
         Real-valued functions of real-valued inputs on an interval with a zero.
         f1, .., f3 are continuous and infinitely differentiable
         f4 has a single discontinuity at the root
         f5 has a root at 1 replacing a 1st order pole
         f6 is randomly positive on one side of the root, randomly negative on the other
    'aps': The test problems in the TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions"
         paper by Alefeld, Potra and Shi. Real-valued functions of
         real-valued inputs on an interval with a zero.
         Suitable for methods which start with an enclosing interval, and
         derivatives up to 2nd order.
    'complex': Some complex-valued functions of complex-valued inputs.
         No enclosing bracket is provided.
         Suitable for methods which use one or more starting values, and
         derivatives up to 2nd order.
    'chandrupatla': Bracketed real-valued test cases from Chandrupatla [2].

    The dictionary keys will be a subset of
    ["f", "fprime", "fprime2", "args", "bracket", "a", "b", "smoothness", "x0", "x1", "root", "ID"]
    """
    # An explicit None (or empty string) falls back to the default subset.
    collection = collection or "original"
    subsets = {"aps": _APS_TESTS_DICTS,
               "complex": _COMPLEX_TESTS_DICTS,
               "original": _ORIGINAL_TESTS_DICTS,
               "chandrupatla": _CHANDRUPATLA_TESTS_DICTS}
    # Unknown collection names yield an empty list rather than raising.
    tests = subsets.get(collection, [])
    if smoothness is not None:
        # Keep only cases at least this smooth (np.inf means C-infinity).
        # NOTE(review): the 'chandrupatla' cases carry no 'smoothness' key,
        # so combining that collection with a smoothness filter would raise
        # KeyError -- confirm intended usage.
        tests = [tc for tc in tests if tc['smoothness'] >= smoothness]
    return tests
# Backwards compatibility
methods = [cc.bisect, cc.ridder, cc.brenth, cc.brentq]
mstrings = ['cc.bisect', 'cc.ridder', 'cc.brenth', 'cc.brentq']
functions = [f2, f3, f4, f5, f6]
fstrings = ['f2', 'f3', 'f4', 'f5', 'f6']
# ##################
# "Chandrupatla" test cases
# Functions and test cases that appear in [2]
def fun1(x):
    # Cubic with a simple root near 2.09.
    return x**3 - 2*x - 5
fun1.root = 2.0945514815423265  # additional precision using mpmath.findroot
def fun2(x):
    # Pole at x=0; simple root at x=1.
    return 1 - 1/x**2
fun2.root = 1
def fun3(x):
    # Triple root at x=3.
    return (x-3)**3
fun3.root = 3
def fun4(x):
    # Fifth-order root at x=2.
    return 6*(x-2)**5
fun4.root = 2
def fun5(x):
    # Ninth-order root at x=0.
    return x**9
fun5.root = 0
def fun6(x):
    # Nineteenth-order root at x=0.
    return x**19
fun6.root = 0
def fun7(x):
    # Extremely flat near its root at 0; clamped to exactly 0 for
    # |x| < 3.8e-4 where x*exp(-x**-2) underflows.
    return 0 if abs(x) < 3.8e-4 else x*np.exp(-x**(-2))
fun7.root = 0
def fun8(x):
    # From [2]; the 1628/x term gives a pole at x=0.
    xi = 0.61489
    return -(3062*(1-xi)*np.exp(-x))/(xi + (1-xi)*np.exp(-x)) - 1013 + 1628/x
fun8.root = 1.0375360332870405
def fun9(x):
    # Exponential minus small rational perturbations; pole at x=0.
    return np.exp(x) - 2 - 0.01/x**2 + .000002/x**3
fun9.root = 0.7032048403631358
# Each "chandropatla" test case has
# - a function,
# - two starting values x0 and x1
# - the root
# - the number of function evaluations required by Chandrupatla's algorithm
# - an Identifier of the test case
#
# Chandrupatla's is a bracketing algorithm, so a bracketing interval was
# provided in [2] for each test case. No special support for testing with
# secant/Newton/Halley is provided.
_CHANDRUPATLA_TESTS_KEYS = ["f", "bracket", "root", "nfeval", "ID"]
_CHANDRUPATLA_TESTS = [
[fun1, [2, 3], fun1.root, 7],
[fun1, [1, 10], fun1.root, 11],
[fun1, [1, 100], fun1.root, 14],
[fun1, [-1e4, 1e4], fun1.root, 23],
[fun1, [-1e10, 1e10], fun1.root, 43],
[fun2, [0.5, 1.51], fun2.root, 8],
[fun2, [1e-4, 1e4], fun2.root, 22],
[fun2, [1e-6, 1e6], fun2.root, 28],
[fun2, [1e-10, 1e10], fun2.root, 41],
[fun2, [1e-12, 1e12], fun2.root, 48],
[fun3, [0, 5], fun3.root, 21],
[fun3, [-10, 10], fun3.root, 23],
[fun3, [1e-4, 1e4], fun3.root, 36],
[fun3, [1e-6, 1e6], fun3.root, 45],
[fun3, [1e-10, 1e10], fun3.root, 55],
[fun4, [0, 5], fun4.root, 21],
[fun4, [-10, 10], fun4.root, 23],
[fun4, [1e-4, 1e4], fun4.root, 33],
[fun4, [1e-6, 1e6], fun4.root, 43],
[fun4, [1e-10, 1e10], fun4.root, 54],
[fun5, [-1, 4], fun5.root, 21],
[fun5, [-2, 5], fun5.root, 22],
[fun5, [-1, 10], fun5.root, 23],
[fun5, [-5, 50], fun5.root, 25],
[fun5, [-10, 100], fun5.root, 26],
[fun6, [-1., 4.], fun6.root, 21],
[fun6, [-2., 5.], fun6.root, 22],
[fun6, [-1., 10.], fun6.root, 23],
[fun6, [-5., 50.], fun6.root, 25],
[fun6, [-10., 100.], fun6.root, 26],
[fun7, [-1, 4], fun7.root, 8],
[fun7, [-2, 5], fun7.root, 8],
[fun7, [-1, 10], fun7.root, 11],
[fun7, [-5, 50], fun7.root, 18],
[fun7, [-10, 100], fun7.root, 19],
[fun8, [2e-4, 2], fun8.root, 9],
[fun8, [2e-4, 3], fun8.root, 10],
[fun8, [2e-4, 9], fun8.root, 11],
[fun8, [2e-4, 27], fun8.root, 12],
[fun8, [2e-4, 81], fun8.root, 14],
[fun9, [2e-4, 2], fun9.root, 7],
[fun9, [2e-4, 3], fun9.root, 8],
[fun9, [2e-4, 9], fun9.root, 10],
[fun9, [2e-4, 27], fun9.root, 11],
[fun9, [2e-4, 81], fun9.root, 13],
]
_CHANDRUPATLA_TESTS = [test + [f'{test[0].__name__}.{i%5+1}']
for i, test in enumerate(_CHANDRUPATLA_TESTS)]
_CHANDRUPATLA_TESTS_DICTS = [dict(zip(_CHANDRUPATLA_TESTS_KEYS, testcase))
for testcase in _CHANDRUPATLA_TESTS]
_add_a_b(_CHANDRUPATLA_TESTS_DICTS)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@optimize@_tstutils.py@.PATH_END.py
|
{
"filename": "cosmology_library.py",
"repo_name": "franciscovillaescusa/Pylians",
"repo_path": "Pylians_extracted/Pylians-master/library/cosmology_library.py",
"type": "Python"
}
|
import numpy as np
import scipy.integrate as si
import sys,os,time
#################### FUNCTIONS USED TO COMPUTE INTEGRALS #####################
#integrand used below to compute the comoving distance to a given redshift
def func(y,x,Omega_m,Omega_L):
    #dr/dz is proportional to 1/E(z); returned as a 1-element list because
    #scipy.integrate.odeint integrates vector-valued states
    return [1.0/np.sqrt(Omega_m*(1.0+x)**3+Omega_L)]
##############################################################################

##############################################################################
#This function computes the comoving distance to redshift z, in Mpc/h
#As input it needs z, Omega_m and Omega_L. It assumes a flat cosmology
def comoving_distance(z,Omega_m,Omega_L):
    hubble0 = 100.0  #km/s/(Mpc/h)
    speed_of_light = 3e5  #km/s
    #integrate dr/dz from z=0 up to the requested redshift; odeint returns
    #the solution at both limits, so keep the value at the upper one
    integral = si.odeint(func, [0.0], [0.0, z], args=(Omega_m,Omega_L),
                         rtol=1e-8, atol=1e-8, mxstep=100000, h0=1e-6)[1][0]
    return speed_of_light/hubble0*integral
##############################################################################

##############################################################################
def func_lgf(y,x,Omega_m,Omega_L):
    #print x, 1.0/(x**3 * (np.sqrt(Omega_m/x**3 + Omega_L))**3)
    # Growth-factor integrand 1/(a*H(a)/H0)**3, with x playing the role of
    # the scale factor a.
    return 1.0/(x*np.sqrt(Omega_m/x**3 + Omega_L))**3
#This function computes the linear growth factor. See Eq. 1 of 0006089
#Notice that in that formula H(a) = (Omega_m/a^3+Omega_L)^1/2 and that the
#growth is D(a), not g(a). We normalize it such as D(a=1)=1
def linear_growth_factor(z,Omega_m,Omega_L):
    # compute linear growth factor at z and z=0
    # Integrate from a~0 up to both a(z) and a=1 in a single odeint call;
    # [1:] keeps the accumulated integral at those two upper limits.
    yinit = [0.0]; a_limits = [1e-30, 1.0/(1.0+z), 1.0/(1.0+0.0)]
    I = si.odeint(func_lgf,yinit,a_limits,args=(Omega_m,Omega_L),
                  rtol=1e-10,atol=1e-10,mxstep=100000,h0=1e-20)[1:]
    redshifts = np.array([ [z], [0.0] ])
    # D(a) = (5/2)*Omega_m*H(a)*integral; common prefactors cancel in the
    # ratio below, which enforces the D(a=1)=1 normalization.
    Ha = np.sqrt(Omega_m*(1.0+redshifts)**3 + Omega_L)
    D = (5.0*Omega_m/2.0)*Ha*I
    # NOTE(review): this returns a length-1 array, not a scalar -- callers
    # appear to rely on broadcasting; confirm before changing.
    return D[0]/D[1]
##############################################################################
#This function computes the absoption distance:
#dX = H0*(1+z)^2/H(z)*dz
#Omega_m ----> value of the Omega_m cosmological parameter
#Omega_L ----> value of the Omega_L cosmological parameter
#z ----------> cosmological redshift
#BoxSize ----> size of the simulation box in Mpc/h
def absorption_distance(Omega_m,Omega_L,z,BoxSize):
    """Absorption distance dX = H0*(1+z)^2/H(z)*dz across a simulation box.

    Omega_m ----> value of the Omega_m cosmological parameter
    Omega_L ----> value of the Omega_L cosmological parameter
    z ----------> cosmological redshift
    BoxSize ----> size of the simulation box in Mpc/h

    The redshift interval dz spanned by the box is found by bisection on
    the comoving-distance difference.
    """
    max_iter, tol = 40, 1e-4
    dz_lo, dz_hi = 0.0, 10.0
    dz = 0.5*(dz_lo + dz_hi)
    r0 = comoving_distance(z,Omega_m,Omega_L)  #Mpc/h
    it = 0
    while True:
        dr = comoving_distance(z+dz,Omega_m,Omega_L) - r0
        # stop when the box is matched to relative tolerance tol,
        # or after max_iter bisection steps
        if (np.absolute(dr-BoxSize)/BoxSize) < tol or it > max_iter:
            break
        it += 1
        if dr > BoxSize:
            dz_hi = dz
        else:
            dz_lo = dz
        dz = 0.5*(dz_lo + dz_hi)
    return (1.0+z)**2/np.sqrt(Omega_m*(1.0+z)**3+Omega_L)*dz
##############################################################################
# This routine implements the Takahashi 2012 halofit formula (1208.2701)
# we have neglected the terms with dark energy variation (Eqs. A6 and A7)
# Omega_m ---------------> value of Omega_m at z=0
# Omega_l ---------------> value of Omega_l at z=0
# z ---------------------> redshift
# k_lin,Pk_lin ----------> linear power spectrum at z=0
# for redshifts different to 0 the code computes the growth factor and
# rescale it. It returns the non-linear P(k) at redshift z. We have checked
# that for a Planck cosmology it reproduces CAMB within 0.6% up to z=5
def Halofit_12(Omega_m,Omega_l,z,k_lin,Pk_lin):
    """Non-linear matter power spectrum from the Takahashi 2012 halofit
    formula (1208.2701); terms with dark-energy variation (Eqs. A6/A7)
    are neglected.

    Omega_m ---------------> value of Omega_m at z=0
    Omega_l ---------------> value of Omega_l at z=0
    z ---------------------> output redshift
    k_lin, Pk_lin ---------> linear power spectrum at z=0
    Returns the non-linear P(k) at redshift z, sampled at k_lin.
    """
    # compute growth factor at redshift z and rescale P(k)
    Dz = linear_growth_factor(z,Omega_m,Omega_l)
    Pk_lin = Pk_lin*Dz**2
    ######### find the value of k_sigma #########
    # bisect on the smoothing radius R until sigma^2(R) = 1;
    # the non-linear scale is then k_sigma = 1/R
    Rmin,Rmax = 0.01, 10.0 #Mpc/h
    found = False; precision = 1e-5
    while not(found):
        R = 0.5*(Rmin + Rmax)
        yinit = [0.0]; k_limits = [k_lin[0],k_lin[-1]]
        # sigma^2(R) via Gaussian-window integral of the linear P(k)
        sigma2 = si.odeint(sigma_func, yinit, k_limits, args=(k_lin,Pk_lin,R),
                           mxstep=1000000, rtol=1e-8, atol=1e-21,
                           h0=1e-10)[1][0]
        sigma2 = sigma2/(2.0*np.pi**2)
        if abs(sigma2-1.0)<precision: found = True
        elif sigma2>1.0: Rmin = R
        else: Rmax = R
    k_sigma = 1.0/R #h/Mpc
    #############################################
    ####### compute value of neff and C #########
    # notice that we are doing dlog sigma^2/dlnR =
    # log(sigma^2(log(R)+log(h))) - log(sigma^2(log(R))) / log(h) =
    # log(sigma^2(log(R*h))) - log(sigma^2(log(R))) / log(h)
    # (finite differences of log sigma^2 at R*h and R/h)
    h = 1.05
    Rp = R*h; yinit = [0.0]; k_limits = [k_lin[0],k_lin[-1]]
    sigma2p = si.odeint(sigma_func, yinit, k_limits, args=(k_lin,Pk_lin,Rp),
                        mxstep=1000000, rtol=1e-8, atol=1e-21, h0=1e-10)[1][0]
    sigma2p = sigma2p/(2.0*np.pi**2)
    Rm = R/h; yinit = [0.0]; k_limits = [k_lin[0],k_lin[-1]]
    sigma2m = si.odeint(sigma_func, yinit, k_limits, args=(k_lin,Pk_lin,Rm),
                        mxstep=1000000, rtol=1e-8, atol=1e-21, h0=1e-10)[1][0]
    sigma2m = sigma2m/(2.0*np.pi**2)
    # sanity check
    # NOTE(review): Python 2 print statement below — this file uses
    # Python 2 syntax throughout (see usage examples further down)
    if abs(sigma2p-sigma2)<1e3*precision or abs(sigma2-sigma2m)<1e3*precision:
        print 'value of h too small for given precision'; sys.exit()
    # effective spectral index and curvature of sigma^2 at R
    neff = -(np.log(sigma2p)-np.log(sigma2m))/(2.0*np.log(h)) - 3.0
    C = -(np.log(sigma2p) - 2.0*np.log(sigma2) + np.log(sigma2m))/np.log(h)**2
    #############################################
    ################ constants ##################
    # fitting coefficients of Takahashi et al. 2012 (their Eqs. A5-A14)
    Omegamz = Omega_m*(1.0+z)**3/(Omega_m*(1.0+z)**3 + Omega_l)
    f1 = Omegamz**(-0.0307); f2 = Omegamz**(-0.0585); f3 = Omegamz**(0.0743)
    an = 10**(1.5222 + 2.8553*neff + 2.3706*neff**2 + 0.9903*neff**3 + \
              0.2250*neff**4 - 0.6038*C)
    bn = 10**(-0.5642 + 0.5864*neff + 0.5716*neff**2 - 1.5474*C)
    cn = 10**(0.3698 + 2.0404*neff + 0.8161*neff**2 + 0.5869*C)
    gamman = 0.1971 - 0.0843*neff + 0.8460*C
    alphan = abs(6.0835 + 1.3373*neff - 0.1959*neff**2 - 5.5274*C)
    betan = 2.0379 - 0.7354*neff + 0.3157*neff**2 + 1.2490*neff**3 + \
            0.3980*neff**4 - 0.1682*C
    mun = 0.0
    nun = 10**(5.2105 + 3.6902*neff)
    ############################################
    Pk_hf = np.zeros(len(k_lin),dtype=np.float64)
    for i,k in enumerate(k_lin):
        # dimensionless linear power spectrum
        delta2_lin = k**3*Pk_lin[i]/(2.0*np.pi**2)
        y = (k/k_sigma); fy = y/4.0 + y**2/8.0
        # two-halo term
        delta2_Q = delta2_lin*(1.0 + delta2_lin)**betan/\
                   (1.0 + alphan*delta2_lin)*np.exp(-fy)
        # one-halo term
        delta2_HH = an*y**(3.0*f1)/(1.0 + bn*y**f2 + (cn*f3*y)**(3.0-gamman))
        delta2_H = delta2_HH/(1.0 + mun/y + nun/y**2)
        # total non-linear dimensionless power spectrum
        delta2_hf = delta2_Q + delta2_H
        # non-linear power spectrum
        Pk_hf[i] = (2*np.pi**2)*delta2_hf/k**3
    return Pk_hf
#############################################
def sigma_func(y,x,k,Pk,R):
    """Integrand of the sigma^2(R) integral with a Gaussian window:
    P(k)*k^2*exp(-k^2 R^2), with P interpolated from the (k, Pk) table."""
    window = np.exp(-x**2*R**2)
    return [np.interp(x,k,Pk)*x**2*window]
##############################################################################
###############################################################################
################################### USAGE #####################################
###############################################################################
###### comoving distance ######
"""
z=3.0
Omega_m=0.3
Omega_L=0.7
r=comoving_distance(z,Omega_m,Omega_L)
print 'comoving distance to z = %2.2f ---> %f Mpc/h'%(z,r)
"""
###### linear growth factor ######
"""
z = 1.0
Omega_m = 0.308
Omega_l = 0.692
h = 0.6781
Da = linear_growth_factor(z,Omega_m,Omega_l)
print 'Linear growth factor at z = %.1f : %.3e'%(z,Da)
"""
###### absorption distance ######
"""
Omega_m=0.274247
Omega_L=0.725753
z=3.0
BoxSize=60.0 #Mpc/h
dX=absorption_distance(Omega_m,Omega_L,z,BoxSize)
print 'dX =',dX
"""
###### halofit P(k) ######
"""
Omega_m = 0.3175
Omega_l = 0.6825
z = 1.0
k_lin,Pk_lin = np.loadtxt('ics_matterpow_0.dat',unpack=True)
Pk_hf = Halofit_12(Omega_m,Omega_l,z,k_lin,Pk_lin)
"""
|
franciscovillaescusaREPO_NAMEPyliansPATH_START.@Pylians_extracted@Pylians-master@library@cosmology_library.py@.PATH_END.py
|
{
"filename": "basic_ui.py",
"repo_name": "sherpa/sherpa",
"repo_path": "sherpa_extracted/sherpa-main/docs/_examples/ui/basic_ui.py",
"type": "Python"
}
|
from sherpa.ui import *
# Minimal walkthrough of the Sherpa UI session interface: load a 1-D
# dataset, inspect the session state, fit a constant model, and prepare
# a fit plot. Each print("# ...") line labels the output that follows.
x = [100, 200, 300, 400]
y = [10, 12, 9, 13]
# register (x, y) as data set id 1 in the session
load_arrays(1, x, y)
print("# list_data_ids")
print(list_data_ids())
print("# get_data()")
print(repr(get_data()))
print("# get_data()")
print(get_data())
print("# get_stat_name/get_method_name")
print(get_stat_name())
print(get_method_name())
# switch to the Cash statistic and the simplex optimization method
set_stat('cash')
set_method('simplex')
# 'const1d.mdl' assigns a 1-D constant model; mdl is usable afterwards
set_source('const1d.mdl')
print("# mdl")
print(mdl)
print("# get_source")
print(get_source())
# fit the current source model to the data
print("# fit")
fit()
print("# get_fit_results")
r = get_fit_results()
print(r)
# turn off y error bars for subsequent data plots
get_data_plot_prefs()['yerrorbars'] = False
print("--> call")
print("plot_fit()")
|
sherpaREPO_NAMEsherpaPATH_START.@sherpa_extracted@sherpa-main@docs@_examples@ui@basic_ui.py@.PATH_END.py
|
{
"filename": "Sim1-multisim.ipynb",
"repo_name": "abhisrkckl/pint-noise",
"repo_path": "pint-noise_extracted/pint-noise-main/sim1-efac-equad/Sim1-multisim.ipynb",
"type": "Jupyter Notebook"
}
|
```python
from pint.models import get_model
from pint.simulation import make_fake_toas_fromtim
from pint.logging import setup as setup_log
from pint.fitter import Fitter
from io import StringIO
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import corner
from joblib import delayed, Parallel
```
```python
setup_log(level="WARNING")
```
1
```python
m = get_model("sim1.par")
```
```python
def simulate_and_measure():
    """Simulate one noisy fake-TOA realization from sim1.tim, refit it,
    and return (EFAC1, EFAC1 uncertainty, EQUAD1, EQUAD1 uncertainty).
    The measured values are also printed, space-separated."""
    setup_log(level="WARNING")
    toas = make_fake_toas_fromtim("sim1.tim", m, add_noise=True)
    fitter = Fitter.auto(toas, m)
    fitter.fit_toas()
    measured = (
        fitter.model.EFAC1.value,
        fitter.model.EFAC1.uncertainty_value,
        fitter.model.EQUAD1.value,
        fitter.model.EQUAD1.uncertainty_value
    )
    print(*measured)
    return measured
```
```python
results = Parallel(n_jobs=16)(delayed(simulate_and_measure)() for _ in range(500))
```
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/home/abhimanyu/miniconda3/envs/pint-noise/lib/python3.9/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
1.4881566020063894 0.06793539354744106 1.1027148044770767 0.09950155262022584
1.5273181202038424 0.07295809488996854 1.119973904145485 0.10487039233794392
1.43599381654982 0.07110555677481471 1.197627733507059 0.11002111112182264
1.5843600754836147 0.06878357021222178 1.0367456890589657 0.09361544113534603
1.5606883121042434 0.0670309531852757 1.0574226460835248 0.09253447366462357
1.4957996979934458 0.06285842657231691 1.0458996181976816 0.09017816507837409
1.4420244655988745 0.06947212445983726 1.1683208662365903 0.10635410553919689
1.5319556844417725 0.07161900273479967 1.0693755113537047 0.10200665468559134
1.3955201369877313 0.07206256397906675 1.2195370757805277 0.11556682865113667
1.5383475725502973 0.06678734156670069 1.092662808317204 0.09387927530414587
1.4161023422947812 0.06468364888259094 1.13911298589725 0.09986359791446407
1.3942374752376478 0.07314404203470874 1.2719372711993364 0.11843863896914165
1.3940997855628483 0.06819675816772251 1.244597203134457 0.10918711797273128
1.4844470954871465 0.0659023573679902 1.069436993910786 0.09618517273158049
1.4161294567324816 0.07198580583182547 1.220717515934481 0.11359241551121424
1.5356304622393238 0.06987132934525855 1.0665499696689569 0.09890698729775287
1.4645607367866311 0.06960787470104378 1.177806017793412 0.10485413708633712
1.5894786244908516 0.06453318966853662 0.9904817876878245 0.0865169310318337
1.5519583096287362 0.06440347879222294 0.9853308362255522 0.088769330780559
1.6791386691187176 0.06433804475021232 0.872453402972544 0.08123886120720407
1.5950053334591086 0.06946809255046668 1.010898953968958 0.09390463134904357
1.470646660290502 0.07495010408522534 1.1954877960599375 0.11356813153569162
1.4526975489505962 0.06957816713595692 1.1392601356966077 0.10535855316107211
1.5688437213121 0.06677148485577791 1.0302394155147705 0.09147318374543349
1.5564877338821017 0.06501997232963753 1.0307875077600095 0.08950825917382677
1.6124393214075208 0.06746819910818257 0.969054223072079 0.08964558218347746
1.6149169450114123 0.06326312525188862 0.8914758738762227 0.08327054010607558
1.543145441508288 0.06963156001958988 1.0942295627662277 0.09813578248172199
1.5078208079043216 0.06975316174069335 1.149772608257836 0.10141314784583116
1.4590164638633896 0.06576401094594095 1.114622315401009 0.09816041364057
1.5432687878410993 0.06999894137060411 1.0714454162838436 0.09858002650946705
1.4392591293486918 0.0690945555922113 1.2043948676325837 0.10636487236955235
1.5212001884959392 0.06752427622170377 1.0238962131529177 0.0959999743507069
1.5722207511501125 0.06844938896857548 1.026207911837784 0.09389173879182419
1.4435152593099947 0.07016822447526246 1.1674723477634443 0.10741397202812866
1.6287664157879218 0.06510124233783189 0.9563149978759886 0.08497111851408114
1.5953928858924185 0.06736144985956889 0.9793159583419255 0.09058280161144182
1.5749634413935107 0.06395814969788997 0.9902031634476633 0.08653971873119848
1.5891257686384055 0.06704741569567561 1.0060096535832832 0.0905079396059749
1.5105939981627712 0.0659768009780977 1.0749821175057859 0.09442720573169248
1.3609297204575959 0.07257243782919966 1.285562457724843 0.12082479478114226
1.5193657531613447 0.0661215481638365 1.0741564739204839 0.09403240212097724
1.4777731118500586 0.06912270353922366 1.157903935071391 0.10276989009103826
1.3939972263148275 0.06907266981916856 1.2391794494476833 0.11068026042859846
1.5590843636739236 0.06412905156243673 0.9656500657106144 0.08787454590845012
1.489829730352106 0.06577568880250075 1.0869123334652666 0.09567317124459308
1.4044002700576712 0.06902422602709261 1.2468384444186524 0.10979471007109066
1.4487824296218215 0.07017898597274831 1.1827490992593397 0.1071716663528515
1.5230349346171699 0.06538897393392945 1.0471357785515092 0.09245356365486863
1.5222017523298168 0.06964164879746607 1.0635460833779828 0.09951104577580215
1.4643732870400792 0.07192149338917483 1.182793867628614 0.10884037524381274
1.5227389231694748 0.06838912016792636 1.0681010634497217 0.09745932554451829
1.6205584579077421 0.06791859754943295 0.9747078027089914 0.08980863239279466
1.6298398527553621 0.0645827579819356 0.9466499078732912 0.08412829628190514
1.4619786183519465 0.06760403126452205 1.1253085990787122 0.10114172892593001
1.4243371795731306 0.06703126288292637 1.1768917637160305 0.1036851681625913
1.4810943978304223 0.06822581181143114 1.145712834905798 0.100886438475472
1.5902479616325782 0.06903241166142043 1.003604201760072 0.09353956683197541
1.4598239777442064 0.07171653388448576 1.1729831509356812 0.1087554020224709
1.482419516332314 0.06988086245541389 1.144613714741191 0.10354119282753643
1.4485498618250225 0.07001206670727766 1.2046573809452945 0.10717683151747945
1.5199077107004477 0.06590920128586636 1.048340308410972 0.09352997239820791
1.6071366536353593 0.06647128292414081 0.958546270643347 0.08845917021891461
1.4408728667224036 0.06802033925050892 1.1659167245401467 0.10392757275939002
1.545256752076707 0.06431379358176402 1.0099566646161524 0.08908508617909888
1.3774545585623563 0.07265289511071958 1.2600016212363312 0.11893685763215522
1.6315327869287304 0.0634773789032895 0.8936223142224116 0.08257497664645247
1.5643015316454598 0.06543319435950537 0.9985135446060626 0.08959734365313445
1.5176545515051862 0.07253400469425571 1.0993234625436477 0.10476728944472015
1.4012254734197742 0.07277631749003967 1.2745169355721464 0.11718066208627803
1.4294538793626952 0.07222403960156903 1.2524017272724064 0.11330736700108299
1.510663748850951 0.06408865196351816 1.0389868809216924 0.09115672944138936
1.509986032431797 0.06617671422712038 1.0947408451910767 0.09491654824079816
1.4544664396354758 0.07014576101772066 1.1845211979548016 0.10666725154527917
1.5435035820897034 0.06898662576811034 1.0063331459960836 0.09672772175811152
1.47977025550611 0.06949612089270667 1.1387655874340215 0.10305092952009508
1.5903629010084983 0.06537870471247666 0.9843402195293375 0.08779577922934165
1.4934569552213812 0.07079789651419591 1.1153383712443137 0.10393549225230773
1.3409400366765705 0.0757696913628207 1.3358653183763967 0.12972519993806858
1.4279573610718912 0.07155511039981556 1.1991363019490078 0.11150833696843235
1.541813059395272 0.06416723748163677 0.974892343070266 0.08907701532471662
1.4772551587237912 0.06688238527235074 1.1594302671147017 0.09905586983430377
1.4858355813928472 0.06551729931632985 1.0845158098937775 0.0955215678849409
1.598482374131712 0.06871830306264348 0.9925245466721987 0.09248965670985779
1.495583287824812 0.06742686315813573 1.0410062358815668 0.09776932663849658
1.486869982620274 0.06668599183140947 1.0891271071882114 0.09742532428047024
1.5479852994597039 0.06653395951018819 1.0547732498759825 0.09260170858105485
1.4740490519346638 0.0625879428851397 1.0309490048807273 0.0912229503937419
1.5703957651296452 0.06580401344226804 1.0321312186868123 0.08983504239805867
1.4635907692421044 0.07074172408297785 1.141811847399534 0.10646545263440572
1.4384226744211077 0.07024417742457288 1.18192239930267 0.10813742934374307
1.520619825756823 0.06385217208725695 1.0531379789815567 0.09012368201290487
1.396515316395216 0.07021208976223592 1.2618340413973523 0.11283723630718484
1.4875267138578205 0.06821312857762316 1.1212013442960544 0.1001541865274295
1.593638822474524 0.06761192277567986 0.9780217242486289 0.09109188364184194
1.6219943075864256 0.06184344697417159 0.8933857061592748 0.08059330541254617
1.5053916777576177 0.067880788136655 1.139428817846655 0.09841089523984069
1.3760872844266776 0.06763992738773361 1.236810788777932 0.10965970500609483
1.4359041617335857 0.07134425435168855 1.216086533836417 0.11068715934127797
1.4111141186217493 0.07301628564236393 1.236663072323478 0.11608373870261152
1.409943735506613 0.07035449522588802 1.1877768255577201 0.11084033096865195
1.4480919415490805 0.06846894546231157 1.1752733012469145 0.10421515152141757
1.6616190878519919 0.06166354929260964 0.8530705369729827 0.07832933336345145
1.4797410879115698 0.06775807957610905 1.1570142230206522 0.10031570651415543
1.3695940473059376 0.07073336456914278 1.2990925914301503 0.11688440212058526
1.5547197687179162 0.06739900437611027 1.0332754396899788 0.09344897197212022
1.5635872936343014 0.06421965661383917 0.9927374155884138 0.08770085480031915
1.5548071597149087 0.06702601748514658 0.975303411193542 0.09279493211757964
1.5118872555345144 0.06726370826865798 1.0788161277321324 0.09647052897470608
1.5031785220335927 0.06474737880188178 1.0821408179447032 0.0929656313424743
1.5153919877333524 0.06912576500102492 1.0883756803879168 0.0993179952238445
1.5539062315595173 0.06673362661343626 1.0048078485521765 0.09238664378845396
1.4307508554611703 0.07432234107711383 1.2296012057929142 0.11647703606262348
1.5469481741138864 0.06656621470999337 1.051742942068777 0.09271335505304973
1.5930414274095115 0.06662739678215952 0.9884959396596501 0.08958321566028477
1.398923498915852 0.07370248793433333 1.2757130197830102 0.11905896735297884
1.5737752229659152 0.06642903991307038 0.998009736430587 0.0905484667928139
1.5614654069825542 0.06881930318133148 1.0434542856925235 0.09527509488237425
1.6205543009895536 0.06538166865398745 0.933824120661008 0.0859894796160019
1.4790651088002107 0.0694476869553162 1.1496162732082913 0.10312972391312947
1.4487796708839271 0.07002089686844798 1.1406078784886513 0.10644525701068326
1.4737583588801988 0.07296178930939398 1.1794348183656467 0.1097711383935771
1.503709830279054 0.06617621902860037 1.113594950865343 0.09550207212408561
1.5664885630042282 0.06489001850542349 0.9753453424433145 0.08859063066737367
1.5193140120360968 0.06367794188400501 1.0546971453827816 0.08993321244719414
1.4638502391459687 0.07033840683133205 1.1560990188381077 0.10590909645720004
1.5905387600200007 0.0632952278042917 0.8908529908001123 0.08484876822437866
1.5666166430106736 0.06513830598380564 0.9844429454826097 0.08897178506276154
1.3617108826905007 0.0702368501552113 1.2534967424026138 0.1159429323481407
1.6104667575808433 0.06514697825438395 0.9246332228845477 0.08629980311248306
1.4497762784889432 0.07260075961524158 1.1864956047827806 0.11126470308680812
1.5470709115119343 0.06451572767677398 1.0227706490943678 0.08931060445519955
1.4750542772668411 0.07103901910504033 1.1502925826732442 0.10612075341213968
1.5508397974989154 0.06986785050650475 1.0753945294853924 0.097844114633711
1.3412115431878968 0.07321741499469234 1.32717937747672 0.12477201831105879
1.4390840785381669 0.07135285838641706 1.169425409675418 0.10983778981223646
1.6072416593620187 0.06710891928617928 1.0187169780009198 0.08943344124271804
1.5226536169663656 0.06797411521441468 1.0439184551371368 0.09668550590294434
1.5340465456955843 0.0675712626165597 1.1083402814370649 0.09556104386952226
1.3708235772217225 0.07532124866618839 1.2970638624567106 0.12506216865079328
1.6002435621515547 0.06929970575331687 1.0084709883569956 0.09328589055483691
1.618619102362338 0.0665458948554558 0.9378292800739371 0.08790278056370246
1.5910946245755586 0.06355287326605041 0.9501255928741638 0.08491958088813438
1.5277630102090365 0.07138353440915757 1.1311639731674619 0.10238784725767526
1.5772741741365086 0.06591357375467408 0.9712841099867752 0.08951040544693352
1.5890954784353069 0.06641992242023508 0.9767646882242088 0.08952330165960314
1.5336952008402762 0.06970382843701266 1.116494954415422 0.09910350074421902
1.5309360432573167 0.07008738027273569 1.0599867516896877 0.09956834070969046
1.4244998612102497 0.06712289347779764 1.1718832738102565 0.10377574194971965
1.506645491100491 0.06698118000248701 1.0481579836684811 0.09624361315139195
1.5126468781500717 0.06533388357643086 1.0815172833714017 0.093256691107796
1.5618267301948312 0.06437023760753748 0.962797069303229 0.08808591088883914
1.485602450690652 0.06572599861660254 1.0968200879094612 0.0959650545253263
1.438947134647349 0.07107856058404254 1.2035106932277595 0.10979976402214943
1.5870776009394196 0.06433902230668065 0.9674853990816357 0.08638255345828047
1.5416680887241292 0.06460505381483099 0.9765429804525315 0.08979498543029724
1.5616455496144266 0.06557984802690559 0.9896452811308813 0.09000787418932221
1.4884494819471354 0.06760048812908782 1.1152279931930398 0.09901523878439089
1.6439198467027007 0.06643162591802543 0.9126423027214245 0.08625839077772707
1.4805801449067582 0.06850720834732882 1.098816709773897 0.10100732022862033
1.4675769076852958 0.06712112993433528 1.1464361325912575 0.10007749189455219
1.5003214786402612 0.06761753144738156 1.1219770430270428 0.09820475136162547
1.4832738049579002 0.06911694261026391 1.1414314287182183 0.10216819549489647
1.4526753610020728 0.06736651758061511 1.1245002205175474 0.10146332202559592
1.4298877588855934 0.07083706303343691 1.2255898741443052 0.11045897229490798
1.5231382870492214 0.06709913263737752 1.0863063037166383 0.09542937371854612
1.500471761808981 0.06956474666771272 1.1083091287065108 0.10130289954480202
1.5130193490935744 0.06888061314385747 1.1115667458958922 0.09925622131941966
1.3649043231203686 0.0701260926001057 1.2835593419446714 0.11594299865286847
1.4949912587660983 0.07013107272666173 1.1013612510034791 0.10260657358697563
1.5716022029792718 0.07163662345879884 1.0144171586384134 0.09893048043654584
1.4387233579052963 0.07513902434393856 1.250499298986887 0.11749669671443172
1.4745963485892277 0.07096253690958271 1.1132044610478702 0.1056873957096229
1.4943806797915389 0.06668234822191908 1.1002658671508583 0.09693291226956366
1.4278353121969753 0.07134866164181404 1.1913859009422758 0.11106134164056425
1.646009291399139 0.061917360762352866 0.8616570012879519 0.07954663457481037
1.5517749026353203 0.06656413821807187 1.0240446957766447 0.09229193570078971
1.5336948519333553 0.07053542890508531 1.087546837283054 0.10023954702445478
1.5441276552859593 0.06303655387454422 1.024025015153435 0.08711199235508855
1.4943693682085908 0.06961672694283488 1.137723932869981 0.10210510503162053
1.5042502278029368 0.06941778403883767 1.108815885719913 0.10077705645043945
1.5277419781883956 0.06763329273387 1.0340545029919084 0.09573228655267581
1.436223472011818 0.06929268804142219 1.2093462365685088 0.10702387926320174
1.5747376451693065 0.07065347062501912 1.0243215049013858 0.09718405355886534
1.6074772405716826 0.06801704394943707 0.9738827265342621 0.09081395194716599
1.5506193058161646 0.06611800923973125 1.0410376990819212 0.09170093263294746
1.3624758412902767 0.07469249823973063 1.3652099064597805 0.12614969391831254
1.4570728454700446 0.0706407104694495 1.1654125831338935 0.10707465685155444
1.547201490091317 0.07061405052515368 1.0800145469406448 0.0993263668782384
1.500358910132654 0.068745953137772 1.1176510908334627 0.10003354029366451
1.526059896383329 0.06515600409661158 1.0382086903319654 0.09183122402493205
1.563690677485838 0.06648238787740526 1.0219588693890593 0.09134283114743333
1.4724329206952944 0.0691062103863749 1.147210384623054 0.10305547368523839
1.5014246155355495 0.06408775290727875 1.0371829647804442 0.09180083110226628
1.4592140878162012 0.06764138245075961 1.1267916934616125 0.1014347586391758
1.5177569968761173 0.07150119613890188 1.1175880185753206 0.10322441613434455
1.4857757832784557 0.06936719687374444 1.1473281090676226 0.10244753202434954
1.4763817511038693 0.06543357670821874 1.0662081872721287 0.09598294320912726
1.4080058592109588 0.06564410461709498 1.1336110744618062 0.10215683205831165
1.4995769788714521 0.06651257872148829 1.0517527225702639 0.09600260133576671
1.4285016011480158 0.0739530371817569 1.2183149655483883 0.11587285006011054
1.4966194590289406 0.06613017548127688 1.0866401215248471 0.09576240991929212
1.2990339025378579 0.07482218284955207 1.3659667986877535 0.13312237076869227
1.4611537321521622 0.06828118230143311 1.1684263998775364 0.10277154350052346
1.521745977040686 0.06659310703217383 1.044228641242934 0.09450194703643451
1.5190555685521332 0.06363509073334761 0.9906387601711153 0.08975090966621974
1.585466723277228 0.06815670330276644 1.0247945980885271 0.09252494921302355
1.500754371333717 0.06294098481024195 1.0245188578023157 0.08990858386175204
1.3807042876016684 0.07022644399356193 1.2588029219485228 0.11424726164662524
1.564579838050264 0.06582574955572804 1.0388931466718592 0.0902782760373558
1.4153936619349285 0.0699571601662773 1.2053716652814033 0.10989791295359415
1.434937908393289 0.06827625887114772 1.1836228714486114 0.10505332572291821
1.2600015941993927 0.07416617510402372 1.4698714150759877 0.13892790015703446
1.6462339838826299 0.0652434345003158 0.9063007315258479 0.0843302284372755
1.4966761121972905 0.06641569871907926 1.116461005572144 0.09643728067917957
1.5697875482865418 0.07026133249178941 1.0482176862749943 0.0969902995592465
1.5063227497860034 0.06659097335377759 1.1137738904408159 0.09599910429457213
1.5540569209385349 0.07127887881939904 1.0517634338411206 0.09974300778584039
1.4358996151354648 0.0696597338170026 1.2026435600829046 0.10759882537899099
1.4569125520065107 0.06876579690442769 1.161721967567537 0.10386288000212174
1.4847918689215045 0.06662386933695728 1.0883916542591126 0.09747263808890448
1.5010316065231384 0.06886593448963961 1.0953907746301095 0.10001564623640183
1.4934132186942002 0.06731960886276821 1.0990283190153451 0.09805541486395483
1.4270357336977804 0.06405383292938507 1.1015479799494428 0.09759704519234325
1.506524422396105 0.07007210884904977 1.1199132048209417 0.10176724174882548
1.4708146664564574 0.06848193190244703 1.1229761201509896 0.10191185815911954
1.5731592491236692 0.06586840439567355 0.9831007363100247 0.08970042632691894
1.496978730333743 0.06683249087659378 1.0391352382916172 0.09667660449143997
1.5352734601895233 0.06903133590318834 1.0477064851644162 0.09749584094804627
1.5712917475876191 0.06904392337256716 1.0410224703593145 0.09493755444560481
1.5204898275322636 0.06413436899333944 0.9693589973690676 0.09048871061998762
1.4906305851699726 0.07177808151209235 1.132732099078282 0.105930995349821
1.4494911465323825 0.0671100338037523 1.1376472683360435 0.10139320397536893
1.5031621351811888 0.06666195344280897 1.0653992226022049 0.09604744287571315
1.4855762198566298 0.06796128096483776 1.0998735159519069 0.09972086798135954
1.4244958996582162 0.06796862502322623 1.1823790296167782 0.10536836815134554
1.5300352096419916 0.06506135390548504 1.019794260622092 0.09135464126252733
1.5665099563788412 0.072250428640468 1.1105267025535033 0.1007261449924348
1.46825096471124 0.06976465334368855 1.1624651838130986 0.10465325008405377
1.4396752791002674 0.06746852130170414 1.1598184640854985 0.10300991480889406
1.581170460255828 0.0652297345122704 1.0136472244101256 0.0881736994878415
1.497841084123773 0.06718386137062253 1.091745272863044 0.09745160185111133
1.5703062451702448 0.06427344431181935 0.9845743823888119 0.08734739291074002
1.433539502054501 0.07281174676481478 1.2597291043698589 0.11408513861780596
1.4937575468439626 0.06713530008843231 1.0566752570440883 0.09748428950956849
1.5914868883468447 0.06784067749742957 1.0547046564689253 0.09171937407572542
1.5339225496289683 0.06755529051814184 1.0395982370644095 0.09518101366505681
1.550778198563852 0.06529537293773242 1.0076085678588131 0.09029377089314657
1.4682283642531428 0.07019206908597588 1.1483948104645856 0.10522954387299488
1.3907517946326478 0.07411926583334465 1.283704432160143 0.12071365937820795
1.4829934990321394 0.06639536732173988 1.098857002739115 0.09729362057442803
1.4686256608393249 0.07049159283438333 1.159759845334306 0.10581940204395521
1.5541138364128937 0.0668022293275782 1.0834849671922315 0.09274989453814345
1.4539382340907334 0.06576296070975093 1.1148764653244196 0.09855144778727296
1.4506025156589235 0.067654788557403 1.1228105030114863 0.10210478047730402
1.675707959192698 0.0685276074269573 0.9458925298768587 0.08732702848628326
1.5917069433389452 0.06326845523899496 0.978654890015815 0.08437912519430044
1.4642142223810808 0.06993418744266129 1.154206261469494 0.10517730998287565
1.5150582533354382 0.0700661501525407 1.085904075948026 0.10086156694638126
1.484593921181647 0.0708885740555283 1.1646984783387404 0.10525669729732141
1.4966606760796897 0.06306380569300563 1.0514947343833096 0.09048103140764192
1.441415776588616 0.06708697076128024 1.1548018403835332 0.1021615599132169
1.5518924800791432 0.06853785971070897 1.0578154442719205 0.09555639435006377
1.5912256963291374 0.06561215406609348 0.9812513920034716 0.08810928185801321
1.5327253155941178 0.06757862486180019 1.0170229461253042 0.09524671219560833
1.437758181484478 0.07049439771574584 1.1972651176803861 0.10881374701631855
1.4404750363735925 0.06987079099917815 1.1569330973397494 0.10704223522602183
1.5040260677482211 0.06479805673064903 1.0496080383204982 0.09283565344838615
1.445674142052998 0.06627109905966927 1.1284345074688322 0.10017573150803792
1.4263512356440362 0.06647075941771582 1.162768435794867 0.10239106459027698
1.5063047201890791 0.06859357336621304 1.0841839000282834 0.0990985403656315
1.590692287851448 0.06404283488781219 0.9638667824737792 0.08568887397016661
1.5370077693964812 0.06917066461802263 1.095591903344045 0.09784804881749894
1.5918703002310375 0.0672445723294143 1.0177746667734218 0.09065180089400614
1.5020713963507593 0.06933793833142245 1.1507109355070875 0.10117561680763927
1.4648974247878117 0.06641968581087941 1.0956665456322365 0.09868496894570758
1.5813339270869022 0.06512122320396412 0.9997217171432236 0.08797691066214587
1.5973839693339253 0.06417846573720663 0.9475737236772748 0.08551758798820495
1.5837598336259684 0.06661318277443619 0.9846247949024843 0.09017425980627762
1.4940370141025687 0.06846037388011472 1.0935595400713156 0.0998631171373518
1.4158762240614289 0.06933860844711033 1.199594549719328 0.10869850890743227
1.384555807767766 0.06976462565785858 1.2541697300580847 0.11299224217747564
1.565036030443315 0.06699532699262484 1.0178871231192397 0.0920606070829823
1.4152706934138433 0.07020187364628386 1.2386828649530008 0.11080487384983181
1.4791064653376162 0.06578166366691236 1.0934082016652475 0.09651924259138217
1.5215729996570198 0.06602292047744716 1.0491710308438464 0.09360095097656251
1.5740325050138724 0.06799169039819618 1.0266939093339804 0.09304551183771133
1.6190257088121545 0.06202460075255649 0.9029824536577229 0.08099368231411946
1.5124812265379612 0.06824227119853292 1.0859831254962076 0.09807393914574239
1.4970595285621568 0.069442169362255 1.1237177532893652 0.1014854359446819
1.46299755756831 0.06914274883411256 1.14660072753192 0.10386070944434347
1.6027505952932875 0.06741585089391834 0.9411191244257745 0.09026888065092203
1.486109830569877 0.06721683219471833 1.13453225994758 0.09871148082691056
1.4472699748294546 0.06865319247450531 1.146288252086308 0.10429138820001256
1.542098720411558 0.0651136659082553 1.0322263502380291 0.09063782173603806
1.4634524061444 0.07154142024978953 1.190516498934168 0.10836883770142235
1.486669704894482 0.07075903635954052 1.2043680678705537 0.10534168038734658
1.4922477991039353 0.06470944288253118 1.0487785855589102 0.09353108915083656
1.5039420850992846 0.06465144392092616 1.0865853352018207 0.09277740416573589
1.465865780258018 0.07073085791838682 1.1810793180558774 0.10668619120316128
1.437818602080908 0.06716373914901513 1.1311923712490866 0.10235847328735578
1.5282109389245309 0.06188670559919233 0.9753297779100083 0.08626375224566948
1.51203470018095 0.06658614068303358 1.0345717809875532 0.09515689969293047
1.5373838630205539 0.06699289467471548 1.0451483046784003 0.09404535448647877
1.5562155076204962 0.06369317284393222 0.9571152277831023 0.08737987649092728
1.5058238280150067 0.06995861699289019 1.1011603639701908 0.10148908809641052
1.5301613077891958 0.06378571104957549 1.0399485253884688 0.08931410359366733
1.4745432169413937 0.0698723349142261 1.1634682957317013 0.10434257376461613
1.448609524899133 0.06577361418404568 1.1245926302576859 0.0990606101735675
1.500175802233695 0.06776888989780164 1.0907684251382113 0.09823921311751135
1.5777567151987555 0.06838861922273186 1.0107737998117656 0.09338763527214529
1.6006983313499534 0.06602740432583926 0.9632512523551883 0.08817125639916613
1.4083697064856453 0.07101459316305553 1.1900738713441976 0.11216389011593345
1.6367888379042121 0.06742926869440802 0.9993630048465413 0.08801397960457635
1.5800345562105294 0.06600649901839556 0.9711475063473847 0.08947510781894
1.457961367246877 0.06903098822715738 1.140129375678945 0.10401237902173179
1.471435033229507 0.06790989416553998 1.1510914611647818 0.10115600871963765
1.4490168872589821 0.06543807709599234 1.1219883322509376 0.09843250150489541
1.444999646324619 0.07167549952140394 1.2024406746893088 0.11029621962005833
1.4611131027838529 0.06919500766587242 1.1255376787587068 0.10390424976289211
1.5837104274676352 0.0672039431837359 0.9627672863843754 0.09113528547856656
1.470100245196995 0.07119918595673409 1.186645134937535 0.10719743481433508
1.5458017788683307 0.06686349013269469 1.0378231227869188 0.09322280841956777
1.5918416876499868 0.06549483911379579 0.9956768206140114 0.08788209468207314
1.4988922380935095 0.0668591795054145 1.074752278519417 0.0967353918600232
1.5177287701180442 0.06675765887534243 1.095417952222691 0.09531718243320608
1.5251658496619158 0.06682367910138462 1.0837948182189021 0.09481961787926144
1.539631930051639 0.06500704207289472 1.0066087135375201 0.09058609448501488
1.5373190171919877 0.06946441562855085 1.0772829788302745 0.09818605366740457
1.5107557862203351 0.061975025607326324 1.0333323090440476 0.08763366941616971
1.5515377571380518 0.06695372716092295 1.0437929569019688 0.09298910408594539
1.4435505175817593 0.07116861686086133 1.210897925893121 0.10966232407729659
1.4936271780128696 0.06589813529265541 1.0633373756373168 0.09547099983176016
1.3720334493369402 0.07598547573295183 1.3534340523795574 0.1272987160419061
1.6805926178634292 0.06899508951665619 0.9183853139805317 0.08784546115665204
1.5762008229430038 0.069709881506695 1.0141196271974275 0.0955788730604212
1.4100187901218042 0.07047624798016798 1.2263957881117864 0.11156776222474635
1.5153611414289896 0.06959994078349109 1.1599540161856883 0.1006912853470152
1.4186557399338957 0.0697871170364571 1.2376883490431656 0.10976860699953013
1.4489562017219635 0.0685801729407356 1.1622645526272968 0.10419345027458214
1.6267425573064276 0.06590451532718722 0.9425441644685506 0.08637934987293495
1.4938787904431734 0.07104119913652793 1.1373965976834635 0.1044987562486229
1.479853074415434 0.07165752337240093 1.131197137973167 0.1065803152980085
1.5312575844817302 0.07003766265769379 1.0939526998202322 0.09965852386475266
1.4582272556314189 0.0725861546927235 1.2234808631119445 0.11101640646496998
1.4156019390920125 0.0742860451595023 1.274216536562095 0.11851512626736278
1.6730425709997356 0.06680276490274185 0.8714724479699091 0.08535239688262726
1.640755030501233 0.06466686640630655 0.9456149500333578 0.08359497927694484
1.492757263391995 0.06774302866109802 1.1003913212986611 0.0988172495753957
1.3253855656118385 0.07303451453011482 1.3423865958828216 0.12636945709824612
1.4618702379141366 0.0647581866265133 1.1057713843009733 0.09616611648724822
1.5590073043456236 0.06311133377539327 0.9472179357635663 0.08628508365357562
1.6680164139591023 0.06515208953872366 0.8534335324596563 0.08332428339294295
1.5448129866802311 0.06820984247744351 1.0081075063369958 0.09539096266321696
1.450653360852046 0.06662700670177774 1.1640160671649553 0.10073131894261411
1.4637163816295637 0.06918484998878617 1.1291138739444635 0.10371062934219522
1.5036427831539299 0.06784296573399676 1.1179082875386228 0.09829742836682
1.421476368683947 0.07123541703378844 1.2194078422339985 0.11179348978978643
1.4232189957656398 0.07245853024246274 1.2203634524811888 0.11378258724546186
1.3921177174109785 0.07449666385589604 1.29287664357669 0.12142459181474503
1.5765563865536205 0.06699469647279396 0.9950098970729724 0.0912564578385205
1.526772949380862 0.06661827283035088 1.0195485477365411 0.09411733054827291
1.4914580777644217 0.0693723772199584 1.1045299018799724 0.10164824843451377
1.5102342264122233 0.0673300695819322 1.0481103847230573 0.09655408828763863
1.4965739919905567 0.0652004791436473 1.0818842296396578 0.09419245017412917
1.5936686576060415 0.06661135482064857 0.9784641377919091 0.08952216357571427
1.6830923507318059 0.06301773337162614 0.8263196571566642 0.07949429490626388
1.3985549495490133 0.06676102299432413 1.1930741129569606 0.10554952001428532
1.4902887990295561 0.06820674342096308 1.1061791870479585 0.09981593959010988
1.527336324537365 0.06891603370264116 1.0839618955830999 0.09806731621093821
1.5902743917251525 0.06493061275298662 0.9756971628608578 0.0871012833834526
1.4100918772522761 0.07052304651779377 1.2182040867392923 0.11152733798332747
1.4737462426430625 0.0701585197831453 1.1784193979397575 0.10505315896547682
1.5148933750651288 0.06711836070945904 1.0729698646480683 0.09598125830730538
1.5271215689022568 0.06722730971428069 1.0685412469905142 0.09525461906906543
1.514053912881641 0.06921858137689162 1.0838769909167216 0.09954129496244071
1.508871329117007 0.06431430000021836 1.036986525118847 0.09164937420167271
1.4306590670256139 0.07145841495053751 1.2248391601808126 0.1114596631151627
1.386699735716251 0.07841488115010138 1.328686703742274 0.12967945486995452
1.4214544060517016 0.07250403088058117 1.2544737654353035 0.11453625950556552
1.5904088930640767 0.06803474935599259 1.0222091212169597 0.0919958077845193
1.4503358503224226 0.06684267853755112 1.113472469593814 0.10066438957408962
1.5177384684461943 0.06517569520476484 1.099892311589946 0.09274662702006074
1.4574956917170043 0.07162483722028999 1.1820339728708293 0.10890096562291787
1.4161964153085538 0.07406101772922967 1.2455208298783231 0.11758878442224424
1.281207254959585 0.07291626139363104 1.3909259616877503 0.13198572247872398
1.4219292474772118 0.07078844301372218 1.2178776696079605 0.11095352328174056
1.6297512917382173 0.06570136905686924 0.9697180587463646 0.08581201124259648
1.6930292290908142 0.06615396041073074 0.8928159303038424 0.08300700147196291
1.4956455903954369 0.0684763301610934 1.054610679778238 0.0995514940787941
1.5425898577628563 0.06762477055158495 1.048261487486202 0.09470908982024573
1.5870474142022497 0.06618328602699071 1.0007011239103973 0.08928145929624297
1.4560644181309563 0.0681621266096103 1.1703305288585928 0.10299472040286825
1.495246485831073 0.06826155033933577 1.14144698867505 0.0998231323890854
1.4114551019045294 0.06752190869112636 1.1852554367629227 0.10571069500536867
1.4535593074064461 0.06652407927538102 1.1382320520017657 0.10007686942278561
1.5425335467489145 0.06641728704825506 1.0555650260139624 0.09279497381752834
1.5141798263348967 0.06887599344590763 1.0799930912679807 0.09894893568674591
1.401153858280201 0.06998644343668867 1.2008866798481281 0.1111334818815274
1.3940755235830709 0.07202709042074754 1.2553199894064646 0.11618750548292517
1.5149808480112579 0.06767447422498328 1.066960638739534 0.0968545998309924
1.4752073154442793 0.06753902717276015 1.134993875968016 0.10008853385574852
1.455187394483501 0.0690676197445809 1.1738024476470228 0.10464733355470979
1.4107625155437553 0.06818779565348036 1.2163318921069806 0.10733401174139967
1.5114484503047547 0.07025807253870842 1.1261199760085092 0.10174903502486118
1.532856940229392 0.07085178786642861 1.03308397275464 0.10055265424981673
1.4945726286581316 0.06957098414073803 1.1315867383409652 0.10195807982612606
1.5200392532857954 0.0673699645124778 1.0796398681169266 0.09605638970062828
1.494788804292653 0.07534125516132562 1.19550310951321 0.11218315661507403
1.5207004984794794 0.0682022434348568 1.0992094743554957 0.09748978268800917
1.5155053341517946 0.07110404091975812 1.1179365684012292 0.10275288684963738
1.5958495212225596 0.06551295217427418 0.9884847091730589 0.08765106524228271
1.4347418460598786 0.06739662651720706 1.179716569291666 0.10350188193203093
1.550351282544784 0.0667369454418419 1.0477846571879115 0.09273739286145301
1.5248790878307692 0.06301558258458814 1.0138773348371248 0.08835161379145355
1.5942083444203512 0.06271863957105635 0.9093121789247537 0.08357675176898116
1.4259977659935439 0.06886358188812411 1.1827755058406353 0.1068042599204584
1.6075905019504506 0.06505915529032849 0.9577649279084473 0.0862272162620264
1.5046019169589175 0.06926865696297185 1.1274517657359424 0.10065552888032672
1.5361119569082435 0.06976823606639027 1.0258726686240642 0.09855158886633927
1.5590102221196815 0.066325431339812 0.9904300326428783 0.09137799099525819
1.4883819542077725 0.06573478674496115 1.0870831470915732 0.09571264708466021
1.496593107666459 0.06988817010575372 1.1471534831111068 0.10247223229650804
1.5981515194261036 0.06749866537535469 0.9704210780408429 0.09062535076759197
1.3814644361763926 0.07118890307613356 1.2472883575781253 0.11572391486090056
1.4479709126406415 0.06931246446507955 1.161487009621435 0.10551714510226799
1.580837014008487 0.06674399558865475 0.9902775112377469 0.09057348620837265
1.473029537809016 0.06915841346324011 1.142295521488774 0.10304873109585713
1.5169473779266225 0.06595426152943502 1.0286085803314522 0.09375233387722072
1.4843300279301883 0.07123384330351383 1.1645164158881038 0.10585144614378542
1.4931886864064887 0.065956733387292 1.0681860721709564 0.0956241332784734
1.5332712189071582 0.06617557888521133 1.0560195279043691 0.09305205516468876
1.511220880455137 0.06813145900419315 1.0798490133682561 0.09794885456984802
1.520661863043752 0.06781342238725763 1.052408827057597 0.09660125092027337
1.4692857688798535 0.06759263243781234 1.1505303602439303 0.10078191052730316
1.5485013581740446 0.06528112585881318 1.037617382641948 0.0904871220537828
1.5218061080974332 0.06557477226319999 1.0735996904272238 0.09296059854849392
1.486581867134851 0.0681768171320415 1.118816567292132 0.10014647749690092
1.486282361591506 0.06462598502501647 1.039606359946793 0.09379225320410631
1.4734224429182423 0.06425395396852848 1.1066851704245688 0.09445507262942869
1.4932778549539976 0.06832695951683469 1.1468703599132957 0.10013214538693839
1.6057286068133116 0.06275783629843104 0.9567446491007825 0.08273258062494764
1.5162618366059897 0.07050488865642719 1.1064209641177678 0.10162785407016385
1.4196674012418207 0.0721517977245691 1.2141039307291672 0.11347417258873257
1.5774951184894617 0.06394735670618111 0.9858457800561097 0.08635967811127232
1.4137655446828687 0.07020274401779782 1.2353291941631304 0.11088829571589433
1.522980802597796 0.06875855064859132 1.0683486672476086 0.0980435628477303
1.5577162941069658 0.06262227038791408 0.9711626095189698 0.08552717447360703
1.4225700393055682 0.07071302984989189 1.2171553673792583 0.11075648646682129
1.4591581002552634 0.06811804414058566 1.16132305921466 0.10257745856042952
1.452202883713671 0.0657489612132966 1.1662471218267427 0.09912626659264691
1.4797076206112711 0.0659061914654593 1.1190764883689526 0.09686704919206335
1.452236477741406 0.07161122895181873 1.1881698297821122 0.1093928243047923
1.5102406999239044 0.06540703503778968 1.061064482749076 0.09344553983544199
1.5109580756296377 0.0681792704359136 1.0437548838909199 0.09787599364142195
1.4843108955418707 0.06787955342834248 1.1134023278539367 0.09978118122449413
1.4747945319428553 0.0721757850214869 1.1658515717556481 0.10821161677646622
1.4785919980710065 0.06888255376295019 1.146013577113595 0.10218460199591967
1.5691734868460645 0.06668258210444059 0.957705507265844 0.0912983998829725
1.6118298435065768 0.06900756100974528 1.0116107222950115 0.09206465686759827
1.526643125630413 0.06429969730534052 1.00541346146681 0.0903250723079315
1.611960225455988 0.06841161665495522 1.0081458049297787 0.09113045252022926
1.602356641330698 0.06918909464404302 0.9878451225371239 0.0929621820231978
1.6118372892887198 0.06390832516538716 0.9421505782904269 0.08420369577988043
1.5403053041634023 0.06622151460320688 1.0169202827591044 0.09251926677004581
1.5070394924571686 0.0654308819656025 1.0609229935820936 0.09371326539356238
1.4837326314492005 0.06464454257967131 1.111793995898201 0.09439229960346958
1.5515069389893075 0.06703976138250646 1.0563241506899623 0.0931771190531209
1.486718086873669 0.07152864674417346 1.1564152607923164 0.10606442362588546
1.5298870079635993 0.06750236120257413 1.0551353017931246 0.0954416095490663
1.4708747848540384 0.06927966208719123 1.1257448785490485 0.10327186333362415
1.5057771545185323 0.06635494643768913 1.0921898110893895 0.09549970808367905
1.4536777983438454 0.06945731638831627 1.162671345179958 0.10531139282283435
1.5167165087899184 0.0665882005713891 1.0761334404397185 0.0949974762938021
1.4762475992599102 0.06407458912712563 1.0790113019741312 0.09377091413568651
1.538465204027182 0.07051782301367195 1.0426642892147442 0.09963379501599755
1.4979409940825994 0.06424690199728582 1.0701403508754377 0.09244450768784271
1.5317526110693287 0.06614126431017822 1.0441879560396452 0.09305762189171743
1.3846573047537434 0.0720953347115262 1.2552613658175544 0.11718038861395423
1.490067734521714 0.06849646499526413 1.122622193257124 0.10044337475858547
1.4566831964296103 0.06692140556335043 1.1582535556488012 0.10070228977691267
1.4527399063095683 0.06816267569545736 1.1584011910169763 0.10313505519418188
1.5352928969316002 0.06604960191146436 1.0242970134121714 0.09260455409157738
1.4589051061135458 0.06836018457281452 1.1577178578417746 0.10297204003577899
1.555312025566111 0.06459902292288149 0.9707590695578381 0.08887311011302594
1.4472216846673107 0.06765530701079955 1.189513344529835 0.10305213859793544
1.4367113778358949 0.06810782507410404 1.168712179194911 0.10444792299870086
1.4696825057703833 0.06633343942078 1.0962207141555116 0.09817732898570819
1.3834957922286533 0.0721611227679934 1.2420687844812186 0.11719448038997571
1.4414082684374665 0.06735208051031993 1.1433590158804587 0.10250610530938409
1.4399631018266243 0.07089686701890016 1.1921464256875876 0.10925498943432502
1.5492085849315156 0.0669835416890751 1.01652460291162 0.0931275232250858
1.584687126787588 0.0613720499859997 0.9363608351015257 0.08189238507055972
1.5928733005530984 0.06861937531197008 0.9902226479889861 0.09271168918400732
1.5066313286523172 0.06685701522671135 1.0651483644321005 0.0961140894954853
1.5762436589262814 0.06718557676624765 0.9829848417821153 0.09158099669303225
1.548172257661177 0.0627468805125596 0.9914514609894206 0.0863396197515478
1.4094590162099758 0.07097321750435284 1.1894900458429618 0.11198785939953132
1.5985228975353898 0.06792969234836385 0.9877746133635719 0.09125870833469057
1.3918388736990195 0.070019018515353 1.205387100978334 0.11207792049466857
```python
# Unpack the per-realization white-noise fit results into columns.
# Each row of `results` holds one simulated-data fit:
#   (EFAC1 value, EFAC1 uncertainty, EQUAD1 value, EQUAD1 uncertainty)
# -- see the commented-out simulation loop below for how they were produced.
results = np.array(results)
efac_vals = results[:,0]   # fitted EFAC1 values
efac_errs = results[:,1]   # EFAC1 1-sigma uncertainties
equad_vals = results[:,2]  # fitted EQUAD1 values
equad_errs = results[:,3]  # EQUAD1 1-sigma uncertainties
# Original 500-realization simulation loop, kept for reference:
# for idx in range(500):
# if idx % 20 == 0:
# print(idx)
# t = make_fake_toas_fromtim("sim1.tim", m, add_noise=True)
# ftr = Fitter.auto(t, m)
# ftr.fit_toas()
# efac_vals.append(ftr.model.EFAC1.value)
# equad_vals.append(ftr.model.EQUAD1.value)
# efac_errs.append(ftr.model.EFAC1.uncertainty_value)
# equad_errs.append(ftr.model.EQUAD1.uncertainty_value)
```
```python
# Scatter the fitted (EFAC1, EQUAD1) pairs with their 1-sigma error bars,
# mark the injected parameter values, and overlay 2D density contours.
plt.errorbar(x=efac_vals, y=equad_vals, xerr=efac_errs, yerr=equad_errs, ls="",
             label="Measurements", alpha=0.2, color="red")
# Injected (true) values from the timing model `m`.
plt.axvline(m.EFAC1.value, color="blue")
plt.axhline(m.EQUAD1.value, color="blue", label="Injected value")
# Smoothed 2D histogram / contours of the measured distribution.
corner.hist2d(np.array(efac_vals), np.array(equad_vals), bins=8, smooth=0.2)
plt.xlabel("EFAC", fontsize=13)
# NOTE(review): the y-label text "ΞΌs" looks like mojibake for the micro
# sign ("µs") -- confirm the intended label before regenerating the figure.
plt.ylabel("EQUAD (ΞΌs)", fontsize=13)
plt.tick_params(axis="both", labelsize=11)
plt.legend()
plt.tight_layout()
plt.savefig("sim1-multisim.pdf")
```

```python
```
|
abhisrkcklREPO_NAMEpint-noisePATH_START.@pint-noise_extracted@pint-noise-main@sim1-efac-equad@Sim1-multisim.ipynb@.PATH_END.py
|
{
"filename": "plot_triangle_planck.py",
"repo_name": "philbull/RadioFisher",
"repo_path": "RadioFisher_extracted/RadioFisher-master/plotting/plot_triangle_planck.py",
"type": "Python"
}
|
#!/usr/bin/python
"""
Make a triangle plot for a set of parameters.
"""
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm
import matplotlib.ticker
from units import *
from mpi4py import MPI
import os
import euclid
# NOTE: this script aborts immediately -- it is marked obsolete. Everything
# below the exit() call is retained for reference only and never executes.
print "Obsolete."
exit()

USE_DETF_PLANCK_PRIOR = True # If False, use Euclid prior instead
MARGINALISE_OVER_W0WA = True # Whether to fix or marginalise over (w0, wa)

cosmo = rf.experiments.cosmo

# Experiments to plot (each name maps to an output/<name> file prefix) and
# the corresponding legend labels.
names = ['EuclidRef', 'cexptL', 'iexptM', 'iexptM'] #, 'exptS']
labels = ['DETF IV', 'Facility', 'Mature', 'Planck'] #, 'Snapshot']

# TESTING -- the lists above are overridden here with a test configuration
# ('PPlanck' selects the Planck-prior-only branch further down).
names = ['cexptL', 'cexptL', 'cexptL']
labels = ['PPlanck', 'Planck', 'Facility']
#names = ['cexptL', 'iexptM', 'exptS']
#labels = ['Facility', 'Mature', 'Snapshot']

# Per-experiment colour pairs: index 0 is used for ellipse edges and 1D
# curves, index 1 as the lighter fill colour (see the ellipse plotting code).
colours = [ ['#CC0000', '#F09B9B'],
            ['#1619A1', '#B1C9FD'],
            ['#FFB928', '#FFEA28'],
            ['#5B9C0A', '#BAE484'] ]

scale_idx = 2 #1 # Index of experiment to use as reference for setting the x,y scales
nsigma = 4. #4.2 # No. of sigma (of reference experiment 1D marginal) to plot out to

# Set-up triangle plot
Nparam = 6 # No. of parameters
fig = P.figure()
# Lower-triangular grid of subplots: axes[j][i] is row j, column i.
axes = [[fig.add_subplot(Nparam, Nparam, (j+1) + i*Nparam) for i in range(j, Nparam)] for j in range(Nparam)]

# Fixed width and height for each subplot
w = 1.0 / (Nparam+1.)
h = 1.0 / (Nparam+1.)
l0 = 0.1   # left margin
b0 = 0.1   # bottom margin

# Prepare to save 1D marginals
params_1d = []; params_lbls = []

# Loop though rf.experiments.
_k = range(len(names))[::-1] # Reverse order of rf.experiments.
# Per-experiment loop: load Fisher matrices, apply the chosen Planck prior,
# invert to a covariance, then draw ellipses/1D marginals into the triangle.
# NOTE(review): leading whitespace was stripped from this extracted source;
# the nesting below is reconstructed from the data flow -- confirm against
# the upstream repository before running.
for k in _k:
    root = "output/" + names[k]
    print "-"*50
    print names[k]
    print "-"*50

    # Load cosmo fns.
    dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
    zc, Hc, dAc, Dc, fc = dat
    zs, Hs, dAs, Ds, fs = np.genfromtxt(root+"-cosmofns-smooth.dat").T
    kc = np.genfromtxt(root+"-fisher-kc.dat").T

    # Load Fisher matrices as fn. of z
    Nbins = zc.size
    F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]

    # EOS FISHER MATRIX
    # Actually, (aperp, apar) are (D_A, H)
    pnames = ['A', 'b_HI', 'Tb', 'sigma_NL', 'sigma8', 'n_s', 'f', 'aperp', 'apar',
              'omegak', 'omegaDE', 'w0', 'wa', 'h', 'gamma'] #, 'Mnu']
    #if "Euclid" not in names[k]: pnames.append('Mnu')
    pnames += ["pk%d" % i for i in range(kc.size)]
    zfns = [1,]
    # Indices excluded from the combined matrix (plus all P(k) bins below).
    excl = [2, 6,7,8, 14,] #15] # 4:sigma8
    #if "Euclid" not in names[k]: excl.append(15)
    excl += [i for i in range(len(pnames)) if "pk" in pnames[i]]
    F, lbls = rf.combined_fisher_matrix( F_list,
                                         expand=zfns, names=pnames,
                                         exclude=excl )

    # Apply Planck prior
    if USE_DETF_PLANCK_PRIOR:
        # DETF Planck prior
        print "*** Using DETF Planck prior ***"
        l2 = ['n_s', 'w0', 'wa', 'omega_b', 'omegak', 'omegaDE', 'h']
        F_detf = euclid.detf_to_rf("DETF_PLANCK_FISHER.txt", cosmo)
        Fpl, lbls = rf.add_fisher_matrices(F, F_detf, lbls, l2, expand=True)
    else:
        # Euclid Planck prior
        print "*** Using Euclid (Mukherjee) Planck prior ***"
        l2 = ['n_s', 'w0', 'wa', 'omega_b', 'omegak', 'omegaDE', 'h']
        Fe = euclid.planck_prior_full
        F_eucl = euclid.euclid_to_rf(Fe, cosmo)
        Fpl, lbls = rf.add_fisher_matrices(F, F_eucl, lbls, l2, expand=True)

    # FIXME: Use Planck prior alone
    if labels[k] == 'PPlanck':
        # Replace the experiment Fisher matrix entirely with the Planck
        # prior, plus a weak diagonal prior (1e2) on the extra sigma8 entry.
        lbls = ['n_s', 'w0', 'wa', 'omega_b', 'omegak', 'omegaDE', 'h', 'sigma8']
        F2 = euclid.detf_to_rf("DETF_PLANCK_FISHER.txt", cosmo)
        Fpl = np.eye(F2.shape[0]+1) * 1e2 #sigma8
        Fpl[:F2.shape[0],:F2.shape[0]] = F2
    elif labels[k] == 'Planck':
        # Keep the experiment + Planck-prior combination as-is.
        pass
    else:
        # Revert Fisher matrix to prev. values (without Planck prior)
        Fpl[:-1,:-1] = F
        tmp = Fpl[-1,-1]   # NOTE(review): saved but never used afterwards
        Fpl[-1,:] = 0.
        Fpl[:,-1] = 0.
        Fpl[-1,-1] = 1e2

    # Remove unwanted params
    fixed_params = ['w0', 'wa']
    Fpl, lbls = rf.combined_fisher_matrix( [Fpl,], expand=[],
        names=lbls, exclude=[lbls.index(p) for p in fixed_params] )

    # Invert matrices
    cov_pl = np.linalg.inv(Fpl)

    # Store 1D marginals
    params_1d.append(np.sqrt(np.diag(cov_pl)))
    params_lbls.append(lbls)

    # Set which parameters are going into the triangle plot
    params = ['h', 'omega_b', 'omegak', 'omegaDE', 'n_s', 'sigma8'][::-1]
    label = ['h', 'omega_b', 'omegak', 'omegaDE', 'n_s', 'sigma8'][::-1]
    fid = [ cosmo['h'], cosmo['omega_b_0'], 0., cosmo['omega_lambda_0'], cosmo['ns'], cosmo['sigma_8'] ][::-1]

    # Loop through rows, columns, repositioning plots
    # i is column, j is row
    for j in range(Nparam):
        for i in range(Nparam-j):
            ax = axes[j][i]

            # Hide tick labels for subplots that aren't on the main x,y axes
            if j != 0:
                for tick in ax.xaxis.get_major_ticks():
                    tick.label1.set_visible(False)
            if i != 0:
                for tick in ax.yaxis.get_major_ticks():
                    tick.label1.set_visible(False)

            # Fiducial values
            ii = Nparam - i - 1
            x = fid[ii] #rf.experiments.cosmo['w0']
            y = fid[j] #rf.experiments.cosmo['wa']
            p1 = lbls.index(params[ii])
            p2 = lbls.index(params[j])
            ax.tick_params(axis='both', which='major', labelsize=12)
            ax.tick_params(axis='both', which='minor', labelsize=12)

            # Plot ellipse *or* 1D (diagonal panels get the 1D marginal)
            if p1 != p2:
                # Plot contours
                AAA = 1.
                if labels[k] == 'PPlanck': AAA = 0.5
                ww, hh, ang, alpha = rf.ellipse_for_fisher_params(
                    p1, p2, None, Finv=cov_pl)
                # Draw the 2-sigma ellipse first (kk=1), then 1-sigma (kk=0).
                ellipses = [matplotlib.patches.Ellipse(xy=(x, y), width=alpha[kk]*ww,
                    height=alpha[kk]*hh, angle=ang, fc=colours[k][kk],
                    ec=colours[k][0], lw=1.5, alpha=0.5*AAA) for kk in [1,0]]
                for e in ellipses: ax.add_patch(e)

                # Centroid and axis scale
                if k == scale_idx:
                    sig1 = np.sqrt(cov_pl[p1,p1])
                    sig2 = np.sqrt(cov_pl[p2,p2])
                    ax.plot(x, y, 'kx')
                    ax.set_xlim((x-nsigma*sig1, x+nsigma*sig1))
                    ax.set_ylim((y-nsigma*sig2, y+nsigma*sig2))
            else:
                # 1D Gaussian marginal, normalised to unit peak height.
                sig = np.sqrt(cov_pl[p1,p1])
                xx = np.linspace(x-20.*sig, x+20.*sig, 4000)
                yy = 1./np.sqrt(2.*np.pi*sig**2.) * np.exp(-0.5 * ((xx-x)/sig)**2.)
                yy /= np.max(yy)
                ax.plot(xx, yy, ls='solid', color=colours[k][0], lw=1.5)

                # Match x scale, and hide y ticks
                if k == scale_idx:
                    ax.set_xlim((x-nsigma*sig, x+nsigma*sig))
                # NOTE(review): the next two calls are placed outside the
                # scale_idx check here -- confirm the original nesting.
                ax.tick_params(axis='y', which='both', left='off', right='off')
                ax.set_ylim((0., 1.08))

            # Set position of subplot
            pos = ax.get_position().get_points()
            ax.set_position([l0+w*i, b0+h*j, w, h])

            if j == 0:
                ax.set_xlabel(label[ii], fontdict={'fontsize':'20'}, labelpad=20.)
                #if i == Nparam-j-1: ax.set_title(label[ii], fontdict={'fontsize':'20'})
            if i == 0:
                ax.set_ylabel(label[j], fontdict={'fontsize':'20'}, labelpad=20.)
                ax.get_yaxis().set_label_coords(-0.3,0.5)
# Print a summary table of 1D marginal errors, one column per experiment.
params = ['h', 'omega_b', 'omegaDE', 'n_s', 'sigma8']
print [names[kk] for kk in _k]
for p in params:
    # Parameter ordering can differ between runs; look up the index per run.
    idxs = [params_lbls[i].index(p) for i in range(3)]
    print "%9s: %5.5f %5.5f %5.5f" % (p, params_1d[0][idxs[0]], params_1d[1][idxs[1]], params_1d[2][idxs[2]])

# Set size and save
P.gcf().set_size_inches(16.5,10.5)
P.show()
|
philbullREPO_NAMERadioFisherPATH_START.@RadioFisher_extracted@RadioFisher-master@plotting@plot_triangle_planck.py@.PATH_END.py
|
{
"filename": "_len.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/splom/marker/colorbar/_len.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LenValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``splom.marker.colorbar.len`` property."""

    def __init__(
        self, plotly_name="len", parent_name="splom.marker.colorbar", **kwargs
    ):
        # Defaults for this property; explicit kwargs take precedence.
        for option, default in (
            ("edit_type", "colorbars"),
            ("min", 0),
            ("role", "style"),
        ):
            kwargs.setdefault(option, default)
        super(LenValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@splom@marker@colorbar@_len.py@.PATH_END.py
|
{
"filename": "test_return_character.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_character.py",
"type": "Python"
}
|
from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.testing import run_module_suite, assert_, dec
from . import util
class TestReturnCharacter(util.F2PyTest):
    # Shared assertions for Fortran routines that return CHARACTER values.
    # Concrete subclasses supply the Fortran source via `code` and feed each
    # wrapped routine to check_function().

    def check_function(self, t):
        """Check wrapped routine *t*; the expected behaviour depends on its
        name, recovered from the first token of the f2py-generated docstring.
        """
        tname = t.__doc__.split()[0]
        if tname in ['t0', 't1', 's0', 's1']:
            # Single-character results: only the first character survives.
            assert_(t(23) == b'2')
            r = t('ab')
            assert_(r == b'a', repr(r))
            r = t(array('ab'))
            assert_(r == b'a', repr(r))
            # chr(77) == 'M': a uint8 scalar is treated as one raw byte.
            r = t(array(77, 'u1'))
            assert_(r == b'M', repr(r))
            #assert_(_raises(ValueError, t, array([77,87])))
            #assert_(_raises(ValueError, t, array(77)))
        elif tname in ['ts', 'ss']:
            # Fixed-width results are space-padded / truncated.
            # NOTE(review): trailing spaces inside these bytes literals may
            # have been mangled when this source was extracted -- confirm the
            # expected widths against the upstream numpy sources.
            assert_(t(23) == b'23 ', repr(t(23)))
            assert_(t('123456789abcdef') == b'123456789a')
        elif tname in ['t5', 's5']:
            # character*5: padded or truncated to exactly 5 characters.
            assert_(t(23) == b'23 ', repr(t(23)))
            assert_(t('ab') == b'ab ', repr(t('ab')))
            assert_(t('123456789abcdef') == b'12345')
        else:
            raise NotImplementedError
class TestF77ReturnCharacter(TestReturnCharacter):
    # Fixed-form Fortran 77 implementations of the CHARACTER-returning
    # routines exercised by TestReturnCharacter.check_function().
    # NOTE(review): leading whitespace in this extracted source block was
    # stripped; fixed-form Fortran normally requires statements to start at
    # column 7 -- confirm against upstream numpy before compiling.
    code = """
function t0(value)
character value
character t0
t0 = value
end
function t1(value)
character*1 value
character*1 t1
t1 = value
end
function t5(value)
character*5 value
character*5 t5
t5 = value
end
function ts(value)
character*(*) value
character*(*) ts
ts = value
end
subroutine s0(t0,value)
character value
character t0
cf2py intent(out) t0
t0 = value
end
subroutine s1(t1,value)
character*1 value
character*1 t1
cf2py intent(out) t1
t1 = value
end
subroutine s5(t5,value)
character*5 value
character*5 t5
cf2py intent(out) t5
t5 = value
end
subroutine ss(ts,value)
character*(*) value
character*10 ts
cf2py intent(out) ts
ts = value
end
"""

    @dec.slow
    def test_all(self):
        # Run the shared checks against every wrapped F77 routine.
        for name in "t0,t1,t5,s0,s1,s5,ss".split(","):
            self.check_function(getattr(self.module, name))
class TestF90ReturnCharacter(TestReturnCharacter):
    # Free-form Fortran 90 implementations of the same routines, wrapped in
    # a module (hence the self.module.f90_return_char lookup in test_all).
    # NOTE(review): leading whitespace in this extracted source block was
    # stripped -- free-form Fortran tolerates this, but confirm against the
    # upstream numpy sources before relying on exact formatting.
    suffix = ".f90"
    code = """
module f90_return_char
contains
function t0(value)
character :: value
character :: t0
t0 = value
end function t0
function t1(value)
character(len=1) :: value
character(len=1) :: t1
t1 = value
end function t1
function t5(value)
character(len=5) :: value
character(len=5) :: t5
t5 = value
end function t5
function ts(value)
character(len=*) :: value
character(len=10) :: ts
ts = value
end function ts
subroutine s0(t0,value)
character :: value
character :: t0
!f2py intent(out) t0
t0 = value
end subroutine s0
subroutine s1(t1,value)
character(len=1) :: value
character(len=1) :: t1
!f2py intent(out) t1
t1 = value
end subroutine s1
subroutine s5(t5,value)
character(len=5) :: value
character(len=5) :: t5
!f2py intent(out) t5
t5 = value
end subroutine s5
subroutine ss(ts,value)
character(len=*) :: value
character(len=10) :: ts
!f2py intent(out) ts
ts = value
end subroutine ss
end module f90_return_char
"""

    @dec.slow
    def test_all(self):
        # Run the shared checks against every routine in the F90 module,
        # including ts (omitted from the F77 variant's list).
        for name in "t0,t1,t5,ts,s0,s1,s5,ss".split(","):
            self.check_function(getattr(self.module.f90_return_char, name))
if __name__ == "__main__":
    # Allow running this test module directly via the legacy numpy runner.
    run_module_suite()
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@f2py@tests@test_return_character.py@.PATH_END.py
|
{
"filename": "_unselected.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/scattermapbox/_unselected.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Unselected(_BaseTraceHierarchyType):
    """Styling applied to scattermapbox points that are NOT part of an
    active selection."""

    # -- class properties -------------------------------------------------
    _parent_path_str = "scattermapbox"
    _path_str = "scattermapbox.unselected"
    _valid_props = {"marker"}

    # -- marker -----------------------------------------------------------
    @property
    def marker(self):
        """
        Marker styling for unselected points.

        The 'marker' property accepts an instance of
        :class:`plotly.graph_objs.scattermapbox.unselected.Marker`, or a
        dict of string/value properties passed to the Marker constructor.

        Supported dict properties:

            color
                Sets the marker color of unselected points,
                applied only when a selection exists.
            opacity
                Sets the marker opacity of unselected points,
                applied only when a selection exists.
            size
                Sets the marker size of unselected points,
                applied only when a selection exists.

        Returns
        -------
        plotly.graph_objs.scattermapbox.unselected.Marker
        """
        return self["marker"]

    @marker.setter
    def marker(self, val):
        self["marker"] = val

    # -- self properties description --------------------------------------
    @property
    def _prop_descriptions(self):
        return """\
marker
:class:`plotly.graph_objects.scattermapbox.unselected.M
arker` instance or dict with compatible properties
"""

    def __init__(self, arg=None, marker=None, **kwargs):
        """
        Construct a new Unselected object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scattermapbox.Unselected`
        marker
            :class:`plotly.graph_objects.scattermapbox.unselected.M
            arker` instance or dict with compatible properties

        Returns
        -------
        Unselected
        """
        super(Unselected, self).__init__("unselected")

        # Internal fast path: re-parenting an existing node skips all
        # argument processing below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalise `arg` into a plain dict that can be consumed
        # destructively via pop().
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattermapbox.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.Unselected`"""
            )

        # Behaviour flags consumed by the base-class property machinery.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate properties: an explicit keyword argument wins over a
        # value carried inside `arg`.
        marker_value = arg.pop("marker", None)
        if marker is not None:
            marker_value = marker
        if marker_value is not None:
            self["marker"] = marker_value

        # Forward any remaining (unknown) entries to the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable strict validation once construction is complete.
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@scattermapbox@_unselected.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/scatter3d/legendgrouptitle/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Font used for a scatter3d trace's legend-group title."""

    # -- class properties ------------------------------------------------
    _parent_path_str = "scatter3d.legendgrouptitle"
    _path_str = "scatter3d.legendgrouptitle.font"
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }

    @property
    def color(self):
        """
        Font color.

        The 'color' property is a color and may be specified as a hex
        string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or hsv/hsva
        string, or any named CSS color.

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def family(self):
        """
        HTML font family - the typeface applied by the web browser.
        Provide multiple comma-separated families to express fallback
        preference if some are unavailable on the system. The Chart
        Studio Cloud image server supports only a small set of fonts
        ("Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif",
        "Droid Sans Mono", "Gravitas One", "Old Standard TT",
        "Open Sans", "Overpass", "PT Sans Narrow", "Raleway",
        "Times New Roman").

        The 'family' property is a non-empty string.

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    @property
    def lineposition(self):
        """
        Kind of text decoration line(s): any '+'-joined combination of
        'under', 'over' and 'through' (e.g. 'under+over'), or exactly
        'none'.

        Returns
        -------
        Any
        """
        return self["lineposition"]

    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val

    @property
    def shadow(self):
        """
        Shape and color of the shadow behind text. "auto" places a
        minimal shadow and applies a contrasting text font color; any
        CSS text-shadow value is accepted.

        Returns
        -------
        str
        """
        return self["shadow"]

    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val

    @property
    def size(self):
        """
        Font size: an int or float in the interval [1, inf].

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def style(self):
        """
        Whether the font is styled with a 'normal' or 'italic' face
        from its family.

        Returns
        -------
        Any
        """
        return self["style"]

    @style.setter
    def style(self, val):
        self["style"] = val

    @property
    def textcase(self):
        """
        Text capitalization: one of 'normal', 'word caps', 'upper' or
        'lower'.

        Returns
        -------
        Any
        """
        return self["textcase"]

    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val

    @property
    def variant(self):
        """
        Font variant: one of 'normal', 'small-caps', 'all-small-caps',
        'all-petite-caps', 'petite-caps' or 'unicase'.

        Returns
        -------
        Any
        """
        return self["variant"]

    @variant.setter
    def variant(self, val):
        self["variant"] = val

    @property
    def weight(self):
        """
        Font weight (boldness): an int in the interval [1, 1000], or
        exactly one of 'normal' and 'bold'.

        Returns
        -------
        int
        """
        return self["weight"]

    @weight.setter
    def weight(self, val):
        self["weight"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Font object

        Sets this legend group's title font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatter3d.lege
            ndgrouptitle.Font`
        color
        family
        lineposition
        shadow
        size
        style
        textcase
        variant
        weight
            See the corresponding property docstrings on this class
            for the accepted values of each keyword.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        # Internal construction path: adopt the given parent and skip
        # all validation below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain dict we can pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter3d.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.legendgrouptitle.Font`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the data dict; an explicit keyword argument takes
        # precedence over the matching value carried in ``arg``.
        for _name, _kw in (
            ("color", color),
            ("family", family),
            ("lineposition", lineposition),
            ("shadow", shadow),
            ("size", size),
            ("style", style),
            ("textcase", textcase),
            ("variant", variant),
            ("weight", weight),
        ):
            _v = arg.pop(_name, None)
            if _kw is not None:
                _v = _kw
            if _v is not None:
                self[_name] = _v

        # Hand any remaining (unknown) properties to the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid so later item assignment validates again.
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@scatter3d@legendgrouptitle@_font.py@.PATH_END.py
|
{
"filename": "test_ndcubesequence.py",
"repo_name": "sunpy/ndcube",
"repo_path": "ndcube_extracted/ndcube-main/ndcube/tests/test_ndcubesequence.py",
"type": "Python"
}
|
import numpy as np
import pytest
import astropy.units as u
from astropy.time import Time, TimeDelta
from ndcube import NDCube, NDCubeSequence
from ndcube.tests import helpers
def derive_sliced_cube_dims(orig_cube_dims, tuple_item):
    """Return the expected cube shape after applying ``tuple_item``.

    The first entry of ``tuple_item`` indexes the sequence axis and is
    ignored here; each remaining entry is applied to the corresponding
    cube axis: an int drops the axis, a slice shrinks it to
    ``float(stop - start)``.
    """
    dims = list(orig_cube_dims)
    cube_item = tuple_item[1:]
    # Walk the cube part right-to-left so deleting an axis does not
    # shift the indices that are still to be processed.
    for axis, idx in reversed(list(enumerate(cube_item))):
        if isinstance(idx, int):
            del dims[axis]
        else:
            dims[axis] = float(idx.stop - idx.start)
    return tuple(dims)
@pytest.mark.parametrize(
    ("ndc", "item"),
    [
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[0:1]),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[0:1, 0:2]),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[0:1, 1]),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[1:3, 1, 0:2]),
    ],
    indirect=("ndc",),
)
def test_slice_sequence_axis(ndc, item):
    """Slicing the sequence axis with a slice yields an NDCubeSequence."""
    # Normalise the item to a tuple for shape bookkeeping only; the raw
    # ``item`` is still what gets passed to ``ndc[...]``.
    item_as_tuple = item if isinstance(item, tuple) else (item,)
    seq_item = item_as_tuple[0]
    expected_dims = derive_sliced_cube_dims(ndc.data[seq_item][0].shape, item_as_tuple)
    result = ndc[item]
    assert isinstance(result, NDCubeSequence)
    assert int(result.shape[0]) == seq_item.stop - seq_item.start
    assert np.all(result[0].shape == expected_dims)
@pytest.mark.parametrize(
    ("ndc", "item"),
    [
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[0]),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[1, 0:1]),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[2, 1]),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[3, 1, 0:2]),
    ],
    indirect=("ndc",),
)
def test_extract_ndcube(ndc, item):
    """Indexing the sequence axis with an int yields a single NDCube."""
    result = ndc[item]
    item_as_tuple = item if isinstance(item, tuple) else (item,)
    expected_dims = derive_sliced_cube_dims(ndc.data[item_as_tuple[0]].shape, item_as_tuple)
    assert isinstance(result, NDCube)
    assert np.all(result.shape == expected_dims)
@pytest.mark.parametrize(
    ("ndc", "item", "expected_common_axis"),
    [
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[:, 0], 0),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[:, 0:1, 0:2], 1),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[:, :, :, 1], 1),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[:, :, 0], None),
    ],
    indirect=("ndc",),
)
def test_slice_common_axis(ndc, item, expected_common_axis):
    """Slicing adjusts (or drops) the sequence's common-axis index."""
    assert ndc[item]._common_axis == expected_common_axis
@pytest.mark.parametrize(
    ("ndc", "item", "expected_shape"),
    [
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[:, 1:7], (3, 2, (2, 3, 1), 4)),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[0, 1:7], (3, (2, 3, 1), 4)),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[:, 2:4], (2, 2, 1, 4)),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[:, 0:6], (2, 2, 3, 4)),
        ("ndcubesequence_4c_ln_lt_l_cax1", np.s_[0, 0:6], (2, 3, 4)),
    ],
    indirect=("ndc",),
)
def test_index_as_cube(ndc, item, expected_shape):
    """index_as_cube slices the sequence as one cube-like array."""
    sliced = ndc.index_as_cube[item]
    assert sliced.shape == expected_shape
@pytest.mark.parametrize(
    ("ndc", "axis", "expected_shape"),
    [
        ("ndcubesequence_4c_ln_lt_l", 0, (8, 3, 4)),
        ("ndcubesequence_4c_ln_lt_l_cax1", 1, (12, 2, 4)),
    ],
    indirect=("ndc",),
)
def test_explode_along_axis_common_axis_none(ndc, axis, expected_shape):
    """Exploding along the common axis (or with none set) clears it."""
    exploded = ndc.explode_along_axis(axis)
    assert np.all(exploded.shape == expected_shape)
    assert exploded._common_axis is None
@pytest.mark.parametrize("ndc", ["ndcubesequence_4c_ln_lt_l_cax1"], indirect=("ndc",))
def test_explode_along_axis_common_axis_same(ndc):
    """Exploding along a later axis leaves the common axis untouched."""
    exploded = ndc.explode_along_axis(2)
    assert exploded.shape == (16, 2, 3)
    assert exploded._common_axis == ndc._common_axis
@pytest.mark.parametrize("ndc", ["ndcubesequence_4c_ln_lt_l_cax1"], indirect=("ndc",))
def test_explode_along_axis_common_axis_changed(ndc):
    """Exploding along an earlier axis shifts the common axis down by one."""
    exploded = ndc.explode_along_axis(0)
    assert exploded.shape == (8, 3, 4)
    assert exploded._common_axis == ndc._common_axis - 1
@pytest.mark.parametrize(
    ("ndc", "expected_shape"),
    [("ndcubesequence_4c_ln_lt_l_cax1", (4, 2., 3., 4.))],
    indirect=("ndc",),
)
def test_shape(ndc, expected_shape):
    """The sequence shape is (n_cubes, *cube_shape)."""
    np.testing.assert_array_equal(ndc.shape, expected_shape)
@pytest.mark.parametrize(
    ("ndc", "expected_shape"),
    [("ndcubesequence_4c_ln_lt_l_cax1", [2., 12, 4])],
    indirect=("ndc",),
)
def test_cube_like_shape(ndc, expected_shape):
    """cube_like_shape merges the cube lengths along the common axis."""
    assert np.all(ndc.cube_like_shape == expected_shape)
@pytest.mark.parametrize("ndc", ["ndcubesequence_4c_ln_lt_l"], indirect=("ndc",))
def test_cube_like_shape_error(ndc):
    """Without a common axis, cube_like_shape raises TypeError."""
    with pytest.raises(TypeError):
        ndc.cube_like_shape
@pytest.mark.parametrize("ndc", ["ndcubesequence_3c_l_ln_lt_cax1"], indirect=("ndc",))
def test_common_axis_coords(ndc):
    """common_axis_coords concatenates per-cube coords along the common axis."""
    # Build the expected SkyCoords from each cube's 'lon' world coords.
    per_cube_coords = [cube.axis_world_coords('lon') for cube in ndc]
    expected_skycoords = []
    for cube_coords in per_cube_coords:
        first = cube_coords[0]
        expected_skycoords.extend(first[i] for i in range(len(first)))
    # Expected times: one minute apart, starting at 2000-01-01 UTC.
    t0 = Time('2000-01-01', format='fits', scale='utc')
    expected_times = [t0 + TimeDelta(60 * i, format='sec') for i in range(15)]
    # Run the property under test.
    result = ndc.common_axis_coords
    assert len(result) == 2
    result_skycoords, result_times = result
    for got, want in zip(result_skycoords, expected_skycoords):
        assert all(got == want)
    for got, want in zip(result_times, expected_times):
        delta = got - want
        assert u.allclose(delta.to(u.s), 0 * u.s, atol=1e-10 * u.s)
@pytest.mark.parametrize("ndc", ["ndcubesequence_3c_l_ln_lt_cax1"], indirect=("ndc",))
def test_sequence_axis_coords(ndc):
    """sequence_axis_coords exposes the per-cube scalar coordinates."""
    assert ndc.sequence_axis_coords == {'distance': [1*u.m, 2*u.m, 3*u.m]}
def test_crop(ndcubesequence_4c_ln_lt_l):
    """crop with world-coordinate corners matches explicit index slicing."""
    seq = ndcubesequence_4c_ln_lt_l
    corners = seq[0].wcs.array_index_to_world([1, 2], [0, 1], [0, 2])
    lower = [coord[0] for coord in corners]
    upper = [coord[-1] for coord in corners]
    result = seq.crop(lower, upper)
    helpers.assert_cubesequences_equal(result, seq[:, 1:3, 0:2, 0:3])
def test_crop_by_values(ndcubesequence_4c_ln_lt_l):
    """crop_by_values with quantity corners matches explicit index slicing."""
    seq = ndcubesequence_4c_ln_lt_l
    intervals = seq[0].wcs.array_index_to_world_values([1, 2], [0, 1], [0, 2])
    units = [u.m, u.deg, u.deg]
    lower = [coord[0] * unit for coord, unit in zip(intervals, units)]
    upper = [coord[-1] * unit for coord, unit in zip(intervals, units)]
    # Exercise unit-conversion handling: express some corners in units
    # different from each other and from those stored in the WCS.
    lower[0] = lower[0].to(units[0])
    lower[-1] = lower[-1].to(units[-1])
    upper[-1] = upper[-1].to(units[-1])
    result = seq.crop_by_values(lower, upper)
    helpers.assert_cubesequences_equal(result, seq[:, 1:3, 0:2, 0:3])
|
sunpyREPO_NAMEndcubePATH_START.@ndcube_extracted@ndcube-main@ndcube@tests@test_ndcubesequence.py@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/xaxis/rangebreak/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``name`` string of ``layout.xaxis.rangebreak``."""

    def __init__(
        self, plotly_name="name", parent_name="layout.xaxis.rangebreak", **kwargs
    ):
        # Default metadata, overridable by the caller.
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "style")
        super(NameValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@xaxis@rangebreak@_name.py@.PATH_END.py
|
{
"filename": "01_train_transferlearning_zoo1-checkpoint.ipynb",
"repo_name": "hfarias/mask_galaxy",
"repo_path": "mask_galaxy_extracted/mask_galaxy-master/redes/two_class_zoo1/.ipynb_checkpoints/01_train_transferlearning_zoo1-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
```
/Users/humbertofariasaroca/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/Users/humbertofariasaroca/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/Users/humbertofariasaroca/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/Users/humbertofariasaroca/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/Users/humbertofariasaroca/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/Users/humbertofariasaroca/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
Using TensorFlow backend.
```python
import keras
keras.__version__
```
'2.1.3'
```python
import warnings
warnings.filterwarnings("ignore")
```
```python
import json
import skimage.draw
```
## Configurations
```python
class GalaxiaConfig(Config):
    """Mask R-CNN configuration for training on the galaxy dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "galaxia"
    # NOTE(review): effective batch size is GPU_COUNT * IMAGES_PER_GPU = 21,
    # not 8 as an earlier comment claimed.
    GPU_COUNT = 7
    IMAGES_PER_GPU = 3
    #BACKBONE = "resnet50"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    #IMAGES_PER_GPU = 2
    # Number of classes (including background)
    NUM_CLASSES = 2 + 1 # Background + "S" (spiral) + "E" (elliptical)
    # Don't exclude detections based on confidence, since we have two classes
    DETECTION_MIN_CONFIDENCE = 0
    # Use small images for faster training. Set the limits of the small side
    # the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 256
    IMAGE_MAX_DIM = 256
    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8,16, 32, 64, 128) # anchor side in pixels
    # How many anchors per image to use for RPN training
    RPN_TRAIN_ANCHORS_PER_IMAGE = 256
    # ROIs kept after non-maximum suppression (training and inference)
    POST_NMS_ROIS_TRAINING = 1000
    POST_NMS_ROIS_INFERENCE = 2000
    # Non-max suppression threshold to filter RPN proposals.
    # You can increase this during training to generate more proposals.
    RPN_NMS_THRESHOLD = 0.9
    # Image mean (RGB) — presumably computed over this dataset; verify.
    MEAN_PIXEL = np.array([10.71, 15.81, 19.04])
    #BACKBONE_STRIDES = (8,16, 32, 64, 128)
    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 128
    # Maximum number of ground truth instances to use in one image
    MAX_GT_INSTANCES = 200
    # Max number of final detections per image
    DETECTION_MAX_INSTANCES = 400
    #LEN_TRAIN_ = 84627
    #LEN_VAL_ = 21155
    #LEN_TRAIN_ = 24000
    #LEN_VAL_ = 6000
    # Number of training and validation steps per epoch
    #STEPS_PER_EPOCH = max(1, LEN_TRAIN_ // (IMAGES_PER_GPU*GPU_COUNT))
    #VALIDATION_STEPS = max(1,LEN_VAL_ // (IMAGES_PER_GPU*GPU_COUNT))
    STEPS_PER_EPOCH = 100
    VALIDATION_STEPS = 20
    # Store masks inside the bounding boxes (loses some accuracy but speeds up training)
    USE_MINI_MASK = True
```
```python
config = GalaxiaConfig()
config.display()
```
Configurations:
BACKBONE resnet101
BACKBONE_STRIDES [4, 8, 16, 32, 64]
BATCH_SIZE 21
BBOX_STD_DEV [0.1 0.1 0.2 0.2]
COMPUTE_BACKBONE_SHAPE None
DETECTION_MAX_INSTANCES 400
DETECTION_MIN_CONFIDENCE 0
DETECTION_NMS_THRESHOLD 0.3
FPN_CLASSIF_FC_LAYERS_SIZE 1024
GPU_COUNT 7
GRADIENT_CLIP_NORM 5.0
IMAGES_PER_GPU 3
IMAGE_CHANNEL_COUNT 3
IMAGE_MAX_DIM 256
IMAGE_META_SIZE 15
IMAGE_MIN_DIM 256
IMAGE_MIN_SCALE 0
IMAGE_RESIZE_MODE square
IMAGE_SHAPE [256 256 3]
LEARNING_MOMENTUM 0.9
LEARNING_RATE 0.001
LOSS_WEIGHTS {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0}
MASK_POOL_SIZE 14
MASK_SHAPE [28, 28]
MAX_GT_INSTANCES 200
MEAN_PIXEL [10.71 15.81 19.04]
MINI_MASK_SHAPE (56, 56)
NAME galaxia
NUM_CLASSES 3
POOL_SIZE 7
POST_NMS_ROIS_INFERENCE 2000
POST_NMS_ROIS_TRAINING 1000
PRE_NMS_LIMIT 6000
ROI_POSITIVE_RATIO 0.33
RPN_ANCHOR_RATIOS [0.5, 1, 2]
RPN_ANCHOR_SCALES (8, 16, 32, 64, 128)
RPN_ANCHOR_STRIDE 1
RPN_BBOX_STD_DEV [0.1 0.1 0.2 0.2]
RPN_NMS_THRESHOLD 0.9
RPN_TRAIN_ANCHORS_PER_IMAGE 256
STEPS_PER_EPOCH 100
TOP_DOWN_PYRAMID_SIZE 256
TRAIN_BN False
TRAIN_ROIS_PER_IMAGE 128
USE_MINI_MASK True
USE_RPN_ROIS True
VALIDATION_STEPS 20
WEIGHT_DECAY 0.0001
```python
import math
def plot_history(history):
    """Plot train/validation curves for every loss in a Keras History.

    Each non-validation key in ``history.history`` gets its own subplot
    showing the training curve together with its matching ``val_`` curve.

    Parameters
    ----------
    history : keras.callbacks.History
        The object returned by ``model.fit``; must contain paired
        ``<k>`` / ``val_<k>`` entries in ``history.history``.
    """
    fig = plt.figure(figsize=(16, 10))
    epoch = history.epoch
    # History keys come in train/val pairs; keep only the train keys.
    loss_stats = [k for k in history.history.keys() if 'val_' not in k]
    n_cols = 4
    n_rows = math.ceil(len(loss_stats) / n_cols)
    for i, k in enumerate(loss_stats):
        val_k = 'val_' + k
        ax = plt.subplot(n_rows, n_cols, i + 1)
        ax.plot(epoch, history.history[k], label=k)
        ax.plot(epoch, history.history[val_k], label=val_k)
        ax.set_xlabel('Epochs')
        ax.set_ylabel('Loss')
        ax.set_title(str(i) + ' - ' + k)
        plt.legend(shadow=True, fancybox=True)
    fig.tight_layout()
    plt.show()
```
## Notebook Preferences
```python
def get_ax(rows=1, cols=1, size=8):
    """Create the Matplotlib Axes grid used by the notebook's visualizations.

    A single place to control graph sizes: change ``size`` to scale every
    rendered figure.
    """
    fig_width, fig_height = size * cols, size * rows
    _, ax = plt.subplots(rows, cols, figsize=(fig_width, fig_height))
    return ax
```
## Class Dataset
```python
class GalaxiaDataset(utils.Dataset):
    """Mask R-CNN dataset for galaxy images annotated with the VIA tool.

    Two object classes are used: "S" (spiral, class id 1) and "E"
    (elliptical, class id 2); class id 0 is the background.
    """

    def load_galaxia(self, dataset_dir, subset):
        """Load a subset of the galaxia dataset.

        Parameters
        ----------
        dataset_dir : str
            Root directory of the dataset.
        subset : str
            Subset to load: "train" or "val".
        """
        # Add classes (a third class "U" was intentionally disabled).
        self.add_class("galaxia", 1, "S")
        self.add_class("galaxia", 2, "E")
        # Train or validation dataset?
        assert subset in ["train", "val"]
        dataset_dir = os.path.join(dataset_dir, subset)
        annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
        annotations = list(annotations.values())  # don't need the dict keys
        # The VIA tool saves images in the JSON even if they don't have any
        # annotations. Skip unannotated images.
        annotations = [a for a in annotations if a['regions']]
        for a in annotations:
            # Get the x, y coordinates of the polygons outlining each object
            # instance; they are stored in shape_attributes. The condition
            # supports both VIA 1.x (dict) and 2.x (list) region formats.
            if type(a['regions']) is dict:
                polygons = [r['shape_attributes'] for r in a['regions'].values()]
                objects = [s['region_attributes'] for s in a['regions'].values()]
            else:
                polygons = [r['shape_attributes'] for r in a['regions']]
                objects = [s['region_attributes'] for s in a['regions']]
            # Map annotated object names to numeric class ids.
            class_ids = []
            for n in objects:
                if n['object_name'] == "S":
                    class_ids.append(1)
                elif n['object_name'] == "E":
                    class_ids.append(2)
            # load_mask() needs the image size to convert polygons to masks.
            # VIA doesn't store it in the JSON, so read the image; this is
            # only manageable because the dataset is small.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]
            self.add_image(
                "galaxia",
                image_id=a['filename'],  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=polygons, class_ids=class_ids)

    def load_mask(self, image_id):
        """Generate instance masks for an image.

        Returns
        -------
        masks : np.ndarray
            Bool array of shape [height, width, instance count], one mask
            per instance.
        class_ids : np.ndarray
            1D int32 array with the class id of each instance mask.
        """
        # If not a galaxia dataset image, delegate to the parent class.
        info = self.image_info[image_id]
        if info["source"] != "galaxia":
            return super(self.__class__, self).load_mask(image_id)
        # Rasterise each annotation polygon into its own mask channel.
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            # Set the pixels inside the polygon to 1.
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 1
        class_ids = np.array(info['class_ids'], dtype=np.int32)
        # Bug fix: np.bool was removed in NumPy 1.24; the builtin bool is
        # the equivalent dtype.
        return mask.astype(bool), class_ids

    def image_reference(self, image_id):
        """Return the path of the image."""
        # Bug fix: the original defined this method twice and dropped the
        # parent's return value; keep one definition and propagate it.
        info = self.image_info[image_id]
        if info["source"] == "galaxia":
            return info["path"]
        return super(self.__class__, self).image_reference(image_id)
```
## Paths
```python
MODEL_DIR = os.path.join(ROOT_DIR, "redes/two_class_zoo1/modelos")
MODEL_DIR
```
'/Users/humbertofariasaroca/Doctorado/2019-2/paper1/Mask_RCNN/redes/two_class_zoo1/modelos'
```python
GALAXIA_DIR = os.path.join(ROOT_DIR, "redes/dataset")
GALAXIA_DIR
```
'/Users/humbertofariasaroca/Doctorado/2019-2/paper1/Mask_RCNN/redes/dataset'
```python
%load_ext tensorboard
```
The tensorboard module is not an IPython extension.
```python
#%tensorboard --logdir samples/galaxia/modelos
```
## Dataset
```python
%%time
# Training dataset
dataset_train = GalaxiaDataset()
dataset_train.load_galaxia(GALAXIA_DIR, "train")
dataset_train.prepare()
```
CPU times: user 11.8 s, sys: 2.67 s, total: 14.5 s
Wall time: 20.5 s
```python
%%time
# Validation dataset
dataset_val = GalaxiaDataset()
dataset_val.load_galaxia(GALAXIA_DIR, "val")
dataset_val.prepare()
```
CPU times: user 2.9 s, sys: 703 ms, total: 3.6 s
Wall time: 5.36 s
```python
# Load and display random samples
image_ids = np.random.choice(dataset_train.image_ids, 4)
for image_id in image_ids:
image = dataset_train.load_image(image_id)
print(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
```
4124

971

38

8

```python
dataset_train.class_names
```
['BG', 'S', 'E']
## Create Model
```python
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,model_dir=MODEL_DIR)
```
WARNING:tensorflow:From /Users/humbertofariasaroca/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
```python
# Which weights to start with?
init_with = "coco"  # imagenet, coco, or last
if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                "mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
    # Load the last model you trained and continue training
    model.load_weights(model.find_last(), by_name=True)
```
```python
WEIGHTS_PATH = os.path.join(ROOT_DIR,"redes/two_class_zoo1/modelos/galaxia_heads.h5")
WEIGHTS_PATH
```
'/Users/humbertofariasaroca/Doctorado/2019-2/paper1/Mask_RCNN/redes/two_class_zoo1/modelos/galaxia_heads.h5'
```python
print("Loading weights ", WEIGHTS_PATH)
model.load_weights(WEIGHTS_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
```
Loading weights /Users/humbertofariasaroca/Doctorado/2019-2/paper1/Mask_RCNN/redes/two_class_zoo1/modelos/galaxia_heads.h5
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
<ipython-input-33-ba098ace4f28> in <module>
2 model.load_weights(WEIGHTS_PATH, by_name=True,
3 exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
----> 4 "mrcnn_bbox", "mrcnn_mask"])
~/Doctorado/2019-2/paper1/Mask_RCNN/mrcnn/model.py in load_weights(self, filepath, by_name, exclude)
2128
2129 if by_name:
-> 2130 saving.load_weights_from_hdf5_group_by_name(f, layers)
2131 else:
2132 saving.load_weights_from_hdf5_group(f, layers)
~/anaconda3/envs/mask/lib/python3.7/site-packages/keras/engine/topology.py in load_weights_from_hdf5_group_by_name(f, layers, skip_mismatch)
3246 weight_values[i]))
3247
-> 3248 K.batch_set_value(weight_value_tuples)
~/anaconda3/envs/mask/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py in batch_set_value(tuples)
2368 assign_ops.append(assign_op)
2369 feed_dict[assign_placeholder] = value
-> 2370 get_session().run(assign_ops, feed_dict=feed_dict)
2371
2372
~/anaconda3/envs/mask/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py in get_session()
187 # not already marked as initialized.
188 is_initialized = session.run(
--> 189 [tf.is_variable_initialized(v) for v in candidate_vars])
190 uninitialized_vars = []
191 for flag, v in zip(is_initialized, candidate_vars):
~/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
927 try:
928 result = self._run(None, fetches, feed_dict, options_ptr,
--> 929 run_metadata_ptr)
930 if run_metadata:
931 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1150 if final_fetches or final_targets or (handle and feed_dict_tensor):
1151 results = self._do_run(handle, final_targets, final_fetches,
-> 1152 feed_dict_tensor, options, run_metadata)
1153 else:
1154 results = []
~/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1326 if handle is None:
1327 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1328 run_metadata)
1329 else:
1330 return self._do_call(_prun_fn, handle, feeds, fetches)
~/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1332 def _do_call(self, fn, *args):
1333 try:
-> 1334 return fn(*args)
1335 except errors.OpError as e:
1336 message = compat.as_text(e.message)
~/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/client/session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1317 self._extend_graph()
1318 return self._call_tf_sessionrun(
-> 1319 options, feed_dict, fetch_list, target_list, run_metadata)
1320
1321 def _prun_fn(handle, feed_dict, fetch_list):
~/anaconda3/envs/mask/lib/python3.7/site-packages/tensorflow/python/client/session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1405 return tf_session.TF_SessionRun_wrapper(
1406 self._session, options, feed_dict, fetch_list, target_list,
-> 1407 run_metadata)
1408
1409 def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
KeyboardInterrupt:
## Training
Train in two stages:
1. Only the heads. Here we're freezing all the backbone layers and training only the randomly initialized layers (i.e. the ones that we didn't use pre-trained weights from MS COCO). To train only the head layers, pass `layers='heads'` to the `train()` function.
2. Fine-tune all layers. For this simple example it's not necessary, but we're including it to show the process. Simply pass `layers="all"` to train all layers.
```python
def train(model, dataset_train, dataset_val, _layers, _learning_rate, epochs):
    """Run one training stage on *model* and return the resulting history.

    Args:
        model: The Mask R-CNN model wrapper exposing a ``train`` method.
        dataset_train: Dataset used for training.
        dataset_val: Dataset used for validation.
        _layers: Layer-selection spec forwarded to ``model.train``
            (e.g. 'heads', '5+', '3+', 'all').
        _learning_rate: Learning rate for this stage.
        epochs: Target epoch count (training resumes from the model's
            current epoch up to this value).
    """
    print("Training network =" + _layers)
    return model.train(
        dataset_train,
        dataset_val,
        learning_rate=_learning_rate,
        epochs=epochs,
        layers=_layers,
    )
```
```python
_learning_rate=config.LEARNING_RATE
_learning_rate
```
0.001
```python
%%time
history_head = train(model, dataset_train,dataset_val,'heads',_learning_rate, 10)
```
Training network =heads
Starting at epoch 0. LR=0.001
Checkpoint Path: /home/ubuntu/lab/mask_r_cnn/Mask_RCNN/samples/zoo1/modelos/galaxia20191119T1410/mask_rcnn_galaxia_{epoch:04d}.h5
Selecting layers to train
fpn_c5p5 (Conv2D)
fpn_c4p4 (Conv2D)
fpn_c3p3 (Conv2D)
fpn_c2p2 (Conv2D)
fpn_p5 (Conv2D)
fpn_p2 (Conv2D)
fpn_p3 (Conv2D)
fpn_p4 (Conv2D)
In model: rpn_model
rpn_conv_shared (Conv2D)
rpn_class_raw (Conv2D)
rpn_bbox_pred (Conv2D)
mrcnn_mask_conv1 (TimeDistributed)
mrcnn_mask_bn1 (TimeDistributed)
mrcnn_mask_conv2 (TimeDistributed)
mrcnn_mask_bn2 (TimeDistributed)
mrcnn_class_conv1 (TimeDistributed)
mrcnn_class_bn1 (TimeDistributed)
mrcnn_mask_conv3 (TimeDistributed)
mrcnn_mask_bn3 (TimeDistributed)
mrcnn_class_conv2 (TimeDistributed)
mrcnn_class_bn2 (TimeDistributed)
mrcnn_mask_conv4 (TimeDistributed)
mrcnn_mask_bn4 (TimeDistributed)
mrcnn_bbox_fc (TimeDistributed)
mrcnn_mask_deconv (TimeDistributed)
mrcnn_class_logits (TimeDistributed)
mrcnn_mask (TimeDistributed)
W1119 14:10:08.056451 140173346723584 deprecation_wrapper.py:119] From /home/ubuntu/anaconda3/envs/maskrcnn_keras/lib/python3.7/site-packages/keras/optimizers.py:744: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
W1119 14:10:31.891849 140173346723584 deprecation_wrapper.py:119] From /home/ubuntu/anaconda3/envs/maskrcnn_keras/lib/python3.7/site-packages/keras/callbacks.py:714: The name tf.summary.merge_all is deprecated. Please use tf.compat.v1.summary.merge_all instead.
W1119 14:10:31.892859 140173346723584 deprecation_wrapper.py:119] From /home/ubuntu/anaconda3/envs/maskrcnn_keras/lib/python3.7/site-packages/keras/callbacks.py:717: The name tf.summary.FileWriter is deprecated. Please use tf.compat.v1.summary.FileWriter instead.
Epoch 1/10
100/100 [==============================] - 334s 3s/step - loss: 1.3898 - rpn_class_loss: 0.0155 - rpn_bbox_loss: 0.4050 - mrcnn_class_loss: 0.0661 - mrcnn_bbox_loss: 0.3470 - mrcnn_mask_loss: 0.5563 - val_loss: 0.9096 - val_rpn_class_loss: 0.0115 - val_rpn_bbox_loss: 0.3200 - val_mrcnn_class_loss: 0.0199 - val_mrcnn_bbox_loss: 0.2250 - val_mrcnn_mask_loss: 0.3332
Epoch 2/10
100/100 [==============================] - 118s 1s/step - loss: 0.8498 - rpn_class_loss: 0.0101 - rpn_bbox_loss: 0.3293 - mrcnn_class_loss: 0.0214 - mrcnn_bbox_loss: 0.1935 - mrcnn_mask_loss: 0.2955 - val_loss: 0.7726 - val_rpn_class_loss: 0.0096 - val_rpn_bbox_loss: 0.2811 - val_mrcnn_class_loss: 0.0235 - val_mrcnn_bbox_loss: 0.1961 - val_mrcnn_mask_loss: 0.2623
Epoch 3/10
100/100 [==============================] - 118s 1s/step - loss: 0.7708 - rpn_class_loss: 0.0095 - rpn_bbox_loss: 0.3207 - mrcnn_class_loss: 0.0207 - mrcnn_bbox_loss: 0.1680 - mrcnn_mask_loss: 0.2520 - val_loss: 0.7404 - val_rpn_class_loss: 0.0090 - val_rpn_bbox_loss: 0.3168 - val_mrcnn_class_loss: 0.0125 - val_mrcnn_bbox_loss: 0.1649 - val_mrcnn_mask_loss: 0.2372
Epoch 4/10
100/100 [==============================] - 119s 1s/step - loss: 0.6821 - rpn_class_loss: 0.0086 - rpn_bbox_loss: 0.2810 - mrcnn_class_loss: 0.0179 - mrcnn_bbox_loss: 0.1490 - mrcnn_mask_loss: 0.2256 - val_loss: 0.6694 - val_rpn_class_loss: 0.0082 - val_rpn_bbox_loss: 0.2651 - val_mrcnn_class_loss: 0.0178 - val_mrcnn_bbox_loss: 0.1509 - val_mrcnn_mask_loss: 0.2274
Epoch 5/10
100/100 [==============================] - 119s 1s/step - loss: 0.6601 - rpn_class_loss: 0.0081 - rpn_bbox_loss: 0.2823 - mrcnn_class_loss: 0.0121 - mrcnn_bbox_loss: 0.1396 - mrcnn_mask_loss: 0.2181 - val_loss: 0.7207 - val_rpn_class_loss: 0.0074 - val_rpn_bbox_loss: 0.3285 - val_mrcnn_class_loss: 0.0102 - val_mrcnn_bbox_loss: 0.1519 - val_mrcnn_mask_loss: 0.2227
Epoch 6/10
100/100 [==============================] - 119s 1s/step - loss: 0.6026 - rpn_class_loss: 0.0075 - rpn_bbox_loss: 0.2557 - mrcnn_class_loss: 0.0141 - mrcnn_bbox_loss: 0.1231 - mrcnn_mask_loss: 0.2022 - val_loss: 0.6319 - val_rpn_class_loss: 0.0071 - val_rpn_bbox_loss: 0.2773 - val_mrcnn_class_loss: 0.0154 - val_mrcnn_bbox_loss: 0.1400 - val_mrcnn_mask_loss: 0.1921
Epoch 7/10
100/100 [==============================] - 119s 1s/step - loss: 0.6218 - rpn_class_loss: 0.0070 - rpn_bbox_loss: 0.2636 - mrcnn_class_loss: 0.0151 - mrcnn_bbox_loss: 0.1308 - mrcnn_mask_loss: 0.2052 - val_loss: 0.7517 - val_rpn_class_loss: 0.0067 - val_rpn_bbox_loss: 0.3132 - val_mrcnn_class_loss: 0.0099 - val_mrcnn_bbox_loss: 0.1257 - val_mrcnn_mask_loss: 0.2961
Epoch 8/10
100/100 [==============================] - 119s 1s/step - loss: 0.5997 - rpn_class_loss: 0.0072 - rpn_bbox_loss: 0.2468 - mrcnn_class_loss: 0.0144 - mrcnn_bbox_loss: 0.1300 - mrcnn_mask_loss: 0.2013 - val_loss: 0.6197 - val_rpn_class_loss: 0.0070 - val_rpn_bbox_loss: 0.2564 - val_mrcnn_class_loss: 0.0123 - val_mrcnn_bbox_loss: 0.1576 - val_mrcnn_mask_loss: 0.1864
Epoch 9/10
100/100 [==============================] - 119s 1s/step - loss: 0.5846 - rpn_class_loss: 0.0067 - rpn_bbox_loss: 0.2479 - mrcnn_class_loss: 0.0136 - mrcnn_bbox_loss: 0.1257 - mrcnn_mask_loss: 0.1907 - val_loss: 0.6581 - val_rpn_class_loss: 0.0067 - val_rpn_bbox_loss: 0.3241 - val_mrcnn_class_loss: 0.0086 - val_mrcnn_bbox_loss: 0.1249 - val_mrcnn_mask_loss: 0.1938
Epoch 10/10
100/100 [==============================] - 118s 1s/step - loss: 0.5644 - rpn_class_loss: 0.0059 - rpn_bbox_loss: 0.2364 - mrcnn_class_loss: 0.0137 - mrcnn_bbox_loss: 0.1156 - mrcnn_mask_loss: 0.1929 - val_loss: 0.5925 - val_rpn_class_loss: 0.0060 - val_rpn_bbox_loss: 0.2843 - val_mrcnn_class_loss: 0.0132 - val_mrcnn_bbox_loss: 0.1093 - val_mrcnn_mask_loss: 0.1797
CPU times: user 43min 32s, sys: 5min 56s, total: 49min 28s
Wall time: 24min
```python
model_path = os.path.join(MODEL_DIR, "galaxia_heads.h5")
model.keras_model.save_weights(model_path)
```
```python
_learning_rate=config.LEARNING_RATE / 10
```
```python
%%time
history_5 = train(model, dataset_train,dataset_val,'5+',_learning_rate, 20)
```
Training network =5+
Starting at epoch 10. LR=0.0001
Checkpoint Path: /home/ubuntu/lab/mask_r_cnn/Mask_RCNN/samples/zoo1/modelos/galaxia20191119T1410/mask_rcnn_galaxia_{epoch:04d}.h5
Selecting layers to train
res5a_branch2a (Conv2D)
bn5a_branch2a (BatchNorm)
res5a_branch2b (Conv2D)
bn5a_branch2b (BatchNorm)
res5a_branch2c (Conv2D)
res5a_branch1 (Conv2D)
bn5a_branch2c (BatchNorm)
bn5a_branch1 (BatchNorm)
res5b_branch2a (Conv2D)
bn5b_branch2a (BatchNorm)
res5b_branch2b (Conv2D)
bn5b_branch2b (BatchNorm)
res5b_branch2c (Conv2D)
bn5b_branch2c (BatchNorm)
res5c_branch2a (Conv2D)
bn5c_branch2a (BatchNorm)
res5c_branch2b (Conv2D)
bn5c_branch2b (BatchNorm)
res5c_branch2c (Conv2D)
bn5c_branch2c (BatchNorm)
fpn_c5p5 (Conv2D)
fpn_c4p4 (Conv2D)
fpn_c3p3 (Conv2D)
fpn_c2p2 (Conv2D)
fpn_p5 (Conv2D)
fpn_p2 (Conv2D)
fpn_p3 (Conv2D)
fpn_p4 (Conv2D)
In model: rpn_model
rpn_conv_shared (Conv2D)
rpn_class_raw (Conv2D)
rpn_bbox_pred (Conv2D)
mrcnn_mask_conv1 (TimeDistributed)
mrcnn_mask_bn1 (TimeDistributed)
mrcnn_mask_conv2 (TimeDistributed)
mrcnn_mask_bn2 (TimeDistributed)
mrcnn_class_conv1 (TimeDistributed)
mrcnn_class_bn1 (TimeDistributed)
mrcnn_mask_conv3 (TimeDistributed)
mrcnn_mask_bn3 (TimeDistributed)
mrcnn_class_conv2 (TimeDistributed)
mrcnn_class_bn2 (TimeDistributed)
mrcnn_mask_conv4 (TimeDistributed)
mrcnn_mask_bn4 (TimeDistributed)
mrcnn_bbox_fc (TimeDistributed)
mrcnn_mask_deconv (TimeDistributed)
mrcnn_class_logits (TimeDistributed)
mrcnn_mask (TimeDistributed)
Epoch 11/20
100/100 [==============================] - 181s 2s/step - loss: 0.5053 - rpn_class_loss: 0.0058 - rpn_bbox_loss: 0.2085 - mrcnn_class_loss: 0.0130 - mrcnn_bbox_loss: 0.1010 - mrcnn_mask_loss: 0.1771 - val_loss: 0.5191 - val_rpn_class_loss: 0.0058 - val_rpn_bbox_loss: 0.2404 - val_mrcnn_class_loss: 0.0096 - val_mrcnn_bbox_loss: 0.0912 - val_mrcnn_mask_loss: 0.1721
Epoch 12/20
100/100 [==============================] - 122s 1s/step - loss: 0.5187 - rpn_class_loss: 0.0059 - rpn_bbox_loss: 0.2254 - mrcnn_class_loss: 0.0129 - mrcnn_bbox_loss: 0.0993 - mrcnn_mask_loss: 0.1751 - val_loss: 0.5274 - val_rpn_class_loss: 0.0058 - val_rpn_bbox_loss: 0.2253 - val_mrcnn_class_loss: 0.0112 - val_mrcnn_bbox_loss: 0.1086 - val_mrcnn_mask_loss: 0.1765
Epoch 13/20
100/100 [==============================] - 122s 1s/step - loss: 0.5014 - rpn_class_loss: 0.0062 - rpn_bbox_loss: 0.2121 - mrcnn_class_loss: 0.0125 - mrcnn_bbox_loss: 0.0944 - mrcnn_mask_loss: 0.1761 - val_loss: 0.5315 - val_rpn_class_loss: 0.0050 - val_rpn_bbox_loss: 0.2267 - val_mrcnn_class_loss: 0.0101 - val_mrcnn_bbox_loss: 0.1090 - val_mrcnn_mask_loss: 0.1806
Epoch 14/20
100/100 [==============================] - 122s 1s/step - loss: 0.4917 - rpn_class_loss: 0.0055 - rpn_bbox_loss: 0.2035 - mrcnn_class_loss: 0.0121 - mrcnn_bbox_loss: 0.0953 - mrcnn_mask_loss: 0.1754 - val_loss: 0.5118 - val_rpn_class_loss: 0.0048 - val_rpn_bbox_loss: 0.2114 - val_mrcnn_class_loss: 0.0135 - val_mrcnn_bbox_loss: 0.1029 - val_mrcnn_mask_loss: 0.1792
Epoch 15/20
100/100 [==============================] - 122s 1s/step - loss: 0.4766 - rpn_class_loss: 0.0057 - rpn_bbox_loss: 0.1944 - mrcnn_class_loss: 0.0131 - mrcnn_bbox_loss: 0.0911 - mrcnn_mask_loss: 0.1723 - val_loss: 0.5581 - val_rpn_class_loss: 0.0059 - val_rpn_bbox_loss: 0.2605 - val_mrcnn_class_loss: 0.0131 - val_mrcnn_bbox_loss: 0.1009 - val_mrcnn_mask_loss: 0.1778
Epoch 16/20
100/100 [==============================] - 122s 1s/step - loss: 0.4848 - rpn_class_loss: 0.0059 - rpn_bbox_loss: 0.2006 - mrcnn_class_loss: 0.0136 - mrcnn_bbox_loss: 0.0923 - mrcnn_mask_loss: 0.1724 - val_loss: 0.5367 - val_rpn_class_loss: 0.0045 - val_rpn_bbox_loss: 0.2397 - val_mrcnn_class_loss: 0.0099 - val_mrcnn_bbox_loss: 0.1056 - val_mrcnn_mask_loss: 0.1770
Epoch 17/20
100/100 [==============================] - 122s 1s/step - loss: 0.4754 - rpn_class_loss: 0.0059 - rpn_bbox_loss: 0.2001 - mrcnn_class_loss: 0.0129 - mrcnn_bbox_loss: 0.0885 - mrcnn_mask_loss: 0.1681 - val_loss: 0.5588 - val_rpn_class_loss: 0.0058 - val_rpn_bbox_loss: 0.2415 - val_mrcnn_class_loss: 0.0162 - val_mrcnn_bbox_loss: 0.1133 - val_mrcnn_mask_loss: 0.1820
Epoch 18/20
100/100 [==============================] - 122s 1s/step - loss: 0.4876 - rpn_class_loss: 0.0056 - rpn_bbox_loss: 0.2020 - mrcnn_class_loss: 0.0137 - mrcnn_bbox_loss: 0.0946 - mrcnn_mask_loss: 0.1717 - val_loss: 0.5426 - val_rpn_class_loss: 0.0050 - val_rpn_bbox_loss: 0.2418 - val_mrcnn_class_loss: 0.0144 - val_mrcnn_bbox_loss: 0.1073 - val_mrcnn_mask_loss: 0.1741
Epoch 19/20
100/100 [==============================] - 122s 1s/step - loss: 0.4788 - rpn_class_loss: 0.0059 - rpn_bbox_loss: 0.1963 - mrcnn_class_loss: 0.0139 - mrcnn_bbox_loss: 0.0929 - mrcnn_mask_loss: 0.1699 - val_loss: 0.5259 - val_rpn_class_loss: 0.0063 - val_rpn_bbox_loss: 0.2357 - val_mrcnn_class_loss: 0.0126 - val_mrcnn_bbox_loss: 0.0978 - val_mrcnn_mask_loss: 0.1736
Epoch 20/20
100/100 [==============================] - 122s 1s/step - loss: 0.4671 - rpn_class_loss: 0.0056 - rpn_bbox_loss: 0.1926 - mrcnn_class_loss: 0.0135 - mrcnn_bbox_loss: 0.0869 - mrcnn_mask_loss: 0.1685 - val_loss: 0.5283 - val_rpn_class_loss: 0.0062 - val_rpn_bbox_loss: 0.2077 - val_mrcnn_class_loss: 0.0149 - val_mrcnn_bbox_loss: 0.1163 - val_mrcnn_mask_loss: 0.1831
CPU times: user 43min 32s, sys: 7min 8s, total: 50min 41s
Wall time: 22min 10s
```python
model_path = os.path.join(MODEL_DIR, "galaxia_plus_5.h5")
model.keras_model.save_weights(model_path)
```
```python
_learning_rate=config.LEARNING_RATE / 100
```
```python
%%time
history_3 = train(model, dataset_train,dataset_val,'3+',_learning_rate, 30)
```
Training network =3+
Starting at epoch 20. LR=1e-05
Checkpoint Path: /home/ubuntu/lab/mask_r_cnn/Mask_RCNN/samples/zoo1/modelos/galaxia20191119T1410/mask_rcnn_galaxia_{epoch:04d}.h5
Selecting layers to train
res3a_branch2a (Conv2D)
bn3a_branch2a (BatchNorm)
res3a_branch2b (Conv2D)
bn3a_branch2b (BatchNorm)
res3a_branch2c (Conv2D)
res3a_branch1 (Conv2D)
bn3a_branch2c (BatchNorm)
bn3a_branch1 (BatchNorm)
res3b_branch2a (Conv2D)
bn3b_branch2a (BatchNorm)
res3b_branch2b (Conv2D)
bn3b_branch2b (BatchNorm)
res3b_branch2c (Conv2D)
bn3b_branch2c (BatchNorm)
res3c_branch2a (Conv2D)
bn3c_branch2a (BatchNorm)
res3c_branch2b (Conv2D)
bn3c_branch2b (BatchNorm)
res3c_branch2c (Conv2D)
bn3c_branch2c (BatchNorm)
res3d_branch2a (Conv2D)
bn3d_branch2a (BatchNorm)
res3d_branch2b (Conv2D)
bn3d_branch2b (BatchNorm)
res3d_branch2c (Conv2D)
bn3d_branch2c (BatchNorm)
res4a_branch2a (Conv2D)
bn4a_branch2a (BatchNorm)
res4a_branch2b (Conv2D)
bn4a_branch2b (BatchNorm)
res4a_branch2c (Conv2D)
res4a_branch1 (Conv2D)
bn4a_branch2c (BatchNorm)
bn4a_branch1 (BatchNorm)
res4b_branch2a (Conv2D)
bn4b_branch2a (BatchNorm)
res4b_branch2b (Conv2D)
bn4b_branch2b (BatchNorm)
res4b_branch2c (Conv2D)
bn4b_branch2c (BatchNorm)
res4c_branch2a (Conv2D)
bn4c_branch2a (BatchNorm)
res4c_branch2b (Conv2D)
bn4c_branch2b (BatchNorm)
res4c_branch2c (Conv2D)
bn4c_branch2c (BatchNorm)
res4d_branch2a (Conv2D)
bn4d_branch2a (BatchNorm)
res4d_branch2b (Conv2D)
bn4d_branch2b (BatchNorm)
res4d_branch2c (Conv2D)
bn4d_branch2c (BatchNorm)
res4e_branch2a (Conv2D)
bn4e_branch2a (BatchNorm)
res4e_branch2b (Conv2D)
bn4e_branch2b (BatchNorm)
res4e_branch2c (Conv2D)
bn4e_branch2c (BatchNorm)
res4f_branch2a (Conv2D)
bn4f_branch2a (BatchNorm)
res4f_branch2b (Conv2D)
bn4f_branch2b (BatchNorm)
res4f_branch2c (Conv2D)
bn4f_branch2c (BatchNorm)
res4g_branch2a (Conv2D)
bn4g_branch2a (BatchNorm)
res4g_branch2b (Conv2D)
bn4g_branch2b (BatchNorm)
res4g_branch2c (Conv2D)
bn4g_branch2c (BatchNorm)
res4h_branch2a (Conv2D)
bn4h_branch2a (BatchNorm)
res4h_branch2b (Conv2D)
bn4h_branch2b (BatchNorm)
res4h_branch2c (Conv2D)
bn4h_branch2c (BatchNorm)
res4i_branch2a (Conv2D)
bn4i_branch2a (BatchNorm)
res4i_branch2b (Conv2D)
bn4i_branch2b (BatchNorm)
res4i_branch2c (Conv2D)
bn4i_branch2c (BatchNorm)
res4j_branch2a (Conv2D)
bn4j_branch2a (BatchNorm)
res4j_branch2b (Conv2D)
bn4j_branch2b (BatchNorm)
res4j_branch2c (Conv2D)
bn4j_branch2c (BatchNorm)
res4k_branch2a (Conv2D)
bn4k_branch2a (BatchNorm)
res4k_branch2b (Conv2D)
bn4k_branch2b (BatchNorm)
res4k_branch2c (Conv2D)
bn4k_branch2c (BatchNorm)
res4l_branch2a (Conv2D)
bn4l_branch2a (BatchNorm)
res4l_branch2b (Conv2D)
bn4l_branch2b (BatchNorm)
res4l_branch2c (Conv2D)
bn4l_branch2c (BatchNorm)
res4m_branch2a (Conv2D)
bn4m_branch2a (BatchNorm)
res4m_branch2b (Conv2D)
bn4m_branch2b (BatchNorm)
res4m_branch2c (Conv2D)
bn4m_branch2c (BatchNorm)
res4n_branch2a (Conv2D)
bn4n_branch2a (BatchNorm)
res4n_branch2b (Conv2D)
bn4n_branch2b (BatchNorm)
res4n_branch2c (Conv2D)
bn4n_branch2c (BatchNorm)
res4o_branch2a (Conv2D)
bn4o_branch2a (BatchNorm)
res4o_branch2b (Conv2D)
bn4o_branch2b (BatchNorm)
res4o_branch2c (Conv2D)
bn4o_branch2c (BatchNorm)
res4p_branch2a (Conv2D)
bn4p_branch2a (BatchNorm)
res4p_branch2b (Conv2D)
bn4p_branch2b (BatchNorm)
res4p_branch2c (Conv2D)
bn4p_branch2c (BatchNorm)
res4q_branch2a (Conv2D)
bn4q_branch2a (BatchNorm)
res4q_branch2b (Conv2D)
bn4q_branch2b (BatchNorm)
res4q_branch2c (Conv2D)
bn4q_branch2c (BatchNorm)
res4r_branch2a (Conv2D)
bn4r_branch2a (BatchNorm)
res4r_branch2b (Conv2D)
bn4r_branch2b (BatchNorm)
res4r_branch2c (Conv2D)
bn4r_branch2c (BatchNorm)
res4s_branch2a (Conv2D)
bn4s_branch2a (BatchNorm)
res4s_branch2b (Conv2D)
bn4s_branch2b (BatchNorm)
res4s_branch2c (Conv2D)
bn4s_branch2c (BatchNorm)
res4t_branch2a (Conv2D)
bn4t_branch2a (BatchNorm)
res4t_branch2b (Conv2D)
bn4t_branch2b (BatchNorm)
res4t_branch2c (Conv2D)
bn4t_branch2c (BatchNorm)
res4u_branch2a (Conv2D)
bn4u_branch2a (BatchNorm)
res4u_branch2b (Conv2D)
bn4u_branch2b (BatchNorm)
res4u_branch2c (Conv2D)
bn4u_branch2c (BatchNorm)
res4v_branch2a (Conv2D)
bn4v_branch2a (BatchNorm)
res4v_branch2b (Conv2D)
bn4v_branch2b (BatchNorm)
res4v_branch2c (Conv2D)
bn4v_branch2c (BatchNorm)
res4w_branch2a (Conv2D)
bn4w_branch2a (BatchNorm)
res4w_branch2b (Conv2D)
bn4w_branch2b (BatchNorm)
res4w_branch2c (Conv2D)
bn4w_branch2c (BatchNorm)
res5a_branch2a (Conv2D)
bn5a_branch2a (BatchNorm)
res5a_branch2b (Conv2D)
bn5a_branch2b (BatchNorm)
res5a_branch2c (Conv2D)
res5a_branch1 (Conv2D)
bn5a_branch2c (BatchNorm)
bn5a_branch1 (BatchNorm)
res5b_branch2a (Conv2D)
bn5b_branch2a (BatchNorm)
res5b_branch2b (Conv2D)
bn5b_branch2b (BatchNorm)
res5b_branch2c (Conv2D)
bn5b_branch2c (BatchNorm)
res5c_branch2a (Conv2D)
bn5c_branch2a (BatchNorm)
res5c_branch2b (Conv2D)
bn5c_branch2b (BatchNorm)
res5c_branch2c (Conv2D)
bn5c_branch2c (BatchNorm)
fpn_c5p5 (Conv2D)
fpn_c4p4 (Conv2D)
fpn_c3p3 (Conv2D)
fpn_c2p2 (Conv2D)
fpn_p5 (Conv2D)
fpn_p2 (Conv2D)
fpn_p3 (Conv2D)
fpn_p4 (Conv2D)
In model: rpn_model
rpn_conv_shared (Conv2D)
rpn_class_raw (Conv2D)
rpn_bbox_pred (Conv2D)
mrcnn_mask_conv1 (TimeDistributed)
mrcnn_mask_bn1 (TimeDistributed)
mrcnn_mask_conv2 (TimeDistributed)
mrcnn_mask_bn2 (TimeDistributed)
mrcnn_class_conv1 (TimeDistributed)
mrcnn_class_bn1 (TimeDistributed)
mrcnn_mask_conv3 (TimeDistributed)
mrcnn_mask_bn3 (TimeDistributed)
mrcnn_class_conv2 (TimeDistributed)
mrcnn_class_bn2 (TimeDistributed)
mrcnn_mask_conv4 (TimeDistributed)
mrcnn_mask_bn4 (TimeDistributed)
mrcnn_bbox_fc (TimeDistributed)
mrcnn_mask_deconv (TimeDistributed)
mrcnn_class_logits (TimeDistributed)
mrcnn_mask (TimeDistributed)
Epoch 21/30
100/100 [==============================] - 252s 3s/step - loss: 0.4870 - rpn_class_loss: 0.0059 - rpn_bbox_loss: 0.2065 - mrcnn_class_loss: 0.0150 - mrcnn_bbox_loss: 0.0895 - mrcnn_mask_loss: 0.1701 - val_loss: 0.5361 - val_rpn_class_loss: 0.0063 - val_rpn_bbox_loss: 0.2386 - val_mrcnn_class_loss: 0.0117 - val_mrcnn_bbox_loss: 0.1074 - val_mrcnn_mask_loss: 0.1720
Epoch 22/30
100/100 [==============================] - 146s 1s/step - loss: 0.4478 - rpn_class_loss: 0.0051 - rpn_bbox_loss: 0.1746 - mrcnn_class_loss: 0.0135 - mrcnn_bbox_loss: 0.0868 - mrcnn_mask_loss: 0.1677 - val_loss: 0.5073 - val_rpn_class_loss: 0.0045 - val_rpn_bbox_loss: 0.2130 - val_mrcnn_class_loss: 0.0123 - val_mrcnn_bbox_loss: 0.1038 - val_mrcnn_mask_loss: 0.1736
Epoch 23/30
100/100 [==============================] - 146s 1s/step - loss: 0.4738 - rpn_class_loss: 0.0058 - rpn_bbox_loss: 0.2014 - mrcnn_class_loss: 0.0134 - mrcnn_bbox_loss: 0.0892 - mrcnn_mask_loss: 0.1640 - val_loss: 0.4632 - val_rpn_class_loss: 0.0044 - val_rpn_bbox_loss: 0.1860 - val_mrcnn_class_loss: 0.0136 - val_mrcnn_bbox_loss: 0.0941 - val_mrcnn_mask_loss: 0.1651
Epoch 24/30
100/100 [==============================] - 146s 1s/step - loss: 0.4679 - rpn_class_loss: 0.0062 - rpn_bbox_loss: 0.1961 - mrcnn_class_loss: 0.0147 - mrcnn_bbox_loss: 0.0860 - mrcnn_mask_loss: 0.1649 - val_loss: 0.5166 - val_rpn_class_loss: 0.0054 - val_rpn_bbox_loss: 0.2283 - val_mrcnn_class_loss: 0.0125 - val_mrcnn_bbox_loss: 0.0980 - val_mrcnn_mask_loss: 0.1723
Epoch 25/30
100/100 [==============================] - 146s 1s/step - loss: 0.4589 - rpn_class_loss: 0.0052 - rpn_bbox_loss: 0.1871 - mrcnn_class_loss: 0.0128 - mrcnn_bbox_loss: 0.0873 - mrcnn_mask_loss: 0.1665 - val_loss: 0.4653 - val_rpn_class_loss: 0.0050 - val_rpn_bbox_loss: 0.1876 - val_mrcnn_class_loss: 0.0118 - val_mrcnn_bbox_loss: 0.0948 - val_mrcnn_mask_loss: 0.1660
Epoch 26/30
100/100 [==============================] - 146s 1s/step - loss: 0.4561 - rpn_class_loss: 0.0058 - rpn_bbox_loss: 0.1875 - mrcnn_class_loss: 0.0133 - mrcnn_bbox_loss: 0.0849 - mrcnn_mask_loss: 0.1646 - val_loss: 0.4801 - val_rpn_class_loss: 0.0052 - val_rpn_bbox_loss: 0.2059 - val_mrcnn_class_loss: 0.0110 - val_mrcnn_bbox_loss: 0.0927 - val_mrcnn_mask_loss: 0.1654
Epoch 27/30
100/100 [==============================] - 146s 1s/step - loss: 0.4469 - rpn_class_loss: 0.0054 - rpn_bbox_loss: 0.1845 - mrcnn_class_loss: 0.0137 - mrcnn_bbox_loss: 0.0813 - mrcnn_mask_loss: 0.1619 - val_loss: 0.5073 - val_rpn_class_loss: 0.0051 - val_rpn_bbox_loss: 0.2169 - val_mrcnn_class_loss: 0.0143 - val_mrcnn_bbox_loss: 0.1002 - val_mrcnn_mask_loss: 0.1708
Epoch 28/30
100/100 [==============================] - 146s 1s/step - loss: 0.4547 - rpn_class_loss: 0.0056 - rpn_bbox_loss: 0.1861 - mrcnn_class_loss: 0.0131 - mrcnn_bbox_loss: 0.0863 - mrcnn_mask_loss: 0.1636 - val_loss: 0.4454 - val_rpn_class_loss: 0.0054 - val_rpn_bbox_loss: 0.1751 - val_mrcnn_class_loss: 0.0146 - val_mrcnn_bbox_loss: 0.0862 - val_mrcnn_mask_loss: 0.1641
Epoch 29/30
100/100 [==============================] - 146s 1s/step - loss: 0.4550 - rpn_class_loss: 0.0054 - rpn_bbox_loss: 0.1886 - mrcnn_class_loss: 0.0135 - mrcnn_bbox_loss: 0.0845 - mrcnn_mask_loss: 0.1630 - val_loss: 0.4813 - val_rpn_class_loss: 0.0047 - val_rpn_bbox_loss: 0.2005 - val_mrcnn_class_loss: 0.0121 - val_mrcnn_bbox_loss: 0.0970 - val_mrcnn_mask_loss: 0.1668
Epoch 30/30
100/100 [==============================] - 146s 1s/step - loss: 0.4489 - rpn_class_loss: 0.0054 - rpn_bbox_loss: 0.1812 - mrcnn_class_loss: 0.0134 - mrcnn_bbox_loss: 0.0844 - mrcnn_mask_loss: 0.1645 - val_loss: 0.4756 - val_rpn_class_loss: 0.0050 - val_rpn_bbox_loss: 0.2004 - val_mrcnn_class_loss: 0.0104 - val_mrcnn_bbox_loss: 0.0917 - val_mrcnn_mask_loss: 0.1681
CPU times: user 1h 9min 9s, sys: 15min 16s, total: 1h 24min 25s
Wall time: 27min 39s
```python
model_path = os.path.join(MODEL_DIR, "galaxia_plus_3.h5")
model.keras_model.save_weights(model_path)
```
```python
_learning_rate=config.LEARNING_RATE / 1000
```
```python
%%time
history_all = train(model, dataset_train,dataset_val,'all',_learning_rate, 40)
```
Training network =all
Starting at epoch 30. LR=1e-06
Checkpoint Path: /home/ubuntu/lab/mask_r_cnn/Mask_RCNN/samples/zoo1/modelos/galaxia20191119T1410/mask_rcnn_galaxia_{epoch:04d}.h5
Selecting layers to train
conv1 (Conv2D)
bn_conv1 (BatchNorm)
res2a_branch2a (Conv2D)
bn2a_branch2a (BatchNorm)
res2a_branch2b (Conv2D)
bn2a_branch2b (BatchNorm)
res2a_branch2c (Conv2D)
res2a_branch1 (Conv2D)
bn2a_branch2c (BatchNorm)
bn2a_branch1 (BatchNorm)
res2b_branch2a (Conv2D)
bn2b_branch2a (BatchNorm)
res2b_branch2b (Conv2D)
bn2b_branch2b (BatchNorm)
res2b_branch2c (Conv2D)
bn2b_branch2c (BatchNorm)
res2c_branch2a (Conv2D)
bn2c_branch2a (BatchNorm)
res2c_branch2b (Conv2D)
bn2c_branch2b (BatchNorm)
res2c_branch2c (Conv2D)
bn2c_branch2c (BatchNorm)
res3a_branch2a (Conv2D)
bn3a_branch2a (BatchNorm)
res3a_branch2b (Conv2D)
bn3a_branch2b (BatchNorm)
res3a_branch2c (Conv2D)
res3a_branch1 (Conv2D)
bn3a_branch2c (BatchNorm)
bn3a_branch1 (BatchNorm)
res3b_branch2a (Conv2D)
bn3b_branch2a (BatchNorm)
res3b_branch2b (Conv2D)
bn3b_branch2b (BatchNorm)
res3b_branch2c (Conv2D)
bn3b_branch2c (BatchNorm)
res3c_branch2a (Conv2D)
bn3c_branch2a (BatchNorm)
res3c_branch2b (Conv2D)
bn3c_branch2b (BatchNorm)
res3c_branch2c (Conv2D)
bn3c_branch2c (BatchNorm)
res3d_branch2a (Conv2D)
bn3d_branch2a (BatchNorm)
res3d_branch2b (Conv2D)
bn3d_branch2b (BatchNorm)
res3d_branch2c (Conv2D)
bn3d_branch2c (BatchNorm)
res4a_branch2a (Conv2D)
bn4a_branch2a (BatchNorm)
res4a_branch2b (Conv2D)
bn4a_branch2b (BatchNorm)
res4a_branch2c (Conv2D)
res4a_branch1 (Conv2D)
bn4a_branch2c (BatchNorm)
bn4a_branch1 (BatchNorm)
res4b_branch2a (Conv2D)
bn4b_branch2a (BatchNorm)
res4b_branch2b (Conv2D)
bn4b_branch2b (BatchNorm)
res4b_branch2c (Conv2D)
bn4b_branch2c (BatchNorm)
res4c_branch2a (Conv2D)
bn4c_branch2a (BatchNorm)
res4c_branch2b (Conv2D)
bn4c_branch2b (BatchNorm)
res4c_branch2c (Conv2D)
bn4c_branch2c (BatchNorm)
res4d_branch2a (Conv2D)
bn4d_branch2a (BatchNorm)
res4d_branch2b (Conv2D)
bn4d_branch2b (BatchNorm)
res4d_branch2c (Conv2D)
bn4d_branch2c (BatchNorm)
res4e_branch2a (Conv2D)
bn4e_branch2a (BatchNorm)
res4e_branch2b (Conv2D)
bn4e_branch2b (BatchNorm)
res4e_branch2c (Conv2D)
bn4e_branch2c (BatchNorm)
res4f_branch2a (Conv2D)
bn4f_branch2a (BatchNorm)
res4f_branch2b (Conv2D)
bn4f_branch2b (BatchNorm)
res4f_branch2c (Conv2D)
bn4f_branch2c (BatchNorm)
res4g_branch2a (Conv2D)
bn4g_branch2a (BatchNorm)
res4g_branch2b (Conv2D)
bn4g_branch2b (BatchNorm)
res4g_branch2c (Conv2D)
bn4g_branch2c (BatchNorm)
res4h_branch2a (Conv2D)
bn4h_branch2a (BatchNorm)
res4h_branch2b (Conv2D)
bn4h_branch2b (BatchNorm)
res4h_branch2c (Conv2D)
bn4h_branch2c (BatchNorm)
res4i_branch2a (Conv2D)
bn4i_branch2a (BatchNorm)
res4i_branch2b (Conv2D)
bn4i_branch2b (BatchNorm)
res4i_branch2c (Conv2D)
bn4i_branch2c (BatchNorm)
res4j_branch2a (Conv2D)
bn4j_branch2a (BatchNorm)
res4j_branch2b (Conv2D)
bn4j_branch2b (BatchNorm)
res4j_branch2c (Conv2D)
bn4j_branch2c (BatchNorm)
res4k_branch2a (Conv2D)
bn4k_branch2a (BatchNorm)
res4k_branch2b (Conv2D)
bn4k_branch2b (BatchNorm)
res4k_branch2c (Conv2D)
bn4k_branch2c (BatchNorm)
res4l_branch2a (Conv2D)
bn4l_branch2a (BatchNorm)
res4l_branch2b (Conv2D)
bn4l_branch2b (BatchNorm)
res4l_branch2c (Conv2D)
bn4l_branch2c (BatchNorm)
res4m_branch2a (Conv2D)
bn4m_branch2a (BatchNorm)
res4m_branch2b (Conv2D)
bn4m_branch2b (BatchNorm)
res4m_branch2c (Conv2D)
bn4m_branch2c (BatchNorm)
res4n_branch2a (Conv2D)
bn4n_branch2a (BatchNorm)
res4n_branch2b (Conv2D)
bn4n_branch2b (BatchNorm)
res4n_branch2c (Conv2D)
bn4n_branch2c (BatchNorm)
res4o_branch2a (Conv2D)
bn4o_branch2a (BatchNorm)
res4o_branch2b (Conv2D)
bn4o_branch2b (BatchNorm)
res4o_branch2c (Conv2D)
bn4o_branch2c (BatchNorm)
res4p_branch2a (Conv2D)
bn4p_branch2a (BatchNorm)
res4p_branch2b (Conv2D)
bn4p_branch2b (BatchNorm)
res4p_branch2c (Conv2D)
bn4p_branch2c (BatchNorm)
res4q_branch2a (Conv2D)
bn4q_branch2a (BatchNorm)
res4q_branch2b (Conv2D)
bn4q_branch2b (BatchNorm)
res4q_branch2c (Conv2D)
bn4q_branch2c (BatchNorm)
res4r_branch2a (Conv2D)
bn4r_branch2a (BatchNorm)
res4r_branch2b (Conv2D)
bn4r_branch2b (BatchNorm)
res4r_branch2c (Conv2D)
bn4r_branch2c (BatchNorm)
res4s_branch2a (Conv2D)
bn4s_branch2a (BatchNorm)
res4s_branch2b (Conv2D)
bn4s_branch2b (BatchNorm)
res4s_branch2c (Conv2D)
bn4s_branch2c (BatchNorm)
res4t_branch2a (Conv2D)
bn4t_branch2a (BatchNorm)
res4t_branch2b (Conv2D)
bn4t_branch2b (BatchNorm)
res4t_branch2c (Conv2D)
bn4t_branch2c (BatchNorm)
res4u_branch2a (Conv2D)
bn4u_branch2a (BatchNorm)
res4u_branch2b (Conv2D)
bn4u_branch2b (BatchNorm)
res4u_branch2c (Conv2D)
bn4u_branch2c (BatchNorm)
res4v_branch2a (Conv2D)
bn4v_branch2a (BatchNorm)
res4v_branch2b (Conv2D)
bn4v_branch2b (BatchNorm)
res4v_branch2c (Conv2D)
bn4v_branch2c (BatchNorm)
res4w_branch2a (Conv2D)
bn4w_branch2a (BatchNorm)
res4w_branch2b (Conv2D)
bn4w_branch2b (BatchNorm)
res4w_branch2c (Conv2D)
bn4w_branch2c (BatchNorm)
res5a_branch2a (Conv2D)
bn5a_branch2a (BatchNorm)
res5a_branch2b (Conv2D)
bn5a_branch2b (BatchNorm)
res5a_branch2c (Conv2D)
res5a_branch1 (Conv2D)
bn5a_branch2c (BatchNorm)
bn5a_branch1 (BatchNorm)
res5b_branch2a (Conv2D)
bn5b_branch2a (BatchNorm)
res5b_branch2b (Conv2D)
bn5b_branch2b (BatchNorm)
res5b_branch2c (Conv2D)
bn5b_branch2c (BatchNorm)
res5c_branch2a (Conv2D)
bn5c_branch2a (BatchNorm)
res5c_branch2b (Conv2D)
bn5c_branch2b (BatchNorm)
res5c_branch2c (Conv2D)
bn5c_branch2c (BatchNorm)
fpn_c5p5 (Conv2D)
fpn_c4p4 (Conv2D)
fpn_c3p3 (Conv2D)
fpn_c2p2 (Conv2D)
fpn_p5 (Conv2D)
fpn_p2 (Conv2D)
fpn_p3 (Conv2D)
fpn_p4 (Conv2D)
In model: rpn_model
rpn_conv_shared (Conv2D)
rpn_class_raw (Conv2D)
rpn_bbox_pred (Conv2D)
mrcnn_mask_conv1 (TimeDistributed)
mrcnn_mask_bn1 (TimeDistributed)
mrcnn_mask_conv2 (TimeDistributed)
mrcnn_mask_bn2 (TimeDistributed)
mrcnn_class_conv1 (TimeDistributed)
mrcnn_class_bn1 (TimeDistributed)
mrcnn_mask_conv3 (TimeDistributed)
mrcnn_mask_bn3 (TimeDistributed)
mrcnn_class_conv2 (TimeDistributed)
mrcnn_class_bn2 (TimeDistributed)
mrcnn_mask_conv4 (TimeDistributed)
mrcnn_mask_bn4 (TimeDistributed)
mrcnn_bbox_fc (TimeDistributed)
mrcnn_mask_deconv (TimeDistributed)
mrcnn_class_logits (TimeDistributed)
mrcnn_mask (TimeDistributed)
Epoch 31/40
100/100 [==============================] - 265s 3s/step - loss: 0.4450 - rpn_class_loss: 0.0055 - rpn_bbox_loss: 0.1830 - mrcnn_class_loss: 0.0128 - mrcnn_bbox_loss: 0.0828 - mrcnn_mask_loss: 0.1610 - val_loss: 0.4812 - val_rpn_class_loss: 0.0047 - val_rpn_bbox_loss: 0.1942 - val_mrcnn_class_loss: 0.0118 - val_mrcnn_bbox_loss: 0.1029 - val_mrcnn_mask_loss: 0.1674
Epoch 32/40
100/100 [==============================] - 149s 1s/step - loss: 0.4591 - rpn_class_loss: 0.0055 - rpn_bbox_loss: 0.1903 - mrcnn_class_loss: 0.0131 - mrcnn_bbox_loss: 0.0847 - mrcnn_mask_loss: 0.1655 - val_loss: 0.5243 - val_rpn_class_loss: 0.0057 - val_rpn_bbox_loss: 0.2403 - val_mrcnn_class_loss: 0.0116 - val_mrcnn_bbox_loss: 0.0951 - val_mrcnn_mask_loss: 0.1716
Epoch 33/40
100/100 [==============================] - 150s 1s/step - loss: 0.4489 - rpn_class_loss: 0.0055 - rpn_bbox_loss: 0.1823 - mrcnn_class_loss: 0.0133 - mrcnn_bbox_loss: 0.0836 - mrcnn_mask_loss: 0.1642 - val_loss: 0.4819 - val_rpn_class_loss: 0.0049 - val_rpn_bbox_loss: 0.2134 - val_mrcnn_class_loss: 0.0136 - val_mrcnn_bbox_loss: 0.0900 - val_mrcnn_mask_loss: 0.1599
Epoch 34/40
100/100 [==============================] - 150s 2s/step - loss: 0.4572 - rpn_class_loss: 0.0054 - rpn_bbox_loss: 0.1893 - mrcnn_class_loss: 0.0132 - mrcnn_bbox_loss: 0.0844 - mrcnn_mask_loss: 0.1650 - val_loss: 0.4667 - val_rpn_class_loss: 0.0040 - val_rpn_bbox_loss: 0.1906 - val_mrcnn_class_loss: 0.0125 - val_mrcnn_bbox_loss: 0.0956 - val_mrcnn_mask_loss: 0.1639
Epoch 35/40
100/100 [==============================] - 150s 2s/step - loss: 0.4474 - rpn_class_loss: 0.0056 - rpn_bbox_loss: 0.1854 - mrcnn_class_loss: 0.0125 - mrcnn_bbox_loss: 0.0815 - mrcnn_mask_loss: 0.1623 - val_loss: 0.4913 - val_rpn_class_loss: 0.0054 - val_rpn_bbox_loss: 0.2029 - val_mrcnn_class_loss: 0.0137 - val_mrcnn_bbox_loss: 0.1001 - val_mrcnn_mask_loss: 0.1693
Epoch 36/40
100/100 [==============================] - 150s 2s/step - loss: 0.4391 - rpn_class_loss: 0.0053 - rpn_bbox_loss: 0.1717 - mrcnn_class_loss: 0.0141 - mrcnn_bbox_loss: 0.0862 - mrcnn_mask_loss: 0.1618 - val_loss: 0.4726 - val_rpn_class_loss: 0.0053 - val_rpn_bbox_loss: 0.1973 - val_mrcnn_class_loss: 0.0130 - val_mrcnn_bbox_loss: 0.0909 - val_mrcnn_mask_loss: 0.1660
Epoch 37/40
100/100 [==============================] - 150s 2s/step - loss: 0.4583 - rpn_class_loss: 0.0056 - rpn_bbox_loss: 0.1941 - mrcnn_class_loss: 0.0129 - mrcnn_bbox_loss: 0.0836 - mrcnn_mask_loss: 0.1621 - val_loss: 0.4880 - val_rpn_class_loss: 0.0050 - val_rpn_bbox_loss: 0.2108 - val_mrcnn_class_loss: 0.0123 - val_mrcnn_bbox_loss: 0.0941 - val_mrcnn_mask_loss: 0.1658
Epoch 38/40
100/100 [==============================] - 151s 2s/step - loss: 0.4373 - rpn_class_loss: 0.0051 - rpn_bbox_loss: 0.1789 - mrcnn_class_loss: 0.0121 - mrcnn_bbox_loss: 0.0806 - mrcnn_mask_loss: 0.1605 - val_loss: 0.4728 - val_rpn_class_loss: 0.0046 - val_rpn_bbox_loss: 0.1868 - val_mrcnn_class_loss: 0.0110 - val_mrcnn_bbox_loss: 0.0991 - val_mrcnn_mask_loss: 0.1713
Epoch 39/40
100/100 [==============================] - 150s 1s/step - loss: 0.4474 - rpn_class_loss: 0.0058 - rpn_bbox_loss: 0.1818 - mrcnn_class_loss: 0.0121 - mrcnn_bbox_loss: 0.0840 - mrcnn_mask_loss: 0.1637 - val_loss: 0.4940 - val_rpn_class_loss: 0.0056 - val_rpn_bbox_loss: 0.1999 - val_mrcnn_class_loss: 0.0113 - val_mrcnn_bbox_loss: 0.1035 - val_mrcnn_mask_loss: 0.1737
Epoch 40/40
100/100 [==============================] - 150s 2s/step - loss: 0.4469 - rpn_class_loss: 0.0058 - rpn_bbox_loss: 0.1804 - mrcnn_class_loss: 0.0132 - mrcnn_bbox_loss: 0.0823 - mrcnn_mask_loss: 0.1651 - val_loss: 0.5108 - val_rpn_class_loss: 0.0063 - val_rpn_bbox_loss: 0.2314 - val_mrcnn_class_loss: 0.0129 - val_mrcnn_bbox_loss: 0.0939 - val_mrcnn_mask_loss: 0.1664
CPU times: user 1h 14min 3s, sys: 16min 21s, total: 1h 30min 24s
Wall time: 28min 49s
```python
model_path = os.path.join(MODEL_DIR, "galaxia_all.h5")
model.keras_model.save_weights(model_path)
```
```python
```
## Detection
```python
class InferenceConfig(GalaxiaConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
#model_path = model.find_last()
# Load trained weights
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
```
W1119 15:52:53.835069 140173346723584 deprecation_wrapper.py:119] From /home/ubuntu/lab/mask_r_cnn/Mask_RCNN/mrcnn/model.py:758: The name tf.sets.set_intersection is deprecated. Please use tf.sets.intersection instead.
W1119 15:52:53.854269 140173346723584 deprecation.py:323] From /home/ubuntu/lab/mask_r_cnn/Mask_RCNN/mrcnn/model.py:772: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
Loading weights from /home/ubuntu/lab/mask_r_cnn/Mask_RCNN/samples/zoo1/modelos/galaxia_all.h5
```python
# Test on a random image
image_id = random.choice(dataset_val.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
dataset_train.class_names, figsize=(8, 8))
```
original_image shape: (256, 256, 3) min: 0.00000 max: 245.00000 uint8
image_meta shape: (15,) min: 0.00000 max: 819.00000 int64
gt_class_id shape: (1,) min: 2.00000 max: 2.00000 int32
gt_bbox shape: (1, 4) min: 101.00000 max: 155.00000 int32
gt_mask shape: (256, 256, 1) min: 0.00000 max: 1.00000 bool

```python
results = model.detect([original_image], verbose=1)
r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
dataset_val.class_names, r['scores'], ax=get_ax())
```
Processing 1 images
image shape: (256, 256, 3) min: 0.00000 max: 245.00000 uint8
molded_images shape: (1, 256, 256, 3) min: -19.04000 max: 234.29000 float64
image_metas shape: (1, 15) min: 0.00000 max: 256.00000 int64
anchors shape: (1, 16368, 4) min: -0.35494 max: 1.10396 float32

```python
# Compute VOC-Style mAP @ IoU=0.5
# Running on 20 images. Increase for better accuracy.
image_ids = np.random.choice(dataset_val.image_ids, 20)
APs = []
for image_id in image_ids:
# Load image and ground truth data
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
# Run object detection
results = model.detect([image], verbose=0)
r = results[0]
# Compute AP
AP, precisions, recalls, overlaps =\
utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
r["rois"], r["class_ids"], r["scores"], r['masks'])
APs.append(AP)
print("mAP: ", np.mean(APs))
```
mAP: 0.95
```python
pwd
```
'/home/ubuntu/lab/mask_r_cnn/Mask_RCNN/samples/zoo1'
```python
```
|
hfariasREPO_NAMEmask_galaxyPATH_START.@mask_galaxy_extracted@mask_galaxy-master@redes@two_class_zoo1@.ipynb_checkpoints@01_train_transferlearning_zoo1-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "constants.py",
"repo_name": "janosch314/GWFish",
"repo_path": "GWFish_extracted/GWFish-main/GWFish/modules/constants.py",
"type": "Python"
}
|
import numpy as np

# Physical and astronomical constants, SI units unless noted.
Mpc = 3.086e22                        # megaparsec [m]
Msol = 1.9885e30                      # solar mass [kg]
R_earth = 6.37e6                      # Earth radius [m]
AU = 1.5e11                           # astronomical unit [m]
sidereal_day = 23.9344696             # sidereal day [hours]
lunar_sidereal_period = 655.7198333   # lunar sidereal month [hours] (~27.32 days)
ecliptic = 23.45 * np.pi / 180.       # obliquity of the ecliptic [rad]
c = 299792458.                        # speed of light [m/s]
G = 6.674e-11                         # gravitational constant [m^3 kg^-1 s^-2]
h = 6.626e-34                         # Planck constant [J s]
|
janosch314REPO_NAMEGWFishPATH_START.@GWFish_extracted@GWFish-main@GWFish@modules@constants.py@.PATH_END.py
|
{
"filename": "e1_envs.py",
"repo_name": "mit-ll/spacegym-kspdg",
"repo_path": "spacegym-kspdg_extracted/spacegym-kspdg-main/src/kspdg/pe1/e1_envs.py",
"type": "Python"
}
|
# Copyright (c) 2024, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
from kspdg.pe1.pe1_base import PursuitEvadeGroup1Env
class PE1_E1_ParentEnv(PursuitEvadeGroup1Env):
    """Pursuit-evade group-1 environment whose evader never maneuvers.

    Concrete scenarios below differ only in the mission loadfile passed
    to the base class.
    """
    def __init__(self, loadfile: str, **kwargs):
        super().__init__(loadfile=loadfile, **kwargs)

    def evasive_maneuvers(self):
        '''Do not perform evasive maneuvers
        '''
        pass
class PE1_E1_I1_Env(PE1_E1_ParentEnv):
    """Passive-evader scenario loaded from ``LOADFILE_I1``."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=PursuitEvadeGroup1Env.LOADFILE_I1, **kwargs)
class PE1_E1_I2_Env(PE1_E1_ParentEnv):
    """Passive-evader scenario loaded from ``LOADFILE_I2``."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=PursuitEvadeGroup1Env.LOADFILE_I2, **kwargs)
class PE1_E1_I3_Env(PE1_E1_ParentEnv):
    """Passive-evader scenario loaded from ``LOADFILE_I3``."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=PursuitEvadeGroup1Env.LOADFILE_I3, **kwargs)
class PE1_E1_I4_Env(PE1_E1_ParentEnv):
    """Passive-evader scenario loaded from ``LOADFILE_I4``."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=PursuitEvadeGroup1Env.LOADFILE_I4, **kwargs)
|
mit-llREPO_NAMEspacegym-kspdgPATH_START.@spacegym-kspdg_extracted@spacegym-kspdg-main@src@kspdg@pe1@e1_envs.py@.PATH_END.py
|
{
"filename": "readmultispec.py",
"repo_name": "mzechmeister/viper",
"repo_path": "viper_extracted/viper-master/inst/readmultispec.py",
"type": "Python"
}
|
# Licensed under a GPLv3 style license - see LICENSE
"""readmultispec.py
Read IRAF (echelle) spectrum in multispec format from a FITS file.
Can read most multispec formats including linear, log, cubic spline,
Chebyshev or Legendre dispersion spectra.
Usage: retdict = readmultispec(fitsfile, reform=True)
Inputs:
fitfile Name of the FITS file
reform If true (the default), a single spectrum dimensioned
[4,1,NWAVE] is returned as flux[4,NWAVE]. If false,
it is returned as a 3-D array flux[4,1,NWAVE].
Returns a dictionary with these entries:
flux Array dimensioned [NCOMPONENTS,NORDERS,NWAVE] with the spectra.
If NORDERS=1, array is [NCOMPONENTS,NWAVE]; if NCOMPONENTS is also
unity, array is [NWAVE]. (This can be changed
using the reform keyword.) Commonly the first dimension
is 4 and indexes the spectrum, an alternate version of
the spectrum, the sky, and the error array. I have also
seen examples where NCOMPONENTS=2 (probably spectrum and
error). Generally I think you can rely on the first element
flux[0] to be the extracted spectrum. I don't know of
any foolproof way to figure out from the IRAF header what the
various components are.
wavelen Array dimensioned [NORDERS,NWAVE] with the wavelengths for
each order.
header The full FITS header from pyfits.
wavefields [NORDERS] List with the analytical wavelength
description (polynomial coefficients, etc.) extracted from
the header. This is probably not very useful but is
included just in case.
History:
Created by Rick White based on my IDL readechelle.pro, 2012 August 15
Apologies for any IDL-isms that remain!
"""
import numpy as np
from astropy.io import fits as pyfits
def nonlinearwave(nwave, specstr, verbose=False):
    """Evaluate a non-linear multispec dispersion description.

    Parameters
    ----------
    nwave : int
        Number of wavelength points to generate.
    specstr : str
        Contents of a ``specN="..."`` field from the WAT2_* keywords.
    verbose : bool
        Print a short description of the dispersion function.

    Returns
    -------
    wave : ndarray
        Wavelengths evaluated at (1-based) pixels 1..nwave.
    fields : list of str
        The whitespace-split dispersion fields, for reference.

    Raises
    ------
    ValueError
        If the string does not describe a supported non-linear dispersion.
    """
    fields = specstr.split()
    if int(fields[2]) != 2:
        raise ValueError('Not nonlinear dispersion: dtype=' + fields[2])
    if len(fields) < 12:
        raise ValueError('Bad spectrum format (only %d fields)' % len(fields))
    # Weight and zero-point offset are parsed (validating the string)
    # but not otherwise used by this implementation.
    wt = float(fields[9])
    w0 = float(fields[10])
    ftype = int(fields[11])

    if ftype == 3:
        # Cubic spline dispersion.
        if len(fields) < 15:
            raise ValueError('Bad spline format (only %d fields)' % len(fields))
        npieces = int(fields[12])
        pmin = float(fields[13])
        pmax = float(fields[14])
        if verbose:
            print('Dispersion is order-%d cubic spline' % npieces)
        if len(fields) != 15 + npieces + 3:
            raise ValueError('Bad order-%d spline format (%d fields)' % (npieces, len(fields)))
        coeff = np.asarray(fields[15:], dtype=float)
        # Map 1-based pixel numbers onto normalized spline coordinates.
        s = (np.arange(nwave, dtype=float) + 1 - pmin) / (pmax - pmin) * npieces
        piece = s.astype(int).clip(0, npieces - 1)
        a = (piece + 1) - s
        b = s - piece
        # IRAF spline3 basis functions for the four neighboring coefficients.
        wave = (coeff[piece] * a ** 3
                + coeff[piece + 1] * (1 + 3 * a * (1 + a * b))
                + coeff[piece + 2] * (1 + 3 * b * (1 + a * b))
                + coeff[piece + 3] * b ** 3)
    elif ftype in (1, 2):
        # Chebyshev (1) or Legendre (2) polynomial dispersion.
        if len(fields) < 15:
            raise ValueError('Bad polynomial format (only %d fields)' % len(fields))
        order = int(fields[12])
        pmin = float(fields[13])
        pmax = float(fields[14])
        if verbose:
            if ftype == 1:
                print('Dispersion is order-%d Chebyshev polynomial' % order)
            else:
                print('Dispersion is order-%d Legendre polynomial (NEEDS TEST)' % order)
        if len(fields) != 15 + order:
            # Tolerate a mismatched coefficient count by trusting the
            # number of coefficients actually present.
            if verbose:
                print('Bad order-%d polynomial format (%d fields)' % (order, len(fields)))
                print("Changing order from %i to %i" % (order, len(fields) - 15))
            order = len(fields) - 15
        coeff = np.asarray(fields[15:], dtype=float)
        # Normalized x in [-1, 1] over the pixel range [pmin, pmax].
        x = (np.arange(nwave, dtype=float) + 1 - (pmax + pmin) / 2) / ((pmax - pmin) / 2)
        prev = np.ones(nwave, dtype=float)
        cur = x
        wave = prev * coeff[0] + cur * coeff[1]
        for k in range(2, order):
            if ftype == 1:
                # Chebyshev recurrence: T_k = 2 x T_{k-1} - T_{k-2}
                nxt = 2 * x * cur - prev
            else:
                # Legendre recurrence: k P_k = (2k-1) x P_{k-1} - (k-1) P_{k-2}
                nxt = ((2 * k - 1) * x * cur - (k - 1) * prev) / k
            wave = wave + nxt * coeff[k]
            prev = cur
            cur = nxt
    else:
        raise ValueError('Cannot handle dispersion function of type %d' % ftype)
    return wave, fields
def readmultispec(fitsfile, reform=True, quiet=False):
    """Read IRAF echelle spectrum in multispec format from a FITS file.

    Can read most multispec formats including linear, log, cubic spline,
    Chebyshev or Legendre dispersion spectra.

    Parameters
    ----------
    fitsfile : str
        Name of the FITS file.
    reform : bool
        If true (the default), a single spectrum dimensioned [4, 1, NWAVE]
        is returned as [4, NWAVE]; if false it is returned as a 3-D array.
    quiet : bool
        Suppress the informational dispersion messages.

    Returns
    -------
    dict
        Keys 'flux', 'wavelen', 'header' and 'wavefields' (the latter is
        None for linear-WCS files).

    Raises
    ------
    ValueError
        If the header cannot be interpreted as a multispec/linear WCS.
    """
    fh = pyfits.open(fitsfile, ignore_blank=True)
    try:
        header = fh[0].header
        flux = fh[0].data
    finally:
        fh.close()
    temp = flux.shape
    nwave = temp[-1]
    if len(temp) == 1:
        nspec = 1
    else:
        nspec = temp[-2]

    # First try linear dispersion (CRVAL/CRPIX/CD keywords).
    try:
        crval1 = header['crval1']
        crpix1 = header['crpix1']
        cd1_1 = header['cd1_1']
        ctype1 = header['ctype1']
        if ctype1.strip() == 'LINEAR':
            wavelen = np.zeros((nspec, nwave), dtype=float)
            ww = (np.arange(nwave, dtype=float) + 1 - crpix1) * cd1_1 + crval1
            for i in range(nspec):
                wavelen[i, :] = ww
            # Handle log spacing too (DC-FLAG=1 means log10 wavelengths).
            dcflag = header.get('dc-flag', 0)
            if dcflag == 1:
                wavelen = 10.0 ** wavelen
                if not quiet:
                    print('Dispersion is linear in log wavelength')
            elif dcflag == 0:
                if not quiet:
                    print('Dispersion is linear')
            else:
                raise ValueError('Dispersion not linear or log (DC-FLAG=%s)' % dcflag)
            if nspec == 1 and reform:
                # get rid of unity dimensions
                flux = np.squeeze(flux)
                wavelen.shape = (nwave,)
            return {'flux': flux, 'wavelen': wavelen, 'header': header, 'wavefields': None}
    except KeyError:
        pass

    # Get wavelength parameters from the multispec WAT2_* keywords.
    try:
        wat2 = header['wat2_*']
        count = len(wat2)
    except KeyError:
        raise ValueError('Cannot decipher header, need either WAT2_ or CRVAL keywords')

    # Concatenate the keyword values into one big string.
    watstr = []
    for i in range(len(wat2)):
        # hack to fix the fact that older pyfits versions (< 3.1)
        # strip trailing blanks from string values in an apparently
        # irrecoverable way
        # v = wat2[i].value
        v = wat2[i]
        v = v + (" " * (68 - len(v)))  # restore trailing blanks
        watstr.append(v)
    watstr = ''.join(watstr)

    # Find all the specN="..." substrings.
    specstr = [''] * nspec
    for i in range(nspec):
        sname = 'spec' + str(i + 1)
        p1 = watstr.find(sname)
        p2 = watstr.find('"', p1)
        p3 = watstr.find('"', p2 + 1)
        # BUGFIX: the original tested p1 twice and never validated p2,
        # so a missing opening quote went undetected.
        if p1 < 0 or p2 < 0 or p3 < 0:
            raise ValueError('Cannot find ' + sname + ' in WAT2_* keyword')
        specstr[i] = watstr[p2 + 1:p3]

    # First 9 numeric fields of each spec string: ap, beam, dtype, w1, dw,
    # nw, z, aplow, aphigh (per the IRAF multispec convention).
    wparms = np.zeros((nspec, 9), dtype=float)
    w1 = np.zeros(9, dtype=float)
    for i in range(nspec):
        w1 = np.asarray(specstr[i].split(), dtype=float)
        wparms[i, :] = w1[:9]
        if w1[2] == -1:
            raise ValueError('Spectrum %d has no wavelength calibration (type=%d)' %
                             (i + 1, w1[2]))
        # elif w1[6] != 0:
        #     raise ValueError('Spectrum %d has non-zero redshift (z=%f)' % (i+1,w1[6]))

    wavelen = np.zeros((nspec, nwave), dtype=float)
    wavefields = [None] * nspec
    for i in range(nspec):
        # Only describe the dispersion once (first order) unless quiet.
        verbose = (not quiet) and (i == 0)
        if wparms[i, 2] == 0 or wparms[i, 2] == 1:
            # Simple linear or log spacing.
            wavelen[i, :] = np.arange(nwave, dtype=float) * wparms[i, 4] + wparms[i, 3]
            if wparms[i, 2] == 1:
                wavelen[i, :] = 10.0 ** wavelen[i, :]
                if verbose:
                    print('Dispersion is linear in log wavelength')
            elif verbose:
                print('Dispersion is linear')
        else:
            # Non-linear wavelengths (spline/Chebyshev/Legendre).
            wavelen[i, :], wavefields[i] = nonlinearwave(nwave, specstr[i],
                                                         verbose=verbose)
        # BUGFIX: apply the per-spectrum redshift factor only to this
        # order.  The original did `wavelen *= ...`, rescaling the WHOLE
        # array on every iteration and compounding the correction when
        # nspec > 1.
        wavelen[i, :] *= 1.0 + wparms[i, 6]
        if verbose:
            print("Correcting for redshift: z=%f" % wparms[i, 6])

    if nspec == 1 and reform:
        # get rid of unity dimensions
        flux = np.squeeze(flux)
        wavelen.shape = (nwave,)
    return {'flux': flux, 'wavelen': wavelen, 'header': header, 'wavefields': wavefields}
|
mzechmeisterREPO_NAMEviperPATH_START.@viper_extracted@viper-master@inst@readmultispec.py@.PATH_END.py
|
{
"filename": "compile_java.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/build/scripts/compile_java.py",
"type": "Python"
}
|
import argparse
import contextlib
from shutil import copytree
import os
import shutil
import subprocess as sp
import tarfile
import zipfile
import sys
# Explicitly enable local imports
# Don't forget to add imported scripts to inputs of the calling command!
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import process_command_files as pcf
import java_command_file as jcf
def parse_args(args):
    """Parse the wrapper's own command line.

    Returns a tuple of (parsed argparse namespace, list of positional
    source paths).
    """
    parser = argparse.ArgumentParser(description='Wrapper to invoke Java compilation from ya make build')
    for flag, descr in (
        ('--javac-bin', 'path to javac'),
        ('--jar-bin', 'path to jar tool'),
        ('--java-bin', 'path to java binary'),
        ('--kotlin-compiler', 'path to kotlin compiler jar file'),
        ('--vcs-mf', 'path to VCS info manifest snippet'),
        ('--package-prefix', 'package prefix for resource files'),
        ('--jar-output', 'jar file with compiled classes destination path'),
        ('--srcs-jar-output', 'jar file with sources destination path'),
    ):
        parser.add_argument(flag, help=descr)
    parser.add_argument('srcs', nargs="*")
    parsed = parser.parse_args(args)
    return parsed, parsed.srcs
def mkdir_p(directory):
    """Create *directory* (and any missing parents), ignoring if it exists.

    Uses ``exist_ok=True`` instead of the original check-then-create pair,
    which could raise if another process created the directory between the
    ``os.path.exists`` test and ``os.makedirs``.
    """
    os.makedirs(directory, exist_ok=True)
def split_cmd_by_delim(cmd, delim='DELIM'):
    """Split a flat argument list into sublists on *delim* sentinels.

    Every occurrence of *delim* starts a new (possibly empty) group; the
    sentinel itself is not included in the output.
    """
    groups = [[]]
    for token in cmd:
        if token == delim:
            groups.append([])
        else:
            groups[-1].append(token)
    return groups
def main():
    """Entry point: unpack sources, compile Java/Kotlin, and package jars."""
    loaded_args = pcf.get_args(sys.argv[1:])
    # The flat command line carries four sections separated by 'DELIM':
    # wrapper args, javac options, classpath peers, kotlin compiler options.
    cmd_parts = split_cmd_by_delim(loaded_args)
    assert len(cmd_parts) == 4
    args, javac_opts, peers, ktc_opts = cmd_parts
    opts, jsrcs = parse_args(args)
    # .jsrc archives among peers are source inputs, not classpath entries.
    jsrcs += list(filter(lambda x: x.endswith('.jsrc'), peers))
    peers = list(filter(lambda x: not x.endswith('.jsrc'), peers))
    sources_dir = 'src'
    mkdir_p(sources_dir)
    # Unpack .jsrc tarballs into the working source tree.
    for s in jsrcs:
        if s.endswith('.jsrc'):
            with contextlib.closing(tarfile.open(s, 'r')) as tf:
                tf.extractall(path=sources_dir, filter='data')
    srcs = []
    for r, _, files in os.walk(sources_dir):
        for f in files:
            srcs.append(os.path.join(r, f))
    srcs += jsrcs
    # Split the gathered files by language.
    ktsrcs = list(filter(lambda x: x.endswith('.kt'), srcs))
    srcs = list(filter(lambda x: x.endswith('.java'), srcs))
    classes_dir = 'cls'
    mkdir_p(classes_dir)
    classpath = os.pathsep.join(peers)
    if srcs:
        # NOTE(review): this list file is written but not referenced below;
        # presumably kept for debugging -- confirm before removing.
        temp_sources_file = 'temp.sources.list'
        with open(temp_sources_file, 'w') as ts:
            ts.write(' '.join(srcs))
    if ktsrcs:
        # Compile Kotlin first so its classes can be put on javac's classpath.
        kt_classes_dir = 'kt_cls'
        mkdir_p(kt_classes_dir)
        jcf.call_java_with_command_file(
            [
                opts.java_bin,
                '-Didea.max.content.load.filesize=30720',
                '-jar',
                opts.kotlin_compiler,
                '-classpath',
                classpath,
                '-d',
                kt_classes_dir,
            ]
            + ktc_opts,
            wrapped_args=ktsrcs + srcs,
        )
        classpath = os.pathsep.join([kt_classes_dir, classpath])
    if srcs:
        jcf.call_java_with_command_file(
            [opts.javac_bin, '-nowarn', '-g', '-classpath', classpath, '-encoding', 'UTF-8', '-d', classes_dir]
            + javac_opts,
            wrapped_args=srcs,
        )
    # Merge prebuilt jars among the inputs into the output trees.
    for s in jsrcs:
        if s.endswith('-sources.jar'):
            with zipfile.ZipFile(s) as zf:
                zf.extractall(sources_dir)
        elif s.endswith('.jar'):
            with zipfile.ZipFile(s) as zf:
                zf.extractall(classes_dir)
    if ktsrcs:
        copytree(kt_classes_dir, classes_dir, dirs_exist_ok=True)
    # Package the classes jar; 'cfm' embeds the VCS manifest snippet,
    # 'cfM' omits the manifest entirely.
    if opts.vcs_mf:
        sp.check_call([opts.jar_bin, 'cfm', opts.jar_output, opts.vcs_mf, os.curdir], cwd=classes_dir)
    else:
        sp.check_call([opts.jar_bin, 'cfM', opts.jar_output, os.curdir], cwd=classes_dir)
    if opts.srcs_jar_output:
        # Optionally emit a sources jar, relocating loose .java inputs
        # under the package prefix when one was given.
        for s in jsrcs:
            if s.endswith('.java'):
                if opts.package_prefix:
                    d = os.path.join(sources_dir, *(opts.package_prefix.split('.') + [os.path.basename(s)]))
                else:
                    d = os.path.join(sources_dir, os.path.basename(s))
                shutil.copyfile(s, d)
        if opts.vcs_mf:
            sp.check_call([opts.jar_bin, 'cfm', opts.srcs_jar_output, opts.vcs_mf, os.curdir], cwd=sources_dir)
        else:
            sp.check_call([opts.jar_bin, 'cfM', opts.srcs_jar_output, os.curdir], cwd=sources_dir)


if __name__ == '__main__':
    main()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@build@scripts@compile_java.py@.PATH_END.py
|
{
"filename": "logger.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py3/IPython/core/logger.py",
"type": "Python"
}
|
"""Logger class for IPython's logging facilities.
"""
#*****************************************************************************
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
#****************************************************************************
# Modules and globals
# Python standard modules
import glob
import io
import logging
import os
import time
# prevent jedi/parso's debug messages pipe into interactiveshell
logging.getLogger("parso").setLevel(logging.WARNING)
#****************************************************************************
# FIXME: This class isn't a mixin anymore, but it still needs attributes from
# ipython and does input cache management. Finish cleanup later...
class Logger(object):
    """A Logfile class with different policies for file creation.

    Supported ``logmode`` values: 'append', 'backup', 'global', 'over'
    and 'rotate' (see ``logstart`` for their semantics).
    """

    def __init__(self, home_dir, logfname='Logger.log', loghead=u'',
                 logmode='over'):
        # this is the full ipython instance, we need some attributes from it
        # which won't exist until later. What a mess, clean up later...
        self.home_dir = home_dir      # base dir used by the 'global' mode
        self.logfname = logfname      # current log file name
        self.loghead = loghead        # header written to newly created logs
        self.logmode = logmode        # validated via the property below
        self.logfile = None           # open file object once logging starts

        # Whether to log raw or processed input
        self.log_raw_input = False

        # whether to also log output
        self.log_output = False

        # whether to put timestamps before each log entry
        self.timestamp = False

        # activity control flags
        self.log_active = False

    # logmode is a validated property
    def _set_mode(self,mode):
        if mode not in ['append','backup','global','over','rotate']:
            raise ValueError('invalid log mode %s given' % mode)
        self._logmode = mode

    def _get_mode(self):
        return self._logmode

    logmode = property(_get_mode,_set_mode)

    def logstart(self, logfname=None, loghead=None, logmode=None,
                 log_output=False, timestamp=False, log_raw_input=False):
        """Generate a new log-file with a default header.

        Modes: 'append' adds to an existing file; 'backup' renames any
        existing file to ``name~`` first; 'global' appends to a file in
        ``home_dir``; 'over' truncates; 'rotate' keeps numbered backups
        (``name.001~``, ``name.002~``, ...).

        Raises RuntimeError if the log has already been started"""
        if self.logfile is not None:
            raise RuntimeError('Log file is already active: %s' %
                               self.logfname)

        # The parameters can override constructor defaults
        if logfname is not None: self.logfname = logfname
        if loghead is not None: self.loghead = loghead
        if logmode is not None: self.logmode = logmode

        # Parameters not part of the constructor
        self.timestamp = timestamp
        self.log_output = log_output
        self.log_raw_input = log_raw_input

        # init depending on the log mode requested
        isfile = os.path.isfile
        logmode = self.logmode

        if logmode == 'append':
            self.logfile = io.open(self.logfname, 'a', encoding='utf-8')

        elif logmode == 'backup':
            if isfile(self.logfname):
                backup_logname = self.logfname+'~'
                # Manually remove any old backup, since os.rename may fail
                # under Windows.
                if isfile(backup_logname):
                    os.remove(backup_logname)
                os.rename(self.logfname,backup_logname)
            self.logfile = io.open(self.logfname, 'w', encoding='utf-8')

        elif logmode == 'global':
            self.logfname = os.path.join(self.home_dir,self.logfname)
            self.logfile = io.open(self.logfname, 'a', encoding='utf-8')

        elif logmode == 'over':
            if isfile(self.logfname):
                os.remove(self.logfname)
            self.logfile = io.open(self.logfname,'w', encoding='utf-8')

        elif logmode == 'rotate':
            if isfile(self.logfname):
                if isfile(self.logfname+'.001~'):
                    # Shift every numbered backup up by one
                    # (.001~ -> .002~, ...) before rotating the live file in.
                    old = glob.glob(self.logfname+'.*~')
                    old.sort()
                    old.reverse()
                    for f in old:
                        root, ext = os.path.splitext(f)
                        num = int(ext[1:-1])+1
                        os.rename(f, root+'.'+repr(num).zfill(3)+'~')
                os.rename(self.logfname, self.logfname+'.001~')
            self.logfile = io.open(self.logfname, 'w', encoding='utf-8')

        if logmode != 'append':
            self.logfile.write(self.loghead)

        self.logfile.flush()
        self.log_active = True

    def switch_log(self,val):
        """Switch logging on/off. val should be ONLY a boolean."""

        if val not in [False,True,0,1]:
            raise ValueError('Call switch_log ONLY with a boolean argument, '
                             'not with: %s' % val)

        label = {0:'OFF',1:'ON',False:'OFF',True:'ON'}

        if self.logfile is None:
            print("""
Logging hasn't been started yet (use logstart for that).

%logon/%logoff are for temporarily starting and stopping logging for a logfile
which already exists. But you must first start the logging process with
%logstart (optionally giving a logfile name).""")

        else:
            if self.log_active == val:
                print('Logging is already',label[val])
            else:
                print('Switching logging',label[val])
                self.log_active = not self.log_active
                self.log_active_out = self.log_active

    def logstate(self):
        """Print a status message about the logger."""
        if self.logfile is None:
            print('Logging has not been activated.')
        else:
            state = self.log_active and 'active' or 'temporarily suspended'
            print('Filename :', self.logfname)
            print('Mode :', self.logmode)
            print('Output logging :', self.log_output)
            print('Raw input log :', self.log_raw_input)
            print('Timestamping :', self.timestamp)
            print('State :', state)

    def log(self, line_mod, line_ori):
        """Write the sources to a log.

        Inputs:

        - line_mod: possibly modified input, such as the transformations made
          by input prefilters or input handlers of various kinds. This should
          always be valid Python.

        - line_ori: unmodified input line from the user. This is not
          necessarily valid Python.
        """

        # Write the log line, but decide which one according to the
        # log_raw_input flag, set when the log is started.
        if self.log_raw_input:
            self.log_write(line_ori)
        else:
            self.log_write(line_mod)

    def log_write(self, data, kind='input'):
        """Write data to the log file, if active.

        ``kind`` is 'input' (optionally timestamped) or 'output' (each
        line prefixed with '#[Out]# ', only if log_output is enabled).
        """
        # print('data: %r' % data) # dbg
        if self.log_active and data:
            write = self.logfile.write
            if kind=='input':
                if self.timestamp:
                    write(time.strftime('# %a, %d %b %Y %H:%M:%S\n', time.localtime()))
                write(data)
            elif kind=='output' and self.log_output:
                odata = u'\n'.join([u'#[Out]# %s' % s
                                    for s in data.splitlines()])
                write(u'%s\n' % odata)
            try:
                self.logfile.flush()
            except OSError:
                # Flushing is best-effort: warn but keep the session alive.
                print("Failed to flush the log file.")
                print(
                    f"Please check that {self.logfname} exists and have the right permissions."
                )
                print(
                    "Also consider turning off the log with `%logstop` to avoid this warning."
                )

    def logstop(self):
        """Fully stop logging and close log file.

        In order to start logging again, a new logstart() call needs to be
        made, possibly (though not necessarily) with a new filename, mode and
        other options."""
        if self.logfile is not None:
            self.logfile.close()
            self.logfile = None
        else:
            print("Logging hadn't been started.")
        self.log_active = False

    # For backwards compatibility, in case anyone was using this.
    close_log = logstop
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py3@IPython@core@logger.py@.PATH_END.py
|
{
"filename": "create_time_con.py",
"repo_name": "shengjin/Nii",
"repo_path": "Nii_extracted/Nii-main/create_time_con.py",
"type": "Python"
}
|
import numpy as np
from inputsimobs import *

##########################
# observation time sequence in days
# drop save random part
# Dense, evenly spaced observation-time grid over [t0, t1];
# t0, t1 and N_time_init come from inputsimobs (star import).
time_con0 = np.linspace(t0,t1,int(N_time_init))
#
# Randomization factors for segment sizing: each dropped-gap (seg) or
# retained-run (fill) length is drawn around its even share of the
# remaining point budget, scaled by these min/max factors.
seg_rand_min = 0.5
seg_rand_max = 1.5
fill_rand_min = 0.2
fill_rand_max = 1.8
def create_time_seg():
    """Randomly select observation epochs by dropping contiguous segments.

    Roughly ``len(time_con0) * drop_ratio`` points are removed, distributed
    over ``N_miss_seg`` randomly sized gaps separated by randomly sized
    runs of retained points.  All tunables (``drop_seg_min``,
    ``drop_seg_max``, ``drop_ratio``, ``time_con0``, ``seg_rand_*``,
    ``fill_rand_*``) are module-level globals.

    Returns
    -------
    tuple of ndarray
        ``np.where``-style index tuple selecting the retained epochs
        of ``time_con0``.
    """
    # Choose how many gaps to cut and how many points they must consume.
    N_miss_seg = np.random.randint(drop_seg_min, drop_seg_max + 1)
    N_miss_point = round(len(time_con0) * drop_ratio)
    N_miss_point_left = N_miss_point
    Len_N_miss_seg = np.zeros(N_miss_seg)
    for i in range(N_miss_seg - 1):
        # Draw each gap length around the even share of the remaining budget.
        N_of_i_miss_seg = np.random.randint(
            round(N_miss_point_left / (N_miss_seg - i) * seg_rand_min),
            round(N_miss_point_left / (N_miss_seg - i) * seg_rand_max))
        Len_N_miss_seg[i] = N_of_i_miss_seg
        N_miss_point_left = N_miss_point_left - N_of_i_miss_seg
    # Last gap absorbs whatever budget is left.
    Len_N_miss_seg[-1] = N_miss_point_left

    # The retained points form N_miss_seg + 1 runs around the gaps.
    N_fill_point = len(time_con0) - int(Len_N_miss_seg.sum())
    N_fill_seg = N_miss_seg + 1
    N_fill_point_left = N_fill_point
    Len_N_fill_seg = np.zeros(N_fill_seg)
    for i in range(N_fill_seg - 1):
        # BUGFIX: the lower bound originally divided by (N_miss_seg - i)
        # -- a copy-paste from the gap loop above.  Both bounds must use
        # N_fill_seg so each run is drawn around its even share of the
        # remaining retained-point budget.
        N_of_i_fill_seg = np.random.randint(
            round(N_fill_point_left / (N_fill_seg - i) * fill_rand_min),
            round(N_fill_point_left / (N_fill_seg - i) * fill_rand_max))
        Len_N_fill_seg[i] = N_of_i_fill_seg
        N_fill_point_left = N_fill_point_left - N_of_i_fill_seg
    Len_N_fill_seg[-1] = N_fill_point_left

    # Build a drop mask aligned with time_con0: 1 marks a dropped epoch.
    t_transit = np.zeros(len(time_con0))
    N_miss_start = 0
    N_miss_end = 0
    for i in range(N_miss_seg):
        # Alternate fill run then gap: advance past the run, mark the gap.
        N_miss_start = N_miss_start + Len_N_fill_seg[i]
        N_miss_end = N_miss_start + Len_N_miss_seg[i]
        t_transit[int(N_miss_start):int(N_miss_end)] = 1
        N_miss_start = N_miss_end

    # Retained epochs are those never marked as dropped.
    retain_ind = np.where(t_transit < 1.0)
    return retain_ind
|
shengjinREPO_NAMENiiPATH_START.@Nii_extracted@Nii-main@create_time_con.py@.PATH_END.py
|
{
"filename": "imred.py",
"repo_name": "saltastro/polsalt",
"repo_path": "polsalt_extracted/polsalt-master/polsalt/imred.py",
"type": "Python"
}
|
"""
IMRED
Reduction script for SALT data -- this is
for science level reductions with variance frames
This includes step that are not yet included in the pipeline
and can be used for extended reductions of SALT data.
It does require the pysalt package to be installed
and up to date.
"""
# polSALT: fix Pfits without XTALK
# polSALT: use local version of createbadpixel = masterbadpixel
# polSALT: fix VAR and BPM extensions after mosaic
import os, sys, glob, copy, shutil, inspect
import numpy as np
from astropy.io import fits as pyfits
from scipy.ndimage.filters import median_filter
from pyraf import iraf
from iraf import pysalt
from saltsafelog import logging
from saltobslog import obslog
from saltprepare import *
from saltbias import bias
from saltgain import gain
from saltxtalk import xtalk
from saltcrclean import multicrclean
from saltcombine import saltcombine
from saltflat import saltflat
#from saltmosaic import saltmosaic
from saltmosaic_kn import saltmosaic
from saltillum import saltillum
debug = True
import reddir
from specpolutils import datedline
datadir = os.path.dirname(inspect.getfile(reddir))+"/data/"
def imred(infilelist, prodir, bpmfile=None, crthresh='', gaindb = None, cleanup=True):
    """Science-level reduction of raw SALT frames: prepare, overscan/bias,
    variance + bad-pixel-mask attachment, gain, crosstalk correction,
    optional cosmic-ray cleaning, mosaicking, and cleanup.

    Inputs:
        infilelist: list of raw FITS file names (basenames start with a
            letter followed by the yyyymmdd observation date).
        prodir: product directory (NOTE(review): not referenced in this
            function body -- confirm whether it is still needed).
        bpmfile: master bad-pixel-mask FITS file.
        crthresh: '' selects a grating-dependent default threshold;
            False disables CR cleaning; any other value is used directly.
        gaindb: optional gain database file; falsy uses header gains.
        cleanup: if True, intermediate p*/bp*/gbp*/xgbp* files are removed.
    """
    #get the name of the files
    # NOTE(review): infiles, flatimage and dbfile are computed but not
    # used below -- presumably leftovers; confirm before removing.
    infiles=','.join(['%s' % x for x in infilelist])
    #get the current date for the files
    obsdate=os.path.basename(infilelist[0])[1:9]
    print "Observation Date: ",obsdate
    #set up some files that will be needed
    logfile='im'+obsdate+'.log'
    flatimage='FLAT%s.fits' % (obsdate)
    dbfile='spec%s.db' % obsdate
    # Most recent geometry entry on or before obsdate.
    geomline = datedline(datadir+'RSSgeom.dat',obsdate)
    if len(geomline) == 0:
        print 'Invalid geometry file, ',datadir+'RSSgeom.dat',', exitting'
        exit()
    geomfile=obsdate+'_geom.txt'
    open(geomfile,'w').write(geomline)
    #create the observation log
    # obs_dict=obslog(infilelist)
    verbose=True
    with logging(logfile, debug) as log:
        log.message('Pysalt Version: '+pysalt.verno, with_header=False)
        #prepare the data
        for img in infilelist:
            hdu = pyfits.open(img)
            # for backwards compatibility
            hdu = remove_duplicate_keys(hdu)
            # Older files lack XTALK; install per-amplifier defaults.
            if not 'XTALK' in hdu[1].header:
                hdu[1].header['XTALK']=1474
                hdu[2].header['XTALK']=1474
                hdu[3].header['XTALK']=1166
                hdu[4].header['XTALK']=1111
                hdu[5].header['XTALK']=1377
                hdu[6].header['XTALK']=1377
            img = os.path.basename(img)
            hdu = prepare(hdu, createvar=False, badpixelstruct=None)
            if not cleanup: hdu.writeto('p'+img, overwrite=True)
            hdu = bias(hdu,subover=True, trim=True, subbias=False,
                       bstruct=None, median=False, function='polynomial',
                       order=5, rej_lo=5.0, rej_hi=5.0, niter=10,
                       plotover=False, log=log, verbose=verbose)
            if not cleanup: hdu.writeto('bp'+img, overwrite=True)
            # put windowed data into full image
            # (>7 extensions means two windows per amp: O and E rows)
            exts = len(hdu)
            if exts > 7:
                rows, cols = hdu[1].data.shape
                cbin, rbin = [int(x) for x in hdu[0].header['CCDSUM'].split(" ")]
                ampsecO = hdu[1].header["AMPSEC"].strip("[]").split(",")
                ampsecE = hdu[7].header["AMPSEC"].strip("[]").split(",")
                rO = int((float(ampsecO[1].split(":")[0]) - 1.)/rbin)
                rE = int((float(ampsecE[1].split(":")[0]) - 1.)/rbin)
                keylist = ['BIASSEC','DATASEC','AMPSEC','CCDSEC','DETSEC']
                oldlist = [hdu[1].header[key].strip("[]").split(",")[1] for key in keylist]
                newlist = 2*['1:'+str(int(0.5+4102/rbin))]+3*[str(int(rbin/2))+':4102']
                for amp in range(6):
                    hduO = hdu[amp+1].copy()
                    # NOTE(review): 4102/rbin relies on Python 2 integer
                    # division.
                    hdu[amp+1].data = np.zeros((4102/rbin,cols))
                    hdu[amp+1].data[rO:rO+rows] = hduO.data
                    hdu[amp+1].data[rE:rE+rows] = hdu[amp+7].data
                    hdu[amp+1].update_header
                    for k,key in enumerate(keylist):
                        hdu[amp+1].header[key] = \
                            hdu[amp+1].header[key].replace(oldlist[k],newlist[k])
                del hdu[7:]
                hdu[0].header['NSCIEXT'] = 6
            badpixelstruct = saltio.openfits(bpmfile)
            hdu = add_variance(hdu, badpixelstruct)
            #gain correct the data
            if gaindb:
                usedb = True
                dblist = saltio.readgaindb(gaindb.strip())
            else:
                usedb = False
                dblist = ''
            hdu = gain(hdu, mult=True, usedb=usedb, dblist=dblist, log=log, verbose=verbose)
            if not cleanup: hdu.writeto('gbp'+img, overwrite=True)
            #cross talk correct the data
            hdu=xtalk(hdu, [], log=log, verbose=verbose)
            #cosmic ray clean the data
            #only clean the object data
            if crthresh=='':
                thresh = 5.0
                if hdu[0].header['GRATING'].strip()=='PG0300': thresh = 7.0
            else: thresh=crthresh
            if hdu[0].header['CCDTYPE']=='OBJECT' and \
                hdu[0].header['LAMPID']=='NONE' and \
                hdu[0].header['INSTRUME']=='RSS':
                if crthresh != False:
                    log.message('Cleaning CR using thresh={}'.format(thresh))
                    hdu = multicrclean(hdu, crtype='edge', thresh=thresh, mbox=11, bthresh=5.0,
                        flux_ratio=0.2, bbox=25, gain=1.0, rdnoise=5.0, fthresh=5.0, bfactor=2,
                        gbox=3, maxiter=5, log=log, verbose=verbose)
                    for ext in range(13,19): hdu[ext].data = hdu[ext].data.astype('uint8')
                    hdu[0].header.add_history('CRCLEAN: multicrclean, thresh = ',thresh)
                else:
                    hdu[0].header.add_history('CRCLEAN: None')
            hdu.writeto('xgbp'+img, overwrite=True)
            hdu.close()
        #mosaic the data
        #khn: attempt to use most recent previous geometry to obsdate.
        #geomfile=iraf.osfn("pysalt$data/rss/RSSgeom.dat")
        # NOTE(review): identical call retried on any exception --
        # presumably works around a transient iraf failure; confirm.
        try:
            saltmosaic('xgbpP*fits', '', 'm', geomfile, interp='linear', cleanup=True, geotran=True, clobber=True, logfile=logfile, verbose=True)
        except:
            saltmosaic('xgbpP*fits', '', 'm', geomfile, interp='linear', cleanup=True, geotran=True, clobber=True, logfile=logfile, verbose=True)
        #khn: fix mosaiced VAR and BPM extensions
        #khn: fix mosaiced bpm missing some of gap
        for img in infilelist:
            filename = 'mxgbp'+os.path.basename(img)
            hdu = pyfits.open(filename, 'update')
            hdu[2].header['EXTNAME'] = 'VAR'
            hdu[3].header['EXTNAME'] = 'BPM'
            bpm_rc = (hdu[3].data>0).astype('uint8')
            # Columns with zero total signal that the BPM does not already
            # flag as gap are added to the mask.
            zeroscicol = hdu['SCI'].data.sum(axis=0) == 0
            bpmgapcol = bpm_rc.mean(axis=0) == 1
            addbpmcol = zeroscicol & ~bpmgapcol
            addbpmcol[np.argmax(addbpmcol)-4:np.argmax(addbpmcol)] = True # allow for chip tilt
            bpm_rc[:,addbpmcol] = 1
            hdu[3].data = bpm_rc
            hdu.writeto(filename,overwrite=True)
        #clean up the images
        if cleanup:
            for f in glob.glob('p*fits'): os.remove(f)
            for f in glob.glob('bp*fits'): os.remove(f)
            for f in glob.glob('gbp*fits'): os.remove(f)
            for f in glob.glob('xgbp*fits'): os.remove(f)
def remove_duplicate_keys(hdu):
    """Drop blank-valued duplicates of repeated primary-header keywords.

    For every keyword that occurs more than once in the primary header,
    each occurrence whose value is blank (empty after strip) is deleted by
    positional index, highest index first so earlier positions remain
    valid.  Returns the (modified) HDU list.
    """
    keys = hdu[0].header.keys()
    vals = hdu[0].header.values()
    # Keywords that appear more than once.
    repeated = set(k for k in keys if keys.count(k) > 1)
    # Header positions holding a blank value for a repeated keyword.
    doomed = [pos for pos, key in enumerate(keys)
              if key in repeated and len(vals[pos].strip()) == 0]
    for pos in sorted(doomed, reverse=True):
        del hdu[0].header[pos]
    return hdu
def add_variance_files(filenames, bpmfile):
    """Append variance and bad-pixel extensions to every file matching the
    glob pattern *filenames*, using *bpmfile* as the master bad-pixel
    mask, and rewrite each file in place.
    """
    file_list=glob.glob(filenames)
    badpixelstruct = saltio.openfits(bpmfile)
    for f in file_list:
        struct = pyfits.open(f)
        # BUG FIX: was `add_variance(struct, bpmstruct)` -- `bpmstruct`
        # is undefined here and raised a NameError; the opened master
        # bad-pixel structure is `badpixelstruct`.
        struct = add_variance(struct, badpixelstruct)
        if os.path.isfile(f): os.remove(f)
        struct.writeto(f)
def add_variance(struct, badpixelstruct):
    """Append a VAR and a BPM extension for every science extension.

    For each of the nsciext science HDUs, a variance frame (EXTNAME 'VAR')
    and then a bad-pixel mask built from *badpixelstruct* are appended,
    and the science headers gain VAREXT/BPMEXT pointers to them.  NEXTEND
    in the primary header is updated to the final extension count.
    """
    nsciext = len(struct) - 1
    nextend = nsciext
    # First pass: one variance frame per science extension.
    for sci in range(1, nsciext + 1):
        var_hdu = CreateVariance(struct[sci], sci, nextend + sci)
        var_hdu.header['EXTNAME'] = 'VAR'
        struct[sci].header['VAREXT'] = (nextend + sci, 'Extension for Variance Frame')
        struct.append(var_hdu)
    nextend += nsciext
    # Second pass: one bad-pixel mask per science extension.
    for sci in range(1, nsciext + 1):
        bpm_hdu = masterbadpixel(struct, badpixelstruct, sci, nextend + sci)
        struct[sci].header['BPMEXT'] = (nextend + sci, 'Extension for Bad Pixel Mask')
        struct.append(bpm_hdu)
    nextend += nsciext
    struct[0].header['NEXTEND'] = nextend
    return struct
def masterbadpixel(inhdu, bphdu, sci_ext, bp_ext):
    # khn: Create the bad pixel hdu bp_ext for inhdu[sci_ext] from a master, bphdu
    # Returns a pyfits ImageHDU named 'BPM' with uint8 data for the
    # science extension sci_ext (all zeros when no master is supplied).
    if bphdu is None:
        data=np.zeros_like(inhdu[sci_ext].data).astype("uint8")
    else:
        infile=inhdu.fileinfo(0)['filename']
        bpfile=bphdu.fileinfo(0)['filename']
        masternext = len(bphdu)-1
        masterext = (sci_ext-1) % masternext + 1    # allow for windows
        # The master mask must come from the same instrument.
        if not saltkey.compare(inhdu[0], bphdu[0], 'INSTRUME', infile, bpfile):
            message = '%s and %s are not the same %s' % (infile,bpfile, 'INSTRUME')
            raise SaltError(message)
        else:
            rows,cols = inhdu[sci_ext].data.shape
            cbin,rbin = np.array(inhdu[sci_ext].header["CCDSUM"].split(" ")).astype(int)
            masterrows,mastercols = bphdu[masterext].data.shape
            # Pad the unbinned master mask (with ones) so its dimensions
            # are exact multiples of the binning factors.
            master_rc = np.ones((masterrows+(masterrows % rbin),mastercols+(mastercols % cbin)))
            master_rc[:masterrows,:mastercols] = bphdu[masterext].data
            masterrows,mastercols=(masterrows+(masterrows % rbin),mastercols+(mastercols % cbin))
            # Offset of this extension's window within the full frame, in
            # binned pixels, derived from the AMPSEC keyword.
            ampsec = inhdu[sci_ext].header["AMPSEC"].strip("[]").split(",")
            r1 = int((float(ampsec[1].split(":")[0]) - 1.)/rbin)
            c1 = int((float(ampsec[0].split(":")[0]) - 1.)/cbin)
            # Bin the mask: a binned pixel is flagged if any contributing
            # unbinned pixel is nonzero.  NOTE(review): masterrows/rbin and
            # mastercols/cbin rely on Python 2 integer division.
            bin_rc = (master_rc.reshape(masterrows/rbin,rbin,mastercols/cbin,cbin).sum(axis=3).sum(axis=1) > 0)
            data = bin_rc[ r1:r1+rows, c1:c1+cols ].astype('uint8')
    header=inhdu[sci_ext].header.copy()
    header['EXTVER'] = bp_ext
    header['SCIEXT'] = (sci_ext,'Extension of science frame')
    return pyfits.ImageHDU(data=data, header=header, name='BPM')
if __name__=='__main__':
    # Usage: imred.py <rawdir>; products go to the current directory and
    # the bad-pixel mask is taken from bpm_sn.fits next to this script.
    rawdir=sys.argv[1]
    prodir=os.path.curdir+'/'
    bpmfile = os.path.dirname(sys.argv[0]) + '/bpm_sn.fits'
    # NOTE(review): imred's first parameter is named infilelist and is
    # iterated as a list of file names, but a single string is passed
    # here -- confirm the intended call signature.
    imred(rawdir, prodir, cleanup=True, bpmfile=bpmfile)
|
saltastroREPO_NAMEpolsaltPATH_START.@polsalt_extracted@polsalt-master@polsalt@imred.py@.PATH_END.py
|
{
"filename": "toastit.py",
"repo_name": "plazar/TOASTER",
"repo_path": "TOASTER_extracted/TOASTER-master/toastit.py",
"type": "Python"
}
|
#!/usr/bin/env python
""" The data reduction pipeline. This script will archive files,
register file-info into the database, reduce data, and store
TOAs and processing information into the DB.
"""
import copy
import sys
import os
import os.path
import warnings
import tempfile
import shutil
import traceback
import shlex
import random
from toaster import config
from toaster import utils
from toaster import colour
from toaster import errors
from toaster import manipulators
from toaster import database
from toaster.toolkit.toas import load_toa
from toaster.toolkit.rawfiles import load_rawfile
from toaster.toolkit.parfiles import load_parfile
from toaster.toolkit.templates import load_template
from toaster.toolkit.processing import diagnose_processing
import toaster.toolkit.toas.general as toas_general
import toaster.toolkit.parfiles.general as parfiles_general
import toaster.toolkit.templates.general as templates_general
import toaster.toolkit.rawfiles.general as rawfiles_general
from toaster import diagnostics
from toaster.utils import notify
from toaster.utils import datafile
from toaster.utils import cache
from toaster.utils import version
###############################################################################
# DO NOT EDIT BELOW HERE
###############################################################################
SUCCESSMSGS = ["Your data are freshly toasted",
"Your data are part of this balanced breakfast",
"Your data are nice and warm now",
"Your data are golden brown",
"Your data would go great with butter and jam"]
def make_proc_diagnostics_dir(fn, proc_id):
    """Given an archive, create the appropriate diagnostics
        directory, and cross-references.

        Inputs:
            fn: The file to create a diagnostic directory for.
            proc_id: The processing ID number to create a diagnostic
                directory for.

        Outputs:
            diagdir: The diagnostic directory's name.
    """
    diagnostics_location = os.path.join(config.cfg.data_archive_location, "diagnostics")
    params = datafile.prep_file(fn)
    basedir = datafile.get_archive_dir(fn, params=params,
                                       data_archive_location=diagnostics_location)
    diagdir = os.path.join(basedir, "procid_%d" % proc_id)
    # Make sure directory exists
    if not os.path.isdir(diagdir):
        # Create directory
        notify.print_info("Making diagnostic directory: %s" % diagdir, 2)
        os.makedirs(diagdir, 0770)
    # Flat "processing/procid_N" symlinks permit lookup by processing ID
    # without knowing the archive directory layout.
    crossrefdir = os.path.join(diagnostics_location, "processing")
    if not os.path.isdir(crossrefdir):
        # Create directory
        notify.print_info("Making diagnostic crossref diagdir: %s" % crossrefdir, 2)
        os.makedirs(crossrefdir, 0770)
    crossref = os.path.join(crossrefdir, "procid_%d" % proc_id)
    if not os.path.islink(crossref):
        # Create symlink
        notify.print_info("Making crossref to diagnostic diagdir: %s" % crossref, 2)
        os.symlink(diagdir, crossref)
    return diagdir
def fill_process_table(version_id, rawfile_id, parfile_id, template_id,
                       manip, nchan, nsub, existdb=None):
    """Record a processing run in the 'process' table.

    A new database connection is opened (and closed again at the end)
    unless an existing one is supplied via *existdb*.  Returns the
    newly inserted row's primary key (the processing ID).
    """
    db = existdb or database.Database()
    db.connect()
    row = {'version_id': version_id,
           'rawfile_id': rawfile_id,
           'parfile_id': parfile_id,
           'template_id': template_id,
           'manipulator': manip.name,
           'manipulator_args': manip.argstr,
           'nchan': nchan,
           'nsub': nsub,
           'toa_fitting_method': config.cfg.toa_fitting_method,
           'user_id': cache.get_userid()}
    result = db.execute(db.process.insert(), row)
    process_id = result.inserted_primary_key[0]
    result.close()
    notify.print_info("Added processing run to DB. Processing ID: %d" %
                      process_id, 1)
    # Close DB connection
    if not existdb:
        db.close()
    return process_id
def pipeline_core(manip, rawfile_id, parfile_id, template_id,
                  existdb=None):
    """Run a prepared manipulator function on the raw file with
    ID 'rawfile_id'. Then generate TOAs and load them into the DB.

    Inputs:
        manip: A manipulator instance.
        rawfile_id: The ID number of the raw data file to generate TOAs from.
        parfile_id: The ID number of the parfile to install into the
            raw file. If this is None, then no new parfile will be installed.
        template_id: The ID number of the template to use.
        existdb: An existing database connection object.
            (Default: establish a new DB connection)

    Outputs:
        None
    """
    # Initialise these so the 'finally' clause doesn't throw an exception of
    # it's own if an error is caught before these filenames are determined
    manipfn = ''
    adjustfn = ''
    #Start pipeline
    print "###################################################"
    print "Starting to toast data"
    print "Start time: %s" % utils.give_utc_now()
    print "###################################################"
    db = existdb or database.Database()
    db.connect()
    try:
        trans = db.begin()  # Open a transaction
        # Get version ID
        version_id = version.get_version_id(db)
        # Get raw data from rawfile_id and verify MD5SUM
        rawfile = rawfiles_general.get_rawfile_from_id(rawfile_id,
                                                       db, verify_md5=True)
        # Manipulate the raw file
        notify.print_info("Manipulating file", 1)
        # Create a temporary file for the adjusted results
        tmpfile, adjustfn = tempfile.mkstemp(prefix='toaster_tmp',
                                             suffix='_newephem.ar',
                                             dir=config.cfg.base_tmp_dir)
        os.close(tmpfile)
        shutil.copy(rawfile, adjustfn)
        if parfile_id is not None:
            # Re-install ephemeris
            # Get ephemeris from parfile_id and verify MD5SUM
            parfile = parfiles_general.get_parfile_from_id(parfile_id,
                                                           db, verify_md5=True)
            cmd = ["pam", "-m", "-E", parfile, "--update_dm", adjustfn]
            utils.execute(cmd)
        # Create a temporary file for the manipulated results
        tmpfile, manipfn = tempfile.mkstemp(prefix='toaster_tmp',
                                            suffix='_manip.ar',
                                            dir=config.cfg.base_tmp_dir)
        os.close(tmpfile)
        # Run the manipulator
        manip.run([adjustfn], manipfn, tmpdir=config.cfg.base_tmp_dir)
        # Get template from template_id and verify MD5SUM
        template = templates_general.get_template_from_id(template_id,
                                                          db, verify_md5=True)
        # Create a temporary file for the toa diagnostic plots
        tmpfile, toadiagfn = tempfile.mkstemp(prefix='toaster_tmp',
                                              suffix='_TOAdiag.png',
                                              dir=config.cfg.base_tmp_dir)
        os.close(tmpfile)
        # Generate TOAs with pat
        notify.print_info("Computing TOAs", 0)
        cmd = ["pat", "-f", "tempo2", "-A", config.cfg.toa_fitting_method,
               "-s", template, "-C", "gof length bw nbin nchan nsubint",
               "-t", "-K", "%s/PNG" % toadiagfn, manipfn]
        # NOTE(review): paterr is captured but never examined.
        patout, paterr = utils.execute(cmd)
        # Check version ID is still the same. Just in case.
        new_version_id = version.get_version_id(db)
        if version_id != new_version_id:
            raise errors.ToasterError("Weird... Version ID at the start "
                                      "of processing (%s) is different "
                                      "from at the end (%d)!" %
                                      (version_id, new_version_id))
        # Read some header values from the manipulated archive
        hdr = datafile.get_header_vals(manipfn, ['nchan', 'nsub', 'name',
                                                 'intmjd', 'fracmjd'])
        hdr['secs'] = int(hdr['fracmjd']*24*3600+0.5)  # Add 0.5 so result is
                                                       # rounded to nearest int
        # Fill pipeline table
        # NOTE(review): cmdline is assigned but not used below -- confirm.
        cmdline = " ".join(sys.argv)
        process_id = fill_process_table(version_id, rawfile_id, parfile_id,
                                        template_id, manip, hdr['nchan'],
                                        hdr['nsub'], db)
        # Parse pat output
        toainfo = toas_general.parse_pat_output(patout)
        rawfile_info = rawfiles_general.get_rawfile_info(rawfile_id)
        # Insert TOAs into DB
        for ti in toainfo:
            ti['process_id'] = process_id
            ti['template_id'] = template_id
            ti['rawfile_id'] = rawfile_id
            ti['pulsar_id'] = rawfile_info['pulsar_id']
            ti['obssystem_id'] = rawfile_info['obssystem_id']
        toa_ids = load_toa.load_toas(toainfo, db)
        # Create processing diagnostics
        notify.print_info("Generating processing diagnostics", 1)
        diagdir = make_proc_diagnostics_dir(manipfn, process_id)
        suffix = "_procid%d.%s" % (process_id, manip.name)
        diags = []
        for diagname in config.cfg.default_rawfile_diagnostics:
            diagcls = diagnostics.get_diagnostic_class(diagname)
            try:
                diags.append(diagcls(manipfn))
            except errors.DiagnosticNotApplicable, e:
                notify.print_info("Diagnostic isn't applicable: %s. "
                                  "Skipping..." % str(e), 1)
        if diags:
            # Load processing diagnostics
            diagnose_processing.insert_processing_diagnostics(process_id,
                                                              diags, diagdir,
                                                              suffix, existdb=db)
        # Copy TOA diagnostic plots and register them into DB
        basefn = "%(name)s_%(intmjd)05d_%(secs)05d" % hdr
        values = []
        for ii, toa_id in enumerate(toa_ids):
            outfn = basefn+"_procid%d.TOA%d.png" % (process_id, ii+1)
            if ii == 0:
                fn = toadiagfn
            else:
                fn = "%s_%d" % (toadiagfn, ii+1)
            shutil.move(fn, os.path.join(diagdir, outfn))
            # ins is rebuilt each iteration; only the last one is used by
            # the single bulk execute below.
            ins = db.toa_diagnostic_plots.insert()
            values.append({'toa_id': toa_id,
                           'filename': outfn,
                           'filepath': diagdir,
                           'plot_type': 'Prof-Temp Resids'})
        result = db.execute(ins, values)
        result.close()
        notify.print_info("Inserted %d TOA diagnostic plots." % len(toa_ids), 2)
    except:
        db.rollback()
        sys.stdout.write(colour.cstring("Error encountered. "
                                        "Rolling back DB transaction!\n",
                                        'error'))
        raise
    else:
        # No exceptions encountered
        # Commit database transaction
        db.commit()
    finally:
        # Clean up
        for fn in [adjustfn, manipfn]:
            if os.path.isfile(fn):
                os.remove(fn)
    # End pipeline
    print "###################################################"
    print random.choice(SUCCESSMSGS)
    print "End time: %s" % utils.give_utc_now()
    print "###################################################"
    # Close DB connection
    if not existdb:
        db.close()
def reduce_rawfile(args, leftover_args=None, existdb=None):
    """Resolve the rawfile/parfile/template IDs from the parsed arguments
    (loading any files named on the command line, or falling back to the
    master entries in the database) and run the processing pipeline.

    Inputs:
        args: Parsed command-line arguments (argparse namespace); the
            resolved *_id attributes are written back onto it.
        leftover_args: Extra command-line arguments handed to the
            manipulator. (Default: no extra arguments)
        existdb: An existing database connection object.
            (Default: establish a new DB connection)

    Outputs:
        None
    """
    # BUG FIX: the default was a mutable `[]` shared across calls; use the
    # None-sentinel idiom instead (behaviourally identical for callers).
    if leftover_args is None:
        leftover_args = []
    if args.rawfile is not None:
        notify.print_info("Loading rawfile %s" % args.rawfile, 1)
        args.rawfile_id = load_rawfile.load_rawfile(args.rawfile, existdb)
    elif args.rawfile_id is None:
        # Neither a rawfile, nor a rawfile_id was provided
        raise errors.BadInputError("Either a rawfile, or a rawfile_id "
                                   "_must_ be provided!")
    if args.parfile is not None:
        notify.print_info("Loading parfile %s" % args.parfile, 1)
        args.parfile_id = load_parfile.load_parfile(args.parfile, existdb=existdb)
    if args.template is not None:
        notify.print_info("Loading template %s" % args.template, 1)
        args.template_id = load_template.load_template(args.template,
                                                       existdb=existdb)
    rawfile_info = rawfiles_general.get_rawfile_info(args.rawfile_id, existdb=existdb)
    if args.use_parfile:
        # Fall back to the pulsar's master parfile when none was given.
        if args.parfile_id is None:
            args.parfile_id = parfiles_general.get_master_parfile(
                rawfile_info['pulsar_id'])[0]
            if args.parfile_id is None:
                raise errors.NoMasterError("A master parfile is required "
                                           "in the database if no parfile is "
                                           "provided on the command line.")
    else:
        args.parfile_id = None
    if args.template_id is None:
        # Fall back to the master template for this pulsar/obssystem.
        args.template_id = templates_general.get_master_template(
            rawfile_info['pulsar_id'],
            rawfile_info['obssystem_id'],
            existdb=existdb)[0]
        if args.template_id is None:
            raise errors.NoMasterError("A master template is required "
                                       "in the database if no template is "
                                       "provided on the command line.")
    notify.print_info("Using the following IDs:\n"
                      " rawfile_id: %s\n"
                      " parfile_id: %s\n"
                      " template_id: %s" %
                      (args.rawfile_id, args.parfile_id, args.template_id), 1)
    # Load manipulator
    manip = manipulators.load_manipulator(args.manip_name)
    manip.parse_args(leftover_args)
    # Run pipeline core
    pipeline_core(manip, args.rawfile_id, args.parfile_id,
                  args.template_id, existdb)
def main():
    """Entry point: reduce either the single job described by the parsed
    command-line arguments, or -- with --from-file -- one job per line of
    the given argument file ('-' reads from stdin).  Relies on the module
    globals ``args``, ``leftover_args`` and ``parser`` set in __main__."""
    # Connect to the database
    db = database.Database()
    db.connect()
    try:
        if args.from_file is not None:
            if args.from_file == '-':
                argfile = sys.stdin
            else:
                if not os.path.exists(args.from_file):
                    raise errors.FileError("The list of cmd line args (%s) "
                                           "does not exist." % args.from_file)
                argfile = open(args.from_file, 'r')
            numfails = 0
            for line in argfile:
                # Strip comments
                line = line.partition('#')[0].strip()
                if not line:
                    # Skip empty line
                    continue
                # Each line re-parses on a deep copy of the base args, so
                # per-line options cannot leak into subsequent jobs.
                try:
                    customargs = copy.deepcopy(args)
                    arglist = leftover_args+shlex.split(line.strip())
                    customargs, custom_leftover_args = \
                        parser.parse_known_args(arglist, namespace=customargs)
                    reduce_rawfile(customargs, custom_leftover_args, db)
                except errors.ToasterError:
                    numfails += 1
                    traceback.print_exc()
            if args.from_file != '-':
                argfile.close()
            if numfails:
                raise errors.ToasterError(
                    "\n\n===================================\n"
                    "The reduction of %d rawfiles failed!\n"
                    "Please review error output.\n"
                    "===================================\n" % numfails)
        else:
            reduce_rawfile(args, leftover_args, db)
    finally:
        # Close DB connection
        db.close()
if __name__ == "__main__":
    # Build the argument parser.  Raw file, parfile and template can each
    # be given either as a file to load or as an already-loaded DB ID.
    parser = manipulators.ManipulatorArguments(prog='toaster.py',
                            description='Reduce an already-uploaded '
                                        'archive. Both a pre-loaded parfile, and a '
                                        'pre-loaded template must be provided as well. ' \
                                        'TOAs generated are loaded into the database, ' \
                                        'as is information about the processing run.')
    # Raw data
    rawgroup = parser.add_mutually_exclusive_group(required=False)
    rawgroup.add_argument("--rawfile", dest='rawfile', type=str,
                          default=None,
                          help="A raw file to archive/load to DB and "
                               "generate TOAs for.")
    rawgroup.add_argument('-r', '--rawfile-id', dest='rawfile_id',
                          type=int, default=None,
                          help="ID of an already archived/loaded raw data "
                               "file to use for running the full pipeline.")
    # Ephemeris
    pargroup = parser.add_mutually_exclusive_group(required=False)
    pargroup.add_argument('-p', '--parfile-id', dest='parfile_id',
                          type=int, default=None,
                          help="ID of ephemeris to use for running the "
                               "full pipeline.")
    pargroup.add_argument('--parfile', dest='parfile', type=str,
                          default=None,
                          help="A parfile to archive/load to DB and "
                               "use when generating TOAs.")
    pargroup.add_argument('--no-parfile', dest="use_parfile",
                          default=True, action="store_false",
                          help="Do not install a new ephemeris before "
                               "generating TOAs. This is useful when solving "
                               "a pulsar.")
    # Template profile
    tmpgroup = parser.add_mutually_exclusive_group(required=False)
    tmpgroup.add_argument('-t', '--template-id', dest='template_id',
                          type=int, default=None,
                          help="ID of template profile to use for running "
                               "the full pipeline.")
    tmpgroup.add_argument('--template', dest='template', type=str,
                          default=None,
                          help="A template to archive/load to DB and use "
                               "when generating TOAs.")
    parser.add_argument('--from-file', dest='from_file',
                        type=str, default=None,
                        help="A list of command line arguments. "
                             "Each line of the file refers to a single "
                             "processing job. Arguments on a single line "
                             "should not conflict with eachother or "
                             "arguments provided explicitly on the cmd line. "
                             "(Default: perform a single processing job "
                             "defined by the arguments on the cmd line.)")
    args, leftover_args = parser.parse_known_args()
    # With no rawfile and no --from-file, fall back to reading a job list
    # from stdin.
    if ((args.rawfile is None) and (args.rawfile_id is None)) and \
            (args.from_file is None):
        warnings.warn("No input file or --from-file argument given "
                      "will read from stdin.",
                      errors.ToasterWarning)
        args.rawfile = None  # In case it was set to '-'
        args.from_file = '-'
    main()
|
plazarREPO_NAMETOASTERPATH_START.@TOASTER_extracted@TOASTER-master@toastit.py@.PATH_END.py
|
{
"filename": "LIGO_psds.py",
"repo_name": "damonge/schNell",
"repo_path": "schNell_extracted/schNell-master/plots/LIGO_psds.py",
"type": "Python"
}
|
import numpy as np
import schnell as snl
import matplotlib.pyplot as plt
from matplotlib import rc
# Use TeX-rendered Helvetica for all figure text.
rc('font', **{'family': 'sans-serif',
              'sans-serif': ['Helvetica']})
rc('text', usetex=True)
# Frequency grid (Hz) for the first figure.
freqs = np.geomspace(8., 1010., 2048)
# Ground detectors: name, latitude (deg), longitude (deg), orientation
# angle (deg), and the file holding the noise PSD.
dets = [snl.GroundDetector('Hanford', 46.4, -119.4, 171.8,
                           'data/aLIGO.txt'),
        snl.GroundDetector('Livingstone', 30.7, -90.8, 243.0,
                           'data/aLIGO.txt'),
        snl.GroundDetector('Virgo', 43.6, 10.5, 116.5,
                           'data/Virgo.txt'),
        snl.GroundDetector('KAGRA', 36.3, 137.2, 225.0,
                           'data/KAGRA.txt'),
        snl.GroundDetector('Cosmic Explorer', 37.24804, -115.800155, 0.,
                           'data/CE1_strain.txt')]
# One vertex of the triangular Einstein Telescope configuration.
et = snl.GroundDetectorTriangle(name='ET0', lat=40.1, lon=9.0,
                                fname_psd='data/ET.txt', detector_id=0)
# Figure 1: PSDs of the current-generation detectors.
plt.figure()
plt.plot(freqs, dets[0].psd(freqs), 'k-', label='LIGO')
plt.plot(freqs, dets[2].psd(freqs), 'k--', label='Virgo')
plt.plot(freqs, dets[3].psd(freqs), 'k:', label='KAGRA')
plt.loglog()
plt.xlim([10, 1000])
plt.ylim([2E-48, 2E-43])
plt.xlabel(r'$f\,\,[{\rm Hz}]$', fontsize=16)
plt.ylabel(r'$N_f\,\,[{\rm Hz}^{-1}]$', fontsize=16)
plt.gca().tick_params(labelsize="large")
plt.legend(loc='upper right', fontsize=14, frameon=False)
plt.savefig("psd_LIGO.pdf", bbox_inches='tight')
# Wider frequency grids for the next-generation comparison.
freqsa = np.geomspace(6, 5000., 3072)
freqsb = np.geomspace(1., 10010., 3072)
# Figure 2: next-generation detectors vs LIGO A+.
plt.figure()
plt.plot(freqsb, et.psd(freqsb), 'k-', label='ET-D')
plt.plot(freqsb, dets[4].psd(freqsb), 'k--', label='CE-S1')
plt.plot(freqsa, dets[0].psd(freqsa), 'k:', label='LIGO A+')
plt.xlim([1.5, 1E4])
plt.ylim([5E-50, 9E-42])
plt.loglog()
plt.xlabel(r'$f\,\,[{\rm Hz}]$', fontsize=16)
plt.ylabel(r'$N_f\,\,[{\rm Hz}^{-1}]$', fontsize=16)
plt.gca().tick_params(labelsize="large")
plt.gca().set_yticks([1E-48, 1E-46, 1E-44, 1E-42])
plt.legend(loc='upper right', fontsize=14, frameon=False)
plt.savefig("psd_ET.pdf", bbox_inches='tight')
plt.show()
|
damongeREPO_NAMEschNellPATH_START.@schNell_extracted@schNell-master@plots@LIGO_psds.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/legendgrouptitle/font/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``family`` property of
    ``histogram.legendgrouptitle.font``."""

    def __init__(
        self,
        plotly_name="family",
        parent_name="histogram.legendgrouptitle.font",
        **kwargs,
    ):
        # Install the standard defaults unless the caller overrode them,
        # then forward everything to the base string validator.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("no_blank", True)
        kwargs.setdefault("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@legendgrouptitle@font@_family.py@.PATH_END.py
|
{
"filename": "_y.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/marker/colorbar/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class YValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``y`` property of
    ``scattercarpet.marker.colorbar`` (valid range [-2, 3])."""

    def __init__(
        self, plotly_name="y", parent_name="scattercarpet.marker.colorbar", **kwargs
    ):
        # Install the standard defaults unless the caller overrode them,
        # then forward everything to the base number validator.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("max", 3)
        kwargs.setdefault("min", -2)
        kwargs.setdefault("role", "style")
        super(YValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@marker@colorbar@_y.py@.PATH_END.py
|
{
"filename": "quadrature.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/integrate/quadrature.py",
"type": "Python"
}
|
from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
    """Warning category for accuracy problems in this module's integrators."""
    pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
    """
    Compute a definite integral using fixed-order Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature of
    order `n`.

    Parameters
    ----------
    func : callable
        A Python function or method to integrate (must accept vector
        inputs).  If integrating a vector-valued function, the returned
        array must have shape ``(..., len(x))``.
    a, b : float
        Lower and upper limits of integration.  Both must be finite.
    args : tuple, optional
        Extra arguments to pass to function, if any.
    n : int, optional
        Order of quadrature integration. Default is 5.

    Returns
    -------
    val : float
        Gaussian quadrature approximation to the integral
    none : None
        Statically returned value of None

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    quadrature : adaptive Gaussian quadrature
    """
    nodes, weights = _cached_roots_legendre(n)
    nodes = np.real(nodes)
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Gaussian quadrature is only available for "
                         "finite limits.")
    # Map the Legendre nodes from [-1, 1] onto [a, b] and scale the
    # weighted sum by the interval half-width.
    half_width = (b - a) / 2.0
    samples = half_width * (nodes + 1) + a
    return half_width * np.sum(weights * func(samples, *args), axis=-1), None
def vectorize1(func, args=(), vec_func=False):
    """Vectorize the call to a function.

    Internal utility used by `romberg` and `quadrature` to wrap a user
    function so it can always be called with an array of points.

    Parameters
    ----------
    func : callable
        User defined function.
    args : tuple, optional
        Extra arguments for the function.
    vec_func : bool, optional
        True if `func` already accepts vector arguments.

    Returns
    -------
    vfunc : callable
        A function that will take a vector argument and return the
        result.
    """
    if vec_func:
        def vfunc(x):
            return func(x, *args)
        return vfunc

    def vfunc(x):
        if np.isscalar(x):
            return func(x, *args)
        x = np.asarray(x)
        # Probe the first point to discover the output dtype, then fill
        # the remaining slots one call at a time.
        first = func(x[0], *args)
        count = len(x)
        out = np.empty((count,), dtype=getattr(first, 'dtype', type(first)))
        out[0] = first
        for idx in range(1, count):
            out[idx] = func(x[idx], *args)
        return out
    return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
               vec_func=True, miniter=1):
    """
    Compute a definite integral using fixed-tolerance Gaussian quadrature.

    Integrate `func` from `a` to `b`, raising the quadrature order until
    the estimate stabilizes to within the absolute tolerance `tol` or the
    relative tolerance `rtol`.

    Parameters
    ----------
    func : function
        A Python function or method to integrate.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function.
    tol, rtol : float, optional
        Iteration stops when error between last two iterates is less than
        `tol` OR the relative change is less than `rtol`.
    maxiter : int, optional
        Maximum order of Gaussian quadrature.
    vec_func : bool, optional
        True or False if func handles arrays as arguments (is
        a "vector" function). Default is True.
    miniter : int, optional
        Minimum order of Gaussian quadrature.

    Returns
    -------
    val : float
        Gaussian quadrature approximation (within tolerance) to integral.
    err : float
        Difference between last two estimates of the integral.

    See also
    --------
    romberg: adaptive Romberg quadrature
    fixed_quad: fixed-order Gaussian quadrature
    quad: adaptive quadrature using QUADPACK
    dblquad: double integrals
    tplquad: triple integrals
    romb: integrator for sampled data
    simps: integrator for sampled data
    cumtrapz: cumulative integration for sampled data
    ode: ODE integrator
    odeint: ODE integrator
    """
    if not isinstance(args, tuple):
        # Accept a lone extra argument that was not wrapped in a tuple.
        args = (args,)
    vfunc = vectorize1(func, args, vec_func=vec_func)
    best = np.inf
    diff = np.inf
    # Guarantee at least two orders are tried so `diff` is meaningful.
    maxiter = max(miniter + 1, maxiter)
    order = miniter
    while order <= maxiter:
        estimate = fixed_quad(vfunc, a, b, (), order)[0]
        diff = abs(estimate - best)
        best = estimate
        if diff < tol or diff < rtol * abs(best):
            break
        order += 1
    else:
        # Loop exhausted without the break: convergence was not reached.
        warnings.warn(
            "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, diff),
            AccuracyWarning)
    return best, diff
def tupleset(t, i, value):
    """Return a copy of tuple `t` with element `i` replaced by `value`."""
    items = list(t)
    items[i] = value
    return tuple(items)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
    """
    Cumulatively integrate y(x) using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Values to integrate.
    x : array_like, optional
        The coordinate to integrate along. If None (default), use spacing `dx`
        between consecutive elements in `y`.
    dx : float, optional
        Spacing between elements of `y`. Only used if `x` is None.
    axis : int, optional
        Specifies the axis to cumulate. Default is -1 (last axis).
    initial : scalar, optional
        If given, insert this value at the beginning of the returned result.
        Typically this value should be 0. Default is None, which means no
        value at ``x[0]`` is returned and `res` has one element less than `y`
        along the axis of integration.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    See Also
    --------
    numpy.cumsum, numpy.cumprod
    quad: adaptive quadrature using QUADPACK
    romberg: adaptive Romberg quadrature
    quadrature: adaptive Gaussian quadrature
    fixed_quad: fixed-order Gaussian quadrature
    dblquad: double integrals
    tplquad: triple integrals
    romb: integrators for sampled data
    ode: ODE integrators
    odeint: ODE integrators
    """
    y = np.asarray(y)
    if x is None:
        spacing = dx
    else:
        x = np.asarray(x)
        if x.ndim == 1:
            spacing = np.diff(x)
            # Shape the 1-d spacing so it broadcasts along `axis`.
            bcast = [1] * y.ndim
            bcast[axis] = -1
            spacing = spacing.reshape(bcast)
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-d or the "
                             "same as y.")
        else:
            spacing = np.diff(x, axis=axis)

        if spacing.shape[axis] != y.shape[axis] - 1:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    nd = len(y.shape)
    upper = tupleset((slice(None),) * nd, axis, slice(1, None))
    lower = tupleset((slice(None),) * nd, axis, slice(None, -1))
    # Trapezoid areas, accumulated along the integration axis.
    res = np.cumsum(spacing * (y[upper] + y[lower]) / 2.0, axis=axis)

    if initial is not None:
        if not np.isscalar(initial):
            raise ValueError("`initial` parameter should be a scalar.")

        pad_shape = list(res.shape)
        pad_shape[axis] = 1
        res = np.concatenate(
            [np.full(pad_shape, initial, dtype=res.dtype), res], axis=axis)

    return res
def _basic_simps(y, start, stop, x, dx, axis):
    """Apply composite Simpson's rule to pairs of intervals along `axis`.

    Sums the Simpson contributions of the interval pairs starting at
    ``start, start+2, ...`` up to `stop`.  `x` is either None (equally
    spaced samples `dx` apart) or an array of sample positions
    broadcastable against `y`.
    """
    nd = len(y.shape)
    if start is None:
        start = 0
    step = 2
    slice_all = (slice(None),)*nd
    # Left, middle and right sample of each parabola:
    # y[start::2], y[start+1::2], y[start+2::2] (bounded by `stop`).
    slice0 = tupleset(slice_all, axis, slice(start, stop, step))
    slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
    slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))

    if x is None:  # Even spaced Simpson's rule.
        result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
                        axis=axis)
    else:
        # Account for possibly different spacings.
        # Simpson's rule changes a bit.
        h = np.diff(x, axis=axis)
        sl0 = tupleset(slice_all, axis, slice(start, stop, step))
        sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
        h0 = h[sl0]          # width of the left interval of each pair
        h1 = h[sl1]          # width of the right interval of each pair
        hsum = h0 + h1
        hprod = h0 * h1
        h0divh1 = h0 / h1
        # Weights of the quadratic interpolant through the three samples
        # of each pair when the spacings h0 and h1 differ.
        tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
                          y[slice1]*hsum*hsum/hprod +
                          y[slice2]*(2-h0divh1))
        result = np.sum(tmp, axis=axis)
    return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule.  If x is None, spacing of dx is assumed.

    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals.  The parameter 'even' controls how this is handled.

    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : int, optional
        Spacing of integration points along axis of `y`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.
    even : str {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with
          a trapezoidal rule on the last interval and 2) use the last
          N-2 intervals with a trapezoidal rule on the first interval.

        'first' : Use Simpson's rule for the first N-2 intervals with
          a trapezoidal rule on the last interval.

        'last' : Use Simpson's rule for the last N-2 intervals with a
          trapezoidal rule on the first interval.

    See Also
    --------
    quad: adaptive quadrature using QUADPACK
    romberg: adaptive Romberg quadrature
    quadrature: adaptive Gaussian quadrature
    fixed_quad: fixed-order Gaussian quadrature
    dblquad: double integrals
    tplquad: triple integrals
    romb: integrators for sampled data
    cumtrapz: cumulative integration for sampled data
    ode: ODE integrators
    odeint: ODE integrators

    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less.  If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.

    Examples
    --------
    >>> from scipy import integrate
    >>> x = np.arange(0, 10)
    >>> y = np.arange(0, 10)

    >>> integrate.simps(y, x)
    40.5

    >>> y = np.power(x, 3)
    >>> integrate.simps(y, x)
    1642.5
    >>> integrate.quad(lambda x: x**3, 0, 9)[0]
    1640.25

    >>> integrate.simps(y, x, even='first')
    1644.5

    """
    y = np.asarray(y)
    nd = len(y.shape)      # number of dimensions of the sample array
    N = y.shape[axis]      # number of samples along the integration axis
    last_dx = dx
    first_dx = dx
    returnshape = 0
    if x is not None:
        x = np.asarray(x)
        if len(x.shape) == 1:
            # Broadcast the 1-d sample positions along the integration axis.
            shapex = [1] * nd
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-d or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")
    if N % 2 == 0:
        # Even sample count -> odd interval count: one interval must be
        # handled by the trapezoidal rule, as selected by `even`.
        val = 0.0
        result = 0.0
        slice1 = (slice(None),)*nd
        slice2 = (slice(None),)*nd
        if even not in ['avg', 'last', 'first']:
            raise ValueError("Parameter 'even' must be "
                             "'avg', 'last', or 'first'.")
        # Compute using Simpson's rule on first intervals
        if even in ['avg', 'first']:
            slice1 = tupleset(slice1, axis, -1)
            slice2 = tupleset(slice2, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            # Trapezoid over the final interval.
            val += 0.5*last_dx*(y[slice1]+y[slice2])
            result = _basic_simps(y, 0, N-3, x, dx, axis)
        # Compute using Simpson's rule on last set of intervals
        if even in ['avg', 'last']:
            slice1 = tupleset(slice1, axis, 0)
            slice2 = tupleset(slice2, axis, 1)
            if x is not None:
                first_dx = x[tuple(slice2)] - x[tuple(slice1)]
            # Trapezoid over the first interval.
            val += 0.5*first_dx*(y[slice2]+y[slice1])
            result += _basic_simps(y, 1, N-2, x, dx, axis)
        if even == 'avg':
            # Average the 'first' and 'last' variants.
            val /= 2.0
            result /= 2.0
        result = result + val
    else:
        # Odd sample count: Simpson's rule applies to all intervals.
        result = _basic_simps(y, 0, N-2, x, dx, axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.

    Parameters
    ----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : float, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
        When `y` is a single 1-D array, then if this argument is True
        print the table showing Richardson extrapolation from the
        samples. Default is False.

    Returns
    -------
    romb : ndarray
        The integrated result for `axis`.

    See also
    --------
    quad : adaptive quadrature using QUADPACK
    romberg : adaptive Romberg quadrature
    quadrature : adaptive Gaussian quadrature
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    simps : integrators for sampled data
    cumtrapz : cumulative integration for sampled data
    ode : ODE integrators
    odeint : ODE integrators

    Examples
    --------
    >>> from scipy import integrate
    >>> x = np.arange(10, 14.25, 0.25)
    >>> y = np.arange(3, 12)

    >>> integrate.romb(y)
    56.0

    >>> y = np.sin(np.power(x, 2.5))
    >>> integrate.romb(y)
    -0.742561336672229

    >>> integrate.romb(y, show=True)
    Richardson Extrapolation Table for Romberg Integration
    ====================================================================
    -0.81576
     4.63862  6.45674
    -1.10581 -3.02062 -3.65245
    -2.57379 -3.06311 -3.06595 -3.05664
    -1.34093 -0.92997 -0.78776 -0.75160 -0.74256
    ====================================================================
    -0.742561336672229

    """
    y = np.asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    # Find k with 2**k == Ninterv, validating the sample count.
    n = 1
    k = 0
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                         "non-negative power of 2.")

    R = {}  # triangular table: R[(i, j)] = j-th extrapolation at level i
    slice_all = (slice(None),) * nd
    slice0 = tupleset(slice_all, axis, 0)
    slicem1 = tupleset(slice_all, axis, -1)
    h = Ninterv * np.asarray(dx, dtype=float)
    # Coarsest trapezoid estimate uses only the two endpoints.
    R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
    slice_R = slice_all
    start = stop = step = Ninterv
    for i in xrange(1, k+1):
        start >>= 1
        # Select only the samples that are new at refinement level i.
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
        # Richardson extrapolation across the current row.
        for j in xrange(1, i+1):
            prev = R[(i, j-1)]
            R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
        h /= 2.0

    if show:
        if not np.isscalar(R[(0, 0)]):
            print("*** Printing table only supported for integrals" +
                  " of a single data set.")
        else:
            # `show` may also be a (precision, width) sequence; plain
            # bool/int falls back to the defaults below.
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%%%d.%df" % (width, precis)

            title = "Richardson Extrapolation Table for Romberg Integration"
            print("", title.center(68), "=" * 68, sep="\n", end="\n")
            for i in xrange(k+1):
                for j in xrange(i+1):
                    print(formstr % R[(i, j)], end=" ")
                print()
            print("=" * 68)
            print()

    # Best (most extrapolated) estimate is the table's bottom-right entry.
    return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
    # Print the Romberg result matrix.
    # Row i of `resmat` corresponds to 2**i intervals; column j holds the
    # j-th Richardson extrapolation of that row.
    # i and j are pre-initialized so the final print after the loops can
    # index resmat even though the loop variables are reused.
    # NOTE(review): an empty `resmat` would still fail on resmat[0][0];
    # callers (romberg) always pass at least one row.
    i = j = 0
    print('Romberg integration of', repr(function), end=' ')
    print('from', interval)
    print('')
    print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
    for i in xrange(len(resmat)):
        # Step size halves with each refinement level.
        print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
        for j in xrange(i+1):
            print('%9f' % (resmat[i][j]), end=' ')
        print('')
    print('')
    # Bottom-right entry of the triangular table is the final estimate.
    print('The final result is', resmat[i][j], end=' ')
    print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
            divmax=10, vec_func=False):
    """
    Romberg integration of a callable function or method.

    Returns the integral of `function` (a function of one variable)
    over the interval (`a`, `b`).

    If `show` is 1, the triangular array of the intermediate results
    will be printed.  If `vec_func` is True (default is False), then
    `function` is assumed to support vector arguments.

    Parameters
    ----------
    function : callable
        Function to be integrated.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.

    Returns
    -------
    results : float
        Result of the integration.

    Other Parameters
    ----------------
    args : tuple, optional
        Extra arguments to pass to function. Each element of `args` will
        be passed as a single argument to `func`. Default is to pass no
        extra arguments.
    tol, rtol : float, optional
        The desired absolute and relative tolerances. Defaults are 1.48e-8.
    show : bool, optional
        Whether to print the results. Default is False.
    divmax : int, optional
        Maximum order of extrapolation. Default is 10.
    vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e whether it is a
        "vector" function). Default is False.

    See Also
    --------
    fixed_quad : Fixed-order Gaussian quadrature.
    quad : Adaptive quadrature using QUADPACK.
    dblquad : Double integrals.
    tplquad : Triple integrals.
    romb : Integrators for sampled data.
    simps : Integrators for sampled data.
    cumtrapz : Cumulative integration for sampled data.
    ode : ODE integrator.
    odeint : ODE integrator.

    References
    ----------
    .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method

    Examples
    --------
    Integrate a gaussian from 0 to 1 and compare to the error function.

    >>> from scipy import integrate
    >>> from scipy.special import erf
    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
    Romberg integration of <function vfunc at ...> from [0, 1]

    ::

       Steps  StepSize  Results
           1  1.000000  0.385872
           2  0.500000  0.412631  0.421551
           4  0.250000  0.419184  0.421368  0.421356
           8  0.125000  0.420810  0.421352  0.421350  0.421350
          16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
          32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350

    The final result is 0.421350396475 after 33 function evaluations.

    >>> print("%g %g" % (2*result, erf(1)))
    0.842701 0.842701

    """
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Romberg integration only available "
                         "for finite limits.")
    vfunc = vectorize1(function, args, vec_func=vec_func)
    n = 1
    interval = [a, b]
    intrange = b - a
    # Running sum of trapezoid ordinates; each _difftrap call adds only
    # the ordinates new at that refinement level.
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    err = np.inf
    last_row = resmat[0]
    for i in xrange(1, divmax+1):
        n *= 2
        ordsum += _difftrap(vfunc, interval, n)
        # First column: refined trapezoid estimate; remaining columns:
        # successive Richardson extrapolations against the previous row.
        row = [intrange * ordsum / n]
        for k in xrange(i):
            row.append(_romberg_diff(last_row[k], row[k], k+1))
        result = row[i]
        lastresult = last_row[i-1]
        if show:
            resmat.append(row)
        err = abs(result - lastresult)
        if err < tol or err < rtol * abs(result):
            break
        last_row = row
    else:
        # Loop finished without the break: tolerance was never reached.
        warnings.warn(
            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
            AccuracyWarning)

    if show:
        _printresmat(vfunc, interval, resmat)
    return result
# Coefficients for Netwon-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
# Exact rational Newton-Cotes coefficients for equally-spaced samples,
# keyed by order N (1..14).  Each value is the tuple
# (num_a, den_a, int_a, num_B, den_B), consumed by newton_cotes as:
#     a = num_a * array(int_a) / den_a   (quadrature weights)
#     B = num_B / den_B                  (error coefficient)
_builtincoeffs = {
    1: (1,2,[1,1],-1,12),
    2: (1,3,[1,4,1],-1,90),
    3: (3,8,[1,3,3,1],-3,80),
    4: (2,45,[7,32,12,32,7],-8,945),
    5: (5,288,[19,75,50,50,75,19],-275,12096),
    6: (1,140,[41,216,27,272,27,216,41],-9,1400),
    7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
    8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
        -2368,467775),
    9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
                 15741,2857], -4671, 394240),
    10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
                   -260550,272400,-48525,106300,16067],
         -673175, 163459296),
    11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
                      15493566,15493566,-9595542,25226685,-3237113,
                      13486539,2171465], -2224234463, 237758976000),
    12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
                      87516288,-87797136,87516288,-51491295,35725120,
                      -7587864,9903168,1364651], -3012, 875875),
    13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
                           156074417954,-151659573325,206683437987,
                           -43111992612,-43111992612,206683437987,
                           -151659573325,156074417954,-31268252574,
                           56280729661,8181904909], -2639651053,
         344881152000),
    14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
                         -6625093363,12630121616,-16802270373,19534438464,
                         -16802270373,12630121616,-6625093363,3501442784,
                         -770720657,710986864,90241897], -3740727473,
         1275983280000)
    }
def newton_cotes(rn, equal=0):
    r"""
    Return weights and error coefficient for Newton-Cotes integration.

    Suppose we have (N+1) samples of f at the positions
    x_0, x_1, ..., x_N.  Then an N-point Newton-Cotes formula for the
    integral between x_0 and x_N is:

    :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
    + B_N (\Delta x)^{N+2} f^{N+1} (\xi)`

    where :math:`\xi \in [x_0,x_N]`
    and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.

    If the samples are equally-spaced and N is even, then the error
    term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.

    Parameters
    ----------
    rn : int
        The integer order for equally-spaced data or the relative positions of
        the samples with the first sample at 0 and the last at N, where N+1 is
        the length of `rn`.  N is the order of the Newton-Cotes integration.
    equal : int, optional
        Set to 1 to enforce equally spaced data.

    Returns
    -------
    an : ndarray
        1-D array of weights to apply to the function at the provided sample
        positions.
    B : float
        Error coefficient.

    Notes
    -----
    Normally, the Newton-Cotes rules are used on smaller integration
    regions and a composite rule is used to return the total integral.
    """
    # Distinguish "order given as an int" from "positions given as a
    # sequence": len() raising is the signal for the scalar case.
    try:
        N = len(rn) - 1
        if equal:
            rn = np.arange(N + 1)
        elif np.all(np.diff(rn) == 1):
            equal = 1
    except Exception:
        N = rn
        rn = np.arange(N + 1)
        equal = 1

    # Equally-spaced orders up to 14 have exact rational weights tabulated.
    if equal and N in _builtincoeffs:
        num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
        weights = num_a * np.array(int_a, dtype=float) / den_a
        return weights, float(num_B) / den_B

    if (rn[0] != 0) or (rn[-1] != N):
        raise ValueError("The sample positions must start at 0"
                         " and end at N")

    # Solve the Vandermonde moment system (mapped onto [-1, 1]) for the
    # quadrature weights.
    frac = rn / float(N)
    t = 2 * frac - 1
    exponents = np.arange(N + 1)
    vander = t ** exponents[:, np.newaxis]
    vander_inv = np.linalg.inv(vander)
    # improve precision of result
    for _ in range(2):
        vander_inv = 2 * vander_inv - vander_inv.dot(vander).dot(vander_inv)
    moments = 2.0 / (exponents[::2] + 1)
    an = vander_inv[:, ::2].dot(moments) * (N / 2.)

    if (N % 2 == 0) and equal:
        BN = N / (N + 3.)
        power = N + 2
    else:
        BN = N / (N + 2.)
        power = N + 1

    BN = BN - np.dot(frac ** power, an)
    p1 = power + 1
    # Scale factor power*N**power/ (power+1)! evaluated in log space.
    fac = math.exp(power * math.log(N) - gammaln(p1))
    return an, BN * fac
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@integrate@quadrature.py@.PATH_END.py
|
{
"filename": "_colorbar.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/splom/marker/_colorbar.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``splom.marker.colorbar`` compound property.

    Auto-generated plotly validator: validation/coercion of the
    ``colorbar`` attribute is delegated to the ``ColorBar`` data class by
    the ``CompoundValidator`` base.  ``data_docs`` is the generated
    per-attribute help text surfaced in user-facing error messages.
    """

    def __init__(self, plotly_name="colorbar", parent_name="splom.marker", **kwargs):
        # NOTE(review): the data_docs text below is emitted by plotly's code
        # generator; wording quirks (e.g. "(in px) or the border",
        # "*pixels") come from upstream and are deliberately left untouched.
        super(ColorbarValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "ColorBar"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            bgcolor
                Sets the color of padded area.
            bordercolor
                Sets the axis line color.
            borderwidth
                Sets the width (in px) or the border enclosing
                this color bar.
            dtick
                Sets the step in-between ticks on this axis.
                Use with `tick0`. Must be a positive number, or
                special strings available to "log" and "date"
                axes. If the axis `type` is "log", then ticks
                are set every 10^(n*dtick) where n is the tick
                number. For example, to set a tick mark at 1,
                10, 100, 1000, ... set dtick to 1. To set tick
                marks at 1, 100, 10000, ... set dtick to 2. To
                set tick marks at 1, 5, 25, 125, 625, 3125, ...
                set dtick to log_10(5), or 0.69897000433. "log"
                has several special values; "L<f>", where `f`
                is a positive number, gives ticks linearly
                spaced in value (but not position). For example
                `tick0` = 0.1, `dtick` = "L0.5" will put ticks
                at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
                plus small digits between, use "D1" (all
                digits) or "D2" (only 2 and 5). `tick0` is
                ignored for "D1" and "D2". If the axis `type`
                is "date", then you must convert the time to
                milliseconds. For example, to set the interval
                between ticks to one day, set `dtick` to
                86400000.0. "date" also has special values
                "M<n>" gives ticks spaced by a number of
                months. `n` must be a positive integer. To set
                ticks on the 15th of every third month, set
                `tick0` to "2000-01-15" and `dtick` to "M3". To
                set ticks every 4 years, set `dtick` to "M48"
            exponentformat
                Determines a formatting rule for the tick
                exponents. For example, consider the number
                1,000,000,000. If "none", it appears as
                1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
                "power", 1x10^9 (with 9 in a super script). If
                "SI", 1G. If "B", 1B.
            len
                Sets the length of the color bar This measure
                excludes the padding of both ends. That is, the
                color bar length is this length minus the
                padding on both ends.
            lenmode
                Determines whether this color bar's length
                (i.e. the measure in the color variation
                direction) is set in units of plot "fraction"
                or in *pixels. Use `len` to set the value.
            minexponent
                Hide SI prefix for 10^n if |n| is below this
                number. This only has an effect when
                `tickformat` is "SI" or "B".
            nticks
                Specifies the maximum number of ticks for the
                particular axis. The actual number of ticks
                will be chosen automatically to be less than or
                equal to `nticks`. Has an effect only if
                `tickmode` is set to "auto".
            outlinecolor
                Sets the axis line color.
            outlinewidth
                Sets the width (in px) of the axis line.
            separatethousands
                If "true", even 4-digit integers are separated
            showexponent
                If "all", all exponents are shown besides their
                significands. If "first", only the exponent of
                the first tick is shown. If "last", only the
                exponent of the last tick is shown. If "none",
                no exponents appear.
            showticklabels
                Determines whether or not the tick labels are
                drawn.
            showtickprefix
                If "all", all tick labels are displayed with a
                prefix. If "first", only the first tick is
                displayed with a prefix. If "last", only the
                last tick is displayed with a suffix. If
                "none", tick prefixes are hidden.
            showticksuffix
                Same as `showtickprefix` but for tick suffixes.
            thickness
                Sets the thickness of the color bar This
                measure excludes the size of the padding, ticks
                and labels.
            thicknessmode
                Determines whether this color bar's thickness
                (i.e. the measure in the constant color
                direction) is set in units of plot "fraction"
                or in "pixels". Use `thickness` to set the
                value.
            tick0
                Sets the placement of the first tick on this
                axis. Use with `dtick`. If the axis `type` is
                "log", then you must take the log of your
                starting tick (e.g. to set the starting tick to
                100, set the `tick0` to 2) except when
                `dtick`=*L<f>* (see `dtick` for more info). If
                the axis `type` is "date", it should be a date
                string, like date data. If the axis `type` is
                "category", it should be a number, using the
                scale where each category is assigned a serial
                number from zero in the order it appears.
            tickangle
                Sets the angle of the tick labels with respect
                to the horizontal. For example, a `tickangle`
                of -90 draws the tick labels vertically.
            tickcolor
                Sets the tick color.
            tickfont
                Sets the color bar's tick label font
            tickformat
                Sets the tick label formatting rule using d3
                formatting mini-languages which are very
                similar to those in Python. For numbers, see:
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
                And for dates see:
                https://github.com/d3/d3-time-
                format#locale_format We add one item to d3's
                date formatter: "%{n}f" for fractional seconds
                with n digits. For example, *2016-10-13
                09:15:23.456* with tickformat "%H~%M~%S.%2f"
                would display "09~15~23.46"
            tickformatstops
                A tuple of :class:`plotly.graph_objects.splom.m
                arker.colorbar.Tickformatstop` instances or
                dicts with compatible properties
            tickformatstopdefaults
                When used in a template (as layout.template.dat
                a.splom.marker.colorbar.tickformatstopdefaults)
                , sets the default property values to use for
                elements of
                splom.marker.colorbar.tickformatstops
            ticklabelposition
                Determines where tick labels are drawn.
            ticklen
                Sets the tick length (in px).
            tickmode
                Sets the tick mode for this axis. If "auto",
                the number of ticks is set via `nticks`. If
                "linear", the placement of the ticks is
                determined by a starting position `tick0` and a
                tick step `dtick` ("linear" is the default
                value if `tick0` and `dtick` are provided). If
                "array", the placement of the ticks is set via
                `tickvals` and the tick text is `ticktext`.
                ("array" is the default value if `tickvals` is
                provided).
            tickprefix
                Sets a tick label prefix.
            ticks
                Determines whether ticks are drawn or not. If
                "", this axis' ticks are not drawn. If
                "outside" ("inside"), this axis' are drawn
                outside (inside) the axis lines.
            ticksuffix
                Sets a tick label suffix.
            ticktext
                Sets the text displayed at the ticks position
                via `tickvals`. Only has an effect if
                `tickmode` is set to "array". Used with
                `tickvals`.
            ticktextsrc
                Sets the source reference on Chart Studio Cloud
                for ticktext .
            tickvals
                Sets the values at which ticks on this axis
                appear. Only has an effect if `tickmode` is set
                to "array". Used with `ticktext`.
            tickvalssrc
                Sets the source reference on Chart Studio Cloud
                for tickvals .
            tickwidth
                Sets the tick width (in px).
            title
                :class:`plotly.graph_objects.splom.marker.color
                bar.Title` instance or dict with compatible
                properties
            titlefont
                Deprecated: Please use
                splom.marker.colorbar.title.font instead. Sets
                this color bar's title font. Note that the
                title's font used to be set by the now
                deprecated `titlefont` attribute.
            titleside
                Deprecated: Please use
                splom.marker.colorbar.title.side instead.
                Determines the location of color bar's title
                with respect to the color bar. Note that the
                title's location used to be set by the now
                deprecated `titleside` attribute.
            x
                Sets the x position of the color bar (in plot
                fraction).
            xanchor
                Sets this color bar's horizontal position
                anchor. This anchor binds the `x` position to
                the "left", "center" or "right" of the color
                bar.
            xpad
                Sets the amount of padding (in px) along the x
                direction.
            y
                Sets the y position of the color bar (in plot
                fraction).
            yanchor
                Sets this color bar's vertical position anchor
                This anchor binds the `y` position to the
                "top", "middle" or "bottom" of the color bar.
            ypad
                Sets the amount of padding (in px) along the y
                direction.
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@splom@marker@_colorbar.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/histogram/marker/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """
    Styling of the line drawn around histogram marker points.

    All values live in the underlying plotly property store; every
    attribute below simply proxies ``self[<prop-name>]``.
    """

    # class properties
    # --------------------
    _parent_path_str = "histogram.marker"
    _path_str = "histogram.marker.line"
    _valid_props = {
        "autocolorscale",
        "cauto",
        "cmax",
        "cmid",
        "cmin",
        "color",
        "coloraxis",
        "colorscale",
        "colorsrc",
        "reversescale",
        "width",
        "widthsrc",
    }

    # Helper used only while the class body executes: builds a property
    # that forwards get/set to the plotly property store (self[key]).
    def _proxy_property(key, doc):
        def _get(self):
            return self[key]

        def _set(self, val):
            self[key] = val

        return property(_get, _set, doc=doc)

    autocolorscale = _proxy_property(
        "autocolorscale",
        "bool: whether the default palette is chosen automatically.",
    )
    cauto = _proxy_property(
        "cauto",
        "bool: whether the color domain is computed from the input data.",
    )
    cmax = _proxy_property(
        "cmax", "int|float: upper bound of the color domain."
    )
    cmid = _proxy_property(
        "cmid", "int|float: mid-point of the color domain."
    )
    cmin = _proxy_property(
        "cmin", "int|float: lower bound of the color domain."
    )
    color = _proxy_property(
        "color",
        "str|numpy.ndarray: marker.line color, or an array of numbers "
        "mapped onto the colorscale.",
    )
    coloraxis = _proxy_property(
        "coloraxis",
        "str: reference to a shared color axis ('coloraxis', "
        "'coloraxis2', ...).",
    )
    colorscale = _proxy_property(
        "colorscale",
        "str: colorscale used when `marker.line.color` is a numerical "
        "array.",
    )
    colorsrc = _proxy_property(
        "colorsrc",
        "str: Chart Studio Cloud source reference for `color`.",
    )
    reversescale = _proxy_property(
        "reversescale", "bool: reverse the color mapping if true."
    )
    width = _proxy_property(
        "width",
        "int|float|numpy.ndarray: width (in px) of the lines bounding "
        "the marker points.",
    )
    widthsrc = _proxy_property(
        "widthsrc",
        "str: Chart Studio Cloud source reference for `width`.",
    )

    # Do not leak the factory into the public class namespace.
    del _proxy_property

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `marker.line.colorscale`. Has an effect only if in
            `marker.line.color` is set to a numerical array. In
            case `colorscale` is unspecified or `autocolorscale` is
            true, the default palette will be chosen according to
            whether numbers in the `color` array are all positive,
            all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in
            `marker.line.color`) or the bounds set in
            `marker.line.cmin` and `marker.line.cmax` Has an effect
            only if in `marker.line.color` is set to a numerical
            array. Defaults to `false` when `marker.line.cmin` and
            `marker.line.cmax` are set by the user.
        cmax
            Sets the upper bound of the color domain. Has an effect
            only if in `marker.line.color` is set to a numerical
            array. Value should have the same units as in
            `marker.line.color` and if set, `marker.line.cmin` must
            be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `marker.line.cmin` and/or `marker.line.cmax` to be
            equidistant to this point. Has an effect only if in
            `marker.line.color` is set to a numerical array. Value
            should have the same units as in `marker.line.color`.
            Has no effect when `marker.line.cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Has an effect
            only if in `marker.line.color` is set to a numerical
            array. Value should have the same units as in
            `marker.line.color` and if set, `marker.line.cmax` must
            be set as well.
        color
            Sets the marker.line color. It accepts either a
            specific color or an array of numbers that are mapped
            to the colorscale relative to the max and min values of
            the array or relative to `marker.line.cmin` and
            `marker.line.cmax` if set.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorscale
            Sets the colorscale. Has an effect only if in
            `marker.line.color` is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use `marker.line.cmin` and `marker.line.cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Blackbody,Bluered,Blues,C
            ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
            and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        reversescale
            Reverses the color mapping if true. Has an effect only
            if in `marker.line.color` is set to a numerical array.
            If true, `marker.line.cmin` will correspond to the last
            color in the array and `marker.line.cmax` will
            correspond to the first color.
        width
            Sets the width (in px) of the lines bounding the marker
            points.
        widthsrc
            Sets the source reference on Chart Studio Cloud for
            `width`.
        """

    def __init__(
        self,
        arg=None,
        autocolorscale=None,
        cauto=None,
        cmax=None,
        cmid=None,
        cmin=None,
        color=None,
        coloraxis=None,
        colorscale=None,
        colorsrc=None,
        reversescale=None,
        width=None,
        widthsrc=None,
        **kwargs,
    ):
        """
        Construct a new Line object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or an
            instance of
            :class:`plotly.graph_objs.histogram.marker.Line`
        autocolorscale, cauto, cmax, cmid, cmin, color, coloraxis,
        colorscale, colorsrc, reversescale, width, widthsrc
            See `_prop_descriptions` for the meaning of each property.
            Explicit keyword values take precedence over entries found
            in ``arg``.

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")

        # Internal re-parenting shortcut used by the plotly machinery.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        # Normalize `arg` into a plain dict that properties can be
        # popped from without mutating the caller's object.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.histogram.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.marker.Line`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments override values found in `arg`;
        # dict insertion order preserves the historical assignment order.
        explicit = {
            "autocolorscale": autocolorscale,
            "cauto": cauto,
            "cmax": cmax,
            "cmid": cmid,
            "cmin": cmin,
            "color": color,
            "coloraxis": coloraxis,
            "colorscale": colorscale,
            "colorsrc": colorsrc,
            "reversescale": reversescale,
            "width": width,
            "widthsrc": widthsrc,
        }
        for prop, override in explicit.items():
            value = arg.pop(prop, None)
            if override is not None:
                value = override
            if value is not None:
                self[prop] = value

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@histogram@marker@_line.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "BEAST-Fitting/beast",
"repo_path": "beast_extracted/beast-master/beast/observationmodel/tests/__init__.py",
"type": "Python"
}
|
BEAST-FittingREPO_NAMEbeastPATH_START.@beast_extracted@beast-master@beast@observationmodel@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "__init__.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/data_objects/level_sets/__init__.py",
"type": "Python"
}
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@data_objects@level_sets@__init__.py@.PATH_END.py
|
|
{
"filename": "_textsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/_textsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `textsrc` property of `scatter3d` traces."""

    def __init__(self, plotly_name="textsrc", parent_name="scatter3d", **kwargs):
        # `edit_type` defaults to "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@_textsrc.py@.PATH_END.py
|
{
"filename": "split_gen.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/tools/proto_splitter/testdata/split_gen.py",
"type": "Python"
}
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates test data for Merger.
Constructs depth- and breadth-first tree-like chunked protos test data for
Merger::Read and Merger::Merge.
"""
from collections.abc import Sequence
import os
from typing import Optional, Union
from absl import app
from absl import flags
from absl import logging
from google.protobuf import message
from tensorflow.python.lib.io import file_io
from tensorflow.tools.proto_splitter import chunk_pb2
from tensorflow.tools.proto_splitter import split
from tensorflow.tools.proto_splitter import util
from tensorflow.tools.proto_splitter.testdata import test_message_pb2
# Command-line flag: directory the generated .pbtxt / chunked files are
# written to. Required; main() raises UsageError when it is unset.
SPLITTER_TESTDATA_PATH = flags.DEFINE_string(
    "path", None, help="Path to testdata directory.")

# Proto field number of StringNode.child_nodes; used as the field tag
# when recording chunked fields in the ChunkedMessage metadata.
_CHILD_NODES_FIELD_TAG = (
    test_message_pb2.StringNode.DESCRIPTOR.fields_by_name[
        "child_nodes"
    ].number
)
class StringNodeSplitter(split.ComposableSplitter):
    """Splits a StringNode proto with N strings into a tree with depth N."""

    def __init__(self, proto: test_message_pb2.StringNode,
                 chunked_message: Optional[chunk_pb2.ChunkedMessage] = None,
                 **kwargs):
        super().__init__(proto, **kwargs)
        # Prefer a ChunkedMessage already installed by the base class;
        # otherwise fall back to the one supplied by the caller.
        self._chunked_message = self._chunked_message or chunked_message

    def add_chunk(
        self, chunk: Union[message.Message, bytes], field_tags: util.FieldTypes
    ) -> None:
        """Adds a new chunk and updates the ChunkedMessage proto."""
        assert self._chunked_message is not None
        tag = util.get_field_tag(self._proto, field_tags)
        chunked_field = self._chunked_message.chunked_fields.add(field_tag=tag)
        # The recorded index is the slot this chunk will occupy once it is
        # appended to the root splitter's chunk list.
        chunked_field.message.chunk_index = self.total_chunks_len()
        self.add_root_chunk(chunk)

    def total_chunks_len(self) -> int:
        """Returns length of chunks stored in root splitter."""
        parent = self._parent_splitter
        if parent is None:
            return len(self._chunks)
        return parent.total_chunks_len()

    def add_root_chunk(self, chunk: Union[message.Message, bytes]) -> None:
        """Adds chunk to root splitter chunks."""
        if self._parent_splitter is not None:
            # Delegate upward until we reach the root splitter.
            self._parent_splitter.add_root_chunk(chunk)
            return
        assert self._chunks is not None
        self._chunks.append(chunk)
class DFStringNodeSplitter(StringNodeSplitter):
    """Depth-first string node splitter."""

    def build_chunks(self) -> Sequence[Union[message.Message, bytes]]:
        if not isinstance(self._proto, test_message_pb2.StringNode):
            raise TypeError("Can only split TreeString type protos")
        if not self._proto.child_nodes:
            return
        # Depth-first: chunk each child, then immediately descend into it
        # before moving on to the next sibling.
        for idx, child in enumerate(self._proto.child_nodes):
            self.add_chunk(child, [_CHILD_NODES_FIELD_TAG, idx])
            DFStringNodeSplitter(
                proto=child,
                parent_splitter=self,
                fields_in_parent=[_CHILD_NODES_FIELD_TAG],
                chunked_message=self._chunked_message.chunked_fields[idx].message,
            ).build_chunks()
        self._proto.ClearField("child_nodes")
        if self._parent_splitter is None:
            # Root splitter: record the chunk layout and dump it for tests.
            self._chunks.append(self._chunked_message)
            file_io.write_string_to_file(
                os.path.join(SPLITTER_TESTDATA_PATH.value, "df-split-tree.pbtxt"),
                str(self._chunked_message))
        return self._chunks
class BFStringNodeSplitter(StringNodeSplitter):
    """Breadth-first string node splitter."""

    def build_chunks(self) -> Sequence[Union[message.Message, bytes]]:
        if not isinstance(self._proto, test_message_pb2.StringNode):
            raise TypeError("Can only split TreeString type protos")
        if not self._proto.child_nodes:
            return
        # Breadth-first: chunk every direct child first...
        for idx, child in enumerate(self._proto.child_nodes):
            self.add_chunk(child, [_CHILD_NODES_FIELD_TAG, idx])
        # ...and only then descend one level into each of them.
        for idx, child in enumerate(self._proto.child_nodes):
            BFStringNodeSplitter(
                proto=child,
                parent_splitter=self,
                fields_in_parent=[_CHILD_NODES_FIELD_TAG],
                chunked_message=self._chunked_message.chunked_fields[idx].message,
            ).build_chunks()
        self._proto.ClearField("child_nodes")
        if self._parent_splitter is None:
            # Root splitter: record the chunk layout and dump it for tests.
            self._chunks.append(self._chunked_message)
            file_io.write_string_to_file(
                os.path.join(SPLITTER_TESTDATA_PATH.value, "bf-split-tree.pbtxt"),
                str(self._chunked_message))
        return self._chunks
def main(argv: Sequence[str]) -> None:
    """Generate the split-tree testdata files under --path."""
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")
    if SPLITTER_TESTDATA_PATH.value is None:
        raise app.UsageError("'path' flag not specified.")

    levels = 4

    def make_string_tree(
        string_tree: test_message_pb2.StringNode, level: int = 0, label: str = "0"
    ) -> test_message_pb2.StringNode:
        # A node at depth `level` gets level+1 children, so the tree widens
        # as it deepens; labels encode the path taken from the root.
        string_tree.val = label
        if level >= levels - 1:
            return string_tree
        for child_idx in range(level + 1):
            make_string_tree(
                string_tree.child_nodes.add(),
                level + 1,
                label + str(level + 1) + str(child_idx),
            )
        return string_tree

    def copy_string_tree(string_tree: test_message_pb2.StringNode):
        # Deep-copy so each splitter can destructively consume its tree.
        clone = test_message_pb2.StringNode()
        clone.CopyFrom(string_tree)
        return clone

    string_tree = make_string_tree(test_message_pb2.StringNode())
    logging.info("StringNode tree generated:\n%s", string_tree)
    file_io.write_string_to_file(
        os.path.join(SPLITTER_TESTDATA_PATH.value, "split-tree.pbtxt"),
        str(string_tree))

    # depth-first chunk ordering
    DFStringNodeSplitter(copy_string_tree(string_tree)).write(
        os.path.join(SPLITTER_TESTDATA_PATH.value, "df-split-tree"))
    # breadth-first
    BFStringNodeSplitter(copy_string_tree(string_tree)).write(
        os.path.join(SPLITTER_TESTDATA_PATH.value, "bf-split-tree"))
if __name__ == "__main__":
    # absl's app.run parses command-line flags before dispatching to main().
    app.run(main)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@tools@proto_splitter@testdata@split_gen.py@.PATH_END.py
|
{
"filename": "osx_shlib.py",
"repo_name": "CosmoLike/cocoa",
"repo_path": "cocoa_extracted/cocoa-main/Cocoa/external_modules/code/planck/code/spt_clik/waf_tools/osx_shlib.py",
"type": "Python"
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def configure(ctx):
    """Record the platform's shared-library suffix on ``ctx.env.shsuffix``.

    macOS uses ``dylib``; every other platform gets ``so``.
    """
    import sys
    on_macos = sys.platform.lower() == "darwin"
    ctx.env.shsuffix = "dylib" if on_macos else "so"
CosmoLikeREPO_NAMEcocoaPATH_START.@cocoa_extracted@cocoa-main@Cocoa@external_modules@code@planck@code@spt_clik@waf_tools@osx_shlib.py@.PATH_END.py
|
{
"filename": "animation.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/lib/matplotlib/animation.py",
"type": "Python"
}
|
import abc
import base64
import contextlib
from io import BytesIO, TextIOWrapper
import itertools
import logging
from pathlib import Path
import shutil
import subprocess
import sys
from tempfile import TemporaryDirectory
import uuid
import warnings
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib._animation_data import (
DISPLAY_TEMPLATE, INCLUDED_FRAMES, JS_INCLUDE, STYLE_INCLUDE)
from matplotlib import _api, cbook
import matplotlib.colors as mcolors
# Module-level logger; animation writers report setup/teardown through it.
_log = logging.getLogger(__name__)

# Process creation flag for subprocess to prevent it raising a terminal
# window. See for example https://stackoverflow.com/q/24130623/
subprocess_creation_flags = (
    subprocess.CREATE_NO_WINDOW if sys.platform == 'win32' else 0)
def adjusted_figsize(w, h, dpi, n):
    """
    Compute figure size so that pixels are a multiple of n.

    Parameters
    ----------
    w, h : float
        Size in inches.
    dpi : float
        The dpi.
    n : int
        The target multiple.

    Returns
    -------
    wnew, hnew : float
        The new figure size in inches.
    """
    # this maybe simplified if / when we adopt consistent rounding for
    # pixel size across the whole library
    def _snap_roundoff(value):
        # If float roundoff leaves the pixel count indivisible by n, try the
        # adjacent representable floats in either direction; otherwise keep
        # the value unchanged.
        if int(value * dpi) % n != 0:
            for candidate in (np.nextafter(value, np.inf),
                              np.nextafter(value, -np.inf)):
                if int(candidate * dpi) % n == 0:
                    return candidate
        return value

    wnew = int(w * dpi / n) * n / dpi
    hnew = int(h * dpi / n) * n / dpi
    return _snap_roundoff(wnew), _snap_roundoff(hnew)
class MovieWriterRegistry:
    """Registry of available writer classes by human readable name."""

    def __init__(self):
        # name -> writer class; insertion order is preserved by dict.
        self._registered = {}

    def register(self, name):
        """
        Decorator for registering a class under a name.

        Example use::

            @registry.register(name)
            class Foo:
                pass
        """
        def _store(writer_cls):
            self._registered[name] = writer_cls
            return writer_cls
        return _store

    def is_available(self, name):
        """
        Check if given writer is available by name.

        Parameters
        ----------
        name : str

        Returns
        -------
        bool
        """
        writer_cls = self._registered.get(name)
        return writer_cls is not None and writer_cls.isAvailable()

    def __iter__(self):
        """Iterate over names of available writer class."""
        return (name for name in self._registered if self.is_available(name))

    def list(self):
        """Get a list of available MovieWriters."""
        return [*self]

    def __getitem__(self, name):
        """Get an available writer class from its name."""
        if not self.is_available(name):
            raise RuntimeError(f"Requested MovieWriter ({name}) not available")
        return self._registered[name]
# Module-level singleton registry; builtin writer classes register
# themselves into it via the @writers.register(name) decorator.
writers = MovieWriterRegistry()
class AbstractMovieWriter(abc.ABC):
    """
    Abstract base class for writing movies, providing a way to grab frames by
    calling `~AbstractMovieWriter.grab_frame`.

    `setup` is called to start the process and `finish` is called afterwards.
    `saving` is provided as a context manager to facilitate this process as ::

        with moviewriter.saving(fig, outfile='myfile.mp4', dpi=100):
            # Iterate over frames
            moviewriter.grab_frame(**savefig_kwargs)

    The use of the context manager ensures that `setup` and `finish` are
    performed as necessary.

    An instance of a concrete subclass of this class can be given as the
    ``writer`` argument of `Animation.save()`.
    """

    def __init__(self, fps=5, metadata=None, codec=None, bitrate=None):
        # Frames per second of the resulting movie.
        self.fps = fps
        # Avoid sharing one mutable default dict across instances.
        self.metadata = metadata if metadata is not None else {}
        # Fall back to the rcParams defaults when not given explicitly.
        self.codec = mpl._val_or_rc(codec, 'animation.codec')
        self.bitrate = mpl._val_or_rc(bitrate, 'animation.bitrate')

    @abc.abstractmethod
    def setup(self, fig, outfile, dpi=None):
        """
        Setup for writing the movie file.

        Parameters
        ----------
        fig : `~matplotlib.figure.Figure`
            The figure object that contains the information for frames.
        outfile : str
            The filename of the resulting movie file.
        dpi : float, default: ``fig.dpi``
            The DPI (or resolution) for the file. This controls the size
            in pixels of the resulting movie file.
        """
        # Check that path is valid (raises if the parent directory is missing).
        Path(outfile).parent.resolve(strict=True)
        self.outfile = outfile
        self.fig = fig
        if dpi is None:
            dpi = self.fig.dpi
        self.dpi = dpi

    @property
    def frame_size(self):
        """A tuple ``(width, height)`` in pixels of a movie frame."""
        w, h = self.fig.get_size_inches()
        return int(w * self.dpi), int(h * self.dpi)

    def _supports_transparency(self):
        """
        Whether this writer supports transparency.

        Writers may consult output file type and codec to determine this at
        runtime.  The base implementation conservatively reports False.
        """
        return False

    @abc.abstractmethod
    def grab_frame(self, **savefig_kwargs):
        """
        Grab the image information from the figure and save as a movie frame.

        All keyword arguments in *savefig_kwargs* are passed on to the
        `~.Figure.savefig` call that saves the figure. However, several
        keyword arguments that are supported by `~.Figure.savefig` may not be
        passed as they are controlled by the MovieWriter:

        - *dpi*, *bbox_inches*: These may not be passed because each frame of
          the animation must be exactly the same size in pixels.
        - *format*: This is controlled by the MovieWriter.
        """

    @abc.abstractmethod
    def finish(self):
        """Finish any processing for writing the movie."""

    @contextlib.contextmanager
    def saving(self, fig, outfile, dpi, *args, **kwargs):
        """
        Context manager to facilitate writing the movie file.

        ``*args, **kw`` are any parameters that should be passed to `setup`.
        """
        if mpl.rcParams['savefig.bbox'] == 'tight':
            _log.info("Disabling savefig.bbox = 'tight', as it may cause "
                      "frame size to vary, which is inappropriate for "
                      "animation.")

        # This particular sequence is what contextlib.contextmanager wants
        self.setup(fig, outfile, dpi, *args, **kwargs)
        with mpl.rc_context({'savefig.bbox': None}):
            try:
                yield self
            finally:
                self.finish()
class MovieWriter(AbstractMovieWriter):
    """
    Base class for writing movies.

    This is a base class for MovieWriter subclasses that write movie frame
    data to a pipe. You cannot instantiate this class directly.
    See examples for how to use its subclasses.

    Attributes
    ----------
    frame_format : str
        The format used in writing frame data, defaults to 'rgba'.
    fig : `~matplotlib.figure.Figure`
        The figure to capture data from.
        This must be provided by the subclasses.
    """

    # Builtin writer subclasses additionally define the _exec_key and _args_key
    # attributes, which indicate the rcParams entries where the path to the
    # executable and additional command-line arguments to the executable are
    # stored. Third-party writers cannot meaningfully set these as they cannot
    # extend rcParams with new keys.

    # Pipe-based writers only support RGBA, but file-based ones support more
    # formats.
    supported_formats = ["rgba"]

    def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None,
                 metadata=None):
        """
        Parameters
        ----------
        fps : int, default: 5
            Movie frame rate (per second).
        codec : str or None, default: :rc:`animation.codec`
            The codec to use.
        bitrate : int, default: :rc:`animation.bitrate`
            The bitrate of the movie, in kilobits per second. Higher values
            means higher quality movies, but increase the file size. A value
            of -1 lets the underlying movie encoder select the bitrate.
        extra_args : list of str or None, optional
            Extra command-line arguments passed to the underlying movie encoder. These
            arguments are passed last to the encoder, just before the filename. The
            default, None, means to use :rc:`animation.[name-of-encoder]_args` for the
            builtin writers.
        metadata : dict[str, str], default: {}
            A dictionary of keys and values for metadata to include in the
            output file. Some keys that may be of use include:
            title, artist, genre, subject, copyright, srcform, comment.
        """
        if type(self) is MovieWriter:
            # TODO MovieWriter is still an abstract class and needs to be
            # extended with a mixin. This should be clearer in naming
            # and description. For now, just give a reasonable error
            # message to users.
            raise TypeError(
                'MovieWriter cannot be instantiated directly. Please use one '
                'of its subclasses.')
        super().__init__(fps=fps, metadata=metadata, codec=codec,
                         bitrate=bitrate)
        # Pipe-based writers stream raw pixel data, so default to the first
        # (and for this class, only) supported frame format.
        self.frame_format = self.supported_formats[0]
        self.extra_args = extra_args

    def _adjust_frame_size(self):
        """
        Ensure the figure size yields even pixel dimensions for h264.

        Returns the (possibly adjusted) figure size in inches.
        """
        if self.codec == 'h264':
            # h264 requires both frame dimensions to be divisible by 2.
            wo, ho = self.fig.get_size_inches()
            w, h = adjusted_figsize(wo, ho, self.dpi, 2)
            if (wo, ho) != (w, h):
                self.fig.set_size_inches(w, h, forward=True)
                _log.info('figure size in inches has been adjusted '
                          'from %s x %s to %s x %s', wo, ho, w, h)
        else:
            w, h = self.fig.get_size_inches()
        _log.debug('frame size in pixels is %s x %s', *self.frame_size)
        return w, h

    def setup(self, fig, outfile, dpi=None):
        # docstring inherited
        super().setup(fig, outfile, dpi=dpi)
        self._w, self._h = self._adjust_frame_size()
        # Run here so that grab_frame() can write the data to a pipe. This
        # eliminates the need for temp files.
        self._run()

    def _run(self):
        # Uses subprocess to call the program for assembling frames into a
        # movie file. *args* returns the sequence of command line arguments
        # from a few configuration options.
        command = self._args()
        _log.info('MovieWriter._run: running command: %s',
                  cbook._pformat_subprocess(command))
        PIPE = subprocess.PIPE
        self._proc = subprocess.Popen(
            command, stdin=PIPE, stdout=PIPE, stderr=PIPE,
            creationflags=subprocess_creation_flags)

    def finish(self):
        """Finish any processing for writing the movie."""
        out, err = self._proc.communicate()
        # Use the encoding/errors that universal_newlines would use.
        out = TextIOWrapper(BytesIO(out)).read()
        err = TextIOWrapper(BytesIO(err)).read()
        if out:
            _log.log(
                logging.WARNING if self._proc.returncode else logging.DEBUG,
                "MovieWriter stdout:\n%s", out)
        if err:
            _log.log(
                logging.WARNING if self._proc.returncode else logging.DEBUG,
                "MovieWriter stderr:\n%s", err)
        if self._proc.returncode:
            raise subprocess.CalledProcessError(
                self._proc.returncode, self._proc.args, out, err)

    def grab_frame(self, **savefig_kwargs):
        # docstring inherited
        _validate_grabframe_kwargs(savefig_kwargs)
        _log.debug('MovieWriter.grab_frame: Grabbing frame.')
        # Readjust the figure size in case it has been changed by the user.
        # All frames must have the same size to save the movie correctly.
        self.fig.set_size_inches(self._w, self._h)
        # Save the figure data to the sink, using the frame format and dpi.
        self.fig.savefig(self._proc.stdin, format=self.frame_format,
                         dpi=self.dpi, **savefig_kwargs)

    def _args(self):
        """Assemble list of encoder-specific command-line arguments."""
        # Bug fix: this previously *returned* the exception instance instead
        # of raising it, so a subclass missing _args would hand the exception
        # object to subprocess.Popen and fail with an unrelated TypeError.
        raise NotImplementedError("args needs to be implemented by subclass.")

    @classmethod
    def bin_path(cls):
        """
        Return the binary path to the commandline tool used by a specific
        subclass. This is a class method so that the tool can be looked for
        before making a particular MovieWriter subclass available.
        """
        return str(mpl.rcParams[cls._exec_key])

    @classmethod
    def isAvailable(cls):
        """Return whether a MovieWriter subclass is actually available."""
        return shutil.which(cls.bin_path()) is not None
class FileMovieWriter(MovieWriter):
    """
    `MovieWriter` for writing to individual files and stitching at the end.

    This must be sub-classed to be useful.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Default frame format comes from rcParams so it is user-configurable.
        self.frame_format = mpl.rcParams['animation.frame_format']

    def setup(self, fig, outfile, dpi=None, frame_prefix=None):
        """
        Setup for writing the movie file.

        Parameters
        ----------
        fig : `~matplotlib.figure.Figure`
            The figure to grab the rendered frames from.
        outfile : str
            The filename of the resulting movie file.
        dpi : float, default: ``fig.dpi``
            The dpi of the output file. This, with the figure size,
            controls the size in pixels of the resulting movie file.
        frame_prefix : str, optional
            The filename prefix to use for temporary files. If *None* (the
            default), files are written to a temporary directory which is
            deleted by `finish`; if not *None*, no temporary files are
            deleted.
        """
        # Check that path is valid
        Path(outfile).parent.resolve(strict=True)
        self.fig = fig
        self.outfile = outfile
        if dpi is None:
            dpi = self.fig.dpi
        self.dpi = dpi
        self._adjust_frame_size()
        if frame_prefix is None:
            # We own the temp directory; frames are removed in finish()/__del__.
            self._tmpdir = TemporaryDirectory()
            self.temp_prefix = str(Path(self._tmpdir.name, 'tmp'))
        else:
            # Caller-supplied prefix: the caller keeps the frame files.
            self._tmpdir = None
            self.temp_prefix = frame_prefix
        self._frame_counter = 0  # used for generating sequential file names
        self._temp_paths = list()
        # Filename template: <prefix><7-digit frame number>.<format>
        self.fname_format_str = '%s%%07d.%s'

    def __del__(self):
        # hasattr guards against partially-initialized instances (e.g. when
        # setup() was never called before garbage collection).
        if hasattr(self, '_tmpdir') and self._tmpdir:
            self._tmpdir.cleanup()

    @property
    def frame_format(self):
        """
        Format (png, jpeg, etc.) to use for saving the frames, which can be
        decided by the individual subclasses.
        """
        return self._frame_format

    @frame_format.setter
    def frame_format(self, frame_format):
        # Invalid formats are not an error: warn and fall back to the first
        # supported format instead.
        if frame_format in self.supported_formats:
            self._frame_format = frame_format
        else:
            _api.warn_external(
                f"Ignoring file format {frame_format!r} which is not "
                f"supported by {type(self).__name__}; using "
                f"{self.supported_formats[0]} instead.")
            self._frame_format = self.supported_formats[0]

    def _base_temp_name(self):
        # Generates a template name (without number) given the frame format
        # for extension and the prefix.
        return self.fname_format_str % (self.temp_prefix, self.frame_format)

    def grab_frame(self, **savefig_kwargs):
        # docstring inherited
        # Creates a filename for saving using basename and counter.
        _validate_grabframe_kwargs(savefig_kwargs)
        path = Path(self._base_temp_name() % self._frame_counter)
        self._temp_paths.append(path)  # Record the filename for later use.
        self._frame_counter += 1  # Ensures each created name is unique.
        _log.debug('FileMovieWriter.grab_frame: Grabbing frame %d to path=%s',
                   self._frame_counter, path)
        with open(path, 'wb') as sink:  # Save figure to the sink.
            self.fig.savefig(sink, format=self.frame_format, dpi=self.dpi,
                             **savefig_kwargs)

    def finish(self):
        # Call run here now that all frame grabbing is done. All temp files
        # are available to be assembled.
        try:
            self._run()
            super().finish()
        finally:
            # Clean up even if the encoder subprocess failed.
            if self._tmpdir:
                _log.debug(
                    'MovieWriter: clearing temporary path=%s', self._tmpdir
                )
                self._tmpdir.cleanup()
@writers.register('pillow')
class PillowWriter(AbstractMovieWriter):
    """Movie writer that assembles frames into an animated image via Pillow."""

    def _supports_transparency(self):
        # Pillow keeps RGBA frames for output formats that can express alpha.
        return True

    @classmethod
    def isAvailable(cls):
        # Pillow is always importable here, so this writer is always usable.
        return True

    def setup(self, fig, outfile, dpi=None):
        # docstring inherited
        super().setup(fig, outfile, dpi=dpi)
        self._frames = []  # Accumulated PIL images, one per grabbed frame.

    def grab_frame(self, **savefig_kwargs):
        # docstring inherited
        _validate_grabframe_kwargs(savefig_kwargs)
        buf = BytesIO()
        self.fig.savefig(
            buf, **{**savefig_kwargs, "format": "rgba", "dpi": self.dpi})
        im = Image.frombuffer(
            "RGBA", self.frame_size, buf.getbuffer(), "raw", "RGBA", 0, 1)
        if im.getextrema()[3][0] < 255:
            # This frame has transparency, so we'll just add it as is.
            # Bug fix: append to self._frames (created in setup); the previous
            # code appended to a nonexistent self._frame, raising
            # AttributeError for any frame containing transparency.
            self._frames.append(im)
        else:
            # Without transparency, we switch to RGB mode, which converts to P mode a
            # little better if needed (specifically, this helps with GIF output.)
            self._frames.append(im.convert("RGB"))

    def finish(self):
        # docstring inherited
        # Write the first frame and append the rest; duration is ms per frame.
        self._frames[0].save(
            self.outfile, save_all=True, append_images=self._frames[1:],
            duration=int(1000 / self.fps), loop=0)
# Base class of ffmpeg information. Has the config keys and the common set
# of arguments that controls the *output* side of things.
class FFMpegBase:
    """
    Mixin class for FFMpeg output.

    This is a base class for the concrete `FFMpegWriter` and `FFMpegFileWriter`
    classes.
    """

    # rcParams keys holding the ffmpeg executable path and extra CLI args.
    _exec_key = 'animation.ffmpeg_path'
    _args_key = 'animation.ffmpeg_args'

    def _supports_transparency(self):
        # Container formats that always support alpha, regardless of codec.
        suffix = Path(self.outfile).suffix
        if suffix in {'.apng', '.avif', '.gif', '.webm', '.webp'}:
            return True
        # This list was found by going through `ffmpeg -codecs` for video encoders,
        # running them with _support_transparency() forced to True, and checking that
        # the "Pixel format" in Kdenlive included alpha. Note this is not a guarantee
        # that transparency will work; you may also need to pass `-pix_fmt`, but we
        # trust the user has done so if they are asking for these formats.
        return self.codec in {
            'apng', 'avrp', 'bmp', 'cfhd', 'dpx', 'ffv1', 'ffvhuff', 'gif', 'huffyuv',
            'jpeg2000', 'ljpeg', 'png', 'prores', 'prores_aw', 'prores_ks', 'qtrle',
            'rawvideo', 'targa', 'tiff', 'utvideo', 'v408', }

    @property
    def output_args(self):
        """Output-side ffmpeg arguments, ending with the output filename."""
        args = []
        suffix = Path(self.outfile).suffix
        if suffix in {'.apng', '.avif', '.gif', '.webm', '.webp'}:
            # These container formats imply the codec; override whatever was set.
            self.codec = suffix[1:]
        else:
            args.extend(['-vcodec', self.codec])
        extra_args = (self.extra_args if self.extra_args is not None
                      else mpl.rcParams[self._args_key])
        # For h264, the default format is yuv444p, which is not compatible
        # with quicktime (and others). Specifying yuv420p fixes playback on
        # iOS, as well as HTML5 video in firefox and safari (on both Windows and
        # macOS). Also fixes internet explorer. This is as of 2015/10/29.
        if self.codec == 'h264' and '-pix_fmt' not in extra_args:
            args.extend(['-pix_fmt', 'yuv420p'])
        # For GIF, we're telling FFmpeg to split the video stream, to generate
        # a palette, and then use it for encoding.
        elif self.codec == 'gif' and '-filter_complex' not in extra_args:
            args.extend(['-filter_complex',
                         'split [a][b];[a] palettegen [p];[b][p] paletteuse'])
        # For AVIF, we're telling FFmpeg to split the video stream, extract the alpha,
        # in order to place it in a secondary stream, as needed by AVIF-in-FFmpeg.
        elif self.codec == 'avif' and '-filter_complex' not in extra_args:
            args.extend(['-filter_complex',
                         'split [rgb][rgba]; [rgba] alphaextract [alpha]',
                         '-map', '[rgb]', '-map', '[alpha]'])
        if self.bitrate > 0:
            args.extend(['-b', '%dk' % self.bitrate])  # %dk: bitrate in kbps.
        for k, v in self.metadata.items():
            args.extend(['-metadata', f'{k}={v}'])
        args.extend(extra_args)
        # -y: overwrite the output file without asking.
        return args + ['-y', self.outfile]
# Combine FFMpeg options with pipe-based writing
@writers.register('ffmpeg')
class FFMpegWriter(FFMpegBase, MovieWriter):
    """
    Pipe-based ffmpeg writer.

    Frames are streamed directly to ffmpeg via a pipe and written in a single pass.

    This effectively works as a slideshow input to ffmpeg with the fps passed as
    ``-framerate``, so see also `their notes on frame rates`_ for further details.

    .. _their notes on frame rates: https://trac.ffmpeg.org/wiki/Slideshow#Framerates
    """

    def _args(self):
        # Build the ffmpeg invocation for reading raw frames from stdin.
        cmd = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo',
               '-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format,
               '-framerate', str(self.fps)]
        # Logging is quieted because subprocess.PIPE has limited buffer size.
        # If you have a lot of frames in your animation and set logging to
        # DEBUG, you will have a buffer overrun.
        if _log.getEffectiveLevel() > logging.DEBUG:
            cmd.extend(['-loglevel', 'error'])
        # '-i pipe:' tells ffmpeg to read its input from stdin.
        return [*cmd, '-i', 'pipe:', *self.output_args]
# Combine FFMpeg options with temp file-based writing
@writers.register('ffmpeg_file')
class FFMpegFileWriter(FFMpegBase, FileMovieWriter):
    """
    File-based ffmpeg writer.

    Frames are written to temporary files on disk and then stitched together at the end.

    This effectively works as a slideshow input to ffmpeg with the fps passed as
    ``-framerate``, so see also `their notes on frame rates`_ for further details.

    .. _their notes on frame rates: https://trac.ffmpeg.org/wiki/Slideshow#Framerates
    """

    supported_formats = ['png', 'jpeg', 'tiff', 'raw', 'rgba']

    def _args(self):
        # Assemble the ffmpeg command that stitches the numbered temp frames.
        options = []
        if self.frame_format in {'raw', 'rgba'}:
            # Raw frames carry no header, so ffmpeg must be told their layout.
            options.extend([
                '-f', 'image2', '-vcodec', 'rawvideo',
                '-video_size', '%dx%d' % self.frame_size,
                '-pixel_format', 'rgba',
            ])
        options.extend(['-framerate', str(self.fps),
                        '-i', self._base_temp_name()])
        if not self._tmpdir:
            # With a user-supplied prefix, limit input to the frames we wrote.
            options.extend(['-frames:v', str(self._frame_counter)])
        # Logging is quieted because subprocess.PIPE has limited buffer size.
        # If you have a lot of frames in your animation and set logging to
        # DEBUG, you will have a buffer overrun.
        if _log.getEffectiveLevel() > logging.DEBUG:
            options.extend(['-loglevel', 'error'])
        return [self.bin_path(), *options, *self.output_args]
# Base class for animated GIFs with ImageMagick
class ImageMagickBase:
    """
    Mixin class for ImageMagick output.

    This is a base class for the concrete `ImageMagickWriter` and
    `ImageMagickFileWriter` classes, which define an ``input_names`` attribute
    (or property) specifying the input names passed to ImageMagick.
    """

    # rcParams keys holding the convert/magick executable path and extra args.
    _exec_key = 'animation.convert_path'
    _args_key = 'animation.convert_args'

    def _supports_transparency(self):
        # Transparency support is determined purely by the output container.
        suffix = Path(self.outfile).suffix
        return suffix in {'.apng', '.avif', '.gif', '.webm', '.webp'}

    def _args(self):
        # ImageMagick does not recognize "raw".
        fmt = "rgba" if self.frame_format == "raw" else self.frame_format
        extra_args = (self.extra_args if self.extra_args is not None
                      else mpl.rcParams[self._args_key])
        return [
            self.bin_path(),
            "-size", "%ix%i" % self.frame_size,
            "-depth", "8",
            # -delay is in 1/100ths of a second per frame.
            "-delay", str(100 / self.fps),
            "-loop", "0",  # 0 = loop forever.
            f"{fmt}:{self.input_names}",
            *extra_args,
            self.outfile,
        ]

    @classmethod
    def bin_path(cls):
        # 'convert' has been deprecated by ImageMagick 7 in favor of 'magick';
        # resolve the real executable in that case.
        binpath = super().bin_path()
        if binpath == 'convert':
            binpath = mpl._get_executable_info('magick').executable
        return binpath

    @classmethod
    def isAvailable(cls):
        try:
            return super().isAvailable()
        except mpl.ExecutableNotFoundError as _enf:
            # May be raised by get_executable_info.
            _log.debug('ImageMagick unavailable due to: %s', _enf)
            return False
# Combine ImageMagick options with pipe-based writing
@writers.register('imagemagick')
class ImageMagickWriter(ImageMagickBase, MovieWriter):
    """
    Pipe-based animated gif writer.

    Frames are streamed directly to ImageMagick via a pipe and written
    in a single pass.
    """

    # ImageMagick reads "-" as "take input from stdin".
    input_names = "-"  # stdin
# Combine ImageMagick options with temp file-based writing
@writers.register('imagemagick_file')
class ImageMagickFileWriter(ImageMagickBase, FileMovieWriter):
    """
    File-based animated gif writer.

    Frames are written to temporary files on disk and then stitched
    together at the end.
    """

    supported_formats = ['png', 'jpeg', 'tiff', 'raw', 'rgba']

    @property
    def input_names(self):
        # Glob pattern matching every numbered temp frame on disk.
        return f'{self.temp_prefix}*.{self.frame_format}'
# Taken directly from jakevdp's JSAnimation package at
# http://github.com/jakevdp/JSAnimation
def _included_frames(frame_count, frame_format, frame_dir):
    """Return the JS snippet referencing on-disk frame files for HTMLWriter."""
    return INCLUDED_FRAMES.format(
        Nframes=frame_count, frame_dir=frame_dir, frame_format=frame_format)
def _embedded_frames(frame_list, frame_format):
"""frame_list should be a list of base64-encoded png files"""
if frame_format == 'svg':
# Fix MIME type for svg
frame_format = 'svg+xml'
template = ' frames[{0}] = "data:image/{1};base64,{2}"\n'
return "\n" + "".join(
template.format(i, frame_format, frame_data.replace('\n', '\\\n'))
for i, frame_data in enumerate(frame_list))
@writers.register('html')
class HTMLWriter(FileMovieWriter):
    """Writer for JavaScript-based HTML movies."""

    supported_formats = ['png', 'jpeg', 'tiff', 'svg']

    @classmethod
    def isAvailable(cls):
        # No external executable is needed, so this writer is always available.
        return True

    def __init__(self, fps=30, codec=None, bitrate=None, extra_args=None,
                 metadata=None, embed_frames=False, default_mode='loop',
                 embed_limit=None):
        """
        Parameters
        ----------
        embed_frames : bool, default: False
            Whether to embed the frames as base64 data URIs in the HTML file
            (otherwise they are written as separate image files on disk).
        default_mode : {'loop', 'once', 'reflect'}, default: 'loop'
            The initially selected playback mode.
        embed_limit : float, default: :rc:`animation.embed_limit`
            Limit, in MB, on the total size of embedded frames.
        """
        if extra_args:
            _log.warning("HTMLWriter ignores 'extra_args'")
        extra_args = ()  # Don't lookup nonexistent rcParam[args_key].
        self.embed_frames = embed_frames
        self.default_mode = default_mode.lower()
        _api.check_in_list(['loop', 'once', 'reflect'],
                           default_mode=self.default_mode)
        # Save embed limit, which is given in MB
        self._bytes_limit = mpl._val_or_rc(embed_limit, 'animation.embed_limit')
        # Convert from MB to bytes
        self._bytes_limit *= 1024 * 1024
        super().__init__(fps, codec, bitrate, extra_args, metadata)

    def setup(self, fig, outfile, dpi=None, frame_dir=None):
        # docstring inherited
        outfile = Path(outfile)
        _api.check_in_list(['.html', '.htm'], outfile_extension=outfile.suffix)
        self._saved_frames = []
        self._total_bytes = 0
        self._hit_limit = False
        if not self.embed_frames:
            # Frames are written alongside the HTML file in a sibling
            # "<name>_frames" directory (unless a directory is given).
            if frame_dir is None:
                frame_dir = outfile.with_name(outfile.stem + '_frames')
            frame_dir.mkdir(parents=True, exist_ok=True)
            frame_prefix = frame_dir / 'frame'
        else:
            frame_prefix = None
        super().setup(fig, outfile, dpi, frame_prefix)
        # Keep the on-disk frames: the HTML file references them by path.
        self._clear_temp = False

    def grab_frame(self, **savefig_kwargs):
        # docstring inherited
        _validate_grabframe_kwargs(savefig_kwargs)
        if self.embed_frames:
            # Just stop processing if we hit the limit
            if self._hit_limit:
                return
            f = BytesIO()
            self.fig.savefig(f, format=self.frame_format,
                             dpi=self.dpi, **savefig_kwargs)
            imgdata64 = base64.encodebytes(f.getvalue()).decode('ascii')
            self._total_bytes += len(imgdata64)
            if self._total_bytes >= self._bytes_limit:
                _log.warning(
                    "Animation size has reached %s bytes, exceeding the limit "
                    "of %s. If you're sure you want a larger animation "
                    "embedded, set the animation.embed_limit rc parameter to "
                    "a larger value (in MB). This and further frames will be "
                    "dropped.", self._total_bytes, self._bytes_limit)
                self._hit_limit = True
            else:
                self._saved_frames.append(imgdata64)
        else:
            # Fall back to writing the frame to disk as a numbered file.
            return super().grab_frame(**savefig_kwargs)

    def finish(self):
        # save the frames to an html file
        if self.embed_frames:
            fill_frames = _embedded_frames(self._saved_frames,
                                           self.frame_format)
            frame_count = len(self._saved_frames)
        else:
            # temp names is filled by FileMovieWriter
            frame_count = len(self._temp_paths)
            fill_frames = _included_frames(
                frame_count, self.frame_format,
                self._temp_paths[0].parent.relative_to(self.outfile.parent))
        # Exactly one of the three playback-mode radio buttons is pre-checked.
        mode_dict = dict(once_checked='',
                         loop_checked='',
                         reflect_checked='')
        mode_dict[self.default_mode + '_checked'] = 'checked'
        interval = 1000 // self.fps
        with open(self.outfile, 'w') as of:
            of.write(JS_INCLUDE + STYLE_INCLUDE)
            of.write(DISPLAY_TEMPLATE.format(id=uuid.uuid4().hex,
                                             Nframes=frame_count,
                                             fill_frames=fill_frames,
                                             interval=interval,
                                             **mode_dict))
        # Duplicate the temporary file clean up logic from
        # FileMovieWriter.finish. We cannot call the inherited version of
        # finish because it assumes that there is a subprocess that we either
        # need to call to merge many frames together or that there is a
        # subprocess call that we need to clean up.
        if self._tmpdir:
            _log.debug('MovieWriter: clearing temporary path=%s', self._tmpdir)
            self._tmpdir.cleanup()
class Animation:
"""
A base class for Animations.
This class is not usable as is, and should be subclassed to provide needed
behavior.
.. note::
You must store the created Animation in a variable that lives as long
as the animation should run. Otherwise, the Animation object will be
garbage-collected and the animation stops.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure object used to get needed events, such as draw or resize.
event_source : object, optional
A class that can run a callback when desired events
are generated, as well as be stopped and started.
Examples include timers (see `TimedAnimation`) and file
system notifications.
blit : bool, default: False
Whether blitting is used to optimize drawing. If the backend does not
support blitting, then this parameter has no effect.
See Also
--------
FuncAnimation, ArtistAnimation
"""
    def __init__(self, fig, event_source=None, blit=False):
        # Tracks whether any frame was ever rendered; checked in __del__ to
        # warn about animations that were garbage-collected too early.
        self._draw_was_started = False
        self._fig = fig
        # Disables blitting for backends that don't support it. This
        # allows users to request it if available, but still have a
        # fallback that works if it is not.
        self._blit = blit and fig.canvas.supports_blit
        # These are the basics of the animation. The frame sequence represents
        # information for each frame of the animation and depends on how the
        # drawing is handled by the subclasses. The event source fires events
        # that cause the frame sequence to be iterated.
        self.frame_seq = self.new_frame_seq()
        self.event_source = event_source
        # Instead of starting the event source now, we connect to the figure's
        # draw_event, so that we only start once the figure has been drawn.
        self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)
        # Connect to the figure's close_event so that we don't continue to
        # fire events and try to draw to a deleted figure.
        self._close_id = self._fig.canvas.mpl_connect('close_event',
                                                      self._stop)
        if self._blit:
            self._setup_blit()
def __del__(self):
if not getattr(self, '_draw_was_started', True):
warnings.warn(
'Animation was deleted without rendering anything. This is '
'most likely not intended. To prevent deletion, assign the '
'Animation to a variable, e.g. `anim`, that exists until you '
'output the Animation using `plt.show()` or '
'`anim.save()`.'
)
    def _start(self, *args):
        """
        Starts interactive animation. Adds the draw frame command to the GUI
        handler, calls show to start the event loop.
        """
        # Do not start the event source if saving() it.
        if self._fig.canvas.is_saving():
            return
        # First disconnect our draw event handler
        self._fig.canvas.mpl_disconnect(self._first_draw_id)
        # Now do any initial draw
        self._init_draw()
        # Add our callback for stepping the animation and
        # actually start the event_source.
        self.event_source.add_callback(self._step)
        self.event_source.start()
    def _stop(self, *args):
        # On stop we disconnect all of our events.
        if self._blit:
            # _resize_id only exists when blitting was set up.
            self._fig.canvas.mpl_disconnect(self._resize_id)
        self._fig.canvas.mpl_disconnect(self._close_id)
        self.event_source.remove_callback(self._step)
        # Drop the event source so no further events can fire.
        self.event_source = None
    def save(self, filename, writer=None, fps=None, dpi=None, codec=None,
             bitrate=None, extra_args=None, metadata=None, extra_anim=None,
             savefig_kwargs=None, *, progress_callback=None):
        """
        Save the animation as a movie file by drawing every frame.

        Parameters
        ----------
        filename : str
            The output filename, e.g., :file:`mymovie.mp4`.
        writer : `MovieWriter` or str, default: :rc:`animation.writer`
            A `MovieWriter` instance to use or a key that identifies a
            class to use, such as 'ffmpeg'.
        fps : int, optional
            Movie frame rate (per second). If not set, the frame rate from the
            animation's frame interval.
        dpi : float, default: :rc:`savefig.dpi`
            Controls the dots per inch for the movie frames. Together with
            the figure's size in inches, this controls the size of the movie.
        codec : str, default: :rc:`animation.codec`.
            The video codec to use. Not all codecs are supported by a given
            `MovieWriter`.
        bitrate : int, default: :rc:`animation.bitrate`
            The bitrate of the movie, in kilobits per second. Higher values
            means higher quality movies, but increase the file size. A value
            of -1 lets the underlying movie encoder select the bitrate.
        extra_args : list of str or None, optional
            Extra command-line arguments passed to the underlying movie encoder. These
            arguments are passed last to the encoder, just before the output filename.
            The default, None, means to use :rc:`animation.[name-of-encoder]_args` for
            the builtin writers.
        metadata : dict[str, str], default: {}
            Dictionary of keys and values for metadata to include in
            the output file. Some keys that may be of use include:
            title, artist, genre, subject, copyright, srcform, comment.
        extra_anim : list, default: []
            Additional `Animation` objects that should be included
            in the saved movie file. These need to be from the same
            `.Figure` instance. Also, animation frames will
            just be simply combined, so there should be a 1:1 correspondence
            between the frames from the different animations.
        savefig_kwargs : dict, default: {}
            Keyword arguments passed to each `~.Figure.savefig` call used to
            save the individual frames.
        progress_callback : function, optional
            A callback function that will be called for every frame to notify
            the saving progress. It must have the signature ::

                def func(current_frame: int, total_frames: int) -> Any

            where *current_frame* is the current frame number and *total_frames* is the
            total number of frames to be saved. *total_frames* is set to None, if the
            total number of frames cannot be determined. Return values may exist but are
            ignored.

            Example code to write the progress to stdout::

                progress_callback = lambda i, n: print(f'Saving frame {i}/{n}')

        Notes
        -----
        *fps*, *codec*, *bitrate*, *extra_args* and *metadata* are used to
        construct a `.MovieWriter` instance and can only be passed if
        *writer* is a string. If they are passed as non-*None* and *writer*
        is a `.MovieWriter`, a `RuntimeError` will be raised.
        """
        # Only animations drawn on *this* figure can be combined into the file.
        all_anim = [self]
        if extra_anim is not None:
            all_anim.extend(anim for anim in extra_anim
                            if anim._fig is self._fig)
        # Disable "Animation was deleted without rendering" warning.
        for anim in all_anim:
            anim._draw_was_started = True
        if writer is None:
            writer = mpl.rcParams['animation.writer']
        elif (not isinstance(writer, str) and
                any(arg is not None
                    for arg in (fps, codec, bitrate, extra_args, metadata))):
            raise RuntimeError('Passing in values for arguments '
                               'fps, codec, bitrate, extra_args, or metadata '
                               'is not supported when writer is an existing '
                               'MovieWriter instance. These should instead be '
                               'passed as arguments when creating the '
                               'MovieWriter instance.')
        if savefig_kwargs is None:
            savefig_kwargs = {}
        else:
            # we are going to mutate this below
            savefig_kwargs = dict(savefig_kwargs)
        if fps is None and hasattr(self, '_interval'):
            # Convert interval in ms to frames per second
            fps = 1000. / self._interval
        # Reuse the savefig DPI for ours if none is given.
        dpi = mpl._val_or_rc(dpi, 'savefig.dpi')
        if dpi == 'figure':
            dpi = self._fig.dpi
        # Only forward explicitly-given options to the writer constructor so
        # the writer's own rcParams-based defaults still apply.
        writer_kwargs = {}
        if codec is not None:
            writer_kwargs['codec'] = codec
        if bitrate is not None:
            writer_kwargs['bitrate'] = bitrate
        if extra_args is not None:
            writer_kwargs['extra_args'] = extra_args
        if metadata is not None:
            writer_kwargs['metadata'] = metadata
        # If we have the name of a writer, instantiate an instance of the
        # registered class.
        if isinstance(writer, str):
            try:
                writer_cls = writers[writer]
            except RuntimeError:  # Raised if not available.
                writer_cls = PillowWriter  # Always available.
                _log.warning("MovieWriter %s unavailable; using Pillow "
                             "instead.", writer)
            writer = writer_cls(fps, **writer_kwargs)
        _log.info('Animation.save using %s', type(writer))
        if 'bbox_inches' in savefig_kwargs:
            _log.warning("Warning: discarding the 'bbox_inches' argument in "
                         "'savefig_kwargs' as it may cause frame size "
                         "to vary, which is inappropriate for animation.")
            savefig_kwargs.pop('bbox_inches')
        # Create a new sequence of frames for saved data. This is different
        # from new_frame_seq() to give the ability to save 'live' generated
        # frame information to be saved later.
        # TODO: Right now, after closing the figure, saving a movie won't work
        # since GUI widgets are gone. Either need to remove extra code to
        # allow for this non-existent use case or find a way to make it work.

        def _pre_composite_to_white(color):
            # Flatten an RGBA color onto a white background (for writers that
            # cannot represent transparency).
            r, g, b, a = mcolors.to_rgba(color)
            return a * np.array([r, g, b]) + 1 - a

        # canvas._is_saving = True makes the draw_event animation-starting
        # callback a no-op; canvas.manager = None prevents resizing the GUI
        # widget (both are likewise done in savefig()).
        with (writer.saving(self._fig, filename, dpi),
              cbook._setattr_cm(self._fig.canvas, _is_saving=True, manager=None)):
            if not writer._supports_transparency():
                facecolor = savefig_kwargs.get('facecolor',
                                               mpl.rcParams['savefig.facecolor'])
                if facecolor == 'auto':
                    facecolor = self._fig.get_facecolor()
                savefig_kwargs['facecolor'] = _pre_composite_to_white(facecolor)
                savefig_kwargs['transparent'] = False  # just to be safe!
            for anim in all_anim:
                anim._init_draw()  # Clear the initial frame
            frame_number = 0
            # TODO: Currently only FuncAnimation has a save_count
            # attribute. Can we generalize this to all Animations?
            save_count_list = [getattr(a, '_save_count', None)
                               for a in all_anim]
            if None in save_count_list:
                total_frames = None
            else:
                total_frames = sum(save_count_list)
            for data in zip(*[a.new_saved_frame_seq() for a in all_anim]):
                for anim, d in zip(all_anim, data):
                    # TODO: See if turning off blit is really necessary
                    anim._draw_next_frame(d, blit=False)
                    if progress_callback is not None:
                        progress_callback(frame_number, total_frames)
                        frame_number += 1
                writer.grab_frame(**savefig_kwargs)
def _step(self, *args):
"""
Handler for getting events. By default, gets the next frame in the
sequence and hands the data off to be drawn.
"""
# Returns True to indicate that the event source should continue to
# call _step, until the frame sequence reaches the end of iteration,
# at which point False will be returned.
try:
framedata = next(self.frame_seq)
self._draw_next_frame(framedata, self._blit)
return True
except StopIteration:
return False
    def new_frame_seq(self):
        """Return a new sequence of frame information."""
        # Default implementation is just an iterator over self._framedata;
        # subclasses (e.g. generator-based ones) override this.
        return iter(self._framedata)
    def new_saved_frame_seq(self):
        """Return a new sequence of saved/cached frame information."""
        # Default is the same as the regular frame sequence; subclasses that
        # cache 'live' generated frames override this for save().
        return self.new_frame_seq()
    def _draw_next_frame(self, framedata, blit):
        # Breaks down the drawing of the next frame into steps of pre- and
        # post- draw, as well as the drawing of the frame itself.
        # The order is fixed: clear (pre), render (frame), flush (post).
        self._pre_draw(framedata, blit)
        self._draw_frame(framedata)
        self._post_draw(framedata, blit)
    def _init_draw(self):
        # Initial draw to clear the frame. Also used by the blitting code
        # when a clean base is required.
        # Marks the animation as rendered so __del__ does not warn.
        self._draw_was_started = True
def _pre_draw(self, framedata, blit):
# Perform any cleaning or whatnot before the drawing of the frame.
# This default implementation allows blit to clear the frame.
if blit:
self._blit_clear(self._drawn_artists)
    def _draw_frame(self, framedata):
        # Performs actual drawing of the frame.
        # Abstract in spirit: concrete Animation subclasses must override.
        raise NotImplementedError('Needs to be implemented by subclasses to'
                                  ' actually make an animation.')
def _post_draw(self, framedata, blit):
# After the frame is rendered, this handles the actual flushing of
# the draw, which can be a direct draw_idle() or make use of the
# blitting.
if blit and self._drawn_artists:
self._blit_draw(self._drawn_artists)
else:
self._fig.canvas.draw_idle()
# The rest of the code in this class is to facilitate easy blitting
def _blit_draw(self, artists):
    """Blit *artists* onto their Axes instead of redrawing the figure."""
    # Handles blitted drawing, which renders only the artists given instead
    # of the entire figure.
    updated_ax = {a.axes for a in artists}
    # Enumerate artists to cache Axes backgrounds. We do not draw
    # artists yet to not cache foreground from plots with shared Axes
    for ax in updated_ax:
        # If we haven't cached the background for the current view of this
        # Axes object, do so now. This might not always be reliable, but
        # it's an attempt to automate the process.
        cur_view = ax._get_view()
        # ``object()`` is a sentinel that never compares equal to a real
        # view, so an uncached Axes always takes the refresh branch below.
        view, bg = self._blit_cache.get(ax, (object(), None))
        if cur_view != view:
            self._blit_cache[ax] = (
                cur_view, ax.figure.canvas.copy_from_bbox(ax.bbox))
    # Make a separate pass to draw foreground.
    for a in artists:
        a.axes.draw_artist(a)
    # After rendering all the needed artists, blit each Axes individually.
    for ax in updated_ax:
        ax.figure.canvas.blit(ax.bbox)
def _blit_clear(self, artists):
    """Restore the cached background of every Axes touched by *artists*."""
    # Work per-Axes: each artist belongs to exactly one Axes, and one
    # background restore per Axes is sufficient.
    for ax in {artist.axes for artist in artists}:
        cached = self._blit_cache.get(ax)
        if cached is None:
            # No background saved for this Axes yet; nothing to clear.
            continue
        view, background = cached
        if ax._get_view() == view:
            # View unchanged since caching: restore the saved background.
            ax.figure.canvas.restore_region(background)
        else:
            # View changed, so the cached snapshot is stale; discard it.
            self._blit_cache.pop(ax)
def _setup_blit(self):
    """Initialize the state and canvas hooks required for blitting.

    NOTE: the call order below is significant -- do not reorder.
    """
    # Setting up the blit requires: a cache of the background for the Axes
    self._blit_cache = dict()
    self._drawn_artists = []
    # _post_draw needs to be called first to initialize the renderer
    self._post_draw(None, self._blit)
    # Then we need to clear the Frame for the initial draw
    # This is typically handled in _on_resize because QT and Tk
    # emit a resize event on launch, but the macosx backend does not,
    # thus we force it here for everyone for consistency
    self._init_draw()
    # Connect to future resize events
    self._resize_id = self._fig.canvas.mpl_connect('resize_event',
                                                   self._on_resize)
def _on_resize(self, event):
    """Handle a canvas resize by pausing and invalidating the blit cache."""
    # On resize, we need to disable the resize event handling so we don't
    # get too many events. Also stop the animation events, so that
    # we're paused. Reset the cache and re-init. Set up an event handler
    # to catch once the draw has actually taken place.
    self._fig.canvas.mpl_disconnect(self._resize_id)
    self.event_source.stop()
    self._blit_cache.clear()
    self._init_draw()
    # _end_redraw re-enables the animation and resize handling once the
    # canvas has actually been redrawn.
    self._resize_id = self._fig.canvas.mpl_connect('draw_event',
                                                   self._end_redraw)
def _end_redraw(self, event):
    """Finish a resize-triggered redraw and restore normal event handling."""
    # Now that the redraw has happened, do the post draw flushing and
    # blit handling. Then re-enable all of the original events.
    self._post_draw(None, False)
    self.event_source.start()
    # Swap the one-shot draw_event handler back for the resize handler.
    self._fig.canvas.mpl_disconnect(self._resize_id)
    self._resize_id = self._fig.canvas.mpl_connect('resize_event',
                                                   self._on_resize)
def to_html5_video(self, embed_limit=None):
    """
    Convert the animation to an HTML5 ``<video>`` tag.

    This saves the animation as an h264 video, encoded in base64
    directly into the HTML5 video tag. This respects :rc:`animation.writer`
    and :rc:`animation.bitrate`. This also makes use of the
    *interval* to control the speed, and uses the *repeat*
    parameter to decide whether to loop.

    Parameters
    ----------
    embed_limit : float, optional
        Limit, in MB, of the returned animation. No animation is created
        if the limit is exceeded.
        Defaults to :rc:`animation.embed_limit` = 20.0.

    Returns
    -------
    str
        An HTML5 video tag with the animation embedded as base64 encoded
        h264 video.
        If the *embed_limit* is exceeded, this returns the string
        "Video too large to embed."
    """
    VIDEO_TAG = r'''<video {size} {options}>
  <source type="video/mp4" src="data:video/mp4;base64,{video}">
  Your browser does not support the video tag.
</video>'''
    # Cache the rendering of the video as HTML
    if not hasattr(self, '_base64_video'):
        # Save embed limit, which is given in MB
        embed_limit = mpl._val_or_rc(embed_limit, 'animation.embed_limit')
        # Convert from MB to bytes
        embed_limit *= 1024 * 1024
        # Can't open a NamedTemporaryFile twice on Windows, so use a
        # TemporaryDirectory instead.
        with TemporaryDirectory() as tmpdir:
            path = Path(tmpdir, "temp.m4v")
            # We create a writer manually so that we can get the
            # appropriate size for the tag
            Writer = writers[mpl.rcParams['animation.writer']]
            writer = Writer(codec='h264',
                            bitrate=mpl.rcParams['animation.bitrate'],
                            fps=1000. / self._interval)
            self.save(str(path), writer=writer)
            # Now open and base64 encode.
            vid64 = base64.encodebytes(path.read_bytes())
        vid_len = len(vid64)
        if vid_len >= embed_limit:
            # Too large: leave _base64_video unset; the hasattr check
            # below then selects the fallback string.
            _log.warning(
                "Animation movie is %s bytes, exceeding the limit of %s. "
                "If you're sure you want a large animation embedded, set "
                "the animation.embed_limit rc parameter to a larger value "
                "(in MB).", vid_len, embed_limit)
        else:
            self._base64_video = vid64.decode('ascii')
            self._video_size = 'width="{}" height="{}"'.format(
                *writer.frame_size)
    # If we exceeded the size, this attribute won't exist
    if hasattr(self, '_base64_video'):
        # Default HTML5 options are to autoplay and display video controls
        options = ['controls', 'autoplay']
        # If we're set to repeat, make it loop
        if getattr(self, '_repeat', False):
            options.append('loop')
        return VIDEO_TAG.format(video=self._base64_video,
                                size=self._video_size,
                                options=' '.join(options))
    else:
        return 'Video too large to embed.'
def to_jshtml(self, fps=None, embed_frames=True, default_mode=None):
    """
    Generate HTML representation of the animation.

    Parameters
    ----------
    fps : int, optional
        Movie frame rate (per second). If not set, the frame rate from
        the animation's frame interval.
    embed_frames : bool, optional
    default_mode : str, optional
        What to do when the animation ends. Must be one of ``{'loop',
        'once', 'reflect'}``. Defaults to ``'loop'`` if the *repeat*
        parameter is True, otherwise ``'once'``.

    Returns
    -------
    str
        An HTML representation of the animation embedded as a js object as
        produced with the `.HTMLWriter`.

    NOTE: the result is cached on first call; *fps*, *embed_frames* and
    *default_mode* passed to later calls are ignored.
    """
    if fps is None and hasattr(self, '_interval'):
        # Convert interval in ms to frames per second
        fps = 1000 / self._interval
    # If we're not given a default mode, choose one base on the value of
    # the _repeat attribute
    if default_mode is None:
        default_mode = 'loop' if getattr(self, '_repeat',
                                         False) else 'once'
    if not hasattr(self, "_html_representation"):
        # Can't open a NamedTemporaryFile twice on Windows, so use a
        # TemporaryDirectory instead.
        with TemporaryDirectory() as tmpdir:
            path = Path(tmpdir, "temp.html")
            writer = HTMLWriter(fps=fps,
                                embed_frames=embed_frames,
                                default_mode=default_mode)
            self.save(str(path), writer=writer)
            self._html_representation = path.read_text()
    return self._html_representation
def _repr_html_(self):
    """IPython display hook for rendering.

    Returns HTML for the rc-configured format, or None (no inline
    rendering) for any other :rc:`animation.html` value.
    """
    fmt = mpl.rcParams['animation.html']
    if fmt == 'html5':
        return self.to_html5_video()
    if fmt == 'jshtml':
        return self.to_jshtml()
def pause(self):
    """Pause the animation."""
    self.event_source.stop()
    if not self._blit:
        return
    # Clear the animated flag so a full canvas redraw shows the artists.
    for artist in self._drawn_artists:
        artist.set_animated(False)
def resume(self):
    """Resume the animation."""
    self.event_source.start()
    if not self._blit:
        return
    # Re-mark the artists as animated so blitting skips them on redraws.
    for artist in self._drawn_artists:
        artist.set_animated(True)
class TimedAnimation(Animation):
    """
    `Animation` subclass for time-based animation.

    A new frame is drawn every *interval* milliseconds.

    .. note::
        You must store the created Animation in a variable that lives as long
        as the animation should run. Otherwise, the Animation object will be
        garbage-collected and the animation stops.

    Parameters
    ----------
    fig : `~matplotlib.figure.Figure`
        The figure object used to get needed events, such as draw or resize.
    interval : int, default: 200
        Delay between frames in milliseconds.
    repeat_delay : int, default: 0
        The delay in milliseconds between consecutive animation runs, if
        *repeat* is True.
    repeat : bool, default: True
        Whether the animation repeats when the sequence of frames is completed.
    blit : bool, default: False
        Whether blitting is used to optimize drawing.
    """

    def __init__(self, fig, interval=200, repeat_delay=0, repeat=True,
                 event_source=None, *args, **kwargs):
        self._interval = interval
        # Undocumented support for repeat_delay = None as backcompat.
        self._repeat_delay = 0 if repeat_delay is None else repeat_delay
        self._repeat = repeat
        # Without an explicit event source, drive the animation from a new
        # timer on the figure's canvas.  Passing one in lets several
        # animations share a timer and stay in sync.
        if event_source is None:
            event_source = fig.canvas.new_timer(interval=self._interval)
        super().__init__(fig, event_source=event_source, *args, **kwargs)

    def _step(self, *args):
        """Handler for getting events."""
        # Delegate the frame advance to Animation._step.  While frames
        # remain, make sure the timer runs at the frame interval (it may
        # currently hold the repeat delay) and keep going.
        if super()._step(*args):
            self.event_source.interval = self._interval
            return True
        if self._repeat:
            # Sequence exhausted but repeating: rewind and wait out the
            # repeat delay before the next run starts.
            self._init_draw()
            self.frame_seq = self.new_frame_seq()
            self.event_source.interval = self._repeat_delay
            return True
        # We are done with the animation. Call pause to remove animated
        # flags from artists that were using blitting, then detach from
        # the canvas events.
        self.pause()
        if self._blit:
            # Remove the resize callback if we were blitting
            self._fig.canvas.mpl_disconnect(self._resize_id)
        self._fig.canvas.mpl_disconnect(self._close_id)
        self.event_source = None
        return False
class ArtistAnimation(TimedAnimation):
    """
    `TimedAnimation` subclass that creates an animation by using a fixed
    set of `.Artist` objects.

    Before creating an instance, all plotting should have taken place
    and the relevant artists saved.

    .. note::
        You must store the created Animation in a variable that lives as long
        as the animation should run. Otherwise, the Animation object will be
        garbage-collected and the animation stops.

    Parameters
    ----------
    fig : `~matplotlib.figure.Figure`
        The figure object used to get needed events, such as draw or resize.
    artists : list
        Each list entry is a collection of `.Artist` objects that are made
        visible on the corresponding frame. Other artists are made invisible.
    interval : int, default: 200
        Delay between frames in milliseconds.
    repeat_delay : int, default: 0
        The delay in milliseconds between consecutive animation runs, if
        *repeat* is True.
    repeat : bool, default: True
        Whether the animation repeats when the sequence of frames is completed.
    blit : bool, default: False
        Whether blitting is used to optimize drawing.
    """

    def __init__(self, fig, artists, *args, **kwargs):
        # Artists drawn in the most recent frame; needed so they can be
        # hidden or blit-cleared before the next frame appears.
        self._drawn_artists = []
        # The per-frame artist lists double as the frame data that the
        # base-class machinery iterates over.
        self._framedata = artists
        super().__init__(fig, *args, **kwargs)

    def _init_draw(self):
        super()._init_draw()
        # Hide every artist that participates in *any* frame, and collect
        # the set of figures that consequently need a redraw.
        dirty_figures = set()
        for frame in self.new_frame_seq():
            for artist in frame:
                artist.set_visible(False)
                artist.set_animated(self._blit)
                dirty_figures.add(artist.get_figure())
        # Flush the needed figures
        for figure in dirty_figures:
            figure.canvas.draw_idle()

    def _pre_draw(self, framedata, blit):
        """Clears artists from the last frame."""
        if blit:
            # Let blit handle clearing
            self._blit_clear(self._drawn_artists)
        else:
            # Otherwise, make all the artists from the previous frame invisible
            for artist in self._drawn_artists:
                artist.set_visible(False)

    def _draw_frame(self, artists):
        # Remember this frame's artists for the other steps (esp. blitting),
        # then make them visible.
        self._drawn_artists = artists
        for artist in artists:
            artist.set_visible(True)
class FuncAnimation(TimedAnimation):
    """
    `TimedAnimation` subclass that makes an animation by repeatedly calling
    a function *func*.

    .. note::
        You must store the created Animation in a variable that lives as long
        as the animation should run. Otherwise, the Animation object will be
        garbage-collected and the animation stops.

    Parameters
    ----------
    fig : `~matplotlib.figure.Figure`
        The figure object used to get needed events, such as draw or resize.
    func : callable
        The function to call at each frame.  The first argument will
        be the next value in *frames*.  Any additional positional
        arguments can be supplied using `functools.partial` or via the *fargs*
        parameter.

        The required signature is::

            def func(frame, *fargs) -> iterable_of_artists

        It is often more convenient to provide the arguments using
        `functools.partial`. In this way it is also possible to pass keyword
        arguments. To pass a function with both positional and keyword
        arguments, set all arguments as keyword arguments, just leaving the
        *frame* argument unset::

            def func(frame, art, *, y=None):
                ...

            ani = FuncAnimation(fig, partial(func, art=ln, y='foo'))

        If ``blit == True``, *func* must return an iterable of all artists
        that were modified or created. This information is used by the blitting
        algorithm to determine which parts of the figure have to be updated.
        The return value is unused if ``blit == False`` and may be omitted in
        that case.
    frames : iterable, int, generator function, or None, optional
        Source of data to pass *func* and each frame of the animation

        - If an iterable, then simply use the values provided.  If the
          iterable has a length, it will override the *save_count* kwarg.
        - If an integer, then equivalent to passing ``range(frames)``
        - If a generator function, then must have the signature::

             def gen_function() -> obj

        - If *None*, then equivalent to passing ``itertools.count``.

        In all of these cases, the values in *frames* is simply passed through
        to the user-supplied *func* and thus can be of any type.
    init_func : callable, optional
        A function used to draw a clear frame. If not given, the results of
        drawing from the first item in the frames sequence will be used. This
        function will be called once before the first frame.

        The required signature is::

            def init_func() -> iterable_of_artists

        If ``blit == True``, *init_func* must return an iterable of artists
        to be re-drawn. This information is used by the blitting algorithm to
        determine which parts of the figure have to be updated.  The return
        value is unused if ``blit == False`` and may be omitted in that case.
    fargs : tuple or None, optional
        Additional arguments to pass to each call to *func*. Note: the use of
        `functools.partial` is preferred over *fargs*. See *func* for details.
    save_count : int, optional
        Fallback for the number of values from *frames* to cache. This is
        only used if the number of frames cannot be inferred from *frames*,
        i.e. when it's an iterator without length or a generator.
    interval : int, default: 200
        Delay between frames in milliseconds.
    repeat_delay : int, default: 0
        The delay in milliseconds between consecutive animation runs, if
        *repeat* is True.
    repeat : bool, default: True
        Whether the animation repeats when the sequence of frames is completed.
    blit : bool, default: False
        Whether blitting is used to optimize drawing.  Note: when using
        blitting, any animated artists will be drawn according to their zorder;
        however, they will be drawn on top of any previous artists, regardless
        of their zorder.
    cache_frame_data : bool, default: True
        Whether frame data is cached.  Disabling cache might be helpful when
        frames contain large objects.
    """

    def __init__(self, fig, func, frames=None, init_func=None, fargs=None,
                 save_count=None, *, cache_frame_data=True, **kwargs):
        if fargs:
            self._args = fargs
        else:
            self._args = ()
        self._func = func
        self._init_func = init_func

        # Amount of framedata to keep around for saving movies. This is only
        # used if we don't know how many frames there will be: in the case
        # of no generator or in the case of a callable.
        self._save_count = save_count
        # Set up a function that creates a new iterable when needed. If nothing
        # is passed in for frames, just use itertools.count, which will just
        # keep counting from 0. A callable passed in for frames is assumed to
        # be a generator. An iterable will be used as is, and anything else
        # will be treated as a number of frames.
        if frames is None:
            self._iter_gen = itertools.count
        elif callable(frames):
            self._iter_gen = frames
        elif np.iterable(frames):
            if kwargs.get('repeat', True):
                self._tee_from = frames

                def iter_frames(frames=frames):
                    # Re-tee on each restart so even a one-shot iterator can
                    # be replayed for repeated runs.
                    this, self._tee_from = itertools.tee(self._tee_from, 2)
                    yield from this

                self._iter_gen = iter_frames
            else:
                self._iter_gen = lambda: iter(frames)
            if hasattr(frames, '__len__'):
                self._save_count = len(frames)
                if save_count is not None:
                    _api.warn_external(
                        f"You passed in an explicit {save_count=} "
                        "which is being ignored in favor of "
                        f"{len(frames)=}."
                    )
        else:
            self._iter_gen = lambda: iter(range(frames))
            self._save_count = frames
            if save_count is not None:
                _api.warn_external(
                    f"You passed in an explicit {save_count=} which is being "
                    f"ignored in favor of {frames=}."
                )
        if self._save_count is None and cache_frame_data:
            # We only get here when the length of *frames* could NOT be
            # inferred (an open-ended iterator or generator) and no explicit
            # *save_count* was given, so caching would grow without bound.
            _api.warn_external(
                f"{frames=!r} which we cannot infer the length of, "
                "did not pass an explicit *save_count* "
                f"and passed {cache_frame_data=}. To avoid a possibly "
                "unbounded cache, frame data caching has been disabled. "
                "To suppress this warning either pass "
                "`cache_frame_data=False` or `save_count=MAX_FRAMES`."
            )
            cache_frame_data = False

        self._cache_frame_data = cache_frame_data

        # Needs to be initialized so the draw functions work without checking
        self._save_seq = []

        super().__init__(fig, **kwargs)

        # Need to reset the saved seq, since right now it will contain data
        # for a single frame from init, which is not what we want.
        self._save_seq = []

    def new_frame_seq(self):
        """Return a new frame sequence from the generating function."""
        return self._iter_gen()

    def new_saved_frame_seq(self):
        """Return an iterator over the frames cached for saving."""
        # If there are no saved frames, generate a new frame sequence and
        # take the first save_count entries in it.
        if self._save_seq:
            # While iterating we are going to update _save_seq,
            # so make a copy to safely iterate over.
            self._old_saved_seq = list(self._save_seq)
            return iter(self._old_saved_seq)
        else:
            if self._save_count is None:
                frame_seq = self.new_frame_seq()

                def gen():
                    try:
                        while True:
                            yield next(frame_seq)
                    except StopIteration:
                        pass

                return gen()
            else:
                return itertools.islice(self.new_frame_seq(), self._save_count)

    def _init_draw(self):
        super()._init_draw()
        # Initialize the drawing either using the given init_func or by
        # calling the draw function with the first item of the frame sequence.
        # For blitting, the init_func should return a sequence of modified
        # artists.
        if self._init_func is None:
            try:
                frame_data = next(self.new_frame_seq())
            except StopIteration:
                # we can't start the iteration, it may have already been
                # exhausted by a previous save or just be 0 length.
                # warn and bail.
                warnings.warn(
                    "Can not start iterating the frames for the initial draw. "
                    "This can be caused by passing in a 0 length sequence "
                    "for *frames*.\n\n"
                    "If you passed *frames* as a generator "
                    "it may be exhausted due to a previous display or save."
                )
                return
            self._draw_frame(frame_data)
        else:
            self._drawn_artists = self._init_func()
            if self._blit:
                if self._drawn_artists is None:
                    raise RuntimeError('The init_func must return a '
                                       'sequence of Artist objects.')
                for a in self._drawn_artists:
                    a.set_animated(self._blit)
        # The init draw may have cached a frame; start saving from scratch.
        self._save_seq = []

    def _draw_frame(self, framedata):
        if self._cache_frame_data:
            # Save the data for potential saving of movies, keeping at most
            # the last _save_count entries.
            self._save_seq.append(framedata)
            self._save_seq = self._save_seq[-self._save_count:]

        # Call the func with framedata and args. If blitting is desired,
        # func needs to return a sequence of any artists that were modified.
        self._drawn_artists = self._func(framedata, *self._args)

        if self._blit:
            err = RuntimeError('The animation function must return a sequence '
                               'of Artist objects.')
            try:
                # check if a sequence
                iter(self._drawn_artists)
            except TypeError:
                raise err from None

            # check each item if it's artist
            for i in self._drawn_artists:
                if not isinstance(i, mpl.artist.Artist):
                    raise err

            self._drawn_artists = sorted(self._drawn_artists,
                                         key=lambda x: x.get_zorder())

            for a in self._drawn_artists:
                a.set_animated(self._blit)
def _validate_grabframe_kwargs(savefig_kwargs):
    """Reject savefig keyword arguments that would break frame grabbing.

    Raises ValueError if :rc:`savefig.bbox` is 'tight', and TypeError for
    keyword arguments that would change frame geometry or format mid-movie.
    """
    # A 'tight' bbox recomputes the extent per frame, so frame sizes could
    # vary -- inappropriate for a movie stream.
    if mpl.rcParams['savefig.bbox'] == 'tight':
        raise ValueError(
            f"{mpl.rcParams['savefig.bbox']=} must not be 'tight' as it "
            "may cause frame size to vary, which is inappropriate for animation."
        )
    for forbidden in ('dpi', 'bbox_inches', 'format'):
        if forbidden in savefig_kwargs:
            raise TypeError(
                f"grab_frame got an unexpected keyword argument {forbidden!r}"
            )
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@lib@matplotlib@animation.py@.PATH_END.py
|
{
"filename": "rms_norm.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/examples/ffi/src/jax_ffi_example/rms_norm.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example demontrating the basic end-to-end use of the JAX FFI.
This example is exactly the same as the one in the `FFI tutorial
<https://jax.readthedocs.io/en/latest/ffi.html>`, so more details can be found
on that page. But, the high level summary is that we implement our custom
extension in ``rms_norm.cc``, then call it usin ``jax.extend.ffi.ffi_call`` in
this module. The behavior under autodiff is implemented using
``jax.custom_vjp``.
"""
from functools import partial
import numpy as np
import jax
import jax.extend as jex
import jax.numpy as jnp
from jax_ffi_example import _rms_norm
# Register every FFI target exported by the compiled extension with JAX, so
# that the ``ffi_call`` invocations below can look them up by name.
for name, target in _rms_norm.registrations().items():
    jex.ffi.register_ffi_target(name, target)
@partial(jax.custom_vjp, nondiff_argnums=(1,))
def rms_norm(x, eps=1e-5):
    """RMS-normalize ``x`` via the custom C++ FFI kernel.

    Parameters
    ----------
    x : jax.Array
        Input array; must have dtype ``float32`` (the only dtype the
        kernel implements).
    eps : float, default 1e-5
        Static (non-differentiable) epsilon forwarded to the kernel.

    Returns
    -------
    jax.Array
        Output with the same shape and dtype as ``x``.

    Raises
    ------
    ValueError
        If ``x`` is not ``float32``.
    """
    # We only implemented the `float32` version of this function, so we start by
    # checking the dtype. This check isn't strictly necessary because type
    # checking is also performed by the FFI when decoding input and output
    # buffers, but it can be useful to check types in Python to raise more
    # informative errors.
    if x.dtype != jnp.float32:
        raise ValueError("Only the float32 dtype is implemented by rms_norm")

    # In this case, the output of our FFI function is just a single array with the
    # same shape and dtype as the input.
    out_type = jax.ShapeDtypeStruct(x.shape, x.dtype)

    # Note that here we use `numpy` (not `jax.numpy`) to specify a dtype for
    # the attribute `eps`. Our FFI function expects this to have the C++ `float`
    # type (which corresponds to numpy's `float32` type), and it must be a
    # static parameter (i.e. not a JAX array).
    return jex.ffi.ffi_call(
        # The target name must be the same string as we used to register the target
        # above in `register_ffi_target`
        "rms_norm",
        out_type,
        vmap_method="broadcast_all",
    )(x, eps=np.float32(eps))
def rms_norm_fwd(x, eps=1e-5):
    """Forward rule for `rms_norm`: the primal output plus VJP residuals."""
    # The forward kernel returns the normalized output together with the
    # per-row normalization factors (shape = x.shape without the last axis).
    out_types = (
        jax.ShapeDtypeStruct(x.shape, x.dtype),
        jax.ShapeDtypeStruct(x.shape[:-1], x.dtype),
    )
    y, res = jex.ffi.ffi_call(
        "rms_norm_fwd",
        out_types,
        vmap_method="broadcast_all",
    )(x, eps=np.float32(eps))
    # Residuals for the backward pass: the factors and the primal input.
    return y, (res, x)
def rms_norm_bwd(eps, res, ct):
    """Backward rule for `rms_norm`; *eps* is static and unused here."""
    del eps
    factors, x = res
    assert factors.shape == ct.shape[:-1]
    assert x.shape == ct.shape
    # A single cotangent is produced, matching rms_norm's single diff arg.
    grad = jex.ffi.ffi_call(
        "rms_norm_bwd",
        jax.ShapeDtypeStruct(ct.shape, ct.dtype),
        vmap_method="broadcast_all",
    )(factors, x, ct)
    return (grad,)
# Wire the custom forward/backward rules into the custom_vjp primitive.
rms_norm.defvjp(rms_norm_fwd, rms_norm_bwd)
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@examples@ffi@src@jax_ffi_example@rms_norm.py@.PATH_END.py
|
{
"filename": "test_constructors.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/arrays/timedeltas/test_constructors.py",
"type": "Python"
}
|
import numpy as np
import pytest
from pandas.core.arrays import TimedeltaArray
class TestTimedeltaArrayConstructor:
    """Validation tests for ``TimedeltaArray._from_sequence``."""

    def test_other_type_raises(self):
        """A bool ndarray cannot be converted to timedelta64[ns]."""
        msg = r"dtype bool cannot be converted to timedelta64\[ns\]"
        with pytest.raises(TypeError, match=msg):
            TimedeltaArray._from_sequence(np.array([1, 2, 3], dtype="bool"))

    def test_incorrect_dtype_raises(self):
        """Non-timedelta64 ``dtype=`` arguments are rejected with ValueError."""
        msg = "dtype 'category' is invalid, should be np.timedelta64 dtype"
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence(
                np.array([1, 2, 3], dtype="i8"), dtype="category"
            )

        msg = "dtype 'int64' is invalid, should be np.timedelta64 dtype"
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence(
                np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("int64")
            )

        msg = r"dtype 'datetime64\[ns\]' is invalid, should be np.timedelta64 dtype"
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence(
                np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("M8[ns]")
            )

        msg = (
            r"dtype 'datetime64\[us, UTC\]' is invalid, should be np.timedelta64 dtype"
        )
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence(
                np.array([1, 2, 3], dtype="i8"), dtype="M8[us, UTC]"
            )

        # Timedelta dtype with an unsupported unit (calendar units) also fails.
        msg = "Supported timedelta64 resolutions are 's', 'ms', 'us', 'ns'"
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence(
                np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("m8[Y]")
            )

    def test_copy(self):
        """``copy=False`` keeps the input buffer; ``copy=True`` detaches it."""
        data = np.array([1, 2, 3], dtype="m8[ns]")
        arr = TimedeltaArray._from_sequence(data, copy=False)
        assert arr._ndarray is data

        arr = TimedeltaArray._from_sequence(data, copy=True)
        assert arr._ndarray is not data
        assert arr._ndarray.base is not data

    def test_from_sequence_dtype(self):
        """``dtype=object`` is rejected even for an empty sequence."""
        msg = "dtype 'object' is invalid, should be np.timedelta64 dtype"
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence([], dtype=object)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@arrays@timedeltas@test_constructors.py@.PATH_END.py
|
{
"filename": "hnsw.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/library/python/hnsw/hnsw/hnsw.py",
"type": "Python"
}
|
import sys
import os
from six import iteritems
from enum import IntEnum
from contextlib import contextmanager
import json
# ``imp.load_dynamic`` is gone in Python 3; build an importlib-based
# equivalent for modern interpreters while keeping the Python 2 fallback.
if sys.version_info >= (3,):
    import importlib.util

    def load_dynamic(name, path):
        """Load and execute the module at *path* under the name *name*."""
        module_spec = importlib.util.spec_from_file_location(name, path)
        loaded = importlib.util.module_from_spec(module_spec)
        module_spec.loader.exec_module(loaded)
        return loaded
else:
    from imp import load_dynamic
def get_so_paths(dir_name):
    """Return the paths of all shared-library files (.so/.pyd) in *dir_name*.

    *dir_name* is resolved relative to this module's directory; a missing
    directory yields an empty list.
    """
    base = os.path.join(os.path.dirname(__file__), dir_name)
    if not os.path.isdir(base):
        return []
    return [
        os.path.join(base, entry)
        for entry in os.listdir(base)
        if entry.split('.')[-1] in ('so', 'pyd')
    ]
def get_hnsw_bin_module():
    """Locate the compiled ``_hnsw`` extension module.

    Resolution order: an already-imported ``_hnsw``, then any .so/.pyd file
    next to this module, and finally a regular package-relative import.
    """
    if '_hnsw' in sys.modules:
        return sys.modules['_hnsw']
    for candidate in get_so_paths('./'):
        try:
            loaded_hnsw = load_dynamic('_hnsw', candidate)
        except ImportError:
            # Not a loadable extension (e.g. wrong ABI) -- try the next one.
            continue
        sys.modules['hnsw._hnsw'] = loaded_hnsw
        return loaded_hnsw
    from . import _hnsw
    return _hnsw
@contextmanager
def log_fixup():
    """Temporarily route the native hnsw library's logging to stdout."""
    # ``_hnsw`` is the compiled extension module resolved at import time.
    _hnsw._set_logger(sys.stdout)
    try:
        yield
    finally:
        # Always restore the default logger, even if the body raised.
        _hnsw._reset_logger()
class EDistance(IntEnum):
    """Distance/similarity functions supported by the native hnsw index."""
    DotProduct = 0
    L1 = 1
    L2Sqr = 2
    PairVectorDistance = 3
class EVectorComponentType(IntEnum):
    """Scalar element types a vector storage can hold."""
    Float = 0
    I8 = 1
    I32 = 2
# Resolve the compiled extension once at import time; everything below
# dispatches into it.
_hnsw = get_hnsw_bin_module()

# Re-export the native exception type at package level.
HnswException = _hnsw.HnswException

# Dispatch tables mapping an element type to the matching native class
# or function for that dtype.
_DenseVectorStorage = {
    EVectorComponentType.Float: _hnsw._DenseFloatVectorStorage,
    EVectorComponentType.I8: _hnsw._DenseI8VectorStorage,
    EVectorComponentType.I32: _hnsw._DenseI32VectorStorage
}
_HnswDenseVectorIndex = {
    EVectorComponentType.Float: _hnsw._HnswDenseFloatVectorIndex,
    EVectorComponentType.I8: _hnsw._HnswDenseI8VectorIndex,
    EVectorComponentType.I32: _hnsw._HnswDenseI32VectorIndex
}
_transform_mobius = {
    EVectorComponentType.Float: _hnsw._transform_mobius_float,
    EVectorComponentType.I8: _hnsw._transform_mobius_i8,
    EVectorComponentType.I32: _hnsw._transform_mobius_i32
}
_OnlineHnswDenseVectorIndex = {
    EVectorComponentType.Float: _hnsw._OnlineHnswDenseFloatVectorIndex,
    EVectorComponentType.I8: _hnsw._OnlineHnswDenseI8VectorIndex,
    EVectorComponentType.I32: _hnsw._OnlineHnswDenseI32VectorIndex,
}
class Pool:
    """
    Pool is a storage of vectors
    """
    def __init__(self, vectors_path, dtype, dimension, vectors_bin_data=None):
        """
        Pool is a storage of vectors. You can create it from a row-major
        binary file or from binary data of vectors. Exactly one of
        *vectors_path* and *vectors_bin_data* must be provided.

        Parameters
        ----------
        vectors_path : string or None
            Path to binary file with vectors.
        dtype : EVectorComponentType
            Type of vectors.
        dimension : int
            Dimension of vectors.
        vectors_bin_data : bytes or None
            Binary data of vectors.

        Raises
        ------
        ValueError
            If both or neither of *vectors_path* and *vectors_bin_data*
            are specified.
        """
        self.vectors_path = vectors_path
        self.dtype = dtype
        self.dimension = dimension
        # Explicit validation instead of ``assert`` so the check still runs
        # when Python is started with -O (assertions stripped).
        if (vectors_bin_data is None) == (vectors_path is None):
            raise ValueError(
                "Exactly one of vectors_path and vectors_bin_data must be "
                "specified")
        if vectors_path is not None:
            self._storage = _DenseVectorStorage[dtype](vectors_path, dimension)
            self._data = None
        else:
            self._storage = _DenseVectorStorage[dtype](None, dimension, vectors_bin_data)
            self._data = vectors_bin_data

    @classmethod
    def from_file(cls, vectors_path, dtype, dimension):
        """
        Create pool from binary file.

        Parameters
        ----------
        vectors_path : string
            Path to binary file with vectors.
        dtype : EVectorComponentType
            Type of vectors.
        dimension : int
            Dimension of vectors.
        """
        return Pool(vectors_path, dtype, dimension, None)

    @classmethod
    def from_bytes(cls, vectors_bin_data, dtype, dimension):
        """
        Create pool from binary data.

        Parameters
        ----------
        vectors_bin_data : bytes
            Binary data of vectors.
        dtype : EVectorComponentType
            Type of vectors.
        dimension : int
            Dimension of vectors.
        """
        return Pool(None, dtype, dimension, vectors_bin_data)

    def get_item(self, id):
        """
        Get item from storage by id.

        Parameters
        ----------
        id : int
            Index of item in storage.

        Returns
        -------
        item : numpy.ndarray
        """
        return self._storage._get_item(id)

    def get_num_items(self):
        """
        Get the number of items in storage.

        Returns
        -------
        num_items : int
        """
        return self._storage._get_num_items()
def transform_mobius(pool):
    """
    Transform pool for fast dot product search on HNSW graph
    https://papers.nips.cc/paper/9032-mobius-transformation-for-fast-inner-product-search-on-graph.pdf

    Parameters
    ----------
    pool : Pool

    Returns
    -------
    transformed_pool : Pool
    """
    # Start from an empty float pool of the same dimension, then swap in the
    # storage produced by the native Mobius transform for the source dtype.
    result = Pool.from_bytes(bytes(0), EVectorComponentType.Float, pool.dimension)
    result._storage = _transform_mobius[pool.dtype](pool._storage)
    return result
class Hnsw:
"""
Class for building, loading and working with Hierarchical Navigable Small World index.
"""
def __init__(self):
"""
Create object for working with HNSW.
"""
self._index = None
self._data = None
def build(self, pool, distance, max_neighbors=None, search_neighborhood_size=None, num_exact_candidates=None,
batch_size=None, upper_level_batch_size=None, level_size_decay=None, num_threads=None, verbose=False,
report_progress=True, snapshot_file=None, snapshot_interval=None):
"""
Build index with given options.
Parameters
----------
pool : Pool
Pool of vectors for which index will be built.
distance : EDistance
Distance that should be used for finding nearest vectors.
max_neighbors : int (default=32)
Maximum number of neighbors that every item can be connected with.
search_neighborhood_size : int (default=300)
Search neighborhood size for ANN-search.
Higher values improve search quality in expense of building time.
num_exact_candidates : int (default=100)
Number of nearest vectors to take from batch.
Higher values improve search quality in expense of building time.
batch_size : int (default=1000)
Number of items that added to graph on each step of algorithm.
upper_level_batch_size : int (default=40000)
Batch size for building upper levels.
level_size_decay : int (default=max_neighbors/2)
Base of exponent for decaying level sizes.
num_threads : int (default=number of CPUs)
Number of threads for building index.
report_progress : bool (default=True)
Print progress of building.
verbose : bool (default=False)
Print additional information about time of building.
snapshot_file : string (default=None)
Path for saving snapshots during the index building.
snapshot_interval : int (default=600)
Interval between saving snapshots (seconds).
Snapshot is saved after building each level also.
"""
params = {}
not_params = ["not_params", "self", "params", "__class__", "pool", "distance"]
for key, value in iteritems(locals()):
if key not in not_params and value is not None:
params[key] = value
self._index = _HnswDenseVectorIndex[pool.dtype](pool._storage, distance)
with log_fixup():
self._index._build(json.dumps(params))
def _check_index(self):
if self._index is None:
raise HnswException("Index is not built and not loaded")
def save(self, index_path):
    """
    Persist the built or loaded index to a file.

    Parameters
    ----------
    index_path : string
        Path to file for saving index.

    Raises
    ------
    HnswException
        If no index has been built or loaded yet.
    """
    self._check_index()
    self._index._save(index_path)
def load(self, index_path, pool, distance):
    """
    Load an index from a file for the given pool of vectors.

    Parameters
    ----------
    index_path : string
        Path to file for loading index.
    pool : Pool
        Pool of vectors for which index will be loaded.
    distance : EDistance
        Distance that should be used for finding nearest vectors.
    """
    self._index = _HnswDenseVectorIndex[pool.dtype](pool._storage, distance)
    self._index._load(index_path)
    # Loading from a file leaves no in-memory copy of the raw index bytes.
    self._data = None
def load_from_bytes(self, index_data, pool, distance):
    """
    Load an index from an in-memory binary blob for the given pool.

    Parameters
    ----------
    index_data : bytes
        Index binary data.
    pool : Pool
        Pool of vectors for which index will be loaded.
    distance : EDistance
        Distance that should be used for finding nearest vectors.
    """
    self._index = _HnswDenseVectorIndex[pool.dtype](pool._storage, distance)
    self._index._load_from_bytes(index_data)
    # Keep a reference to the blob so its lifetime covers the index.
    self._data = index_data
def get_nearest(self, query, top_size, search_neighborhood_size, distance_calc_limit=0):
    """
    Return approximate nearest neighbors of ``query`` from the index.

    Parameters
    ----------
    query : list or numpy.ndarray
        Vector for which nearest neighbors should be found.
    top_size : int
        Required number of neighbors.
    search_neighborhood_size : int
        Search neighborhood size for ANN-search.
        Higher values improve search quality in expense of search time.
        It should be equal or greater than top_size.
    distance_calc_limit : int (default=0)
        Limit of distance calculation.
        To guarantee satisfactory search time at the expense of quality.
        0 is equivalent to no limit.

    Returns
    -------
    neighbors : list of tuples (id, distance)

    Raises
    ------
    HnswException
        If no index has been built or loaded yet.
    """
    self._check_index()
    return self._index._get_nearest(query, top_size, search_neighborhood_size, distance_calc_limit)
class HnswEstimator:
    """
    Class for building, loading and working with Hierarchical Navigable Small World index with SciKit-Learn
    Estimator compatible interface.
    Mostly drop-in replacement for sklearn.neighbors.NearestNeighbors (except for some parameters)
    """
    def __init__(self, n_neighbors=5,
                 distance=EDistance.DotProduct, max_neighbors=32, search_neighborhood_size=300,
                 num_exact_candidates=100, batch_size=1000, upper_level_batch_size=40000,
                 level_size_decay=None):
        """
        Parameters
        ----------
        n_neighbors : int, default=5
            Number of neighbors to use by default for kneighbors queries.
        distance : EDistance
            Distance that should be used for finding nearest vectors.
        max_neighbors : int (default=32)
            Maximum number of neighbors that every item can be connected with.
        search_neighborhood_size : int (default=300)
            Search neighborhood size for ANN-search.
            Higher values improve search quality in expense of building time.
        num_exact_candidates : int (default=100)
            Number of nearest vectors to take from batch.
            Higher values improve search quality in expense of building time.
        batch_size : int (default=1000)
            Number of items that added to graph on each step of algorithm.
        upper_level_batch_size : int (default=40000)
            Batch size for building upper levels.
        level_size_decay : int (default=max_neighbors/2)
            Base of exponent for decaying level sizes.
        """
        # Store every constructor argument as a same-named public attribute;
        # get_params()/set_params() operate on exactly this attribute set.
        for key, value in iteritems(locals()):
            if key not in ['self', '__class__']:
                setattr(self, key, value)
        # Index state, populated by fit(). Initialized here so that using the
        # estimator before fitting raises HnswException (via _check_index)
        # instead of AttributeError. Leading-underscore attributes are
        # excluded from get_params().
        self._index = None
        self._index_data = None
    def _check_index(self):
        """Raise HnswException if fit() has not been called yet."""
        if self._index is None:
            raise HnswException("Index is not built and not loaded")
    def fit(self, X, y=None, num_threads=None, verbose=False, report_progress=True, snapshot_file=None,
            snapshot_interval=600):
        """
        Fit the HNSW model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_values)
        y : None
            Added to be compatible with Estimator API
        num_threads : int (default=number of CPUs)
            Number of threads for building index.
        report_progress : bool (default=True)
            Print progress of building.
        verbose : bool (default=False)
            Print additional information about time of building.
        snapshot_file : string (default=None)
            Path for saving snapshots during the index building.
        snapshot_interval : int (default=600)
            Interval between saving snapshots (seconds).

        Returns
        -------
        model : HnswEstimator
        """
        self._index, self._index_data = _hnsw._init_index(X, self.distance)
        # Start from the constructor parameters, then overlay any fit-time
        # options that were explicitly provided.
        params = self._get_params(return_none=False)
        not_params = ["not_params", "self", "params", "__class__", "X", "y"]
        for key, value in iteritems(locals()):
            if key not in not_params and value is not None:
                params[key] = value
        # 'distance' was already consumed by _init_index and is not a JSON
        # build option.
        del params['distance']
        with log_fixup():
            self._index._build(json.dumps(params))
        return self
    def _get_params(self, return_none):
        # Public (non-underscore) attributes are exactly the estimator
        # parameters; optionally drop those still set to None.
        params = {}
        for key, value in self.__dict__.items():
            if key[0] != '_' and (return_none or (value is not None)):
                params[key] = value
        return params
    def get_params(self, deep=True):
        """
        Get parameters for this estimator.
        """
        return self._get_params(return_none=True)
    def set_params(self, **params):
        """
        Set the parameters of this estimator.

        Parameters
        ----------
        **params : dict
            HnswEstimator parameters.

        Returns
        -------
        self : HnswEstimator instance

        Raises
        ------
        HnswException
            If a key in ``params`` is not a known estimator parameter.
        """
        if not params:
            return self
        valid_params = self._get_params(return_none=True)
        for key, value in params.items():
            if key not in valid_params:
                # Fix: substitute the offending key into the message (the
                # original raised with a literal, unformatted '%s').
                raise HnswException(
                    'Invalid parameter %s for HnswEstimator. '
                    'Check the list of available parameters '
                    'with `get_params().keys()`.' % key
                )
            setattr(self, key, value)
        return self
    @property
    def effective_metric_(self):
        """
        Returns
        -------
        Distance that should be used for finding nearest vectors.
        """
        return self.distance
    @property
    def n_samples_fit_(self):
        """
        Returns
        -------
        Number of samples in the fitted data.
        """
        self._check_index()
        return self._index_data.shape[0]
    def kneighbors(self, X=None, n_neighbors=None, return_distance=True, search_neighborhood_size=None,
                   distance_calc_limit=0):
        """Finds the approximate K-neighbors of a point.

        Returns indices of and distances to the neighbors of each point.

        Parameters
        ----------
        X : array-like, shape (n_queries, n_features) or None
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.
        n_neighbors : int, default=None
            Number of neighbors required for each sample. The default is the
            value passed to the constructor.
        return_distance : bool, default=True
            Whether or not to return the distances.
        search_neighborhood_size : int, default=None
            Search neighborhood size for ANN-search.
            Higher values improve search quality in expense of search time.
            It should be equal or greater than top_size.
            If None set to n_neighbors * 2.
        distance_calc_limit : int (default=0)
            Limit of distance calculation.
            To guarantee satisfactory search time at the expense of quality.
            0 is equivalent to no limit.

        Returns
        -------
        neigh_dist : numpy.ndarray of shape (n_queries, n_neighbors)
            Array representing the lengths to points, only present if
            return_distance=True
        neigh_ind : numpy.ndarray of shape (n_queries, n_neighbors)
            Indices of the nearest points in the population matrix.
        """
        self._check_index()
        if X is None:
            X = self._index_data
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        if search_neighborhood_size is None:
            search_neighborhood_size = n_neighbors * 2
        return self._index._kneighbors(X, n_neighbors, return_distance, self.distance, search_neighborhood_size,
                                       distance_calc_limit)
class OnlineHnsw:
    """
    Online Hierarchical Navigable Small World index: items are inserted
    incrementally and the index can be queried at any point.
    """
    def __init__(self, dtype, dimension, distance, max_neighbors=None, search_neighborhood_size=None, num_vertices=None, level_size_decay=None):
        """
        Create an online index with the given options.

        Parameters
        ----------
        dtype : EVectorComponentType
            Type of vectors.
        dimension : int
            Dimension of vectors.
        distance : EDistance
            Distance that should be used for finding nearest vectors.
        max_neighbors : int (default=32)
            Maximum number of neighbors that every item can be connected with.
        search_neighborhood_size : int (default=300)
            Search neighborhood size for ANN-search.
            Higher values improve search quality in expense of building time.
        num_vertices : int (default=0)
            Expected number of vectors in storage.
        level_size_decay : int (default=max_neighbors/2)
            Base of exponent for decaying level sizes.
        """
        self.dtype = dtype
        self.dimension = dimension
        # Forward only the optional settings that were explicitly provided.
        option_names = ("max_neighbors", "search_neighborhood_size", "num_vertices", "level_size_decay")
        options = {}
        for name, value in iteritems(locals()):
            if name in option_names and value is not None:
                options[name] = value
        self._online_index = _OnlineHnswDenseVectorIndex[dtype](dimension, distance, json.dumps(options))
    def get_nearest_and_add_item(self, query):
        """
        Query the index for the neighbors of ``query`` and then insert it.

        Parameters
        ----------
        query : list or numpy.ndarray
            Vector for which nearest neighbors should be found.
            Vector which should be added in index.

        Returns
        -------
        neighbors : list of tuples (id, distance) with length = search_neighborhood_size
        """
        return self._online_index._get_nearest_neighbors_and_add_item(query)
    def get_nearest(self, query, top_size=0):
        """
        Return approximate nearest neighbors of ``query``.

        Parameters
        ----------
        query : list or numpy.ndarray
            Vector for which nearest neighbors should be found.
        top_size : int
            Required number of neighbors.

        Returns
        -------
        neighbors : list of tuples (id, distance)
        """
        return self._online_index._get_nearest_neighbors(query, top_size)
    def add_item(self, item):
        """
        Insert a vector into the index.

        Parameters
        ----------
        item : list or numpy.ndarray
            Vector which should be added in index.
        """
        self._online_index._add_item(item)
    def get_item(self, id):
        """
        Fetch a stored vector by its id.

        Parameters
        ----------
        id : int
            Index of item in storage.

        Returns
        -------
        item : numpy.ndarray
        """
        return self._online_index._get_item(id)
    def get_num_items(self):
        """
        Return the number of items currently in storage.

        Returns
        -------
        num_items : int
        """
        return self._online_index._get_num_items()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@library@python@hnsw@hnsw@hnsw.py@.PATH_END.py
|
{
"filename": "test_tpcf_estimators.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/mock_observables/two_point_clustering/tests/test_tpcf_estimators.py",
"type": "Python"
}
|
""" Module provides unit-testing for `~halotools.mock_observables.tpcf_estimators`.
"""
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from ..tpcf_estimators import (
_test_for_zero_division,
_list_estimators,
_TP_estimator_requirements,
)
from ....custom_exceptions import HalotoolsError
# Fix: __all__ previously listed only one of the six tests defined here;
# list them all so `from ... import *` and introspection see the full set.
__all__ = (
    "test_zero_division1",
    "test_zero_division2",
    "test_TP_estimator_requirements_davis_peebles",
    "test_TP_estimator_requirements_hewett",
    "test_TP_estimator_requirements_hamilton",
    "test_TP_estimator_requirements_bad_estimator",
)
def test_zero_division1():
    """Every estimator should accept strictly positive pair counts."""
    nbins = 10
    DD, DR, RR = (np.arange(nbins) + 10 for _ in range(3))
    ND1, ND2, NR1, NR2 = (100,) * 4
    for estimator in _list_estimators():
        _test_for_zero_division(DD, DR, RR, ND1, ND2, NR1, NR2, estimator)
def test_zero_division2():
    """Zeroed DR/RR entries must raise ValueError mentioning NaN output."""
    nbins = 10
    DD, DR, RR = (np.arange(nbins) + 10 for _ in range(3))
    ND1, ND2, NR1, NR2 = (100,) * 4
    # Introduce a zero denominator in the first bin.
    RR[0] = 0.0
    DR[0] = 0.0
    substr = "you will have at least one NaN returned value"
    for estimator in _list_estimators():
        with pytest.raises(ValueError) as err:
            _test_for_zero_division(DD, DR, RR, ND1, ND2, NR1, NR2, estimator)
        assert substr in err.value.args[0]
def test_TP_estimator_requirements_davis_peebles():
    """Davis-Peebles needs DD and DR but not RR."""
    do_DD, do_DR, do_RR = _TP_estimator_requirements("Davis-Peebles")
    assert (do_DD, do_DR, do_RR) == (True, True, False)
def test_TP_estimator_requirements_hewett():
    """Hewett needs all three pair counts."""
    do_DD, do_DR, do_RR = _TP_estimator_requirements("Hewett")
    assert (do_DD, do_DR, do_RR) == (True, True, True)
def test_TP_estimator_requirements_hamilton():
    """Hamilton needs all three pair counts."""
    do_DD, do_DR, do_RR = _TP_estimator_requirements("Hamilton")
    assert (do_DD, do_DR, do_RR) == (True, True, True)
def test_TP_estimator_requirements_bad_estimator():
    """An unknown estimator name must raise HalotoolsError."""
    substr = "Input `estimator` must be one of the following"
    with pytest.raises(HalotoolsError) as err:
        _TP_estimator_requirements("Ron Perlman")
    assert substr in err.value.args[0]
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@mock_observables@two_point_clustering@tests@test_tpcf_estimators.py@.PATH_END.py
|
{
"filename": "PBH.py",
"repo_name": "bradkav/BlackHolesDarkDress",
"repo_path": "BlackHolesDarkDress_extracted/BlackHolesDarkDress-master/Nbody/PBH.py",
"type": "Python"
}
|
import numpy as np
import pygadgetic
import random
import math
from tqdm import tqdm
import os.path
from scipy.integrate import quad, cumtrapz
from scipy.interpolate import interp1d
from scipy.optimize import brenth
from matplotlib import pylab as pl
#-------------
#You should just be able to import whichever eddington module
#you're interested in and everything should work from here on...
import eddington as edd
#-------------
L_sim = 1e-5 #pc
#Old code for adding a single dressed PBH, without multi-mass scheme
def AddDressedPBH(body,DMinds, PBHind,nDM, x0, v0, r_soft, a, verbose=False, haloID="nothing"):
    """Add a single dressed PBH (black hole + dark-matter halo) to the ICs.

    Samples nDM DM pseudo-particles around one primordial black hole from the
    tabulated Eddington distribution (module ``eddington``), writes masses,
    positions and velocities into the pygadgetic ``body`` arrays, removes the
    net momentum of the PBH+halo system, and finally offsets everything by
    the bulk position x0 and velocity v0.

    Parameters:
    body - the pygadgetic 'body' object (usually called my_body)
    DMinds - indices specifying where to put the DM particles
    in the list (the DM particles usually come before the PBH
    particles)
    PBHind - single index specifying where to place the PBH
    in the list (close to the end usually, so -1 or -2)
    nDM - number of DM particles around this PBH
    x0 - initial position of PBH (in pc)
    v0 - initial velocity of PBH+DM halo (in km/s)
    r_soft - softening length in parsec
    a - Semi major axis in parsec (to 3 decimal places).
    Tabulated values are a = [0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08]
    haloID - string identifying a text file to load the halo from (in folder /halos)
    If file not found, a new halo is generated and saved in /halos.
    Set haloID = "nothing" to ignore this option.

    Side effects: mutates body.mass/pos/vel at the given indices, prints
    diagnostics, opens matplotlib figures (blocking pl.show() calls), and may
    read or write "halos/<haloID>.txt".
    """
    #First, load the appropriate distribution function
    edd.loadDistribution(a)
    #PBH mass and truncation radius imported from the eddington file for
    #self-consistency
    r_tr = edd.r_tr
    #r_tr = 1e-5
    r_eq = edd.r_eq
    M_PBH = edd.M_PBH
    #Check that the indices 'inds' are consistent
    #with the number of DM particles
    # NOTE(review): this only prints a warning and continues; a mismatch will
    # still corrupt the body arrays below.
    if (len(DMinds) != nDM):
        print "Error in PBH.AddDressedPBH: number of indices does not match number of particles..."
    #Calculate the relevant masses
    #print "FUDGED!"
    # Total halo mass from the truncation/equality radii; each DM
    # pseudo-particle gets an equal share m1.
    mHalo = M_PBH*(r_tr/r_eq)**1.5
    m1 = mHalo*1.0/nDM
    body.mass[PBHind] = M_PBH
    body.mass[DMinds] = m1
    # Central-density diagnostics (evaluated at r = 1e-2 * r_eq).
    rho_c = edd.rhoDM(1e-2*r_eq/r_tr)/r_tr**3.0
    print "Mass density (M_solar/pc^3): ", rho_c
    n_c = rho_c/m1
    print "Number density (1/pc^3): ", n_c
    print "Mean separation (pc): ", (3.0/(4.0*np.pi*n_c))**(1.0/3.0)
    #PBH position and velocity (before CoM velocity is subtracted...)
    xPBH=np.array([0.,0.,0.])
    vPBH=np.array([0.,0.,0.])
    halofile = "halos/" + haloID + ".txt"
    #Check to see whether a halo file already exists...
    if (haloID != "nothing" and os.path.isfile("halos/" + haloID + ".txt")):
        print " Loading halo from file. HaloID:", haloID
        #Load DM phase space coordinates from file
        xvals, yvals, zvals, vxvals, vyvals, vzvals = np.loadtxt(halofile, unpack=True)
    else:
        if (haloID != "nothing"):
            print " Halo file <" + halofile+"> not found. Generating from scratch..."
        #Generate the mass profile
        print " Generating mass profile..."
        r_max = 8.0*r_tr
        r_min = 1e-5*r_tr
        #rlist = np.logspace(np.log10(r_min), np.log10(r_max), 500)
        rlist = np.append(0,np.logspace(np.log10(r_min), np.log10(r_max), 200))
        Menc = 0.0*rlist
        #Radial distribution function for the DM halo
        P_r_1 = lambda r: 4.0*np.pi*r**2*edd.rhoDM(r/r_tr)
        P_r = np.vectorize(P_r_1)
        # Tabulate the DM mass enclosed at each radius (PBH mass subtracted);
        # this is inverted below to sample radii from the mass profile.
        for i in range(len(rlist)):
            Menc[i] = edd.Menc(rlist[i]/r_tr) - M_PBH
            #Menc[i] = quad(P_r, r_min, rlist[i])[0]
        #print Menc
        #Menc -= Menc[0]
        M_max = Menc[-1]
        # Inverse CDF: maps enclosed-mass fraction -> radius.
        Minterp = interp1d(Menc/M_max, rlist, kind='linear')
        #print (edd.Menc(1e-2) - edd.Menc(1e-3))/M_PBH
        #Cut off a fraction p of the radial distribution
        #near the PBH (fraction inside r_soft)
        p_cut = (edd.Menc(r_soft/r_tr) - M_PBH)/M_max
        print "p_cut = ", p_cut
        #print x_soft*r_eq
        #print r_tr
        #print x_soft*r_eq/r_tr
        #Calculate and set the pseudo-particle mass
        frac = quad(P_r, r_min, r_tr)[0]/quad(P_r, r_min, r_max)[0]
        frac01 = quad(P_r, r_min, 0.1*r_tr)[0]/quad(P_r, r_min, r_max)[0]
        Menc_tr = quad(P_r, r_min, r_tr)[0]
        #DM positions
        # Inverse-CDF sampling of radii, restricted to the mass fraction
        # outside the softening cut p_cut.
        rvals = Minterp(np.asarray(p_cut + (1.0 - p_cut)*np.random.rand(nDM), dtype='float'))
        #-------------
        rDM = 1.0*rvals
        pl.figure()
        pl.hist(np.log10(rDM), bins=np.linspace(-5, -1.5, 36))
        pl.axvline(np.log10(r_soft))
        pl.show()
        # Diagnostic: mean inter-particle separation versus enclosing radius.
        x_c_list = np.logspace(np.log10(1e-5), np.log10(np.max(rDM)))
        xb_list = 0.0*x_c_list
        N_list = 0.0*x_c_list
        for i in range(len(xb_list)):
            x_c = x_c_list[i]
            N1 = np.sum(rDM < x_c)
            N_list[i] = np.sum(rDM < x_c)
            #print "Number close to centre:", N1
            V1 = 4*np.pi*(x_c**3)/3.0
            xb_list[i] = (3.0/(4*np.pi*(N1/V1)))**(1.0/3.0)
            #print "Mean separation close to centre:", (3.0/(4*np.pi*(N1/V1)))**(1.0/3.0)
        print np.min(rvals)
        print "Softening length should be (r_eq): ", 0.5e-5/r_eq
        print "Softening length (1/35) should be (r_eq):", (1.0/35.0)*xb_list[-1]/r_eq
        pl.figure()
        pl.loglog(x_c_list, xb_list)
        pl.loglog(x_c_list, N_list, '--')
        pl.show()
        #---------
        #Generate some random directions for setting particle positions
        ctvals = 2.0*np.random.rand(nDM) - 1.0
        thetavals = np.arccos(ctvals)
        phivals = 2*np.pi*np.random.rand(nDM)
        xvals = rvals*np.cos(phivals)*np.sin(thetavals)
        yvals = rvals*np.sin(phivals)*np.sin(thetavals)
        zvals = rvals*np.cos(thetavals)
        if (verbose):
            print "p_cut:",p_cut
            print "Mass enclosed inside r_tr (calc):", Menc_tr
            print "Mass enclosed inside r_tr (MC):", m1*np.sum(rvals < r_tr)
            print "Fraction of DM particles inside r_tr:", frac
            print "Smallest DM radius, r_min/r_tr = ", np.min(rvals)/r_tr
            print "Largest DM radius, r_max/r_tr = ", np.max(rvals)/r_tr
            print "DM particle mass [M_solar]:",m1
            print "Typical DM separation inside r_tr:", (frac*nDM)**(-1.0/3.0)
            print "Typical DM separation inside 0.1 r_tr:", 0.1*(frac01*nDM)**(-1.0/3.0)
            print " "
        #DM velocities
        print " Sampling DM velocities..."
        # Rejection-sample a speed for each particle from the Eddington
        # distribution f(r, v) at its sampled radius.
        vvals = np.zeros(nDM)
        for ind in tqdm(range(nDM)):
            r = rvals[ind]
            #Now sample f(v) at given r to get the speed v
            found = 0
            while (found == 0):
                v = np.random.rand(1)*edd.vmax(r/r_tr)
                #Use 5/vmax as the 'maximum' values of f(v)
                #but in some cases it might not be enough...
                if (np.random.rand(1)*(5.0/edd.vmax(r/r_tr)) < edd.f(r, v)):
                    #pl.show()
                    found = 1
            vvals[ind] = v
        #Get a new set of random directions for the velocities
        ctvals2 = 2.0*np.random.rand(nDM) - 1.0
        thetavals2 = np.arccos(ctvals2)
        phivals2 = 2*np.pi*np.random.rand(nDM)
        vxvals = vvals*np.cos(phivals2)*np.sin(thetavals2)
        vyvals = vvals*np.sin(phivals2)*np.sin(thetavals2)
        vzvals = vvals*np.cos(thetavals2)
        #Save the output to a halo file if needed
        if (haloID != "nothing"):
            headertxt = "Number of DM particles: " + str(nDM) + ". Softening length [pc]: " + str(r_soft)
            headertxt += "\nColumns: x [pc], y [pc], z [pc], vx [km/s], vy [km/s], vz [km/s]"
            np.savetxt("halos/" + haloID + ".txt", zip(xvals,yvals,zvals,vxvals,vyvals,vzvals), header=headertxt)
    # From here on the loaded and freshly generated branches converge.
    xDM=np.array([xvals, yvals, zvals]).T
    vDM=np.array([vxvals, vyvals, vzvals]).T
    rDM = np.sqrt(np.sum(xDM**2, axis=-1))
    pl.figure()
    pl.hist(np.log10(rDM), bins=np.linspace(-6, -1, 26))
    pl.show()
    #Subtract off any net momentum of the system
    totmass = np.sum(body.mass[DMinds])+body.mass[PBHind]
    momentum = np.zeros(3)
    momentum[0] = np.sum(vDM[:,0]*body.mass[DMinds])
    momentum[1] = np.sum(vDM[:,1]*body.mass[DMinds])
    momentum[2] = np.sum(vDM[:,2]*body.mass[DMinds])
    vDM -= momentum/totmass
    vPBH -= momentum/totmass
    print "v_PBH:", np.sqrt(np.sum(vPBH**2))
    print "v_apo:", np.sqrt(np.sum(np.asarray(v0)**2))
    #Add on the CoM position and velocity
    xDM += np.asarray(x0)
    xPBH += np.asarray(x0)
    vDM += v0
    vPBH += v0
    #Set particle ids
    #body.id[inds]=inds
    #Set positions and velocities
    #NB: we divide positions by r_tr
    #to get them in units of...r_tr
    # NOTE(review): positions are actually divided by L_sim (= 1e-5 pc, the
    # simulation length unit), not r_tr as the comment above claims.
    body.pos[PBHind,:] = xPBH/L_sim
    body.vel[PBHind,:] = vPBH
    body.pos[DMinds,:] = xDM/L_sim
    body.vel[DMinds,:] = vDM
#---------------------------------
#---------------------------------
def AddDressedPBH_seg( x0, v0, r_soft, M_PBH, a, N_inner = 100,delta_Rm = 50, verbose=False, haloID="nothing"):
"""Add a dressed PBH to the initial conditions...
Parameters:
body - the pygadgetic 'body' object (usually called my_body)
DMinds - indices specifying where to put the DM particles
in the list (the DM particles usually come before the PBH
particles)
PBHind - single index specifying where to place the PBH
in the list (close to the end usually, so -1 or -2)
nDM - number of DM particles around this PBH
x0 - initial position of PBH (in pc)
v0 - initial velocity of PBH+DM halo (in km/s)
r_soft - softening length in parsec
a - Semi major axis in parsec (to 3 decimal places).
Tabulated values are a = [0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08]
haloID - string identifying a text file to load the halo from (in folder /halos)
If file not found, a new halo is generated and saved in /halos.
Set haloID = "nothing" to ignore this option.
"""
#TO BE ADDED AS PARAMETERS
#N_inner = 100
#delta_Rm = 50
#PBH mass and truncation radius imported from the eddington file for
#self-consistency
edd.loadDistribution(M_PBH, a)
r_tr = edd.r_tr
r_eq = edd.r_eq
#M_PBH = edd.M_PBH
mHalo = M_PBH*(r_tr/r_eq)**1.5
#Number of shells
N_shell = 4
#Initialise the masses, positions and velocities
m_vals = [[] for i in range(N_shell+1)]
pos_vals = [[] for i in range(N_shell+1)]
vel_vals = [[] for i in range(N_shell+1)]
#Set up the central black hole
m_vals[0] = M_PBH
pos_vals[0] = np.zeros(3)
vel_vals[0] = np.zeros(3)
#print pos_vals[0]
#nDM = len(DMinds)
#Check that the indices 'inds' are consistent
#with the number of DM particles
#if (len(DMinds) != nDM):
# print "Error in PBH.AddDressedPBH: number of indices does not match number of particles..."
#Calculate the relevant masses
#print "FUDGED!"
#mHalo = M_PBH*(r_tr/r_eq)**1.5
#m1 = mHalo*1.0/nDM_eff
#body.mass[PBHind] = M_PBH
#N_shell = 4
#rho_c = edd.rhoDM(1e-2*r_eq/r_tr)/r_tr**3.0
#print "Mass density (M_solar/pc^3): ", rho_c
#n_c = rho_c/m1
#print "Number density (1/pc^3): ", n_c
#print "Mean separation (pc): ", (3.0/(4.0*np.pi*n_c))**(1.0/3.0)
#PBH position and velocity (before CoM velocity is subtracted...)
xPBH=np.array([0.,0.,0.])
vPBH=np.array([0.,0.,0.])
halofile = "halos/" + haloID + ".txt"
#Check to see whether a halo file already exists...
if (haloID != "nothing" and os.path.isfile("halos/" + haloID + ".txt")):
print " Loading halo from file. HaloID:", haloID
#Load DM phase space coordinates from file
xvals, yvals, zvals, vxvals, vyvals, vzvals, mvals = np.loadtxt(halofile, unpack=True)
body.mass[DMinds] = mvals
else:
if (haloID != "nothing"):
print " Halo file <" + halofile+"> not found. Generating from scratch..."
#Generate the mass profile
print " Generating mass profile..."
r_max = 8.0*r_tr
r_min = 1e-6*r_tr
#rlist = np.logspace(np.log10(r_min), np.log10(r_max), 500)
rlist = np.append(0,np.logspace(np.log10(r_min), np.log10(r_max), 200))
Menc = 0.0*rlist
#Radial distribution function for the DM halo
P_r_1 = lambda r: 4.0*np.pi*r**2*edd.rhoDM(r/r_tr)
P_r = np.vectorize(P_r_1)
for i in range(len(rlist)):
Menc[i] = edd.Menc(rlist[i]/r_tr) - M_PBH
#Menc[i] = quad(P_r, r_min, rlist[i])[0]
#print Menc
#Menc -= Menc[0]
M_max = Menc[-1]
Minterp = interp1d(Menc/M_max, rlist, kind='linear')
M_shell = np.zeros(N_shell)
nDM_shell = np.zeros(N_shell,dtype='int')
m_shell = np.zeros(N_shell)
force_equal = False
if (force_equal):
#Calculate particle masses per shell
m_shell[0] = 1.0
m_shell[1:] = m_shell[0]*(delta_Rm**(np.arange(1,N_shell)))
m0 = mHalo/(N_inner*np.sum(m_shell))
m_shell *= m0
nDM_shell += N_inner
M_shell = m_shell*N_inner
r_s = np.zeros(N_shell-1)
for i in range(N_shell-1):
M_inner = np.sum(M_shell[:(i+1)])
r_s[i] = Minterp(M_inner/M_max)
else:
r_outer = 0.1*r_tr
r_inner = 15*r_soft
r_s = np.logspace(np.log10(r_inner), np.log10(r_outer), N_shell-1)
r_bound = np.append(r_s, 1e100)
for i in range(N_shell):
M_sofar = np.sum(M_shell)
M_shell[i] = edd.Menc(r_bound[i]/r_tr) - M_PBH - M_sofar
#Calculate particle masses per shell
m_shell[0] = M_shell[0]/N_inner
m_shell[1:] = m_shell[0]*(delta_Rm**(np.arange(1,N_shell)))
#m0 = mHalo/(N_inner*np.sum(m_shell))
#m_shell *= m0
nDM_shell = np.asarray(M_shell/m_shell, dtype='int')
soft_list = np.zeros(N_shell)
soft_list[0] = r_soft
#print 2.8*Minterp(m_shell[0]/M_max)
#for i in range(1,N_shell):
# soft_list[i] = 2.8*Minterp(m_shell[i]/M_max)
#soft_list[i] = r_s[i-1]/2.8
r0 = Minterp(m_shell[0]/mHalo)
for i in range(1,N_shell):
#soft_list[i] = 10*Minterp(m_shell[i]/mHalo)
soft_list[i] = r_soft*(Minterp(m_shell[i]/mHalo)/r0)
#TEMP
#m_shell[1:] = m_shell[0]*(1.0001**(np.arange(1,N_shell)))
#nDM_shell += N_inner
#M_shell = m_shell*N_inner
#r_s = np.zeros(N_shell-1)
#for i in range(N_shell-1):
# M_inner = np.sum(M_shell[:(i+1)])
# r_s[i] = Minterp(M_inner/M_max)
print " "
print " Multi-mass scheme:"
print " N_shell:", N_shell
print " Total shell masses [M_solar]:", M_shell
print " Particle masses [M_solar]:", m_shell
print " Number per shell:", nDM_shell
print " Shell radii [pc]:", r_s
print " Softening length (old) [pc]:", r_soft*(m_shell/m_shell[0])**(2.0/3.0)
print " Softening length (new) [pc]:", soft_list
print " "
print " Effective resolution [N_part.]:", mHalo/m_shell[0]
#Cut off a fraction p of the radial distribution
#near the PBH (fraction inside r_soft)
p_cut = (edd.Menc(2.8*r_soft/r_tr) - M_PBH)/M_max
#print "*** WARNING: Using p_cut = 0... ***"
#p_cut = 0.0
p_vals = np.array([(edd.Menc(r/r_tr) - M_PBH)/M_max for r in r_s])
p_vals = np.append(p_vals, 1.0)
p_vals = np.append(p_cut, p_vals)
#print p_vals
#Calculate and set the pseudo-particle mass
frac = quad(P_r, r_min, r_tr)[0]/quad(P_r, r_min, r_max)[0]
frac01 = quad(P_r, r_min, 0.1*r_tr)[0]/quad(P_r, r_min, r_max)[0]
Menc_tr = quad(P_r, r_min, r_tr)[0]
#DM positions
#rvals = np.zeros(nDM,N_shell)
rvals_all = [np.zeros(nDM_shell[i]) for i in range(N_shell)]
for i in range(N_shell):
rvals_all[i] = Minterp(np.asarray(p_vals[i] + (p_vals[i+1] - p_vals[i])*np.random.rand(nDM_shell[i]), dtype='float'))
#rvals = [ for i in range(N_shell)]
#rvals[:, i] = Minterp(np.asarray(p_vals[i] + (p_vals[i+1] - p_vals[i])*np.random.rand(nDM_shell[i]), dtype='float'))
#print " Number of particles below 5e-5 pc:", np.sum(rvals < 5e-5)
#-------------
#rDM = 1.0*rvals
do_plots = False
if (do_plots):
cols = ['r','g','b','c']
pl.figure()
for i in range(N_shell):
pl.hist(np.log10(rvals_all[i]), bins=np.linspace(-8, -1.5, 66), alpha = 0.5, color=cols[i])
pl.axvline(np.log10(soft_list[i]), linestyle=':', color=cols[i], lw=2)
pl.axvline(np.log10(r_soft), linestyle='--', color='k')
for i in range(N_shell-1):
pl.axvline(np.log10(r_s[i]), linestyle=':', color='k')
#pl.axvline(np.log10(r_outer), linestyle=':', color='k')
pl.show()
#---------
#m_shell[1:] = m_shell[0] + np.arange(1,N_shell)*1e-10
for i, nDM,rvals in reversed(zip(range(N_shell), nDM_shell, rvals_all)):
print " For shell number", i+1
#Generate some random directions for setting particle positions
ctvals = 2.0*np.random.rand(nDM) - 1.0
thetavals = np.arccos(ctvals)
phivals = 2*np.pi*np.random.rand(nDM)
xvals = rvals*np.cos(phivals)*np.sin(thetavals)
yvals = rvals*np.sin(phivals)*np.sin(thetavals)
zvals = rvals*np.cos(thetavals)
#rvals = np.append(rvals, rvals)
#xvals = np.append(xvals, -xvals)
#yvals = np.append(yvals, -yvals)
#zvals = np.append(zvals, -zvals)
"""
if (verbose):
print "p_cut:",p_cut
print "Mass enclosed inside r_tr (calc):", Menc_tr
print "Mass enclosed inside r_tr (MC):", m1*np.sum(rvals < r_tr)
print "Fraction of DM particles inside r_tr:", frac
print "Smallest DM radius, r_min/r_tr = ", np.min(rvals)/r_tr
print "Largest DM radius, r_max/r_tr = ", np.max(rvals)/r_tr
print "DM particle mass [M_solar]:",m1
print "Typical DM separation inside r_tr:", (frac*nDM)**(-1.0/3.0)
print "Typical DM separation inside 0.1 r_tr:", 0.1*(frac01*nDM)**(-1.0/3.0)
print " "
"""
#DM velocities
print " Sampling DM velocities..."
vvals = np.zeros(nDM)
for ind in tqdm(range(nDM)):
r = rvals[ind]
#Now sample f(v) at given r to get the speed v
found = 0
while (found == 0):
v = np.random.rand(1)*edd.vmax(r/r_tr)
#Use 5/vmax as the 'maximum' values of f(v)
#but in some cases it might not be enough...
if (np.random.rand(1)*(5.0/edd.vmax(r/r_tr)) < edd.f(r, v)):
#pl.show()
found = 1
vvals[ind] = v
#Get a new set of random directions for the velocities
ctvals2 = 2.0*np.random.rand(nDM) - 1.0
thetavals2 = np.arccos(ctvals2)
phivals2 = 2*np.pi*np.random.rand(nDM)
vxvals = vvals*np.cos(phivals2)*np.sin(thetavals2)
vyvals = vvals*np.sin(phivals2)*np.sin(thetavals2)
vzvals = vvals*np.cos(thetavals2)
pos_vals[i] = np.array([xvals, yvals, zvals]).T
vel_vals[i] = np.array([vxvals, vyvals, vzvals]).T
#Begin orbit refinement
orbit_refine = False
if (orbit_refine):
r_mor = 10*r_s[0]
fk_list = np.zeros(nDM)
for j,x,v in zip(range(nDM),pos_vals[i], vel_vals[i]):
r0 = np.sqrt(np.sum(x**2))
hsq = np.sum(np.cross(x,v)**2)
eps = 0.5*np.sum(v**2) - edd.psi(r0/edd.r_tr)
rootfunc = lambda lr: 10**(-2*lr) - 2*(edd.psi((10**lr)/edd.r_tr) + eps)/hsq
r_peri = 10**brenth(rootfunc,-10, np.log10(r0))
r_bound = np.append(r_s,1e100)
f_k = 1.0
if (r_peri <= r_s[0]):
f_k = m_shell[i]/m_shell[0]
elif (r_mor <= r_peri):
f_k = 1.0
elif (r_mor < r_bound[i]):
f_k = m_shell[i]/m_shell[0] + (1-m_shell[i]/m_shell[0])*np.log(r_peri/r_s[0])/np.log(r_mor/r_s[0])
else:
f_k = m_shell[i]/m_shell[0] + (1-m_shell[i]/m_shell[0])*np.log(r_peri/r_s[0])/np.log(r_bound[i]/r_s[0])
fk_list[j] = int(delta_Rm**np.ceil(math.log(f_k*0.999, delta_Rm)))
#f_k//delta_Rm
#print "Split factor:", f_k, fk_list[j]
lrvals = np.linspace(-8, -1,100)
plot = False
if (plot):
pl.figure()
pl.loglog(10**lrvals, np.abs(np.vectorize(rootfunc)(lrvals)))
pl.axvline(r0)
pl.axvline(r_peri)
for r in r_s:
pl.axvline(r, linestyle=':', color='k')
pl.show()
print np.sum(fk_list)
#pl.figure()
#pl.hist(fk_list)
#pl.show()
m_vals[i] = np.zeros(nDM) + m_shell[i]
CoM = np.sqrt(np.sum(pos_vals[i]**2, axis=0))/nDM
print " CoM [pc]:", CoM
print " "
#--------------------- NOW JUST NEED TO TRANSFER BACK TO RELEVANT FILES!!!
#Save the output to a halo file if needed
#if (haloID != "nothing"):
# headertxt = "Number of DM particles: " + str(nDM) + ". Softening length [pc]: " + str(r_soft)
# headertxt += "\nColumns: x [pc], y [pc], z [pc], vx [km/s], vy [km/s], vz [km/s], m [M_solar]"
# np.savetxt("halos/" + haloID + ".txt", zip(xvals[1:],yvals[1:],zvals[1:],vxvals[1:],vyvals[1:],vzvals[1:], [1:]), header=headertxt)
"""
#Tell us a bunch of stuff
if (verbose):
print "Initial momentum [M_solar km/s]:", momentum
print "Net DM velocity (before subtraction) [km/s]:", momentum/body.mass[DMinds[0]]
print "Net DM velocity (after subtraction) [km/s]:", momentum/body.mass[DMinds[0]] - nDM*momentum/totmass
print "Max DM velocity:",vDM[np.argmax(vvals),:]
print "Max DM speed:",np.max(vvals)
print " "
"""
#pl.figure()
#pl.plot(rvals/r_tr,np.sqrt(np.sum(vDM**2, axis=-1)), "+")
#pl.xlabel(r"$r/r_\mathrm{tr}$")
#pl.ylabel(r"$v_\mathrm{DM}$ [km/s]")
#pl.show()
mlist_out = np.zeros(1) + M_PBH
xlist_out = np.zeros((1,3))
vlist_out = np.zeros((1,3))
for i in range(N_shell):
mlist_out = np.append(mlist_out, m_vals[i])
xlist_out = np.append(xlist_out, pos_vals[i], axis=0)
vlist_out = np.append(vlist_out, vel_vals[i], axis=0)
print " Total halo mass [pc]:", np.sum(mlist_out[1:])
xvals = xlist_out[1:, 0]
yvals = xlist_out[1:, 1]
zvals = xlist_out[1:, 2]
vxvals = vlist_out[1:, 0]
vyvals = vlist_out[1:, 1]
vzvals = vlist_out[1:, 2]
mvals = mlist_out[1:]
#Save the output to a halo file if needed
if (haloID != "nothing"):
headertxt = "Number of DM particles: " + str(nDM) + ". Softening length [pc]: " + str(r_soft)
headertxt += "\nColumns: x [pc], y [pc], z [pc], vx [km/s], vy [km/s], vz [km/s], m [M_solar]"
np.savetxt("halos/" + haloID + ".txt", zip(xvals,yvals,zvals,vxvals,vyvals,vzvals, mvals), header=headertxt)
#xDM=np.array([xvals, yvals, zvals]).T
#vDM=np.array([vxvals, vyvals, vzvals]).T
#rDM = np.sqrt(np.sum(xDM**2, axis=-1))
#body.mass[DMinds_outer] = 1e-10 + np.zeros(nDM_outer)
"""
x_c_list = np.logspace(np.log10(1e-5), np.log10(np.max(rDM)))
xb_list = 0.0*x_c_list
for i in range(len(xb_list)):
x_c = x_c_list[i]
N1 = np.sum(rDM < x_c)
#print "Number close to centre:", N1
V1 = 4*np.pi*(x_c**3)/3.0
xb_list[i] = (3.0/(4*np.pi*(N1/V1)))**(1.0/3.0)
#print "Mean separation close to centre:", (3.0/(4*np.pi*(N1/V1)))**(1.0/3.0)
print "Softening length should be (r_eq): ", 0.5e-5/r_eq
print "Softening length (1/35) should be (r_eq):", (1.0/35.0)*xb_list[-1]/r_eq
pl.figure()
pl.loglog(x_c_list, xb_list)
pl.show()
"""
"""
#Subtract off any net momentum of the system
totmass = np.sum(body.mass[DMinds])+body.mass[PBHind]
momentum = np.zeros(3)
momentum[0] = np.sum(vDM[:,0]*body.mass[DMinds])
momentum[1] = np.sum(vDM[:,1]*body.mass[DMinds])
momentum[2] = np.sum(vDM[:,2]*body.mass[DMinds])
#vDM -= momentum/totmass
#vPBH -= momentum/totmass
print " v_PBH [pc/kyr]:", np.sqrt(np.sum(vPBH**2))*3.24078e-14*(3600*24.0*365*1000)
print " v_apo:", np.sqrt(np.sum(np.asarray(v0)**2))
#Calculate CoM of the system
position = np.zeros(3)
position[0] = np.sum(xDM[:,0]*body.mass[DMinds])
position[1] = np.sum(xDM[:,1]*body.mass[DMinds])
position[2] = np.sum(xDM[:,2]*body.mass[DMinds])
print " x_CoM [pc]:", np.sqrt(np.sum(position**2))/totmass
#xDM -= position/totmass
#xPBH -= position/totmass
"""
totmass = np.sum(mlist_out)
momentum = np.zeros(3)
momentum[0] = np.sum(vlist_out[:,0]*mlist_out)
momentum[1] = np.sum(vlist_out[:,1]*mlist_out)
momentum[2] = np.sum(vlist_out[:,2]*mlist_out)
vlist_out -= momentum/totmass
position = np.zeros(3)
position[0] = np.sum(xlist_out[:,0]*mlist_out)
position[1] = np.sum(xlist_out[:,1]*mlist_out)
position[2] = np.sum(xlist_out[:,2]*mlist_out)
#print "*** WARNING: Not subtracting CoM position... ***"
print " Total CoM position [pc]:", position/totmass
xlist_out -= position/totmass
#Add on the CoM position and velocity
xlist_out += np.asarray(x0)
#xPBH += np.asarray(x0)
vlist_out += v0
#vPBH += v0
xlist_out /= L_sim
#Set particle ids
#body.id[inds]=inds
#Set positions and velocities
#NB: we divide positions by r_tr
#to get them in units of...r_tr
#body.pos[PBHind,:] = xPBH/L_sim
#body.vel[PBHind,:] = vPBH
#body.pos[DMinds,:] = xDM/L_sim
#body.vel[DMinds,:] = vDM
return mlist_out, xlist_out, vlist_out
#Read in a DM halo from file
def GetDressedPBH_fromfile(nDM_inner, M_PBH, a, halofile_root, verbose=False):
edd.loadDistribution(M_PBH,a)
#M_PBH = edd.M_PBH
#Calculate how many halo files we need to load
nHalos = nDM_inner/(2**4)
#Add the black hole
mlist = np.zeros(1) + M_PBH
xlist = np.zeros((1,3))
vlist = np.zeros((1,3))
halolist = list(range(1,64))
random.shuffle(halolist)
#print nHalos
for i in range(nHalos):
hID = halolist[i]
halofile = halofile_root + "_h" + str(hID) + ".txt"
if (verbose):
"Loading halofile:", halofile
xvals, yvals, zvals, vxvals, vyvals, vzvals, mvals = np.loadtxt(halofile, unpack=True)
mlist = np.append(mlist, mvals/nHalos) #Make sure we divide through to get the correct mass
xDM=np.array([xvals, yvals, zvals]).T
vDM=np.array([vxvals, vyvals, vzvals]).T
xlist = np.append(xlist, xDM, axis=0)
vlist = np.append(vlist, vDM, axis=0)
#Deal with the CoM
halomass = np.sum(mlist[1:])
totmass = np.sum(mlist)
xCoM = np.sum(np.atleast_2d(mlist).T*xlist, axis=0)/totmass
vCoM = np.sum(np.atleast_2d(mlist).T*vlist, axis=0)/totmass
xlist -= xCoM
vlist -= vCoM
if (verbose):
print " Total halo mass [M_solar]:", halomass
print " Centre of mass position [pc]:", np.sqrt(np.sum(xCoM**2))
print " CoM velocity [pc/kyr]:", np.sqrt(np.sum(vCoM**2))*3.24078e-14*3.1536e10
return mlist, xlist, vlist
|
bradkavREPO_NAMEBlackHolesDarkDressPATH_START.@BlackHolesDarkDress_extracted@BlackHolesDarkDress-master@Nbody@PBH.py@.PATH_END.py
|
{
"filename": "test_image.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/core/tests/test_image.py",
"type": "Python"
}
|
"""
Tests applied to primitives_image.py
"""
import pytest
import os
from numpy.testing import assert_array_equal
import astrodata, gemini_instruments
from geminidr.niri.primitives_niri_image import NIRIImage
# Maps each donor dataset (with detected sources) to its target dataset.
object_mask_datasets = {"N20210512S0018_sourcesDetected.fits": "N20210512S0077_flatCorrected.fits"}


@pytest.mark.regression
@pytest.mark.parametrize("dataset", object_mask_datasets.keys())
def test_transfer_object_mask(path_to_inputs, path_to_refs, dataset):
    """
    Test the transferObjectMask primitive
    """
    # Open the donor (source of the OBJMASK) and the target frame.
    donor_ad = astrodata.open(os.path.join(path_to_inputs, dataset))
    target_ad = astrodata.open(
        os.path.join(path_to_inputs, object_mask_datasets[dataset]))

    primitives = NIRIImage([target_ad])
    primitives.streams['donor'] = [donor_ad]

    # Transfer the object mask from the 'donor' stream onto the target.
    ad_out = primitives.transferObjectMask(source="donor", dq_threshold=0.01,
                                           dilation=1.5,
                                           interpolant="linear").pop()
    ad_out.write(overwrite=True)

    # Regression check against the stored reference product.
    ad_ref = astrodata.open(os.path.join(path_to_refs, ad_out.filename))
    assert_array_equal(ad_out[0].OBJMASK, ad_ref[0].OBJMASK)
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@core@tests@test_image.py@.PATH_END.py
|
{
"filename": "tests.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/geotail/tests/tests.py",
"type": "Python"
}
|
import os
import unittest
from pytplot import data_exists
import pyspedas
class LoadTestCases(unittest.TestCase):
    """Smoke tests: each Geotail loader should produce its expected variables."""

    def test_downloadonly(self):
        # With downloadonly=True the loader returns local file paths
        # instead of creating tplot variables.
        downloaded = pyspedas.projects.geotail.mgf(downloadonly=True)
        self.assertTrue(os.path.exists(downloaded[0]))

    def test_load_mgf_data(self):
        pyspedas.projects.geotail.mgf(time_clip=True)
        self.assertTrue(data_exists('IB_vector'))
        # A second datatype over an explicit time range.
        pyspedas.projects.geotail.mgf(
            datatype='edb3sec',
            trange=['1998-11-3/09:18:00', '1998-11-3/09:28:00'])
        self.assertTrue(data_exists('BGSE'))

    def test_load_efd_data(self):
        pyspedas.projects.geotail.efd()
        self.assertTrue(data_exists('Es'))

    def test_load_lep_data(self):
        pyspedas.projects.geotail.lep()
        self.assertTrue(data_exists('N0'))

    def test_load_cpi_data(self):
        pyspedas.projects.geotail.cpi()
        self.assertTrue(data_exists('SW_P_Den'))

    def test_load_epic_data(self):
        pyspedas.projects.geotail.epic()
        self.assertTrue(data_exists('IDiffI_I'))
        # notplot=True returns the data in a dict rather than tplot vars.
        epic_data = pyspedas.projects.geotail.epic(notplot=True)
        self.assertTrue('IDiffI_I' in epic_data)

    def test_load_pwi_data(self):
        pyspedas.projects.geotail.pwi()
        self.assertTrue(data_exists('MCAE_AVE'))
# Allow running this test module directly: python tests.py
if __name__ == '__main__':
    unittest.main()
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@geotail@tests@tests.py@.PATH_END.py
|
{
"filename": "hernquist.py",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/pyccl/halos/profiles/hernquist.py",
"type": "Python"
}
|
__all__ = ("HaloProfileHernquist",)
import numpy as np
from scipy.special import sici
from . import HaloProfileMatter
class HaloProfileHernquist(HaloProfileMatter):
    """ `Hernquist 1990
    <https://ui.adsabs.harvard.edu/abs/1990ApJ...356..359H/abstract>`_
    profile.

    .. math::
       \\rho(r) = \\frac{\\rho_0}
       {\\frac{r}{r_s}\\left(1+\\frac{r}{r_s}\\right)^3}

    where :math:`r_s` is related to the comoving spherical overdensity
    halo radius :math:`r_\\Delta(M)` through the concentration
    parameter :math:`c(M)` as

    .. math::
       r_\\Delta(M) = c(M)\\,r_s

    and the normalization :math:`\\rho_0` is

    .. math::
       \\rho_0 = \\frac{M}{2\\pi\\,r_s^3}\\left(\\frac{1+c}{c}\\right)^2

    By default, this profile is truncated at :math:`r = r_\\Delta(M)`.

    Args:
        mass_def (:class:`~pyccl.halos.massdef.MassDef` or :obj:`str`):
            a mass definition object, or a name string.
        concentration (:class:`~pyccl.halos.halo_model_base.Concentration`):
            concentration-mass relation to use with this profile.
        fourier_analytic (:obj:`bool`): set to ``True`` if you want to compute
            the Fourier profile analytically (and not through FFTLog).
        projected_analytic (:obj:`bool`): set to ``True`` if you want to
            compute the 2D projected profile analytically (and not
            through FFTLog).
        cumul2d_analytic (:obj:`bool`): set to ``True`` if you want to
            compute the 2D cumulative surface density analytically
            (and not through FFTLog).
        truncated (:obj:`bool`): set to ``True`` if the profile should be
            truncated at :math:`r = r_\\Delta`.
    """
    __repr_attrs__ = __eq_attrs__ = (
        "fourier_analytic", "projected_analytic", "cumul2d_analytic",
        "truncated", "mass_def", "concentration", "precision_fftlog",)

    def __init__(self, *, mass_def, concentration,
                 truncated=True,
                 fourier_analytic=False,
                 projected_analytic=False,
                 cumul2d_analytic=False):
        self.truncated = truncated
        self.fourier_analytic = fourier_analytic
        self.projected_analytic = projected_analytic
        self.cumul2d_analytic = cumul2d_analytic
        if fourier_analytic:
            self._fourier = self._fourier_analytic
        # The analytic projected/cumulative formulae below integrate the
        # *untruncated* profile along the line of sight, so they are
        # incompatible with truncation.
        if projected_analytic:
            if truncated:
                raise ValueError("Analytic projected profile not supported "
                                 "for truncated Hernquist. Set `truncated` or "
                                 "`projected_analytic` to `False`.")
            self._projected = self._projected_analytic
        if cumul2d_analytic:
            if truncated:
                raise ValueError("Analytic cumuative 2d profile not supported "
                                 "for truncated Hernquist. Set `truncated` or "
                                 "`cumul2d_analytic` to `False`.")
            self._cumul2d = self._cumul2d_analytic
        super().__init__(mass_def=mass_def, concentration=concentration)
        self.update_precision_fftlog(padding_hi_fftlog=1E2,
                                     padding_lo_fftlog=1E-4,
                                     n_per_decade=1000,
                                     plaw_fourier=-2.)

    def _norm(self, M, Rs, c):
        # Hernquist normalization rho_0 from mass, scale radius and
        # concentration: M / (2 pi Rs^3) * ((1+c)/c)^2.
        return M / (2 * np.pi * Rs**3 * (c / (1 + c))**2)

    def _real(self, cosmo, r, M, a):
        """3D real-space profile rho(r); r and M may be scalars or vectors."""
        r_use = np.atleast_1d(r)
        M_use = np.atleast_1d(M)

        # Comoving virial radius
        R_M = self.mass_def.get_radius(cosmo, M_use, a) / a
        c_M = self.concentration(cosmo, M_use, a)
        R_s = R_M / c_M

        norm = self._norm(M_use, R_s, c_M)
        # x has shape (n_M, n_r) via broadcasting.
        x = r_use[None, :] / R_s[:, None]
        prof = norm[:, None] / (x * (1 + x)**3)
        if self.truncated:
            # Zero the profile outside the halo boundary.
            prof[r_use[None, :] > R_M[:, None]] = 0

        # Squeeze back to the dimensionality of the scalar inputs.
        if np.ndim(r) == 0:
            prof = np.squeeze(prof, axis=-1)
        if np.ndim(M) == 0:
            prof = np.squeeze(prof, axis=0)
        return prof

    def _fx_projected(self, x):
        # Dimensionless line-of-sight projection of the untruncated profile,
        # with separate closed forms for x<1 and x>1 and the analytic limit
        # 2/15 at x == 1.

        def f1(xx):
            # x < 1 branch (x^2 - 1 < 0: use arcsinh form).
            x2m1 = xx * xx - 1
            sqx2m1 = np.sqrt(-x2m1)
            return (-3 / 2 / x2m1**2
                    + (x2m1+3) * np.arcsinh(sqx2m1 / xx) / 2 / (-x2m1)**2.5)

        def f2(xx):
            # x > 1 branch (x^2 - 1 > 0: use arcsin form).
            x2m1 = xx * xx - 1
            sqx2m1 = np.sqrt(x2m1)
            return (-3 / 2 / x2m1**2
                    + (x2m1+3) * np.arcsin(sqx2m1 / xx) / 2 / x2m1**2.5)

        xf = x.flatten()
        # Third entry in funclist is the "otherwise" value, i.e. x == 1.
        return np.piecewise(xf,
                            [xf < 1, xf > 1],
                            [f1, f2, 2./15.]).reshape(x.shape)

    def _projected_analytic(self, cosmo, r, M, a):
        """Analytic 2D projected profile (untruncated only)."""
        r_use = np.atleast_1d(r)
        M_use = np.atleast_1d(M)

        # Comoving virial radius
        R_M = self.mass_def.get_radius(cosmo, M_use, a) / a
        c_M = self.concentration(cosmo, M_use, a)
        R_s = R_M / c_M

        x = r_use[None, :] / R_s[:, None]
        prof = self._fx_projected(x)
        norm = 2 * R_s * self._norm(M_use, R_s, c_M)
        prof = prof[:, :] * norm[:, None]

        if np.ndim(r) == 0:
            prof = np.squeeze(prof, axis=-1)
        if np.ndim(M) == 0:
            prof = np.squeeze(prof, axis=0)
        return prof

    def _fx_cumul2d(self, x):
        # Dimensionless cumulative surface density of the untruncated
        # profile; the value 1/3 is the analytic limit at x == 1.

        def f1(xx):
            x2m1 = xx * xx - 1
            sqx2m1 = np.sqrt(-x2m1)
            return (1 + 1 / x2m1
                    + (x2m1 + 1) * np.arcsinh(sqx2m1 / xx) / (-x2m1)**1.5)

        def f2(xx):
            x2m1 = xx * xx - 1
            sqx2m1 = np.sqrt(x2m1)
            return (1 + 1 / x2m1
                    - (x2m1 + 1) * np.arcsin(sqx2m1 / xx) / x2m1**1.5)

        xf = x.flatten()
        f = np.piecewise(xf,
                         [xf < 1, xf > 1],
                         [f1, f2, 1./3.]).reshape(x.shape)
        return f / x**2

    def _cumul2d_analytic(self, cosmo, r, M, a):
        """Analytic 2D cumulative surface density (untruncated only)."""
        r_use = np.atleast_1d(r)
        M_use = np.atleast_1d(M)

        # Comoving virial radius
        R_M = self.mass_def.get_radius(cosmo, M_use, a) / a
        c_M = self.concentration(cosmo, M_use, a)
        R_s = R_M / c_M

        x = r_use[None, :] / R_s[:, None]
        prof = self._fx_cumul2d(x)
        norm = 2 * R_s * self._norm(M_use, R_s, c_M)
        prof = prof * norm[:, None]

        if np.ndim(r) == 0:
            prof = np.squeeze(prof, axis=-1)
        if np.ndim(M) == 0:
            prof = np.squeeze(prof, axis=0)
        return prof

    def _fourier_analytic(self, cosmo, k, M, a):
        """Analytic Fourier-space profile via sine/cosine integrals."""
        M_use = np.atleast_1d(M)
        k_use = np.atleast_1d(k)

        # Comoving virial radius
        R_M = self.mass_def.get_radius(cosmo, M_use, a) / a
        c_M = self.concentration(cosmo, M_use, a)
        R_s = R_M / c_M

        x = k_use[None, :] * R_s[:, None]
        Si2, Ci2 = sici(x)
        # CONSISTENCY FIX: use the normalized M_use (as every other method
        # does) rather than the raw M argument.
        P1 = M_use / ((c_M / (c_M + 1))**2 / 2)
        c_Mp1 = c_M[:, None] + 1
        if self.truncated:
            # Truncated transform: finite integral up to r_Delta.
            Si1, Ci1 = sici(c_Mp1 * x)
            P2 = x * np.sin(x) * (Ci1 - Ci2) - x * np.cos(x) * (Si1 - Si2)
            P3 = (-1 + np.sin(c_M[:, None] * x) / (c_Mp1**2 * x)
                  + c_Mp1 * np.cos(c_M[:, None] * x) / (c_Mp1**2))
            prof = P1[:, None] * (P2 - P3) / 2
        else:
            # Untruncated transform over the full radial range.
            P2 = (-x * (2 * np.sin(x) * Ci2 + np.pi * np.cos(x))
                  + 2 * x * np.cos(x) * Si2 + 2) / 4
            prof = P1[:, None] * P2

        if np.ndim(k) == 0:
            prof = np.squeeze(prof, axis=-1)
        if np.ndim(M) == 0:
            prof = np.squeeze(prof, axis=0)
        return prof
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@pyccl@halos@profiles@hernquist.py@.PATH_END.py
|
{
"filename": "simple_autocorrelate.py",
"repo_name": "astrostat/LIRA",
"repo_path": "LIRA_extracted/LIRA-master/lira/inst/docs/examples/PyProcUtils/simple_autocorrelate.py",
"type": "Python"
}
|
def autocorr(x, maxlag=-1):
    """Return the autocorrelation of *x* for non-negative lags.

    Parameters
    ----------
    x : sequence of numbers
        The input series.
    maxlag : int, optional
        Absolute end index (exclusive) into the full correlation array;
        lag k corresponds to index ``len(x) - 1 + k``.  A negative value
        (the default) returns all non-negative lags.

    Returns
    -------
    numpy.ndarray
        ``correlate(x, x, 'full')`` sliced from the zero-lag element.
    """
    result = numpy.correlate(x, x, mode='full')
    # The 'full' output has length 2*len(x) - 1; the zero-lag element
    # sits exactly in the middle.  Use integer division: the original
    # code's float index (and `len()` of a float) raised TypeError.
    mid = result.size // 2
    endlag = result.size if maxlag < 0 else maxlag
    return result[mid:endlag]
|
astrostatREPO_NAMELIRAPATH_START.@LIRA_extracted@LIRA-master@lira@inst@docs@examples@PyProcUtils@simple_autocorrelate.py@.PATH_END.py
|
{
"filename": "example.ipynb",
"repo_name": "heracles-ec/heracles",
"repo_path": "heracles_extracted/heracles-main/examples/example.ipynb",
"type": "Jupyter Notebook"
}
|
# Two-point statistics from *Heracles*
This notebook demonstrates how *Heracles* extracts the two-point statistics from a 3×2pt catalogue.
<div class="alert alert-info">
**Important note**
This notebook is only meant to give you an idea of **how *Heracles* works**.
It does **not** show everything that *Heracles* can do.
**This is a toy, treat it as such!**
</div>
## Setup
Some required imports, nothing fancy.
```python
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
import matplotlib as mpl
```
Now the *Heracles* imports:
* The top-level `heracles` module contains all general user-facing functionality.
* The `heracles.healpy` module contains mappers based on the `healpy` package.
* The `heracles.notebook` module contains a progress bar based on the `ipywidgets` package.
```python
import heracles
import heracles.healpy
from heracles.notebook import Progress
```
If there is an import error on the last line of the previous block, it means you need to install the `ipywidgets` package.
## Data set
The example uses a prepared [*example data set*](https://dx.doi.org/10.5281/zenodo.13622598). The catalogue contains about 75 million galaxies over a contiguous 5% of the sky.
To download the example data, run the following cell.
```python
import helpers
with Progress("example data") as progress:
helpers.get_example_data(progress)
```
VBox()
## Basic parameters
This is the resolution parameter for measuring spectra from *HEALPix* maps. Here, we use `nside = 1024` since that is the resolution at which the example data has been created. A value `lmax` of approximate 1.5x `nside` is fairly safe in terms of errors introduced by *HEALPix*.
```python
nside = 1024
lmax = 1500
```
## Catalogues
*Heracles* provides a flexible interface for loading catalogues from FITS files or arrays. It also provides a base class that can quickly be extended, e.g., to databases or more.
Here we use the FITS interface to read a catalogue from file. We could specify the columns to read, but the example catalogue does not have much more than we need here.
Importantly, catalogues are never read into memory all at once. Their `page_size` attribute determines how many rows are read at a time.
```python
# load the FITS catalogue
catalog = heracles.FitsCatalog("catalog.fits")
```
Using the `catalog.add_filter()` method, we could add filters to the catalogue here, e.g., to strip rows with invalid values, or apply an extra mask.
### Playground
The `Catalog` interface essentially provides an iterator over pages of rows. You can use it as such:
```python
nrows = 0
for page in catalog:
nrows += page.size
# no need to iterate to get the number of rows, really
assert nrows == catalog.size
print(f'there are {nrows:_} rows in your catalogue')
```
there are 76_889_615 rows in your catalogue
The `page` object is a mapping of column names to rows, with some additional features:
* The number of rows, `page.size`
* The names of columns, `page.names`
* A view of the underlying data, `page.data`
* Make a copy of the page, `page.copy()`
* Delete specific rows, `page.delete(where)`
* Return multiple columns at once, using `page['a', 'b']` or `page[['a', 'b']]`
* Return columns while checking for invalid values, using `page.get('a', 'b')`
## Visibility maps
The statistics of galaxy positions require knowledge of the a priori probability of detecting a galaxy at each point in the sky. We call this the *visibility*, and we use it in the form of a *visibility map*, which is a full-sky map of detection probabilities.
```python
vmap = hp.read_map("vmap.fits.gz")
# fix UNSEEN pixels to zero and rescale to nside
vmap[vmap == hp.UNSEEN] = 0.
vmap = hp.ud_grade(vmap, 2*nside)
```
For a real survey, the visibility is a complicated function of position, observing conditions, selection, and tomographic bin. However, in this simulated example, the selection is the same for all positions and tomographic bins, and the visibility map is simply the footprint map of the survey.
```python
# set visibility map of entire catalogue
catalog.visibility = vmap
```
### Playground
Quick inspection of the visibility map:
```python
hp.mollview(vmap, title="visibility", cmap="binary", bgcolor="none")
hp.graticule()
plt.show()
```

## Tomographic binning
The `catalog` object can be used to read the entire catalogue. Of course, we would like to split our galaxies into individual tomographic bins. In the example data set, each galaxy has already been assigned a label for its tomographic bin, in the `"BIN"` column.
To perform the tomographic binning, we construct a dictionary that assigns an integer bin ID to a selection from the catalogue for each tomographic bin. This is done using the usual `[]` indexing syntax, which returns a new view of the `FitsCatalog` with the given selection applied.
```python
catalogs = {i: catalog[f"BIN == {i}"] for i in range(1, 7)}
```
```python
catalogs
```
{1: catalog.fits['BIN == 1'],
2: catalog.fits['BIN == 2'],
3: catalog.fits['BIN == 3'],
4: catalog.fits['BIN == 4'],
5: catalog.fits['BIN == 5'],
6: catalog.fits['BIN == 6']}
Catalogue views (i.e., selections) have their own individual `visibility`, and inherit the visibility of the base catalogue by default.
Instead of the `[]` syntax, views can also be created using the `catalog.where()` method, where the visibility can be given as a parameter.
```python
catalogs[3].visibility is vmap
```
True
Note that `catalogs` is a simple mapping of integer bin IDs to instances of type `Catalog`. You can create such a mapping in any way you like; the values do not have to come from the same `FitsCatalog`, or even the same type of catalogue.
### Playground
Return the number of rows in tomographic bin 5.
```python
# need to sum because FITS cannot tell the size a priori
nrows = sum(page.size for page in catalogs[5])
print(f"tomographic bin 5 contains {nrows:_} rows")
```
tomographic bin 5 contains 12_752_343 rows
## Fields
To turn catalogues into spectra, *Heracles* requires a so-called mapper object that knows how to turn positions and values into spherical functions.
Here, we construct a `HealpixMapper` instance with our desired `nside` and `lmax` parameters.
When computing spherical harmonic coefficients, the *HEALPix* mapper will also remove the pixel window function, unless `deconvolve=False` is passed.
```python
mapper = heracles.healpy.HealpixMapper(nside, lmax)
```
To specify the fields we wish to analyse, we construct a dictionary of keys and field definitions.
Each field receives a mapper and a list of columns that it reads, plus potentially some other options.
For a standard 3×2pt analysis in harmonic space, we need
* A position field (`"P"`) for angular clustering and galaxy-galaxy lensing;
* A shear field (`"G"`) for cosmic shear and galaxy-galaxy lensing.
When passing the column names, we could specify that the shear field should flip the sign of a column by adding a minus sign (e.g., `"-E2"` to flip the second shear component). However, we do not need to do that here.
Finally, we define the optional names of the masks (`"V"`, `"W"`) of the fields. These will be used further down below to compute mixing matrices.
```python
fields = {
"P": heracles.Positions(
mapper,
"RA",
"DEC",
mask="V",
),
"G": heracles.Shears(
mapper,
"RA",
"DEC",
"E1",
"E2",
"W",
mask="W",
),
}
```
## Mapping
The next step is to map the catalogues to position and shear fields for each tomographic bin. *Heracles* can map a set of catalogues all at once, using the `map_catalogs()` functions. We only need to pass in the fields and catalogues constructed previously.
If there is enough memory to hold _all_ maps in memory at the same time, we can use `parallel=True` to construct maps for all entries in `catalogs` (i.e., tomographic bins) at the same time. Here, this has the advantage of reading the entire FITS file only once.
```python
with Progress("mapping") as progress:
data = heracles.map_catalogs(fields, catalogs, parallel=True, progress=progress)
```
VBox()
/Users/ntessore/code/heracles-ec/heracles/heracles/fields.py:302: UserWarning: positions and visibility have different size
warnings.warn("positions and visibility have different size")
The resulting `data` object is a dictionary with keys corresponding to each field (`"P"`, `"G"`) and catalogue (`0`, `1`, ...). Results from *Heracles* are always of this form.
```python
list(data.keys())[:6] + ["..."]
```
[('P', 1), ('G', 1), ('P', 2), ('G', 2), ('P', 3), ('G', 3), '...']
### Playground
Since the *HEALPix* mapper produces actual maps, we can take a quick look at the data for a tomographic bin ID, say 3.
```python
i = 3
fig, ax = plt.subplots(2, 2, figsize=(12, 6))
fig.tight_layout()
plt.sca(ax[0, 0])
hp.cartview(data["P", i], title="P", cmap="binary", min=-1., max=4., hold=True,
lonra=[329.5, 53.5], latra=[-45.6, -15.7])
ax[0, 1].axis('off')
plt.sca(ax[1, 0])
hp.cartview(data['G', i][0], title="G1", cmap="RdGy", min=-0.5, max=0.5, hold=True,
lonra=[329.5, 53.5], latra=[-45.6, -15.7])
plt.sca(ax[1, 1])
hp.cartview(data['G', i][1], title="G2", cmap="RdGy", min=-0.5, max=0.5, hold=True,
lonra=[329.5, 53.5], latra=[-45.6, -15.7])
plt.show()
```

## Alms
Since we are working in harmonic space, the real-space maps we created so far are not what we are actually after. We therefore transform the data into harmonic space (i.e. $a_{lm}$) using the `transform()` function.
```python
with Progress("transform") as progress:
alms = heracles.transform(fields, data, progress=progress)
```
VBox()
The resulting `alms` dictionary has the same keys as `data`, but contains the spherical harmonic coefficients of the maps.
```python
list(alms.keys())[:6] + ["..."]
```
[('P', 1), ('G', 1), ('P', 2), ('G', 2), ('P', 3), ('G', 3), '...']
## Two-point statistics
We are now able to compute two-point statistics in the form of angular power spectra.
To do so, we simply call the `angular_power_spectra()` function on `alms`.
The function will automatically remove the noise bias from the spectra (unless `debias=False` is passed), and can optionally compute binned spectra (by passing the `bins=` and `weights=` parameters).
```python
cls = heracles.angular_power_spectra(alms)
```
We could limit the `cls` we want computed using the `include=` and `exclude=` parameters of `angular_power_spectra()`, but here we have computed all combinations. They are very many, arranged into the familiar dictionary format with entries such as `("P", "G_E", 5, 4)` for the position and *E*-mode cross-spectrum for bins 5 and 4 (in order).
```python
list(cls.keys())[:6] + ["..."] + list(cls.keys())[-6:]
```
[('P', 'P', 1, 1),
('P', 'G_E', 1, 1),
('P', 'G_B', 1, 1),
('P', 'P', 1, 2),
('P', 'G_E', 1, 2),
('P', 'G_B', 1, 2),
'...',
('P', 'P', 6, 6),
('P', 'G_E', 6, 6),
('P', 'G_B', 6, 6),
('G_E', 'G_E', 6, 6),
('G_E', 'G_B', 6, 6),
('G_B', 'G_B', 6, 6)]
## Mixing matrices
We usually do not have data over the entire sky, but are limited by the survey footprint and visibility.
This affects the harmonic-space two-point statistics, and is modelled by the so-called mixing matrices.
Computing the mixing matrices requires additional computation for the visibility and shear weights in each tomographic bin.
Square mixing matrices up to `lmax` require input spectra with 2x `lmax`, for which we create a new *HEALPix* mapper at twice the resolution.
```python
mapper2 = heracles.healpy.HealpixMapper(2 * nside, 2 * lmax)
```
We now use the same catalogues to map the visibility and weights, using the `map_catalogs()` function as before.
Since each field gets its own mapper, we could also have computed all maps in one go.
```python
# visibility maps are taken as-is from catalogue, so no columns
fields2 = {
"V": heracles.Visibility(
mapper2,
),
"W": heracles.Weights(
mapper2,
"RA",
"DEC",
"W",
),
}
with Progress("mapping") as progress:
data2 = heracles.map_catalogs(fields2, catalogs, parallel=True, progress=progress)
```
VBox()
The `data2` output has the same format as earlier, but now contains `"V"` and `"W"` maps as expected.
```python
list(data2.keys())[:6] + ["..."]
```
[('V', 1), ('W', 1), ('V', 2), ('W', 2), ('V', 3), ('W', 3), '...']
Next we transform the maps for the mixing matrices ...
```python
with Progress("transform") as progress:
alms2 = heracles.transform(fields2, data2, progress=progress)
```
VBox()
... and compute their angular power spectra.
```python
cls2 = heracles.angular_power_spectra(alms2)
```
With the angular power spectra of visibility and weight maps available, we can compute all mixing matrices with the `mixing_matrices()` function.
It can optionally compute mixing matrices for binned spectra, by providing the `bins=` and `weights=` parameters.
```python
with Progress("mixmats") as progress:
mms = heracles.mixing_matrices(fields, cls2, l1max=lmax, l2max=lmax, progress=progress)
```
VBox()
OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
The mixing matrices are returned in a dictionary with names matching the _output_ angular power spectra, after mixing by a given matrix:
* `("P", "P", i, j)` for
  * `("P", "P", i, j) → ("P", "P", i, j)`;
* `("P", "G_E", i, j)` for
  * `("P", "G_E", i, j) → ("P", "G_E", i, j)`, as well as
  * `("P", "G_B", i, j) → ("P", "G_B", i, j)`;
* `("G_E", "G_E", i, j)` for
  * `("G_E", "G_E", i, j) → ("G_E", "G_E", i, j)`, as well as
  * `("G_B", "G_B", i, j) → ("G_B", "G_B", i, j)`;
* `("G_B", "G_B", i, j)` for
  * `("G_E", "G_E", i, j) → ("G_B", "G_B", i, j)`, as well as
  * `("G_B", "G_B", i, j) → ("G_E", "G_E", i, j)`;
* `("G_E", "G_B", i, j)` for
  * `("G_E", "G_B", i, j) → ("G_E", "G_B", i, j)`, as well as
  * `("G_E", "G_B", j, i) → ("G_E", "G_B", j, i)`.
For more details on mixing matrices in general, see, e.g., the paper by Brown, Castro & Taylor (2005).
```python
list(mms.keys())[:6] + ["..."] + list(mms.keys())[-6:]
```
[('P', 'P', 1, 1),
('P', 'G_E', 1, 1),
('P', 'P', 1, 2),
('P', 'G_E', 1, 2),
('P', 'P', 1, 3),
('P', 'G_E', 1, 3),
'...',
('G_E', 'G_B', 5, 6),
('P', 'P', 6, 6),
('P', 'G_E', 6, 6),
('G_E', 'G_E', 6, 6),
('G_B', 'G_B', 6, 6),
('G_E', 'G_B', 6, 6)]
### Playground
As before, the mapper has produced *HEALPix* maps, so we can have a look at the visibility and weight map for bin ID 3, say.
```python
i = 3
fig, ax = plt.subplots(1, 2, figsize=(12, 8))
fig.tight_layout()
plt.sca(ax[0])
hp.cartview(data2["V", i], title="V", cmap="binary", hold=True,
lonra=[329.5, 53.5], latra=[-45.6, -15.7])
plt.sca(ax[1])
hp.cartview(data2["W", i], title="W", cmap="binary", hold=True,
lonra=[329.5, 53.5], latra=[-45.6, -15.7])
plt.show()
```

And here is the corresponding `("G_E", "G_E", i, i)` mixing matrix:
```python
plt.imshow(mms["G_E", "G_E", i, i], cmap="binary",
norm=mpl.colors.LogNorm(vmin=1e-7))
plt.colorbar(pad=0.025, fraction=0.0465)
plt.show()
```

## Theory
To model the expected angular power spectra, we require the mixing matrices and the expected full-sky angular power spectra from theory.
Here, we use *CAMB* to compute the latter.
```python
import camb
from camb.sources import SplinedSourceWindow
```
We set the *CAMB* cosmology to match the simulation that created the example data set.
```python
# cosmology for the simulation
h, Oc, Ob = 0.7, 0.25, 0.05
# set up CAMB parameters for matter angular power spectrum
pars = camb.set_params(
    H0=100 * h,
    omch2=Oc * h**2,
    ombh2=Ob * h**2,
    NonLinear=camb.model.NonLinear_both,
)
pars.Want_CMB = False
pars.min_l = 1
# trailing semicolon suppresses the notebook's display of the return value
pars.set_for_lmax(2 * lmax, lens_potential_accuracy=1);
```
We also need the redshift distributions for the tomographic bins.
```python
# load the tabulated redshift distributions n(z) for the tomographic bins
# (z is the redshift grid; nz presumably has one row per bin — see loop below)
with np.load("nz.npz") as npz:
    z, nz = npz["z"], npz["nz"]
```
Given the redshift distributions, we can construct the *CAMB* source distributions for positions (counts) and shears (lensing).
```python
# Build one "counts" and one "lensing" source window per tomographic bin.
# CAMB labels the windows in insertion order, so bin i becomes W{2i-1}
# (counts) and W{2i} (lensing) — the theory loop below relies on this.
sources = []
for nz_i in nz:  # the previous enumerate index was unused (and clobbered `i`)
    sources += [
        SplinedSourceWindow(source_type="counts", z=z, W=nz_i),
        SplinedSourceWindow(source_type="lensing", z=z, W=nz_i),
    ]
pars.SourceWindows = sources
```
Use the `pars` we constructed above to compute the full sky theory spectra up to `lmax`, setting `raw_cl=True` to return unscaled full-sky spectra.
```python
# run CAMB and fetch the full-sky source angular power spectra;
# raw_cl=True returns unscaled C_l (no l(l+1)/2pi factor)
results = camb.get_results(pars)
camb_cls = results.get_source_cls_dict(lmax=lmax, raw_cl=True)
```
This is the factor needed to convert from the convergence spectra returned by *CAMB* to shear *E*-mode spectra.
```python
# multipole-dependent factor converting CAMB convergence spectra to
# shear E-mode spectra
l = np.arange(lmax + 1)
numer = -np.sqrt((l + 2) * (l + 1) * l * (l - 1))
# the clip avoids division by zero at l = 0 (the numerator vanishes there)
fl = numer / np.clip(l * (l + 1), 1, None)
```
Now we can compute the theory spectra for our observations, using the *CAMB* results and the mixing matrices we computed earlier. We store everything in a dictionary using the same format as before.
```python
# expected (partial-sky) spectra: mixing matrices applied to full-sky theory
theory = {}
# tomographic bins are 1-based; j >= i visits each unordered bin pair once
for i in range(1, 7):
    for j in range(i, 7):
        # get the full-sky spectra; B-mode is assumed zero
        # CAMB window labels: W{2i-1} = counts of bin i, W{2i} = lensing
        # of bin i (windows were added in that order above)
        cl_pp = camb_cls[f"W{2 * i - 1}xW{2 * j - 1}"]
        # fl converts the CAMB convergence spectra to shear E-mode spectra
        cl_pe = fl * camb_cls[f"W{2 * i - 1}xW{2 * j}"]
        cl_pb = np.zeros_like(cl_pe)
        cl_ep = fl * camb_cls[f"W{2 * i}xW{2 * j - 1}"]
        cl_bp = np.zeros_like(cl_ep)
        cl_ee = fl**2 * camb_cls[f"W{2 * i}xW{2 * j}"]
        cl_bb = np.zeros_like(cl_ee)
        cl_eb = np.zeros_like(cl_ee)
        cl_be = np.zeros_like(cl_ee)
        # all mixing matrix combinations
        # the ("P", "G_E") mixing matrix also applies to the P x G_B spectra
        theory["P", "P", i, j] = mms["P", "P", i, j] @ cl_pp
        theory["P", "G_E", i, j] = mms["P", "G_E", i, j] @ cl_pe
        theory["P", "G_B", i, j] = mms["P", "G_E", i, j] @ cl_pb
        theory["P", "G_E", j, i] = mms["P", "G_E", j, i] @ cl_ep
        theory["P", "G_B", j, i] = mms["P", "G_E", j, i] @ cl_bp
        # E/B mixing: ("G_E", "G_E") maps EE->EE and BB->BB, while
        # ("G_B", "G_B") is the leakage matrix (EE->BB and BB->EE)
        theory["G_E", "G_E", i, j] = mms["G_E", "G_E", i, j] @ cl_ee + mms["G_B", "G_B", i, j] @ cl_bb
        theory["G_B", "G_B", i, j] = mms["G_B", "G_B", i, j] @ cl_ee + mms["G_E", "G_E", i, j] @ cl_bb
        # the ("G_E", "G_B", i, j) matrix also mixes the (j, i) cross-spectrum
        theory["G_E", "G_B", i, j] = mms["G_E", "G_B", i, j] @ cl_eb
        theory["G_E", "G_B", j, i] = mms["G_E", "G_B", i, j] @ cl_be
```
## Results
Finally, we can plot the results for positions and shears.
```python
# multipole axis for the plots below
ell = np.arange(lmax + 1)
```
```python
# triangle plot: measured galaxy clustering spectra (faded) vs. theory
fig, ax = plt.subplots(6, 6, figsize=(6, 6), sharex=True, sharey=True)
for i in range(1, 7):
    # hide the unused panels above the diagonal
    for j in range(1, i):
        ax[j - 1, i - 1].axis("off")
    for j in range(i, 7):
        panel = ax[j - 1, i - 1]
        # drop the monopole (l = 0) from the plotted range
        panel.plot(ell[1:], cls["P", "P", i, j][1:], c="C0", lw=1.5, zorder=3.0, alpha=0.5)
        panel.plot(ell[1:], theory["P", "P", i, j][1:], c="C0", lw=1.0, zorder=4.0)
        panel.axhline(0.0, c="k", lw=0.8, zorder=-1)
        panel.tick_params(axis="both", which="both", direction="in")
# shared axes: configuring the first panel configures them all
anchor = ax[0, 0]
anchor.set_xscale("log")
anchor.set_xlim(1 / 3, lmax * 3)
anchor.xaxis.get_major_locator().set_params(numticks=99)
anchor.xaxis.get_minor_locator().set_params(numticks=99, subs=np.arange(0.1, 1.0, 0.1))
anchor.set_yscale("symlog", linthresh=1e-7, linscale=0.45, subs=np.arange(0.1, 1.0, 0.1))
anchor.set_ylim(-2e-7, 2e-6)
fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.0, hspace=0.0)
fig.supxlabel("angular mode $\\ell$", y=-0.05, va="top")
fig.supylabel("galaxy clustering $C_\\ell$", x=-0.1, ha="right")
plt.show()
```

```python
# triangle plot: cosmic shear E-mode (C0) and B-mode (C1), data vs. theory
fig, ax = plt.subplots(6, 6, figsize=(6, 6), sharex=True, sharey=True)
for i in range(1, 7):
    # hide the unused panels above the diagonal
    for j in range(1, i):
        ax[j - 1, i - 1].axis("off")
    for j in range(i, 7):
        panel = ax[j - 1, i - 1]
        # skip the first two multipoles (l = 0, 1)
        panel.plot(ell[2:], cls["G_E", "G_E", i, j][2:], c="C0", lw=1.5, zorder=3.0, alpha=0.5)
        panel.plot(ell[2:], theory["G_E", "G_E", i, j][2:], c="C0", lw=1.0, zorder=4.0)
        panel.plot(ell[2:], cls["G_B", "G_B", i, j][2:], c="C1", lw=1.5, zorder=1.0, alpha=0.5)
        panel.plot(ell[2:], theory["G_B", "G_B", i, j][2:], c="C1", lw=1.0, zorder=2.0)
        panel.axhline(0.0, c="k", lw=0.8, zorder=-1)
        panel.tick_params(axis="both", which="both", direction="in")
# shared axes: configuring the first panel configures them all
anchor = ax[0, 0]
anchor.set_xscale("log")
anchor.set_xlim(1 / 3, lmax * 3)
anchor.xaxis.get_major_locator().set_params(numticks=99)
anchor.xaxis.get_minor_locator().set_params(numticks=99, subs=np.arange(0.1, 1.0, 0.1))
anchor.set_yscale("symlog", linthresh=1e-10, linscale=0.45, subs=np.arange(0.1, 1.0, 0.1))
anchor.set_ylim(-3e-10, 5e-9)
fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.0, hspace=0.0)
fig.supxlabel("angular mode $\\ell$", y=-0.05, va="top")
fig.supylabel("cosmic shear $C_\\ell$", x=-0.1, ha="right")
plt.show()
```

```python
# full 6x6 grid: galaxy--galaxy lensing, P x G_E (C0) and P x G_B (C1)
fig, ax = plt.subplots(6, 6, figsize=(6, 6), sharex=True, sharey=True)
for i in range(1, 7):
    for j in range(1, 7):
        panel = ax[j - 1, i - 1]
        # skip the first two multipoles (l = 0, 1)
        panel.plot(ell[2:], cls["P", "G_E", i, j][2:], c="C0", lw=1.5, zorder=3.0, alpha=0.5)
        panel.plot(ell[2:], theory["P", "G_E", i, j][2:], c="C0", lw=1.0, zorder=4.0)
        panel.plot(ell[2:], cls["P", "G_B", i, j][2:], c="C1", lw=1.5, zorder=1.0, alpha=0.5)
        panel.plot(ell[2:], theory["P", "G_B", i, j][2:], c="C1", lw=1.0, zorder=2.0)
        panel.axhline(0.0, c="k", lw=0.8, zorder=-1)
        panel.tick_params(axis="both", which="both", direction="in")
# shared axes: configuring the first panel configures them all
anchor = ax[0, 0]
anchor.set_xscale("log")
anchor.set_xlim(1 / 3, lmax * 3)
anchor.xaxis.get_major_locator().set_params(numticks=99)
anchor.xaxis.get_minor_locator().set_params(numticks=99, subs=np.arange(0.1, 1.0, 0.1))
anchor.set_yscale("symlog", linthresh=1e-9, linscale=0.45, subs=np.arange(0.1, 1.0, 0.1))
anchor.set_ylim(-8e-8, 4e-8)
fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.0, hspace=0.0)
fig.supxlabel("angular mode $\\ell$", y=-0.05, va="top")
fig.supylabel("galaxy--galaxy lensing $C_\\ell$", x=-0.1, ha="right")
plt.show()
```

## Output
Additionally, we can write the data we produced to file.
```python
# save the spectra and mixing matrices as FITS files,
# overwriting any existing files (clobber=True)
heracles.write_cls("example-spectra.fits", cls, clobber=True)
heracles.write_mms("example-mixmats.fits", mms, clobber=True)
```
|
heracles-ecREPO_NAMEheraclesPATH_START.@heracles_extracted@heracles-main@examples@example.ipynb@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.