metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "test_pipeline.py",
"repo_name": "igrins/plp",
"repo_path": "plp_extracted/plp-master/tests/test_pipeline.py",
"type": "Python"
}
|
from igrins.pipeline import create_pipeline
from igrins.driver import Step
def test1():
    """Build a two-step pipeline and print its generated docstring."""

    def first_step(obsset):
        # Step taking no extra keyword arguments.
        print(1)

    def second_step(obsset, lacosmic_thresh):
        # Step parameterized by a cosmic-ray rejection threshold.
        print(2, lacosmic_thresh)

    pipeline_steps = [
        Step("step 1", first_step, args0=True),
        Step("step 2", second_step, lacosmic_thresh=0.),
    ]
    pipeline = create_pipeline("flat", pipeline_steps)
    print(pipeline.__doc__)


if __name__ == "__main__":
    test1()
|
igrinsREPO_NAMEplpPATH_START.@plp_extracted@plp-master@tests@test_pipeline.py@.PATH_END.py
|
{
"filename": "generate_examples.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/testing/generate_examples.py",
"type": "Python"
}
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To more easily debug failures use (or override) the --save_graphdefs flag to
place text proto graphdefs into the generated zip files.
"""
import argparse
import os
import sys
import tensorflow as tf
from tensorflow.lite.testing import generate_examples_lib
from tensorflow.lite.testing import mlir_convert
# Test-name regexes known to fail when converting through the MLIR converter,
# mapped to the id of the tracking bug.  Matching tests are treated as known
# failures unless --known_bugs_are_errors is passed.
MLIR_CONVERTER_KNOWN_BUGS = {
    # We need to support dynamic_rnn case.
    r"unidirectional_sequence_rnn.*is_dynamic_rnn=True": "128997102",
    r"unidirectional_sequence_lstm.*is_dynamic_rnn=True": "128997102",
    # TODO(b/124314620): Test cases work with tf_tfl_translate binary
    # but not TFLiteConverter interface.
    # Concat & SpaceToDepth with uint8 doesn't work.
    r"concat.*type=tf\.uint8": "124314620",
    r"space_to_depth.*type=tf\.uint8": "124314620",
    r"l2norm.*fully_quantize=True": "134594898",
    # The following are not really converter bugs: our kernels don't support
    # int64.
    r"div.*dtype=tf\.int64": "119126484",
    r"floor_div.*dtype=tf\.int64": "119126484",
    r"relu.*dtype=tf\.int64": "119126484",
    r"squared_difference.*dtype=tf\.int64": "119126484",
    # Post-training quantization support missing for below op in mlir.
    r"prelu.*fully_quantize=True": "156112683",
    # ResizeBilinear op kernel supports only float32 and quantized 8-bit
    # integers.
    r"resize_bilinear.*dtype=tf\.int32": "156569626",
}
# Disable GPU for now since we are just testing in TF against CPU reference
# value and creating non-device-specific graphs to export.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

# Command-line interface of the generator script.
parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
parser.add_argument(
    "output_path", help="Directory where the outputs will go.")
parser.add_argument(
    "--zip_to_output",
    type=str,
    help="Particular zip to output.",
    required=True)
parser.add_argument(
    "--known_bugs_are_errors",
    action="store_true",
    help=("If a particular model is affected by a known bug,"
          " count it as a converter error."))
parser.add_argument(
    "--ignore_converter_errors",
    action="store_true",
    # Bug fix: the previous help text ("Raise an exception if any converter
    # error is encountered.") described the opposite of this flag's meaning.
    help="Continue generating tests even if a converter error is encountered.")
parser.add_argument(
    "--save_graphdefs",
    action="store_true",
    help="Include intermediate graphdefs in the output zip files.")
parser.add_argument(
    "--run_with_flex",
    action="store_true",
    help="Whether the TFLite Flex converter is being used.")
parser.add_argument(
    "--make_edgetpu_tests",
    action="store_true",
    help="Whether to generate test cases for edgetpu.")
parser.add_argument(
    "--make_tf_ptq_tests",
    action="store_true",
    help="Whether to generate test cases for TF post-training quantization.")
parser.add_argument(
    "--hlo_aware_conversion",
    action="store_true",
    help="For TF Quantization only: whether conversion for HLO target.")
parser.add_argument(
    "--make_forward_compat_test",
    action="store_true",
    help="Make tests by setting TF forward compatibility horizon to the future")
parser.add_argument(
    "--no_tests_limit",
    action="store_true",
    help="Remove the limit of the number of tests.")
parser.add_argument(
    "--test_sets",
    type=str,
    help=("Comma-separated list of test set names to generate. "
          "If not specified, a test set is selected by parsing the name of "
          "'zip_to_output' file."))
parser.add_argument(
    "--mlir_quantizer",
    action="store_true",
    help=("Whether the new MLIR quantizer is being used."))
parser.add_argument(
    "--skip_high_dimension_inputs",
    action="store_true",
    help=("Whether to skip generating tests with high dimension input shape."))
def main(unused_args):
  """Copy the parsed FLAGS into an Options object and run the generator."""
  options = generate_examples_lib.Options()

  # Flags that map one-to-one onto Options attributes of the same name.
  for flag_name in (
      "output_path",
      "zip_to_output",
      "known_bugs_are_errors",
      "ignore_converter_errors",
      "save_graphdefs",
      "run_with_flex",
      "make_edgetpu_tests",
      "make_tf_ptq_tests",
      "make_forward_compat_test",
      "no_tests_limit",
      "mlir_quantizer",
      "skip_high_dimension_inputs",
  ):
    setattr(options, flag_name, getattr(FLAGS, flag_name))

  # Always convert through the MLIR-based converter with its known-bug list.
  options.tflite_convert_function = mlir_convert.mlir_convert
  options.known_bugs = MLIR_CONVERTER_KNOWN_BUGS

  if FLAGS.test_sets:
    # Explicit comma-separated test sets were requested.
    generate_examples_lib.generate_multi_set_examples(
        options, FLAGS.test_sets.split(","))
  else:
    # Otherwise the test set is derived from the zip_to_output file name.
    generate_examples_lib.generate_examples(options)
if __name__ == "__main__":
  FLAGS, unparsed = parser.parse_known_args()
  if unparsed:
    # Bug fix: the original never substituted anything into the
    # "Usage: %s ..." format string; interpolate both values explicitly.
    print("\nGot the following unparsed args, %r please fix.\n"
          "Usage: %s <path out> <zip file to generate>"
          % (unparsed, sys.argv[0]))
    # Use sys.exit rather than the interactive-only builtin exit().
    sys.exit(1)
  else:
    tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@testing@generate_examples.py@.PATH_END.py
|
{
"filename": "_familysrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattersmith/hoverlabel/font/_familysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `familysrc` property of scattersmith.hoverlabel.font."""

    def __init__(
        self,
        plotly_name="familysrc",
        parent_name="scattersmith.hoverlabel.font",
        **kwargs,
    ):
        # Default edit type is "none"; the caller may override it via kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattersmith@hoverlabel@font@_familysrc.py@.PATH_END.py
|
{
"filename": "partition_ranking.py",
"repo_name": "MIT-STARLab/deconfuser",
"repo_path": "deconfuser_extracted/deconfuser-main/deconfuser/partition_ranking.py",
"type": "Python"
}
|
import numpy as np
def get_ranked_partitions(possible_groups):
    """Iterate over ranked partitions of observation groups.

    Partitions are ordered so the first yielded partition has the largest
    first group (a trajectory corresponding to most observations), then the
    largest second group from the remaining observations, and so on.  A
    partition is skipped when a previously yielded partition already
    "includes" it (every group is contained in a group of the earlier,
    more-grouped partition).

    Parameters
    ----------
    possible_groups : iterable of sets of ints
        candidate groups of observation indices
        # NOTE(review): the original doc said "np.array of floats of shape
        # (?,2)", but the code consumes these as sets of indices — confirm
        # with callers.

    Yields
    ------
    list of lists of ints
        sorted lists of observation indices, one list per group
    """
    kept = []
    for candidate in get_ranked_set_partitions(possible_groups):
        # Skip candidates fully contained in an already-yielded partition
        # (one that matched even more observations).
        if any(_is_sub_partition(candidate, earlier) for earlier in kept):
            continue
        kept.append(candidate)
        yield [sorted(group) for group in candidate]
def _get_number_partitions_up_to_sum(ranked_numbers, sum_):
"""
Iterator over all possible partitions (with repetitions) of numbers that sum up to a target sum (larger numbers appear first).
Used by get_ranked_set_partitions.
(note that in the contect of planet matching, 1 is always in ranked_numbers - representing a single observation of a planet)
Parameters
----------
ranked_numbers : list of ints
list of numbers to be summed (ranked in increasing order)
sum_ : int
target sum
Returns
-------
an iterator over
tuple of ints
partitions from ranked_numbers in decreasing lexical order
"""
#index of the highest number that is lower than the target sum
i = np.searchsorted(ranked_numbers, sum_, side="right")
#if all number are higher, yield an empty partition and return to end the recursion
if i == 0:
yield ()
return
#iterate over numbers smaller than the target sum in decreasing order
for j in range(i-1,-1,-1):
#iterate over partitions of numbers summping up to the remainder (after the largest number is accounted for)
for p in _get_number_partitions_up_to_sum(ranked_numbers[:j+1], sum_ - ranked_numbers[j]):
#yield the partitions including the largest number
yield (ranked_numbers[j],) + p
def _get_set_partitions_specific(sets, specific_lengths, excluded_set):
"""
Iterator over all possible partitions of disjoint subsets with specified lengths
Used by get_ranked_set_partitions.
Parameters
----------
sets : list of python sets ranked by length (in decreasing order)
sets to choose from
specific_lengths : list of ints
the lenghts that returned subsets should be of
Returns
-------
an iterator over
tuple sets
disjoint subsets (same number of elements as specific_lengths)
"""
#if all data points were processed yield an empty partition and terminate recursion
if len(specific_lengths) == 0:
yield ()
return
#iterate over all sets with the largest lengths that were not already processed (not in excluded_set)
for i,set_ in enumerate(sets):
if len(set_) != specific_lengths[0] or excluded_set.intersection(set_): continue
#iterate over partitions with the rest of the sets
for kp in _get_set_partitions_specific(sets[i+1:], specific_lengths[1:], excluded_set|set_):
#yield the partition with the currently processed set
yield (set_,) + kp
def get_ranked_set_partitions(sets):
    """Yield partitions of disjoint subsets from ``sets``, ranked by subset
    lengths — partitions with larger subsets are yielded first.

    Parameters
    ----------
    sets : list of python sets
        sets to choose from

    Yields
    ------
    tuple of sets
        disjoint subsets
    """
    ranked_sets = sorted(map(set, sets), key=len, reverse=True)
    # Distinct set sizes, ascending: the allowable group lengths.
    distinct_lengths = sorted({len(s) for s in ranked_sets})
    total_elements = len(set.union(*ranked_sets))
    # Enumerate length patterns accounting for every data point, then realize
    # each pattern as concrete partitions of disjoint sets.
    for length_pattern in _get_number_partitions_up_to_sum(
            distinct_lengths, total_elements):
        yield from _get_set_partitions_specific(
            ranked_sets, length_pattern, set())
def _is_sub_partition(sets, other_sets):
"""
Checks if all the sets in one list are subsets of one set in another list.
Used by get_ranked_trajectory_partitions
Parameters
----------
sets : list of python sets
partition to check
other_sets : list of python sets
partition to check against
Returns
-------
bool
whether all sets are included in other_sets
"""
for set_ in sets:
if not any(map(set_.issubset, other_sets)):
#here if at least one set is not fully included in another set in the other partition
return False
return True
|
MIT-STARLabREPO_NAMEdeconfuserPATH_START.@deconfuser_extracted@deconfuser-main@deconfuser@partition_ranking.py@.PATH_END.py
|
{
"filename": "ipunittest.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py3/IPython/testing/ipunittest.py",
"type": "Python"
}
|
"""Experimental code for cleaner support of IPython syntax with unittest.
In IPython up until 0.10, we've used very hacked up nose machinery for running
tests with IPython special syntax, and this has proved to be extremely slow.
This module provides decorators to try a different approach, stemming from a
conversation Brian and I (FP) had about this problem Sept/09.
The goal is to be able to easily write simple functions that can be seen by
unittest as tests, and ultimately for these to support doctests with full
IPython syntax. Nose already offers this based on naming conventions and our
hackish plugins, but we are seeking to move away from nose dependencies if
possible.
This module follows a different approach, based on decorators.
- A decorator called @ipdoctest can mark any function as having a docstring
that should be viewed as a doctest, but after syntax conversion.
Authors
-------
- Fernando Perez <Fernando.Perez@berkeley.edu>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import re
import sys
import unittest
from doctest import DocTestFinder, DocTestRunner, TestResults
from IPython.terminal.interactiveshell import InteractiveShell
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def count_failures(runner):
    """Return a TestResults entry for each doctest in *runner* that failed.

    Code modeled after the summarize() method in doctest.  Handles the
    rename of the runner's private stats attribute in Python 3.13.
    """
    if sys.version_info >= (3, 13):
        # Python 3.13 replaced _name2ft with _stats, which adds a skip count.
        return [
            TestResults(failures, tries)
            for failures, tries, skips in runner._stats.values()
            if failures > 0
        ]
    return [
        TestResults(failures, tries)
        for failures, tries in runner._name2ft.values()
        if failures > 0
    ]
class IPython2PythonConverter(object):
    """Convert IPython 'syntax' to valid Python.

    Eventually this code may grow to be the full IPython syntax conversion
    implementation, but for now it only does prompt conversion."""

    def __init__(self):
        # Patterns matching IPython-style prompts in a docstring.
        self.rps1 = re.compile(r'In\ \[\d+\]: ')
        self.rps2 = re.compile(r'\ \ \ \.\.\.+: ')
        self.rout = re.compile(r'Out\[\d+\]: \s*?\n?')
        # Standard Python prompt strings and patterns recognizing them.
        self.pyps1 = '>>> '
        self.pyps2 = '... '
        self.rpyps1 = re.compile(r'(\s*%s)(.*)$' % self.pyps1)
        self.rpyps2 = re.compile(r'(\s*%s)(.*)$' % self.pyps2)

    def __call__(self, ds):
        """Convert IPython prompts to python ones in a string."""
        from . import globalipapp  # noqa: F401 - imported for its side effects

        # First pass: rewrite the prompt markers themselves and strip
        # Out[..] markers entirely.
        converted = self.rps1.sub('>>> ', ds)
        converted = self.rps2.sub('... ', converted)
        converted = self.rout.sub('', converted)

        shell = InteractiveShell.instance()
        # Second pass: run each prompted input line through IPython's
        # prefilter so IPython-only input becomes valid Python.
        result_lines = []
        for line in converted.splitlines():
            primary = self.rpyps1.match(line)
            if primary is not None:
                prompt, text = primary.groups()
                result_lines.append(prompt + shell.prefilter(text, False))
                continue
            continuation = self.rpyps2.match(line)
            if continuation is not None:
                prompt, text = continuation.groups()
                result_lines.append(prompt + shell.prefilter(text, True))
                continue
            result_lines.append(line)
        # Ensure a closing newline, needed by doctest.
        result_lines.append('')
        return '\n'.join(result_lines)
class Doc2UnitTester(object):
    """Class whose instances act as a decorator for docstring testing.

    In practice we're only likely to need one instance ever, made below
    (though no attempt is made at turning it into a singleton, there is no
    need for that).
    """

    def __init__(self, verbose=False):
        """New decorator.

        Parameters
        ----------
        verbose : boolean, optional (False)
            Passed to the doctest finder and runner to control verbosity.
        """
        self.verbose = verbose
        # One finder suffices for every function this instance decorates.
        self.finder = DocTestFinder(verbose=verbose, recurse=False)

    def __call__(self, func):
        """Use as a decorator: doctest a function's docstring as a unittest.

        This version runs normal doctests, but the idea is to make it later
        run ipython syntax instead."""
        # Keep a reference to the decorator under a distinct name so the
        # TestCase below can use it without clashing with its own 'self',
        # which points to the test instance at runtime.
        decorator = self

        # Normalize the docstring to plain Python syntax before testing.
        if func.__doc__ is not None:
            func.__doc__ = ip2py(func.__doc__)

        # A real unittest.TestCase subclass, so normal unittest machinery
        # (or Nose, or Trial) can find and run it.
        class Tester(unittest.TestCase):
            def test(self):
                # A fresh runner per tested function.
                runner = DocTestRunner(verbose=decorator.verbose)
                for the_test in decorator.finder.find(func, func.__name__):
                    runner.run(the_test)
                failed = count_failures(runner)
                if not failed:
                    return
                # Only a single function's docstring was examined, so more
                # than one result is a state we cannot interpret.
                if len(failed) > 1:
                    raise ValueError(
                        "Invalid number of test results: %s" % failed)
                # Report a normal failure.
                self.fail('failed doctests: %s' % str(failed[0]))

        # Rename it so test reports have the original signature.
        Tester.__name__ = func.__name__
        return Tester
def ipdocstring(func):
    """Change the function docstring via ip2py."""
    doc = func.__doc__
    if doc is not None:
        func.__doc__ = ip2py(doc)
    return func
# Make an instance of the classes for public use.
# ipdoctest: decorator turning a function's docstring into a unittest case.
ipdoctest = Doc2UnitTester()
# ip2py: callable converting IPython prompt syntax to plain Python.
ip2py = IPython2PythonConverter()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py3@IPython@testing@ipunittest.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "POSYDON-code/POSYDON",
"repo_path": "POSYDON_extracted/POSYDON-main/posydon/interpolation/README.md",
"type": "Markdown"
}
|
POSYDON-codeREPO_NAMEPOSYDONPATH_START.@POSYDON_extracted@POSYDON-main@posydon@interpolation@README.md@.PATH_END.py
|
|
{
"filename": "bpl_distribution.py",
"repo_name": "grburgess/popsynth",
"repo_path": "popsynth_extracted/popsynth-master/popsynth/distributions/bpl_distribution.py",
"type": "Python"
}
|
import numpy as np
import scipy.integrate as integrate
import scipy.stats as stats
from popsynth.distribution import DistributionParameter, LuminosityDistribution
class BPLDistribution(LuminosityDistribution):
    _distribution_name = "BPLDistribution"

    # Distribution parameters (descriptor objects supplied by popsynth).
    Lmin = DistributionParameter(vmin=0)
    alpha = DistributionParameter()
    Lbreak = DistributionParameter(vmin=0)
    beta = DistributionParameter()
    Lmax = DistributionParameter(vmin=0)

    def __init__(self, seed: int = 1234, name: str = "bpl"):
        """A broken power law luminosity distribution.

        L ~ L^``alpha`` for L <= ``Lbreak``
        L ~ L^``beta`` for L > ``Lbreak``

        :param seed: Random seed
        :type seed: int
        :param name: Name of the distribution
        :type name: str
        :param Lmin: Minimum value of the luminosity
        :type Lmin: :class:`DistributionParameter`
        :param alpha: Index of the lower power law
        :type alpha: :class:`DistributionParameter`
        :param Lbreak: Luminosity of the power law break
        :type Lbreak: :class:`DistributionParameter`
        :param beta: Index of the upper power law
        :type beta: :class:`DistributionParameter`
        :param Lmax: Maximum value of the luminosity
        :type Lmax: :class:`DistributionParameter`
        """
        # LaTeX form of the luminosity function (displayed by popsynth).
        lf_form = (
            r"\begin{cases} C L^{\alpha} & \mbox{if } L"
            r"\leq L_\mathrm{break},\\ C L^{\beta} "
            r"L_\mathrm{break}^{\alpha - \beta}"
            r" & \mbox{if } L > L_\mathrm{break}. \end{cases}"
        )

        super().__init__(seed=seed, name=name, form=lf_form)

    def phi(self, L):
        """Normalized broken power law density evaluated at ``L``."""

        def unnormalized(ll):
            return bpl(
                ll, self.Lmin, self.Lbreak, self.Lmax, self.alpha, self.beta
            )

        # Normalize by the numerical integral over the full support.
        normalization = integrate.quad(unnormalized, self.Lmin, self.Lmax)[0]
        return unnormalized(L) / normalization

    def draw_luminosity(self, size=1):
        """Draw ``size`` luminosities via inverse-transform sampling."""
        uniforms = np.atleast_1d(np.random.uniform(size=size))
        return sample_bpl(
            uniforms, self.Lmin, self.Lbreak, self.Lmax, self.alpha, self.beta
        )
def integrate_pl(x0, x1, x2, a1, a2):
    """Analytically integrate a broken power law between bounds.

    :param x0: Lower bound
    :param x1: Break point
    :param x2: Upper bound
    :param a1: Lower power law index
    :param a2: Upper power law index
    :returns: tuple ``(w1, w2, total)`` — the fractional weight of the lower
        and upper pieces and the total integral
    """
    # Closed-form integral of each piece (valid for a1 != -1 and a2 != -1).
    int_1 = (np.power(x1, a1 + 1.0) - np.power(x0, a1 + 1.0)) / (a1 + 1)
    int_2 = (
        np.power(x1, a1 - a2)
        * (np.power(x2, a2 + 1.0) - np.power(x1, a2 + 1.0))
        / (a2 + 1)
    )

    # Total integral and the probability weight of each piece.
    total = int_1 + int_2
    w1 = int_1 / total
    w2 = int_2 / total

    return w1, w2, total


def bpl(x, x0, x1, x2, a1, a2):
    """Normalized broken power law density between bounds.

    :param x: The domain of the function (scalar or array-like)
    :param x0: Lower bound
    :param x1: Break point
    :param x2: Upper bound
    :param a1: Lower power law index
    :param a2: Upper power law index
    :returns: np.ndarray of density values (zero outside the bounds)
    """
    x = np.atleast_1d(x)

    # Bug fix: allocate the output explicitly as float.  np.zeros_like(x)
    # inherited x's dtype, so integer-valued inputs silently truncated the
    # power-law values to integers (typically to 0).
    out = np.zeros(x.shape, dtype=float)

    # The total integral supplies the normalization constant.
    _, _, C = integrate_pl(x0, x1, x2, a1, a2)
    norm = 1.0 / C

    # Lower power-law piece: x0 < x < x1.
    idx = (x > x0) & (x < x1)
    out[idx] = np.power(x[idx], a1)

    # Upper piece: x1 <= x < x2, matched at the break so the density is
    # continuous there.
    idx = (x >= x1) & (x < x2)
    out[idx] = np.power(x[idx], a2) * np.power(x1, a1 - a2)

    return out * norm


def sample_bpl(u, x0, x1, x2, a1, a2):
    """Sample from a broken power law between bounds by inverse transform.

    :param u: Uniform random number(s) on (0, 1); scalar or array-like
    :param x0: Lower bound
    :param x1: Break point
    :param x2: Upper bound
    :param a1: Lower power law index
    :param a2: Upper power law index
    :returns: np.ndarray of samples, one per entry of ``u``
    """
    # Robustness: accept scalars and coerce to a float array so len(u) and
    # fancy indexing below always work.
    u = np.atleast_1d(np.asarray(u, dtype=float))

    # The weights give the probability that a sample falls in each piece.
    w1, _, _ = integrate_pl(x0, x1, x2, a1, a2)

    out = np.empty_like(u)

    # Bernoulli trials pick the lower piece ("successes") per sample; cast
    # to bool for numpy boolean indexing.  The upper piece simply uses the
    # inverted index.
    idx = stats.bernoulli.rvs(w1, size=len(u)).astype(bool)

    # Inverse-transform sample of the lower power law for the successes.
    out[idx] = np.power(
        u[idx] * (np.power(x1, a1 + 1.0) - np.power(x0, a1 + 1.0))
        + np.power(x0, a1 + 1.0),
        1.0 / (1 + a1),
    )

    # Inverse-transform sample of the upper power law for the failures.
    out[~idx] = np.power(
        u[~idx] * (np.power(x2, a2 + 1.0) - np.power(x1, a2 + 1.0))
        + np.power(x1, a2 + 1.0),
        1.0 / (1 + a2),
    )

    return out
|
grburgessREPO_NAMEpopsynthPATH_START.@popsynth_extracted@popsynth-master@popsynth@distributions@bpl_distribution.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/outsidetextfont/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `variant` property of funnel.outsidetextfont."""

    def __init__(
        self, plotly_name="variant", parent_name="funnel.outsidetextfont", **kwargs
    ):
        # Allowed CSS font-variant keywords, unless the caller overrides.
        default_variants = [
            "normal",
            "small-caps",
            "all-small-caps",
            "all-petite-caps",
            "petite-caps",
            "unicase",
        ]
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", default_variants),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnel@outsidetextfont@_variant.py@.PATH_END.py
|
{
"filename": "gaussian.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/autofit/mapper/prior/gaussian.py",
"type": "Python"
}
|
from typing import Optional
from autofit.jax_wrapper import register_pytree_node_class
from autofit.messages.normal import NormalMessage
from .abstract import Prior
@register_pytree_node_class
class GaussianPrior(Prior):
    # Fields that contribute to this prior's unique identifier.
    __identifier_fields__ = ("lower_limit", "upper_limit", "mean", "sigma")
    # Constructor-argument names used when (de)serializing via the database.
    __database_args__ = ("mean", "sigma", "lower_limit", "upper_limit", "id_")

    def __init__(
        self,
        mean: float,
        sigma: float,
        lower_limit: float = float("-inf"),
        upper_limit: float = float("inf"),
        id_: Optional[int] = None,
    ):
        """
        A prior with a Gaussian distribution, defined by a mean and sigma,
        between a lower limit and upper limit.

        The conversion of an input unit value, ``u``, to a physical value, ``p``, via the prior is as follows:

        .. math::

            p = \mu + \sigma * sqrt(2) * erfcinv(2.0 * (1.0 - u))

        For example for ``prior = GaussianPrior(mean=1.0, sigma=2.0)``, an
        input ``prior.value_for(unit=0.5)`` is equal to 1.0.

        The mapping is performed using the message functionality, where a message represents the distribution
        of this prior.

        Parameters
        ----------
        mean
            The mean of the Gaussian distribution defining the prior.
        sigma
            The sigma value of the Gaussian distribution defining the prior.
        lower_limit
            A lower limit of the Gaussian distribution; physical values below this value are rejected.
        upper_limit
            An upper limit of the Gaussian distribution; physical values above this value are rejected.

        Examples
        --------

        prior = af.GaussianPrior(mean=1.0, sigma=2.0, lower_limit=0.0, upper_limit=2.0)

        physical_value = prior.value_for(unit=0.5)
        """
        super().__init__(
            lower_limit=lower_limit,
            upper_limit=upper_limit,
            # The message encapsulates the normal distribution used for the
            # unit-to-physical mapping.
            message=NormalMessage(
                mean=mean,
                sigma=sigma,
                lower_limit=lower_limit,
                upper_limit=upper_limit,
            ),
            id_=id_,
        )

    def tree_flatten(self):
        # JAX pytree support: children are the numeric parameters; the
        # auxiliary data carries the identifier.
        return (self.mean, self.sigma, self.lower_limit, self.upper_limit), (self.id,)

    @classmethod
    def with_limits(cls, lower_limit: float, upper_limit: float) -> "GaussianPrior":
        """
        Create a new gaussian prior centred between two limits
        with sigma equal to the distance between these limits.

        Note that these limits are not strict so exceptions will not
        be raised for values outside of the limits.

        This function is typically used in prior passing, where the
        result of a model-fit are used to create new Gaussian priors
        centred on the previously estimated median PDF model.

        Parameters
        ----------
        lower_limit
            The lower limit of the new Gaussian prior.
        upper_limit
            The upper limit of the new Gaussian Prior.

        Returns
        -------
        A new GaussianPrior
        """
        return cls(
            mean=(lower_limit + upper_limit) / 2,
            sigma=upper_limit - lower_limit,
        )

    def dict(self) -> dict:
        """
        A dictionary representation of this prior
        """
        prior_dict = super().dict()
        return {**prior_dict, "mean": self.mean, "sigma": self.sigma}

    @property
    def parameter_string(self) -> str:
        # Human-readable summary of the prior's parameters.
        return f"mean = {self.mean}, sigma = {self.sigma}"
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@autofit@mapper@prior@gaussian.py@.PATH_END.py
|
{
"filename": "SolidMaterial_PYB11.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/PYB11/SolidMaterial/SolidMaterial_PYB11.py",
"type": "Python"
}
|
"""
Spheral SolidMaterial module.
Provides equations of state and material models for solids in Spheral.
"""
from PYB11Generator import *
from SpheralCommon import *
from spheralDimensions import *
dims = spheralDimensions()
from SolidEquationOfState import *
from LinearPolynomialEquationOfState import *
from GruneisenEquationOfState import *
from OsborneEquationOfState import *
from TillotsonEquationOfState import *
from MurnaghanEquationOfState import *
from StrengthModel import *
from NullStrength import *
from ConstantStrength import *
from SteinbergGuinanStrength import *
from JohnsonCookStrength import *
from CollinsStrength import *
from iSALEROCKStrength import *
from PhysicsEvolvingMaterialLibrary import *
#-------------------------------------------------------------------------------
# Includes
#-------------------------------------------------------------------------------
# C++ headers that the generated binding code must #include.
PYB11includes += ['"SolidMaterial/SolidEquationOfState.hh"',
                  '"SolidMaterial/LinearPolynomialEquationOfState.hh"',
                  '"SolidMaterial/GruneisenEquationOfState.hh"',
                  '"SolidMaterial/OsborneEquationOfState.hh"',
                  '"SolidMaterial/TillotsonEquationOfState.hh"',
                  '"SolidMaterial/MurnaghanEquationOfState.hh"',
                  '"SolidMaterial/StrengthModel.hh"',
                  '"SolidMaterial/ConstantStrength.hh"',
                  '"SolidMaterial/NullStrength.hh"',
                  '"SolidMaterial/PolynomialFit.hh"',
                  '"SolidMaterial/SteinbergGuinanStrength.hh"',
                  '"SolidMaterial/SteinbergGuinanLundStrength.hh"',
                  '"SolidMaterial/JohnsonCookStrength.hh"',
                  '"SolidMaterial/CollinsStrength.hh"',
                  '"SolidMaterial/iSALEROCKStrength.hh"',
                  '"SolidMaterial/PhysicsEvolvingMaterialLibrary.hh"',
                  '"FileIO/FileIO.hh"']

#-------------------------------------------------------------------------------
# Namespaces
#-------------------------------------------------------------------------------
# All bound symbols live in the Spheral C++ namespace.
PYB11namespaces = ["Spheral"]
#-------------------------------------------------------------------------------
# NinthOrderPolynomialFit
#-------------------------------------------------------------------------------
# PYB11 binding description for the C++ NinthOrderPolynomialFit type.
# NOTE: the bare strings below are PYB11 metadata, not ordinary Python code:
# class/method strings become generated documentation and the argument
# string values declare the C++ types of the bound arguments.
class NinthOrderPolynomialFit:
    "Used by Steinberg-Guinnan strength to approximate cold and melt energies"

    # Constructor binding: ten polynomial coefficients C0..C9.
    def pyinit(self,
               C0 = "const double",
               C1 = "const double",
               C2 = "const double",
               C3 = "const double",
               C4 = "const double",
               C5 = "const double",
               C6 = "const double",
               C7 = "const double",
               C8 = "const double",
               C9 = "const double"):
        "Construct with coefficients"

    # Evaluate the fitted polynomial at x; bound as a const C++ method.
    @PYB11const
    def __call__(self, x="const double"):
        return "double"
#-------------------------------------------------------------------------------
# Instantiate our dimensional types
#-------------------------------------------------------------------------------
# For each enabled dimension, stamp out concrete PYB11 template
# instantiations (e.g. TillotsonEquationOfState2d for Dim<2>).  exec is used
# because the generated attribute names themselves depend on ndim.
for ndim in dims:
    exec('''
SolidEquationOfState%(ndim)id = PYB11TemplateClass(SolidEquationOfState, template_parameters="%(Dimension)s")
StrengthModel%(ndim)id = PYB11TemplateClass(StrengthModel, template_parameters="%(Dimension)s")
LinearPolynomialEquationOfState%(ndim)id = PYB11TemplateClass(LinearPolynomialEquationOfState, template_parameters="%(Dimension)s")
GruneisenEquationOfState%(ndim)id = PYB11TemplateClass(GruneisenEquationOfState, template_parameters="%(Dimension)s")
OsborneEquationOfState%(ndim)id = PYB11TemplateClass(OsborneEquationOfState, template_parameters="%(Dimension)s")
TillotsonEquationOfState%(ndim)id = PYB11TemplateClass(TillotsonEquationOfState, template_parameters="%(Dimension)s")
MurnaghanEquationOfState%(ndim)id = PYB11TemplateClass(MurnaghanEquationOfState, template_parameters="%(Dimension)s")
NullStrength%(ndim)id = PYB11TemplateClass(NullStrength, template_parameters="%(Dimension)s")
ConstantStrength%(ndim)id = PYB11TemplateClass(ConstantStrength, template_parameters="%(Dimension)s")
SteinbergGuinanStrength%(ndim)id = PYB11TemplateClass(SteinbergGuinanStrength, template_parameters="%(Dimension)s")
JohnsonCookStrength%(ndim)id = PYB11TemplateClass(JohnsonCookStrength, template_parameters="%(Dimension)s")
CollinsStrength%(ndim)id = PYB11TemplateClass(CollinsStrength, template_parameters="%(Dimension)s")
iSALEROCKStrength%(ndim)id = PYB11TemplateClass(iSALEROCKStrength, template_parameters="%(Dimension)s")
PhysicsEvolvingMaterialLibrary%(ndim)id = PYB11TemplateClass(PhysicsEvolvingMaterialLibrary, template_parameters="%(Dimension)s")
''' % {"ndim" : ndim,
       "Dimension" : "Dim<" + str(ndim) + ">"})
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@PYB11@SolidMaterial@SolidMaterial_PYB11.py@.PATH_END.py
|
{
"filename": "_showexponent.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermapbox/marker/colorbar/_showexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `showexponent` property of scattermapbox.marker.colorbar."""

    def __init__(
        self,
        plotly_name="showexponent",
        parent_name="scattermapbox.marker.colorbar",
        **kwargs,
    ):
        # Which tick labels display an exponent; caller may override both
        # the edit type and the allowed values via kwargs.
        allowed = kwargs.pop("values", ["all", "first", "last", "none"])
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=allowed,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermapbox@marker@colorbar@_showexponent.py@.PATH_END.py
|
{
"filename": "script_wandb.py",
"repo_name": "ThomasHelfer/multimodal-supernovae",
"repo_path": "multimodal-supernovae_extracted/multimodal-supernovae-main/script_wandb.py",
"type": "Python"
}
|
import os, sys
import wandb
from ruamel.yaml import YAML
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import numpy as np
import torch
import pytorch_lightning as pl
from torch.utils.data import TensorDataset, DataLoader, random_split, Subset
from sklearn.model_selection import train_test_split
from src.models_multimodal import LightCurveImageCLIP, load_pretrain_lc_model
from src.utils import (
set_seed,
get_valid_dir,
LossTrackingCallback,
plot_ROC_curves,
plot_loss_history,
get_embs,
)
from src.dataloader import (
load_data,
NoisyDataLoader,
)
from src.wandb_utils import continue_sweep, schedule_sweep
def train_sweep(config=None):
    """Run one W&B sweep trial: split the data, build the CLIP model, train, and log.

    Reads hyperparameters from the active run's ``wandb.config`` and relies on
    module-level globals assigned in ``__main__`` (``dataset``, ``model_path``,
    ``filenames``, ``stratifiedkfoldindices``, ``val_fraction``, ``combinations``,
    ``nband``, ``regression``, ``classification``, ``n_classes``,
    ``pretrain_lc_path``, ``freeze_backbone_lc``).

    Args:
        config: Optional config passed through to ``wandb.init`` (the sweep
            agent normally supplies it).
    """
    with wandb.init(config=config) as run:
        print(f"run name: {run.name}", flush=True)
        # Per-run output directory under the sweep's model path.
        path_run = os.path.join(model_path, run.name)
        os.makedirs(path_run, exist_ok=True)
        cfg = wandb.config
        set_seed(cfg.seed)
        number_of_samples = len(dataset)
        print(f"Number of samples: {number_of_samples}", flush=True)
        # Train/validation split: random split by default, or a precomputed
        # stratified k-fold selected via cfg.foldnumber.
        if stratifiedkfoldindices is None:
            inds_train, inds_val = train_test_split(
                range(number_of_samples),
                test_size=val_fraction,
                random_state=cfg.seed,
            )
        else:
            inds_train = stratifiedkfoldindices[cfg.foldnumber]["train_indices"]
            inds_val = stratifiedkfoldindices[cfg.foldnumber]["test_indices"]
        dataset_train = Subset(dataset, inds_train)
        dataset_val = Subset(dataset, inds_val)
        # Persist the filenames of each split for reproducibility.
        np.savetxt(
            os.path.join(path_run, "val_filenames.txt"),
            np.array(filenames)[inds_val],
            fmt="%s",
        )
        np.savetxt(
            os.path.join(path_run, "train_filenames.txt"),
            np.array(filenames)[inds_train],
            fmt="%s",
        )
        # Dump the resolved sweep config next to the run artifacts.
        config_dict = {k: v for k, v in cfg.items()}
        with open(os.path.join(path_run, "config.yaml"), "w") as f:
            YAML().dump(config_dict, f)
        # Default to 1 if the environment variable is not set
        cpus_per_task = int(os.getenv("SLURM_CPUS_PER_TASK", 1))
        # Assuming you want to leave one CPU for overhead
        num_workers = max(1, cpus_per_task - 1)
        print(f"Using {num_workers} workers for data loading", flush=True)
        # Noise-free loaders, used only for computing embeddings after training.
        train_loader_no_aug = NoisyDataLoader(
            dataset_train,
            batch_size=cfg.batchsize,
            noise_level_img=0,
            noise_level_mag=0,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=True,
            combinations=combinations,
        )
        val_loader_no_aug = NoisyDataLoader(
            dataset_val,
            batch_size=cfg.batchsize,
            noise_level_img=0,
            noise_level_mag=0,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True,
            combinations=combinations,
        )
        # Training loader applies noise augmentation; validation stays clean.
        train_loader = NoisyDataLoader(
            dataset_train,
            batch_size=cfg.batchsize,
            noise_level_img=1,
            noise_level_mag=1,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=True,
            combinations=combinations,
        )
        val_loader = NoisyDataLoader(
            dataset_val,
            batch_size=cfg.batchsize,
            noise_level_img=0,
            noise_level_mag=0,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True,
            combinations=combinations,
        )
        # Light-curve transformer branch.
        transformer_kwargs = {
            "n_out": cfg.n_out,
            "emb": cfg.emb,
            "heads": cfg.heads,
            "depth": cfg.transformer_depth,
            "dropout": cfg.dropout,
            "time_norm": cfg.time_norm,
            "agg": cfg.agg,
        }
        # Spectral transformer branch.
        transformer_spectral_kwargs = {
            "n_out": cfg.n_out,
            "emb": cfg.emb_spectral,
            "heads": cfg.heads_spectral,
            "depth": cfg.transformer_depth_spectral,
            "dropout": cfg.dropout,
            "time_norm": cfg.time_norm_spectral,
            "agg": cfg.agg_spectral,
        }
        # CNN branch is built only when host-galaxy images are in the modality mix.
        if "host_galaxy" in combinations:
            conv_kwargs = {
                "dim": cfg.cnn_dim,
                "depth": cfg.cnn_depth,
                "channels": cfg.cnn_channels,
                "kernel_size": cfg.cnn_kernel_size,
                "patch_size": cfg.cnn_patch_size,
                "n_out": cfg.n_out,
                "dropout_prob": cfg.dropout,
            }
        else:
            conv_kwargs = None
        # Metadata MLP branch, only when "meta" is a requested modality.
        if "meta" in combinations:
            meta_kwargs = {
                "input_dim": cfg.meta_input_dim,
                "hidden_dim": cfg.meta_hidden_dim,
                "num_layers": cfg.meta_num_layers,
                "dropout": cfg.dropout,
            }
        else:
            meta_kwargs = None
        optimizer_kwargs = {"weight_decay": cfg.weight_decay}
        clip_model = LightCurveImageCLIP(
            logit_scale=cfg.logit_scale,
            lr=cfg.lr,
            nband=nband,
            loss="softmax",
            transformer_kwargs=transformer_kwargs,
            transformer_spectral_kwargs=transformer_spectral_kwargs,
            conv_kwargs=conv_kwargs,
            meta_kwargs=meta_kwargs,
            optimizer_kwargs=optimizer_kwargs,
            combinations=combinations,
            regression=regression,
            classification=classification,
            n_classes=n_classes,
        )
        # Optionally warm-start the light-curve backbone from a pretrained checkpoint.
        if pretrain_lc_path:
            load_pretrain_lc_model(pretrain_lc_path, clip_model, freeze_backbone_lc)
        # Custom callback that records train/val loss (and AUC) history.
        loss_tracking_callback = LossTrackingCallback()
        device = "gpu" if torch.cuda.is_available() else "cpu"
        if device == "gpu":  # Set float32 matmul precision for A100 GPUs
            cuda_name = torch.cuda.get_device_name(torch.cuda.current_device())
            if cuda_name.startswith("NVIDIA A100-SXM4"):
                torch.set_float32_matmul_precision("high")
        wandb_logger = WandbLogger()
        # Checkpoint/early-stop criteria: maximize F1 for classification,
        # minimize validation loss otherwise.
        if classification:
            checkpoint_callback = ModelCheckpoint(
                dirpath=path_run,
                save_top_k=2,
                monitor="f1_val",
                save_last=True,
                mode="max",
            )
            early_stop_callback = EarlyStopping(
                monitor="f1_val",
                min_delta=0.00,
                patience=cfg.patience,
                verbose=False,
                mode="max",
            )
        else:
            checkpoint_callback = ModelCheckpoint(
                dirpath=path_run,
                save_top_k=2,
                monitor="val_loss",
                save_last=True,
            )
            early_stop_callback = EarlyStopping(
                monitor="val_loss",
                min_delta=0.00,
                patience=cfg.patience,
                verbose=False,
                mode="min",
            )
        trainer = pl.Trainer(
            max_epochs=cfg.epochs,
            accelerator=device,
            callbacks=[
                loss_tracking_callback,
                checkpoint_callback,
                early_stop_callback,
            ],
            logger=wandb_logger,
            enable_progress_bar=False,
        )
        # With exactly two modalities, track the best retrieval AUC in W&B.
        if len(combinations) == 2:
            wandb.define_metric("AUC_val", summary="max")
        trainer.fit(
            model=clip_model, train_dataloaders=train_loader, val_dataloaders=val_loader
        )
        # Contrastive-only runs: summarize best AUC/loss and plot diagnostics.
        if (not regression) and (not classification):
            wandb.run.summary["best_auc"] = np.max(
                loss_tracking_callback.auc_val_history
            )
            wandb.run.summary["best_val_loss"] = np.min(
                loss_tracking_callback.val_loss_history
            )
            plot_loss_history(
                loss_tracking_callback.train_loss_history,
                loss_tracking_callback.val_loss_history,
                path_base=path_run,
            )
            # Get embeddings for all images and light curves (no augmentation).
            embs_train = get_embs(clip_model, train_loader_no_aug, combinations)
            embs_val = get_embs(clip_model, val_loader_no_aug, combinations)
            plot_ROC_curves(
                embs_train,
                embs_val,
                combinations,
                path_base=path_run,
            )
        wandb.finish()
if __name__ == "__main__":
    # Entry point: resolve the sweep (new from a YAML config, or resume an
    # existing one), load the dataset once, then hand control to the W&B agent.
    wandb.login()
    # Single CLI argument: either a sweep config YAML or an existing sweep id/path.
    arg = sys.argv[1]
    analysis_path = "./analysis/"
    if arg.endswith(".yaml"):
        config = arg
        sweep_id, model_path, cfg = schedule_sweep(config, analysis_path)
    else:
        sweep_id = os.path.basename(arg)
        model_path = os.path.join(analysis_path, sweep_id)
        cfg = continue_sweep(model_path)
    print("model path: " + model_path, flush=True)
    # define constants
    val_fraction = cfg["extra_args"]["val_fraction"]
    # Data preprocessing: candidate locations for the ZTFBTS data on the
    # various clusters this script runs on.
    data_dirs = [
        "/home/thelfer1/scr4_tedwar42/thelfer1/ZTFBTS/",
        "ZTFBTS/",
        "data/ZTFBTS/",
        "/ocean/projects/phy230064p/shared/ZTFBTS/",
        "/n/home02/gemzhang/repos/Multimodal-hackathon-2024/data/ZTFBTS/",
    ]
    # Get the first valid directory
    data_dir = get_valid_dir(data_dirs)
    # Get what data combinations are used
    combinations = cfg["extra_args"]["combinations"]
    regression = cfg["extra_args"]["regression"]
    classification = cfg["extra_args"]["classification"]
    if classification:
        n_classes = cfg["extra_args"]["n_classes"]
    else:
        n_classes = 5
    # Optional pretrained light-curve backbone (None/absent disables it).
    pretrain_lc_path = cfg["extra_args"].get("pretrain_lc_path")
    freeze_backbone_lc = cfg["extra_args"].get("freeze_backbone_lc")
    # Check if the config file has a spectra key
    if "spectral" in combinations:
        data_dirs = ["ZTFBTS_spectra/", "data/ZTFBTS_spectra/"]
        spectra_dir = get_valid_dir(data_dirs)
    else:
        spectra_dir = None
    max_spectral_data_len = cfg["extra_args"][
        "max_spectral_data_len"
    ]  # Spectral data is cut to this length
    # Load the dataset once at module level; train_sweep reads these globals.
    dataset, nband, filenames, stratifiedkfoldindices = load_data(
        data_dir,
        spectra_dir,
        max_data_len_spec=max_spectral_data_len,
        combinations=combinations,
        n_classes=n_classes,
        spectral_rescalefactor=cfg["extra_args"]["spectral_rescalefactor"],
        kfolds=cfg["extra_args"].get("kfolds", None),
    )
    # Run `nruns` trials of the sweep with train_sweep as the objective.
    wandb.agent(
        sweep_id=sweep_id,
        entity=cfg["entity"],
        project=cfg["project"],
        function=train_sweep,
        count=cfg["extra_args"]["nruns"],
    )
|
ThomasHelferREPO_NAMEmultimodal-supernovaePATH_START.@multimodal-supernovae_extracted@multimodal-supernovae-main@script_wandb.py@.PATH_END.py
|
{
"filename": "build.ipynb",
"repo_name": "seap-udea/iWander",
"repo_path": "iWander_extracted/iWander-master/db/build.ipynb",
"type": "Jupyter Notebook"
}
|
# AstroRV Catalog Ensembling
```python
# =============================================================================
# Libraries
# =============================================================================
import pandas as pd
import numpy as np
import urllib.request
import collections
import os,re
import time
pd.options.mode.chained_assignment = None
# =============================================================================
# Data directories
# =============================================================================
dir_gaia = "src/Astro/"
dir_hip = "src/Astro/"
dir_simbad = "src/Astro/"
dir_rv = "src/RV/"
dir_results = "src/"
```
## =============================================================================
## Load Gaia database
## =============================================================================
```python
t0 = time.time()
# Columns to read from the Gaia-TGAS source files.
cols_gaia = ["source_id", "hip", "tycho2_id", "ra", "ra_error", "dec", "dec_error", "parallax", "parallax_error",
             "pmra", "pmra_error", "pmdec", "pmdec_error", "ra_dec_corr", "ra_parallax_corr", "ra_pmra_corr",
             "ra_pmdec_corr", "dec_parallax_corr", "dec_pmra_corr", "dec_pmdec_corr", "parallax_pmra_corr",
             "parallax_pmdec_corr", "pmra_pmdec_corr", "phot_g_mean_flux", "phot_g_mean_flux_error", "phot_g_mean_mag",
             "l", "b", "ecl_lon", "ecl_lat"]
# Read the 16 TGAS partition files and concatenate them in one pass.
# DataFrame.append (removed in pandas 2.0) was O(n^2) in a loop; pd.concat
# with the default index handling reproduces the same frame.
gaia = pd.concat(
    [
        pd.read_csv(
            dir_gaia + "TgasSource_000-000-0" + str(i).zfill(2) + ".csv.gz",
            usecols=cols_gaia,
        )
        for i in range(16)
    ]
)
# Normalize identifier columns to strings and blank out missing values.
gaia.source_id = gaia.source_id.astype(str)
gaia.hip = gaia.hip.astype(str)
gaia.tycho2_id = gaia.tycho2_id.astype(str)
gaia["hip"] = gaia["hip"].map(lambda x: x.replace(".0", ""))
gaia.hip.replace("nan", "", inplace=True)
gaia.tycho2_id.replace("nan", "", regex=True, inplace=True)
# Keep only objects with a positive (physical) parallax.
n1 = len(gaia)
gaia = gaia[gaia.parallax > 0]
n2 = len(gaia)
print("\nDatabase Gaia-TGAS:", "\nOriginal objects:", n1,"\nDiscarded objects:", n1-n2,"\nFinal objects:", n2)
# Subsets with a Hipparcos / Tycho-2 cross-match identifier.
gaia_hip = gaia[gaia.hip != ""]
gaia_tyc = gaia[gaia.tycho2_id != ""]
```
Database Gaia-TGAS:
Original objects: 2057050
Discarded objects: 30840
Final objects: 2026210
## =============================================================================
## Load Hipparcos database
## =============================================================================
```python
# Available in: http://cdsarc.u-strasbg.fr/viz-bin/Cat?cat=I%2F239&target=readme&#sRM2.1
names_hip={}
names_hip[1] = "hip"
names_hip[8] = "ra_hip"
names_hip[9] = "dec_hip"
names_hip[14] = "ra_error_hip"
names_hip[15] = "dec_error_hip"
names_hip[19] = "ra_dec_corr_hip"
names_hip[20] = "ra_parallax_corr_hip"
names_hip[21] = "dec_parallax_corr_hip"
names_hip[22] = "ra_pmra_corr_hip"
names_hip[23] = "dec_pmra_corr_hip"
names_hip[24] = "parallax_pmra_corr_hip"
names_hip[25] = "ra_pmdec_corr_hip"
names_hip[26] = "dec_pmdec_corr_hip"
names_hip[27] = "parallax_pmdec_corr_hip"
names_hip[28] = "pmra_pmdec_corr_hip"
names_hip[5] = "Vmag_hip"
names_hip[71] = "HenryDraperId_hip"
#The following columns are not read and left to the improved Hipparcos catalogue:
# names_hip[11] = "parallax_hip"
# names_hip[12] = "pmra_hip"
# names_hip[13] = "pmdec_hip"
# names_hip[16] = "parallax_error_hip"
# names_hip[17] = "pmra_error_hip"
# names_hip[18] = "pmdec_error_hip"
#names2_hip[76] = "sptype_hip"
names_hip = collections.OrderedDict(sorted(names_hip.items()))
hipparcos = pd.read_csv(dir_hip + "hip_main.dat.gz", delimiter="|", usecols=names_hip.keys(), names=names_hip.values())
# Columns format
cols = hipparcos.columns.tolist()
objects = ["hip", "HenryDraperId_hip"]
cols = [x for x in cols if x not in objects]
hipparcos[cols] = hipparcos[cols].apply(pd.to_numeric, errors='coerce')
hipparcos.hip = hipparcos.hip.astype(str)
hipparcos.hip = hipparcos.hip.map(lambda x: x.replace(".0", ""))
# Delete null astrometry values
n1 = len(hipparcos)
hipparcos.dropna(subset=["ra_hip", "dec_hip"], how="any", inplace=True)
n2 = len(hipparcos)
print("\nDatabase Hipparcos:", "\nOriginal objects:", n1,"\nDiscarded objects:", n1-n2,"\nFinal objects:", n2)
```
/home/iwander/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py:2728: DtypeWarning: Columns (5) have mixed types. Specify dtype option on import or set low_memory=False.
interactivity=interactivity, compiler=compiler, result=result)
Database Hipparcos:
Original objects: 118218
Discarded objects: 263
Final objects: 117955
## =============================================================================
## Load Hipparcos improved
## =============================================================================
```python
comments=list(range(95))+[96,97]
hipvan=pd.read_csv(dir_hip + "HipVanLeeuwen2007.tsv",sep=";",skiprows=comments)
hipvan=hipvan[['_RAJ2000', '_DEJ2000', 'HIP', 'Plx', 'e_Plx', 'pmRA', 'e_pmRA',
'pmDE', 'e_pmDE','Hpmag', 'e_Hpmag', 'B-V', 'e_B-V', 'V-I']]
columns={'_RAJ2000':'ra_hip', '_DEJ2000':'dec_hip', 'HIP':'hip',
'Plx':'parallax_hip', 'e_Plx':'parallax_error_hip',
'pmRA':'pmra_hip', 'e_pmRA':'pmra_error_hip',
'pmDE':'pmdec_hip', 'e_pmDE':'pmdec_error_hip','Hpmag':'Hpmag_hip',
'e_Hpmag':'e_Hpmag_hip', 'B-V':'B_V_hip', 'e_B-V':'e_B_V_hip',
'V-I':'V_I_hip'}
hipvan.rename(columns=columns,inplace=True)
cols = hipvan.columns.tolist()
objects = ["hip"]
cols = [x for x in cols if x not in objects]
hipvan[cols] = hipvan[cols].apply(pd.to_numeric, errors='coerce')
hipvan.hip = hipvan.hip.astype(str)
hipvan.hip = hipvan.hip.map(lambda x: x.replace(".0", ""))
hipvan=hipvan[hipvan.parallax_hip > 0]
# Delete null astrometry values
n1 = len(hipvan)
hipvan.dropna(subset=["ra_hip", "dec_hip", "parallax_hip"], how="any", inplace=True)
n2 = len(hipvan)
print("\nDatabase Hipparcos improved:", "\nOriginal objects:", n1,"\nDiscarded objects:", n1-n2,"\nFinal objects:", n2)
```
Database Hipparcos improved:
Original objects: 113942
Discarded objects: 0
Final objects: 113942
```python
# Merge Hipparcos + HipVanLeeuwen2007
cols = ['hip', 'parallax_hip', 'pmra_hip', 'pmdec_hip', 'parallax_error_hip', 'pmra_error_hip','pmdec_error_hip']
hipparcos = hipparcos.merge(hipvan[cols], on='hip', how='left')
```
## =============================================================================
## Load Tycho database
## =============================================================================
```python
# Available in: http://cdsarc.u-strasbg.fr/viz-bin/Cat?cat=I%2F239&target=readme&#sRM2.13
names_tyc={}
names_tyc[1] = "tycho2_id"
names_tyc[8] = "ra_tyc"
names_tyc[9] = "dec_tyc"
names_tyc[11] = "parallax_tyc"
names_tyc[12] = "pmra_tyc"
names_tyc[13] = "pmdec_tyc"
names_tyc[14] = "ra_error_tyc"
names_tyc[15] = "dec_error_tyc"
names_tyc[16] = "parallax_error_tyc"
names_tyc[17] = "pmra_error_tyc"
names_tyc[18] = "pmdec_error_tyc"
names_tyc[19] = "ra_dec_corr_tyc"
names_tyc[20] = "ra_parallax_corr_tyc"
names_tyc[21] = "dec_parallax_corr_tyc"
names_tyc[22] = "ra_pmra_corr_tyc"
names_tyc[23] = "dec_pmra_corr_tyc"
names_tyc[24] = "parallax_pmra_corr_tyc"
names_tyc[25] = "ra_pmdec_corr_tyc"
names_tyc[26] = "dec_pmdec_corr_tyc"
names_tyc[27] = "parallax_pmdec_corr_tyc"
names_tyc[28] = "pmra_pmdec_corr_tyc"
names_tyc[53] = "HenryDraperId_tyc"
names_tyc[5] = "Vmag_tyc"
names_tyc = collections.OrderedDict(sorted(names_tyc.items()))
tycho = pd.read_csv(dir_hip + "tyc_main.zip", delimiter="|", usecols = names_tyc.keys(), names = names_tyc.values())
# Split original tycho-id header which separated by white-space
tycho["a"], tycho["b"], tycho["c"] = tycho["tycho2_id"].str.split().str
# Concatenate tycho-id headers using "-"
tycho["tycho2_id"] = tycho["a"] + "-" + tycho["b"] + "-" + tycho["c"]
# Delete auxiliar columns used in conversion
del tycho["a"], tycho["b"], tycho["c"]
# Columns format
cols = tycho.columns.tolist()
objects = ["tycho2_id", "HenryDraperId_tyc"]
cols = [x for x in cols if x not in objects]
tycho[cols] = tycho[cols].apply(pd.to_numeric, errors='coerce')
tycho=tycho[tycho.parallax_tyc > 0]
# Delete null astrometry values
n1 = len(tycho)
tycho.dropna(subset=["ra_tyc", "dec_tyc", "parallax_tyc"], how="any", inplace=True)
n2 = len(tycho)
print("\nDatabase Tycho:", "\nOriginal objects:", n1,"\nDiscarded objects:", n1-n2,"\nFinal objects:", n2)
```
Database Tycho:
Original objects: 579054
Discarded objects: 0
Final objects: 579054
## =============================================================================
## Load Simbad-Hipparcos database
## =============================================================================
```python
cols = ["typedident","identifier", "radvel", "coord1(ICRS,J2000/2000)", "plx", "pm", "MagV", "spec.type"]
simbad = pd.read_csv(dir_simbad + "simbad.zip", usecols=cols, delimiter="|")
# Modify ID
simbad["hip"] = simbad["typedident"].map(lambda x: str(x)[4:]).astype(str)
simbad["hip"] = simbad["hip"].str.strip()
del simbad["typedident"]
# Right ascension and declination format
# 1. Delete white-spaces on both sides of the text
simbad["coord1(ICRS,J2000/2000)"] = simbad["coord1(ICRS,J2000/2000)"].str.strip()
# 2. Split string into 6 values (hh:mm:ss for RA and hh:mm:ss for DEC)
simbad["ra_h"], simbad["ra_m"], simbad["ra_s"], simbad["dec_h"], simbad["dec_m"], simbad["dec_s"] = \
simbad["coord1(ICRS,J2000/2000)"].str.split(" ").str
# 3. Concatenate the first 3 fields (RA) using conversion formule from hh:mm:ss to degrees
simbad["ra_simbad"] = simbad["ra_h"].astype(float)*15 + simbad["ra_m"].astype(float)/60 + simbad["ra_s"].astype(float)/3600
# 4. Concatenate the last 3 fields (DEC) using conversion formule from hh:mm:ss to degrees
simbad["dec_simbad"] = np.sign(simbad["dec_h"].astype(float)) * ( \
np.abs(simbad["dec_h"].astype(float)) + simbad["dec_m"].astype(float)/60 + simbad["dec_s"].astype(float)/3600 )
# 5. Delete auxiliar columns used in conversion
del simbad["coord1(ICRS,J2000/2000)"]
del simbad["ra_h"], simbad["ra_m"], simbad["ra_s"], simbad["dec_h"], simbad["dec_m"], simbad["dec_s"]
# Proper motion format
# 1. Delete white-spaces on both sides of the text
simbad["pm"] = simbad["pm"].str.strip()
# 2. Split string into 2 values (pm_ra and pm_dec)
simbad["pmra_simbad"], simbad["pmdec_simbad"] = simbad["pm"].str.split(" ").str
# 3. Delete auxiliar columns used in conversion
del simbad["pm"]
# Modify name columns
simbad = simbad.rename(columns={"identifier": "name_simbad"})
simbad = simbad.rename(columns={"plx": "parallax_simbad"})
simbad = simbad.rename(columns={"spec.type": "sptype_simbad"})
simbad = simbad.rename(columns={"radvel": "RV_simbad"})
simbad = simbad.rename(columns={"MagV": "Vmag_simbad"})
# Columns format
cols = simbad.columns.tolist()
objects = ["hip", "name_simbad", "sptype_simbad"]
cols = [x for x in cols if x not in objects]
simbad[cols] = simbad[cols].apply(pd.to_numeric, errors='coerce')
# Delete null astrometry values
n1 = len(simbad)
simbad.dropna(subset=["ra_simbad", "dec_simbad", "parallax_simbad"], how="any", inplace=True)
n2 = len(simbad)
print("\nDatabase Simbad:", "\nOriginal objects:", n1,"\nDiscarded objects:", n1-n2,"\nFinal objects:", n2)
```
Database Simbad:
Original objects: 118179
Discarded objects: 175
Final objects: 118004
## =============================================================================
## Create database with unique astrometry values
## =============================================================================
```python
print("\nBuilding database with just one astrometry source per star:")
# db = Gaia + Hipparcos
db = pd.DataFrame()
gaia["source_astro"] = "gaia"
exclusive_hip = hipparcos[~hipparcos.hip.isin(gaia_hip.hip)]
exclusive_hip.rename(columns=lambda x: x.replace("_hip", ""), inplace=True)
exclusive_hip["source_astro"] = "hipparcos"
db = pd.concat([gaia, exclusive_hip], axis=0)
print("Input from Hipparcos:", len(exclusive_hip))
# db = db + Tycho
exclusive_tyc = tycho[~tycho.tycho2_id.isin(gaia_tyc.tycho2_id)]
exclusive_tyc.rename(columns=lambda x: x.replace("_tyc", ""), inplace=True)
exclusive_tyc["source_astro"] = "tycho"
db = pd.concat([db, exclusive_tyc], axis=0)
print("Input from Tycho:", len(exclusive_tyc))
# db = db + Simbad
exclusive_simbad = simbad[~simbad.hip.isin(db.hip)]
exclusive_simbad.rename(columns=lambda x: x.replace("_simbad", ""), inplace=True)
exclusive_simbad = exclusive_simbad.loc[:,["hip", "ra", "dec", "parallax", "pmra", "pmdec"]]
exclusive_simbad["source_astro"] = "simbad"
db = pd.concat([db, exclusive_simbad], axis=0)
print("Input from Simbad:", len(exclusive_simbad))
# Clean np.NaN in objects produced by concat method
db.replace({"hip":{np.NaN:""},"tycho2_id":{np.NaN:""},"source_id":{np.NaN:""},"HenryDraperId":{np.NaN:""}}, inplace=True)
# Merge db + Simbad new columns
db = db.merge(simbad[["hip", "name_simbad","sptype_simbad","RV_simbad","Vmag_simbad"]], on="hip", how="left")
# Merge db + HipVanLeeuwen2007 new columns
db = db.merge(hipvan[['hip', 'Hpmag_hip', 'e_Hpmag_hip', 'B_V_hip', 'e_B_V_hip', 'V_I_hip']], on='hip', how='left')
#db["id"] = db.hip.combine(db.tycho2_id, lambda x, y: x if x is not "" else y)
db["id"] = db.hip.astype(str) + db.tycho2_id.astype(str)
# Change the order of database columns
cols = ["id", "source_astro"] + cols_gaia + ["Vmag","HenryDraperId"] + \
["name_simbad","sptype_simbad","RV_simbad","Vmag_simbad"] + \
['Hpmag_hip', 'e_Hpmag_hip', 'B_V_hip', 'e_B_V_hip', 'V_I_hip']
db = db[cols]
#Remove all stars without astrometric errors
errors=["ra_error","dec_error","parallax_error","pmra_error","pmdec_error"]
db.dropna(subset=errors,how="any",inplace=True)
db = db.reset_index(drop=True)
print("Total lenght:", len(db))
```
Building database with just one astrometry source per star:
Input from Hipparcos: 24557
Input from Tycho: 164550
Input from Simbad: 67
Total lenght: 2214419
# =============================================================================
# Full database with the information of all the catalogs
# =============================================================================
```python
print("\nMerging astrometry databases (it keeps data of all sources):")
database = pd.DataFrame()
n1 = len(gaia)
print("Gaia objects (initial lenght of database):", n1)
database = gaia.merge(hipparcos, on="hip", how="outer")
n2 = len(database)
print("Hipparcos objects:", len(hipparcos), "| Input from Hipparcos:", n2-n1, "| Final lenght of database:", n2)
database = database.merge(hipvan, on="hip", how="outer")
n21 = len(database)
print("Hipparcos improved objects:", len(hipvan), "| Input from Hipparcos improved:", n21-n2, "| Final lenght of database:", n21)
database = database.merge(tycho, on="tycho2_id", how="outer")
n3 = len(database)
print("Tycho objects:", len(tycho), "| Input from Tycho:", n3-n2, "| Final lenght of database:", n3)
database = database.merge(simbad, on="hip", how="outer")
n4 = len(database)
print("Simbad objects:", len(simbad), "| Input from Simbad:", n4-n3, "| Final lenght of database:", n4)
#database["id"] = database.hip.combine(database.tycho2_id, lambda x, y: x if x is not "" else y)
database["id"] = database.hip.astype(str) + database.tycho2_id.astype(str)
#Remove all stars without astrometric errors
database.dropna(subset=["ra_error","dec_error","parallax_error","pmra_error","pmdec_error"],how="any",inplace=True)
database = database.reset_index(drop=True)
```
Merging astrometry databases (it keeps data of all sources):
Gaia objects (initial lenght of database): 2026210
Hipparcos objects: 117955 | Input from Hipparcos: 24557 | Final lenght of database: 2050767
Hipparcos improved objects: 113942 | Input from Hipparcos improved: 0 | Final lenght of database: 2050767
Tycho objects: 579054 | Input from Tycho: 164550 | Final lenght of database: 2215317
Simbad objects: 118004 | Input from Simbad: 67 | Final lenght of database: 2215384
# =============================================================================
# Load radial velocities databases
# =============================================================================
```python
t1 = time.time()
print("\nRadial velocity databases:")
radialv = {}
```
Radial velocity databases:
## RAVE-DR5
```python
# =============================================================================
# RAVE-DR5
# =============================================================================
file = "RAVE-DR5.tsv"
cols = ["HRV", "e_HRV", "TGAS", "TYCHO2"]
delimiter = ";"
data = pd.DataFrame()
data = pd.DataFrame(pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter,
skiprows = list(np.arange(78)) + [79, 80]))
data.rename(columns={"HRV": "RV", "e_HRV": "e_RV", "TGAS": "source_id", "TYCHO2": "tycho2_id"}, inplace=True)
data = data.merge(gaia.loc[:,["source_id","hip"]], left_on="source_id", right_on="source_id", how="left")
data.source_id = data.source_id.str.strip()
data.tycho2_id = data.tycho2_id.str.strip()
data.hip.replace(np.nan,"", inplace=True)
data["source_rv"] = file
data["source_erv"] = "Catalogue"
data.source_erv[data.e_RV.isnull()] = "median"
median = data.e_RV[data.e_RV>0].median()
data.e_RV[data.e_RV.isnull()] = median
data.e_RV[data.e_RV==0] = median
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
RAVE-DR5.tsv
Original objects: 520701
Discarded: 0
Final objects: 520701
## BB2000
```python
# =============================================================================
# BB2000
# =============================================================================
file = "BB2000.csv"
cols = ["RV", "e_RV", "TYC1", "TYC2", "TYC3"]
delimiter = ","
data = pd.DataFrame()
data = pd.DataFrame(pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter))
data["tycho2_id"] = data.TYC1.astype(str) + "-" + data.TYC2.astype(str) + "-" + data.TYC3.astype(str)
del data["TYC1"], data["TYC2"], data["TYC3"]
data["source_rv"] = file
data["source_erv"] = "Catalogue"
data.source_erv[data.e_RV.isnull()] = "median"
median = data.e_RV[data.e_RV>0].median()
data.e_RV[data.e_RV.isnull()] = median
data.e_RV[data.e_RV==0] = median
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
BB2000.csv
Original objects: 673
Discarded: 0
Final objects: 673
## Famaey2005.tsv
```python
# =============================================================================
# Famaey2005.tsv
# =============================================================================
file = "Famaey2005.tsv"
cols = ["RV", "e_RV", "HIP"]
delimiter = ";"
data = pd.DataFrame()
data = pd.DataFrame(pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter,
skiprows = list(np.arange(118)) + [119, 120]))
data.rename(columns={"HIP": "hip"}, inplace=True)
data.hip = data.hip.astype(str)
data.RV = pd.to_numeric(data.RV, errors="coerce")
data["source_rv"] = file
data["source_erv"] = "Catalogue"
data.source_erv[data.e_RV.isnull()] = "median"
median = data.e_RV[data.e_RV>0].median()
data.e_RV[data.e_RV.isnull()] = median
data.e_RV[data.e_RV==0] = median
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
Famaey2005.tsv
Original objects: 6690
Discarded: 662
Final objects: 6028
## Galah.tsv - Without e_RV
```python
# =============================================================================
# Galah.tsv - Without e_RV
# =============================================================================
file = "Galah.tsv"
cols = ["RV", "TYC2"]
delimiter = ";"
data = pd.DataFrame()
data = pd.DataFrame(pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter,
skiprows = list(np.arange(54)) + [55, 56]))
data.rename(columns={"TYC2": "tycho2_id"}, inplace=True)
data["tycho2_id"] = data["tycho2_id"].map(lambda x: x.replace("-", " "))
data["a"], data["b"], data["c"] = data["tycho2_id"].str.split().str
data["a"] = pd.to_numeric(data["a"])
data["b"] = pd.to_numeric(data["b"])
data["c"] = pd.to_numeric(data["c"])
data["tycho2_id"] = data["a"].astype(str) + "-" + data["b"].astype(str) + "-" + data["c"].astype(str)
del data["a"], data["b"], data["c"]
data["source_rv"] = file
data["source_erv"] = "Without_info"
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
data["e_RV"] = 0.6
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
Galah.tsv
Original objects: 10680
Discarded: 0
Final objects: 10680
## GCS2011.tsv
```python
# =============================================================================
# GCS2011.tsv
# =============================================================================
file = "GCS2011.tsv"
cols = ["RV", "e_RV", "HIP", "Name"]
delimiter = "|"
data = pd.DataFrame()
data = pd.DataFrame(pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter,
skiprows = list(np.arange(174)) + [175, 176]))
data.rename(columns={"HIP":"hip", "Name":"name"}, inplace=True)
data.RV = pd.to_numeric(data.RV, errors="coerce")
data.e_RV = pd.to_numeric(data.e_RV, errors="coerce")
data.hip = data.hip.str.strip()
data.name = data.name.str.strip()
data["source_rv"] = file
data["source_erv"] = "Catalogue"
data.source_erv[data.e_RV.isnull()] = "median"
median = data.e_RV[data.e_RV>0].median()
data.e_RV[data.e_RV.isnull()] = median
data.e_RV[data.e_RV==0] = median
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
GCS2011.tsv
Original objects: 16682
Discarded: 2543
Final objects: 14139
## Malaroda2012.csv - Without e_RV
```python
# =============================================================================
# Malaroda2012.csv - Without e_RV
# =============================================================================
file = "Malaroda2012.csv"
cols = ["RV", "HIP", "TYC1", "TYC2", "TYC3"]
delimiter = ","
data = pd.DataFrame()
data = pd.DataFrame(pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter))
data.rename(columns={"HIP":"hip"}, inplace=True)
data["tycho2_id"] = data.TYC1.astype(str) + "-" + data.TYC2.astype(str) + "-" + data.TYC3.astype(str)
del data["TYC1"], data["TYC2"], data["TYC3"]
data["e_RV"]=1.0
data["source_rv"] = file
data["source_erv"] = "Without_info"
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
Malaroda2012.csv
Original objects: 2178
Discarded: 146
Final objects: 2032
## Maldonado2010.tsv
```python
# =============================================================================
# Maldonado2010.tsv
# =============================================================================
# Ingest Maldonado et al. (2010) HIP-identified radial velocities.  Missing
# or zero e_RV values are replaced by the catalogue's median positive error,
# and that replacement is recorded in source_erv.
file = "Maldonado2010.tsv"
cols = ["RV", "e_RV", "HIP"]
delimiter = ";"
# Skip the VizieR preamble (first 82 lines) plus the units/dashes rows.
data = pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter,
                   skiprows=list(range(82)) + [83, 84])
data.rename(columns={"HIP": "hip"}, inplace=True)
# Non-numeric entries (blanks, markers) become NaN and are handled below.
data.RV = pd.to_numeric(data.RV, errors="coerce")
data.e_RV = pd.to_numeric(data.e_RV, errors="coerce")
data.hip = data.hip.astype(str)
data["source_rv"] = file
data["source_erv"] = "Catalogue"
# Use .loc instead of chained indexing (data.e_RV[mask] = ...): chained
# assignment raises SettingWithCopyWarning and does not write back under
# pandas copy-on-write.  The median must be computed before the NaNs are
# filled, and source_erv must be flagged before e_RV loses its NaNs.
median = data.e_RV[data.e_RV > 0].median()
data.loc[data.e_RV.isnull(), "source_erv"] = "median"
data.loc[data.e_RV.isnull(), "e_RV"] = median
data.loc[data.e_RV == 0, "e_RV"] = median
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
Maldonado2010.tsv
Original objects: 495
Discarded: 22
Final objects: 473
## Pulkovo.tsv
```python
# =============================================================================
# Pulkovo.tsv
# =============================================================================
# Ingest the Pulkovo compilation (HIP identifiers).  Missing or zero e_RV
# values are replaced by the catalogue's median positive error and flagged
# in source_erv.
file = "Pulkovo.tsv"
cols = ["RV", "e_RV", "HIP"]
delimiter = ";"
# Skip the VizieR preamble (first 61 lines) plus the units/dashes rows.
data = pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter,
                   skiprows=list(range(61)) + [62, 63])
data.rename(columns={"HIP": "hip"}, inplace=True)
data.hip = data.hip.astype(str)
data["source_rv"] = file
data["source_erv"] = "Catalogue"
# .loc instead of chained indexing: chained assignment raises
# SettingWithCopyWarning and does not write back under pandas copy-on-write.
# Flag source_erv from the NaN mask before e_RV's NaNs are filled.
median = data.e_RV[data.e_RV > 0].median()
data.loc[data.e_RV.isnull(), "source_erv"] = "median"
data.loc[data.e_RV.isnull(), "e_RV"] = median
data.loc[data.e_RV == 0, "e_RV"] = median
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
Pulkovo.tsv
Original objects: 35493
Discarded: 0
Final objects: 35493
## Web1995-HIP.csv
```python
# =============================================================================
# Web1995-HIP.csv
# =============================================================================
# Ingest the WEB (1995) catalogue, HIP-identified subset.  Missing or zero
# e_RV values are replaced by the catalogue's median positive error and
# flagged in source_erv.
file = "Web1995-HIP.csv"
cols = ["RV", "e_RV", "HIP"]
delimiter = ","
data = pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter)
data.rename(columns={"HIP": "hip"}, inplace=True)
data.hip = data.hip.astype(str)
data["source_rv"] = file
data["source_erv"] = "Catalogue"
# .loc instead of chained indexing: chained assignment raises
# SettingWithCopyWarning and does not write back under pandas copy-on-write.
# Flag source_erv from the NaN mask before e_RV's NaNs are filled.
median = data.e_RV[data.e_RV > 0].median()
data.loc[data.e_RV.isnull(), "source_erv"] = "median"
data.loc[data.e_RV.isnull(), "e_RV"] = median
data.loc[data.e_RV == 0, "e_RV"] = median
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
Web1995-HIP.csv
Original objects: 494
Discarded: 0
Final objects: 494
## Web1995-TYC2.csv
```python
# =============================================================================
# Web1995-TYC2.csv
# =============================================================================
# Ingest the WEB (1995) catalogue, Tycho-2-identified subset.  Missing or
# zero e_RV values are replaced by the catalogue's median positive error and
# flagged in source_erv.
file = "Web1995-TYC2.csv"
cols = ["RV", "e_RV", "TYC1", "TYC2", "TYC3"]
delimiter = ","
data = pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter)
# Build the composite Tycho-2 identifier "TYC1-TYC2-TYC3", then drop the parts.
data["tycho2_id"] = data.TYC1.astype(str) + "-" + data.TYC2.astype(str) + "-" + data.TYC3.astype(str)
del data["TYC1"], data["TYC2"], data["TYC3"]
data["source_rv"] = file
data["source_erv"] = "Catalogue"
# .loc instead of chained indexing: chained assignment raises
# SettingWithCopyWarning and does not write back under pandas copy-on-write.
# Flag source_erv from the NaN mask before e_RV's NaNs are filled.
median = data.e_RV[data.e_RV > 0].median()
data.loc[data.e_RV.isnull(), "source_erv"] = "median"
data.loc[data.e_RV.isnull(), "e_RV"] = median
data.loc[data.e_RV == 0, "e_RV"] = median
n1 = len(data)
data.dropna(subset=["RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
Web1995-TYC2.csv
Original objects: 673
Discarded: 0
Final objects: 673
## APOGEE2 (taken from RVcat of Bailer-Jones 2017)
```python
# =============================================================================
# RVcat.csv.zip
# =============================================================================
# Ingest the APOGEE2 subset of RVcat (Bailer-Jones 2017).  Entries missing
# either RV or e_RV are discarded; no error imputation is done here because
# the catalogue provides its own uncertainties.
file = "RVcat.csv.zip"
cols = ["RV", "RVerr", "tychoID", "cat"]
delimiter = ","
# read_csv already returns a DataFrame; the dead `data = pd.DataFrame()`
# init and the redundant pd.DataFrame(...) wrapper were removed.
data = pd.read_csv(dir_rv + file, usecols=cols, delimiter=delimiter)
# Choose only objects from the APOGEE database (cat code 10 in RVcat).
data = data[data.cat == 10]
data.rename(columns={"RVerr": "e_RV", "tychoID": "tycho2_id"}, inplace=True)
data["source_rv"] = file
data["source_erv"] = "Catalogue"
n1 = len(data)
# One dropna over both columns replaces the original two sequential calls;
# the surviving rows are identical.
data.dropna(subset=["RV", "e_RV"], inplace=True)
n2 = len(data)
radialv[file] = data
print("\n", file, "\nOriginal objects:", n1, "\nDiscarded:", n1-n2, "\nFinal objects:", n2)
```
RVcat.csv.zip
Original objects: 29173
Discarded: 0
Final objects: 29173
## =============================================================================
## Radial velocity databases integration
## =============================================================================
```python
# Integrate all per-catalogue radial-velocity frames into RV, then derive:
#   RV1 — every distinct (id, RV, e_RV) combination per star,
#   RV2 — only records at each star's minimum e_RV (ties kept),
#   RV3 — exactly one record per star (first at minimum e_RV).
print("\nRadial velocity databases integration:")
t2 = time.time()
# Stack every per-catalogue frame; columns absent from a catalogue become NaN.
RV = pd.concat([radialv["RAVE-DR5.tsv"], radialv["BB2000.csv"], radialv["Famaey2005.tsv"],
                radialv["Galah.tsv"], radialv["GCS2011.tsv"], radialv["Malaroda2012.csv"],
                radialv["Maldonado2010.tsv"], radialv["Pulkovo.tsv"], radialv["Web1995-HIP.csv"],
                radialv["Web1995-TYC2.csv"], radialv["RVcat.csv.zip"]], axis=0)
# Normalise the identifier columns to empty strings.  Column-level fillna
# replaces the original `RV.col.replace(np.NaN, "", inplace=True)`, which is
# in-place mutation through attribute access (chained-assignment hazard under
# copy-on-write) and relied on np.NaN, removed in NumPy 2.0.
for id_col in ("hip", "tycho2_id", "source_id", "name"):
    RV[id_col] = RV[id_col].fillna("")
# Composite star key: at most one of hip / tycho2_id is non-empty per row.
RV["id"] = RV.hip.astype(str) + RV.tycho2_id.astype(str)
# Drop objects with incomplete astrometry information or with both RV and e_RV duplicates
RV1 = RV.copy()
RV1 = RV1[RV1.e_RV.notnull()]
RV1 = RV1[RV1.id != ""]
# RV1 => Various records per star with different VR and e_VR combination are allowed
RV1.drop_duplicates(subset=["id", "RV", "e_RV"], keep="first", inplace=True)
# RV2 => Keep only object with minimum e_RV value (various records per star with the same e_VR are allowed)
RV_min_eRV = RV1.groupby("id", as_index=False).e_RV.min()
RV2 = RV1.merge(RV_min_eRV, on=["id", "e_RV"], how="inner")
# RV3 => Keep only one object with minimum e_RV value (Just one record per star is allowed)
RV3 = RV2.drop_duplicates(subset=["id", "e_RV"], keep="first")
print("Objects number with different VR and e_VR combination:", len(RV1))
print("Objects number with different VR and the same minimun e_VR:", len(RV2))
print("Objects number without duplicates (just one register per star):", len(RV3))
t3 = time.time()
```
Radial velocity databases integration:
Objects number with different VR and e_VR combination: 407189
Objects number with different VR and the same minimun e_VR: 335367
Objects number without duplicates (just one register per star): 332551
## =============================================================================
## Full integration
## =============================================================================
```python
# Building AstroRV's databases:
#   AstroRVDuplicates — astrometry joined to every (RV, e_RV) record (RV1),
#   AstroRV           — astrometry joined to one best record per star (RV3).
AstroRVDuplicates = db.merge(RV1[["id", "RV", "e_RV", "source_rv", "source_erv"]], on="id", how="right")
AstroRV = db.merge(RV3[["id", "RV", "e_RV", "source_rv", "source_erv"]], on="id", how="right")
# Drop objects with incomplete astrometry information
AstroRVDuplicates.dropna(subset=["parallax"], inplace=True)
AstroRV.dropna(subset=["parallax"], inplace=True)
# Sanitise Simbad text fields: empty -> 'NULL', and any run of whitespace,
# quotes or commas -> '_' so the fields are safe in the CSV output.  The
# regex is a raw string: the original non-raw '[\s\",]+' relied on invalid
# escape sequences (a DeprecationWarning, a SyntaxWarning in newer Pythons);
# the effective pattern is unchanged.
for txt_field in "name_simbad", "sptype_simbad", "HenryDraperId":
    AstroRV[txt_field] = AstroRV[txt_field].fillna('')
    AstroRV[txt_field] = AstroRV[txt_field].map(lambda x: 'NULL' if x == '' else re.sub(r'[\s\",]+', '_', x).strip('_'))
    AstroRVDuplicates[txt_field] = AstroRVDuplicates[txt_field].fillna('')
    AstroRVDuplicates[txt_field] = AstroRVDuplicates[txt_field].map(lambda x: 'NULL' if x == '' else re.sub(r'[\s\",]+', '_', x).strip('_'))
# Report
print("\nObjects in AstroRV (without duplicates):", len(AstroRV))
print("\nObjects in AstroRV per source:", AstroRV.groupby(AstroRV.source_rv).source_rv.count())
print("Number of objects with hipparcos id:", (AstroRV.hip != "").sum())
print("Number of objects with tycho2 id:", (AstroRV.tycho2_id != "").sum())
```
Objects in AstroRV (without duplicates): 285114
Objects in AstroRV per source: source_rv
BB2000.csv 503
Famaey2005.tsv 5544
GCS2011.tsv 7091
Galah.tsv 7837
Malaroda2012.csv 416
Maldonado2010.tsv 301
Pulkovo.tsv 23412
RAVE-DR5.tsv 217257
RVcat.csv.zip 22501
Web1995-HIP.csv 252
Name: source_rv, dtype: int64
Number of objects with hipparcos id: 36699
Number of objects with tycho2 id: 248415
```python
# Save databases: persist both catalogue variants to the results directory.
for frame, csv_name in ((AstroRVDuplicates, "AstroRVFull.csv"),
                        (AstroRV, "AstroRV.csv")):
    frame.to_csv(dir_results + csv_name, index=False)
```
## =============================================================================
## Summaries
## =============================================================================
```python
# =============================================================================
# Build the summary report: per-catalogue totals, Hipparcos/Tycho ID counts,
# and each catalogue's contribution to the final AstroRV compilation.
# =============================================================================
report = dict()

# --- Astrometric catalogues --------------------------------------------------
print("GAIA TGAS:")
rep = report["Gaia TGAS"] = dict()
rep["ref"] = "I/337/tgas & (1)"
rep["total"] = len(gaia)
rep["hip"] = (gaia.hip != "").sum()
rep["tyc"] = (gaia.tycho2_id != "").sum()
rep["cont"] = len(gaia)
print("\tNumber of objects:", rep["total"])
print("\tHipparcos:", rep["hip"])
print("\tTycho:", rep["tyc"])
print("\tContribution:", rep["cont"])

print("Hipparcos:")
rep = report["Hipparcos"] = dict()
# Raw string: the LaTeX escape \_ is an invalid escape in a plain string
# literal (same bytes, no DeprecationWarning).
rep["ref"] = r"I/239/hip\_main & (2)"
rep["total"] = len(hipparcos)
rep["hip"] = (hipparcos.hip != "").sum()
rep["tyc"] = 0
rep["cont"] = len(exclusive_hip)
print("\tNumber of objects:", rep["total"])
print("\tHipparcos:", rep["hip"])
print("\tTycho:", rep["tyc"])
print("\tContribution:", rep["cont"])

print("Tycho:")
rep = report["Tycho"] = dict()
rep["ref"] = "I/259/tyc2 & (2)"
rep["total"] = len(tycho)
rep["hip"] = 0
rep["tyc"] = (tycho.tycho2_id != "").sum()
rep["cont"] = len(exclusive_tyc)
print("\tNumber of objects:", rep["total"])
print("\tHipparcos:", rep["hip"])
print("\tTycho:", rep["tyc"])
print("\tContribution:", rep["cont"])

print("Simbad:")
rep = report["Simbad"] = dict()
rep["ref"] = "-- & (3)"
rep["total"] = len(simbad)
rep["hip"] = (simbad.hip != "").sum()
rep["tyc"] = 0
rep["cont"] = len(exclusive_simbad)
print("\tNumber of objects:", rep["total"])
print("\tHipparcos:", rep["hip"])
print("\tTycho:", rep["tyc"])
print("\tContribution:", rep["cont"])

astro_cats = ["Gaia TGAS", "Hipparcos", "Tycho", "Simbad"]
# One aggregation pass (the original ran four separate loops over astro_cats).
for k in "total", "hip", "tyc", "cont":
    report["astro_" + k] = sum(report[cat][k] for cat in astro_cats)
print("Summary:")
print("\tTotal:", report["astro_total"])
print("\tHipparcos:", report["astro_hip"])
print("\tTycho:", report["astro_tyc"])
print("\tCatalog:", report["astro_cont"])

# --- Radial-velocity catalogues ----------------------------------------------
# OrderedDict keeps the table-row order; each entry maps a display name to
# its source file(s) and its CDS code / bibliography reference.
rv_cats = collections.OrderedDict([
    ["WEB1995", dict(cat=["Web1995-TYC2.csv", "Web1995-HIP.csv"], ref=r"III/213 & (5)")],
    ["GCS", dict(cat=["GCS2011.tsv"], ref=r"J/A+A/530/A138 & (6)")],
    ["RAVE-DR5", dict(cat=["RAVE-DR5.tsv"], ref=r"III/279/rave\_dr5 & (7)")],
    ["PULKOVO", dict(cat=["Pulkovo.tsv"], ref=r"III/252/table8 & (8)")],
    ["FAMAEY2005", dict(cat=["Famaey2005.tsv"], ref=r"J/A+A/430/165/tablea1 & (9)")],
    ["BB2000", dict(cat=["BB2000.csv"], ref=r"III/213 & (10)")],
    ["MALARODA", dict(cat=["Malaroda2012.csv"], ref=r"III/249/catalog & (11)")],
    ["GALAH", dict(cat=["Galah.tsv"], ref=r"J/MNRAS/465/3203 & (12)")],
    ["MALDONADO", dict(cat=["Maldonado2010.tsv"], ref=r"J/A+A/521/A12/table1 & (13)")],
    ["APOGEE2", dict(cat=["RVcat.csv.zip"], ref="-- & (14)")]
])
for key in rv_cats.keys():
    source_files = rv_cats[key]["cat"]
    print("Catalog ", key)
    report[key] = dict()
    for k in "total", "hip", "tyc", "cont":
        report[key][k] = 0
    for cat in source_files:
        print("\tCatalog %s..." % cat)
        report[key]["total"] += len(radialv[cat])
        # Not every catalogue carries both identifier columns, and on some
        # pandas versions the != "" comparison on a numeric column returns a
        # scalar (see the FutureWarning in the recorded output), making
        # .sum() raise.  Narrowed from a bare `except:`, which would also
        # have swallowed KeyboardInterrupt/SystemExit.
        try:
            report[key]["hip"] += (radialv[cat].hip != "").sum()
        except Exception:
            pass  # count stays 0 for this catalogue
        try:
            report[key]["tyc"] += (radialv[cat].tycho2_id != "").sum()
        except Exception:
            pass  # count stays 0 for this catalogue
        report[key]["cont"] += (AstroRV.source_rv == cat).sum()
    print("\tTotal:", report[key]["total"])
    print("\tHipparcos:", report[key]["hip"])
    print("\tTycho:", report[key]["tyc"])
    print("\tContribution:", report[key]["cont"])
# Grand totals over the RV catalogues (again one pass instead of four loops;
# the original's unused `i` counter was removed).
for k in "total", "hip", "tyc", "cont":
    report["rv_" + k] = sum(report[cat][k] for cat in rv_cats)
print("Summary radial velocities:")
print("\tTotal:", report["rv_total"])
print("\tHipparcos:", report["rv_hip"])
print("\tTycho:", report["rv_tyc"])
print("\tCatalog:", report["rv_cont"])
```
GAIA TGAS:
Number of objects: 2026210
Hipparcos: 93398
Tycho: 1932812
Contribution: 2026210
Hipparcos:
Number of objects: 117955
Hipparcos: 117955
Tycho: 0
Contribution: 24557
Tycho:
Number of objects: 579054
Hipparcos: 0
Tycho: 579054
Contribution: 164550
Simbad:
Number of objects: 118004
Hipparcos: 118004
Tycho: 0
Contribution: 67
Summary:
Total: 2841223
Hipparcos: 329357
Tycho: 2511866
Catalog: 2215384
Catalog WEB1995
Catalog Web1995-TYC2.csv...
Catalog Web1995-HIP.csv...
Total: 1167
Hipparcos: 494
Tycho: 673
Contribution: 252
Catalog GCS
Catalog GCS2011.tsv...
Total: 14139
Hipparcos: 12977
Tycho: 0
Contribution: 7091
Catalog RAVE-DR5
Catalog RAVE-DR5.tsv...
Total: 520701
Hipparcos: 121
Tycho: 309596
Contribution: 217257
Catalog PULKOVO
Catalog Pulkovo.tsv...
Total: 35493
Hipparcos: 35493
Tycho: 0
Contribution: 23412
Catalog FAMAEY2005
Catalog Famaey2005.tsv...
Total: 6028
Hipparcos: 6028
Tycho: 0
Contribution: 5544
Catalog BB2000
Catalog BB2000.csv...
Total: 673
Hipparcos: 0
Tycho: 673
Contribution: 503
Catalog MALARODA
Catalog Malaroda2012.csv...
Total: 2032
Hipparcos: 0
Tycho: 2032
Contribution: 416
Catalog GALAH
Catalog Galah.tsv...
Total: 10680
Hipparcos: 0
Tycho: 10680
Contribution: 7837
Catalog MALDONADO
Catalog Maldonado2010.tsv...
Total: 473
Hipparcos: 473
Tycho: 0
Contribution: 301
Catalog APOGEE2
Catalog RVcat.csv.zip...
Total: 29173
Hipparcos: 0
Tycho: 29173
Contribution: 22501
Summary radial velocities:
Total: 620559
Hipparcos: 55586
Tycho: 352827
Catalog: 285114
/home/iwander/.local/lib/python3.5/site-packages/pandas/core/ops.py:816: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
result = getattr(x, name)(y)
```python
```
```python
# Write the LaTeX summary table of all source catalogues (tab:AstroRV).
# The with-block guarantees the file is closed even if a write raises; the
# original used bare open()/close() with no try/finally, leaking the handle
# on error.  Every LaTeX template string is reproduced byte-for-byte.
with open(dir_results + "AstroRVSummary.tex", "w") as f:
    # Header
    f.write(r"""\begin{table*}
\centering
\scriptsize
\begin{tabular}{lllllll}
\hline
Catalog name & Number of objects & Hipparcos ID & Tycho-2 ID & Contribution & CDS Code & Reference \\\hline\hline
\multicolumn{7}{c}{\tt Astrometric} \\
\hline
""")
    # Astrometry rows: "--" marks an ID kind the catalogue does not carry.
    for cat in astro_cats:
        hip = str(report[cat]["hip"]) if report[cat]["hip"] > 0 else "--"
        tyc = str(report[cat]["tyc"]) if report[cat]["tyc"] > 0 else "--"
        f.write(r"""%s & %d & %s & %s & %d & \tt %s \\\
""" % (cat, report[cat]["total"], hip, tyc, report[cat]["cont"], report[cat]["ref"]))
    f.write(r"""
\hline
{\tt Totals} & \bf %d & \bf %d & \bf %d & \bf %d & This work \\
\hline
""" % (report["astro_total"], report["astro_hip"], report["astro_tyc"], report["astro_cont"]))
    # Radial Velocity catalogues
    f.write(r"""
\multicolumn{7}{c}{\tt Radial velocities} \\
\hline
""")
    for cat in rv_cats.keys():
        hip = str(report[cat]["hip"]) if report[cat]["hip"] > 0 else "--"
        tyc = str(report[cat]["tyc"]) if report[cat]["tyc"] > 0 else "--"
        f.write(r"""%s & %d & %s & %s & %d & \tt %s \\\
""" % (cat, report[cat]["total"], hip, tyc, report[cat]["cont"], rv_cats[cat]["ref"]))
    f.write(r"""
\hline
{\tt AstroRV} & \bf %d & \bf %d & \bf %d & \bf %d & This work \\
\hline
""" % (report["rv_total"], report["rv_hip"], report["rv_tyc"], report["rv_cont"]))
    # Footer
    f.write(r"""
\end{tabular}
\caption{Catalogs used to compile the {\tt AstroRV} catalog for this work. References: (1) \citealt{Brown2016}, (2) \citealt{ESA1997} (3) \citealt{Wenger2000}, (5) \citealt{Barbier2000a}, (6) \citealt{Casagrande2011}, (7) \citealt{Kunder2017}, (8) \citealt{Gontcharov2006}, (9) \citealt{Famaey2005}, (10) \citealt{Barbier2000b}, (11) \citealt{Malaroda2000}, (12) \citealt{Martell2016}, (13) \citealt{Maldonado2010}, (14) \citealt{Bailer2017}.
\label{tab:AstroRV}}
\end{table*}
""")
print("Done.")
```
Done.
```python
```
|
seap-udeaREPO_NAMEiWanderPATH_START.@iWander_extracted@iWander-master@db@build.ipynb@.PATH_END.py
|
{
"filename": "lg4_envs.py",
"repo_name": "mit-ll/spacegym-kspdg",
"repo_path": "spacegym-kspdg_extracted/spacegym-kspdg-main/src/kspdg/private_src/python3_9/Darwin_x86_64/kspdg_envs/lbg1/lg4_envs.py",
"type": "Python"
}
|
# Pyarmor 8.5.11 (trial), 000000, non-profits, 2024-12-09T10:19:40.946963
from kspdg.private_src.python3_9.Darwin_x86_64.pyarmor_runtime_000000 import __pyarmor__
__pyarmor__(__name__, __file__, b'PY000000\x00\x03\t\x00a\r\r\n\x80\x00\x01\x00\x08\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00\x9a%\x00\x00\x12\t\x04\x00R\xde\xcavh\t\x9fO\x12\xa5\xa8^R\xc2cZ\x00\x00\x00\x00\x00\x00\x00\x00\x90GH\xb1\xbcU\xb7\xc8\x04\x92f0\xb1D"\xfe\xafV\xc9\xfbw\xda\xa4\xa5\x05\x98\xb3\xa1\xc3\xb9\xa3y*\x80m\xe2\x19\x9e\x9e\x84\x10\xa0g\xb3(x\xde:\xaa<S\xfc\x0b&\x19\xe5bql\xf36\xef\x90\x1al\x11\x1a\xe3\xfcM\xc0s\xb2V\xeb\xaf4\x91n\xba;\xc0\xc9\xf6j\xee\xb4\x94\x83\x14K\x1d\xa7\xb4\xff\xba\xf9\x173\x05#\xf8\x8bO\xe36<-\'\xf9>\xb7\xc5\xb4\\\x8c\xd0N\x10\xcd\x96x\x0e\xb8\n\xd1O\x12\x0c\x07\xcf\xa7\x0f\x87]\xa7\x96\x9ew\x10\xa0\xc7\xa0\x98D\xfb\xdb\xd4^V\x9b\xe2\x84nv3B\xd6\xff7+\x07[\x0c\xb6\x1f\x11\xd2Mx\x97\x05\x1e\xba\xb9Z\x93P\x012\x01c\x18\x04\'AeS\xa7NU\xcd\xbf\xb6\x0e\x00x\xf5{X\xaa6&L\x02t\x9f\xe0u\xfc\xe9\xf3*\xff\xe1\x86|B\xe2\xfb\x08\xa4+\x9d\xe9\xe3\x9b8@\x9d/2\x88\x91\x81\xba\xb6p\xddq\x92U{s}t0\xde\xff\xd0\'\xf5\xa0\xc0\x1d\x1e/\x10\xd6\xcf\xa1\x14\xe9C\xba\xf9Z\xcf\x87\x03\x9a\r`\x89y"\x87\x00b\x94<\xa0\x83\xef\x07\xad\xa3m\xa5\x89N\xc4\xd26\xad\xac\xc2\xf6\x9eN\x10&k\xaa\x96\x95e\xdfP\xbd\x90\x0e\xb5\x8a\x8a|-\xc1\x86\xcf\xb5\xa6\x1e\xd9\xa2S~\xd9y\xa0\xb8\x93\xdeT\x88\nl\x1d\xa1\x08\x1e\xb1#\xf8s\xc1\xad|P\x1c\x7fw\xe3\xc1\xa4\x0bo\xe7\xbdY\x87\x9a\x1c\xad\x18\xf5\xcc\xe5\xec$;\xa7x\x99\x07\xbdE\xb8C\xf5\xef\xcc\x83\xc4\xdb\x15)\xb6;H\xa1\xe1\xc0\xee\xf9g\xcb\x89w\x81\xa8\xcd\x11\x18\xeb\x1b\xfd]e\xf4\xc3+\xde\xeeOdY\x88N\xeb\x9fE\xc0\x90\xd2\xf0\x16\xfc{(jx\x7f\x81\xe1\x0bb\xf0\x164\x19\x19\x85\x9d\xb6\x19\x8c\xfc\r\t^@\xc0\x1bN"\xdeo\xbf\xe6]\xf6\x95\xc9\x9b\xad`\x89\x98\x02Oc\n\xcd(*\x97f\x0b\xed\x02\xa8\xce\x99\xafS\x0e\xae\xcb\xc3 
4\x9f\xd2\xfe3K*\xec\xa3\x88\xd3\x92Y}\xa6\x1b\xb8\xb4\xdc>\xb30+\xbd\xde\x0e\xc7\xc2\xa5G\x05e;\xe4\xc4_\x12\xd5At\xe5\xf3\x98\x07u\xb451\xb2\x93\xe0\xb5\x1e\x988\xc1\xf9cw\xc1M\xb4\x0e\x00\xbd\xbd\x82\xec\xa2k\xd8\x16\x9f\xd9\xbcQ\x9b\n\xfa\x8c\xfa\x8b\xe87\x8ai\xde7\xd4\xa9\xfe\x11\xc7\xf3H\x87\x18\xc2d\x9al\'\xca&\xfb\xae\xf2\xf5E\x1d\'\xe3VXk\xf8a\xea\xfe\xcf\xf1\xdbtxA\xa0\xbbu{\x9a\x80\xdb\xd7-\xd8\xec(*\xae\x13\xee\xf9\x1fL\xa9WI\xbf\x08\xd1\x0cQjH{\xd8G\x91M@X\x061\xe6\xbd\'\x13\xc3\xcf\xea\x0e\xec#\xdaX\xd8\x8a\xfc\x0b\x1bNo\xd8\xee\x13\x0b\x95:vh\x93\x05\xd2X\xad7\xd4K\x07\xe3pD\x1as\xfe\xfc>\x81\xe2\x89\x87<v\xff\x1e\x9e\xa0$k\\}b\r\x8d\xba\x84\x1d\x91\x1e\x14)\x1a\xc6\x04\x99\xcb\x86\xf1\xb9\xfe\x13\x0eL+\x1d\xfb\xceF\x1ak\x86l\x88#\x14f+\xe3x4\xae\xb1\x8e\xae\xfd1\xa1\xe8\xfd\xa9d\xddx\xf2\xc6\x80L\xdc\x15_*b\xd45\xd5\xc4\x90\xee\xc8A\xd1\x85!)a\xba\x17\xedfN\xfa\xb8\xc7\x1a\x84\xb1\x18!\x1e\xea\x8d\xb6\x91\x14H\xb5\x1f4\x1b\xb1 <m\x89c\x18\xb7\xc6\xfb|<4\x9c\xaa\xe6`\xf4\x17\xban\x1daW1V^\xfc\xa4t\xcfO\xa6Y\x99\xca\xf3\xab\x89\\z\xaa\xb4\x102)j\xd5e\xf6\xc0XH\xad\x00J\xc4\x1a\xb8\x85\x14T\x02\xf8\x04L\x00\x87\xf4N\x12[-\xaa\xd4r\xd3\x136\xab\xb0"\xd8=C\x9f\xb5(\x89\x99 
\xfb\xe7\x1d\xf2\x12\'\'\xafj!\x06\x94g\xb1\x08?\xc2\xe2\xa6\x03\xbfn\xb1\xfb\x0b\x88?\xa9*\x01\x0fc\x0ev\xfb\xcd\x01\xd0>\xdb@*XL\x98\xed\xa0\x89O9\xf0[\xea(\x9f\xeed\xc4\x98\xe7_\xeb\xab"\xf00\r\xb4\xd6\xd3\x17\xa5B\\\xb1\xf0D\x04Ziw5BY\xb7\x1e+/\x80.\xec\xe2b\x8c3\xddQ\xa3)o\xc8\xcd\xf23\'v\xb5y&\x7f\xeb_;\x81\x1e\xb6\x84\x95e\x0e\x072^q\xe2{\xa4\x81+\x1e5ih\xf6(\xdf\xa0\x7f\x91\xd5v\xac{\xf9N\xa7g\xdd\xdfyj\xaf\xcb\xca\xbd"X\x8b\xeeU{\xbbF\xe1\xdah\x1a\xdbe\x18\x7f\xc8h\xf9\x91\xa7\tX\xb2\t\x05}\xf6\x90\xf6[e\x9e!\x85\x15\xb5\x87\x9b\x93-\xe6\x8ahi\xec|\x1d\xa6A~\xd3\xdf\xbd\x97X*Ht\xd9\x9d,Ka\x9e\xfd\x1ee<\x88\xf6\xad\xc9\xcb~\x9e\xf7\x92\xdc\x8d\x92\xa0\x0fY\xff[\x1bl\xb5\xb6\x98\xfc=\xd9\x0bG\x91b\xae\x03\xb6\x04\xde\xda\xa7\xce\xd0ldw\xa8\x8a^\xeb\x95\xe6_\xe7\x93\x84M\xe7\x8a\x8b7\xaa\x14\xfem\x06\xb4\x01p\xc5\xd9v]\x06c+\xac\x17J\x91\x0c\xc2%\x12\xf3\xf9\x99\xf9}\xaaK5j\xa9WC|T\xa9\xe0\xec*\xed[v\xad\x16XfY\xe4\x046\xd7|\xe4\x8d\x17g\xb2"\x81\xfd\xef\xf6*L\xc3ve{\x14y\xbcE"\xaa61Fc4$\x84\x16;\xd3.\xdfu\xca\xf3\xd5\\F\xaf\x08\xf7\xd4^\x92\x11\xc4H\xb5\x7fIE-\x93\xd5\xeb\xd4?,\xe0\x9aD#\xf5\xc9\'Si+X\x8c\xad[^\x04ug\xb0\x8b\xa3\xac4\t\xb2\xdb\xe2\x06\xcf\xce8\xab\xd9\xc0\x16\xc9\x10\x03\n\x99C\xec\x08\xfa\xef9/\xb2!\x19\xe4\xb9a\xbay\xba%\xd6\xec\xf7\xc0;\xa3K\x99\xb3\x89\xfa\x10\x12\x12\xd5\xb8\r\x82W\x0ej\xa5\xba\xe6&\x13/2\x8b\xd6\x93\x19\x7f\xeb\xa5\xecW\xa8^ \xc1\xa5(`Xo\xe7n\x91\xb2g\xb8\x9e\xa6\x0e\xf4\xad\xee\x91SS\x13\x87^\n\xbf\xed\x86\xdbE\x97\xd2\xb4\xd0\xf6\x8a\xa2"}f}\xbaP\x83\x8c\xc1\xc3dN\xf7\xf7\xe3\xff\xc4A\xa4\xd1\xf1\xb41\xdb\xfc\xcfz\x0e\xd4\x022\xb9\x9a\x92\x899\t\x86&k]R\x08\x0e\x0b{\xa2\xe3\xd7yp*C\x9c\xf6O\xfcj=\xecM\n\x1c\xbd\xe5\xfe\xc0\x1f\xed\xd9@\x999=9\xe5.\x14\xaa\xbfM\xed~\x99\xeehJ\xb6\xd7)\n\x0cJh*\xd0a|\xb7#a\x9at\xe2\x95zP\xafV\xb8\xf0?\xf1-\xca\xc6C(\xa8\x94\xadLZ{\xda&\xe2\xf0\xd3\xe2\xfd\xcd\xa0 
\xa7I\xce\x96V\x92\xae,\xd9\xf1+4\x19\xfa\xec\x08\xf7\x129\x99\xa3\x90i#\xe8`\x1c_P\xb4l\xa6e\\\n\xad!\x04\xbb\xdc\x87\xcd\xc8\x11\xc3\xf7\x98\xdbx7\x9a\x83\xbfY\xb1*I\x0eV\xec\x86\xfb\xdb\xe5\xe8\xd3S\xd5\xa7\xafbS\xd9\nh:\x90>\xd6b^ur\x9b\xe0\xd3\xb4^:_\x16h\xba\xa8\x94\xe1\xdd!XF\xd3\xf9U14\xa7\xfa\xc7\xd4q\xf3\xac\xd0\xc2[u2]&(V\xd5M\xd0\tf\x1ebb\xf7u\xae\x12\xf40\x92E\xda\xfdQ<O.\x089\xd3\xe7\xe0\xd2\xfa\xf0o\xc4\xa9\xa5\x0c\x84d\xb6/\xfd1\xbd\xc9\x00k\xc0]\xd7P\x94\xe4\x1d>\xb2\xcd.\xe5\x80\x10\xcf\x03n\x05r\x19Zw\xe2\xc6wP\xb5\xd3\xae\xa0\xc4v\xe2Y\xa67\xe8WL\x1a(\xbc6:\x0cH\x1a\xc0`\x94`3\xe2\x1cH\xc6\xbdN\x00=,\xc3\x0e\xae\x0f/\x1a\xb4\x81\xce\x8b(o%{\x06\xdb\x1d\xceK\x94\x06\xcd\xf0\xa9#\x99\xae\xba\xce\x01W\x13?\x02\xe3\xc2\xed\xb5\x15\xca\x04\x85\x0cq\x87U\x0f\xb1\x97\xdau\xc6^S\x97\x08"\xa1Q%\xec\x9e\xc1\x18\x85\xf9\x15\x1b\xac\xcb\x1c\xd2\xda\x96\xeeCeN%\x1dV\xdc\x83\xba\xf0\xc1tp\xdbo\x12\x12\n_\xe0\xe2\xaer\xc1W\x86\x17\xd5\xeb\xee\xd1r\x0f\xcc\x10\x1a\x9a\xe0\x95\xe2\x9d\xf4\x94{Y%\x03\xea\x1ej\x10\xe1n\xe5~\xc2\xcd\xc6\x1a\xfd|\xc9\x14\xba\xc0\x17"\x99P/\xff[\xdf\x06p\xf8R\xd9\x91\xaft\xc1\x8f\xe9\xcd\xf0\xd1\xef\xfb\x17x\xcc\x80I\xd5\xac,[t\'=\xdfgM}\xaf\x8d\xe6\xa0\xeb\xd7\xa1\xec\xa6\xd6\x1b\xaa\xe7\xbetdB\xac\xbc\'\xbc\xce\x0b\xc7\x10\xdc\x82\xfe\x8f&x\xb3\x02\x02\xe6R1\x9c~@E\xde}\xb67\xb9\xf0{\xaeY\x10a\xe15\xad\xdaF_\xd6m\x8e&\xa3\xb6\xad8\xa3\xd7\xc4\xc0\xca\xd4\xf0\x97\xe2\x94$\x06\xef\xefR\xe2\xa7q\xf0\xe0\xde\xb3 \x01F\x18\xd2\x7fy%\x87Ou\x8duQ\xc1ud\x88\xc3+s\xd4\x1b8\xdf4\x8fO\xce\xdd\xdc(\x0b\xa2\x14g\xed8\x04\xc5B;\xc2q\x9aw\x88\xd8\xed\xadu\xf0\xc5n\x92\n\xfc+<\x03\x13\x03\xf0\x9f\xc3\xc8\x05\xe9\xd6\xbe\x8e\xbd\x06i\xe9kv\x0e\x99\x19\x8b\x00iE\xef\x15\xef\xd9\xc3\x90\xd5k\xf3U;H\xbc@Fw\xec\xac\x15$\x15.\xca\xf5\xfa.-\xaa\xaa\n\xbf\x8e=\xdb\xce\xbf\xec\x01\x9c\xa8\xd9\xb0>c\x0b\xc6~\xa9\x7fG{ 
5@\xc7,.\x8b\x87o\xa0BQu\x06<p\xc9\x8d8ly\xb7\xf5E\xae\xa58\x01o\xba\xcb\xb4\xde\xe7\xd6\x82i\x04\xe1\\\x82\xf1\x04{L\xbf\xfa\xc3\x06\xff-\xa1\xc2Q\xe2\xa9\xdf\xf3\xb1/\xd9\xa82HT-\x10 \xfaH\xcc\x19)\x00E\t|u:\x97:\x7f\xf3\xf1%S\x99\xe2S\xa61\x0f\xafb6X\x1a\x83DuR\x7fl\xb6\x12\x81\x81\x8b>\xc9\x9bo}!\xffcE\x91\xf0XV\xf4U\xf2\xe2\x92P+\xcd\xfe\x08\xb2\x0cuB\xecEPD\xe3\xc5\x8ar);.C\xbbV\r\xedI\xf4\xd4t4\xf0A\x97\x9by\x87\x1c\x843mD&\x8e\xcd\xd5.r\'K\x94\xea\xd3\\\xac\x17=\x96\xd8J\xcd1\x80w]\x811\xb4z\xf1\xe0\n\x10(\xf4\xfd\x88\\`\xdd\xee\x98\x0f\xd4=\x0b \x1f:^0\xc1\x14\x0cT\xab\xc2\x01\x17\xa7\x02\x19 s_\xe8\xc8k"#\xab\xb3a\x01\x0e\x89\x00\xfa\xda\xa5\x98\xb7\x9e\xb5F\xfd\xca\xc0\xc7\t#\xef4y}\xa2\xc7\xb2X\x1e\x1d"\\h\x1d58\xac8\xfc\xce\xd7\xb5\xfd2\x1b\xf9\xd1D-\xb7\xd3I\xfaVCn\xed\xd7\xcdV\xcf\xdal8\xef\xab\x939\xd2P=\x95\xddb\xa1\xe1\xf8\xcd\xc7a5q\xa8\xae\xd2"\x8fO \xad\x13\xa4iJMm\x91`&#\xa3\x0c\xd0\x06\x1eI\xfb\xd5\xa3<\x8f\xe5\x01 \xeb\xff\xe0=\xe8\x93\x07\xddA\x02[\x8ae\xa6\x07R\xa4un!Fe\x15\xef\x15TiC\x87z\x9d\xc1\x08h\xef\x11\x9c/n\x8b\xb2\x82\x84q\xcbl\xb6\xbb\xff?Qo\xc5Y{\xe9\xf3\xc3\xb8o\xc1\ri`\xe7_\x17*\x7f\xbar|\xba\xf6\x039\x18=\xfd\x17\x81\xb4\xb0\x00x\xa5.4\xd1\x86]\x89\x9c\xcbK\x8d\r;Nw\xc8e\xdfr\xd2\x04<\xc0wE\xba\xabF\x9e\x16\xc7zs\x9bkxSj\xbfa\\\xf31\xccMi\xcb\xb0W\xc0y\xaf\xd7`\x90\xa9\xe9\xbeMo\xac>![k\x94\x1e\x96\x7fj\xa7_\x9c\xd8\xb7K\x88\'1iE\t\xf7U$\xdf\xcc\xb5=\xb0\xa0(\xf5\x87:y\x1d,+_e\xf3;\xdaE/,\x03\x8b>[\xf1\x0f\xbdLL\x81\xa5\xa2{\x95;r\xe5\x84\xeb\xdc\x12\x9c\x87\x99\xecp=\x0f\x10A\x8e\x1f\x84\xbe\xff/\'\xb0s\xdf\xe6^\x8d\x12\x89w\x00\xe2\x83\x15G\x92\x95\xcf\xe2\rl1mo\x82/\'m\xf6\x95R\x9b\xd2\x10\xb5\xeeT\x05\xbf\xd7\xde;O\x139\x80\x80\xb1^\\\x16\xed\xa3!\x13\x82&\x93\x91\x83\x04\xc6B\xaaV\x986\x99!\xddx\xe6\xfd\x90P\xf8<\xb0\x12\xac\xe2\x89\xcb\x95:\x86bf\x1c\xb8\x1aD\xd9_f\x8d\xaf!x\t\r\xd7\x0f{\xc1K\xfb\x8b\x13\xd5\x06|\xb5G\xe20*d\xcf\xa0/"kJ\x95\x8f\xec\x00\x11\x7f\xc7\x8c\x0e\x17\x9c\xef9)Ct//\x950\x9e\xe7 
\x07\xf7\xec^\xb9\x07KB\x1c\xf5\xd4\xc7>\xcf)1\x14!>(.\xc4r\x9b\x9e\xe9>\xd9\x7fM>\x97\x17#\xa5/\x12\xe8\xa6=\xa2\xb3\xc1\x8al1\xc2\xa2\x8a\xc8Q\xcd\xafzro\x85p\x03\n\xf0\xce\xe0\xd6\x83\xc0\x80\xe2\xbc\xfd\x9f\xac\x16\xed\x9ct\xebA~Ns\x98\x83\x06\xff\x06\xe4\x9fd\xf1\xdd\xef\xae\xb9p"\xaf\x1ef\x9a\x9a4\x17\x9d|\xb7\x17TW{\xa4\t\xa8\xc6\xe7\xeeco\xa7\xfa\xbeD\xcc\xbaR\x1c\xc4\x81w&\xa7\xe6\xe7\x93*3\x0f\t\x93D\x08\xfa4o\xbc\x81\n\xc7\x1fj\xd4\x05B\x13D|!*R\xfd{\xf1A\x7f,\xae\xbe\xc33+!`7\xc6QH\xac\x1e\xa9\x9a4\xef\xf8\x87N^\xbbG\xfa%Y\xbf\xde\xe6\x00JPI\r\x84\xb4@\x17\x18\x8d\xb2\x0eI\xcd\r\xebVX7\xe5tIn\xdeM\x1d\xa9~\xbb\xeaY@\x91\xe9v\x8f\xbd%;\x97\xdd[\xe24\x90\xee\x02\xfa"4\x14\xb1\xab\x83JB\x00E\x1f\xcd\xaa\x19\xf1\x80\xfc\xf9G\xdc{X_\x9c\x9d\x11\xa1\x0f\x9b\x07\x98\xcba\xb2A\tJ\xf1\x95\x0385\x97\x8f3\xd4\x13\x83\xa9\xe6\x02<tj\xb9\x80\xe0\x08\xb2\xfa\xe4\x94+\x1eD\x1f\xaf\xc4U\x1fjR\xc2\x7fS\x87:\xd3R\xde\x1c\xbb+\xa1\x7f?\x83\x1b\x80\xda\x16iN\xb0(H\x91\xfb%\xcc\xce\xda\xe2l\xe2\xa5c\xe7\xcf\x86\xe2\x10\xfa_]\xc0\x8a\xa4\xd93\x89#\x91\xc7\x15\x94\\\xef\xae\x9a\x1c!\xce3\x11\xa7\xac\x9cF\xeb\xbaF\xc8\xe0\x8fHNn\xad|\xc3\xb4vEC\xffZ\xbd(\xadriv\xd4\xf0w\xdf\x16"\x02\xf3\xb0s\x84W\xdc\x90\x94\xb2Q\xb3\xf8\xe3\x1d\x1f\x1d\x1b\xef\x11\xd8\x13\xba\xb42\xe5R\xb8\xdeV\xde{\x9e\xa1q\x82JJ7\xbb$\x81\x14\xa8c\xa1/u\xeb\xc1oV\x1d}3/nf@\xfc\xdb):\x0b\xa9jl\x0c\xe4\xa61K\xf5\xf5\xa1cL\xc7.\xe0\x05\x80\x06\x9d\xbcY67\xe7\xf6\x06\xb3\xb3\xd2\xe0\xa6\tQ\x1dv\x93\xd8\x10\xd5\x802%\xf3mN\xd3\xb2\x89\xaf\xdbP$\xa80\xe1\xdeL\xaaon\xa6=\xa4I\x16\xb5\xd2\xe0\xc7\xf5\xdaj\xae\x1d\xdaj\xb5v\xe7\xf3\x10\x98N>\x88ee\x002\xdf\xb4B\x1d\x14;\x18:\xef\xbb\xc6\x84!\x85\x18WS\x17\xae\xa0\xc1\xb2(\x98>\x10\xd9\x11XT#\xf4I\x97\xee\xa4/\x16h\x168U\xda\xf6L!\x80\xe8\xc4\xc3\x8e\xa1v\xdaSO>\xb6\x97\xf34{&\xc5$9\xc3\xe6\xc2\x15\x90\xcdc\x89\x92+\xbaP7\x89\x98\x81\x81\x88)RDj\xba\xcd\x11\xc2\xa2\x8bH\rP\xe4 
IY\x8a\xf7N\x10\xdb\x81\xb0Wy\x9d\xacL\x97Y\xa4Zk\xcd\x83"\x84\xeb\xf55!0\xfe\xb3\xf8\xe8.3p2\x1eq\xbb \xf5]\x00\x8e\x83F\xce\xdc\xd5YM\xfb\x19\xb9\xa4\xfd\x14\xba\xb9\x0c\xadV}O-ayd\x1c\x9a\x97R\xdb\x1e,\xa46\x0b\xf9\x81\xec\xb5\x08FVJ\xd3\x8a\xa0\x1d\xcbw\xff;>\xf5<\r\xe6\x9d\x8b6{\xc1\x9d\xb6\x82bOs\\\xe5R\xc9\xe8\xd4\xbfa\x13\xc1D\xbe;\x04\x12\x84\xffk\xf4\xac\xba2b\x8crI{\x90\xa2\x07\x1c\xd0og\x07\x1e\xe8\xc8juW\xab\x1ew\xb5\xda\x05X\xb1~p\xa5jK<\xe4&QN\x1e-\x08\xe5#\xb3\x95\xcf\x92\x87E\xde\xa7R\xbb\x04\xba\x18\xd8v\xfa\x7f\x97,&\xdb\x96J\xba\xcb)t\'\x83z7\xb9M\x0eR\xe81\n\x11\xd4\x9b\x07\xf8K\xed?S\xd7e\xf6\x00[~\x03J\xc1v\xeb-Wm\xbc\xf7^\xd2bz\xc3\xbd\xe6(\xb8\xbe\xea\xc9\xdf\x88$d\xab\xc6\xa0\xdeh\x0bj\x81\x9b\xc6\xc3\x07\xb0_m\xd3\x9ft\x8f\xad\xcf&\x9d\xde\xdc\xad\xb3\xfd\x071\x9e\x85r,>\xc0\x0f\xf0?\xd1\xa8~\x9d\x0eae\xcaF\x13\xd2\x1e\xb8\x9f]+\x97\xc3Z\x8a\x9b\xaa\xff\xd2\xfa\\lD\x10\xa3Sb\x86\xa72 O{\x1ef\x02\xe5\xbe\xbc#~\x08\x0f\xac\x1d\x02`\xda}\x8f\xa2.q\x9b-R\xa0X&\x85\xc8o#klq\xeeC\x9bY2\xca\xeb\x8c\xd2\xad-\'j\x81[\xee\xc1\x9b\xd2\xc3\xe2fda\xd2\xfdI\xca\xfde\x93\x0c!1N}\x08\x85\xeb+\xfc\x8f\xe7\xa5_\xf3\nd\x8b\xb8\x01\xea\x88A\x01\x9f\x17\x89\x85+YK}\x8bA0\x01\xf7\x9d 
\xce`\x0c\x1c+\x93\xab\x9d,$b\x8ao\x85\x02\xba\nW\xfe$\xd1$%\xf4\x98\x98\xba\xb1\x15\x84\x1bo\x05gB6\x05\x94\xb6\xb6\xb9\x1ft(\x8b\xa8\xf0\xd6\xb7\xf08\xd5\xbb\xa3\xdel\x0fa\x9e\xfd\xf8p\xe6\xd4F\x1c\\\x9d7fh\xd9\xe2|\x93\xaa\xc3\xf9\xd3,\xbe\xeb{\xc7\xc5I\xde\xca\x9a\xe1X\xa0\xc3\xf5oQ\xd3^\x05\xfc+\'\xc2\x0cs\x17igIk\xacth<_\xde\x1f&\xaa?=\xd3\xd0\xf8\x14\x11\x94\x17\x8c\xd0\xd8\x82\xe0EB\xad\xad\xc1\xbdO\xe1(f\xb4\'\x8e\xdb>\x11f>&x},bG\x89\xa3\xd1[\xc0\xc3\xb7\xce\x081\xa8\x10P0\x15H\x0e\xbe\xa5|\xb9\xde,R\x88\x87[\x92Jy\xc7\xde\x94\x03\xe98\xc7_\xc9fj\t\xa1!\xf3\xa6\x9dSA\xf3\x06\x84\xe8([\xb5\xef\xbd\xbd\xd2lB\x81\xa2\x97$e\x18\xc8\x97"6\xb8\x9a\x04\x18\xc2\xee\x7f\'\x80\xdb\x8f\x89\xedp}\x18x\x1b\x911\xd8\x989$\xb2\'\xd0\x8a\xdd\x90=\xb0\xaaE\x14\x02a#\xcc\xc6\xcf\xb3mii$+\xbc\xb04r\x99\xe1\x88\xb9d\x86Q\xd7\xc6!\x8fR\x98c\x15\xe7,\xf6\x048~e/\x0c9\\\x9e\xf9\x8e\\\x94\xc9\xef5\xfb\xcc\xbf\x12Q\ncZ7e\xc8\'/\xe12,\xa0\x94\x1d,\x9d\x88S!\x06\xd6\rS\x1f\\>D\xcf\xe6w\xfdZ>\x0c\xb6\x022\x95N\x17\xd5\xf1\x18\xb4o\xeb\x15\xb0 \xe7\xf9M\x90o\x1a\\\x0e\xec\xa53\x8e\x91`\x07\xba\x8f\x9c>=J\x13\x9a\xb2\x88j\xfaL\xc5\x8e\xef@\x84\xe2\x83\xde\x9d\x86-\x86\x14\xf6r\xc4\xe3\xee\x0f\xbc3\x0b\xe3\x9c5\x08H\xd2\xea\xac\x106\xf7c\xca\xd9ET\x07H\x8cF\x86\x86g\x01\xe9\xb4\xdf\x01>\xebKQx:(\xee\xdeo\xcd\xb2e\x84`\x0e\xe1\xc39;\x8f\x94]#\x0b\xb0\xd4Q\xfd\xaa8\x1al\xec\xbb\x98l}<\x18\xdd0]\x9d7\xd8\x88t \xb7\xb4\xe1\xc7\xf0\xf5\x95\x16\xf7\xe2wD\xd1\xcb\xa5\x84\x13O\x1cp\x1a)\x1aw\xdc\x8e\xa2\xb1\x9a\x1d1\xeb\xf2\xd7\xed\xf8\x9eK \x7fj\xb3o\xd4T2\xfcR!)v/\xf8/\xa6O\xed~\x17\x98\xe7K\x81\x01\xd6\x9f\x92jflX}\x95\x80\r\xc1[\xfa\x14z\x96\n\x96\x0e\x81\n\xfeO\x9e=\x8e\xa5\x97\xfd\x18\xa3\xd8\x11\xe90!\xb13#\x95X\x07\x85-\xb0\xb27\x05#\xf4 \xc4n\x156\xfcC}"g\'\xab\x17\xc3U\x1c\xdf\x87\x92J\x15E9\xd8g\xaa>4\xf09\x83\xed_\xf3\xad\xff#z^H\xce\xdaU\xe4\xf6g\xcf\xaaGu\xf4\xb6\xc9\xc5\x07\xcd\xd6\xde\x83%\xe1B\t\xe7\x17\xae\xfa 
\x9dj\xd1\xe9\xec@\xbbU\x13\xaa\x14\x9c{\xa1\xeeQ_\x9c\xc8\t\x97O\xa7K\xd6#_\x99s\xe6\xee\xa2\x84\xc3P\xad@\xec\x1azVM6h\xe1#\xd8\xb2\x0fy\xe2X\x8a\xd6\xcd\x1aL[W\x8e\x9eA\xfc\xa5\x8f\xe8\x14\xc9\x00\xb5\x05\xb8\x1f\x9d\x03zyjb\xec\xcc\xca\x18\x04\x9b\xe9\x16t\x942\x00^\xb1\xb7g\x8fz\x85\x89\xd7|ly\tk\x94\x08\xc2\xb2s/\xf7\xdbq\'\xa5\x1c4\x92N\xda\x9d\x0bi_\x858X\x97\x0em\xb7\xa4%\xdbg\x85o\xe5\xfa\x1fnG\xa2C\xc8\xff\xa6G3\xb0\x05\xf5\x0f\xb6n\x8c\xef*\xa7\xbb$\xe6\x84g\x97\xe1\xea#"\xd3\xbb\xc2\x82q0\xda^\xa8\xf7G\xbc\xbe \x05\xf8\x1c+\x08\x82K8U\xa6\x058hX(\x13-\xd6\x19\xbf\xee9(\xe19\xdeK\x11\xd1\xf6\xd3\x80\x06\xb1\x15\xe9\xfc\x9b\xc9\x94\xc9\\\n\xc4e\xc2\x1b{\xa3 bkC\x9e\x1b\xb2\xca5+\xef\xc7E\xffo\x8cL\x8a\xb3\xb4\x06\x1f%en\x8c\xbcD$\xeb\x9f\x7f\xdd\xb5\xbf!\xa6\xab\xfd\xb9\xf2\x87\x9dHv\xad\xeaU\x8edev\xa4\x7f\x80\x7f6y:\xe2Xx\x08\xba\n\xd9@\x95\xb1d\xc6Y\x0f\xf8\xb8"@oH\x0cK*\xb4O\x89[|\xc3\xa9\xb6VP\x06\x1a\x18\xeb\x87\xff\xaf\xf1\xb0\xb1h*I\xac\xb3\xce"\x9f\x9c\xe9^3\xbc\xaa\xdc\x0cV6\x85\x95\x82\xf0\xcdz>\x11.\xefp p:\xc9Q\x82v\xd5+\x96+\xde\n\xb03m!]\xd2e\x8c0_A\xfc\xce\x96\xfc\xe5\xc5l\x93A\x9c1-\xb3\xab\xf0\x14\xa6\xb1"\x83p"\x88(\xff\xa8rb\x1a 
;m2\x97\xe7\xd4\x90\xe9\xbc\xb9\x95{\xdaV\xc0\xd81^v\xd4\xe4\xeb\x10*4\n\xbf\xed?\xc4aJ\x89o\xb3O"$\x8e\xf5\xa6\xf6l\xe4U\xb3\xdc\xbb\xfe\xaf\xe9\xea\x06*\xb1\xb0K\x1f\xdc\x91V\xdb\xc9\x16\x0c\xc3\x06\x89\x01\x1f\xfeh\x1f4\xd3o\xa4\xc0+L\xcbk9\x87\xb9n\x10Pq\x9b/\n\xeb\xbb\xf0\xdf*\xf9\xe2h?\x95\xa6\xf2;\x1d\x8a\xfa\xde\x19)1F\xc4\xa2\x8f\xcb}\xb1#av\x02\xa8L\x8e\x15\xf68f}\xad=^\x81\x10\x0e\xb2\x8c\xfa[\xb4\xb7\xb9\x97YaJy\xad\x85\xed\xb7lO$\x93/\x93\x19\xa7L\xe0p\x133\xc8N\xc4\x8a\xe2W\xd5H)\xba(\x1f-\x88\x95\x05\x13C\xd7\'1\x1e\x1b!\xf9\xf5\x1d\x83\t\xd1n\x96\xcfT\xa6\x15\x94\xf0\x10x\xca\x8e\xf23N\xa0O)O\xc7\xec\xf9\xab\xee3;a\xfd|0\xc9=<\xf4DVC\x92\xb0b\xdb)-\xc8\x8d\xbej6\xac5\xf6\x98(\x1e\n\xad\xf0\xa3/\xc1dA\xf8BQ\x88\x9d\xc2\xc8\x1b\xe4\xc3\xa2\xfa,O\x1b\xb6\xfc\xfe\xd4\xa2/z=K\'\x8c\xb6\x8a\xaf_\xda\xf1\x19n\x9bcE\xc6 oB\xcd\xe8l!\x94m\xc1#\xa5v\x7f\xf0\x9br\x92\x98W\xe6\xd8D\xc3m\x88\xf4\x86a\xcf\xda\x0e\xb7>i\xba\xd3\x8c\xe6>\xcb\x8a\x12\x82\x99\xd1\xa0\xea\xb6e9: \xbf~\x88W\xc2\x1c*\x0f\x13u\xb4\xd0\xc2\x07E\xdf!\xf7\r!\xbd&\xbe\xbc\x98\xd3\x83@\x12\xc7\x93P\x99\xaa\xa2\x9f\xba\xe6\x91\xd7\xbd\x9e\xd0[\xe2\x14~\x9cD\x1f\x86G~%\x84T\xb9c\xf8\xb4\xa186\n\xb1K\xbf\xcf2\x12\xf7\xaf\xb99}\x84p\xf8\xab2\xbf\x89\x1f~\x05\x02\x04\xf5\x03m9U\x95\x1c\x94\xd0\x1a\xd8\xc5v\x8ei\xb1[\xa1f\x05u\xda\xcac\xcd\x9f\xa5\xa3_\x19\x00\xd7s\xae\x8e\xa9a\xc2\xdf\x1b\xd4\x12\xd2\x075\x10qH\xe9\xdb\x94?}\xea\x18%\x8c\xdf<C\x07\x1e\xaa\xa9~\'\xa3\xffmPV\xc1\xfb\xee\xb8\x1e\x07\xcfWP\xd4\x06\t\xc4\xc8\x13\xb3\xad\x00+\xf9\xf6\xcaH\x82\x0b\xba\xdfr\xcb\xcb\xcc\x9al\xa5\xef\xf9\xff\x96\x9c53\x04\x07\x9c\xb9&T\x88\x0e{Nr\xca\x93\xecP%j|nJ\xb1\xb9Z\\\x0f5/\xb7Y\x7f\xb1\x8b\xf5\x97)\x13\xa4\x12\x91e\xd0\xf6\x10`|wv\x86^"\xbaV\xa57\xe8$l\xf9X\'eL\xf7\xc8}\x858\x86s\xeb\xb2\xcf\\\xa9q=B\xccU\xdc9 
\x98\xd1\xd4\xcda\xc8m\t\xc9\xab<\xd6\x7f\x9f\x7f@(\xe0\xbfCl\xd6\xed\xc1E\x11p\xe4w\xf6|&\xa0\xa7\xb1\xe3\xbadX\xa0j\x94\xc9m\x16\x02[3\xf8\x7f\x07"\xed%o\xcc&h\xd0H\xb1\xc8B\x84\xd9GP\xcf\xfc\xbb-A\xd0\x0e\xc3ad\x8d\xa1\xdfb\xf80S\xe9\xe0\x15q\xce\xb2\x17b\x89W~T\x16r\xcah\x9f\xe3.\x84^\t\xcb\xd2\xc8\x02\xcd\xbef`\xe9Y\xcc\x15\xdb>#r\nA\xe0D\xc4M\xf9#\xe9Z!~\x16\xdc,\xdb\t\xee@\x9b\xbe\xeck\nU)\x00\x18&\xe5\xb6\xf0{\xae\xf8\x98O\xe9Aa\xa49\xc2\xdb\x05\x0f\xd1\xd4\xb3\xe4\xb62O\xcdo\xb4\x05\xc6y\xf5\xbe\xd7)#\xf0TF?\xa0L\x99w\x97\x847\x9d\x10\x98\x8bC\xb6\xec\x1dDr\xd52\x87\x8e0j\xe3\x1c\xa4\x84\xc4\xa0!\xd5\xad[P\x05\xd2f7\xda@\xf6\xcc\xd3\x8a\x17\xa3\xd32\xec\xa1g\x93\x9a~wf\xe6..\x80\x02\x1aI\xb8\xdd9~sH\xd2\xaa\xe1\xb7\xa9\xa7\xf5\x8b.C\xe7\xca\xee\x0cK\x1d\xe1nv\'H\x0b\xce>=\x1a"/\x93\x1f\xf9\xdb<mt\xe8q\xa51\xcf\x98\xae1E\xd2f\xb2\x1b\xc7\xc7\x18a\x1c\xe1\x80\xb9\xfaEtgA\xd6\xa3\x8b\xd2M%\x83\'\xd4\x0b\x00\x80cr\xb3\xac\xbb#\xa1\xf2QM\x84\x1e\xbfz\xfb\x05\xed\xf3\xb2#\x1fy\xae\xe5}`\x0c\xef\x00\x81qa\xfd\xcb\x05\x14\xd6Z\xb9\x14Hdj\xd6b\\\xbaj\xb2\xa5\x9a\x08\xb9.\x94\x17\x00\xdf]\x11\xf1\x9f\x9bY\xc7{\x14V\xf4\'\x1d\x025\xa7VC\xb1\x11v\x1f\x8fk\x92\x10l\xf6h\x8a\x84\x15\x13\x84\xbeaeFwt\x93\x02\xd0jq|\xfe\xf6\xfcby\xa0\x85\x08\xe8\x8d\x8f\x1d\x01\x8cL\x95&\xd15-\x8b(M\xd5\xc9\x03\xed\xcam#C\xd9\xd3\xfd\xadJ2\xa5X\xfc\xc7H\x02a^\x11\xb9\xa6\n\xa4\xd1\xd6\xcao\xf8/?\x15t\xde;\xd4\xfd\xfe7\xcb}rO]\x82#n\xb3\xa79\x05dC!p\xc2v\x1c"<\x02\xda\x82\xf0=g\xa2\xb1*\xccg$\xd7\xae\x80\x16v\x91\x17\x8a\x9a\xea\xdb*JFr\xff\xcaBH\xdc3B\x8f\x03\'\xb5\xef\xbcy\xa1&Gem\xadvn\xbd^\xb3[\xd6\x80~\xa4*\xa7\x1a7>\xb7\x83\x0b@\x03Hrc\xb2\xfdM,&\xee=x\xf3\xc2\xaf\xcfZ?i{\x18~\xd2v"\xe2\xdfM\t\xa0\xed\xb4\x12\x01O\xb6\xb6\xf1!\xda\xc5\xb7\xc6}nM%\xde0\xe4\xdcm\xfc\x19!1\x96\x99!\xd7\x1a\xb4\x1c\x0euruU\x06\xb9\xaf\x97o\xec\x13\xfd\xa5\xfd\x85QM\xefs\x8c\xd4=\xe3\xb2\xa5\xb2d{\x93\'|55\x9f\xa3\x95\x01W/3\xdfCS>\x1a\xaa\x98e\x9a\xa4\xc9\n\x1f&\xff\xe5\xe8\xde\tt\xafcp`\xc8\x84\x19G\xc0]
P\xc04\xec\x8bDL\x9a\x91\xed\x11~\xbf\xb5z\x9c\xc9E\xa9\xa6\xe1\x0f\xd3\x80\xdaP\x94\x89My\x8a\xaa9\x94\xa9\xe0\x02A\xfaP\x86\xa8\xd2\xd3<\x1d\xb3\xda4gZJ\x8b\x91\xb2K\xc0\xc3\xbeK\xaa\x95_\xd7b\x19H[O\x18\x05\xe9wwV2@\xdfzY[\xb5\xf8\xf7j\xff\x9d\xfbw\xfc\x87\xdeg^\x1cNJ=\x8c\xa2\x91\x11\xff&\x03\x0eD\x08gx\xb4:\x0e@\xd3\x85\x96:\xe8\xe6\x99d\x92P\x11\x07\x94\x1d\xba`Y\n\xd1]\x04Y2\xb8\x153\xed\x0f\xce\x1eC\x96}XEn\x89!b|\x1f\x95:\xcc>_=<\xd1TD\xbc\xe5\x8a\xf07\x12\xde\xc1\xc2)\xb9\x9dg}8\x17bY\xd0A\xd2;\xd9E\x95\x14\xc2^e\xf0\x92\x8f\xff]\xa0,\xc9w\x80\xa7\xe9\x83\x8fZ\x0b\x04I\x87\x16`\xd3\xf1\x12Lz0a\r\xe8\x17\xb8\xe4G\xcb$\x1at\xd9\xfe\x85\xe1\x8f\xc2lVD\xca=\xca\x1e&w\xb2\xbf\xf3\x92w\xd8\xaa\xee\x99}\xa9\x92\\\xfe\xf5\xe9=N\xbf\xe8\xadr\xf8\xa1\x148Bb\x13t\xf8\x9e\x90M\xfax\x05\x920\xf6\x86FHwQT\xa6e\x12\x9b\xfa\x80-\x86nC\x9f[6<\x13\n\xb5\x1a8Uo\x9a\xa8\xcd\xba\x93\x13\x82\x1a5yL\x1e4+\xd3\x8c\xb3\xbe]\xa8\xbdaW\xe9Q \xfb\xdd\x94\x05\xa4\x96$\xb1V\xe3\xa3\x86\xe8\x1e\xa3\x94<F\xf8Z\x91\xfd\xd1\x87*\x85\x93 \xa9\xa1\x7f\xa5\\\xcc\x03\x98Qi6\x82\xb1NGN\xf7\x80\xe7|\x1b\x7fV:\xa0]L\\\t\xe2\'\x99O\'\xd5\xc5,3\xe6\xdf{\xef\x86}$L\x1d$\x0c\xf9\xd2&\xa8\x0f\xc5[\x81\x0fe\'\xb2\x81y@\xd9\xf1`\xb5\xfa}\xea4\xc6\xde\xc4\xf7\xba\xa4\xca\xb3\xbf\x1d\xa8]~\xdb\xb8 t\x8a\xf9Ir\x07\x94+!\x94h\x7f{\xc3\xd0\xd4\xeb\x8fm\xf1\xf0\xc1\xe4\x18\xd6)\x94\xa3$\xb1\x82(\xbe\x8bx\x8b\xe8\xaf\xed\x1b@,\xd89\x8e\x05u \xc9#\x85\xd6\x93\x8bc\x9fB\x00\xa7\xe3\xf1.E\xb8\xbe\x14\x98/\x1b\x18\xc8y\xd2]\xf8%\x1a\x02\x84\xd8I\xeb+\xd5\xfd\x8c7\xa4\x95\x14J.\xda"6\x9f\xbfv\xb4K%\x06cTl\xa8\xc3\xbe}\x1du\xb1\xfeus)\xe9\xeb\x92<\xde\x7f\xab\x01p\x1f\x97\x03\xe6S\xac\xe0\x89\xa5\xc4]\xdd\x92\xf0\xd2\xa2\xd6\x90p\xf7 t 
\x81\x81\x19\x0b\xc7\x03t?\xc1\x82,4\xf6\xc2dt\xda\xc4(\xf5\xf73\xcc\x1a?\x86\x89\xdd>\xa3\x0f\xaf\xf7\xbd$x3\xac\n\\Y\xd0c\xf3\xfd\xf9\x99\xf7\x05\x0cK5\xc0\xdf\x18M\xec\xcbic>\xeeX\xd7\xe7]?\x02\xc1{\xe9:\x03\xc2\xf3a\xd7\xc6\xbabC\xf1\x8cu\xbfz(?\xea\xc7\xf1\xe8\xaf\xe3"T\xc9\xb2.\xd5|\xdd($\xd0\x8fC6(\x03\xa6\x1f\xc14w\xb7\xfek=\xa0\xa9\xf7\x80O\xcb\xeaT\x07\xcdl\x17d\xf9O\x1fJ\x91oH_\xadC\xb0\xee\xed\xbac\x8c\x15\xc5\xee\x1d\x8d\xb6;\x82D\xa5\x05GwGg\xb8\xa9v\xb0\x13LxD\xe3\'\xa5\xbe\xf8^\xd3%$\x17\xf6(\xda\xae\x9a\x90\xe5\xd1\xa9\xd7\x95\x92agq\xa8\xff\x18\x80Jf\xe0\x05}\xa19l\x18\x84Y\x1a)\x9b\xe1\xf0\xd6\x0b\x92\x85\x88\x8f`\x00|#\x1aCy\xb8Z\xd83\xfd"o\x80Cz\x1c\x1cf\x80\xea\x984\xd6W\x8b\xab\xd8CE]k&q\xd79%\xd1x)6\xeb\xa3O\x9f\x8e\x80iL\x14\xd9\xdcX\x9f\xc3\xea\xef\xc0\x96VoNA\xea\xa4k\x9bL5\xf9\xab\x8d\x85{=J\x033#v\x93\xc6C\xd4\x1e\x8c\xc3*\xbe*p\x88\xffz\x87\x9b\xf1W\'\x8d?\x1a\x19\x98\xfb03\x14\xdb\xd5!\xc4?\x8c\xffGw\x8a\xe3\xcb\xafC^]_\xda\xe5\x19\xf6\xb2p\xb6\x86-N\xb9\xdc\xaf\xf0"\x9dB7w\x83k\x84\xd8@\x0e\xc7\xa0\x1a\xff\xab2\x0b\xbbR\xd2\x1fZ\xd2\xe9<\x99l\x84+0N\x060A\xa4J\xa3\xcc\x82\xdc?\x7fv:\x01\xcel\xf2l\x01\xa4\x93\x11\xf7|\x94\xb8\xd5\xbc!Q\x8b\xc4\xfey{\x00\xd5q\xcf\xa3\x90\xb72\x01\nZ\xafQ\xc2\x1a-p|j\xfdP\xb6\x0c\x87\xa4\x97\xc8E\x94z\x84<\xce\x90SJf2+Y!\xd6o\xf2\xe5\x82\xce\xf8\x9cG\x9a\xa2g\xf2\x064\x01\x82\xac\xa6T\x96\xeb\xc7\xbd\xc1\xba\xa9\x08v\xe1n\xfd\xe5>\x9a\xae\xab\xac\x98PH\xe8\xa7\x97\xa0;\x82T\xde\xee:Q\x15\x8b\xa9\x93\x14G\x96\xf4\xb8\x1ct,=\x1do\x065\xa7\t]\x93\xe2\xbcZ2)i\xf3\x82\x9e\x92\xa3L\x87F\xadZv\xf8\xfcr]\x1dz\\\xc2h`\xb4\xa5$\x83{\xf6H\xe7\xd6,\x9d2L\x97\xa0T\xe8\xdbQef 
\x07\xdb0\x80V\xbf\x92\xeeg\x06\x1f\xd5\xbaT\x0b!x\x18@p:\xf4I3\xf8\x99\x8d\xbe\x87Wq\xe3\xeb\x9d\xa4\xbd\xebY\x0f\x04w\xe4\xd9\x924\xee\xfa\xda\xce\xd2\xa3\xd1T\xd9p\xc2\xf5\x9e\xc1\xcd\x98`\xe2\x9274f\xbd\xce\xb2\xeb\x0cK\xef\xb4\xd3\xc8\xf8\xa9\xa3F\xd8}\xe5\x91\xb1\xbd\xef\xe5c"`^\x89y\xdd_[\xef\xa2\xe9\xd9\xf5\x07\x07\xdf\xc4\x95\x18\xd7^&\x1c\xa2\xb93\x9eK:\xa0;\x91]?;\xbb\xcb\x12\xcf\r6\x02\xa2\xffsOH\xfa\x98\xda\xfaa\x89f\x027\x80\x07h|\xb3\xb8\x02\xd3W\xc3\x99\xde\x84Me\tW\xa6f\xa9S \x02\xb23\x10\xdc\xe5\x03\xb2\'\xdaTt\xac\xd5\xaf\xc9\xc9\xe8\xa0S\x85&\xdd\xee\xdb\x02$\xee\x86\xee\x82\xeb\xe8\xe7\t\xdf\x99\xf0\x9a\x9f\xc2\xdfZ^\x84H\xf5\xd7\x92\t\xfbK\\\r\xbfD\x06\x00l\xbb)\x83\xc5\xd4\x12\x8c\x03\xbc\x1c_#\x0b\xd5\x00\x92>\xbd\xe2\x95\xbc\xa43\xb5A\xd3.\xd0T\xf0!\x1c\xb5\xbc\xcf\xfc h\xd0*\x9f"\xb1\xd6R\xb3b\x8cuw\x19\x19\xd0\x01\xb7\xd7\x81A{\xeb\xb3\xf8O~\xf6$\x8f\x1a\x8cR#O\x83[\xc2\'\x16\x0c:\xbaV\xec\xe8\xee*M\x81\xfc\n\xee\xde\x8e\x11\xd8;\x05g\x159\xa8w5\x0e\xeb\xa0\x8avr\xebn\x86\'e2\xb5p\xfd\x01\x1d\x1dOP05\xbcA:\x7f\x8d\x1a\x98\x02\xbbE\x17-\xa4hS\xb9\xd3e\xa4Y\xe4s\xc3\x9f\xb6\xc1\x00P\xd6\xa2B\x8a(rF\xc0\n\x15>\xe5\xa7d\xb1\r\x9e\xba\xa9\x9f\x04p]\xc6\x9e2\xe3E\x9c8\xe5\x8c\xa0Qb\xa2\xb9\x9a\xc1\xfaS\x96\xbaJ\xa7\xc9\xac\xa0s\xb8nz\xef\xe7\rN\xab\x02\xdaomv\x9c\x91|\xd8\xd0r\xf2Nm\xe2P)O\xca\xd4\x9b\xfa4\xbd\xccf\xb9\x86*r\xbaO\xc0\x19\xff4\x1f\x1a\xbfy\xea\xb3\xbf\x97\x99\x95\xb6\xc9\xaa\x1cr0F\x8d\x01=\xdc\x8b\xb9&\xed\xb2\xc5.\xf9\xb8\x91\x9f\xc7\xd8\x1b@3s\xeac\x03Q\xedP\xb2)jXv\x8b\xa3T\xce\xe1\xc7\x8c\xe9]\x10\x15?\x9f\xdb\xd2\x1e"\xd4\xd5\\\x85\x03\x13\xb03X\x88?\xbb\xba\xa2\xc5:\xdfld\x9ax\xa9\xf0\n\xc9C\x8d\x94\x80\xb2k\x0e\x94fH\x8c\xb5c\xc8^m\x8a\xa2\xd6\x06\xb5\x1b\x85\x82$\xf7\xf6~\x7f\xf4\x05z\xec\xc1\x1f\xf8/\\\xf8}\x95\xda\xffv~\x08XR6deQ\xf9*\\\x86\xd4\x11@\x06\x90\x7f\xe0I\xde\xe2\xa7\xb8\x0e\x97\xebyI\xbf4\xd2:\xf3\xf5\xaf\xb6<\x16\xb2"\x06\xe7\xb8\x1b\xa5I\xa7\'\x04\xd2\xf7g\xa6\x9c\x91\x14X/\xc9\x99&\xe9\x99\xa1\x82&\xfe\x
1br|\x16\xd6\xb4\xd4\x8f\x8b\x89\xba\x8c\x12to\xde=_\xcb\xf8%\x01,o\xdfU\xd0\xcc,G\xf3@\x8c\xd3\xd0\xa7\x11S\xf7\x8f\x9b\xe3\xc8\x15\xdan\x89\xe2\xffX\xff\xf2g\xfb\xd7\x02h\xdfM\xec$\xa0\xe9s\x0f\x06\x92a\\\x97\xb4\x1d\x8f\x08h\xb4b\tT\xbeO2\xd3\xaaL\xf1Z\x95\xa6\xe8s\xac}\x0c\x82\xccR\x8cs\xa0\x13qZ\xfaY\x00\xd1~9\xdcI\x1b\x8c\xaa\x9b0\x80i\xe4@\x08\x8ecO\xa7\xc7\x98//\xf7z\x88\xfeHcx\xab\x04\x1a\xff\xf6\xa5\x1e\xb0s\xf9\xd0\x8f\x159y\x08\x0b\xe3\x15h4\x10\xeb\x02V<p\xaa\x02\xca3X\xb0a{\x0c\xc47bU\x91j\xcdc\xdc\xdf\xee\xdc\x81\x806\x0fv\xdc\xf4\t\xb4\xd3\x92\x01Y\'3\xd3\x04\x1e\xd5\x7f\x19\x1c\xd9\xf3P\x94\x8e\x87J\x0c\xfaPMKQt\x02\xe2G\xed\tY\x8e3\xa1*\x98c\x9dK\x03\x1c]\x9bM\x9f0\xbd\x8a\x0e\xf2u; \xc5e\x87+\xda\xaa\xf3`\x92%\x90\x8f-\x08\xf0=\xd88\xd7<\xaa\xda\x08;U\xd4\xc9)\x13\xfc\xa3\xdc\x89\x97\xea\xf8\x99\xc7\xbc\x1d\xb9\xd55\x90y\x17\xacpA\xe0\xfe\xa6\xb5xlbc\x91\xbd\xff\x82z\x91\xcd\x8fT\x9eCf|\x1e\xce\x06\xa8{-\x8e\x05}G\x94\x92 \xa8\xb5\xae\xba\x00\n/\xa5\x99q\xabi\x81zh\x82\xdb@\xf8/\xd7\xfa\x17\xb9\x1c\xe0\x1b\x0c\xa0\xa7\xa1Y\x7f\x9d\x1b\xd5\xf7\xdf\x0b\x1e\xa2\xb3,\xbd\xde\x00\x06_\xde\x1a`\x12s\xdcV3Z>k\xe3\xcc\xe7\xef.d89;J\xce.\x14\xa5\xfc\x0b\x90\xa0\xce\xa0\xe8\xae\xff#s\r*\x1b\xa2\xf2\xe3.\x02\x91\xd4Y\x8f<\xa5\xad\xd5\x99\xb1\x1cE\x80\xf7\xc0\xb0\x86\xbb\xbdLfK\xfcm[~\xdc\xa3\x86\xf8,\xa7A\x90\xcf\x87\x1c\xfaWUVv~\xecT\xb7E\xbbQKu:j\x1a\xadeU\xeb7s\xfcb\x10\xbag{PV\xf5x\xbd\xc4\x9c\xbe\x85G\xe8\x1d\x03g<=\xb8\xc4p\xea\x9f\xb0\xb6\x9dwCroi\x83!\xec9$\x18=\xcf\xf9\xa9$>\xfa`\xe1r\xd3\x91w&\xbdj\xb4\x8c\xde\xbe\x9c\x19kH^\xb3>\x9b\xc9\xa0\xaen\xb8\xf8\xcf\x91\x16\'\x13\x1e\x1a:\xfa\xff~#N\xf7\xdb\xb9)\xbc\xfd\'}}V\x9eF\x9dAW~W\xd8\xb77\x01\x0b\xe9\xc1\x85\xb5M\xf12\x7f\x85g\x9e\xedB\xb3SW\x9cvX\x01\xd1y)K\xe7\xdc(\xc1\x17b\x11\xe7\x17\x9f\x1d\x9e\xb9ka\x08O;\xa0\x96\x10\xack<:(\n\xfd":+\x88\x9b\x1d\x8eo\xfd\x11d\xf1~\xcc\x0enj\xcfK\x9d\xce\x86\xbd(\x146\xaa\x08\x85\xd2\x04RU\xa8\xbf\xf2`\x1aC?\xa9\x9at\xc4\xaf\xaf\x93\xc9\x950\xc6v\xb0\xbd;\x9fM
b\x08lA\x88w\xdfa7&yt\xf6\xcd#+\x98\xa8UX\xb3\xa2xCm\xfa\xa8\xfa@~l\xb7\x7f\xa1\x83\x8b\xeb\xf3\xf5\x81\x90j]\xfd-\xdf`\x94t4Fu\xbc=\xe81\xce\xa0:\xf0\xea\xc4\xf4\x83\xb8\xea\x9e\xaf\\\xd5h>P\xb3\xa1e\x0bhY\xd12\x0e(}\xac\'\xb3cA\xf5+\x8d\xd9\xb3\xb4\x15?\xc6,\xe5\xc2\x03\n\x91\x137\x19]\x87\x8c\xdf\x18\xdf\xbc|\'\xd1\x91\xbc\x18\xe7\x14\x86\x97}\xf2\x1a\xbe*ds\x1b\xfc\xa9+\x98S2j\xa2\x8a\x1d\x04x\x84k\xc1?\xb9\xebqW\x80\xdd\xf2\xc4(\xac\x1e\x17A\xc2\x87Q\x15S\xf0\xeb\xe2\x16\xe2\x9b\x88\xd48\xb7\x95\x95x\xdc\xf2\x88\xe4\x06\xd42\xe1\xc3L\x8f\xa7\xceU\xf1\x81\xa5\x8e\xecOG\x1f\xf0\x18J\x87\x92\x19\x0eF\xa1\x0b\xd7\xf7\x87*C\xec\x9e\xf4p\xed\xd6x)\x12\x0c\xd5($\xf1\xf0\xe3\xa7\xc63\x15\xe1l=\x0e\x96z\xc6)\x0b\x17\xe00-\x92\x18\xe4"{\x8b\xec4\xd0\xf8\xe9\xd3\xfe\x9f\xc6v\x8fM\x95U>\x8d_\xa15\xe8\xce!\xe2Xj1\xc5\xa4y\x9d\xc2\xa7\xf2\xf7v\x1aO\xd6\xd3`\xb1\xff\xaa\x97C\x85\xc9\x07\xfbV\xdb\x8e\xe8\x96R@t\xed\xcd\xe2\xb2\xf9\x97a`\xc4\x17\xf3]\x04t\x9f\xed\xc6\xac7\xe3\x11wC\xfe\x808\x13M\x1a\xce\xe6\xa2v\xabi\xe8\xa3\xdf=\x8d\xf0[D\x82\xb9\xbb\x19[\xc4M\xbca\xd5\r-c\xec\xb2\x14\x0f\x81jU\xe3\x94\x004\xef\x8bA\xfb\xaf\x85\xa63iVs\x07\x9a\xf1f\xd3\xb3\xf4}NO|!\xc7\xcd\xd4Kh\x80\xeb\xe9ShM\x9c\xfaE\x86\x12\xdfz\xfc\x89\xd9\x81\\J\xc5jR\xcb\xdeq\x02n\x0c\xa6\xb8\xc5"\xdb\xb2.\xd3no\x9c\xc5\x05*\xcf\x0e\xfa!=\xb14\x98W\x003\xc0Z\x1d\x0f\xec3\x9e\x10v)\xca\xe8\xa7\r\xb8\xf7A\xa7Ls\xdb\x07I\x843{\xdc\x08\xff\xf9\xd8Q\xe1\xb9rh\xa9\x9f\'\xb2\xf1\x80\xf1\xb8\xb7\x04\xba\x0e\xec\x05cz\x99\xe4\xb9\xc4c\x9af<j\x15~\x16\xb8\xed\x17\x1eQ Z\x84\xa4\x95g\x8fZe\x95\xc3o\xa0%-_\x11T@\xf4\xeb\xf50\xf4q\xaa\xa9\x18Y\xd4\x1eC\xc7\x1d=\x17\xf1\xff\n7$\xab\x94*g\xfaCD\xdf\x85u\xf0\xae\xed\x1f\x9e\x94\x12\xbd|')
|
mit-llREPO_NAMEspacegym-kspdgPATH_START.@spacegym-kspdg_extracted@spacegym-kspdg-main@src@kspdg@private_src@python3_9@Darwin_x86_64@kspdg_envs@lbg1@lg4_envs.py@.PATH_END.py
|
{
"filename": "linear.py",
"repo_name": "bccp/nbodykit",
"repo_path": "nbodykit_extracted/nbodykit-master/nbodykit/cosmology/power/linear.py",
"type": "Python"
}
|
import numpy
from . import transfers
from ..cosmology import Cosmology
class LinearPower(object):
"""
An object to compute the linear power spectrum and related quantities,
using a transfer function from the CLASS code or the analytic
Eisenstein & Hu approximation.
Parameters
----------
cosmo : :class:`Cosmology`, astropy.cosmology.FLRW
the cosmology instance; astropy cosmology objects are automatically
converted to the proper type
redshift : float
the redshift of the power spectrum
transfer : str, optional
string specifying the transfer function to use; one of
'CLASS', 'EisensteinHu', 'NoWiggleEisensteinHu'
Attributes
----------
cosmo : class:`Cosmology`
the object giving the cosmological parameters
sigma8 : float
the z=0 amplitude of matter fluctuations
redshift : float
the redshift to compute the power at
transfer : str
the type of transfer function used
"""
def __init__(self, cosmo, redshift, transfer='CLASS'):
from astropy.cosmology import FLRW
# convert astropy
if isinstance(cosmo, FLRW):
from nbodykit.cosmology import Cosmology
cosmo = Cosmology.from_astropy(cosmo)
# store a copy of the cosmology
self.cosmo = cosmo.clone()
# set sigma8 to the cosmology value
self._sigma8 = self.cosmo.sigma8
# setup the transfers
if transfer not in transfers.available:
raise ValueError("'transfer' should be one of %s" %str(transfers.available))
self.transfer = transfer
# initialize internal transfers
c = self.cosmo.clone() # transfers get an internal copy
self._transfer = getattr(transfers, transfer)(c, redshift)
self._fallback = transfers.EisensteinHu(c, redshift) # fallback to analytic when out of range
# normalize to proper sigma8
self._norm = 1.
self.redshift = 0;
self._norm = (self._sigma8 / self.sigma_r(8.))**2 # sigma_r(z=0, r=8)
# set redshift
self.redshift = redshift
# store meta-data
self._attrs = {}
self._attrs['transfer'] = transfer
self._attrs['cosmo'] = dict(cosmo)
@property
def attrs(self):
"""
The meta-data dictionary
"""
self._attrs['redshift'] = self.redshift
self._attrs['sigma8'] = self.sigma8
return self._attrs
@property
def redshift(self):
"""
The redshift of the power spectrum
"""
return self._z
@redshift.setter
def redshift(self, value):
self._z = value
self._transfer.redshift = value
self._fallback.redshift = value
@property
def sigma8(self):
"""
The present day value of ``sigma_r(r=8 Mpc/h)``, used to normalize
the power spectrum, which is proportional to the square of this value.
The power spectrum can re-normalized by setting a different
value for this parameter
"""
return self._sigma8
@sigma8.setter
def sigma8(self, value):
"""
Set the sigma8 value and normalize the power spectrum to the new value
"""
# re-scale the normalization
self._norm *= (value / self._sigma8)**2
# update to this sigma8
self._sigma8 = value
def __call__(self, k):
"""
Return the linear power spectrum in units of
:math:`h^{-3} \mathrm{Mpc}^3` at the redshift specified by
:attr:`redshift`.
The transfer function used to evaluate the power spectrum is
specified by the ``transfer`` attribute.
Parameters
---------
k : float, array_like
the wavenumber in units of :math:`h Mpc^{-1}`
Returns
-------
Pk : float, array_like
the linear power spectrum evaluated at ``k`` in units of
:math:`h^{-3} \mathrm{Mpc}^3`
"""
if self.transfer != "CLASS":
Pk = k**self.cosmo.n_s * self._transfer(k)**2
else:
k = numpy.asarray(k)
kmax = self.cosmo.P_k_max
inrange = k < 0.99999*kmax # prevents rounding errors
# the return array (could be scalar array)
Pk = numpy.zeros_like(k)
# k values in and out of valid range
k_in = k[inrange]; k_out = k[~inrange]
# use CLASS in range
Pk[inrange] = k_in**self.cosmo.n_s * self._transfer(k_in)**2
# use Eisentein-Hu out of range
if len(k_out):
analytic_Tk = self._fallback(k_out)
analytic_Tk *= self._transfer(kmax)/ self._fallback(kmax)
Pk[~inrange] = k_out**self.cosmo.n_s * analytic_Tk**2
return self._norm * Pk
def velocity_dispersion(self, kmin=1e-5, kmax=10., **kwargs):
    r"""
    The 1D velocity dispersion :math:`\sigma_v` in units of
    :math:`\mathrm{Mpc/h}` at :attr:`redshift`, defined as

    .. math::

        \sigma_v^2 = \frac{1}{3} \int_a^b \frac{d^3 q}{(2\pi)^3} \frac{P(q,z)}{q^2}.

    Parameters
    ----------
    kmin : float, optional
        the lower bound for the integral, in units of :math:`\mathrm{Mpc/h}`
    kmax : float, optional
        the upper bound for the integral, in units of :math:`\mathrm{Mpc/h}`
    **kwargs :
        additional keywords passed through to :func:`scipy.integrate.quad`
    """
    from scipy.integrate import quad

    # integrate in log(k); the extra factor of q comes from the change
    # of variables dq = q dlog(q)
    def kernel(lnq):
        q = numpy.exp(lnq)
        return q * self(q)

    integral, _ = quad(kernel, numpy.log(kmin), numpy.log(kmax), **kwargs)
    return (integral / (6 * numpy.pi**2)) ** 0.5
def sigma_r(self, r, kmin=1e-5, kmax=1e1):
    r"""
    The rms mass fluctuation :math:`\sigma` within a sphere of radius
    ``r`` (in units of :math:`h^{-1} Mpc`) at :attr:`redshift`,

    .. math::

        \sigma^2 = \int_0^\infty \frac{k^3 P(k,z)}{2\pi^2} W^2_T(kr) \frac{dk}{k},

    where :math:`W_T(x) = 3/x^3 (\mathrm{sin}x - x\mathrm{cos}x)` is a
    top-hat filter in Fourier space.  Evaluating at ``r=8`` recovers
    :attr:`sigma8` to within numerical precision.

    Parameters
    ----------
    r : float, array_like
        the scale to compute the mass fluctuation over, in units of
        :math:`h^{-1} Mpc`
    kmin : float, optional
        the lower bound for the integral, in units of :math:`\mathrm{Mpc/h}`
    kmax : float, optional
        the upper bound for the integral, in units of :math:`\mathrm{Mpc/h}`
    """
    import mcfit
    from scipy.interpolate import InterpolatedUnivariateSpline as spline

    # sample P(k) on a log-spaced grid and transform it into the
    # top-hat variance sigma^2(R) with mcfit
    kgrid = numpy.logspace(numpy.log10(kmin), numpy.log10(kmax), 1024)
    R, sigmasq = mcfit.TophatVar(kgrid, lowring=True)(self(kgrid), extrap=True)

    # interpolate sigma^2(R) onto the requested scale(s)
    return spline(R, sigmasq)(r) ** 0.5
def NoWiggleEHPower(cosmo, redshift):
    """
    Deprecated shim that builds a :class:`LinearPower` using the
    no-wiggle Eisenstein-Hu transfer function.
    """
    import warnings
    warnings.warn("NoWiggleEHPower is deprecated. Use LinearPower with transfer set to 'NoWiggleEisensteinHu'", FutureWarning)
    kwargs = dict(cosmo=cosmo, redshift=redshift, transfer="NoWiggleEisensteinHu")
    return LinearPower(**kwargs)
def EHPower(cosmo, redshift):
    """
    Deprecated shim that builds a :class:`LinearPower` using the
    Eisenstein-Hu transfer function.

    Parameters
    ----------
    cosmo :
        the cosmology instance passed through to :class:`LinearPower`
    redshift : float
        the redshift of the power spectrum
    """
    import warnings
    # BUG FIX: the message previously named NoWiggleEHPower (copy/paste
    # from the function above) instead of EHPower
    warnings.warn("EHPower is deprecated. Use LinearPower with transfer set to 'EisensteinHu'", FutureWarning)
    return LinearPower(cosmo=cosmo, redshift=redshift, transfer="EisensteinHu")
|
bccpREPO_NAMEnbodykitPATH_START.@nbodykit_extracted@nbodykit-master@nbodykit@cosmology@power@linear.py@.PATH_END.py
|
{
"filename": "docstring_example.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/documentation/docstring_example.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Example Google style docstrings.
This module demonstrates documentation as specified by the `Google Python
Style Guide`_. Docstrings may extend over multiple lines. Sections are created
with a section header and a colon followed by a block of indented text.
Example:
Examples can be given using either the ``Example`` or ``Examples``
sections. Sections support any reStructuredText formatting, including
literal blocks::
$ python example_google.py
Section breaks are created by resuming unindented text. Section breaks
are also implicitly created anytime a new section starts.
Attributes:
module_level_variable1 (int):
Module level variables may be documented in either the ``Attributes``
section of the module docstring, or in an inline docstring immediately
following the variable.
Either form is acceptable, but the two should not be mixed. Choose
one convention to document module level variables and be consistent
with it.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
module_level_variable1 = 12345
module_level_variable2 = 98765
"""int: Module level variable documented inline.
The docstring may span multiple lines. The type may optionally be specified
on the first line, separated by a colon.
"""
def module_level_function(param1, param2=None, *args, **kwargs):
    r"""This is an example of a module level function.

    Function parameters should be documented in the ``Args`` section. The name
    of each parameter is required. The type and description of each parameter
    is optional, but should be included if not obvious.

    Parameter types -- if given -- should be specified according to
    `PEP 484`_, though `PEP 484`_ conformance isn't required or enforced.

    If \*args or \*\*kwargs are accepted,
    they should be listed as ``*args`` and ``**kwargs``.

    The format for a parameter is::

        name (type):
            The description may span multiple indented lines. The "(type)" is
            optional.

            Multiple paragraphs are supported in parameter
            descriptions.

    Args:
        param1 (int):
            The first parameter.
        param2 (Optional[str]):
            The second parameter. Defaults to None.
        *args:
            Variable length argument list.
        **kwargs:
            Arbitrary keyword arguments.

    Returns:
        bool:
            True if successful, False otherwise.

            The return type is optional and may be specified at the beginning
            of the ``Returns`` section followed by a colon.

    Raises:
        ValueError:
            If `param2` is equal to `param1`.

    .. _PEP 484:
        https://www.python.org/dev/peps/pep-0484/
    """
    # FIXES: the docstring is now a raw string -- it contains literal
    # backslash escapes (``\*args``) which are invalid escape sequences in a
    # normal string and emit SyntaxWarning on Python 3.12+.  The ``Raises``
    # section also listed AttributeError, which this function never raises;
    # only ValueError is documented now.
    if param1 == param2:
        raise ValueError("param1 may not be equal to param2")
    return True
def example_generator(n):
    """Yield the integers ``0`` through ``n - 1`` in order.

    Generators have a ``Yields`` section instead of a ``Returns`` section.

    Args:
        n (int):
            The upper limit of the range to generate, from 0 to `n` - 1.

    Yields:
        int:
            The next number in the range of 0 to `n` - 1.

    Examples:
        Examples should be written in doctest format, and should illustrate
        how to use the function.

        >>> print([i for i in example_generator(4)])
        [0, 1, 2, 3]
    """
    yield from range(n)
class ExampleError(Exception):
    """Exception carrying a human-readable message and an error code.

    Exceptions are documented in the same way as classes.  The __init__
    method may be documented in either the class level docstring, or as a
    docstring on the __init__ method itself -- but not both.

    Note:
        Do not include the `self` parameter in the ``Args`` section.

    Args:
        msg (str):
            Human readable string describing the exception.
        code (Optional[int]):
            Error code.

    Attributes:
        msg (str):
            Human readable string describing the exception.
        code (int):
            Exception error code.
    """

    def __init__(self, msg, code):
        # Forward the message to Exception so str(e) and e.args behave like
        # the built-in exceptions (previously str(e) was the empty string).
        super(ExampleError, self).__init__(msg)
        self.msg = msg
        self.code = code
class ExampleClass(object):
    """A demonstration class for Google-style docstrings.

    Public attributes may be documented in an ``Attributes`` section here,
    or inline next to their assignment in ``__init__``; choose one style
    per project and stay consistent.  Properties are documented in their
    getter methods.

    Attribute and property types -- if given -- should be specified
    according to `PEP 484`_, though conformance isn't required or enforced.

    Attributes:
        attr1 (str):
            Description of `attr1`.
        attr2 (Optional[int]):
            Description of `attr2`.

    .. _PEP 484:
        https://www.python.org/dev/peps/pep-0484/
    """

    def __init__(self, param1, param2, param3):
        """Store the three constructor parameters as public attributes.

        Note:
            Do not include the `self` parameter in the ``Args`` section.

        Args:
            param1 (str):
                Description of `param1`.
            param2 (Optional[int]):
                Description of `param2`. Multiple lines are supported.
            param3 (List[str]):
                Description of `param3`.
        """
        self.attr1 = param1
        self.attr2 = param2
        self.attr3 = param3  # documented inline with the attribute
        self.attr4 = ["attr4"]  # List[str]: fixed default contents
        self.attr5 = None  # Optional[str]: intended to be set later

    @property
    def readonly_property(self):
        """str: Properties should be documented in their getter method."""
        return "readonly_property"

    @property
    def readwrite_property(self):
        """List[str]: Read/write properties are documented in the getter only.

        If the setter method contains notable behavior, it should be
        mentioned here.
        """
        return ["readwrite_property"]

    @readwrite_property.setter
    def readwrite_property(self, value):
        # intentionally a no-op, mirroring the original example
        pass

    def example_method(self, param1, param2):
        """Class methods are similar to regular functions.

        Note:
            Do not include the `self` parameter in the ``Args`` section.

        Args:
            param1:
                The first parameter.
            param2:
                The second parameter.

        Returns:
            True if successful, False otherwise.
        """
        return True

    def __special__(self):
        """By default special members with docstrings are included.

        This can be disabled with ``napoleon_include_special_with_doc =
        False`` in Sphinx's conf.py.
        """
        pass

    def __special_without_docstring__(self):
        pass

    def _private(self):
        """By default private members are not included.

        They can be included with ``napoleon_include_private_with_doc =
        True`` in Sphinx's conf.py.
        """
        pass

    def _private_without_docstring(self):
        pass
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@documentation@docstring_example.py@.PATH_END.py
|
{
"filename": "_fill.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermap/_fill.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``fill`` property of ``scattermap`` traces."""

    def __init__(self, plotly_name="fill", parent_name="scattermap", **kwargs):
        # Resolve the overridable defaults before delegating to the base class.
        edit_type = kwargs.pop("edit_type", "calc")
        values = kwargs.pop("values", ["none", "toself"])
        super(FillValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermap@_fill.py@.PATH_END.py
|
{
"filename": "ragged_bitcast_op_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/ops/ragged/ragged_bitcast_op_test.py",
"type": "Python"
}
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.bitcast."""
from absl.testing import parameterized
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSplitOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  # NOTE(review): the class name says "Split" but every case exercises
  # ragged_array_ops.bitcast (the file is ragged_bitcast_op_test.py).
  # Presumably a copy/paste leftover -- confirm before renaming, since
  # runners may select tests by class name.

  @parameterized.parameters([
      #=========================================================================
      # Cast to same-size dtype.
      #=========================================================================
      dict(
          descr='int32 to int32 cast',
          inputs=ragged_factory_ops.constant_value(
              [[1, 2], [3]],
              dtype=dtypes.int32,
          ),
          outputs=ragged_factory_ops.constant_value(
              [[1, 2], [3]],
              dtype=dtypes.int32,
          )),
      dict(
          descr='int32 to uint32 cast',
          inputs=ragged_factory_ops.constant_value(
              [[1, 2], [-1]],
              dtype=dtypes.int32,
          ),
          # -1 reinterpreted bitwise as unsigned
          outputs=ragged_factory_ops.constant_value(
              [[1, 2], [4294967295]],
              dtype=dtypes.uint32,
          )),
      dict(
          descr='uint32 to int32 cast',
          inputs=ragged_factory_ops.constant_value(
              [[1, 2], [4294967295]],
              dtype=dtypes.uint32,
          ),
          outputs=ragged_factory_ops.constant_value(
              [[1, 2], [-1]],
              dtype=dtypes.int32,
          )),
      #=========================================================================
      # Cast to larger dtype.
      #=========================================================================
      dict(
          descr='int32 to int64 cast',
          # upcast consumes the uniform innermost dimension (pairs of int32)
          inputs=ragged_factory_ops.constant_value(
              [[[1, 0], [2, 0]], [[3, 0]]],
              dtype=dtypes.int32,
              ragged_rank=1,
          ),
          outputs=ragged_factory_ops.constant_value(
              [[1, 2], [3]],
              dtype=dtypes.int64,
          )),
      #=========================================================================
      # Cast to smaller dtype.
      #=========================================================================
      dict(
          descr='int64 to int32 cast',
          inputs=ragged_factory_ops.constant_value(
              [[1, 2], [3]],
              dtype=dtypes.int64,
          ),
          # downcast adds a new uniform innermost dimension
          outputs=ragged_factory_ops.constant_value(
              [[[1, 0], [2, 0]], [[3, 0]]],
              dtype=dtypes.int32,
              ragged_rank=1,
          )),
  ]) # pyformat: disable
  def testBitcast(self, descr, inputs, outputs, name=None):
    """Checks that bitcast yields the expected values, dtype and ragged rank."""
    result = ragged_array_ops.bitcast(inputs, outputs.dtype, name)
    self.assertEqual(result.dtype, outputs.dtype)
    self.assertEqual(result.ragged_rank, outputs.ragged_rank)
    self.assertAllEqual(result, outputs)

  @parameterized.parameters([
      dict(
          descr='Upcast requires uniform inner dimension',
          inputs=ragged_factory_ops.constant_value(
              [[[1, 0], [2, 0]], [[3, 0]]],
              dtype=dtypes.int32,
              ragged_rank=2,
          ),
          cast_to_dtype=dtypes.int64,
          exception=ValueError,
          message='`input.flat_values` is required to have rank >= 2'),
  ]) # pyformat: disable
  def testBitcastError(self,
                       descr,
                       inputs,
                       cast_to_dtype,
                       exception,
                       message,
                       name=None):
    """Checks that invalid upcasts raise with the documented message."""
    with self.assertRaisesRegex(exception, message):
      result = ragged_array_ops.bitcast(inputs, cast_to_dtype, name)
      self.evaluate(result)
if __name__ == '__main__':
googletest.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@ops@ragged@ragged_bitcast_op_test.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/yaxis/title/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated validator for the compound ``layout.yaxis.title.font``
    # property.  ``data_docs`` below is user-facing help text surfaced by
    # plotly at runtime and must stay in sync with the Font data class.
    def __init__(self, plotly_name="font", parent_name="layout.yaxis.title", **kwargs):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size

            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@yaxis@title@_font.py@.PATH_END.py
|
{
"filename": "_uirevision.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmap/_uirevision.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
    """Any-type validator for the ``uirevision`` property of ``choroplethmap``."""

    def __init__(self, plotly_name="uirevision", parent_name="choroplethmap", **kwargs):
        # Resolve the overridable default before delegating to the base class.
        edit_type = kwargs.pop("edit_type", "none")
        super(UirevisionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmap@_uirevision.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "microsoft/vscode",
"repo_path": "vscode_extracted/vscode-main/extensions/json-language-features/server/README.md",
"type": "Markdown"
}
|
# VSCode JSON Language Server
[](https://npmjs.org/package/vscode-json-languageserver)
[](https://npmjs.org/package/vscode-json-languageserver)
[](https://npmjs.org/package/vscode-json-languageserver)
The JSON Language server provides language-specific smarts for editing, validating and understanding JSON documents. It runs as a separate executable and implements the [language server protocol](https://microsoft.github.io/language-server-protocol/overview) to be connected by any code editor or IDE.
## Capabilities
### Server capabilities
The JSON language server supports requests on documents of language id `json` and `jsonc`.
- `json` documents are parsed and validated following the [JSON specification](https://tools.ietf.org/html/rfc7159).
- `jsonc` documents additionally accept single line (`//`) and multi-line comments (`/* ... */`). JSONC is a VSCode specific file format, intended for VSCode configuration files, without any aspirations to define a new common file format.
The server implements the following capabilities of the language server protocol:
- [Code completion](https://microsoft.github.io/language-server-protocol/specification#textDocument_completion) for JSON properties and values based on the document's [JSON schema](http://json-schema.org/) or based on existing properties and values used at other places in the document. JSON schemas are configured through the server configuration options.
- [Hover](https://microsoft.github.io/language-server-protocol/specification#textDocument_hover) for values based on descriptions in the document's [JSON schema](http://json-schema.org/).
- [Document Symbols](https://microsoft.github.io/language-server-protocol/specification#textDocument_documentSymbol) for quick navigation to properties in the document.
- [Document Colors](https://microsoft.github.io/language-server-protocol/specification#textDocument_documentColor) for showing color decorators on values representing colors and [Color Presentation](https://microsoft.github.io/language-server-protocol/specification#textDocument_colorPresentation) for color presentation information to support color pickers. The location of colors is defined by the document's [JSON schema](http://json-schema.org/). All values marked with `"format": "color-hex"` (VSCode specific, non-standard JSON Schema extension) are considered color values. The supported color formats are `#rgb[a]` and `#rrggbb[aa]`.
- [Code Formatting](https://microsoft.github.io/language-server-protocol/specification#textDocument_rangeFormatting) supporting ranges and formatting the whole document.
- [Folding Ranges](https://microsoft.github.io/language-server-protocol/specification#textDocument_foldingRange) for all folding ranges in the document.
- Semantic Selection for semantic selection for one or multiple cursor positions.
- [Goto Definition](https://microsoft.github.io/language-server-protocol/specification#textDocument_definition) for $ref references in JSON schemas
- [Diagnostics (Validation)](https://microsoft.github.io/language-server-protocol/specification#textDocument_publishDiagnostics) are pushed for all open documents
- syntax errors
- structural validation based on the document's [JSON schema](http://json-schema.org/).
In order to load JSON schemas, the JSON server uses NodeJS `http` and `fs` modules. For all other features, the JSON server only relies on the documents and settings provided by the client through the LSP.
### Client requirements
The JSON language server expects the client to only send requests and notifications for documents of language id `json` and `jsonc`.
The JSON language server has the following dependencies on the client's capabilities:
- Code completion requires that the client capability has *snippetSupport*. If not supported by the client, the server will not offer the completion capability.
- Formatting support requires the client to support *dynamicRegistration* for *rangeFormatting*. If not supported by the client, the server will not offer the format capability.
## Configuration
### Initialization options
The client can send the following initialization options to the server:
- `provideFormatter: boolean | undefined`. If defined, the value defines whether the server provides the `documentRangeFormattingProvider` capability on initialization. If undefined, the setting `json.format.enable` is used to determine whether formatting is provided. The formatter will then be registered through dynamic registration. If the client does not support dynamic registration, no formatter will be available.
- `handledSchemaProtocols`: The URI schemas handles by the server. See section `Schema configuration` below.
- `customCapabilities`: Additional non-LSP client capabilities:
- `rangeFormatting: { editLimit: x } }`: For performance reasons, limit the number of edits returned by the range formatter to `x`.
### Settings
Clients may send a `workspace/didChangeConfiguration` notification to notify the server of settings changes.
The server supports the following settings:
- http
- `proxy`: The URL of the proxy server to use when fetching schema. When undefined or empty, no proxy is used.
- `proxyStrictSSL`: Whether the proxy server certificate should be verified against the list of supplied CAs.
- json
- `format`
- `enable`: Whether the server should register the formatting support. This option is only applicable if the client supports *dynamicRegistration* for *rangeFormatting* and `initializationOptions.provideFormatter` is not defined.
- `validate`
- `enable`: Whether the server should validate. Defaults to `true` if not set.
- `schemas`: Configures association of file names to schema URL or schemas and/or associations of schema URL to schema content.
- `fileMatch`: an array of file names or paths (separated by `/`). `*` can be used as a wildcard. Exclusion patterns can also be defined and start with '!'. A file matches when there is at least one matching pattern and the last matching pattern is not an exclusion pattern.
- `folderUri`: If provided, the association is only used if the document is located in the given folder (directly or in a subfolder)
- `url`: The URL of the schema, optional when also a schema is provided.
- `schema`: The schema content, optional
- `resultLimit`: The max number of color decorators and outline symbols to be computed (for performance reasons)
- `jsonFoldingLimit`: The max number of folding ranges to be computed for json documents (for performance reasons)
- `jsoncFoldingLimit`: The max number of folding ranges to be computed for jsonc documents (for performance reasons)
```json
{
"http": {
"proxy": "",
"proxyStrictSSL": true
},
"json": {
"format": {
"enable": true
},
"schemas": [
{
"fileMatch": [
"foo.json",
"*.superfoo.json"
],
"url": "http://json.schemastore.org/foo",
"schema": {
"type": "array"
}
}
]
}
}
```
### Schema configuration and custom schema content delivery
[JSON schemas](http://json-schema.org/) are essential for code assist, hovers, color decorators to work and are required for structural validation.
To find the schema for a given JSON document, the server uses the following mechanisms:
- JSON documents can define the schema URL using a `$schema` property
- The settings define a schema association based on the documents URL. Settings can either associate a schema URL to a file or path pattern, and they can directly provide a schema.
- Additionally, schema associations can also be provided by a custom 'schemaAssociations' configuration call.
Schemas are identified by URLs. To load the content of a schema, the JSON language server either tries to load from that URI or path itself or delegates to the client.
The `initializationOptions.handledSchemaProtocols` initialization option defines which URLs are handled by the server. Requests for all other URIs are sent to the client.
`handledSchemaProtocols` is part of the initialization options and can't be changed while the server is running.
```ts
let clientOptions: LanguageClientOptions = {
initializationOptions: {
handledSchemaProtocols: ['file'] // language server should only try to load file URLs
}
...
}
```
If `handledSchemaProtocols` is not set, the JSON language server will load the following URLs itself:
- `http`, `https`: Loaded using NodeJS's HTTP support. Proxies can be configured through the settings.
- `file`: Loaded using NodeJS's `fs` support.
#### Schema content request
Requests for schemas with URLs not handled by the server are forwarded to the client through an LSP request. This request is a JSON language server-specific, non-standardized, extension to the LSP.
Request:
- method: 'vscode/content'
- params: `string` - The schema URL to request.
- response: `string` - The content of the schema with the given URL
#### Schema content change notification
When the client is aware that a schema content has changed, it will notify the server through a notification. This notification is a JSON language server-specific, non-standardized, extension to the LSP.
The server will, as a response, clear the schema content from the cache and reload the schema content when required again.
#### Schema associations notification
In addition to the settings, schemas associations can also be provided through a notification from the client to the server. This notification is a JSON language server-specific, non-standardized, extension to the LSP.
Notification:
- method: 'json/schemaAssociations'
- params: `ISchemaAssociations` or `ISchemaAssociation[]` defined as follows
```ts
interface ISchemaAssociations {
/**
* An object where:
* - keys are file names or file paths (using `/` as path separator). `*` can be used as a wildcard.
* - values are an arrays of schema URIs
*/
[pattern: string]: string[];
}
interface ISchemaAssociation {
/**
* The URI of the schema, which is also the identifier of the schema.
*/
uri: string;
/**
* A list of file path patterns that are associated to the schema. The '*' wildcard can be used. Exclusion patterns starting with '!'.
* For example '*.schema.json', 'package.json', '!foo*.schema.json'.
* A match succeeds when there is at least one pattern matching and last matching pattern does not start with '!'.
*/
fileMatch: string[];
/**
* If provided, the association is only used if the validated document is located in the given folder (directly or in a subfolder)
*/
folderUri?: string;
/*
* The schema for the given URI.
* If no schema is provided, the schema will be fetched with the schema request service (if available).
*/
schema?: JSONSchema;
}
```
`ISchemaAssociations`
- keys: file names or file paths (separated by `/`). `*` can be used as a wildcard.
- values: An array of schema URLs
Notification:
- method: 'json/schemaContent'
- params: `string` the URL of the schema that has changed.
### Item Limit
If the setting `resultLimit` is set, the JSON language server will limit the number of color symbols and document symbols computed.
If the setting `jsonFoldingLimit` or `jsoncFoldingLimit` is set, the JSON language server will limit the number of folding ranges computed.
## Try
The JSON language server is shipped with [Visual Studio Code](https://code.visualstudio.com/) as part of the built-in VSCode extension `json-language-features`. The server is started when the first JSON file is opened. See the [VSCode JSON documentation](https://code.visualstudio.com/docs/languages/json) for detailed information on the user experience and for more information on how to configure the language support.
## Integrate
If you plan to integrate the JSON language server into an editor and IDE, check out [this page](https://microsoft.github.io/language-server-protocol/implementors/tools/) if there's already an LSP client integration available.
You can also launch the language server as a command and connect to it.
For that, install the `vscode-json-languageserver` npm module:
`npm install -g vscode-json-languageserver`
Start the language server with the `vscode-json-languageserver` command. Use a command line argument to specify the preferred communication channel:
```
vscode-json-languageserver --node-ipc
vscode-json-languageserver --stdio
vscode-json-languageserver --socket=<port>
```
To connect to the server from NodeJS, see Remy Suen's great write-up on [how to communicate with the server](https://github.com/rcjsuen/dockerfile-language-server-nodejs#communicating-with-the-server) through the available communication channels.
## Participate
The source code of the JSON language server can be found in the [VSCode repository](https://github.com/microsoft/vscode) at [extensions/json-language-features/server](https://github.com/microsoft/vscode/tree/master/extensions/json-language-features/server).
File issues and pull requests in the [VSCode GitHub Issues](https://github.com/microsoft/vscode/issues). See the document [How to Contribute](https://github.com/microsoft/vscode/wiki/How-to-Contribute) on how to build and run from source.
Most of the functionality of the server is located in libraries:
- [jsonc-parser](https://github.com/microsoft/node-jsonc-parser) contains the JSON parser and scanner.
- [vscode-json-languageservice](https://github.com/microsoft/vscode-json-languageservice) contains the implementation of all features as a re-usable library.
- [vscode-languageserver-node](https://github.com/microsoft/vscode-languageserver-node) contains the implementation of language server for NodeJS.
Help on any of these projects is very welcome.
## Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
## License
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the [MIT](https://github.com/microsoft/vscode/blob/master/LICENSE.txt) License.
|
microsoftREPO_NAMEvscodePATH_START.@vscode_extracted@vscode-main@extensions@json-language-features@server@README.md@.PATH_END.py
|
{
"filename": "concat_bitweights.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/mock_tools/concat_bitweights.py",
"type": "Python"
}
|
from astropy.table import Table,vstack
import numpy as np
def concatenateBWFiles(BWDir, hpList, survey = 'main', obscon = 'dark', OFName = '{0}bw-{1}-allTiles.fits', skipFailures = False, overwrite = False):
    """Concatenate per-healpix bitweight FITS tables into one file.

    Parameters
    ----------
    BWDir : str
        Root directory holding the per-healpix bitweight files.
    hpList : sequence of int
        Healpix pixel numbers to concatenate (must contain at least two).
    survey, obscon : str
        Survey name and observing conditions; used to build file paths.
    OFName : str
        Output file-name template, formatted with (survey, obscon).
    skipFailures : bool
        If True, healpix whose files cannot be read are skipped instead
        of raising.
    overwrite : bool
        Passed through to astropy's ``Table.write``.
    """
    BWBaseFN = BWDir + '{0}/{1}/{0}bw-{1}-hp-{2:d}.fits'
    # Explicit check instead of assert: asserts are stripped under "python -O".
    if len(hpList) <= 1:
        raise ValueError('hpList must contain at least two healpix numbers')
    tables = []
    notCompleted = []
    for hp in hpList:
        print(BWBaseFN.format(survey, obscon, hp))
        try:
            tables.append(Table.read(BWBaseFN.format(survey, obscon, hp), hdu = 1))
        except Exception:
            # Originally only files after the first were protected by
            # skipFailures; now a failure on any file is skippable.
            if skipFailures:
                notCompleted.append(hp)
            else:
                # Bare raise preserves the original traceback.
                raise
    # Stack once at the end: repeated pairwise vstack inside the loop is
    # quadratic in the number of files.
    AllBWFiles = vstack(tables)
    # print(', '.join(map(str, notCompleted)))
    AllBWFiles.write(BWDir + '{0}/{1}/'.format(survey, obscon) + OFName.format(survey, obscon), format = 'fits', overwrite = overwrite)
# Driver: concatenate the dark-time bitweight files for the AbacusSummit
# v3.1 altmtl realisation 1 (Y1 mocks) on NERSC.
hpL_file = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1/initled/hpxlist_dark.txt'
# The list file holds a single comma-separated line of healpix numbers;
# use a context manager so the file handle is closed promptly (the
# original left the handle from open() unclosed).
with open(hpL_file, 'r') as fh:
    HPList = np.array(fh.readline().split(',')).astype(int)
BW_dir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/BitweightFiles/'
concatenateBWFiles(BW_dir, HPList, skipFailures=False, overwrite=True)
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@mock_tools@concat_bitweights.py@.PATH_END.py
|
{
"filename": "config.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/lanl/config.py",
"type": "Python"
}
|
import os
# Default configuration for LANL data access.
CONFIG = {
    "local_data_dir": "lanl_data/",
    "remote_data_dir": "https://spdf.gsfc.nasa.gov/pub/data/lanl/",
}

# Environment variables override the default local data directory; the
# mission-specific LANL_DATA_DIR wins over the generic SPEDAS_DATA_DIR.
_spedas_root = os.environ.get("SPEDAS_DATA_DIR")
if _spedas_root:
    CONFIG["local_data_dir"] = os.sep.join([_spedas_root, "lanl"])

_lanl_dir = os.environ.get("LANL_DATA_DIR")
if _lanl_dir:
    CONFIG["local_data_dir"] = _lanl_dir
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@lanl@config.py@.PATH_END.py
|
{
"filename": "_colorscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/barpolar/marker/line/_colorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``colorscale`` attribute of ``barpolar.marker.line``."""

    def __init__(
        self, plotly_name="colorscale", parent_name="barpolar.marker.line", **kwargs
    ):
        # Pull the defaults out of kwargs first so callers may still
        # override any of them.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"autocolorscale": False})
        role = kwargs.pop("role", "style")
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@barpolar@marker@line@_colorscale.py@.PATH_END.py
|
{
"filename": "script.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/mayavi/plugins/script.py",
"type": "Python"
}
|
"""This represents the scripting API for MayaVi.
The Script class provides a scriptable view of the MayaVi Engine. It is
safe to instantiate as many Script instances as desired.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2020, Enthought, Inc.
# License: BSD Style.
# Enthought imports
from traits.api import HasTraits, Instance
# Local imports
from mayavi.core.engine import Engine
from mayavi.core.common import exception
##############################################################################
# Utility functions.
##############################################################################
def get_imayavi_engine(window):
    """Look up and return the MayaVi `Engine` service registered with
    the given Envisage workbench window.
    """
    engine = window.get_service(Engine)
    return engine
def get_imayavi(window):
    """Look up and return the mayavi.script.Script service (registered
    as `mayavi.services.IMAYAVI`) for the given Envisage workbench
    window.
    """
    script = window.get_service(Script)
    return script
##############################################################################
# `Script` class.
##############################################################################
class Script(HasTraits):
    """This class basically presents a scriptable 'view' of the MayaVi
    Engine.  It is registered as the IMayaVi service (via an
    ApplicationObject) because this is the interface users should be
    using when they script.

    Errors raised by the underlying engine calls are reported through
    ``mayavi.core.common.exception`` rather than propagated, so scripted
    calls never crash the UI.
    """

    # The workbench window we are associated with.
    window = Instance('pyface.workbench.api.WorkbenchWindow')

    # The MayaVi engine that we are managing.
    engine = Instance(Engine)

    ######################################################################
    # `Script` interface
    ######################################################################
    def add_source(self, src, scene=None):
        """Adds a given source to the MayaVi pipeline.
        """
        try:
            self.engine.add_source(src, scene=scene)
        # Catch Exception, not a bare except, so KeyboardInterrupt and
        # SystemExit still propagate; exception() logs the traceback.
        except Exception:
            exception()

    def add_module(self, mod, obj=None):
        """Adds a given module to the MayaVi pipeline.  Adds it to the
        selected object, or to an object passed through the kwarg `obj`.
        """
        try:
            self.engine.add_module(mod, obj=obj)
        except Exception:
            exception()

    def add_filter(self, fil, obj=None):
        """Adds a given filter to the MayaVi pipeline.  Adds it to the
        selected object, or to an object passed through the kwarg `obj`.
        """
        try:
            self.engine.add_filter(fil, obj=obj)
        except Exception:
            exception()

    def new_scene(self):
        """Creates a new VTK scene window.
        """
        return self.engine.new_scene()

    def load_visualization(self, fname):
        """Given a file/file name this loads the visualization.
        """
        try:
            self.engine.load_visualization(fname)
        except Exception:
            exception()

    def save_visualization(self, fname):
        """Given a file or a file name, this saves the current
        visualization to the file.
        """
        try:
            self.engine.save_visualization(fname)
        except Exception:
            exception()

    def get_active_window(self):
        """Get the currently active window."""
        return self.window

    def open(self, filename):
        """Open a data file if possible.
        """
        try:
            return self.engine.open(filename)
        except Exception:
            exception()

    ######################################################################
    # Non-public interface
    ######################################################################
    def _window_changed(self, window):
        """Traits handler for changes to application.

        Keeps `engine` in sync with the Engine service of the newly
        assigned workbench window.
        """
        self.engine = get_imayavi_engine(window)
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@mayavi@plugins@script.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "trevisanj/PFANT",
"repo_path": "PFANT_extracted/PFANT-master/data/sun-asplund-2009/README.md",
"type": "Markdown"
}
|
# Sun
Data files for the :sunny: Sun.
Abundances table taken from (Asplund, Grevesse, Sauval & Scott 2009):
http://arxiv.org/pdf/0909.0948.pdf
|
trevisanjREPO_NAMEPFANTPATH_START.@PFANT_extracted@PFANT-master@data@sun-asplund-2009@README.md@.PATH_END.py
|
{
"filename": "_x.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/marker/colorbar/_x.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``x`` attribute of ``histogram.marker.colorbar``."""

    def __init__(
        self, plotly_name="x", parent_name="histogram.marker.colorbar", **kwargs
    ):
        # Default edit type may be overridden by the caller via kwargs.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(XValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@marker@colorbar@_x.py@.PATH_END.py
|
{
"filename": "quickstart.py",
"repo_name": "samuelyeewl/specmatch-emp",
"repo_path": "specmatch-emp_extracted/specmatch-emp-master/docs/quickstart.py",
"type": "Python"
}
|
# code-start-imports
import pandas as pd
from pylab import *
import specmatchemp.library
import specmatchemp.plots as smplot
# code-stop-imports
# Quickstart walkthrough for specmatch-emp: load the spectral library,
# shift two target spectra (a G dwarf and an M dwarf) onto the library
# wavelength scale, run the brute-force match and linear-combination
# steps, and save every figure used in the documentation.  The
# code-start/code-stop markers delimit snippets extracted into the docs.
rc('savefig',dpi=160)
# code-start-loadlibrary: load in the library around the Mgb triplet
lib = specmatchemp.library.read_hdf(wavlim=[5140,5200])
# code-stop-loadlibrary
# code-start-library: Here's how the library spans the HR diagram.
fig = figure()
plot(lib.library_params.Teff, lib.library_params.radius,'k.',)
smplot.label_axes('Teff','radius')
# code-stop-library
fig.savefig('quickstart-library.png')
# code-start-library-labeled
fig = figure()
g = lib.library_params.groupby('source')
colors = ['Red','Orange','LimeGreen','Cyan','RoyalBlue','Magenta','ForestGreen']
i = 0
for source, idx in g.groups.items():
    # NOTE(review): DataFrame.ix was removed in pandas 1.0; this should be
    # .loc (or .iloc) on modern pandas -- confirm the pinned pandas version.
    cut = lib.library_params.ix[idx]
    color = colors[i]
    plot(
        cut.Teff, cut.radius,'+', label=source, color=color, alpha=1, ms=5,
        mew=1.5
    )
    i+=1
legend()
smplot.label_axes('Teff','radius')
# code-stop-library-labeled
fig.savefig('quickstart-library-labeled.png')
# code-start-library-selected-stars
# Pick one representative dwarf (radius < 1.5, near-solar [Fe/H]) per
# 500 K temperature bin between 3000 and 7000 K.
cut = lib.library_params.query('radius < 1.5 and -0.25 < feh < 0.25')
g = cut.groupby(pd.cut(cut.Teff,bins=arange(3000,7000,500)))
cut = g.first()
fig = figure()
plot(lib.library_params.Teff, lib.library_params.radius,'b.', label='_nolegend_')
plot(cut.Teff, cut.radius,'ro', label='Selected Stars')
legend()
smplot.label_axes('Teff','radius')
fig.savefig('quickstart-library-selected-stars.png')
# code-stop-library-selected-stars
# code-start-spectra-selected-stars
from matplotlib.transforms import blended_transform_factory
fig,ax = subplots(figsize=(8,4))
# Blended transform: x in axes coordinates, y in data coordinates, so the
# labels stay pinned to the left edge while following each spectrum's offset.
trans = blended_transform_factory(ax.transAxes,ax.transData)
bbox = dict(facecolor='white', edgecolor='none',alpha=0.8)
step = 1
shift = 0
for _,row in cut.iterrows():
    spec = lib.library_spectra[row.lib_index,0,:]
    plot(lib.wav,spec.T + shift,color='RoyalBlue',lw=0.5)
    s = "{cps_name:s}, Teff={Teff:.0f}".format(**row)
    text(0.01, 1+shift, s, bbox=bbox, transform=trans)
    shift+=step
grid()
xlabel('Wavelength (Angstroms)')
ylabel('Normalized Flux (Arbitrary Offset)')
# code-stop-spectra-selected-stars
fig.set_tight_layout(True)
fig.savefig('quickstart-spectra-selected-stars.png')
# code-start-pop-library
# Remove the two demo targets from the library so they cannot match
# against themselves below.
idx1 = lib.get_index('190406')
G_star = lib.pop(idx1)
idx2 = lib.get_index('GL699')
M_star = lib.pop(idx2)
# code-stop-pop-library
# code-start-read-spectrum-G
from specmatchemp import spectrum
G_spectrum = spectrum.read_hires_fits('../samples/rj59.1923.fits').cut(5130,5210)
G_spectrum.name = 'HD190406'
# code-stop-read-spectrum-G
# code-start-shift-spectrum-G
from specmatchemp.specmatch import SpecMatch
sm_G = SpecMatch(G_spectrum, lib)
sm_G.shift()
# code-stop-shift-spectrum-G
# code-start-plot-shifts-G
fig = plt.figure(figsize=(10,5))
sm_G.target_unshifted.plot(normalize=True, plt_kw={'color':'forestgreen'}, text='Target (unshifted)')
sm_G.target.plot(offset=0.5, plt_kw={'color':'royalblue'}, text='Target (shifted): HD190406')
sm_G.shift_ref.plot(offset=1, plt_kw={'color':'firebrick'}, text='Reference: '+sm_G.shift_ref.name)
plt.xlim(5160,5200)
plt.ylim(0,2.2)
# code-stop-plot-shifts-G
fig.set_tight_layout(True)
fig.savefig('quickstart-Gstar-shifts.png')
# code-start-shift-spectrum-M
# Load spectrum
M_spectrum = spectrum.read_hires_fits('../samples/rj130.2075.fits').cut(5130,5210)
M_spectrum.name = 'GL699'
# Shift spectrum
sm_M = SpecMatch(M_spectrum, lib)
sm_M.shift()
# Plot shifts
fig = plt.figure(figsize=(10, 5))
sm_M.plot_shifted_spectrum(wavlim=(5160, 5200))
# code-stop-shift-spectrum-M
fig.set_tight_layout(True)
fig.savefig('quickstart-Mstar-shifts.png')
print("Running brute-force match, G-star")
# code-start-match-G
sm_G.match(wavlim=(5140,5200))
# Plot chi-squared surfaces
fig = figure(figsize=(12, 8))
sm_G.plot_chi_squared_surface()
# Indicate library parameters for target star.
axes = fig.axes
axes[0].axvline(G_star[0]['Teff'], color='k')
axes[1].axvline(G_star[0]['radius'], color='k')
axes[2].axvline(G_star[0]['feh'], color='k')
# code-stop-match-G
fig.set_tight_layout(True)
fig.savefig('quickstart-Gstar-chisquared-surface.png')
print("Running linear combinations, G-star")
# code-start-lincomb-G
sm_G.lincomb()
print('Derived Parameters: ')
print('Teff: {0:.0f}, Radius: {1:.2f}, [Fe/H]: {2:.2f}'.format(
    sm_G.results['Teff'], sm_G.results['radius'], sm_G.results['feh']))
print('Library Parameters: ')
print('Teff: {0:.0f}, Radius: {1:.2f}, [Fe/H]: {2:.2f}'.format(
    G_star[0]['Teff'], G_star[0]['radius'], G_star[0]['feh']))
# code-stop-lincomb-G
# code-start-plot-lincomb-G
# Plot HR diagram
fig1 = figure(figsize=(12, 10))
sm_G.plot_references(verbose=True)
# plot target onto HR diagram
axes = fig1.axes
axes[0].plot(G_star[0]['Teff'], G_star[0]['radius'], '*', ms=15, color='red', label='Target')
axes[1].plot(G_star[0]['Teff'], G_star[0]['radius'], '*', ms=15, color='red')
axes[2].plot(G_star[0]['feh'], G_star[0]['radius'], '*', ms=15, color='red')
axes[3].plot(G_star[0]['feh'], G_star[0]['radius'], '*', ms=15, color='red')
axes[0].legend(numpoints=1, fontsize='small', loc='best')
# Plot reference spectra and linear combinations
fig2 = plt.figure(figsize=(12,6))
sm_G.plot_lincomb()
# code-stop-plot-lincomb-G
fig1.set_tight_layout(True)
fig1.savefig('quickstart-Gstar-lincomb-references.png')
fig2.set_tight_layout(True)
fig2.savefig('quickstart-Gstar-lincomb-spectra.png')
# code-start-mstar
# Same match + lincomb sequence as above, now for the M dwarf.
# Perform match
sm_M.match(wavlim=(5140,5200))
# Plot chi-squared surfaces
fig1 = figure(figsize=(12,8))
sm_M.plot_chi_squared_surface()
# Indicate library parameters for target star.
axes = fig1.axes
axes[0].axvline(M_star[0]['Teff'], color='k')
axes[1].axvline(M_star[0]['radius'], color='k')
axes[2].axvline(M_star[0]['feh'], color='k')
# Perform lincomb
sm_M.lincomb()
print('Derived Parameters: ')
print('Teff: {0:.0f}, Radius: {1:.2f}, [Fe/H]: {2:.2f}'.format(
    sm_M.results['Teff'], sm_M.results['radius'], sm_M.results['feh']))
print('Library Parameters: ')
print('Teff: {0:.0f}, Radius: {1:.2f}, [Fe/H]: {2:.2f}'.format(
    M_star[0]['Teff'], M_star[0]['radius'], M_star[0]['feh']))
# Plot HR diagram
fig2 = figure(figsize=(12,10))
sm_M.plot_references(verbose=True)
# plot target onto HR diagram
axes = fig2.axes
axes[0].plot(M_star[0]['Teff'], M_star[0]['radius'], '*', ms=15, color='red', label='Target')
axes[1].plot(M_star[0]['Teff'], M_star[0]['radius'], '*', ms=15, color='red')
axes[2].plot(M_star[0]['feh'], M_star[0]['radius'], '*', ms=15, color='red')
axes[3].plot(M_star[0]['feh'], M_star[0]['radius'], '*', ms=15, color='red')
axes[0].legend(numpoints=1, fontsize='small', loc='best')
# Plot reference spectra and linear combinations
fig3 = plt.figure(figsize=(12,6))
sm_M.plot_lincomb()
# code-stop-mstar
fig1.set_tight_layout(True)
fig1.savefig('quickstart-Mstar-chisquared-surface.png')
fig2.set_tight_layout(True)
fig2.savefig('quickstart-Mstar-lincomb-references.png')
fig3.set_tight_layout(True)
fig3.savefig('quickstart-Mstar-lincomb-spectra.png')
|
samuelyeewlREPO_NAMEspecmatch-empPATH_START.@specmatch-emp_extracted@specmatch-emp-master@docs@quickstart.py@.PATH_END.py
|
{
"filename": "_tickvalssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/indicator/gauge/axis/_tickvalssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``tickvalssrc`` attribute of ``indicator.gauge.axis``."""

    def __init__(
        self, plotly_name="tickvalssrc", parent_name="indicator.gauge.axis", **kwargs
    ):
        # Default edit type may be overridden by the caller via kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super(TickvalssrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@indicator@gauge@axis@_tickvalssrc.py@.PATH_END.py
|
{
"filename": "igm_inoue2014.py",
"repo_name": "ACCarnall/bagpipes",
"repo_path": "bagpipes_extracted/bagpipes-master/bagpipes/models/making/igm_inoue2014.py",
"type": "Python"
}
|
from __future__ import print_function, division, absolute_import
import numpy as np
import os
from astropy.io import fits
""" This code is called once when Bagpipes is first installed in order
to generate the IGM absorption table which is subsequently used for
all IGM calculations. """
# Directory holding the precomputed model grids shipped with bagpipes.
path = os.path.dirname(os.path.realpath(__file__)) + "/../grids"

# Lyman-series coefficients from Table 2 of Inoue et al. (2014): 39 rows,
# one per transition.  As indexed below, column 1 is the transition
# wavelength and columns 2-6 the piecewise power-law amplitudes.
coefs = np.loadtxt(path + "/lyman_series_coefs_inoue_2014_table2.txt")
def get_Inoue14_trans(rest_wavs, z_obs):
    """ Calculate IGM transmission using Inoue et al. (2014) model.

    Parameters
    ----------
    rest_wavs : float or numpy.ndarray
        Rest-frame wavelength(s); per the Inoue et al. (2014) model these
        are in Angstroms (the Lyman limit is taken as 911.8 below).
    z_obs : float
        Redshift of the observed object.

    Returns
    -------
    numpy.ndarray
        Transmission exp(-tau) at each wavelength, combining the
        Lyman-series (LS) and Lyman-continuum (LC) optical depths of the
        Lyman-alpha forest (LAF) and damped-Lyman-alpha (DLA) components.
    """
    # Accept scalar input; the rest of the function works on arrays.
    if isinstance(rest_wavs, float):
        rest_wavs = np.array([rest_wavs])

    # One row per Lyman-series transition (39 rows in the coefficient
    # table) for the series terms; flat arrays for the continuum terms.
    tau_LAF_LS = np.zeros((39, rest_wavs.shape[0]))
    tau_DLA_LS = np.zeros((39, rest_wavs.shape[0]))
    tau_LAF_LC = np.zeros(rest_wavs.shape[0])
    tau_DLA_LC = np.zeros(rest_wavs.shape[0])

    # Populate tau_LAF_LS: piecewise power laws in observed wavelength,
    # with break points that depend on the redshift regime.
    for j in range(39):
        if z_obs < 1.2:
            wav_slice = ((rest_wavs*(1.+z_obs) > coefs[j, 1])
                         & (rest_wavs*(1.+z_obs)
                            < (1+z_obs)*coefs[j, 1]))

            tau_LAF_LS[j, wav_slice] = (coefs[j, 2]
                                        * (rest_wavs[wav_slice]
                                           * (1.+z_obs)/coefs[j, 1])**1.2)

        elif z_obs < 4.7:
            wav_slice_1 = ((rest_wavs*(1.+z_obs) > coefs[j, 1])
                           & (rest_wavs*(1.+z_obs) < 2.2*coefs[j, 1]))

            wav_slice_2 = ((rest_wavs*(1.+z_obs) > 2.2*coefs[j, 1])
                           & (rest_wavs*(1.+z_obs)
                              < (1+z_obs)*coefs[j, 1]))

            tau_LAF_LS[j, wav_slice_1] = (coefs[j, 2]
                                          * (rest_wavs[wav_slice_1]
                                             * (1.+z_obs)/coefs[j, 1])**1.2)

            tau_LAF_LS[j, wav_slice_2] = (coefs[j, 3]
                                          * (rest_wavs[wav_slice_2]
                                             * (1.+z_obs)/coefs[j, 1])**3.7)

        else:
            wav_slice_1 = ((rest_wavs*(1.+z_obs) > coefs[j, 1])
                           & (rest_wavs*(1.+z_obs) < 2.2*coefs[j, 1]))

            wav_slice_2 = ((rest_wavs*(1.+z_obs) > 2.2*coefs[j, 1])
                           & (rest_wavs*(1.+z_obs) < 5.7*coefs[j, 1]))

            wav_slice_3 = ((rest_wavs*(1.+z_obs) > 5.7*coefs[j, 1])
                           & (rest_wavs*(1.+z_obs)
                              < (1+z_obs)*coefs[j, 1]))

            tau_LAF_LS[j, wav_slice_1] = (coefs[j, 2]
                                          * (rest_wavs[wav_slice_1]
                                             * (1.+z_obs)/coefs[j, 1])**1.2)

            tau_LAF_LS[j, wav_slice_2] = (coefs[j, 3]
                                          * (rest_wavs[wav_slice_2]
                                             * (1.+z_obs)/coefs[j, 1])**3.7)

            tau_LAF_LS[j, wav_slice_3] = (coefs[j, 4]
                                          * (rest_wavs[wav_slice_3]
                                             * (1.+z_obs)/coefs[j, 1])**5.5)

    # Populate tau_DLA_LS: same structure, two redshift regimes.
    for j in range(39):
        if z_obs < 2.0:
            wav_slice = ((rest_wavs*(1.+z_obs) > coefs[j, 1])
                         & (rest_wavs*(1.+z_obs)
                            < (1+z_obs)*coefs[j, 1]))

            tau_DLA_LS[j, wav_slice] = (coefs[j, 5]
                                        * (rest_wavs[wav_slice]
                                           * (1.+z_obs)/coefs[j, 1])**2.0)

        else:
            wav_slice_1 = ((rest_wavs*(1.+z_obs) > coefs[j, 1])
                           & (rest_wavs*(1.+z_obs) < 3.0*coefs[j, 1]))

            wav_slice_2 = ((rest_wavs*(1.+z_obs) > 3.0*coefs[j, 1])
                           & (rest_wavs*(1.+z_obs) < (1+z_obs)
                              * coefs[j, 1]))

            tau_DLA_LS[j, wav_slice_1] = (coefs[j, 5]
                                          * (rest_wavs[wav_slice_1]
                                             * (1.+z_obs)/coefs[j, 1])**2.0)

            tau_DLA_LS[j, wav_slice_2] = (coefs[j, 6]
                                          * (rest_wavs[wav_slice_2]
                                             * (1.+z_obs)/coefs[j, 1])**3.0)

    # Populate tau_LAF_LC: Lyman-continuum absorption blueward of the
    # Lyman limit (911.8 Angstroms in this model).
    if z_obs < 1.2:
        wav_slice = ((rest_wavs*(1.+z_obs) > 911.8)
                     & (rest_wavs*(1.+z_obs) < 911.8*(1.+z_obs)))

        tau_LAF_LC[wav_slice] = (0.325*((rest_wavs[wav_slice]
                                         * (1.+z_obs)/911.8)**1.2
                                        - (((1+z_obs)**-0.9)
                                           * (rest_wavs[wav_slice]
                                              * (1.+z_obs)/911.8)**2.1)))

    elif z_obs < 4.7:
        wav_slice_1 = ((rest_wavs*(1.+z_obs) > 911.8)
                       & (rest_wavs*(1.+z_obs) < 911.8*2.2))

        wav_slice_2 = ((rest_wavs*(1.+z_obs) > 911.8*2.2)
                       & (rest_wavs*(1.+z_obs) < 911.8*(1.+z_obs)))

        tau_LAF_LC[wav_slice_1] = (((2.55*10**-2)*((1+z_obs)**1.6)
                                    * (rest_wavs[wav_slice_1]
                                       * (1.+z_obs)/911.8)**2.1)
                                   + (0.325*((rest_wavs[wav_slice_1]
                                              * (1.+z_obs)/911.8)**1.2))
                                   - (0.25*((rest_wavs[wav_slice_1]
                                             * (1.+z_obs)/911.8)**2.1)))

        tau_LAF_LC[wav_slice_2] = ((2.55*10**-2)
                                   * (((1.+z_obs)**1.6)
                                      * ((rest_wavs[wav_slice_2]
                                          * (1.+z_obs)/911.8)**2.1)
                                      - ((rest_wavs[wav_slice_2]
                                          * (1.+z_obs)/911.8)**3.7)))

    else:
        wav_slice_1 = ((rest_wavs*(1.+z_obs) > 911.8)
                       & (rest_wavs*(1.+z_obs) < 911.8*2.2))

        wav_slice_2 = ((rest_wavs*(1.+z_obs) > 911.8*2.2)
                       & (rest_wavs*(1.+z_obs) < 911.8*5.7))

        wav_slice_3 = ((rest_wavs*(1.+z_obs) > 911.8*5.7)
                       & (rest_wavs*(1.+z_obs) < 911.8*(1.+z_obs)))

        tau_LAF_LC[wav_slice_1] = (((5.22*10**-4)*((1+z_obs)**3.4)
                                    * (rest_wavs[wav_slice_1]
                                       * (1.+z_obs)/911.8)**2.1)
                                   + (0.325*(rest_wavs[wav_slice_1]
                                             * (1.+z_obs)/911.8)**1.2)
                                   - ((3.14*10**-2)*((rest_wavs[wav_slice_1]
                                                      * (1.+z_obs)/911.8)**2.1)))

        tau_LAF_LC[wav_slice_2] = (((5.22*10**-4)*((1+z_obs)**3.4)
                                    * (rest_wavs[wav_slice_2]
                                       * (1.+z_obs)/911.8)**2.1)
                                   + (0.218*((rest_wavs[wav_slice_2]
                                              * (1.+z_obs)/911.8)**2.1))
                                   - ((2.55*10**-2)*((rest_wavs[wav_slice_2]
                                                      * (1.+z_obs)
                                                      / 911.8)**3.7)))

        tau_LAF_LC[wav_slice_3] = ((5.22*10**-4)
                                   * (((1+z_obs)**3.4)
                                      * (rest_wavs[wav_slice_3]
                                         * (1.+z_obs)/911.8)**2.1
                                      - (rest_wavs[wav_slice_3]
                                         * (1.+z_obs)/911.8)**5.5))

    # Populate tau_DLA_LC: DLA Lyman-continuum contribution.
    if z_obs < 2.0:
        wav_slice = ((rest_wavs*(1.+z_obs) > 911.8)
                     & (rest_wavs*(1.+z_obs) < 911.8*(1.+z_obs)))

        tau_DLA_LC[wav_slice] = (0.211*((1+z_obs)**2.)
                                 - (7.66*10**-2)*(((1+z_obs)**2.3)
                                                  * (rest_wavs[wav_slice]
                                                     * (1.+z_obs)/911.8)**-0.3)
                                 - 0.135*((rest_wavs[wav_slice]
                                           * (1.+z_obs)/911.8)**2.0))

    else:
        wav_slice_1 = ((rest_wavs*(1.+z_obs) > 911.8)
                       & (rest_wavs*(1.+z_obs) < 911.8*3.0))

        wav_slice_2 = ((rest_wavs*(1.+z_obs) > 911.8*3.0)
                       & (rest_wavs*(1.+z_obs) < 911.8*(1.+z_obs)))

        tau_DLA_LC[wav_slice_1] = (0.634 + (4.7*10**-2)*(1.+z_obs)**3.
                                   - ((1.78*10**-2)*((1.+z_obs)**3.3)
                                      * (rest_wavs[wav_slice_1]
                                         * (1.+z_obs)/911.8)**-0.3)
                                   - (0.135*(rest_wavs[wav_slice_1]
                                             * (1.+z_obs)/911.8)**2.0)
                                   - 0.291*(rest_wavs[wav_slice_1]
                                            * (1.+z_obs)/911.8)**-0.3)

        tau_DLA_LC[wav_slice_2] = ((4.7*10**-2)*(1.+z_obs)**3.
                                   - ((1.78*10**-2)*((1.+z_obs)**3.3)
                                      * (rest_wavs[wav_slice_2]
                                         * (1.+z_obs)/911.8)**-0.3)
                                   - ((2.92*10**-2)
                                      * (rest_wavs[wav_slice_2]
                                         * (1.+z_obs)/911.8)**3.0))

    # Sum the per-transition series terms, then combine all four optical
    # depth components into the final transmission.
    tau_LAF_LS_sum = np.sum(tau_LAF_LS, axis=0)
    tau_DLA_LS_sum = np.sum(tau_DLA_LS, axis=0)

    tau = tau_LAF_LS_sum + tau_DLA_LS_sum + tau_LAF_LC + tau_DLA_LC

    return np.exp(-tau)
def make_table(z_array, rest_wavs):
    """ Make up the igm absorption table used by bagpipes.

    Evaluates the Inoue et al. (2014) transmission on the supplied grid
    and writes it to ``<grids>/d_igm_grid_inoue14.fits`` with HDUs
    "trans" (2D grid), "wavs" and "zred" (the grid axes).

    Parameters
    ----------
    z_array : numpy.ndarray
        Redshifts at which to evaluate the transmission (grid rows).
    rest_wavs : numpy.ndarray
        Rest-frame wavelengths of the grid (grid columns).
    """
    print("BAGPIPES: Generating IGM absorption table.")

    d_IGM_grid = np.zeros((z_array.shape[0], rest_wavs.shape[0]))

    for i in range(z_array.shape[0]):
        d_IGM_grid[i, :] = get_Inoue14_trans(rest_wavs, z_array[i])

    hdulist = fits.HDUList(hdus=[fits.PrimaryHDU(),
                                 fits.ImageHDU(name="trans", data=d_IGM_grid),
                                 fits.ImageHDU(name="wavs", data=rest_wavs),
                                 fits.ImageHDU(name="zred", data=z_array)])

    # Remove any existing grid with os.remove rather than shelling out to
    # "rm": portable, and safe with unusual characters in the path.
    out_file = path + "/d_igm_grid_inoue14.fits"
    if os.path.exists(out_file):
        os.remove(out_file)

    hdulist.writeto(out_file)
def test():
    """Reproduce the transmission figure from Inoue et al. (2014):
    curves for z = 2..6 as a visual check of get_Inoue14_trans."""
    import matplotlib.pyplot as plt

    plt.figure()

    # The wavelength grid is the same for every curve, so build it once.
    rest_wavs = np.arange(0.5, 1500., 0.5)

    for redshift in range(2, 7):
        z_obs = float(redshift)
        trans = get_Inoue14_trans(rest_wavs, z_obs)
        plt.plot(rest_wavs*(1+z_obs), trans, color="black")

    plt.xlim(3000., 9000.)
    plt.ylim(0., 1.)
    plt.xlabel("$\\mathrm{Observed\\ Wavelength\\ (\\AA)}$")
    plt.ylabel("Transmission")
    plt.show()
|
ACCarnallREPO_NAMEbagpipesPATH_START.@bagpipes_extracted@bagpipes-master@bagpipes@models@making@igm_inoue2014.py@.PATH_END.py
|
{
"filename": "test_teff_retrieval.py",
"repo_name": "mwanakijiji/rrlfe",
"repo_path": "rrlfe_extracted/rrlfe-main/rrlfe/tests/test_teff_retrieval.py",
"type": "Python"
}
|
#!/usr/bin/env python
# coding: utf-8
# This makes plots showing the effective temperature retrievals based on synthetic spectra
# produced by R.W.
# Created from parent restacking_scraped_data.ipynb 2021 March 17 by E.S.
import pandas as pd
import os, sys
from astropy.io.fits import getdata
from configparser import ConfigParser, ExtendedInterpolation
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Make the package root importable when this test module is run directly,
# by prepending the parent directory to sys.path.
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../"))
sys.path.insert(0, target_dir)
from . import *
from rrlfe import teff_retrieval
from conf import *
# configuration data for reduction
config_gen = ConfigParser(interpolation=ExtendedInterpolation()) # for parsing values in .init file
# config for reduction to find a, b, c, d
config_gen.read(os.path.join(os.path.dirname(__file__), '../conf', 'config_gen.ini'))
def test_TempVsBalmer(test_df_poststack_file_name_read = config_gen["data_dirs"]["TEST_DIR_SRC"]+config_gen["file_names"]["TEST_RESTACKED_EW_DATA_W_METADATA_STANDALONE"],
                      test_df_poststack_file_name_write = config_gen["data_dirs"]["TEST_DIR_SRC"]+config_gen["file_names"]["TEST_RESTACKED_EW_DATA_GOOD_ONLY_TEFFFIT"],
                      test_teff_data_write = config_gen["data_dirs"]["TEST_DIR_BIN"] + config_gen["file_names"]["TEST_TREND_TEFF_VS_BALMER"]):
    """Run the TempVsBalmer pipeline step end-to-end in test mode and
    check that it hands back a DataFrame carrying fitted temperatures."""
    step = teff_retrieval.TempVsBalmer(module_name="test1",
                                       file_ew_poststack_read=test_df_poststack_file_name_read,
                                       file_ew_tefffit_write=test_teff_data_write,
                                       plot_tefffit_write="dummy.png",
                                       data_tefffit_write=test_df_poststack_file_name_write,
                                       plot=False,
                                       test_flag=True)
    result = step.run_step(attribs=config_gen)

    # The step must return a pandas DataFrame with the new best-fit
    # temperature column attached.
    assert isinstance(result, pd.DataFrame)
    assert 'teff_bestfit' in result.keys()
# needs fake data
def test_line_fit_temp_range(test_df_poststack_file_name_read = config_gen["data_dirs"]["TEST_DIR_SRC"]+config_gen["file_names"]["TEST_RESTACKED_EW_DATA_W_METADATA_STANDALONE"]):
    """Fit a line of Teff against net Balmer EW over a restricted
    temperature range and check the slope and intercept against
    reference values."""
    ew_table = pd.read_csv(test_df_poststack_file_name_read)

    # Fit Teff (y) as a linear function of the net Balmer EW (x),
    # restricted to 5900-7350 K.
    teff_vals = ew_table["teff"].values.astype(float)
    balmer_ews = ew_table["EW_Balmer"].values.astype(float)
    slope, slope_err, intercept, intercept_err = teff_retrieval.line_fit_temp_range(
        x_data_pass=balmer_ews, y_data_pass=teff_vals, t_min=5900, t_max=7350)

    # Regression values established from a known-good run.
    assert round(slope, 2) == 192.76
    assert round(intercept, 2) == 5433.73
|
mwanakijijiREPO_NAMErrlfePATH_START.@rrlfe_extracted@rrlfe-main@rrlfe@tests@test_teff_retrieval.py@.PATH_END.py
|
{
"filename": "_textcasesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnelarea/textfont/_textcasesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcasesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``textcasesrc`` attribute of ``funnelarea.textfont``."""

    def __init__(
        self, plotly_name="textcasesrc", parent_name="funnelarea.textfont", **kwargs
    ):
        # Default edit type may be overridden by the caller via kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super(TextcasesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnelarea@textfont@_textcasesrc.py@.PATH_END.py
|
{
"filename": "periodograms-verifying-the-location-of-a-signal.ipynb",
"repo_name": "lightkurve/lightkurve",
"repo_path": "lightkurve_extracted/lightkurve-main/docs/source/tutorials/3-science-examples/periodograms-verifying-the-location-of-a-signal.ipynb",
"type": "Jupyter Notebook"
}
|
# Verifying the location of a periodic signal in the pixel data
## Learning Goals
By the end of this tutorial, you will:
* Understand the causes of signal blending and aperture contamination in _Kepler_ and _K2_ data.
* Be able to use Lightkurve's [plot_pixels()](https://docs.lightkurve.org/reference/api/lightkurve.KeplerTargetPixelFile.plot_pixels.html?highlight=plot_pixels) function to visually identify the pixel source of a signal.
* Be able to implement difference imaging to find the pixel source of a sinusoidal signal.
## Introduction
This tutorial is part of a series on handling _Kepler_ and _K2_ data with Astropy and Lightkurve. To work through this tutorial, you should be familiar with downloading and handling both **light curves** and **target pixel files** with Lightkurve, and you should have experience working with **periodograms**. We'll use light curves and periodograms to detect signal, and follow that up with detailed analysis on the pixel scale to pinpoint the signal's source.
Some useful terms to keep in mind when working with signal verification are _contamination_ and _blending_. These terms are often used interchangeably. Here, we'll use blending to refer to any scenario where flux from two or more targets become visible in one target's aperture. We use contamination to refer, more specifically, to the signal that erroneously enters the aperture.
## Imports
We'll use [Lightkurve](https://docs.lightkurve.org/) for downloading and handling _Kepler_ data throughout this tutorial. We'll also use [NumPy](https://numpy.org/) to handle arrays for aperture masks, and [Matplotlib](https://matplotlib.org) for visualizing data.
```python
import lightkurve as lk
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## 1. Background
The _Kepler_ space telescope observed with 4x4 arcsecond square pixels. This means that if two stars are within the same _Kepler_ pixel, it is difficult to identify which is the source of an observed signal. Fortunately, these coincidences are unlikely. But the _Kepler_ mission observed areas close to the Galactic plane, and both _Kepler_ and _K2_ observed stellar clusters, all of which are visually crowded regions. Due to crowding, there are still many cases where the true source of a signal is unclear.
The process of signal verification begins with a light curve: first, we must detect our signal. In the case of potential exoplanet transits, these will be evident in the light curve; for other phenomena, including binarity and stellar variability, we might need a periodogram to pick up on the signal. To make sure our transits really do belong to a planet, or if there's any uncertainty about the source of a stellar signal (such as, is the star in a crowded region? Is there a bright star nearby? Are we seeing multiple signals?), we need to look at the target pixel files, which will give us a more complete picture, and help us identify nearby contaminating sources.
## 2. Pixel Level Analysis
The most basic method of signal verification is to look at the pixels. In this section, we'll use tools included in Lightkurve to examine light curves and periodograms in individual pixels and help us identify the source of a signal.
### 2.1 Initial analysis
KIC 2435971 is a star of [_Kepler_ magnitude](https://keplerscience.arc.nasa.gov/the-kepler-space-telescope.html#flux-calibration) Kp = 14.7, with a massive signal. It's not impossible that this signal comes from the star, but KIC 2435971 is in the field of open cluster NGC 6791, which means it's subject to a higher level of crowding than much of the _Kepler_ field. This could be a case of contamination; to be absolutely certain, we're going to use this star for our signal verification exercise. Let's begin by downloading a light curve; then we can produce a Lomb-Scargle Periodogram to search for repeating signals.
```python
lcs = lk.search_lightcurve('KIC 2435971', author='Kepler', cadence="long").download_all()
```
```python
lc = lcs.stitch().remove_outliers()
pg = lc.to_periodogram()
pg.plot();
```
These high-amplitude, narrow peaks in the periodogram suggest a compact binary star.
### 2.2 Pixel-level analysis
Often, the identification of a much brighter target in the frame is enough to confirm contamination. Let's begin by looking at the target pixel file (TPF) we're going to be working with:
```python
tpf = lk.search_targetpixelfile('KIC 2435971', author='Kepler', quarter=9).download()
```
```python
tpf.plot();
```
As you can see, there's another star very close by, at the top of the frame. This star is brighter than our target in the center, and could conceivably produce a high-amplitude periodic signal. But it's not a huge difference in flux, so we should double-check before we go ahead and claim that this is our contaminant.
Luckily, there is a utility in Lightkurve to deal with this scenario. We can look at the light curves — and periodograms — in each individual pixel, using the `plot_pixels()` function.
Let's have a look at what the function does at its most basic:
```python
tpf.plot_pixels();
```
These are the light curves in every pixel of this TPF, for Quarter 9. We can't tell much just by looking at this, but we can give [plot_pixels()](https://docs.lightkurve.org/reference/api/lightkurve.KeplerTargetPixelFile.plot_pixels.html?highlight=plot_pixels) a corrector function to flatten these light curves. To create the corrector function, we use a Python construction called a [lambda function](https://docs.python.org/3/howto/functional.html#small-functions-and-the-lambda-expression). This kind of function is known as an "anonymous" function, as it has no name. Lambda functions are just one line of code, so they're very useful for when we want a function to take another function as an argument. Here, we take a variable `x` and, assuming it's a `LightCurve` object, we can apply methods like [.remove_outliers()](https://docs.lightkurve.org/reference/api/lightkurve.LightCurve.remove_outliers.html?highlight=remove_outliers) as normal.
We can also overlay the TPF colors to remind us of the stars we're looking at:
```python
tpf.plot_pixels(corrector_func=lambda x: x.remove_nans().flatten().remove_outliers(), show_flux=True);
```
Looking at it like this, we can start to see some evidence of the signal from our periodogram above, but not enough to make a call. Let's go back to `plot_pixels()`: we can tell the function to compute a periodogram for each pixel, which should make it clear where the signal is strongest. This time, we're also going to overlay the pipeline aperture, which is the selection of pixels that went into producing the light curve for this quarter of data. The pipeline aperture is shown by red boxes around all included pixels.
```python
tpf.plot_pixels(corrector_func=lambda x: x.remove_nans().flatten().remove_outliers(), periodogram=True, show_flux=True, aperture_mask='pipeline');
```
We can see the two high-amplitude peaks in most of the pixels in this TPF. The key feature is the ratio of that signal's amplitude to the noise level: pixels with higher signal-to-noise ratios contain more flux from the source of the signal. This is often something we can check by eye; here, we can confirm that the signal is coming from our target star, KIC 2435971. The bright star at the top of the frame, in the yellow pixel, shows a significantly higher noise level than the green pixels below it, and the placement of the pipeline aperture reflects this.
In fact, KIC 2435971 is a [known eclipsing binary](http://simbad.u-strasbg.fr/simbad/sim-basic?Ident=KIC+2435971). This example challenges the assumption that any high-amplitude signal usually comes from the brightest star in the frame, but nevertheless shows that it's always useful to verify our signal, especially in crowded fields. If we wanted to study the bright star, we could perform **custom aperture photometry** (see the dedicated tutorial on this topic), though we wouldn't be able to exclude all of this signal, as it has such a high amplitude and contaminates so many of these pixels.
### 2.3 Identifying a contaminant
For this exercise, we're going to switch to a different star. KIC 7024511 was flagged as a _Kepler_ Object of Interest (KOI) in Quarter 6 of the mission, and given the designation KOI 311. This means that it's a potential exoplanet host, based on transits detected. Let's have a look at one quarter of data:
```python
lc_koi = lk.search_lightcurve('KIC 7024511', author='Kepler', quarter=11).download(quality_bitmask='hard').flatten()
lc_koi.plot();
```
It's hard to pick out the transits by eye, but you should be able to see one point lower than the others around 1030 BKJD (Barycentric *Kepler* Julian Date). This dip has a depth of less than 0.01%, which is consistent with the signal coming from an exoplanet. Unfortunately, it's also consistent with the diluted signal of a nearby eclipsing binary — dilution here referring to the diffuse flux towards the edges of a star's spread on the detector, where the amplitude of any variable signal can decrease. We can look at the TPF using `plot_pixels()` to confirm whether or not this is the case:
```python
tpf_koi = lk.search_targetpixelfile('KIC 7024511', author='Kepler', quarter=11).download()
tpf_koi.plot_pixels(aperture_mask='pipeline', corrector_func=lambda x: x.remove_nans().flatten());
```
In this case, we can clearly see an eclipse to the left of our target. The eclipsing binary is outside of the pipeline aperture, but the signal is strong enough to have contaminated it. And sure enough, if we check on the [NASA Exoplanet Archive](https://exoplanetarchive.ipac.caltech.edu/cgi-bin/DisplayOverview/nph-DisplayOverview?objname=K00311.01&type=KEPLER_CANDIDATE), KOI 311 was designated a false positive in the first data release it was included in.
We can double-check this by using Lightkurve's [cone search](https://docs.lightkurve.org/reference/api/lightkurve.search.search_lightcurvefile.html#lightkurve.search.search_lightcurvefile) to look for the eclipsing binary in question. So long as there's a light curve for this contaminant — and we can reasonably expect one, for such a star — we'll be able to find it with Lightkurve's search function.
(As an aside, if you run this notebook yourself, you can use the [interact_sky()](https://docs.lightkurve.org/reference/api/lightkurve.KeplerTargetPixelFile.interact_sky.html?highlight=interact_sky#lightkurve.KeplerTargetPixelFile.interact_sky) function, which returns an interactive TPF widget with targets labelled by _Gaia_ ID. This includes many targets which were not collected by _Kepler_ or _K2_.)
Making a rough guess from the size of the TPF above, we set a search radius of 20 arcseconds. And we're only going to search Quarter 11, as otherwise the function will return an entry for every available quarter.
```python
lk.search_lightcurve('KIC 7024511', radius=20, quarter=11)
```
Sure enough, there's a nearby target. The **distance** column in the search result tells us that this star is about 1.8 arcseconds away. Let's have a look at it:
```python
lc_contam = lk.search_lightcurve('KIC 7024530', author='Kepler', quarter=11).download()
lc_contam.plot();
```
That's our eclipse, and it seems like there's a rotational period here too. We can also confirm this by looking in the [_Kepler_ Eclipsing Binary Catalog](http://keplerebs.villanova.edu/overview/?k=7024530), or on [Simbad](http://simbad.u-strasbg.fr/simbad/sim-basic?Ident=KIC+7024530). Simbad shows an image of the contaminant, where we can see KOI 311, fainter, just beside it.
## 3. Advanced: Difference Imaging
The focus of most _Kepler_ and _K2_ signal verification research has been in validating exoplanets. Often, this is done by measuring and tracking image centroids. A centroid is the weighted average along both axes of the image, which measures the point where most flux is concentrated. If a true transit is observed, the centroid of the image will shift in time with it ([Batalha et al., 2010](https://iopscience.iop.org/article/10.1088/2041-8205/713/2/L103/pdf)). Another technique, and the focus of this section, is difference imaging ([Bryson et al., 2013](https://iopscience.iop.org/article/10.1086/671767/pdf)). Difference imaging has been used to validate signals from exocomets ([Rappaport et al., 2018](https://arxiv.org/pdf/1708.06069.pdf)), flaring stars ([Yang et al., 2017](https://iopscience.iop.org/article/10.3847/1538-4357/aa8ea2/pdf)), and binary systems ([Colman et al., 2017](https://arxiv.org/pdf/1705.00621.pdf)).
For vetting exoplanet candidates, difference imaging begins with a selection of flux measurements, both in- and out-of-phase with the transit. Using the frames from TPF images, we take an average of each collection of frames and subtract the in-phase fluxes from the out-of-phase fluxes, resulting in a _difference image_. We can then measure the centroid of the difference image, which will move outside of the optimal aperture if the star is contaminated. We can also visually identify the pixel where the variable flux is "brightest."
We're going to work through a basic case here, which is applicable to a broad variety of stellar signals. For any periodic and quasi-sinusoidal signal, difference imaging works by selecting flux frames at timestamps around the maxima and minima of the signal, which is equivalent to working in- and out-of-phase with a transit. The result is a pixel image where the brightest pixels are those where the periodic signal is strongest.
### 3.1 Determining the signal period
Let's go back to KIC 2435971. We're going to use the TPF we downloaded above for difference imaging; as it's a pixel-level method, we can only use one quarter or campaign of _Kepler_ data at a time. But for difference imaging to work well, we need as much data as possible to extract the period for differencing, so we'll use the stitched light curve we prepared earlier. We've already downloaded the data above, so let's remind ourselves of what the periodogram looks like:
```python
pg.plot();
```
From this, we can extract the period of the highest amplitude signal; you will have seen the [period_at_max_power](https://docs.lightkurve.org/reference/api/lightkurve.periodogram.Periodogram.period_at_max_power.html?highlight=period_at_max_power) argument in one of the periodogram tutorials.
```python
peak = pg.frequency_at_max_power
period = pg.period_at_max_power
print(f'Frequency: {peak:.3f}\nPeriod: {period:.3f}')
```
### 3.2 Determining the maximum and minimum phase times
To find our maxima and minima, we're going to start by phase-folding the light curve. This will help us identify the maxima and minima of the periodic signal.
Note that when we phase-fold the curve below, we use the `epoch_time` argument to shift the phase curve's zero position. This helps us pick out the maxima and minima of the phase curve more clearly; however, it's a matter of trial and error to choose a suitable epoch time for any given target. In many cases, it will suffice not to set the argument at all. An alternative to experimenting with the epoch time is appending the phased light curve to itself, which guarantees at least one clear maximum and one clear minimum. This is a better method for an automated difference imaging pipeline, but is more complicated to implement.
```python
folded = lc.fold(period, epoch_time=0.5)
folded.plot();
```
This looks promising, but there's still a great deal of noise in the light curve. When we smooth the light curve by putting it into 0.001-day bins, we can see the sinusoidal trend more clearly:
```python
folded2 = folded.bin(time_bin_size=0.001)
folded2.plot();
```
We're going to identify the maxima and minima using the binned phase curve. Once we have identified two ranges in phase space, we'll collect the timestamps within those ranges from the original phase curve. Then we'll compare those timestamps to our TPF data to pick out the corresponding flux frames for each range.
It can be tricky to decide how much flux to collect on either side of the maxima and minima. A good value is a tolerance of 5% of the phase curve, which means that we end up using 10% of the light curve at maximum phase and 10% of the light curve at minimum phase for the difference image. This ensures that we're using enough to get a reliable difference image, given some uncertainty in where the maxima and minima actually are, but not so much that the difference image is meaningless.
This next part uses some Python tricks to help us quickly determine which timestamps to use in calculating our difference image. If you find the code below unfamiliar, it might be helpful to read up on [NumPy `where()`](https://numpy.org/doc/stable/reference/generated/numpy.where.html) and [list comprehension](https://www.pythonforbeginners.com/basics/list-comprehensions-in-python).
```python
full_phase_range = folded2.phase[-1].value - folded2.phase[0].value
tolerance = 0.05 * full_phase_range
min_phase = folded2.time[np.argmin(folded2.flux)].value
max_phase = folded2.time[np.argmax(folded2.flux)].value
min_timestamps = folded.time_original[np.where((folded.time > min_phase - tolerance)
& (folded.time < min_phase + tolerance))].value
max_timestamps = folded.time_original[np.where((folded.time > max_phase - tolerance)
& (folded.time < max_phase + tolerance))].value
```
```python
one_quarter_minima = [f for (f, t) in zip(tpf.flux.value, tpf.time.value) if t in min_timestamps]
one_quarter_maxima = [f for (f, t) in zip(tpf.flux.value, tpf.time.value) if t in max_timestamps]
```
### 3.3 Computing the difference image
Now that we have identified the maximum and minimum phase timestamps to use, we can calculate our difference image! We're also going to calculate an average of the whole quarter, and we can use Matplotlib to display them side by side for clear comparison. Note that we're also flipping the images, so that they have the same orientation as the Lightkurve TPF plots.
```python
avg_image = np.nanmean(tpf.flux.value, axis=0)
diff_image = np.abs(np.nanmean(one_quarter_maxima, axis=0) - np.nanmean(one_quarter_minima, axis=0))
```
```python
fig, ax = plt.subplots(1,2)
ax[0].imshow(np.flipud(avg_image))
ax[0].set_title('Quarter 9 average')
ax[1].imshow(np.flipud(diff_image))
ax[1].set_title('Quarter 9 difference image')
fig.set_size_inches((15,6))
```
Let's take a moment to think about what these images tell us. In the average image, we can see our two stars, the target and the brighter star at the top of the frame. In the difference image, the pixel with the highest difference flux is one of the central pixels, indicating that the signal is coming from the target star, just as we saw when we looked at pixel-level periodograms with `plot_pixels()`.
We can still see a little bit of signal in some other pixels. This is down to noise, which we saw in our first folded light curve. Even with a clear signal like the one from KIC 2435971, most difference images will show some degree of noise. But it's good to note that the difference flux in the pixel that hosts the bright nearby star is greatly diminished, as we would expect.
So, since we can get this information from `plot_pixels()`, why would we want to go to the effort of difference imaging? Sometimes the signal in a periodogram may not be visually clear from using `plot_pixels()`, but there would still be enough signal for it to show up in a difference image. Difference imaging is also useful for clarifying association in a crowded field, or where bright stars are involved. If a signal is spread out across a lot of pixels, difference imaging can pinpoint where it's strongest; this is particularly evident in the case of KIC 2435971. And of course, it's always good to confirm our conclusions using a different method!
## About this Notebook
**Author:** [Isabel Colman](http://orcid.org/0000-0001-8196-516X) (`isabel.colman@sydney.edu.au`)
**Updated on:** 2020-09-15
## Citing Lightkurve and Astropy
If you use `lightkurve` or `astropy` for published research, please cite the authors. Click the buttons below to copy BibTeX entries to your clipboard.
```python
lk.show_citation_instructions()
```
<img style="float: right;" src="https://raw.githubusercontent.com/spacetelescope/notebooks/master/assets/stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="Space Telescope Logo" width="200px"/>
|
lightkurveREPO_NAMElightkurvePATH_START.@lightkurve_extracted@lightkurve-main@docs@source@tutorials@3-science-examples@periodograms-verifying-the-location-of-a-signal.ipynb@.PATH_END.py
|
{
"filename": "qlGrid.py",
"repo_name": "petigura/terra",
"repo_path": "terra_extracted/terra-master/scripts/qlGrid.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
Quick-look plot of a grid-search periodogram stored in an HDF5 file.

Reads the ``RES`` table from the input file and plots signal-to-noise
(``s2n``) against trial period (``Pcad`` converted to days) twice: once
as a single small panel showing the full range, and once as a large
panel where the periodogram is cut into ``nsteps`` segments that are
stacked with vertical offsets for easier visual inspection.
"""
from argparse import ArgumentParser
from matplotlib.pylab import *
from matplotlib.gridspec import GridSpec
import h5py
import keptoy

prsr = ArgumentParser()
prsr.add_argument('inp', type=str, help='input file')
prsr.add_argument('out', type=str, help='out file')
args = prsr.parse_args()

nsteps = 10  # number of stacked segments in the big panel

# Open read-only and pull the needed columns into memory so the file can
# be closed before any plotting happens (the original left it open with
# the default read/write mode).
with h5py.File(args.inp, 'r') as h5:
    res = h5['RES']
    # trial period in days: Pcad is in cadences, keptoy.lc is the cadence length
    P = res['Pcad'] * keptoy.lc
    s2n = res['s2n']

gs = GridSpec(4, 1)
fig = figure(figsize=(18, 10))
axSm = fig.add_subplot(gs[0])   # small panel: full periodogram
axBg = fig.add_subplot(gs[1:])  # big panel: stacked, offset segments

axSm.plot(P, s2n)

# Horizontal and vertical shifts used to tile the periodogram into
# nsteps rows; the y-shift spans the bulk (1st-99th percentile) of s2n.
xshft = int(P.ptp()) / nsteps
yshft = np.percentile(s2n, 99) - np.percentile(s2n, 1)
rcParams.update({'axes.color_cycle': ['b', 'r']})  # alternate segment colors
for i in range(nsteps):
    axBg.plot(P - xshft * i, s2n - yshft * i)
xlim(P[0], P[0] + xshft)
tight_layout()
fig.savefig(args.out)
|
petiguraREPO_NAMEterraPATH_START.@terra_extracted@terra-master@scripts@qlGrid.py@.PATH_END.py
|
{
"filename": "test_printing.py",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/tests/test_printing.py",
"type": "Python"
}
|
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pytensor.tensor.random import normal
from pymc import Bernoulli, Censored, CustomDist, Gamma, HalfCauchy, Mixture, StudentT, Truncated
from pymc.distributions import (
Dirichlet,
DirichletMultinomial,
HalfNormal,
KroneckerNormal,
MvNormal,
NegativeBinomial,
Normal,
Uniform,
ZeroInflatedPoisson,
)
from pymc.math import dot
from pymc.model import Deterministic, Model, Potential
from pymc.pytensorf import floatX
class BaseTestStrAndLatexRepr:
    """Shared checks for subclasses that define ``self.model``,
    ``self.distributions``, ``self.formats``, and ``self.expected``:
    each variable's repr must match its expectation, and every expected
    line must also appear in the model-level repr."""

    def test__repr_latex_(self):
        expected_latex = self.expected[("latex", True)]
        for dist, expected in zip(self.distributions, expected_latex):
            assert dist._repr_latex_() == expected
        # every variable's LaTeX fragments must show up in the model repr
        model_tex = self.model._repr_latex_()
        for expected in expected_latex:
            fragments = expected.strip("$").split(r"\sim")
            for fragment in fragments:
                assert fragment in model_tex

    def test_str_repr(self):
        for fmt in self.formats:
            expected_lines = self.expected[fmt]
            for dist, line in zip(self.distributions, expected_lines):
                assert dist.str_repr(*fmt) == line
            model_text = self.model.str_repr(*fmt)
            is_latex = fmt[0] == "latex"
            for line in expected_lines:
                if is_latex:
                    # compare fragment-wise: the model repr joins lines differently
                    for fragment in line.strip("$").split(r"\sim"):
                        assert fragment in model_text
                else:
                    assert line in model_text
class TestMonolith(BaseTestStrAndLatexRepr):
    # One large model touching many distribution kinds (basic RVs, observed
    # RVs, SymbolicRVs, nested mixtures, Deterministics, Potentials) whose
    # string/LaTeX representations are checked in all four formatting modes
    # by the base-class tests.
    def setup_class(self):
        # True parameter values
        alpha, sigma = 1, 1
        beta = [1, 2.5]
        # Size of dataset
        size = 100
        # Predictor variable
        X = np.random.normal(size=(size, 2)).dot(np.array([[1, 0], [0, 0.2]]))
        # Simulate outcome variable
        Y = alpha + X.dot(beta) + np.random.randn(size) * sigma
        with Model() as self.model:
            # TODO: some variables commented out here as they're not working properly
            # in v4 yet (9-jul-2021), so doesn't make sense to test str/latex for them
            # Priors for unknown model parameters
            alpha = Normal("alpha", mu=0, sigma=10)
            b = Normal("beta", mu=0, sigma=10, size=(2,), observed=beta)
            sigma = HalfNormal("sigma", sigma=1)
            # Test Cholesky parameterization
            Z = MvNormal("Z", mu=np.zeros(2), chol=np.eye(2), size=(2,))
            # NegativeBinomial representations to test issue 4186
            # nb1 = pm.NegativeBinomial(
            # "nb_with_mu_alpha", mu=pm.Normal("nbmu"), alpha=pm.Gamma("nbalpha", mu=6, sigma=1)
            # )
            nb2 = NegativeBinomial("nb_with_p_n", p=Uniform("nbp"), n=10)
            # SymbolicRV
            zip = ZeroInflatedPoisson("zip", 0.5, 5)  # NOTE: shadows builtin `zip` for the rest of this scope
            # Nested SymbolicRV
            comp_1 = ZeroInflatedPoisson.dist(0.5, 5)
            comp_2 = Censored.dist(Bernoulli.dist(0.5), -1, 1)
            w = Dirichlet("w", [1, 1])
            nested_mix = Mixture("nested_mix", w, [comp_1, comp_2])
            # Expected value of outcome
            mu = Deterministic("mu", floatX(alpha + dot(X, b)))
            # KroneckerNormal
            n, m = 3, 4
            covs = [np.eye(n), np.eye(m)]
            kron_normal = KroneckerNormal("kron_normal", mu=np.zeros(n * m), covs=covs, size=n * m)
            # MatrixNormal
            # matrix_normal = MatrixNormal(
            # "mat_normal",
            # mu=np.random.normal(size=n),
            # rowcov=np.eye(n),
            # colchol=np.linalg.cholesky(np.eye(n)),
            # size=(n, n),
            # )
            # DirichletMultinomial
            dm = DirichletMultinomial("dm", n=5, a=[1, 1, 1], size=(2, 3))
            # Likelihood (sampling distribution) of observations
            Y_obs = Normal("Y_obs", mu=mu, sigma=sigma, observed=Y)
            # add a potential as well
            pot = Potential("pot", mu**2)
            # add a deterministic that depends on an unnamed random variable
            pred = Deterministic("pred", Normal.dist(0, 1))
        self.distributions = [alpha, sigma, mu, b, Z, nb2, zip, w, nested_mix, Y_obs, pot]
        self.deterministics_or_potentials = [mu, pot, pred]
        # tuples of (formatting, include_params)
        self.formats = [("plain", True), ("plain", False), ("latex", True), ("latex", False)]
        # Expected repr lines, keyed by (formatting, include_params); order
        # matches self.distributions above.
        self.expected = {
            ("plain", True): [
                r"alpha ~ Normal(0, 10)",
                r"sigma ~ HalfNormal(0, 1)",
                r"mu ~ Deterministic(f(beta, alpha))",
                r"beta ~ Normal(0, 10)",
                r"Z ~ MultivariateNormal(f(), f())",
                r"nb_with_p_n ~ NegativeBinomial(10, nbp)",
                r"zip ~ MarginalMixture(f(), DiracDelta(0), Poisson(5))",
                r"w ~ Dirichlet(<constant>)",
                (
                    r"nested_mix ~ MarginalMixture(w, "
                    r"MarginalMixture(f(), DiracDelta(0), Poisson(5)), "
                    r"Censored(Bernoulli(0.5), -1, 1))"
                ),
                r"Y_obs ~ Normal(mu, sigma)",
                r"pot ~ Potential(f(beta, alpha))",
                r"pred ~ Deterministic(f(<normal>))",
            ],
            ("plain", False): [
                r"alpha ~ Normal",
                r"sigma ~ HalfNormal",
                r"mu ~ Deterministic",
                r"beta ~ Normal",
                r"Z ~ MultivariateNormal",
                r"nb_with_p_n ~ NegativeBinomial",
                r"zip ~ MarginalMixture",
                r"w ~ Dirichlet",
                r"nested_mix ~ MarginalMixture",
                r"Y_obs ~ Normal",
                r"pot ~ Potential",
                r"pred ~ Deterministic",
            ],
            ("latex", True): [
                r"$\text{alpha} \sim \operatorname{Normal}(0,~10)$",
                r"$\text{sigma} \sim \operatorname{HalfNormal}(0,~1)$",
                r"$\text{mu} \sim \operatorname{Deterministic}(f(\text{beta},~\text{alpha}))$",
                r"$\text{beta} \sim \operatorname{Normal}(0,~10)$",
                r"$\text{Z} \sim \operatorname{MultivariateNormal}(f(),~f())$",
                r"$\text{nb\_with\_p\_n} \sim \operatorname{NegativeBinomial}(10,~\text{nbp})$",
                r"$\text{zip} \sim \operatorname{MarginalMixture}(f(),~\operatorname{DiracDelta}(0),~\operatorname{Poisson}(5))$",
                r"$\text{w} \sim \operatorname{Dirichlet}(\text{<constant>})$",
                (
                    r"$\text{nested\_mix} \sim \operatorname{MarginalMixture}(\text{w},"
                    r"~\operatorname{MarginalMixture}(f(),~\operatorname{DiracDelta}(0),~\operatorname{Poisson}(5)),"
                    r"~\operatorname{Censored}(\operatorname{Bernoulli}(0.5),~-1,~1))$"
                ),
                r"$\text{Y\_obs} \sim \operatorname{Normal}(\text{mu},~\text{sigma})$",
                r"$\text{pot} \sim \operatorname{Potential}(f(\text{beta},~\text{alpha}))$",
                r"$\text{pred} \sim \operatorname{Deterministic}(f(\text{<normal>}))",
            ],
            ("latex", False): [
                r"$\text{alpha} \sim \operatorname{Normal}$",
                r"$\text{sigma} \sim \operatorname{HalfNormal}$",
                r"$\text{mu} \sim \operatorname{Deterministic}$",
                r"$\text{beta} \sim \operatorname{Normal}$",
                r"$\text{Z} \sim \operatorname{MultivariateNormal}$",
                r"$\text{nb\_with\_p\_n} \sim \operatorname{NegativeBinomial}$",
                r"$\text{zip} \sim \operatorname{MarginalMixture}$",
                r"$\text{w} \sim \operatorname{Dirichlet}$",
                r"$\text{nested\_mix} \sim \operatorname{MarginalMixture}$",
                r"$\text{Y\_obs} \sim \operatorname{Normal}$",
                r"$\text{pot} \sim \operatorname{Potential}$",
                r"$\text{pred} \sim \operatorname{Deterministic}",
            ],
        }
class TestData(BaseTestStrAndLatexRepr):
    # Checks how pm.Data containers render inside distribution reprs:
    # based on the expectations below, some Data values print literally
    # while others render as the "<shared>" placeholder.
    def setup_class(self):
        with Model() as self.model:
            import pymc as pm
            # NOTE(review): an inner model is opened inside the outer one;
            # presumably variables propagate to the parent model — confirm.
            with pm.Model() as model:
                a = pm.Normal("a", pm.Data("a_data", (2,)))
                b = pm.Normal("b", pm.Data("b_data", (2, 3)))
                c = pm.Normal("c", pm.Data("c_data", (2,)))
                d = pm.Normal("d", pm.Data("d_data", (2, 3)))
        self.distributions = [a, b, c, d]
        # tuples of (formatting, include_params)
        self.formats = [("plain", True), ("plain", False), ("latex", True), ("latex", False)]
        self.expected = {
            ("plain", True): [
                r"a ~ Normal(2, 1)",
                r"b ~ Normal(<shared>, 1)",
                r"c ~ Normal(2, 1)",
                r"d ~ Normal(<shared>, 1)",
            ],
            ("plain", False): [
                r"a ~ Normal",
                r"b ~ Normal",
                r"c ~ Normal",
                r"d ~ Normal",
            ],
            ("latex", True): [
                r"$\text{a} \sim \operatorname{Normal}(2,~1)$",
                r"$\text{b} \sim \operatorname{Normal}(\text{<shared>},~1)$",
                r"$\text{c} \sim \operatorname{Normal}(2,~1)$",
                r"$\text{d} \sim \operatorname{Normal}(\text{<shared>},~1)$",
            ],
            ("latex", False): [
                r"$\text{a} \sim \operatorname{Normal}$",
                r"$\text{b} \sim \operatorname{Normal}$",
                r"$\text{c} \sim \operatorname{Normal}$",
                r"$\text{d} \sim \operatorname{Normal}$",
            ],
        }
def test_model_latex_repr_three_levels_model():
    # A Censored distribution wrapping a Normal whose parameters are
    # themselves RVs: the LaTeX model repr must render the full nesting.
    with Model() as censored_model:
        mu = Normal("mu", 0.0, 5.0)
        sigma = HalfCauchy("sigma", 2.5)
        normal_dist = Normal.dist(mu=mu, sigma=sigma)
        censored_normal = Censored(
            "censored_normal", normal_dist, lower=-2.0, upper=2.0, observed=[1, 0, 0.5]
        )
    latex_repr = censored_model.str_repr(formatting="latex")
    # Compare line by line after stripping whitespace.
    expected = [
        "$$",
        "\\begin{array}{rcl}",
        "\\text{mu} &\\sim & \\operatorname{Normal}(0,~5)\\\\\\text{sigma} &\\sim & "
        "\\operatorname{HalfCauchy}(0,~2.5)\\\\\\text{censored\\_normal} &\\sim & "
        "\\operatorname{Censored}(\\operatorname{Normal}(\\text{mu},~\\text{sigma}),~-2,~2)",
        "\\end{array}",
        "$$",
    ]
    assert [line.strip() for line in latex_repr.split("\n")] == expected
def test_model_latex_repr_mixture_model():
    # A two-component Mixture: the LaTeX model repr must list the weights
    # and both component distributions with their (default) parameters.
    with Model() as mix_model:
        w = Dirichlet("w", [1, 1])
        mix = Mixture("mix", w=w, comp_dists=[Normal.dist(0.0, 5.0), StudentT.dist(7.0)])
    latex_repr = mix_model.str_repr(formatting="latex")
    # Compare line by line after stripping whitespace.
    expected = [
        "$$",
        "\\begin{array}{rcl}",
        "\\text{w} &\\sim & "
        "\\operatorname{Dirichlet}(\\text{<constant>})\\\\\\text{mix} &\\sim & "
        "\\operatorname{MarginalMixture}(\\text{w},~\\operatorname{Normal}(0,~5),~\\operatorname{StudentT}(7,~0,~1))",
        "\\end{array}",
        "$$",
    ]
    assert [line.strip() for line in latex_repr.split("\n")] == expected
def test_model_repr_variables_without_monkey_patched_repr():
    """Test that model repr does not rely on individual variables having the str_repr method monkey patched."""
    # A raw pytensor RV registered directly has no str_repr of its own.
    rv = normal(name="x")
    assert not hasattr(rv, "str_repr")
    m = Model()
    m.register_rv(rv, "x")
    assert m.str_repr() == "x ~ Normal(0, 1)"
def test_truncated_repr():
    """A Truncated distribution renders as ``Truncated`` + base name."""
    with Model() as m:
        Truncated("x", Gamma.dist(1, 1), lower=0, upper=20)
    assert m.str_repr(include_params=False) == "x ~ TruncatedGamma"
def test_custom_dist_repr():
    # CustomDist should surface the user-supplied class_name in the repr,
    # whether the distribution was defined via `dist=` or via `random=`.
    with Model() as model:
        def dist(mu, size):
            return Normal.dist(mu, 1, size=size)
        def random(rng, mu, size):
            return rng.normal(mu, size=size)
        x = CustomDist("x", 0, dist=dist, class_name="CustomDistNormal")
        x = CustomDist("y", 0, random=random, class_name="CustomRandomNormal")
    str_repr = model.str_repr(include_params=False)
    assert str_repr == "\n".join(["x ~ CustomDistNormal", "y ~ CustomRandomNormal"])
class TestLatexRepr:
    @staticmethod
    def simple_model() -> Model:
        """Build a tiny model whose variable names contain an underscore."""
        with Model() as m:
            noise = HalfNormal("error", 0.5)
            intercept = Normal("alpha_a", 0, 1)
            Normal("y", intercept, noise)
        return m

    def test_latex_escaped_underscore(self):
        """
        Ensures that all underscores in model variable names are properly escaped for LaTeX representation
        """
        latex = self.simple_model().str_repr(formatting="latex")
        assert "\\_" in latex
        # after dropping the escaped underscores, no bare underscore may remain
        assert "_" not in latex.replace("\\_", "")
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@tests@test_printing.py@.PATH_END.py
|
{
"filename": "power_gal.py",
"repo_name": "nickhand/pyRSD",
"repo_path": "pyRSD_extracted/pyRSD-master/pyRSD/rsd/power/gal/power_gal.py",
"type": "Python"
}
|
import contextlib
from pyRSD import numpy as np
from pyRSD.rsd._cache import parameter, cached_property
from pyRSD.rsd import tools, BiasedSpectrum
from .fog_kernels import FOGKernel
from . import Pgal
class GalaxySpectrum(BiasedSpectrum):
    r"""
    The model for the galaxy redshift space power spectrum
    Parameters
    ----------
    kmin : float, optional
        The minimum wavenumber to compute the power spectrum at
        [units: :math:`h/\mathrm{Mpc}`]; default is 1e-3
    kmax : float, optional
        The maximum wavenumber to compute the power spectrum at
        [units: :math:`h/\mathrm{Mpc}`]; default is 0.5
    Nk : int, optional
        The number of log-spaced bins to use as the underlying domain for
        splines; default is 200
    z : float, optional
        The redshift to compute the power spectrum at. Default = 0.
    params : :class:`~pyRSD.rsd.cosmology.Cosmology`, str
        Either a :class:`~pyRSD.rsd.cosmology.Cosmology` instance or the name
        of a file to load parameters from; see the 'data/params' directory
        for examples
    include_2loop : bool, optional
        If `True`, include 2-loop contributions in the model terms. Default
        is `False`.
    transfer_fit : str, optional
        The name of the transfer function fit to use. Default is `CLASS`
        and the options are {`CLASS`, `EH`, `EH_NoWiggle`, `BBKS`},
        or the name of a data file holding (k, T(k))
    max_mu : {0, 2, 4, 6, 8}, optional
        Only compute angular terms up to mu**(``max_mu``). Default is 4.
    interpolate: bool, optional
        Whether to return interpolated results for underlying power moments
    k0_low : float, optional (`5e-3`)
        below this wavenumber, evaluate any power in "low-k mode", which
        essentially just uses SPT at low-k
    linear_power_file : str, optional (`None`)
        string specifying the name of a file which gives the linear
        power spectrum, from which the transfer function in ``cosmo``
        will be initialized
    Pdv_model_type : {'jennings', 'sim', None}, optional
        The type of model to use to evaluate Pdv
    fog_model : str, optional
        the string specifying the FOG model to use; one of
        ['modified_lorentzian', 'lorentzian', 'gaussian'].
        Default is 'modified_lorentzian'
    use_so_correction : bool, optional
        Boost the centrals auto spectrum with a correction
        accounting for extra structure around centrals due
        to SO halo finders; default is `False`
    """
    def __init__(self, fog_model='modified_lorentzian',
                 use_so_correction=False,
                 **kwargs):
        """
        Initialize the GalaxySpectrum
        Additional `GalaxySpectrum`-specific parameters are listed
        below. Keywords accepted by :class:`pyRSD.rsd.BiasedSpectrum`
        and :class:`pyRSD.rsd.DarkMatterSpectrum`. See their documentation for
        further details.
        Parameters
        ----------
        fog_model : str, optional
            the string specifying the FOG model to use; one of
            ['modified_lorentzian', 'lorentzian', 'gaussian'].
            Default is 'modified_lorentzian'
        use_so_correction : bool, optional
            Boost the centrals auto spectrum with a correction
            accounting for extra structure around centrals due
            to SO halo finders; default is `False`
        """
        # the base class
        super(GalaxySpectrum, self).__init__(**kwargs)
        # the underlying driver; evaluates the individual P(k,mu) terms
        # via dict-like access, e.g. self._Pgal['Pcc']['PcAcA'](k, mu)
        self._Pgal = Pgal(self)
        # set the defaults
        # sample fractions and linear biases for the galaxy sub-samples:
        # c = central, s = satellite; A/B = without/with other sats in halo
        self.fog_model = fog_model
        self.include_2loop = False
        self.fs = 0.10
        self.fcB = 0.08
        self.fsB = 0.40
        self.b1_cA = 1.85
        self.b1_cB = 2.8
        self.b1_sA = 2.6
        self.b1_sB = 3.6
        # FOG velocity dispersions [Mpc/h]
        self.sigma_c = 1.
        self.sigma_s = 5.
        self.sigma_sA = 4.2
        self.sigma_sB = 6.
        # one-halo amplitudes and constant offset
        self.NcBs = 3e4
        self.NsBsB = 9e4
        self.N = 0.
        # SO correction
        self.use_so_correction = use_so_correction
        self.f_so = 0.
        self.sigma_so = 0.
    def default_params(self):
        """
        A GalaxyPowerParameters object holding the default model parameters
        The model associated with the parameter is ``self``
        """
        from pyRSD.rsdfit.theory import GalaxyPowerParameters
        return GalaxyPowerParameters.from_defaults(model=self)
    #---------------------------------------------------------------------------
    # parameters
    #---------------------------------------------------------------------------
    @parameter
    def f_so(self, val):
        """
        The fraction of satellites in SO halo finders compared to FOF
        """
        return val
    @parameter
    def sigma_so(self, val):
        """
        The FOG velocity dispersion for type A centrals in Mpc/h, accounting
        for FOG from SO/FOF differences around central type A galaxies
        """
        return val
    @parameter
    def fog_model(self, val):
        """
        Function to return the FOG suppression factor, which reads in a
        single variable ``x = k*mu*sigma``
        """
        # validate before storing; FOGKernel.factory relies on these names
        allowable = ['modified_lorentzian', 'lorentzian', 'gaussian']
        if val not in allowable:
            raise ValueError("`fog_model` must be one of %s" % allowable)
        return val
    @parameter
    def fs(self, val):
        """
        The satellite fraction, fs = N_sat / N_gal
        """
        return val
    @parameter
    def fcB(self, val):
        """
        The centrals with sats (cB) fraction, fcB = N_cB / N_cen
        """
        return val
    @parameter
    def fsB(self, val):
        """
        The satellite with sats fraction, fsB = N_sB / N_sat
        """
        return val
    @parameter
    def b1_cA(self, val):
        """
        The linear bias factor for the centrals with no sats in same halo.
        """
        return val
    @parameter
    def b1_cB(self, val):
        """
        The linear bias factor for the centrals with sats in same halo.
        """
        return val
    @parameter
    def b1_sA(self, val):
        """
        The linear bias factor for satellites with no other sats in same halo.
        """
        return val
    @parameter
    def b1_sB(self, val):
        """
        The linear bias factor for satellites with other sats in same halo.
        """
        return val
    @parameter
    def sigma_c(self, val):
        """
        The FOG velocity dispersion for centrals in Mpc/h
        """
        return val
    @parameter
    def sigma_s(self, val):
        """
        The FOG velocity dispersion for satellites in Mpc/h
        """
        return val
    @parameter
    def sigma_sA(self, val):
        """
        The FOG velocity dispersion for "type A" satellites in Mpc/h
        """
        return val
    @parameter
    def sigma_sB(self, val):
        """
        The FOG velocity dispersion for "type B" satellites in Mpc/h
        """
        return val
    @parameter
    def NcBs(self, val):
        """
        Constant for the P_cBs 1-halo term
        """
        return val
    @parameter
    def NsBsB(self, val):
        """
        Constant for the P_sBsB 1-halo term
        """
        return val
    @parameter
    def N(self, val):
        """
        Constant offset to model, set to 0 by default
        """
        return val
    #---------------------------------------------------------------------------
    # cached properties (recomputed only when their listed dependencies change)
    #---------------------------------------------------------------------------
    @cached_property("fog_model")
    def FOG(self):
        """
        Return the FOG function
        """
        return FOGKernel.factory(self.fog_model)
    @cached_property('fcB', 'b1_cB', 'b1_cA')
    def b1_c(self):
        """
        The linear bias factor for all centrals. This is not a free parameter,
        but is computed as weighted mean of b1_cA and b1_cB.
        """
        return self.fcB*self.b1_cB + (1.-self.fcB)*self.b1_cA
    @cached_property('fsB', 'b1_sB', 'b1_sA')
    def b1_s(self):
        """
        The linear bias factor for all satellites. This is not a free parameter,
        but is computed as weighted mean of b1_sA and b1_sB.
        """
        return self.fsB*self.b1_sB + (1.-self.fsB)*self.b1_sA
    #---------------------------------------------------------------------------
    # centrals power spectrum
    #---------------------------------------------------------------------------
    @tools.alcock_paczynski
    def Pgal_cAcA(self, k, mu, flatten=False):
        """
        The auto power spectrum of type A centrals
        If ``flatten`` is True, the result is raveled in Fortran
        (column-major) order; the same convention applies to all
        ``Pgal_*`` methods below.
        """
        toret = self._Pgal['Pcc']['PcAcA'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_cAcB(self, k, mu, flatten=False):
        """
        The cross power spectrum of type A and type B centrals
        """
        toret = self._Pgal['Pcc']['PcAcB'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_cBcB(self, k, mu, flatten=False):
        """
        The auto power spectrum of type B centrals
        """
        toret = self._Pgal['Pcc']['PcBcB'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_cc(self, k, mu, flatten=False):
        """
        The auto power spectrum of all centrals
        """
        toret = self._Pgal['Pcc'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    #---------------------------------------------------------------------------
    # central-satellite cross spectrum
    #---------------------------------------------------------------------------
    @tools.alcock_paczynski
    def Pgal_cAsA(self, k, mu, flatten=False):
        """
        The cross power spectrum of type A centrals and type A sats
        """
        toret = self._Pgal['Pcs']['PcAsA'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_cAsB(self, k, mu, flatten=False):
        """
        The cross power spectrum of type A centrals and type B sats
        """
        toret = self._Pgal['Pcs']['PcAsB'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_cBsA(self, k, mu, flatten=False):
        """
        The cross power spectrum of type B centrals and type A sats
        """
        toret = self._Pgal['Pcs']['PcBsA'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_cBsB(self, k, mu, flatten=False):
        """
        The cross power spectrum of type B centrals and type B sats
        """
        toret = self._Pgal['Pcs']['PcBsB'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_cs(self, k, mu, flatten=False):
        """
        The cross power spectrum of centrals and satellites
        """
        toret = self._Pgal['Pcs'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    #---------------------------------------------------------------------------
    # satellites auto spectrum
    #---------------------------------------------------------------------------
    @tools.alcock_paczynski
    def Pgal_sAsA(self, k, mu, flatten=False):
        """
        The auto power spectrum of type A satellites
        """
        toret = self._Pgal['Pss']['PsAsA'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_sAsB(self, k, mu, flatten=False):
        """
        The cross power spectrum of type A and type B satellites
        """
        toret = self._Pgal['Pss']['PsAsB'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_sBsB(self, k, mu, flatten=False):
        """
        The auto power spectrum of type B satellites
        """
        toret = self._Pgal['Pss']['PsBsB'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.alcock_paczynski
    def Pgal_ss(self, k, mu, flatten=False):
        """
        The auto power spectrum of all satellites
        """
        toret = self._Pgal['Pss'](k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    #---------------------------------------------------------------------------
    # total galaxy P(k,mu)
    #---------------------------------------------------------------------------
    @tools.broadcast_kmu
    @tools.alcock_paczynski
    def power(self, k, mu, flatten=False):
        """
        The total redshift-space galaxy power spectrum at ``k`` and ``mu``
        Parameters
        ----------
        k : float, array_like
            The wavenumbers to evaluate the power spectrum at, in `h/Mpc`
        mu : float, array_like
            The cosine of the angle from the line of sight. If a float is provided,
            the value is used for all input `k` values. If array-like and `mu` has
            the same shape as `k`, the power at each (k,mu) pair is returned. If
            `mu` has a shape different than `k`, the returned power has shape
            ``(len(k), len(mu))``.
        flatten : bool, optional
            If `True`, flatten the return array, which will have a length of
            `len(k) * len(mu)`
        """
        toret = self._Pgal(k, mu)
        return toret if not flatten else np.ravel(toret, order='F')
    @tools.broadcast_kmu
    @tools.alcock_paczynski
    def derivative_k(self, k, mu):
        """
        The derivative with respect to `k_AP`
        """
        return self._Pgal.derivative_k(k, mu)
    @tools.broadcast_kmu
    @tools.alcock_paczynski
    def derivative_mu(self, k, mu):
        """
        The derivative with respect to `mu_AP`
        """
        return self._Pgal.derivative_mu(k, mu)
    def get_gradient(self, pars):
        """
        Return a :class:`PkmuGradient` object which can compute
        the gradient of :func:`GalaxySpectrum.power` for a set of
        desired parameters
        Parameters
        ----------
        pars : ParameterSet
        """
        from pyRSD.rsd.power.gal.derivatives import PgalDerivative
        from pyRSD.rsd.power.gradient import PkmuGradient
        registry = PgalDerivative.registry()
        return PkmuGradient(self, registry, pars)
|
nickhandREPO_NAMEpyRSDPATH_START.@pyRSD_extracted@pyRSD-master@pyRSD@rsd@power@gal@power_gal.py@.PATH_END.py
|
{
"filename": "nonparametric.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/stats/nonparametric.py",
"type": "Python"
}
|
"""
Rank based methods for inferential statistics
Created on Sat Aug 15 10:18:53 2020
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
from scipy.stats import rankdata
from statsmodels.tools.testing import Holder
from statsmodels.stats.base import HolderTuple
from statsmodels.stats.weightstats import (
_tconfint_generic,
_tstat_generic,
_zconfint_generic,
_zstat_generic,
)
def rankdata_2samp(x1, x2):
    """Compute midranks for two samples

    Ties receive average (mid) ranks.  For 2-dimensional input, ranking
    is performed column by column.

    Parameters
    ----------
    x1, x2 : array_like
        Original data for two samples that will be converted to midranks.

    Returns
    -------
    rank1 : ndarray
        Midranks of the first sample in the pooled sample.
    rank2 : ndarray
        Midranks of the second sample in the pooled sample.
    ranki1 : ndarray
        Internal midranks of the first sample.
    ranki2 : ndarray
        Internal midranks of the second sample.
    """
    x1 = np.asarray(x1)
    x2 = np.asarray(x2)
    n1 = len(x1)
    n2 = len(x2)
    if min(n1, n2) == 0:
        raise ValueError("one sample has zero length")

    def _rank(arr):
        # rank along the observation axis; column-wise for 2-d data
        if arr.ndim > 1:
            return np.apply_along_axis(rankdata, 0, arr)
        return rankdata(arr)

    pooled_ranks = _rank(np.concatenate((x1, x2)))
    return (pooled_ranks[:n1], pooled_ranks[n1:], _rank(x1), _rank(x2))
class RankCompareResult(HolderTuple):
    """Results for rank comparison
    This is a subclass of HolderTuple that includes results from intermediate
    computations, as well as methods for hypothesis tests, confidence intervals
    and summary.
    The methods rely on attributes set by the ``rank_compare_*`` constructor
    functions, in particular ``prob1``, ``var``, ``var_prob``, ``nobs``,
    ``df``, ``use_t`` and, when available, ``statistic`` and ``pvalue``.
    """
    def conf_int(self, value=None, alpha=0.05, alternative="two-sided"):
        """
        Confidence interval for probability that sample 1 has larger values
        Confidence interval is for the shifted probability
        P(x1 > x2) + 0.5 * P(x1 = x2) - value
        Parameters
        ----------
        value : float
            Value, default 0, shifts the confidence interval,
            e.g. ``value=0.5`` centers the confidence interval at zero.
        alpha : float
            Significance level for the confidence interval, coverage is
            ``1-alpha``
        alternative : str
            The alternative hypothesis, H1, has to be one of the following
            * 'two-sided' : H1: ``prob - value`` not equal to 0.
            * 'larger' :   H1: ``prob - value > 0``
            * 'smaller' :  H1: ``prob - value < 0``
        Returns
        -------
        lower : float or ndarray
            Lower confidence limit. This is -inf for the one-sided alternative
            "smaller".
        upper : float or ndarray
            Upper confidence limit. This is inf for the one-sided alternative
            "larger".
        """
        p0 = value
        if p0 is None:
            p0 = 0
        diff = self.prob1 - p0
        std_diff = np.sqrt(self.var / self.nobs)
        # t or normal critical values depending on how the result was built
        if self.use_t is False:
            return _zconfint_generic(diff, std_diff, alpha, alternative)
        else:
            return _tconfint_generic(diff, std_diff, self.df, alpha,
                                     alternative)
    def test_prob_superior(self, value=0.5, alternative="two-sided"):
        """test for superiority probability
        H0: P(x1 > x2) + 0.5 * P(x1 = x2) = value
        The alternative is that the probability is either not equal, larger
        or smaller than the null-value depending on the chosen alternative.
        Parameters
        ----------
        value : float
            Value of the probability under the Null hypothesis.
        alternative : str
            The alternative hypothesis, H1, has to be one of the following
            * 'two-sided' : H1: ``prob - value`` not equal to 0.
            * 'larger' :   H1: ``prob - value > 0``
            * 'smaller' :  H1: ``prob - value < 0``
        Returns
        -------
        res : HolderTuple
            HolderTuple instance with the following main attributes
            statistic : float
                Test statistic for z- or t-test
            pvalue : float
                Pvalue of the test based on either normal or t distribution.
        """
        p0 = value  # alias
        # diff = self.prob1 - p0  # for reporting, not used in computation
        # TODO: use var_prob
        std_diff = np.sqrt(self.var / self.nobs)
        # corresponds to a one-sample test and either p0 or diff could be used
        if not self.use_t:
            stat, pv = _zstat_generic(self.prob1, p0, std_diff, alternative,
                                      diff=0)
            distr = "normal"
        else:
            stat, pv = _tstat_generic(self.prob1, p0, std_diff, self.df,
                                      alternative, diff=0)
            distr = "t"
        res = HolderTuple(statistic=stat,
                          pvalue=pv,
                          df=self.df,
                          distribution=distr
                          )
        return res
    def tost_prob_superior(self, low, upp):
        '''test of stochastic (non-)equivalence of p = P(x1 > x2)
        Null hypothesis: p < low or p > upp
        Alternative hypothesis: low < p < upp
        where p is the probability that a random draw from the population of
        the first sample has a larger value than a random draw from the
        population of the second sample, specifically
        p = P(x1 > x2) + 0.5 * P(x1 = x2)
        If the pvalue is smaller than a threshold, say 0.05, then we reject the
        hypothesis that the probability p that distribution 1 is stochastically
        superior to distribution 2 is outside of the interval given by
        thresholds low and upp.
        Parameters
        ----------
        low, upp : float
            equivalence interval low < mean < upp
        Returns
        -------
        res : HolderTuple
            HolderTuple instance with the following main attributes
            pvalue : float
                Pvalue of the equivalence test given by the larger pvalue of
                the two one-sided tests.
            statistic : float
                Test statistic of the one-sided test that has the larger
                pvalue.
            results_larger : HolderTuple
                Results instance with test statistic, pvalue and degrees of
                freedom for lower threshold test.
            results_smaller : HolderTuple
                Results instance with test statistic, pvalue and degrees of
                freedom for upper threshold test.
        '''
        # two one-sided tests (TOST): reject both to conclude equivalence
        t1 = self.test_prob_superior(low, alternative='larger')
        t2 = self.test_prob_superior(upp, alternative='smaller')
        # idx_max = 1 if t1.pvalue < t2.pvalue else 0
        idx_max = np.asarray(t1.pvalue < t2.pvalue, int)
        title = "Equivalence test for Prob(x1 > x2) + 0.5 Prob(x1 = x2) "
        res = HolderTuple(statistic=np.choose(idx_max,
                                              [t1.statistic, t2.statistic]),
                          # pvalue=[t1.pvalue, t2.pvalue][idx_max],  # python
                          # use np.choose for vectorized selection
                          pvalue=np.choose(idx_max, [t1.pvalue, t2.pvalue]),
                          results_larger=t1,
                          results_smaller=t2,
                          title=title
                          )
        return res
    def confint_lintransf(self, const=-1, slope=2, alpha=0.05,
                          alternative="two-sided"):
        """confidence interval of a linear transformation of prob1
        This computes the confidence interval for
        d = const + slope * prob1
        Default values correspond to Somers' d.
        Parameters
        ----------
        const, slope : float
            Constant and slope for linear (affine) transformation.
        alpha : float
            Significance level for the confidence interval, coverage is
            ``1-alpha``
        alternative : str
            The alternative hypothesis, H1, has to be one of the following
            * 'two-sided' : H1: ``prob - value`` not equal to 0.
            * 'larger' :   H1: ``prob - value > 0``
            * 'smaller' :  H1: ``prob - value < 0``
        Returns
        -------
        lower : float or ndarray
            Lower confidence limit. This is -inf for the one-sided alternative
            "smaller".
        upper : float or ndarray
            Upper confidence limit. This is inf for the one-sided alternative
            "larger".
        """
        low_p, upp_p = self.conf_int(alpha=alpha, alternative=alternative)
        low = const + slope * low_p
        upp = const + slope * upp_p
        # a negative slope reverses the ordering of the interval bounds
        if slope < 0:
            low, upp = upp, low
        return low, upp
    def effectsize_normal(self, prob=None):
        """
        Cohen's d, standardized mean difference under normality assumption.
        This computes the standardized mean difference, Cohen's d, effect size
        that is equivalent to the rank based probability ``p`` of being
        stochastically larger if we assume that the data is normally
        distributed, given by
        :math: `d = F^{-1}(p) * \\sqrt{2}`
        where :math:`F^{-1}` is the inverse of the cdf of the normal
        distribution.
        Parameters
        ----------
        prob : float in (0, 1)
            Probability to be converted to Cohen's d effect size.
            If prob is None, then the ``prob1`` attribute is used.
        Returns
        -------
        equivalent Cohen's d effect size under normality assumption.
        """
        if prob is None:
            prob = self.prob1
        return stats.norm.ppf(prob) * np.sqrt(2)
    def summary(self, alpha=0.05, xname=None):
        """summary table for probability that random draw x1 is larger than x2
        Parameters
        ----------
        alpha : float
            Significance level for confidence intervals. Coverage is 1 - alpha
        xname : None or list of str
            If None, then each row has a name column with generic names.
            If xname is a list of strings, then it will be included as part
            of those names.
        Returns
        -------
        SimpleTable instance with methods to convert to different output
        formats.
        """
        yname = "None"
        effect = np.atleast_1d(self.prob1)
        # compute the test lazily if the constructor did not set it
        # (e.g. results from rank_compare_2ordinal)
        if self.pvalue is None:
            statistic, pvalue = self.test_prob_superior()
        else:
            pvalue = self.pvalue
            statistic = self.statistic
        pvalues = np.atleast_1d(pvalue)
        ci = np.atleast_2d(self.conf_int(alpha=alpha))
        # transpose so rows correspond to parameters in the vectorized case
        if ci.shape[0] > 1:
            ci = ci.T
        use_t = self.use_t
        sd = np.atleast_1d(np.sqrt(self.var_prob))
        statistic = np.atleast_1d(statistic)
        if xname is None:
            xname = ['c%d' % ii for ii in range(len(effect))]
        xname2 = ['prob(x1>x2) %s' % ii for ii in xname]
        title = "Probability sample 1 is stochastically larger"
        from statsmodels.iolib.summary import summary_params
        summ = summary_params((self, effect, sd, statistic,
                               pvalues, ci),
                              yname=yname, xname=xname2, use_t=use_t,
                              title=title, alpha=alpha)
        return summ
def rank_compare_2indep(x1, x2, use_t=True):
    """
    Statistics and tests for the probability that x1 has larger values than x2.

    The statistic of interest is

        p = P(x1 > x2) + 0.5 * P(x1 = x2)

    the probability that a random draw from the population of the first
    sample is larger than a random draw from the population of the second
    sample, with ties counted with weight one half.  This measure underlies
    the Wilcoxon-Mann-Whitney U, Fligner-Policello and Brunner-Munzel tests.
    Inference is based on the asymptotic distribution of the Brunner-Munzel
    test; the use of midranks makes it valid for discrete variables.  The
    null hypothesis of stochastic equality corresponds to p = 0.5, i.e. the
    Brunner-Munzel test.

    Parameters
    ----------
    x1, x2 : array_like
        Array of samples, should be one-dimensional.
    use_t : boolean
        If use_t is true, the t distribution with Welch-Satterthwaite type
        degrees of freedom is used for p-value and confidence interval.
        If use_t is false, then the normal distribution is used.

    Returns
    -------
    res : RankCompareResult
        Results instance for the Brunner-Munzel test with methods for
        hypothesis tests, confidence intervals and summary.  Main
        attributes are ``statistic`` (Brunner-Munzel W statistic) and
        ``pvalue`` (two-sided, t or normal based depending on `use_t`).

    See Also
    --------
    RankCompareResult
    scipy.stats.brunnermunzel : Brunner-Munzel test for stochastic equality
    scipy.stats.mannwhitneyu : Mann-Whitney rank test on two samples.

    Notes
    -----
    Wilcoxon-Mann-Whitney assumes equal variance or equal distribution
    under the null hypothesis; Fligner-Policello allows unequal variances
    but assumes continuous distributions (no ties).  Brunner-Munzel extends
    this to unequal variances and discrete or ordered categorical data.
    Brunner and Munzel recommend the t distribution for sample sizes of 50
    or less; below 10 observations the permutation version of the test is
    preferable.  Note that the Brunner-Munzel literature defines the
    probability that x1 is stochastically *smaller* than x2; here the
    stochastically larger direction is used, which is equivalent to
    switching x1 and x2.

    References
    ----------
    .. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
       problem: Asymptotic theory and a small-sample approximation".
       Biometrical Journal. Vol. 42(2000): 17-25.
    .. [2] Neubert, K. and Brunner, E. "A studentized permutation test for
       the non-parametric Behrens-Fisher problem". Computational Statistics
       and Data Analysis. Vol. 51(2007): 5192-5204.
    """
    x1 = np.asarray(x1)
    x2 = np.asarray(x2)
    n1, n2 = len(x1), len(x2)
    nobs = n1 + n2
    if n1 == 0 or n2 == 0:
        raise ValueError("one sample has zero length")

    rank1, rank2, ranki1, ranki2 = rankdata_2samp(x1, x2)
    # mean pooled and within-sample midranks
    mean_rank1 = np.mean(rank1, axis=0)
    mean_rank2 = np.mean(rank2, axis=0)
    mean_ranki1 = np.mean(ranki1, axis=0)
    mean_ranki2 = np.mean(ranki2, axis=0)

    # Brunner-Munzel rank-based variance estimates S_i^2
    s1 = np.sum(np.power(rank1 - ranki1 - mean_rank1 + mean_ranki1, 2.0),
                axis=0) / (n1 - 1)
    s2 = np.sum(np.power(rank2 - ranki2 - mean_rank2 + mean_ranki2, 2.0),
                axis=0) / (n2 - 1)

    # Brunner-Munzel W statistic
    bm_stat = n1 * n2 * (mean_rank1 - mean_rank2)
    bm_stat /= nobs * np.sqrt(n1 * s1 + n2 * s2)

    # two-sided p-value; only alternative == "two-sided" here
    if use_t:
        # Welch-Satterthwaite type degrees of freedom
        df_numer = np.power(n1 * s1 + n2 * s2, 2.0)
        df_denom = (np.power(n1 * s1, 2.0) / (n1 - 1)
                    + np.power(n2 * s2, 2.0) / (n2 - 1))
        df = df_numer / df_denom
        pvalue = 2 * stats.t.sf(np.abs(bm_stat), df)
    else:
        pvalue = 2 * stats.norm.sf(np.abs(bm_stat))
        df = None

    # variance of the estimated probability and point estimates
    var1 = s1 / (nobs - n1)**2
    var2 = s2 / (nobs - n2)**2
    var_prob = var1 / n1 + var2 / n2
    var = nobs * var_prob
    prob1 = (mean_rank1 - (n1 + 1) / 2) / n2
    prob2 = (mean_rank2 - (n2 + 1) / 2) / n1

    return RankCompareResult(
        statistic=bm_stat, pvalue=pvalue, s1=s1, s2=s2,
        var1=var1, var2=var2, var=var,
        var_prob=var_prob,
        nobs1=n1, nobs2=n2, nobs=nobs,
        mean1=mean_rank1, mean2=mean_rank2,
        prob1=prob1, prob2=prob2,
        somersd1=prob1 * 2 - 1, somersd2=prob2 * 2 - 1,
        df=df, use_t=use_t
        )
def rank_compare_2ordinal(count1, count2, ddof=1, use_t=True):
    """
    Stochastically larger probability for 2 independent ordinal samples.

    This is the special case of `rank_compare_2indep` in which the data are
    given as counts of two independent ordinal, i.e. ordered multinomial,
    samples.  The statistic of interest is

        p = P(x1 > x2) + 0.5 * P(x1 = x2)

    the probability that a random draw from the population of the first
    sample has a larger value than a random draw from the population of the
    second sample.

    Parameters
    ----------
    count1 : array_like
        Counts of the first sample, categories are assumed to be ordered.
    count2 : array_like
        Counts of the second sample, number of categories and ordering
        needs to be the same as for sample 1.
    ddof : scalar
        Degrees of freedom correction for variance estimation. The default
        ddof=1 corresponds to `rank_compare_2indep`.
    use_t : bool
        If use_t is true, the t distribution with Welch-Satterthwaite type
        degrees of freedom is used for p-value and confidence interval.
        If use_t is false, then the normal distribution is used.

    Returns
    -------
    res : RankCompareResult
        Results instance with methods for hypothesis tests and confidence
        intervals for the probability that sample 1 is stochastically
        larger than sample 2.

    See Also
    --------
    rank_compare_2indep
    RankCompareResult

    Notes
    -----
    The implementation is based on the appendix of Munzel and Hauschke
    (2003), with the addition of ``ddof`` so that the results match the
    general function `rank_compare_2indep`.
    """
    count1 = np.asarray(count1)
    count2 = np.asarray(count2)
    n1, n2 = count1.sum(), count2.sum()
    freq1 = count1 / n1
    freq2 = count2 / n2

    def _midrank_cdf(freq):
        # cdf evaluated "between" categories: average of the cdf values
        # just below and just above each category (midrank convention)
        cdf = np.concatenate(([0], freq)).cumsum(axis=0)
        return (cdf[1:] + cdf[:-1]) / 2

    cdfm1 = _midrank_cdf(freq1)
    cdfm2 = _midrank_cdf(freq2)
    # point estimates of p and its complement
    prob1 = (cdfm2 * freq1).sum()
    prob2 = (cdfm1 * freq2).sum()
    # variances of the placement distributions
    var1 = (cdfm2**2 * freq1).sum() - prob1**2
    var2 = (cdfm1**2 * freq2).sum() - prob2**2
    var_prob = var1 / (n1 - ddof) + var2 / (n2 - ddof)
    nobs = n1 + n2
    var = nobs * var_prob
    # Welch-Satterthwaite type degrees of freedom
    vn1 = var1 * n2 * n1 / (n1 - ddof)
    vn2 = var2 * n1 * n2 / (n2 - ddof)
    df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
    return RankCompareResult(
        statistic=None, pvalue=None, s1=None, s2=None,
        var1=var1, var2=var2, var=var,
        var_prob=var_prob,
        nobs1=n1, nobs2=n2, nobs=nobs,
        mean1=None, mean2=None,
        prob1=prob1, prob2=prob2,
        somersd1=prob1 * 2 - 1, somersd2=prob2 * 2 - 1,
        df=df, use_t=use_t
        )
def prob_larger_continuous(distr1, distr2):
    """
    Probability indicating that distr1 is stochastically larger than distr2.

    This computes p = P(x1 > x2) for two continuous distributions, where
    `distr1` and `distr2` are the distributions of the random variables x1
    and x2 respectively.

    Parameters
    ----------
    distr1, distr2 : distributions
        Two instances of scipy.stats.distributions. The required methods
        are cdf of the second distribution and expect of the first
        distribution.

    Returns
    -------
    p : probability x1 is larger than x2

    Notes
    -----
    This is a one-liner that is added mainly as reference.

    Examples
    --------
    >>> from scipy import stats
    >>> prob_larger_continuous(stats.norm, stats.t(5))
    0.4999999999999999

    >>> prob_larger_continuous(stats.norm, stats.norm(loc=1))
    0.23975006109347669
    """
    # E_{x1}[F_2(x1)] = P(x2 < x1) for continuous x2
    cdf2 = distr2.cdf
    return distr1.expect(cdf2)
def cohensd2problarger(d):
    """
    Convert Cohen's d effect size to stochastically-larger-probability.

    Assuming normally distributed observations, this computes

        p = Prob(x1 > x2) = F(d / sqrt(2))

    where F is the cdf of the standard normal distribution and Cohen's d
    is defined as d = (mean1 - mean2) / std with ``std`` the pooled within
    standard deviation.

    Parameters
    ----------
    d : float or array_like
        Cohen's d effect size for difference mean1 - mean2.

    Returns
    -------
    prob : float or ndarray
        Prob(x1 > x2)
    """
    root2 = np.sqrt(2.0)
    return stats.norm.cdf(d / root2)
def _compute_rank_placements(x1, x2) -> Holder:
    """
    Compute ranks and placements for two samples.

    Helper used by `samplesize_rank_compare_onetail`; assumes the input
    data has been validated by the caller.  All ranks are average (mid)
    ranks, so ties are handled.

    Parameters
    ----------
    x1, x2 : array_like
        Data samples used to compute ranks and placements.

    Returns
    -------
    res : Holder
        Holder instance with the attributes:
        ``n_1``, ``n_2`` (sample sizes),
        ``overall_ranks_pooled`` (midranks in the pooled sample),
        ``overall_ranks_1``, ``overall_ranks_2`` (each sample's midranks
        within the pooled sample),
        ``within_group_ranks_1``, ``within_group_ranks_2`` (midranks
        within each sample separately), and
        ``placements_1``, ``placements_2`` (overall minus within-group
        rank for each observation).

    Notes
    -----
    A placement — the difference between an observation's overall rank
    and its within-group rank — can be thought of as a measure of the
    degree of overlap or separation between the two samples.
    """
    n_1, n_2 = len(x1), len(x2)
    # midranks of every observation in the pooled sample
    pooled_ranks = rankdata(np.r_[x1, x2], method="average")
    overall_ranks_1 = pooled_ranks[:n_1]
    overall_ranks_2 = pooled_ranks[n_1:]
    # midranks of each observation within its own sample
    within_group_ranks_1 = rankdata(x1, method="average")
    within_group_ranks_2 = rankdata(x2, method="average")
    return Holder(
        n_1=n_1,
        n_2=n_2,
        overall_ranks_pooled=pooled_ranks,
        overall_ranks_1=overall_ranks_1,
        overall_ranks_2=overall_ranks_2,
        within_group_ranks_1=within_group_ranks_1,
        within_group_ranks_2=within_group_ranks_2,
        placements_1=overall_ranks_1 - within_group_ranks_1,
        placements_2=overall_ranks_2 - within_group_ranks_2,
    )
def samplesize_rank_compare_onetail(
    synthetic_sample,
    reference_sample,
    alpha,
    power,
    nobs_ratio=1,
    alternative="two-sided",
) -> Holder:
    """
    Compute sample size for the non-parametric Mann-Whitney U test.

    This function implements the method of Happ et al. (2019).

    Parameters
    ----------
    synthetic_sample : array_like
        Generated `synthetic` data representing the treatment
        group under the research hypothesis.
    reference_sample : array_like
        Advance information for the reference group.
    alpha : float
        The type I error rate for the test (two-sided).
    power : float
        The desired power of the test.
    nobs_ratio : float, optional
        Sample size ratio, `nobs_ref` = `nobs_ratio` *
        `nobs_treat`. This is the ratio of the reference
        group sample size to the treatment group sample
        size, by default 1 (balanced design). See Notes.
    alternative : str, "two-sided" (default), "larger", or "smaller"
        Extra argument to choose whether the sample size is
        calculated for a two-sided (default) or one-sided test.
        See Notes.

    Returns
    -------
    res : Holder
        An instance of Holder containing the following attributes:

        nobs_total : float
            The total sample size required for the experiment.
        nobs_treat : float
            Sample size for the treatment group.
        nobs_ref : float
            Sample size for the reference group.
        relative_effect : float
            The estimated relative effect size.
        power : float
            The desired power for the test.
        alpha : float
            The type I error rate for the test.

    Notes
    -----
    In the context of the two-sample Wilcoxon Mann-Whitney
    U test, the `reference_sample` typically represents data
    from the control group or previous studies. The
    `synthetic_sample` is generated based on this reference
    data and a prespecified relative effect size that is
    meaningful for the research question. This effect size
    is often determined in collaboration with subject matter
    experts to reflect a significant difference worth detecting.
    By comparing the reference and synthetic samples, this
    function estimates the sample size needed to achieve the
    desired power at the specified Type-I error rate.

    Choosing between `one-sided` and `two-sided` tests has
    important implications for sample size planning. A
    `two-sided` test is more conservative and requires a
    larger sample size but covers effects in both directions.
    In contrast, a `larger` (`relative_effect > 0.5`) or `smaller`
    (`relative_effect < 0.5`) one-sided test assumes the effect
    occurs only in one direction, leading to a smaller required
    sample size. However, if the true effect is in the opposite
    direction, the `one-sided` test has virtually no power to
    detect it. Additionally, if a two-sided test ends up being
    used instead of the planned one-sided test, the original
    sample size may be insufficient, resulting in an underpowered
    study. It is important to carefully consider these trade-offs
    when planning a study.

    For `nobs_ratio > 1`, `nobs_ratio = 1`, or `nobs_ratio < 1`,
    the reference group sample size is larger, equal to, or smaller
    than the treatment group sample size, respectively.

    Examples
    --------
    The data for the placebo group of a clinical trial published in
    Thall and Vail [2] is shown below. A relevant effect for the treatment
    under investigation is considered to be a 50% reduction in the number
    of seizures. To compute the required sample size with a power of 0.8
    and holding the type I error rate at 0.05, we generate synthetic data
    for the treatment group under the alternative assuming this reduction.

    >>> from statsmodels.stats.nonparametric import samplesize_rank_compare_onetail
    >>> import numpy as np
    >>> reference_sample = np.array([3, 3, 5, 4, 21, 7, 2, 12, 5, 0, 22, 4, 2, 12,
    ...                              9, 5, 3, 29, 5, 7, 4, 4, 5, 8, 25, 1, 2, 12])
    >>> # Apply 50% reduction in seizure counts and floor operation
    >>> synthetic_sample = np.floor(reference_sample / 2)
    >>> result = samplesize_rank_compare_onetail(
    ...     synthetic_sample=synthetic_sample,
    ...     reference_sample=reference_sample,
    ...     alpha=0.05, power=0.8
    ... )
    >>> print(f"Total sample size: {result.nobs_total}, "
    ...       f"Treatment group: {result.nobs_treat}, "
    ...       f"Reference group: {result.nobs_ref}")

    References
    ----------
    .. [1] Happ, M., Bathke, A. C., and Brunner, E. "Optimal sample size
       planning for the Wilcoxon-Mann-Whitney test". Statistics in Medicine.
       Vol. 38(2019): 363-375. https://doi.org/10.1002/sim.7983.
    .. [2] Thall, P. F., and Vail, S. C. "Some covariance models for longitudinal
       count data with overdispersion". Biometrics, pp. 657-671, 1990.
    """
    synthetic_sample = np.asarray(synthetic_sample)
    reference_sample = np.asarray(reference_sample)
    # --- Input validation -------------------------------------------------
    if not (len(synthetic_sample) > 0 and len(reference_sample) > 0):
        raise ValueError(
            "Both `synthetic_sample` and `reference_sample`"
            " must have at least one element."
        )
    if not (
        np.all(np.isfinite(reference_sample))
        and np.all(np.isfinite(synthetic_sample))
    ):
        raise ValueError(
            "All elements of `synthetic_sample` and `reference_sample`"
            " must be finite; check for missing values."
        )
    if not (0 < alpha < 1):
        raise ValueError("Alpha must be between 0 and 1 non-inclusive.")
    if not (0 < power < 1):
        raise ValueError("Power must be between 0 and 1 non-inclusive.")
    if not (0 < nobs_ratio):
        raise ValueError(
            "Ratio of reference group to treatment group must be"
            " strictly positive."
        )
    if alternative not in ("two-sided", "larger", "smaller"):
        raise ValueError(
            "Alternative must be one of `two-sided`, `larger`, or `smaller`."
        )
    # Group 1 is the treatment group, Group 2 is the reference group
    rank_place = _compute_rank_placements(
        synthetic_sample,
        reference_sample,
    )
    # Extra few bytes of name binding for explicitness & readability
    n_syn = rank_place.n_1
    n_ref = rank_place.n_2
    overall_ranks_pooled = rank_place.overall_ranks_pooled
    placements_syn = rank_place.placements_1
    placements_ref = rank_place.placements_2
    # Estimated relative effect p = P(X < Y) + 0.5 * P(X = Y), computed
    # from the difference of mean placements (Happ et al., 2019).
    relative_effect = (
        np.mean(placements_syn) - np.mean(placements_ref)
    ) / (n_syn + n_ref) + 0.5
    # Values [0.499, 0.501] considered 'practically' = 0.5 (0.1% atol)
    if np.isclose(relative_effect, 0.5, atol=1e-3):
        raise ValueError(
            "Estimated relative effect is effectively 0.5, i.e."
            " stochastic equality between `synthetic_sample` and"
            " `reference_sample`. Given null hypothesis is true,"
            " sample size cannot be calculated. Please review data"
            " samples to ensure they reflect appropriate relative"
            " effect size assumptions."
        )
    # A one-sided alternative must point in the same direction as the
    # estimated effect; otherwise no finite sample size exists.
    if relative_effect < 0.5 and alternative == "larger":
        raise ValueError(
            "Estimated relative effect is smaller than 0.5;"
            " `synthetic_sample` is stochastically smaller than"
            " `reference_sample`. No sample size can be calculated"
            " for `alternative == 'larger'`. Please review data"
            " samples to ensure they reflect appropriate relative"
            " effect size assumptions."
        )
    if relative_effect > 0.5 and alternative == "smaller":
        raise ValueError(
            "Estimated relative effect is larger than 0.5;"
            " `synthetic_sample` is stochastically larger than"
            " `reference_sample`. No sample size can be calculated"
            " for `alternative == 'smaller'`. Please review data"
            " samples to ensure they reflect appropriate relative"
            " effect size assumptions."
        )
    # Variance estimates of the rank statistics (pooled and per group),
    # as used in the sample size formula of Happ et al. (2019).
    sd_overall = np.sqrt(
        np.sum(
            (overall_ranks_pooled - (n_syn + n_ref + 1) / 2) ** 2
        )
        / (n_syn + n_ref) ** 3
    )
    var_ref = (
        np.sum(
            (placements_ref - np.mean(placements_ref)) ** 2
        ) / (n_ref * (n_syn ** 2))
    )
    var_syn = (
        np.sum(
            (placements_syn - np.mean(placements_syn)) ** 2
        ) / ((n_ref ** 2) * n_syn)
    )
    quantile_prob = (1 - alpha / 2) if alternative == "two-sided" else (1 - alpha)
    quantile_alpha = stats.norm.ppf(quantile_prob, loc=0, scale=1)
    quantile_power = stats.norm.ppf(power, loc=0, scale=1)
    # Convert `nobs_ratio` to proportion of total allocated to reference group
    prop_treatment = 1 / (1 + nobs_ratio)
    prop_reference = 1 - prop_treatment
    var_terms = np.sqrt(
        prop_reference * var_syn + (1 - prop_reference) * var_ref
    )
    quantiles_terms = sd_overall * quantile_alpha + quantile_power * var_terms
    # Add a small epsilon to avoid division by zero when there is no
    # treatment effect, i.e. p_hat = 0.5
    nobs_total = (quantiles_terms**2) / (
        prop_reference
        * (1 - prop_reference)
        * (relative_effect - 0.5 + 1e-12) ** 2
    )
    nobs_treat = nobs_total * (1 - prop_reference)
    nobs_ref = nobs_total * prop_reference
    return Holder(
        nobs_total=nobs_total.item(),
        nobs_treat=nobs_treat.item(),
        nobs_ref=nobs_ref.item(),
        relative_effect=relative_effect.item(),
        power=power,
        alpha=alpha,
    )
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@stats@nonparametric.py@.PATH_END.py
|
{
"filename": "_hovertemplate.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterternary/_hovertemplate.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `hovertemplate` attribute of `scatterternary`."""

    def __init__(
        self, plotly_name="hovertemplate", parent_name="scatterternary", **kwargs
    ):
        # Apply plotly's defaults unless the caller overrides them.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterternary@_hovertemplate.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "kimakan/FaintCOS",
"repo_path": "FaintCOS_extracted/FaintCOS-master/CustomConfLimits/setup.py",
"type": "Python"
}
|
from distutils.core import setup, Extension
import numpy
# Build the `CustomConfLim` C extension, compiling against the NumPy headers.
setup(
    name='CustomConfLim',
    version='1.0',
    ext_modules=[Extension('CustomConfLim', ['CustomConfLim.c'])],
    include_dirs=[numpy.get_include()],
)
|
kimakanREPO_NAMEFaintCOSPATH_START.@FaintCOS_extracted@FaintCOS-master@CustomConfLimits@setup.py@.PATH_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymap/hoverlabel/font/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Flaglist validator for `densitymap.hoverlabel.font.lineposition`."""

    def __init__(
        self,
        plotly_name="lineposition",
        parent_name="densitymap.hoverlabel.font",
        **kwargs,
    ):
        # Apply plotly's defaults unless the caller overrides them.
        defaults = {
            "array_ok": True,
            "edit_type": "none",
            "extras": ["none"],
            "flags": ["under", "over", "through"],
        }
        for option, value in defaults.items():
            kwargs.setdefault(option, value)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymap@hoverlabel@font@_lineposition.py@.PATH_END.py
|
{
"filename": "test_relation.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/test_autofit/analysis/test_relation.py",
"type": "Python"
}
|
import pickle
import pytest
import autofit as af
from autofit.mapper.prior.arithmetic.compound import SumPrior
from autofit.non_linear.analysis.indexed import IndexedAnalysis
from autofit.non_linear.analysis.model_analysis import (
CombinedModelAnalysis,
ModelAnalysis,
)
def test_pickle_indexed_analysis():
    """An IndexedAnalysis should survive a pickle round trip."""
    wrapped = IndexedAnalysis(LinearAnalysis(1), 0)
    payload = pickle.dumps(wrapped)
    pickle.loads(payload)
@pytest.fixture(name="model_analysis")
def make_model_analysis(Analysis, model):
    """Fixture: a plain Analysis bound to the test model via `with_model`."""
    return Analysis().with_model(model)
def test_analysis_model(model_analysis, model):
    """`modify_model` on a single model analysis returns the model unchanged."""
    assert model_analysis.modify_model(model) is model
@pytest.fixture(name="combined_model_analysis")
def make_combined_model_analysis(model_analysis):
    """Fixture: two copies of the model analysis combined with `+`."""
    return model_analysis + model_analysis
def test_combined_model_analysis(combined_model_analysis):
    """Adding model analyses yields a CombinedModelAnalysis of indexed children."""
    assert isinstance(combined_model_analysis, CombinedModelAnalysis)
    for indexed in combined_model_analysis.analyses:
        # Each child is wrapped with its index and still carries the model.
        assert isinstance(indexed, IndexedAnalysis)
        assert isinstance(indexed.analysis, ModelAnalysis)
def test_sum(model):
    """`sum()` over analyses also produces indexed ModelAnalysis wrappers."""
    combined = sum([af.Analysis().with_model(model)] * 3)
    for wrapped in combined.analyses:
        assert isinstance(wrapped, IndexedAnalysis)
        assert isinstance(wrapped.analysis, ModelAnalysis)
def test_modify(combined_model_analysis, model):
    """`modify_model` hands the same model to both child analyses."""
    first_model, second_model = combined_model_analysis.modify_model(model)
    assert first_model is model
    assert second_model is model
def test_default(combined_model_analysis, Analysis, model):
    """Adding a plain Analysis keeps the combined model behaviour."""
    extended = combined_model_analysis + Analysis()
    assert isinstance(extended, CombinedModelAnalysis)
    # All three children (two model analyses plus the plain one) get the model.
    first, second, third = extended.modify_model(model)
    assert first is model
    assert second is model
    assert third is model
def test_fit(combined_model_analysis, model):
    """Each of the two child analyses contributes 1 to the total likelihood."""
    medians_instance = af.Collection([model, model]).instance_from_prior_medians()
    assert combined_model_analysis.log_likelihood_function(medians_instance) == 2
def test_prior_arithmetic():
    """Arithmetic on priors composes into a derived value with two free priors."""
    gradient = af.UniformPrior()
    intercept = af.UniformPrior()
    line = gradient * 10 + intercept
    assert line.prior_count == 2
    # Medians are 0.5 each: 0.5 * 10 + 0.5 = 5.5.
    assert line.instance_from_prior_medians() == 5.5
class LinearAnalysis(af.Analysis):
    """Analysis whose likelihood peaks (at 0) when the instance equals `value`."""

    def __init__(self, value):
        self.value = value

    def log_likelihood_function(self, instance):
        # Negative absolute distance from the stored target value.
        difference = self.value - instance
        return -abs(difference)
class ComplexLinearAnalysis(LinearAnalysis):
    """LinearAnalysis variant that scores the `centre` attribute of an instance."""
    def log_likelihood_function(self, instance):
        # Delegate to the parent using the instance's centre value.
        return super().log_likelihood_function(instance.centre)
def test_embedded_model():
    """`replacing` returns a new model with only the targeted prior swapped."""
    original = af.Model(af.Gaussian)
    replaced = original.replacing({original.centre: af.UniformPrior()})
    assert replaced is not original
    assert replaced.centre != original.centre
    # Untouched priors are shared/equal between the two models.
    assert replaced.sigma == original.sigma
def test_names():
    """Compound priors report paths only for their prior components."""
    one = 1
    two = af.UniformPrior()
    # The constant operand contributes no path; only the prior `two` does.
    assert (one + two).paths == [("two",)]
    assert af.Add(one, two).paths == [("two",)]
def data(x):
    """Linear ground-truth function (slope 3, intercept 5) used by the tests."""
    return x * 3 + 5
@pytest.fixture(name="m")
def make_m():
    """Fixture: gradient prior centred on 3, matching the slope of `data`."""
    return af.GaussianPrior(mean=3, sigma=1)
@pytest.fixture(name="c")
def make_c():
    """Fixture: intercept prior centred on 5, matching the offset of `data`."""
    return af.GaussianPrior(mean=5, sigma=1)
def test_multiple_models(m, c):
    """Median instances of `x * m + c` increase monotonically with x."""
    medians = [
        (scale * m + c).instance_from_prior_medians() for scale in (1, 2, 3)
    ]
    assert medians[0] < medians[1] < medians[2]
def _test_embedded_integration(m, c):
    """Disabled integration test: fit Gaussian centres varying linearly across analyses.

    The leading underscore keeps pytest from collecting this (slow) test.
    """
    base_model = af.Model(af.Gaussian)
    # One analysis per x, each with its centre tied to the shared m, c priors.
    analyses = [
        ComplexLinearAnalysis(data(x)).with_model(
            base_model.replacing({base_model.centre: m * x + c})
        )
        for x in range(10)
    ]
    combined = sum(analyses)
    optimiser = af.DynestyStatic()
    result = optimiser.fit(None, combined)
    # Each fitted centre should recover data(x) to within 1%.
    centres = [result.instance.centre for result in result.child_results]
    assert centres == pytest.approx(
        list(map(data, range(10))),
        rel=0.01,
    )
def _test_integration(m, c):
    """Disabled integration test: jointly fit the shared m, c priors across analyses.

    The leading underscore keeps pytest from collecting this (slow) test.
    """
    analyses = [LinearAnalysis(data(x)).with_model(m * x + c) for x in range(10)]
    combined = sum(analyses)
    optimiser = af.DynestyStatic()
    result = optimiser.fit(None, combined)
    # Each child's best-fit instance should recover data(x) to within 1%.
    instances = [result.instance for result in result.child_results]
    assert instances == pytest.approx(
        list(map(data, range(10))),
        rel=0.01,
    )
def test_remove(model):
    """Replacing a prior with None removes it from the model."""
    model = model.replacing({model.centre: None})
    assert model.centre is None
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@test_autofit@analysis@test_relation.py@.PATH_END.py
|
{
"filename": "_control_flow_tutorial.ipynb",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/autograph/g3doc/reference/_control_flow_tutorial.ipynb",
"type": "Jupyter Notebook"
}
|
TODO(b/138297412): This colab retains some useful code snippets and demonstrations that used to be in the tf.function/AutoGraph customization tutorial, and should be rolled into the existing docs as part of a broader markdown->colab conversion.
```
import tensorflow as tf
```
Define a helper function to demonstrate the kinds of errors you might encounter:
```
import traceback
import contextlib
# Some helper code to demonstrate the kinds of errors you might encounter.
@contextlib.contextmanager
def assert_raises(error_class):
try:
yield
except error_class as e:
print('Caught expected exception \n {}:'.format(error_class))
traceback.print_exc(limit=2)
except Exception as e:
raise e
else:
raise Exception('Expected {} to be raised but no error was raised!'.format(
error_class))
```
## Using AutoGraph
The [autograph](https://www.tensorflow.org/guide/function) library is fully integrated with `tf.function`, and it will rewrite conditionals and loops which depend on Tensors to run dynamically in the graph.
`tf.cond` and `tf.while_loop` continue to work with `tf.function`, but code with control flow is often easier to write and understand when written in imperative style.
## AutoGraph: Conditionals
AutoGraph will convert `if` statements into the equivalent `tf.cond` calls.
This substitution is made if the condition is a Tensor. Otherwise, the conditional is executed during tracing.
Here is a function that checks if the resulting graph uses `tf.cond`:
```
def test_tf_cond(f, *args):
g = f.get_concrete_function(*args).graph
if any(node.name == 'cond' for node in g.as_graph_def().node):
print("{}({}) uses tf.cond.".format(
f.__name__, ', '.join(map(str, args))))
else:
print("{}({}) executes normally.".format(
f.__name__, ', '.join(map(str, args))))
print(" result: ",f(*args).numpy())
```
This substitution is made if the condition is a Tensor. Otherwise, the conditional is executed during tracing.
Passing a python `True` executes the conditional normally:
```
@tf.function
def dropout(x, training=True):
if training:
x = tf.nn.dropout(x, rate=0.5)
return x
```
```
test_tf_cond(dropout, tf.ones([10], dtype=tf.float32), True)
```
But passing a tensor replaces the python `if` with a `tf.cond`:
```
test_tf_cond(dropout, tf.ones([10], dtype=tf.float32), tf.constant(True))
```
`tf.cond` has a number of subtleties.
It works by tracing both sides of the conditional and then choosing the appropriate branch at runtime, depending on the condition. Tracing both sides can result in unexpected execution of Python code.
```
@tf.function
def f(x):
if x > 0:
x = x + 1.
print("Tracing `then` branch")
else:
x = x - 1.
print("Tracing `else` branch")
return x
```
```
f(-1.0).numpy()
```
```
f(1.0).numpy()
```
```
f(tf.constant(1.0)).numpy()
```
It requires that if one branch creates a tensor used downstream, the other branch must also create that tensor.
```
@tf.function
def f():
if tf.constant(True):
x = tf.ones([3, 3])
return x
# Throws an error because both branches need to define `x`.
with assert_raises(ValueError):
f()
```
If you want to be sure that a particular section of control flow is never converted by autograph, then explicitly convert the object to a python type so an error is raised instead:
```
@tf.function
def f(x, y):
if bool(x):
y = y + 1.
print("Tracing `then` branch")
else:
y = y - 1.
print("Tracing `else` branch")
return y
```
```
f(True, 0).numpy()
```
```
f(False, 0).numpy()
```
```
with assert_raises(TypeError):
f(tf.constant(True), 0.0)
```
## AutoGraph and loops
AutoGraph has a few simple rules for converting loops.
- `for`: Convert if the iterable is a tensor
- `while`: Convert if the while condition depends on a tensor
If a loop is converted, it will be dynamically unrolled with `tf.while_loop`, or in the special case of a `for x in tf.data.Dataset`, transformed into `tf.data.Dataset.reduce`.
If a loop is _not_ converted, it will be statically unrolled.
```
def test_dynamically_unrolled(f, *args):
g = f.get_concrete_function(*args).graph
if any(node.name == 'while' for node in g.as_graph_def().node):
print("{}({}) uses tf.while_loop.".format(
f.__name__, ', '.join(map(str, args))))
elif any(node.name == 'ReduceDataset' for node in g.as_graph_def().node):
print("{}({}) uses tf.data.Dataset.reduce.".format(
f.__name__, ', '.join(map(str, args))))
else:
print("{}({}) gets unrolled.".format(
f.__name__, ', '.join(map(str, args))))
```
### For loops
Here is a `tf.function` that demonstrates static unrolling:
```
@tf.function
def for_in_range():
x = 0
for i in range(5):
x += i
return x
test_dynamically_unrolled(for_in_range)
```
```
@tf.function
def for_in_tfrange():
x = tf.constant(0, dtype=tf.int32)
for i in tf.range(5):
x += i
return x
test_dynamically_unrolled(for_in_tfrange)
```
```
@tf.function
def for_in_tfdataset():
x = tf.constant(0, dtype=tf.int64)
for i in tf.data.Dataset.range(5):
x += i
return x
test_dynamically_unrolled(for_in_tfdataset)
```
```
@tf.function
def while_py_cond():
x = 5
while x > 0:
x -= 1
return x
test_dynamically_unrolled(while_py_cond)
```
```
@tf.function
def while_tf_cond():
x = tf.constant(5)
while x > 0:
x -= 1
return x
test_dynamically_unrolled(while_tf_cond)
```
If you have a `break` or early `return` clause that depends on a tensor, the top-level condition or iterable should also be a tensor.
Compare the following examples:
```
@tf.function
def while_py_true_py_break(x):
while True: # py true
if x == 0: # py break
break
x -= 1
return x
test_dynamically_unrolled(while_py_true_py_break, 5)
```
```
@tf.function
def buggy_while_py_true_tf_break(x):
while True: # py true
if tf.equal(x, 0): # tf break
break
x -= 1
return x
with assert_raises(TypeError):
test_dynamically_unrolled(buggy_while_py_true_tf_break, 5)
```
```
@tf.function
def while_tf_true_tf_break(x):
while tf.constant(True): # tf true
if x == 0: # py break
break
x -= 1
return x
test_dynamically_unrolled(while_tf_true_tf_break, 5)
```
```
@tf.function
def buggy_py_for_tf_break():
x = 0
for i in range(5): # py for
if tf.equal(i, 3): # tf break
break
x += i
return x
with assert_raises(TypeError):
test_dynamically_unrolled(buggy_py_for_tf_break)
```
```
@tf.function
def tf_for_py_break():
x = 0
for i in tf.range(5): # tf for
if i == 3: # py break
break
x += i
return x
test_dynamically_unrolled(tf_for_py_break)
```
In order to accumulate results from a dynamically unrolled loop, you'll want to use `tf.TensorArray`.
```
batch_size = 2
seq_len = 3
feature_size = 4
def rnn_step(inp, state):
return inp + state
@tf.function
def dynamic_rnn(rnn_step, input_data, initial_state):
# [batch, time, features] -> [time, batch, features]
input_data = tf.transpose(input_data, [1, 0, 2])
max_seq_len = input_data.shape[0]
states = tf.TensorArray(tf.float32, size=max_seq_len)
state = initial_state
for i in tf.range(max_seq_len):
state = rnn_step(input_data[i], state)
states = states.write(i, state)
return tf.transpose(states.stack(), [1, 0, 2])
dynamic_rnn(rnn_step,
tf.random.uniform([batch_size, seq_len, feature_size]),
tf.zeros([batch_size, feature_size]))
```
### Gotchas
As with `tf.cond`, `tf.while_loop` also comes with a number of subtleties.
#### Zero iterations
Since a loop can execute 0 times, all tensors used downstream of the while_loop must be initialized above the loop.
Here is an example of incorrect code:
```
@tf.function
def buggy_loop_var_uninitialized():
for i in tf.range(3):
x = i
return x
with assert_raises(ValueError):
buggy_loop_var_uninitialized()
```
And the correct version:
```
@tf.function
def f():
x = tf.constant(0)
for i in tf.range(3):
x = i
return x
f()
```
#### Consistent shapes and types
The shape/dtypes of all loop variables must stay consistent with each iteration.
Here is an incorrect example that attempts to change a tensor's type:
```
@tf.function
def buggy_loop_type_changes():
x = tf.constant(0, dtype=tf.float32)
for i in tf.range(3): # Yields tensors of type tf.int32...
x = i
return x
with assert_raises(TypeError):
buggy_loop_type_changes()
```
Here is an incorrect example that attempts to change a Tensor's shape while iterating:
```
@tf.function
def buggy_concat():
x = tf.ones([0, 10])
for i in tf.range(5):
x = tf.concat([x, tf.ones([1, 10])], axis=0)
return x
with assert_raises(ValueError):
buggy_concat()
```
```
@tf.function
def concat_with_padding():
x = tf.zeros([5, 10])
for i in tf.range(5):
x = tf.concat([x[:i], tf.ones([1, 10]), tf.zeros([4-i, 10])], axis=0)
x.set_shape([5, 10])
return x
concat_with_padding()
```
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@autograph@g3doc@reference@_control_flow_tutorial.ipynb@.PATH_END.py
|
{
"filename": "_weightsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnel/textfont/_weightsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for the `weightsrc` attribute of `funnel.textfont`."""

    def __init__(
        self, plotly_name="weightsrc", parent_name="funnel.textfont", **kwargs
    ):
        # Apply plotly's default edit type unless the caller overrides it.
        kwargs.setdefault("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnel@textfont@_weightsrc.py@.PATH_END.py
|
{
"filename": "_hovertemplatesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmapbox/_hovertemplatesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for the `hovertemplatesrc` attribute of `choroplethmapbox`."""

    def __init__(
        self, plotly_name="hovertemplatesrc", parent_name="choroplethmapbox", **kwargs
    ):
        # Apply plotly's default edit type unless the caller overrides it.
        kwargs.setdefault("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmapbox@_hovertemplatesrc.py@.PATH_END.py
|
{
"filename": "rfi_inspect_2458136.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/rfi_inspect/rfi_inspect_2458136.ipynb",
"type": "Jupyter Notebook"
}
|
# RFI Inspection Daily RTP Notebook
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import glob
import os
from astropy import units
from copy import deepcopy
from pyuvdata import UVFlag
import matplotlib.colors as colors
from matplotlib import cm
from IPython.display import display, HTML
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
display(HTML("<style>.container { width:100% !important; }</style>"))
```
<style>.container { width:100% !important; }</style>
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
JD = int(JD)
```
JD = "2458136"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458136"
```python
uvf = UVFlag(f'{data_path}/zen.{JD}.total_threshold_and_a_priori_flags.h5')
# Load in the metadata for easier plotting.
freqs = np.unique(uvf.freq_array)
times = np.unique(uvf.time_array)
lsts = np.unique(uvf.lst_array)
chans = np.arange(freqs.size)
plot_times = times - np.floor(times[0])
lsts_hr = lsts * units.rad.to("cycle") * units.day.to("hr")
freqs_MHz = freqs * units.Hz.to("MHz")
extent = (freqs_MHz[0], freqs_MHz[-1], plot_times[-1], plot_times[0])
```
```python
plt.figure(figsize=(16,12))
cax = plt.imshow(uvf.flag_array[:,:,0], aspect='auto', interpolation='nearest',
extent=[uvf.freq_array[0] / 1e6, uvf.freq_array[-1] / 1e6,
uvf.time_array[-1] - JD, uvf.time_array[0] - JD])
plt.xlabel('Frequency (MHz)')
plt.ylabel(f'JD - {JD}')
ax2 = plt.gca().twinx()
ax2.set_ylim([uvf.lst_array[0] * 12 / np.pi, uvf.lst_array[-1] * 12 / np.pi])
ax2.invert_yaxis()
ax2.set_ylabel('LST (hours)')
ax3 = plt.gca().twiny()
ax3.set_xlim([0, uvf.Nfreqs - 1])
ax3.set_xlabel('Channel');
```
Text(0.5, 0, 'Channel')

# Figure 1(a): Full day of XRFI flags
Yellow is flagged. Blue is unflagged.
```python
xrfi_dirs = sorted(glob.glob(f'{data_path}/zen.{JD}.?????.xrfi'))
print(f'Found {len(xrfi_dirs)} directories containing XRFI intermediate data products.')
files1 = [glob.glob(f'{d}/*combined_metrics1.h5')[0] for d in xrfi_dirs]
print(f'Found {len(files1)} combined round 1 XRFI metrics files.')
files2 = [glob.glob(f'{d}/*combined_metrics2.h5')[0] for d in xrfi_dirs]
print(f'Found {len(files2)} combined round 2 XRFI metrics files.')
uvf1 = UVFlag(files1)
uvf2 = UVFlag(files2)
uvf2.metric_array = np.where(np.isinf(uvf2.metric_array), uvf1.metric_array,
uvf2.metric_array)
```
Found 73 directories containing XRFI intermediate data products.
Found 73 combined round 1 XRFI metrics files.
Found 73 combined round 2 XRFI metrics files.
```python
plt.figure(figsize=(16,12))
max_abs = 100
if np.max(uvf2.metric_array) > max_abs:
extend = 'max'
if np.min(uvf2.metric_array) < -max_abs:
extend = 'both'
elif np.min(uvf2.metric_array) < -max_abs:
extend = 'min'
else:
extend = 'neither'
plt.imshow(uvf2.metric_array[:,:,0], aspect='auto', cmap='RdBu_r',
norm=colors.SymLogNorm(linthresh=1,vmin=-max_abs, vmax=max_abs),
extent=[uvf.freq_array[0] / 1e6, uvf.freq_array[-1] / 1e6,
uvf.time_array[-1] - JD, uvf.time_array[0] - JD])
plt.colorbar(pad=.07, extend=extend,
label='RFI Detection Significance ($\sigma$s)')
plt.title('Combined XRFI Metrics')
plt.xlabel('Frequency (MHz)')
plt.ylabel(f'JD - {JD}')
ax2 = plt.gca().twinx()
ax2.set_ylim([uvf.lst_array[0] * 12 / np.pi, uvf.lst_array[-1] * 12 / np.pi])
ax2.invert_yaxis()
ax2.set_ylabel('LST (hours)')
ax3 = plt.gca().twiny()
ax3.set_xlim([0, uvf.Nfreqs - 1])
ax3.set_xlabel('Channel');
```
default base will change from np.e to 10 in 3.4. To suppress this warning specify the base keyword argument.
Text(0.5, 0, 'Channel')

## Figure 2(a): Combined XRFI Detection Significance
This figure shows round 2 XRFI metrics (mean filter outliers) combined in quadrature. When flagged in round 1 of XRFI, round 1's combined median filter metrics are used instead.
```python
# Load in the flags from each round of XRFI flagging.
low_level_flag_labels = (
"abscal_chi_sq_flags1",
"abscal_chi_sq_flags2",
"ag_flags1",
"ag_flags2",
"apriori_flags",
"auto_flags1",
"auto_flags2",
"ax_flags1",
"ax_flags2",
"combined_flags1",
"combined_flags2",
"cross_flags1",
"cross_flags2",
"flags1",
"flags2",
"og_flags1",
"og_flags2",
"omnical_chi_sq_flags1",
"omnical_chi_sq_flags2",
"ox_flags1",
"ox_flags2",
"v_flags1",
"v_flags2",
)
# Keep the thresholded flags separate for easier analysis.
thresholded_flag_labels = (
"abscal_chi_sq_renormed_threshold_flags",
"ag_threshold_flags",
"auto_threshold_flags",
"ax_threshold_flags",
"combined_threshold_flags",
"cross_threshold_flags",
"og_threshold_flags",
"omnical_chi_sq_renormed_threshold_flags",
"ox_threshold_flags",
"v_threshold_flags",
"total_threshold_and_a_priori_flags",
)
low_level_flags = {}
for file_id in low_level_flag_labels:
flag_files = []
for xrfi_dir in xrfi_dirs:
matching_files = glob.glob(os.path.join(xrfi_dir, f"*.{file_id}.h5"))
if len(matching_files) > 0:
flag_files.append(matching_files[0])
if len(flag_files) > 0:
uvf = UVFlag(flag_files)
low_level_flags[file_id] = np.squeeze(uvf.flag_array)
thresholded_flags = {}
for file_id in thresholded_flag_labels:
flag_file = f"{data_path}/zen.{JD}.{file_id}.h5"
if os.path.exists(flag_file):
uvf = UVFlag(flag_file)
thresholded_flags[file_id] = np.squeeze(uvf.flag_array)
all_flags = dict(**low_level_flags, **thresholded_flags)
```
```python
label_mapping = {
f"Round {i}": {
"Priors": ("apriori_flags", "flags1")[i-1],
"Autocorrs": f"auto_flags{i}",
"Crosscorrs": f"cross_flags{i}",
"Omnical\nVisibilities": f"v_flags{i}",
"Omnical\nGains": f"og_flags{i}",
r"Omnical $\chi^2$": f"ox_flags{i}",
"Omnical\nGlobal $\chi^2$": f"omnical_chi_sq_flags{i}",
"Abscal\nGains": f"ag_flags{i}",
r"Abscal $\chi^2$": f"ax_flags{i}",
r"Abscal\nGlobal $\chi^2$": f"abscal_chi_sq_flags{i}",
"Combined\nMetrics": f"combined_flags{i}",
} for i in (1,2)
}
label_mapping["Round 3"] = {
"Priors": "flags2",
"Autocorrs": "auto_threshold_flags",
"Crosscorrs": "cross_threshold_flags",
"Omnical\nGains": "og_threshold_flags",
r"Omnical $\chi^2$": "ox_threshold_flags",
"Omnical\nGlobal $\chi^2$": f"omnical_chi_sq_renormed_threshold_flags",
"Omnical\nVisibilities": "v_threshold_flags",
"Abscal\nGains": "ag_threshold_flags",
r"Abscal $\chi^2$": "ax_threshold_flags",
r"Abscal\nGlobal $\chi^2$": f"abscal_chi_sq_renormed_threshold_flags",
"Combined\nMetrics": "combined_threshold_flags",
'Final\nFlags': "total_threshold_and_a_priori_flags",
}
# remove labels for metrics not used
label_mapping = {rnd: {label: flags for label, flags in labels.items() if flags in all_flags}
for rnd, labels in label_mapping.items()}
```
```python
# Pick easily distinguishable colors
color_palette = (
'#000000', #black
'#ffffff', #white
'#800000', #maroon
'#808000', #olive
'#008b8b', #darkcyan
'#000080', #navy
'#ff8c00', #darkorange
'#ffff00', #yellow
'#00ff00', #lime
'#0000ff', #blue
'#ff00ff', #fuchsia
'#1e90ff', #dodgerblue
'#98fb98', #palegreen
'#ff1493', #deeppink
)
# assign a unique color to a label
label_to_color_map = {"Unflagged": color_palette[0]}
color_index = 1
for mapping in label_mapping.values():
for label in tuple(mapping.keys()) + ("2+ Separate\nMetrics",):
if label not in label_to_color_map:
label_to_color_map[label] = color_palette[color_index]
color_index += 1
```
```python
# Figure out which flags are unique to each step and source
unique_flags_by_stage = {}
for round_label, mapping in label_mapping.items():
unique_flags_by_stage[round_label] = {}
# handle prior flags
prior_flags = low_level_flags[mapping["Priors"]]
unique_flags_by_stage[round_label]["Priors"] = prior_flags
# handle all other flag types
overlap_flags = np.zeros_like(np.squeeze(uvf.flag_array))
for label, file_id in mapping.items():
if label in ["Priors", "Final\nFlags", "Combined\nMetrics"]: # skip these, they are special
continue
flags = all_flags[file_id]
unique_flags = flags.copy()
for other_label, other_file_id in mapping.items():
if other_label in [label, "Priors", "Final\nFlags", "Combined\nMetrics"]:
continue
other_flags = all_flags[other_file_id]
unique_flags &= ~other_flags
overlap_region = flags & other_flags & ~prior_flags
overlap_flags[overlap_region] = True
unique_flags_by_stage[round_label][label] = unique_flags
unique_flags_by_stage[round_label]["2+ Separate\nMetrics"] = overlap_flags
# handle combined metrics separately so that it doesn't affect "2+ Separate\nMetrics"
all_flags_so_far = np.sum(list(unique_flags_by_stage[round_label].values()), axis=0).astype(bool)
combined_metrics_flags = all_flags[mapping["Combined\nMetrics"]]
unique_flags_by_stage[round_label]["Combined\nMetrics"] = combined_metrics_flags & ~all_flags_so_far
# Figure out which flags got applied at the very end when the a priori YAML was used
all_other_round_3_flags = np.sum([flags for flags in unique_flags_by_stage['Round 3'].values()], axis=0).astype(bool)
unique_flags_by_stage['Round 3']["Final\nFlags"] = all_flags[label_mapping['Round 3']["Final\nFlags"]] & (~all_other_round_3_flags)
```
```python
cmap = plt.cm.colors.ListedColormap(list(label_to_color_map.values()))
norm = plt.cm.colors.Normalize(vmin=0, vmax=1)
smap = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
colored_flags = {}
for round_label, flag_dict in unique_flags_by_stage.items():
colored_flags[round_label] = np.zeros(np.squeeze(uvf.flag_array).shape)
for label, flags in flag_dict.items():
colored_flags[round_label][flags] = list(label_to_color_map.keys()).index(label) / (len(label_to_color_map) - 1)
```
```python
def plot_flag_evolution(freq_slice):
    """Plot one color-coded flag waterfall per flagging round for the selected channels.

    freq_slice : integer index array selecting which channels (columns) to display.
    Relies on notebook globals defined in earlier cells: colored_flags, plot_times,
    lsts_hr, freqs_MHz, JD, cmap, smap, and label_to_color_map.
    """
fig, axes = plt.subplots(len(colored_flags), figsize=(15, 11 * len(colored_flags)), dpi=300)
# Figure out the details for which part of the flag arrays to plot.
tmin, tmax = plot_times[0], plot_times[-1]
lstmin, lstmax = lsts_hr[0], lsts_hr[-1]
# A step of (freq_slice.size - 1) keeps only the first and last selected frequencies.
fmin, fmax = freqs_MHz[freq_slice][::freq_slice.size - 1]
# imshow extent is (left, right, bottom, top), so time increases downward.
extent = (fmin, fmax, tmax, tmin)
# Actually plot the things.
for ax, (label, flags) in zip(axes, colored_flags.items()):
ax.set_title(label, fontsize=16)
ax.imshow(flags[:,freq_slice], aspect="auto", extent=extent, cmap=cmap, vmin=0, vmax=1)
# Twin axes show LST (right edge) and channel number (top edge) alongside JD/frequency.
twinx = ax.twinx()
twiny = ax.twiny()
twinx.set_ylim(lstmax, lstmin)
twiny.set_xlim(freq_slice[0], freq_slice[-1])
ax.set_xlabel("Frequency (MHz)", fontsize=12)
ax.set_ylabel(f"JD - {JD}", fontsize=12)
twinx.set_ylabel("LST (hour)", fontsize=12)
twiny.set_xlabel("Channel", fontsize=12)
fig.tight_layout()
# One categorical colorbar per panel, ticked at the midpoint of each label's color band.
for ax in axes.ravel():
cbar = fig.colorbar(smap, ax=ax, orientation="horizontal", pad=0.1)
cbar.set_ticks(np.linspace(0, 1, 2 * len(cmap.colors) + 1)[1::2])
cbar.set_ticklabels(list(label_to_color_map.keys()))
```
```python
# Plot flags in the low-band.
if np.any(freqs_MHz < 100):
freq_slice = np.argwhere(freqs_MHz < 100).flatten() # Low-band, pre-FM
plot_flag_evolution(freq_slice)
```
## Figure 3: Flag Evolution in the Low Band
This figure delineates which steps different flags are introduced in, but does not make a distinction between sources when multiple flagging routines flag the same region of the waterfall. The plot shows flags for frequencies below the FM band, for the entire night. The top plot shows the flags for the first round of flagging (median filter), where the prior flags are the apriori flags; the middle plot shows the flags for the second round of flagging (mean filter), where the prior flags are the combined flags from the first round of flagging (plus extra flags based on the metrics added in quadrature); the bottom plot shows the flags for the final round of flagging (thresholding), where the prior flags are the combined flags from round 2 (plus extra flags based on the metrics added in quadrature). After threshold flagging, the "final flags" also include any apriori flags from the YAML files. *Note: for H1C data, this plot will be skipped.*
```python
# Plot flags in the mid-band.
freq_slice = np.argwhere(np.logical_and(freqs_MHz >= 100, freqs_MHz < 200)).flatten()
plot_flag_evolution(freq_slice)
```

## Figure 4: Flag Evolution in the Mid-Band
This figure delineates which steps different flags are introduced in, but does not make a distinction between sources when multiple flagging routines flag the same region of the waterfall. The plot shows flags for frequencies between the FM band and the analog TV band, for the entire night. The top plot shows the flags for the first round of flagging (median filter), where the prior flags are the apriori flags; the middle plot shows the flags for the second round of flagging (mean filter), where the prior flags are the combined flags from the first round of flagging (plus extra flags based on the metrics added in quadrature); the bottom plot shows the flags for the final round of flagging (thresholding), where the prior flags are the combined flags from round 2 (plus extra flags based on the metrics added in quadrature). After threshold flagging, the "final flags" also include any apriori flags from the YAML files.
```python
# Calculate occupancies for different important sets of flags.
label_mapping = {
"A Priori": "apriori_flags",
"Median Filter": "flags1",
"Mean Filter": "flags2",
"Thresholding": "total_threshold_and_a_priori_flags",
}
occupancies = {}
for axis, axis_label in enumerate(("Frequency", "Time")):
occupancies[axis_label] = {}
for flag_label, flag_id in label_mapping.items():
flags = all_flags[flag_id]
occupancies[axis_label][flag_label] = flags.mean(axis=(1-axis))
```
```python
fig, axes = plt.subplots(2, figsize=(15,14), dpi=200)
for i, items in enumerate(zip(axes.ravel(), occupancies.items())):
ax, (occupancy_axis, flag_dict) = items
xvalues = (plot_times, freqs_MHz)[i]
alt_xvalues = (lsts_hr, chans)[i]
xlabel = (f"JD - {JD}", "Frequency (MHz)")[i]
ylabel = (
"Fraction of Channels Flagged",
"Fraction of Integrations Flagged"
)[i]
alt_xlabel = ("LST (hours)", "Channel")[i]
ax.set_xlabel(xlabel, fontsize=12)
ax.set_ylabel(ylabel, fontsize=12)
for flag_label, occupancy in flag_dict.items():
ax.plot(xvalues, occupancy, label=flag_label)
twin_ax = ax.twiny()
twin_ax.set_xlim(alt_xvalues[0], alt_xvalues[-1])
twin_ax.set_xlabel(alt_xlabel, fontsize=12)
ax.legend()
```

## Figure 5: Flagging Occupancies
These plots show the flagging occupancies for the Round 0 Flags (Apriori), Round 1 Flags (Median Filter), Round 2 Flags (Mean Filter), and Round 3 Flags (Thresholding). The top plot shows the fraction of channels flagged at each integration for each set of flags, and the bottom plot shows the fraction of integrations flagged as a function of frequency.
# Metadata
```python
from hera_qm import version
print(version.construct_version_info())
```
{'version': '1.0', 'git_origin': 'git@github.com:HERA-Team/hera_qm.git', 'git_hash': 'a15c511f7e0fc30602257c9eb5ff761bc83ef6a5', 'git_description': 'v1.1-313-ga15c511', 'git_branch': 'master'}
```python
```
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@rfi_inspect@rfi_inspect_2458136.ipynb@.PATH_END.py
|
{
"filename": "Thinning time grids.ipynb",
"repo_name": "HajimeKawahara/juwvid",
"repo_path": "juwvid_extracted/juwvid-master/ipynb/Thinning time grids.ipynb",
"type": "Jupyter Notebook"
}
|
## Reducing computational time and memory
last update: 9/30 (2017)
```julia
import DSP
using PyPlot
```
```julia
include("../juwvid.jl")
```
juwvid
## STFT
```julia
## sin FM
nsample=1024
x,y,iw,ynorm=sampledata.genlinfm(nsample,1.0,0.01);
```
```julia
tfrst=stft.tfrstft(y);
```
Use fft.
### Thinning time grids using 1 grid in 10 grids.
```julia
nthin=10
itc=collect(1:nthin:nsample);
```
```julia
tfrsti=stft.tfrstft(y,NaN,NaN,NaN,itc);
```
Use fft.
```julia
fig=PyPlot.figure()
ax = fig[:add_subplot](1,2,1)
a=juwplot.wtfrshow(abs.(tfrst),x[2]-x[1],x[1],x[end],NaN,NaN,1.0)
PyPlot.xlabel("time [day]")
PyPlot.ylabel("frequency [1/day]")
ax = fig[:add_subplot](1,2,2)
a=juwplot.wtfrshow(abs.(tfrsti),(x[2]-x[1])*nthin,x[1],x[end],NaN,NaN,1.0)
PyPlot.xlabel("time [day]")
```

PyObject <matplotlib.text.Text object at 0x7f91cc9ff9b0>
## WV
```julia
## sin FM
nsample=1024
x,y,iw,ynorm=sampledata.genlinfm(nsample,1.0,0.01);
z=DSP.Util.hilbert(y);
```
```julia
tfrfc=cohenclass.tfrwv(z,NaN,NaN,NaN,NaN,0);
```
Single Wigner Ville
Use fft.
### Thinning time grids using 1 grid in 10 grids.
```julia
nthin=10
itc=collect(1:nthin:nsample);
```
```julia
tfrfi=cohenclass.tfrwv(z,NaN,NaN,NaN,itc,0);
```
Single Wigner Ville
Use fft.
```julia
fig=PyPlot.figure()
ax = fig[:add_subplot](1,2,1)
a=juwplot.tfrshow(abs.(tfrfc),x[2]-x[1],x[1],x[end],NaN,NaN,1.0)
PyPlot.xlabel("time [day]")
PyPlot.ylabel("frequency [1/day]")
ax = fig[:add_subplot](1,2,2)
a=juwplot.tfrshow(abs.(tfrfi),(x[2]-x[1])*nthin,x[1],x[end],NaN,NaN,1.0)
PyPlot.xlabel("time [day]")
```

PyObject <matplotlib.text.Text object at 0x7f91cc6b8dd8>
### pseudo WV
```julia
## sin FM
nsample=4096;
xs,ys,iws,ynorms=sampledata.genfm(nsample,2*pi,2*pi/100.0,30.0,365.0);
z=DSP.Util.hilbert(ys);
```
```julia
tfrpfc=cohenclass.tfrpwv(z);
```
Single pseudo Wigner Ville
Use fft.
```julia
### using 1 grid in 10 grids
nthin=20
itc=collect(1:nthin:nsample);
tfrpfi=cohenclass.tfrpwv(z,NaN,NaN,NaN,itc,NaN,0);
```
Single pseudo Wigner Ville
Use fft.
```julia
fig=PyPlot.figure()
ax = fig[:add_subplot](1,2,1)
a=juwplot.tfrshow(abs.(tfrpfc),x[2]-x[1],x[1],x[end],NaN,NaN,1.0)
PyPlot.xlabel("time [day]")
PyPlot.ylabel("frequency [1/day]")
ax = fig[:add_subplot](1,2,2)
a=juwplot.tfrshow(abs.(tfrpfi),(x[2]-x[1])*nthin,x[1],x[end],NaN,NaN,1.0)
PyPlot.xlabel("time [day]")
```

PyObject <matplotlib.text.Text object at 0x7f91c45bb390>
```julia
## sin FM
nsample=4096;
xs,ys,iws,ynorms=sampledata.genfm(nsample,2*pi,2*pi/200.0,10.0,720.0);
z=DSP.Util.hilbert(ys);
PyPlot.plot(xs,iws/(2*pi))
```

1-element Array{PyCall.PyObject,1}:
PyObject <matplotlib.lines.Line2D object at 0x7f91b84567f0>
```julia
nnufft=100
fs,fe=juwutils.frequency_to_index([0.95,1.05], xs[2]-xs[1], nsample,nnufft)
```
2-element Array{Float64,1}:
1368.33
1512.37
```julia
fin=collect(linspace(fs,fe,nnufft))
tfrpfc=cohenclass.tfrpwv(z,NaN,NaN,fin,NaN,NaN,0,"nufft");
```
Single pseudo Wigner Ville
Use nufft.
```julia
itc=collect(1:nthin:nsample);
tfrpfi=cohenclass.tfrpwv(z,NaN,NaN,fin,itc,NaN,0,"nufft");
```
Single pseudo Wigner Ville
Use nufft.
```julia
fig=PyPlot.figure()
ax = fig[:add_subplot](1,2,1)
a=juwplot.tfrshow(abs.(tfrpfc),x[2]-x[1],x[1],x[end],fin[1],fin[end],0.7)
PyPlot.xlabel("time [day]")
PyPlot.ylabel("frequency [1/day]")
ax = fig[:add_subplot](1,2,2)
a=juwplot.tfrshow(abs.(tfrpfi),(x[2]-x[1])*nthin,x[1],x[end],fin[1],fin[end],0.7)
PyPlot.xlabel("time [day]")
```

PyObject <matplotlib.text.Text object at 0x7f91cc1bb0b8>
```julia
```
|
HajimeKawaharaREPO_NAMEjuwvidPATH_START.@juwvid_extracted@juwvid-master@ipynb@Thinning time grids.ipynb@.PATH_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/image/hoverlabel/font/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Validator for the ``lineposition`` flaglist of ``image.hoverlabel.font``."""

    def __init__(
        self, plotly_name="lineposition", parent_name="image.hoverlabel.font", **kwargs
    ):
        # Fill in package defaults only for options the caller did not supply.
        defaults = {
            "array_ok": True,
            "edit_type": "none",
            "extras": ["none"],
            "flags": ["under", "over", "through"],
        }
        for option, value in defaults.items():
            kwargs.setdefault(option, value)
        super(LinepositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@image@hoverlabel@font@_lineposition.py@.PATH_END.py
|
{
"filename": "bayesian_regression.ipynb",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tutorial/source/bayesian_regression.ipynb",
"type": "Jupyter Notebook"
}
|
# Bayesian Regression - Introduction (Part 1)
Regression is one of the most common and basic supervised learning tasks in machine learning. Suppose we're given a dataset $\mathcal{D}$ of the form
$$ \mathcal{D} = \{ (X_i, y_i) \} \qquad \text{for}\qquad i=1,2,...,N$$
The goal of linear regression is to fit a function to the data of the form:
$$ y = w X + b + \epsilon $$
where $w$ and $b$ are learnable parameters and $\epsilon$ represents observation noise. Specifically $w$ is a matrix of weights and $b$ is a bias vector.
In this tutorial, we will first implement linear regression in PyTorch and learn point estimates for the parameters $w$ and $b$. Then we will see how to incorporate uncertainty into our estimates by using Pyro to implement Bayesian regression. Additionally, we will learn how to use Pyro's utility functions to make predictions and serve our model using `TorchScript`.
## Tutorial Outline
- [Setup](#Setup)
- [Dataset](#Dataset)
- [Linear Regression](#Linear-Regression)
- [Training with PyTorch Optimizers](#Training-with-PyTorch-Optimizers)
- [Regression Fit](#Plotting-the-Regression-Fit)
- [Bayesian Regression with Pyro's SVI](#Bayesian-Regression-with-Pyro's-Stochastic-Variational-Inference-%28SVI%29)
- [Model](#Model)
- [Using an AutoGuide](#Using-an-AutoGuide)
- [Optimizing the Evidence Lower Bound](#Optimizing-the-Evidence-Lower-Bound)
- [Model Evaluation](#Model-Evaluation)
- [Serving the Model using TorchScript](#Model-Serving-via-TorchScript)
## Setup
Let's begin by importing the modules we'll need.
```python
%reset -s -f
```
```python
import os
from functools import partial
import torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pyro
import pyro.distributions as dist
# for CI testing
smoke_test = ('CI' in os.environ)
assert pyro.__version__.startswith('1.9.1')
pyro.set_rng_seed(1)
# Set matplotlib settings
%matplotlib inline
plt.style.use('default')
```
### Dataset
The following example is adapted from \[1\]. We would like to explore the relationship between topographic heterogeneity of a nation as measured by the Terrain Ruggedness Index (variable *rugged* in the dataset) and its GDP per capita. In particular, it was noted by the authors in \[2\] that terrain ruggedness or bad geography is related to poorer economic performance outside of Africa, but rugged terrains have had a reverse effect on income for African nations. Let us look at the data and investigate this relationship. We will be focusing on three features from the dataset:
- `rugged`: quantifies the Terrain Ruggedness Index
- `cont_africa`: whether the given nation is in Africa
- `rgdppc_2000`: Real GDP per capita for the year 2000
The response variable GDP is highly skewed, so we will log-transform it.
```python
DATA_URL = "https://d2hg8soec8ck9v.cloudfront.net/datasets/rugged_data.csv"
data = pd.read_csv(DATA_URL, encoding="ISO-8859-1")
df = data[["cont_africa", "rugged", "rgdppc_2000"]]
df = df[np.isfinite(df.rgdppc_2000)]
df["rgdppc_2000"] = np.log(df["rgdppc_2000"])
```
```python
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6), sharey=True)
african_nations = df[df["cont_africa"] == 1]
non_african_nations = df[df["cont_africa"] == 0]
sns.scatterplot(x=non_african_nations["rugged"],
y=non_african_nations["rgdppc_2000"],
ax=ax[0])
ax[0].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="Non African Nations")
sns.scatterplot(x=african_nations["rugged"],
y=african_nations["rgdppc_2000"],
ax=ax[1])
ax[1].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="African Nations");
```

## Linear Regression
We would like to predict log GDP per capita of a nation as a function of two features from the dataset - whether the nation is in Africa, and its Terrain Ruggedness Index. We will create a trivial class called `PyroModule[nn.Linear]` that subclasses [PyroModule](http://docs.pyro.ai/en/dev/nn.html#module-pyro.nn.module) and `torch.nn.Linear`. `PyroModule` is very similar to PyTorch's `nn.Module`, but additionally supports [Pyro primitives](http://docs.pyro.ai/en/dev/primitives.html#primitives) as attributes that can be modified by Pyro's [effect handlers](http://pyro.ai/examples/effect_handlers.html) (see the [next section](#Model) on how we can have module attributes that are `pyro.sample` primitives). Some general notes:
- Learnable parameters in PyTorch modules are instances of `nn.Parameter`, in this case the `weight` and `bias` parameters of the `nn.Linear` class. When declared inside a `PyroModule` as attributes, these are automatically registered in Pyro's param store. While this model does not require us to constrain the value of these parameters during optimization, this can also be easily achieved in `PyroModule` using the [PyroParam](http://docs.pyro.ai/en/dev/nn.html#pyro.nn.module.PyroParam) statement.
- Note that while the `forward` method of `PyroModule[nn.Linear]` inherits from `nn.Linear`, it can also be easily overridden. e.g. in the case of logistic regression, we apply a sigmoid transformation to the linear predictor.
```python
from torch import nn
from pyro.nn import PyroModule
assert issubclass(PyroModule[nn.Linear], nn.Linear)
assert issubclass(PyroModule[nn.Linear], PyroModule)
```
### Training with PyTorch Optimizers
Note that in addition to the two features `rugged` and `cont_africa`, we also include an interaction term in our model, which lets us separately model the effect of ruggedness on the GDP for nations within and outside Africa.
We use the mean squared error (MSE) as our loss and Adam as our optimizer from the `torch.optim` module. We would like to optimize the parameters of our model, namely the `weight` and `bias` parameters of the network, which correspond to our regression coefficients and the intercept.
```python
# Dataset: Add a feature to capture the interaction between "cont_africa" and "rugged"
df["cont_africa_x_rugged"] = df["cont_africa"] * df["rugged"]
data = torch.tensor(df[["cont_africa", "rugged", "cont_africa_x_rugged", "rgdppc_2000"]].values,
dtype=torch.float)
x_data, y_data = data[:, :-1], data[:, -1]
# Regression model
linear_reg_model = PyroModule[nn.Linear](3, 1)
# Define loss and optimize
loss_fn = torch.nn.MSELoss(reduction='sum')
optim = torch.optim.Adam(linear_reg_model.parameters(), lr=0.05)
num_iterations = 1500 if not smoke_test else 2
def train():
    """Run one full-batch optimization step and return the training loss.

    Uses the notebook-level ``linear_reg_model``, ``loss_fn``, ``optim``,
    ``x_data``, and ``y_data`` defined in this cell.
    """
    # Forward pass: predicted log-GDP for every nation.
    predictions = linear_reg_model(x_data).squeeze(-1)
    # Mean-squared-error loss against the observed log-GDP values.
    mse = loss_fn(predictions, y_data)
    # Standard PyTorch update: clear stale gradients, backpropagate, step.
    optim.zero_grad()
    mse.backward()
    optim.step()
    return mse
for j in range(num_iterations):
loss = train()
if (j + 1) % 50 == 0:
print("[iteration %04d] loss: %.4f" % (j + 1, loss.item()))
# Inspect learned parameters
print("Learned parameters:")
for name, param in linear_reg_model.named_parameters():
print(name, param.data.numpy())
```
[iteration 0050] loss: 3179.7852
[iteration 0100] loss: 1616.1371
[iteration 0150] loss: 1109.4117
[iteration 0200] loss: 833.7545
[iteration 0250] loss: 637.5822
[iteration 0300] loss: 488.2652
[iteration 0350] loss: 376.4650
[iteration 0400] loss: 296.0483
[iteration 0450] loss: 240.6140
[iteration 0500] loss: 203.9386
[iteration 0550] loss: 180.6171
[iteration 0600] loss: 166.3493
[iteration 0650] loss: 157.9457
[iteration 0700] loss: 153.1786
[iteration 0750] loss: 150.5735
[iteration 0800] loss: 149.2020
[iteration 0850] loss: 148.5065
[iteration 0900] loss: 148.1668
[iteration 0950] loss: 148.0070
[iteration 1000] loss: 147.9347
[iteration 1050] loss: 147.9032
[iteration 1100] loss: 147.8900
[iteration 1150] loss: 147.8847
[iteration 1200] loss: 147.8827
[iteration 1250] loss: 147.8819
[iteration 1300] loss: 147.8817
[iteration 1350] loss: 147.8816
[iteration 1400] loss: 147.8815
[iteration 1450] loss: 147.8815
[iteration 1500] loss: 147.8815
Learned parameters:
weight [[-1.9478593 -0.20278624 0.39330274]]
bias [9.22308]
### Plotting the Regression Fit
Let us plot the regression fit for our model, separately for countries outside and within Africa.
```python
fit = df.copy()
fit["mean"] = linear_reg_model(x_data).detach().cpu().numpy()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6), sharey=True)
african_nations = fit[fit["cont_africa"] == 1]
non_african_nations = fit[fit["cont_africa"] == 0]
fig.suptitle("Regression Fit", fontsize=16)
ax[0].plot(non_african_nations["rugged"], non_african_nations["rgdppc_2000"], "o")
ax[0].plot(non_african_nations["rugged"], non_african_nations["mean"], linewidth=2)
ax[0].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="Non African Nations")
ax[1].plot(african_nations["rugged"], african_nations["rgdppc_2000"], "o")
ax[1].plot(african_nations["rugged"], african_nations["mean"], linewidth=2)
ax[1].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="African Nations");
```

We notice that the relationship between terrain ruggedness has an inverse relationship with GDP for non-African nations, but it positively affects the GDP for African nations. It is however unclear how robust this trend is. In particular, we would like to understand how the regression fit would vary due to parameter uncertainty. To address this, we will build a simple Bayesian model for linear regression. [Bayesian modeling](http://mlg.eng.cam.ac.uk/zoubin/papers/NatureReprint15.pdf) offers a systematic framework for reasoning about model uncertainty. Instead of just learning point estimates, we're going to learn a _distribution_ over parameters that are consistent with the observed data.
## Bayesian Regression with Pyro's Stochastic Variational Inference (SVI)
### Model
In order to make our linear regression Bayesian, we need to put priors on the parameters $w$ and $b$. These are distributions that represent our prior belief about reasonable values for $w$ and $b$ (before observing any data).
Making a Bayesian model for linear regression is very intuitive using `PyroModule` as earlier. Note the following:
- The `BayesianRegression` module internally uses the same `PyroModule[nn.Linear]` module. However, note that we replace the `weight` and the `bias` of this module with `PyroSample` statements. These statements allow us to place a prior over the `weight` and `bias` parameters, instead of treating them as fixed learnable parameters. For the bias component, we set a reasonably wide prior since it is likely to be substantially above 0.
- The `BayesianRegression.forward` method specifies the generative process. We generate the mean value of the response by calling the `linear` module (which, as you saw, samples the `weight` and `bias` parameters from the prior and returns a value for the mean response). Finally we use the `obs` argument to the `pyro.sample` statement to condition on the observed data `y_data` with a learned observation noise `sigma`. The model returns the regression line given by the variable `mean`.
```python
from pyro.nn import PyroSample
# Bayesian linear regression: the nn.Linear weight/bias become latent variables
# with Normal priors (via PyroSample) instead of fixed learnable parameters.
class BayesianRegression(PyroModule):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = PyroModule[nn.Linear](in_features, out_features)
# Standard-normal prior on the weight matrix; to_event(2) treats both dims as one event.
self.linear.weight = PyroSample(dist.Normal(0., 1.).expand([out_features, in_features]).to_event(2))
# Wide prior on the bias, since the intercept is expected to be well above 0.
self.linear.bias = PyroSample(dist.Normal(0., 10.).expand([out_features]).to_event(1))
def forward(self, x, y=None):
# Observation noise scale; Uniform support keeps sigma in (0, 10).
sigma = pyro.sample("sigma", dist.Uniform(0., 10.))
mean = self.linear(x).squeeze(-1)
# Condition on the observed outcomes (y is None at prediction time).
with pyro.plate("data", x.shape[0]):
obs = pyro.sample("obs", dist.Normal(mean, sigma), obs=y)
# Return the regression line (mean response) rather than the sampled obs.
return mean
```
### Using an AutoGuide
In order to do inference, i.e. learn the posterior distribution over our unobserved parameters, we will use Stochastic Variational Inference (SVI). The guide determines a family of distributions, and `SVI` aims to find an approximate posterior distribution from this family that has the lowest KL divergence from the true posterior.
Users can write arbitrarily flexible custom guides in Pyro, but in this tutorial, we will restrict ourselves to Pyro's [autoguide library](http://docs.pyro.ai/en/dev/infer.autoguide.html). In the next [tutorial](bayesian_regression_ii.ipynb), we will explore how to write guides by hand.
To begin with, we will use the `AutoDiagonalNormal` guide that models the distribution of unobserved parameters in the model as a Gaussian with diagonal covariance, i.e. it assumes that there is no correlation amongst the latent variables (quite a strong modeling assumption as we shall see in [Part II](bayesian_regression_ii.ipynb)). Under the hood, this defines a `guide` that uses a `Normal` distribution with learnable parameters corresponding to each `sample` statement in the model. e.g. in our case, this distribution should have a size of `(5,)` corresponding to the 3 regression coefficients for each of the terms, and 1 component contributed each by the intercept term and `sigma` in the model.
Autoguide also supports learning MAP estimates with `AutoDelta` or composing guides with `AutoGuideList` (see the [docs](http://docs.pyro.ai/en/dev/infer.autoguide.html) for more information).
```python
from pyro.infer.autoguide import AutoDiagonalNormal
model = BayesianRegression(3, 1)
guide = AutoDiagonalNormal(model)
```
### Optimizing the Evidence Lower Bound
We will use stochastic variational inference (SVI) (for an introduction to SVI, see [SVI Part I](svi_part_i.ipynb)) for doing inference. Just like in the non-Bayesian linear regression model, each iteration of our training loop will take a gradient step, with the difference that in this case, we'll use the Evidence Lower Bound (ELBO) objective instead of the MSE loss by constructing a `Trace_ELBO` object that we pass to `SVI`.
```python
from pyro.infer import SVI, Trace_ELBO
adam = pyro.optim.Adam({"lr": 0.03})
svi = SVI(model, guide, adam, loss=Trace_ELBO())
```
Note that we use the `Adam` optimizer from Pyro's `optim` module and not the `torch.optim` module as earlier. Here `Adam` is a thin wrapper around `torch.optim.Adam` (see [here](svi_part_i.ipynb#Optimizers) for a discussion). Optimizers in `pyro.optim` are used to optimize and update parameter values in Pyro's parameter store. In particular, you will notice that we do not need to pass in learnable parameters to the optimizer since that is determined by the guide code and happens behind the scenes within the `SVI` class automatically. To take an ELBO gradient step we simply call the step method of SVI. The data argument we pass to `SVI.step` will be passed to both `model()` and `guide()`. The complete training loop is as follows:
```python
pyro.clear_param_store()
for j in range(num_iterations):
# calculate the loss and take a gradient step
loss = svi.step(x_data, y_data)
if j % 100 == 0:
print("[iteration %04d] loss: %.4f" % (j + 1, loss / len(data)))
```
[iteration 0001] loss: 6.2310
[iteration 0101] loss: 3.5253
[iteration 0201] loss: 3.2347
[iteration 0301] loss: 3.0890
[iteration 0401] loss: 2.6377
[iteration 0501] loss: 2.0626
[iteration 0601] loss: 1.4852
[iteration 0701] loss: 1.4631
[iteration 0801] loss: 1.4632
[iteration 0901] loss: 1.4592
[iteration 1001] loss: 1.4940
[iteration 1101] loss: 1.4988
[iteration 1201] loss: 1.4938
[iteration 1301] loss: 1.4679
[iteration 1401] loss: 1.4581
We can examine the optimized parameter values by fetching from Pyro's param store.
```python
guide.requires_grad_(False)
for name, value in pyro.get_param_store().items():
print(name, pyro.param(name))
```
AutoDiagonalNormal.loc Parameter containing:
tensor([-2.2371, -1.8097, -0.1691, 0.3791, 9.1823])
AutoDiagonalNormal.scale tensor([0.0551, 0.1142, 0.0387, 0.0769, 0.0702])
As you can see, instead of just point estimates, we now have uncertainty estimates (`AutoDiagonalNormal.scale`) for our learned parameters. Note that Autoguide packs the latent variables into a single tensor, in this case, one entry per variable sampled in our model. Both the `loc` and `scale` parameters have size `(5,)`, one for each of the latent variables in the model, as we had remarked earlier.
To look at the distribution of the latent parameters more clearly, we can make use of the `AutoDiagonalNormal.quantiles` method which will unpack the latent samples from the autoguide, and automatically constrain them to the site's support (e.g. the variable `sigma` must lie in `(0, 10)`). We see that the median values for the parameters are quite close to the Maximum Likelihood point estimates we obtained from our first model.
```python
guide.quantiles([0.25, 0.5, 0.75])
```
{'sigma': [tensor(0.9328), tensor(0.9647), tensor(0.9976)],
'linear.weight': [tensor([[-1.8868, -0.1952, 0.3272]]),
tensor([[-1.8097, -0.1691, 0.3791]]),
tensor([[-1.7327, -0.1429, 0.4309]])],
'linear.bias': [tensor([9.1350]), tensor([9.1823]), tensor([9.2297])]}
## Model Evaluation
To evaluate our model, we'll generate some predictive samples and look at the posteriors. For this we will make use of the [Predictive](http://docs.pyro.ai/en/stable/inference_algos.html#pyro.infer.predictive.Predictive) utility class.
- We generate 800 samples from our trained model. Internally, this is done by first generating samples for the unobserved sites in the `guide`, and then running the model forward by conditioning the sites to values sampled from the `guide`. Refer to the [Model Serving](#Model-Serving-via-TorchScript) section for insight on how the `Predictive` class works.
- Note that in `return_sites`, we specify both the outcome (`"obs"` site) as well as the return value of the model (`"_RETURN"`) which captures the regression line. Additionally, we would also like to capture the regression coefficients (given by `"linear.weight"`) for further analysis.
- The remaining code is simply used to plot the 90% CI for the two variables from our model.
```python
from pyro.infer import Predictive
def summary(samples):
    """Summarize posterior samples per site.

    Parameters
    ----------
    samples : dict mapping site name -> tensor of posterior draws, with the
        sample dimension first.

    Returns
    -------
    dict mapping each site name to a dict with its ``mean``, ``std``, and
    empirical ``5%`` / ``95%`` quantile tensors along the sample dimension.
    """

    def _site_stats(draws):
        # kthvalue is 1-indexed; int(n * q) picks the empirical q-quantile draw.
        n = len(draws)
        return {
            "mean": torch.mean(draws, 0),
            "std": torch.std(draws, 0),
            "5%": draws.kthvalue(int(n * 0.05), dim=0)[0],
            "95%": draws.kthvalue(int(n * 0.95), dim=0)[0],
        }

    return {name: _site_stats(draws) for name, draws in samples.items()}
predictive = Predictive(model, guide=guide, num_samples=800,
return_sites=("linear.weight", "obs", "_RETURN"))
samples = predictive(x_data)
pred_summary = summary(samples)
```
```python
mu = pred_summary["_RETURN"]
y = pred_summary["obs"]
predictions = pd.DataFrame({
"cont_africa": x_data[:, 0],
"rugged": x_data[:, 1],
"mu_mean": mu["mean"],
"mu_perc_5": mu["5%"],
"mu_perc_95": mu["95%"],
"y_mean": y["mean"],
"y_perc_5": y["5%"],
"y_perc_95": y["95%"],
"true_gdp": y_data,
})
```
```python
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6), sharey=True)
african_nations = predictions[predictions["cont_africa"] == 1]
non_african_nations = predictions[predictions["cont_africa"] == 0]
african_nations = african_nations.sort_values(by=["rugged"])
non_african_nations = non_african_nations.sort_values(by=["rugged"])
fig.suptitle("Regression line 90% CI", fontsize=16)
ax[0].plot(non_african_nations["rugged"],
non_african_nations["mu_mean"])
ax[0].fill_between(non_african_nations["rugged"],
non_african_nations["mu_perc_5"],
non_african_nations["mu_perc_95"],
alpha=0.5)
ax[0].plot(non_african_nations["rugged"],
non_african_nations["true_gdp"],
"o")
ax[0].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="Non African Nations")
idx = np.argsort(african_nations["rugged"])
ax[1].plot(african_nations["rugged"],
african_nations["mu_mean"])
ax[1].fill_between(african_nations["rugged"],
african_nations["mu_perc_5"],
african_nations["mu_perc_95"],
alpha=0.5)
ax[1].plot(african_nations["rugged"],
african_nations["true_gdp"],
"o")
ax[1].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="African Nations");
```

The above figure shows the uncertainty in our estimate of the regression line, and the 90% CI around the mean. We can also see that most of the data points actually lie outside the 90% CI, and this is expected because we have not plotted the outcome variable which will be affected by `sigma`! Let us do so next.
```python
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6), sharey=True)
fig.suptitle("Posterior predictive distribution with 90% CI", fontsize=16)
ax[0].plot(non_african_nations["rugged"],
non_african_nations["y_mean"])
ax[0].fill_between(non_african_nations["rugged"],
non_african_nations["y_perc_5"],
non_african_nations["y_perc_95"],
alpha=0.5)
ax[0].plot(non_african_nations["rugged"],
non_african_nations["true_gdp"],
"o")
ax[0].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="Non African Nations")
idx = np.argsort(african_nations["rugged"])
ax[1].plot(african_nations["rugged"],
african_nations["y_mean"])
ax[1].fill_between(african_nations["rugged"],
african_nations["y_perc_5"],
african_nations["y_perc_95"],
alpha=0.5)
ax[1].plot(african_nations["rugged"],
african_nations["true_gdp"],
"o")
ax[1].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="African Nations");
```

We observe that the outcome from our model and the 90% CI accounts for the majority of the data points that we observe in practice. It is usually a good idea to do such posterior predictive checks to see if our model gives valid predictions.
Finally, let us revisit our earlier question of how robust the relationship between terrain ruggedness and GDP is against any uncertainty in the parameter estimates from our model. For this, we plot the distribution of the slope of the log GDP given terrain ruggedness for nations within and outside Africa. As can be seen below, the probability mass for African nations is largely concentrated in the positive region and vice-versa for other nations, lending further credence to the original hypothesis.
```python
weight = samples["linear.weight"]
weight = weight.reshape(weight.shape[0], 3)
gamma_within_africa = weight[:, 1] + weight[:, 2]
gamma_outside_africa = weight[:, 1]
fig = plt.figure(figsize=(10, 6))
sns.distplot(gamma_within_africa, kde_kws={"label": "African nations"},)
sns.distplot(gamma_outside_africa, kde_kws={"label": "Non-African nations"})
fig.suptitle("Density of Slope : log(GDP) vs. Terrain Ruggedness");
```

## Model Serving via TorchScript
Finally, note that the `model`, `guide` and the `Predictive` utility class are all `torch.nn.Module` instances, and can be serialized as [TorchScript](https://pytorch.org/docs/stable/jit.html).
Here, we show how we can serve a Pyro model as a [torch.jit.ModuleScript](https://pytorch.org/docs/stable/jit.html#torch.jit.ScriptModule), which can be run separately as a C++ program without a Python runtime.
To do so, we will rewrite our own simple version of the `Predictive` utility class using Pyro's [effect handling library](http://pyro.ai/examples/effect_handlers.html). This uses:
- the `trace` poutine to capture the execution trace from running the model/guide code.
- the `replay` poutine to condition the sites in the model to values sampled from the guide trace.
```python
from collections import defaultdict
from pyro import poutine
from pyro.poutine.util import prune_subsample_sites
import warnings
class Predict(torch.nn.Module):
def __init__(self, model, guide):
super().__init__()
self.model = model
self.guide = guide
def forward(self, *args, **kwargs):
samples = {}
guide_trace = poutine.trace(self.guide).get_trace(*args, **kwargs)
model_trace = poutine.trace(poutine.replay(self.model, guide_trace)).get_trace(*args, **kwargs)
for site in prune_subsample_sites(model_trace).stochastic_nodes:
samples[site] = model_trace.nodes[site]['value']
return tuple(v for _, v in sorted(samples.items()))
predict_fn = Predict(model, guide)
predict_module = torch.jit.trace_module(predict_fn, {"forward": (x_data,)}, check_trace=False)
```
We use [torch.jit.trace_module](https://pytorch.org/docs/stable/jit.html#torch.jit.trace_module) to trace the `forward` method of this module and save it using [torch.jit.save](https://pytorch.org/docs/stable/jit.html#torch.jit.save). This saved model `reg_predict.pt` can be loaded with PyTorch's C++ API using `torch::jit::load(filename)`, or using the Python API as we do below.
```python
torch.jit.save(predict_module, '/tmp/reg_predict.pt')
pred_loaded = torch.jit.load('/tmp/reg_predict.pt')
pred_loaded(x_data)
```
(tensor([9.2165]),
tensor([[-1.6612, -0.1498, 0.4282]]),
tensor([ 7.5951, 8.2473, 9.3864, 9.2590, 9.0540, 9.3915, 8.6764, 9.3775,
9.5473, 9.6144, 10.3521, 8.5452, 5.4008, 8.4601, 9.6219, 9.7774,
7.1958, 7.2581, 8.9159, 9.0875, 8.3730, 8.7903, 9.3167, 8.8155,
7.4433, 9.9981, 8.6909, 9.2915, 10.1376, 7.7618, 10.1916, 7.4754,
6.3473, 7.7584, 9.1307, 6.0794, 8.5641, 7.8487, 9.2828, 9.0763,
7.9250, 10.9226, 8.0005, 10.1799, 5.3611, 8.1174, 8.0585, 8.5098,
6.8656, 8.6765, 7.8925, 9.5233, 10.1269, 10.2661, 7.8883, 8.9194,
10.2866, 7.0821, 8.2370, 8.3087, 7.8408, 8.4891, 8.0107, 7.6815,
8.7497, 9.3551, 9.9687, 10.4804, 8.5176, 7.1679, 10.8805, 7.4919,
8.7088, 9.2417, 9.2360, 9.7907, 8.4934, 7.8897, 9.5338, 9.6572,
9.6604, 9.9855, 6.7415, 8.1721, 10.0646, 10.0817, 8.4503, 9.2588,
8.4489, 7.7516, 6.8496, 9.2208, 8.9852, 10.6585, 9.4218, 9.1290,
9.5631, 9.7422, 10.2814, 7.2624, 9.6727, 8.9743, 6.9666, 9.5856,
9.2518, 8.4207, 8.6988, 9.1914, 7.8161, 9.8446, 6.5528, 8.5518,
6.7168, 7.0694, 8.9211, 8.5311, 8.4545, 10.8346, 7.8768, 9.2537,
9.0776, 9.4698, 7.9611, 9.2177, 8.0880, 8.5090, 9.2262, 8.9242,
9.3966, 7.5051, 9.1014, 8.9601, 7.7225, 8.7569, 8.5847, 8.8465,
9.7494, 8.8587, 6.5624, 6.9372, 9.9806, 10.1259, 9.1864, 7.5758,
9.8258, 8.6375, 7.6954, 8.9718, 7.0985, 8.6360, 8.5951, 8.9163,
8.4661, 8.4551, 10.6844, 7.5948, 8.7568, 9.5296, 8.9530, 7.1214,
9.1401, 8.4992, 8.9115, 10.9739, 8.1593, 10.1162, 9.7072, 7.8641,
8.8606, 7.5935]),
tensor(0.9631))
Let us check that our `Predict` module was indeed serialized correctly, by generating samples from the loaded module and regenerating the previous plot.
```python
weight = []
for _ in range(800):
# index = 1 corresponds to "linear.weight"
weight.append(pred_loaded(x_data)[1])
weight = torch.stack(weight).detach()
weight = weight.reshape(weight.shape[0], 3)
gamma_within_africa = weight[:, 1] + weight[:, 2]
gamma_outside_africa = weight[:, 1]
fig = plt.figure(figsize=(10, 6))
sns.distplot(gamma_within_africa, kde_kws={"label": "African nations"},)
sns.distplot(gamma_outside_africa, kde_kws={"label": "Non-African nations"})
fig.suptitle("Loaded TorchScript Module : log(GDP) vs. Terrain Ruggedness");
```

In the next section, we'll look at how to write guides for variational inference as well as compare the results with inference via HMC.
### References
1. McElreath, D., *Statistical Rethinking, Chapter 7*, 2016
2. Nunn, N. & Puga, D., *[Ruggedness: The blessing of bad geography in Africa](https://diegopuga.org/papers/rugged.pdf)*, Review of Economics and Statistics 94(1), Feb. 2012
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tutorial@source@bayesian_regression.ipynb@.PATH_END.py
|
{
"filename": "_tickprefix.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/ternary/caxis/_tickprefix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickprefixValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``layout.ternary.caxis.tickprefix`` string property."""
    def __init__(
        self, plotly_name="tickprefix", parent_name="layout.ternary.caxis", **kwargs
    ):
        super(TickprefixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit type is "plot" (full re-plot) unless the caller
            # overrides it via kwargs.
            edit_type=kwargs.pop("edit_type", "plot"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@ternary@caxis@_tickprefix.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "terryyin/lizard",
"repo_path": "lizard_extracted/lizard-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
'''
Setup script.
To install lizard:
sudo setup.py build install
'''
import codecs
import os
import re
from setuptools import setup, Command
# Determine the package version from the newest entry in CHANGELOG.md and
# write it back into lizard_ext/version.py. If anything goes wrong (e.g.
# building from a source distribution that ships no changelog), fall back
# to the previously saved lizard_ext.version.
try:
    here = os.path.dirname(os.path.abspath(__file__))
    version = '0.0.0'
    changelog_path = os.path.join(here, "CHANGELOG.md")
    # Match headings such as "# 1.17.10". Dots are escaped so only real
    # version numbers are accepted (unescaped '.' matched any character).
    pattern = r'^\#*\s*(?P<version>[0-9]+\.[0-9]+(\.[0-9]+)?)'
    with codecs.open(changelog_path, encoding='utf-8') as changelog:
        for line in changelog:
            match = re.match(pattern, line)
            if match:
                version = match.group("version")
                break

    def save_version():
        """Persist the detected version into lizard_ext/version.py."""
        version_path = os.path.join(here, "lizard_ext/version.py")
        with open(version_path) as version_file_read:
            content_file = version_file_read.read()
        VSRE = r"^version = ['\"]([^'\"]*)['\"]"
        mo = re.search(VSRE, content_file, re.M)
        current_version = mo.group(1)
        content_file = content_file.replace(current_version, "{}".format(version))
        with open(version_path, 'w') as version_file_write:
            version_file_write.write(content_file)

    save_version()
except Exception:
    # Never use a bare ``except``: it would also swallow SystemExit and
    # KeyboardInterrupt. Any real failure falls back to the saved version.
    from lizard_ext import version
class VersionCommand(Command):
    """``setup.py version`` sub-command that prints the library version."""
    description = 'Show library version'
    user_options = []
    def initialize_options(self):
        # Required by the setuptools Command interface; no options to set up.
        pass
    def finalize_options(self):
        # Required by the setuptools Command interface; no options to validate.
        pass
    def run(self):
        # ``version`` is resolved at module level: parsed from CHANGELOG.md,
        # or the lizard_ext fallback if parsing failed.
        print(version)
# Register the distribution metadata, packages and entry points with setuptools.
setup(
    name='lizard',
    version=version,
    description='''A code analyzer without caring the C/C++ header files. ''' +
    '''It works with Java, C/C++, JavaScript, Python, Ruby, Swift, Objective C. Metrics includes cyclomatic complexity number etc.''',
    long_description=open('README.rst').read(),
    url='http://www.lizard.ws',
    project_urls={
        'Source': 'https://github.com/terryyin/lizard',
    },
    download_url='https://pypi.python.org/lizard/',
    license='MIT',
    platforms='any',
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Intended Audience :: Developers',
                 'Intended Audience :: End Users/Desktop',
                 'License :: Freeware',
                 'Operating System :: POSIX',
                 'Operating System :: Microsoft :: Windows',
                 'Operating System :: MacOS :: MacOS X',
                 'Topic :: Software Development :: Quality Assurance',
                 'Programming Language :: C',
                 'Programming Language :: C++',
                 'Programming Language :: Java',
                 'Programming Language :: JavaScript',
                 'Programming Language :: Objective C',
                 'Programming Language :: Python',
                 'Programming Language :: Python :: 3.8',
                 'Programming Language :: Python :: 3.9',
                 'Programming Language :: Python :: 3.10',
                 'Programming Language :: Python :: 3.11'],
    # Exposes the custom "version" command: ``python setup.py version``.
    cmdclass={'version': VersionCommand},
    packages=['lizard_ext', 'lizard_languages'],
    #data_files=[('lizard_ext', [])],
    py_modules=['lizard'],
    install_requires=['pygments'],
    # Installs the ``lizard`` console script pointing at lizard.main.
    entry_points={'console_scripts': ['lizard = lizard:main']},
    author='Terry Yin',
    author_email='terry@odd-e.com',
)
|
terryyinREPO_NAMElizardPATH_START.@lizard_extracted@lizard-master@setup.py@.PATH_END.py
|
{
"filename": "model_obs.py",
"repo_name": "remi-adam/minot",
"repo_path": "minot_extracted/minot-master/minot/model_obs.py",
"type": "Python"
}
|
"""
This file contain a subclass of the model.py module and Cluster class. It
is dedicated to the computing of observables.
"""
#==================================================
# Requested imports
#==================================================
import numpy as np
import scipy.ndimage as ndimage
import astropy.units as u
from astropy.wcs import WCS
from astropy import constants as const
import scipy.interpolate as interpolate
from minot import model_tools
from minot.ClusterTools import cluster_global
from minot.ClusterTools import cluster_profile
from minot.ClusterTools import cluster_spectra
from minot.ClusterTools import cluster_xspec
from minot.ClusterTools import map_tools
#==================================================
# Observable class
#==================================================
class Observables(object):
""" Observable class
This class serves as a parser to the main Cluster class, to
include the subclass Observable in this other file.
Attributes
----------
The attributes are the same as the Cluster class, see model.py
Methods
----------
- get_*_spectrum(): compute the {* = gamma, neutrinos, IC, radio, SZ, Xray} spectrum
integrating over the volume up to Rmax
- get_*_profile(): compute the {* = gamma, neutrinos, IC, radio, SZ, Xray} profile,
integrating over the energy if relevant
- get_*_flux(): compute the {* = gamma, neutrinos, IC, radio, SZ, Xray} flux integrating
the energy range and for R>Rmax if relevant.
- get_*_map(): compute a {* = gamma, neutrinos, IC, radio, SZ, Xray} map.
- get_*_hpmap(): compute a {* = gamma, neutrinos, IC, radio, SZ, Xray} map, healpix format.
"""
#==================================================
# Compute gamma ray spectrum
#==================================================
def get_gamma_spectrum(self, energy=np.logspace(-2,6,100)*u.GeV,
Rmin=None, Rmax=None,
type_integral='spherical',
Rmin_los=None, NR500_los=5.0,
Cframe=False,
model='Kafexhiu2014'):
"""
Compute the gamma ray emission enclosed within [Rmin,Rmax], in 3d (i.e. spherically
integrated), or the gamma ray emmission enclosed within an circular area (i.e.
cylindrical).
Parameters
----------
- energy (quantity) : the physical energy of gamma rays
- Rmin, Rmax (quantity): the radius within with the spectrum is computed
(default is 1kpc, R500)
- type_integral (string): either 'spherical' or 'cylindrical'
- Rmin_los (quantity): minimal radius at which l.o.s integration starts
This is used only for cylindrical case
- NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
This is used only for cylindrical case
- Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
- model (str): change the reference model to 'Kafexhiu2014' or 'Kelner2006'
Outputs
----------
- energy (quantity) : the physical energy of gamma rays
- dN_dEdSdt (np.ndarray) : the spectrum in units of GeV-1 cm-2 s-1
"""
# In case the input is not an array
energy = model_tools.check_qarray(energy, unit='GeV')
# K-correction
if Cframe:
energy_rf = energy*1.0
else:
energy_rf = energy*(1+self._redshift)
# Check the type of integral
ok_list = ['spherical', 'cylindrical']
if not type_integral in ok_list:
raise ValueError("This requested integral type (type_integral) is not available")
# Get the integration limits
if Rmin is None:
Rmin = self._Rmin
if Rmax is None:
Rmax = self._R500
if Rmin_los is None:
Rmin_los = self._Rmin
if Rmin.to_value('kpc') <= 0:
raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
if Rmin.to_value('kpc') < 1e-2:
if not self._silent:
print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")
# Compute the integral
if type_integral == 'spherical':
rad = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
dN_dEdVdt = self.get_rate_gamma(energy_rf, rad, model=model)
dN_dEdt = model_tools.spherical_integration(dN_dEdVdt, rad)
# Compute the integral
if type_integral == 'cylindrical':
Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1, NptPd=self._Npt_per_decade_integ, unit=True)
los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
NptPd=self._Npt_per_decade_integ, unit=True)
r2d = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
dN_dEdVdt = self.get_rate_gamma(energy_rf, r3d, model=model)
dN_dEdt = model_tools.cylindrical_integration(dN_dEdVdt, energy, r3d, r2d, los,
Rtrunc=self._R_truncation)
# From intrinsic luminosity to flux
dN_dEdSdt = dN_dEdt / (4*np.pi * self._D_lum**2)
# Apply EBL absorbtion
if self._EBL_model != 'none' and not Cframe:
absorb = cluster_spectra.get_ebl_absorb(energy.to_value('GeV'), self._redshift, self._EBL_model)
dN_dEdSdt = dN_dEdSdt * absorb
return energy, dN_dEdSdt.to('GeV-1 cm-2 s-1')
#==================================================
# Compute gamma ray profile
#==================================================
    def get_gamma_profile(self, radius=np.logspace(0,4,100)*u.kpc,
                          Emin=None, Emax=None, Energy_density=False,
                          Rmin_los=None, NR500_los=5.0,
                          Cframe=False,
                          model='Kafexhiu2014'):
        """
        Compute the gamma ray surface-brightness profile within Emin-Emax,
        by integrating the emission rate along the line of sight at each
        projected radius.

        Parameters
        ----------
        - radius (quantity): the projected 2d radius in units homogeneous to kpc, as a 1d array
        - Emin (quantity): the lower bound for gamma ray energy integration
        - Emax (quantity): the upper bound for gamma ray energy integration
        - Energy_density (bool): if True, then the energy density is computed. Otherwise,
        the number density is computed.
        - Rmin_los (quantity): minimal radius at which l.o.s integration starts
        - NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
        - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
        - model (str): change the reference model to 'Kafexhiu2014' or 'Kelner2006'

        Outputs
        ----------
        - radius (quantity): the projected 2d radius in unit of kpc
        - dN_dSdtdO (np.ndarray) : the spectrum in units of cm-2 s-1 sr-1 or GeV cm-2 s-1 sr-1
        """
        # In case the input is not an array
        radius = model_tools.check_qarray(radius, unit='kpc')
        # Get the integration limits
        if Emin is None:
            Emin = self._Epmin/10.0 # photon energy down to 0.1 minimal proton energy
        if Emax is None:
            Emax = self._Epmax
        if Rmin_los is None:
            Rmin_los = self._Rmin
        Rmin = np.amin(radius.to_value('kpc'))*u.kpc
        Rmax = np.amax(radius.to_value('kpc'))*u.kpc
        # Define energy sampling; K-correction shifts to the cluster rest frame
        eng = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
        if Cframe:
            eng_rf = eng*1.0
        else:
            eng_rf = eng*(1+self._redshift)
        # Define 3d radius and l.o.s. sampling arrays for the projection
        # (0.9/1.1 margins keep the interpolation inside the sampled range)
        Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
        Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
        r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1, NptPd=self._Npt_per_decade_integ, unit=True)
        los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                         NptPd=self._Npt_per_decade_integ, unit=True)
        dN_dEdVdt = self.get_rate_gamma(eng_rf, r3d, model=model)
        # Apply EBL absorption (observer frame only)
        if self._EBL_model != 'none' and not Cframe:
            absorb = cluster_spectra.get_ebl_absorb(eng.to_value('GeV'), self._redshift, self._EBL_model)
            dN_dEdVdt = dN_dEdVdt * model_tools.replicate_array(absorb, len(r3d), T=True)
        # Compute energy integral
        dN_dVdt = model_tools.energy_integration(dN_dEdVdt, eng, Energy_density=Energy_density)
        # Compute integral over l.o.s.
        dN_dVdt_proj = model_tools.los_integration_1dfunc(dN_dVdt, r3d, radius, los)
        dN_dVdt_proj[radius > self._R_truncation] = 0
        # Convert from physical to angular scale
        dN_dtdO = dN_dVdt_proj * self._D_ang**2 * u.Unit('sr-1')
        # From intrinsic luminosity to flux
        dN_dSdtdO = dN_dtdO / (4*np.pi * self._D_lum**2)
        # Cast to the requested output unit
        if Energy_density:
            dN_dSdtdO = dN_dSdtdO.to('GeV cm-2 s-1 sr-1')
        else :
            dN_dSdtdO = dN_dSdtdO.to('cm-2 s-1 sr-1')
        return radius, dN_dSdtdO
#==================================================
# Compute gamma ray flux
#==================================================
def get_gamma_flux(self, Emin=None, Emax=None, Energy_density=False,
Rmin=None, Rmax=None,
type_integral='spherical',
Rmin_los=None, NR500_los=5.0,
Cframe=False,
model='Kafexhiu2014'):
"""
Compute the gamma ray emission enclosed within Rmax, in 3d (i.e. spherically
integrated), or the gamma ray emmission enclosed within an circular area (i.e.
cylindrical), and in a given energy band. The minimal energy can be an array to
flux(>E) and the radius max can be an array to get flux(<R).
Parameters
----------
- Emin (quantity): the lower bound for gamma ray energy integration
It can be an array.
- Emax (quantity): the upper bound for gamma ray energy integration
- Energy_density (bool): if True, then the energy density is computed. Otherwise,
the number density is computed.
- Rmin (quantity): the minimal radius within with the spectrum is computed
- Rmax (quantity): the maximal radius within with the spectrum is computed.
It can be an array.
- type_integral (string): either 'spherical' or 'cylindrical'
- Rmin_los (quantity): minimal radius at which l.o.s integration starts
This is used only for cylindrical case
- NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
This is used only for cylindrical case
- Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
Outputs
----------
- flux (quantity) : the gamma ray flux either in GeV/cm2/s or ph/cm2/s, depending
on parameter Energy_density
"""
# Check the type of integral
ok_list = ['spherical', 'cylindrical']
if not type_integral in ok_list:
raise ValueError("This requested integral type (type_integral) is not available")
# Get the integration limits
if Rmin_los is None:
Rmin_los = self._Rmin
if Rmin is None:
Rmin = self._Rmin
if Rmin.to_value('kpc') <= 0:
raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
if Rmin.to_value('kpc') < 1e-2:
if not self._silent:
print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")
if Rmax is None:
Rmax = self._R500
if Emin is None:
Emin = self._Epmin/10.0 # default photon energy down to 0.1 minimal proton energy
if Emax is None:
Emax = self._Epmax
# Check if Emin and Rmax are scalar or array
if type(Emin.value) == np.ndarray and type(Rmax.value) == np.ndarray:
raise ValueError('Emin and Rmax cannot both be array simultaneously')
#----- Case of scalar quantities
if (type(Emin.value) == float or type(Emin.value) == np.float64) and (type(Rmax.value) == float or type(Rmax.value) == np.float64):
# Get a spectrum
energy = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
energy, dN_dEdSdt = self.get_gamma_spectrum(energy, Rmin=Rmin, Rmax=Rmax,
type_integral=type_integral, Rmin_los=Rmin_los,
NR500_los=NR500_los,
Cframe=Cframe, model=model)
# Integrate over it and return
flux = model_tools.energy_integration(dN_dEdSdt, energy, Energy_density=Energy_density)
#----- Case of energy array
if type(Emin.value) == np.ndarray:
# Get a spectrum
energy = model_tools.sampling_array(np.amin(Emin.value)*Emin.unit, Emax,
NptPd=self._Npt_per_decade_integ, unit=True)
energy, dN_dEdSdt = self.get_gamma_spectrum(energy, Rmin=Rmin, Rmax=Rmax,
type_integral=type_integral, Rmin_los=Rmin_los,
NR500_los=NR500_los,
Cframe=Cframe, model=model)
# Integrate over it and return
if Energy_density:
flux = np.zeros(len(Emin))*u.Unit('GeV cm-2 s-1')
else:
flux = np.zeros(len(Emin))*u.Unit('cm-2 s-1')
itpl = interpolate.interp1d(energy.value, dN_dEdSdt.value, kind='linear')
for i in range(len(Emin)):
eng_i = model_tools.sampling_array(Emin[i], Emax, NptPd=self._Npt_per_decade_integ, unit=True)
dN_dEdSdt_i = itpl(eng_i.value)*dN_dEdSdt.unit
flux[i] = model_tools.energy_integration(dN_dEdSdt_i, eng_i, Energy_density=Energy_density)
#----- Case of radius array (need to use dN/dVdEdt and not get_profile because spherical flux)
if type(Rmax.value) == np.ndarray:
# Get energy integration
eng = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
if Cframe:
eng_rf = eng*1.0
else:
eng_rf = eng*(1+self._redshift)
if type_integral == 'spherical':
Rmax3d = np.amax(Rmax.value)*Rmax.unit
Rmin3d = Rmin
if type_integral == 'cylindrical':
Rmax3d = np.sqrt((NR500_los*self._R500)**2 + (np.amax(Rmax.value)*Rmax.unit)**2)*1.1
Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)*0.9
r3d = model_tools.sampling_array(Rmin3d, Rmax3d, NptPd=self._Npt_per_decade_integ, unit=True)
los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
NptPd=self._Npt_per_decade_integ, unit=True)
dN_dEdVdt = self.get_rate_gamma(eng_rf, r3d, model=model)
# Apply EBL absorbtion
if self._EBL_model != 'none' and not Cframe:
absorb = cluster_spectra.get_ebl_absorb(eng.to_value('GeV'), self._redshift, self._EBL_model)
dN_dEdVdt = dN_dEdVdt * model_tools.replicate_array(absorb, len(r3d), T=True)
# Compute energy integal
dN_dVdt = model_tools.energy_integration(dN_dEdVdt, eng, Energy_density=Energy_density)
# Define output
if Energy_density:
flux = np.zeros(len(Rmax))*u.Unit('GeV cm-2 s-1')
else:
flux = np.zeros(len(Rmax))*u.Unit('cm-2 s-1')
# Case of spherical integral: direct volume integration
if type_integral == 'spherical':
itpl = interpolate.interp1d(r3d.to_value('kpc'), dN_dVdt.value, kind='linear')
for i in range(len(Rmax)):
rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
dN_dVdt_i = itpl(rad_i.to_value('kpc'))*dN_dVdt.unit
lum_i = model_tools.spherical_integration(dN_dVdt_i, rad_i)
flux[i] = lum_i / (4*np.pi * self._D_lum**2)
# Case of cylindrical integral
if type_integral == 'cylindrical':
# Compute integral over l.o.s.
radius = model_tools.sampling_array(Rmin, np.amax(Rmax.value)*Rmax.unit,
NptPd=self._Npt_per_decade_integ, unit=True)
dN_dVdt_proj = model_tools.los_integration_1dfunc(dN_dVdt, r3d, radius, los)
dN_dVdt_proj[radius > self._R_truncation] = 0
dN_dSdVdt_proj = dN_dVdt_proj / (4*np.pi * self._D_lum**2)
itpl = interpolate.interp1d(radius.to_value('kpc'), dN_dSdVdt_proj.value, kind='linear')
for i in range(len(Rmax)):
rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
dN_dSdVdt_proj_i = itpl(rad_i.value)*dN_dSdVdt_proj.unit
flux[i] = model_tools.trapz_loglog(2*np.pi*rad_i*dN_dSdVdt_proj_i, rad_i)
# Return
if Energy_density:
flux = flux.to('GeV cm-2 s-1')
else:
flux = flux.to('cm-2 s-1')
return flux
#==================================================
# Compute gamma map
#==================================================
def get_gamma_map(self, Emin=None, Emax=None,
Rmin_los=None, NR500_los=5.0,
Rmin=None, Rmax=None,
Energy_density=False, Normalize=False,
Cframe=False,
model='Kafexhiu2014'):
"""
Compute the gamma ray map. The map is normalized so that the integral
of the map over the cluster volume is 1 (up to Rmax=5R500).
Parameters
----------
- Emin (quantity): the lower bound for gamma ray energy integration.
Has no effect if Normalized is True
- Emax (quantity): the upper bound for gamma ray energy integration
Has no effect if Normalized is True
- Rmin_los (Quantity): the radius at which line of sight integration starts
- NR500_los (float): the integration will stop at NR500_los x R500
- Rmin, Rmax (quantity): the radius within with the spectrum is computed
(default is 1kpc, Rtruncation) for getting the normlization flux.
Has no effect if Normalized is False
- Energy_density (bool): if True, then the energy density is computed. Otherwise,
the number density is computed.
Has no effect if Normalized is True
- Normalize (bool): if True, the map is normalized by the flux to get a
template in unit of sr-1
- Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
- model (str): change the reference model to 'Kafexhiu2014' or 'Kelner2006'
Outputs
----------
gamma_map (np.ndarray) : the map in units of sr-1 or brightness
"""
# Get the header
header = self.get_map_header()
# Get a R.A-Dec. map
ra_map, dec_map = map_tools.get_radec_map(header)
# Get a cluster distance map (in deg)
dist_map = map_tools.greatcircle(ra_map, dec_map, self._coord.icrs.ra.to_value('deg'),
self._coord.icrs.dec.to_value('deg'))
# Define the radius used fo computing the profile
theta_max = np.amax(dist_map) # maximum angle from the cluster
theta_min = np.amin(dist_map) # minimum angle from the cluster (~0 if cluster within FoV)
if theta_min > 10 and theta_max > 10:
print('!!!!! WARNING: the cluster location is very much offset from the field of view')
rmax = theta_max*np.pi/180 * self._D_ang
rmin = theta_min*np.pi/180 * self._D_ang
if rmin == 0: rmin = self._Rmin
radius = model_tools.sampling_array(rmin, rmax, NptPd=self._Npt_per_decade_integ, unit=True)
# Project the integrand
r_proj, profile = self.get_gamma_profile(radius, Emin=Emin, Emax=Emax, Energy_density=Energy_density,
Rmin_los=Rmin_los, NR500_los=NR500_los,
Cframe=Cframe, model=model)
# Convert to angle and interpolate onto a map
theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi # degrees
gamma_map = map_tools.profile2map(profile.value, theta_proj, dist_map)*profile.unit
# Avoid numerical residual ringing from interpolation
gamma_map[dist_map > self._theta_truncation.to_value('deg')] = 0
# Compute the normalization: to return a map in sr-1, i.e. by computing the total flux
if Normalize:
if Rmax is None:
if self._R_truncation is not np.inf:
Rmax = self._R_truncation
else:
Rmax = NR500_los*self._R500
if Rmin is None:
Rmin = self._Rmin
flux = self.get_gamma_flux(Rmin=Rmin, Rmax=Rmax, type_integral='cylindrical', NR500_los=NR500_los,
Emin=Emin, Emax=Emax, Energy_density=Energy_density, Cframe=Cframe)
gamma_map = gamma_map / flux
gamma_map = gamma_map.to('sr-1')
else:
if Energy_density:
gamma_map = gamma_map.to('GeV cm-2 s-1 sr-1')
else :
gamma_map = gamma_map.to('cm-2 s-1 sr-1')
return gamma_map
#==================================================
# Compute gamma map - healpix format
#==================================================
    def get_gamma_hpmap(self, nside=2048, Emin=None, Emax=None,
                        Rmin_los=None, NR500_los=5.0,
                        Rmin=None, Rmax=None,
                        Energy_density=False,
                        Cframe=False,
                        model='Kafexhiu2014',
                        maplonlat=None, output_lonlat=False):
        """
        Compute the gamma ray map (RING) healpix format.

        Parameters
        ----------
        - nside (int): healpix Nside
        - Emin (quantity): the lower bound for gamma ray energy integration.
        - Emax (quantity): the upper bound for gamma ray energy integration
        - Rmin_los (Quantity): the radius at which line of sight integration starts
        - NR500_los (float): the integration will stop at NR500_los x R500
        - Rmin, Rmax (quantity): NOTE(review): these parameters are not used in
        the body of this method (no Normalize option here, unlike get_gamma_map);
        presumably kept for API symmetry — confirm.
        - Energy_density (bool): if True, then the energy density is computed. Otherwise,
        the number density is computed.
        - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
        - model (str): change the reference model to 'Kafexhiu2014' or 'Kelner2006'
        - maplonlat (2d tuple of np.array): healpix maps of galactic longitude and latitude
        which can be provided to save time in case of repeated computation
        - output_lonlat (bool): use this keyword to also return the lon and lat maps

        Outputs
        ----------
        - gamma_map (np.ndarray) : the map in units of sr-1 or brightness
        - if output_lonlat is True, maplon and maplat are also returned
        """
        # Get a healpy radius map
        radius, dist_map, maplon, maplat = model_tools.radius_hpmap(self._coord.galactic.l.to_value('deg'),
                                                                    self._coord.galactic.b.to_value('deg'),
                                                                    self._R_truncation, self._Rmin,
                                                                    self._Npt_per_decade_integ,
                                                                    nside=nside, maplonlat=maplonlat)
        # Project the integrand
        r_proj, profile = self.get_gamma_profile(radius, Emin=Emin, Emax=Emax, Energy_density=Energy_density,
                                                 Rmin_los=Rmin_los, NR500_los=NR500_los, Cframe=Cframe,
                                                 model=model)
        # Convert to angle and interpolate onto a map
        # (cubic interpolation with extrapolation beyond the sampled radii)
        theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi # degrees
        itpl = interpolate.interp1d(theta_proj, profile, kind='cubic', fill_value='extrapolate')
        gamma_map = itpl(dist_map)*profile.unit
        # Avoid numerical residual ringing from interpolation
        gamma_map[dist_map > self._theta_truncation.to_value('deg')] = 0
        # Cast to the requested output unit (no flux normalization in this method)
        if Energy_density:
            gamma_map = gamma_map.to('GeV cm-2 s-1 sr-1')
        else :
            gamma_map = gamma_map.to('cm-2 s-1 sr-1')
        if output_lonlat:
            return gamma_map, maplon, maplat
        else:
            return gamma_map
#==================================================
# Compute neutrinos spectrum
#==================================================
def get_neutrino_spectrum(self, energy=np.logspace(-2,6,100)*u.GeV,
Rmin=None, Rmax=None,
type_integral='spherical',
Rmin_los=None, NR500_los=5.0,
flavor='all',
Cframe=False):
"""
Compute the neutrino emission enclosed within [Rmin,Rmax], in 3d (i.e. spherically
integrated), or the neutrino emmission enclosed within an circular area (i.e.
cylindrical).
Parameters
----------
- energy (quantity) : the physical energy of neutrinos
- Rmin, Rmax (quantity): the radius within with the spectrum is computed
(default is 1kpc, R500)
- type_integral (string): either 'spherical' or 'cylindrical'
- Rmin_los (quantity): minimal radius at which l.o.s integration starts
This is used only for cylindrical case
- NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
This is used only for cylindrical case
- flavor (str): either 'all', 'numu' or 'nue'
- Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
Outputs
----------
- energy (quantity) : the physical energy of neutrino
- dN_dEdSdt (np.ndarray) : the spectrum in units of GeV-1 cm-2 s-1
"""
# In case the input is not an array
energy = model_tools.check_qarray(energy, unit='GeV')
# K-correction
if Cframe:
energy_rf = energy*1.0
else:
energy_rf = energy*(1+self._redshift)
# Check the type of integral
ok_list = ['spherical', 'cylindrical']
if not type_integral in ok_list:
raise ValueError("This requested integral type (type_integral) is not available")
# Get the integration limits
if Rmin is None:
Rmin = self._Rmin
if Rmax is None:
Rmax = self._R500
if Rmin_los is None:
Rmin_los = self._Rmin
if Rmin.to_value('kpc') <= 0:
raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
if Rmin.to_value('kpc') < 1e-2:
if not self._silent:
print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")
# Compute the integral
if type_integral == 'spherical':
rad = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
dN_dEdVdt = self.get_rate_neutrino(energy_rf, rad, flavor=flavor)
dN_dEdt = model_tools.spherical_integration(dN_dEdVdt, rad)
# Compute the integral
if type_integral == 'cylindrical':
Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1, NptPd=self._Npt_per_decade_integ, unit=True)
los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
NptPd=self._Npt_per_decade_integ, unit=True)
r2d = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
dN_dEdVdt = self.get_rate_neutrino(energy_rf, r3d, flavor=flavor)
dN_dEdt = model_tools.cylindrical_integration(dN_dEdVdt, energy, r3d, r2d, los,
Rtrunc=self._R_truncation)
# From intrinsic luminosity to flux
dN_dEdSdt = dN_dEdt / (4*np.pi * self._D_lum**2)
return energy, dN_dEdSdt.to('GeV-1 cm-2 s-1')
#==================================================
# Compute neutrino profile
#==================================================
def get_neutrino_profile(self, radius=np.logspace(0,4,100)*u.kpc,
                         Emin=None, Emax=None, Energy_density=False,
                         Rmin_los=None, NR500_los=5.0,
                         flavor='all',
                         Cframe=False):
    """
    Compute the neutrino surface-brightness profile, i.e. the emission rate
    integrated over the band [Emin, Emax] and projected along the line of sight.

    Parameters
    ----------
    - radius (quantity): the projected 2d radius in units homogeneous to kpc, as a 1d array
    - Emin (quantity): the lower bound for neutrino energy integration
    - Emax (quantity): the upper bound for neutrino energy integration
    - Energy_density (bool): if True, the energy flux profile is computed;
      otherwise the particle flux profile is computed
    - Rmin_los (Quantity): the radius at which line of sight integration starts
    - NR500_los (float): the line-of-sight integration will stop at NR500_los x R500
    - flavor (str): either 'all', 'numu' or 'nue'
    - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)

    Outputs
    ----------
    - radius (quantity): the projected 2d radius in unit of kpc
    - dN_dSdtdO (np.ndarray) : the profile in units of cm-2 s-1 sr-1 or GeV cm-2 s-1 sr-1
    """
    # Sanitize the radius input
    radius = model_tools.check_qarray(radius, unit='kpc')

    # Fill in default integration bounds
    if Emin is None:
        Emin = self._Epmin/10.0  # photon energy down to 0.1 minimal proton energy
    if Emax is None:
        Emax = self._Epmax
    if Rmin_los is None:
        Rmin_los = self._Rmin
    Rmin = np.amin(radius.to_value('kpc'))*u.kpc
    Rmax = np.amax(radius.to_value('kpc'))*u.kpc

    # Energy sampling; K-correction only when not in the cluster frame
    eng_obs = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
    eng_clu = eng_obs*1.0 if Cframe else eng_obs*(1+self._redshift)

    # Radial sampling for the 3d rate and for the line-of-sight integral
    Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
    Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
    r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1,
                                     NptPd=self._Npt_per_decade_integ, unit=True)
    los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                     NptPd=self._Npt_per_decade_integ, unit=True)

    # Volume emission rate, integrated over the energy band
    rate = self.get_rate_neutrino(eng_clu, r3d, flavor=flavor)
    rate_eint = model_tools.energy_integration(rate, eng_obs, Energy_density=Energy_density)

    # Project along the line of sight and apply the truncation radius
    rate_proj = model_tools.los_integration_1dfunc(rate_eint, r3d, radius, los)
    rate_proj[radius > self._R_truncation] = 0

    # Physical to angular scale, then intrinsic rate to observed flux
    dN_dSdtdO = (rate_proj * self._D_ang**2 * u.Unit('sr-1')) / (4*np.pi * self._D_lum**2)

    # Homogenize the output unit
    if Energy_density:
        dN_dSdtdO = dN_dSdtdO.to('GeV cm-2 s-1 sr-1')
    else:
        dN_dSdtdO = dN_dSdtdO.to('cm-2 s-1 sr-1')

    return radius, dN_dSdtdO
#==================================================
# Compute neutrino flux
#==================================================
def get_neutrino_flux(self, Emin=None, Emax=None, Energy_density=False,
                      Rmin=None, Rmax=None,
                      type_integral='spherical',
                      Rmin_los=None, NR500_los=5.0,
                      flavor='all',
                      Cframe=False):
    """
    Compute the neutrino emission enclosed within Rmax, in 3d (i.e. spherically
    integrated), or the neutrino emmission enclosed within an circular area (i.e.
    cylindrical), and in a given energy band. The minimal energy can be an array to
    flux(>E) and the radius max can be an array to get flux(<R).

    Parameters
    ----------
    - Emin (quantity): the lower bound for neutrino energy integration
      It can be an array.
    - Emax (quantity): the upper bound for neutrino energy integration
    - Energy_density (bool): if True, then the energy density is computed. Otherwise,
      the number density is computed.
    - Rmin (quantity): the minimal radius within with the spectrum is computed
    - Rmax (quantity): the maximal radius within with the spectrum is computed.
      It can be an array.
    - type_integral (string): either 'spherical' or 'cylindrical'
    - Rmin_los (quantity): minimal radius at which l.o.s integration starts
      This is used only for cylindrical case
    - NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
      This is used only for cylindrical case
    - flavor (str): either 'all', 'numu' or 'nue'
    - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)

    Outputs
    ----------
    - flux (quantity) : the neutrino flux either in GeV/cm2/s or ph/cm2/s, depending
      on parameter Energy_density
    """
    # Check the type of integral
    ok_list = ['spherical', 'cylindrical']
    if not type_integral in ok_list:
        raise ValueError("This requested integral type (type_integral) is not available")

    # Get the integration limits
    if Rmin_los is None:
        Rmin_los = self._Rmin
    if Rmin is None:
        Rmin = self._Rmin
    if Rmin.to_value('kpc') <= 0:
        raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
    if Rmin.to_value('kpc') < 1e-2:
        if not self._silent:
            print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")
    if Rmax is None:
        Rmax = self._R500
    if Emin is None:
        Emin = self._Epmin/10.0 # default photon energy down to 0.1 minimal proton energy
    if Emax is None:
        Emax = self._Epmax

    # Emin and Rmax may each be an array, but not both at once (output would be 2d)
    if type(Emin.value) == np.ndarray and type(Rmax.value) == np.ndarray:
        raise ValueError('Emin and Rmax cannot both be array simultaneously')

    #----- Case of scalar quantities
    if (type(Emin.value) == float or type(Emin.value) == np.float64) and (type(Rmax.value) == float or type(Rmax.value) == np.float64):
        # Get a spectrum
        energy = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
        energy, dN_dEdSdt = self.get_neutrino_spectrum(energy, Rmin=Rmin, Rmax=Rmax,
                                                       type_integral=type_integral,
                                                       Rmin_los=Rmin_los, NR500_los=NR500_los,
                                                       flavor=flavor, Cframe=Cframe)
        # Integrate over it and return
        flux = model_tools.energy_integration(dN_dEdSdt, energy, Energy_density=Energy_density)

    #----- Case of energy array: flux(>Emin) for each requested Emin
    if type(Emin.value) == np.ndarray:
        # Get a spectrum down to the smallest requested Emin
        energy = model_tools.sampling_array(np.amin(Emin.value)*Emin.unit, Emax,
                                            NptPd=self._Npt_per_decade_integ, unit=True)
        energy, dN_dEdSdt = self.get_neutrino_spectrum(energy, Rmin=Rmin, Rmax=Rmax,
                                                       type_integral=type_integral, Rmin_los=Rmin_los,
                                                       NR500_los=NR500_los,
                                                       flavor=flavor, Cframe=Cframe)
        # Integrate over it and return
        if Energy_density:
            flux = np.zeros(len(Emin))*u.Unit('GeV cm-2 s-1')
        else:
            flux = np.zeros(len(Emin))*u.Unit('cm-2 s-1')
        # Interpolate once, then resample each sub-band in log space
        itpl = interpolate.interp1d(energy.value, dN_dEdSdt.value, kind='linear')
        for i in range(len(Emin)):
            eng_i = model_tools.sampling_array(Emin[i], Emax, NptPd=self._Npt_per_decade_integ, unit=True)
            dN_dEdSdt_i = itpl(eng_i.value)*dN_dEdSdt.unit
            flux[i] = model_tools.energy_integration(dN_dEdSdt_i, eng_i, Energy_density=Energy_density)

    #----- Case of radius array (need to use dN/dVdEdt and not get_profile because spherical flux)
    if type(Rmax.value) == np.ndarray:
        # Get energy integration
        eng = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
        # K-correction: the emission rate is evaluated at cluster-frame energies
        if Cframe:
            eng_rf = eng*1.0
        else:
            eng_rf = eng*(1+self._redshift)
        if type_integral == 'spherical':
            Rmax3d = np.amax(Rmax.value)*Rmax.unit
            Rmin3d = Rmin
        if type_integral == 'cylindrical':
            Rmax3d = np.sqrt((NR500_los*self._R500)**2 + (np.amax(Rmax.value)*Rmax.unit)**2)*1.1
            Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)*0.9
        r3d = model_tools.sampling_array(Rmin3d, Rmax3d, NptPd=self._Npt_per_decade_integ, unit=True)
        los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                         NptPd=self._Npt_per_decade_integ, unit=True)
        # BUG FIX: propagate the requested neutrino flavor. It was previously
        # dropped here, so flux(<R) silently used the default 'all' flavor.
        dN_dEdVdt = self.get_rate_neutrino(eng_rf, r3d, flavor=flavor)
        # Compute energy integal
        dN_dVdt = model_tools.energy_integration(dN_dEdVdt, eng, Energy_density=Energy_density)
        # Define output
        if Energy_density:
            flux = np.zeros(len(Rmax))*u.Unit('GeV cm-2 s-1')
        else:
            flux = np.zeros(len(Rmax))*u.Unit('cm-2 s-1')
        # Case of spherical integral: direct volume integration
        if type_integral == 'spherical':
            itpl = interpolate.interp1d(r3d.to_value('kpc'), dN_dVdt.value, kind='linear')
            for i in range(len(Rmax)):
                rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
                dN_dVdt_i = itpl(rad_i.to_value('kpc'))*dN_dVdt.unit
                lum_i = model_tools.spherical_integration(dN_dVdt_i, rad_i)
                flux[i] = lum_i / (4*np.pi * self._D_lum**2)
        # Case of cylindrical integral
        if type_integral == 'cylindrical':
            # Compute integral over l.o.s.
            radius = model_tools.sampling_array(Rmin, np.amax(Rmax.value)*Rmax.unit,
                                                NptPd=self._Npt_per_decade_integ, unit=True)
            dN_dVdt_proj = model_tools.los_integration_1dfunc(dN_dVdt, r3d, radius, los)
            dN_dVdt_proj[radius > self._R_truncation] = 0
            dN_dSdVdt_proj = dN_dVdt_proj / (4*np.pi * self._D_lum**2)
            itpl = interpolate.interp1d(radius.to_value('kpc'), dN_dSdVdt_proj.value, kind='linear')
            for i in range(len(Rmax)):
                rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
                dN_dSdVdt_proj_i = itpl(rad_i.value)*dN_dSdVdt_proj.unit
                flux[i] = model_tools.trapz_loglog(2*np.pi*rad_i*dN_dSdVdt_proj_i, rad_i)

    # Return with homogeneous units
    if Energy_density:
        flux = flux.to('GeV cm-2 s-1')
    else:
        flux = flux.to('cm-2 s-1')

    return flux
#==================================================
# Compute neutrino map
#==================================================
def get_neutrino_map(self, Emin=None, Emax=None,
                     Rmin_los=None, NR500_los=5.0,
                     Rmin=None, Rmax=None,
                     Energy_density=False, Normalize=False,
                     flavor='all',
                     Cframe=False):
    """
    Compute a neutrino surface-brightness map on the model map grid.
    With Normalize=True the map is divided by the cylindrically integrated
    flux, which turns it into a template in units of sr-1.

    Parameters
    ----------
    - Emin (quantity): the lower bound for neutrino energy integration
    - Emax (quantity): the upper bound for neutrino energy integration
    - Rmin_los (Quantity): the radius at which line of sight integration starts
    - NR500_los (float): the integration will stop at NR500_los x R500
    - Rmin, Rmax (quantity): radial range used for the normalization flux;
      only used when Normalize is True
    - Energy_density (bool): if True, the energy flux map is computed; otherwise
      the particle flux map. Irrelevant for the template when Normalize is True.
    - Normalize (bool): if True, the map is normalized by the flux to get a
      template in unit of sr-1
    - flavor (str): either 'all', 'numu' or 'nue'
    - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)

    Outputs
    ----------
    neutrino_map (np.ndarray) : the map in units of sr-1 or brightness
    """
    # Angular separation map (deg) from the cluster center, on the model grid
    head = self.get_map_header()
    ra_map, dec_map = map_tools.get_radec_map(head)
    dist_map = map_tools.greatcircle(ra_map, dec_map,
                                     self._coord.icrs.ra.to_value('deg'),
                                     self._coord.icrs.dec.to_value('deg'))

    # Radial range needed to cover the field of view
    theta_max = np.amax(dist_map)
    theta_min = np.amin(dist_map)
    if theta_min > 10 and theta_max > 10:
        print('!!!!! WARNING: the cluster location is very much offset from the field of view')
    rmax = theta_max*np.pi/180 * self._D_ang
    rmin = theta_min*np.pi/180 * self._D_ang
    if rmin == 0:
        rmin = self._Rmin  # log-space sampling cannot start at zero
    rad_samp = model_tools.sampling_array(rmin, rmax, NptPd=self._Npt_per_decade_integ, unit=True)

    # Line-of-sight projected profile
    r_proj, prof = self.get_neutrino_profile(rad_samp, Emin=Emin, Emax=Emax,
                                             Energy_density=Energy_density,
                                             Rmin_los=Rmin_los, NR500_los=NR500_los,
                                             flavor=flavor, Cframe=Cframe)

    # Interpolate the profile (as a function of angle, in deg) onto the grid
    theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi
    nu_map = map_tools.profile2map(prof.value, theta_proj, dist_map)*prof.unit

    # Zero the map beyond truncation to kill interpolation ringing
    nu_map[dist_map > self._theta_truncation.to_value('deg')] = 0

    if Normalize:
        # Normalize by the total (cylindrical) flux -> template in sr-1
        if Rmax is None:
            Rmax = self._R_truncation if self._R_truncation is not np.inf else NR500_los*self._R500
        if Rmin is None:
            Rmin = self._Rmin
        flux = self.get_neutrino_flux(Rmin=Rmin, Rmax=Rmax, type_integral='cylindrical',
                                      NR500_los=NR500_los, Emin=Emin, Emax=Emax,
                                      Energy_density=Energy_density,
                                      flavor=flavor, Cframe=Cframe)
        nu_map = (nu_map / flux).to('sr-1')
    else:
        target_unit = 'GeV cm-2 s-1 sr-1' if Energy_density else 'cm-2 s-1 sr-1'
        nu_map = nu_map.to(target_unit)

    return nu_map
#==================================================
# Compute nu map - healpix format
#==================================================
def get_neutrino_hpmap(self, nside=2048, Emin=None, Emax=None,
                       Rmin_los=None, NR500_los=5.0,
                       Rmin=None, Rmax=None,
                       Energy_density=False,
                       flavor='all',
                       Cframe=False,
                       maplonlat=None, output_lonlat=False):
    """
    Compute the neutrino map in (RING) healpix format.

    Parameters
    ----------
    - nside (int): healpix Nside
    - Emin (quantity): the lower bound for nu energy integration.
    - Emax (quantity): the upper bound for nu energy integration
    - Rmin_los (Quantity): the radius at which line of sight integration starts
    - NR500_los (float): the integration will stop at NR500_los x R500
    - Rmin, Rmax (quantity): the radius within with the spectrum is computed
      (default is 1kpc, Rtruncation) for getting the normlization flux.
    - Energy_density (bool): if True, then the energy density is computed. Otherwise,
      the number density is computed.
    - flavor (str): either 'all', 'numu' or 'nue'
    - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
    - maplonlat (2d tuple of np.array): healpix maps of galactic longitude and latitude
      which can be provided to save time in case of repeated computation
    - output_lonlat (bool): use this keyword to also return the lon and lat maps

    Outputs
    ----------
    - neutrino_map (np.ndarray) : the map in units of sr-1 or brightness
    - if output_lonlat is True, maplon and maplat are also returned
    """
    # Get a healpy radius map (angular separation from the cluster, in deg)
    radius, dist_map, maplon, maplat = model_tools.radius_hpmap(self._coord.galactic.l.to_value('deg'),
                                                                self._coord.galactic.b.to_value('deg'),
                                                                self._R_truncation, self._Rmin,
                                                                self._Npt_per_decade_integ,
                                                                nside=nside, maplonlat=maplonlat)

    # Project the integrand (line-of-sight integrated profile)
    r_proj, profile = self.get_neutrino_profile(radius, Emin=Emin, Emax=Emax, Energy_density=Energy_density,
                                                Rmin_los=Rmin_los, NR500_los=NR500_los, flavor=flavor,
                                                Cframe=Cframe)

    # Convert to angle and interpolate onto a map
    theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi   # degrees
    itpl = interpolate.interp1d(theta_proj, profile, kind='cubic', fill_value='extrapolate')
    nu_map = itpl(dist_map)*profile.unit

    # Avoid numerical residual ringing from interpolation beyond truncation
    nu_map[dist_map > self._theta_truncation.to_value('deg')] = 0

    # Homogenize the output unit
    if Energy_density:
        nu_map = nu_map.to('GeV cm-2 s-1 sr-1')
    else :
        nu_map = nu_map.to('cm-2 s-1 sr-1')

    if output_lonlat:
        return nu_map, maplon, maplat
    else:
        return nu_map
#==================================================
# Compute inverse compton spectrum
#==================================================
def get_ic_spectrum(self, energy=np.logspace(-2,6,100)*u.GeV,
                    Rmin=None, Rmax=None,
                    type_integral='spherical',
                    Rmin_los=None, NR500_los=5.0,
                    Cframe=False):
    """
    Compute the inverse Compton spectrum enclosed within [Rmin, Rmax], either
    spherically integrated or integrated within a cylinder along the line of sight.

    Note
    ----------
    At high energy, the IC emission analytical parametrization present sharp features
    which require a rather high NptEePD (10 is clearly to low and will induce wiggles
    in the spectrum)

    Parameters
    ----------
    - energy (quantity) : the physical energy of photons
    - Rmin, Rmax (quantity): the radius within with the spectrum is computed
      (default is 1kpc, R500)
    - type_integral (string): either 'spherical' or 'cylindrical'
    - Rmin_los (quantity): minimal radius at which l.o.s integration starts
      This is used only for cylindrical case
    - NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
      This is used only for cylindrical case
    - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)

    Outputs
    ----------
    - energy (quantity) : the physical energy of photons
    - dN_dEdSdt (np.ndarray) : the spectrum in units of GeV-1 cm-2 s-1
    """
    # Sanitize the energy input and apply the K-correction
    energy = model_tools.check_qarray(energy, unit='GeV')
    energy_rf = energy*1.0 if Cframe else energy*(1+self._redshift)

    # Validate the integration scheme
    if type_integral not in ('spherical', 'cylindrical'):
        raise ValueError("This requested integral type (type_integral) is not available")

    # Default integration bounds and sanity checks
    if Rmin is None:
        Rmin = self._Rmin
    if Rmax is None:
        Rmax = self._R500
    if Rmin_los is None:
        Rmin_los = self._Rmin
    if Rmin.to_value('kpc') <= 0:
        raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
    if Rmin.to_value('kpc') < 1e-2:
        if not self._silent:
            print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")

    # Spherical scheme: direct volume integral of the emission rate
    if type_integral == 'spherical':
        rad = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
        dN_dEdt = model_tools.spherical_integration(self.get_rate_ic(energy_rf, rad), rad)

    # Cylindrical scheme: line-of-sight plus aperture integral
    if type_integral == 'cylindrical':
        Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
        Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
        r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1,
                                         NptPd=self._Npt_per_decade_integ, unit=True)
        los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                         NptPd=self._Npt_per_decade_integ, unit=True)
        r2d = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
        dN_dEdt = model_tools.cylindrical_integration(self.get_rate_ic(energy_rf, r3d),
                                                      energy, r3d, r2d, los,
                                                      Rtrunc=self._R_truncation)

    # Intrinsic rate to observed flux
    dN_dEdSdt = dN_dEdt / (4*np.pi * self._D_lum**2)

    # EBL absorption applies only in the observer frame
    if self._EBL_model != 'none' and not Cframe:
        absorb = cluster_spectra.get_ebl_absorb(energy.to_value('GeV'), self._redshift, self._EBL_model)
        dN_dEdSdt = dN_dEdSdt * absorb

    return energy, dN_dEdSdt.to('GeV-1 cm-2 s-1')
#==================================================
# Compute inverse Compton profile
#==================================================
def get_ic_profile(self, radius=np.logspace(0,4,100)*u.kpc,
                   Emin=None, Emax=None, Energy_density=False,
                   Rmin_los=None, NR500_los=5.0,
                   Cframe=False):
    """
    Compute the inverse Compton surface-brightness profile, i.e. the emission
    rate integrated over [Emin, Emax] and projected along the line of sight.

    Parameters
    ----------
    - radius (quantity): the projected 2d radius in units homogeneous to kpc, as a 1d array
    - Emin (quantity): the lower bound for IC energy integration
    - Emax (quantity): the upper bound for IC energy integration
    - Energy_density (bool): if True, the energy flux profile is computed;
      otherwise the particle flux profile is computed
    - Rmin_los (Quantity): the radius at which line of sight integration starts
    - NR500_los (float): the line-of-sight integration will stop at NR500_los x R500
    - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)

    Outputs
    ----------
    - radius (quantity): the projected 2d radius in unit of kpc
    - dN_dSdtdO (np.ndarray) : the profile in units of cm-2 s-1 sr-1 or GeV cm-2 s-1 sr-1
    """
    # Sanitize the radius input
    radius = model_tools.check_qarray(radius, unit='kpc')

    # Fill in default integration bounds
    if Emin is None:
        Emin = self._Epmin/10.0  # photon energy down to 0.1 minimal proton energy
    if Emax is None:
        Emax = self._Epmax
    if Rmin_los is None:
        Rmin_los = self._Rmin
    Rmin = np.amin(radius.to_value('kpc'))*u.kpc
    Rmax = np.amax(radius.to_value('kpc'))*u.kpc

    # Energy sampling; K-correction only when not in the cluster frame
    eng_obs = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
    eng_clu = eng_obs*1.0 if Cframe else eng_obs*(1+self._redshift)

    # Radial sampling for the 3d rate and for the line-of-sight integral
    Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
    Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
    r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1,
                                     NptPd=self._Npt_per_decade_integ, unit=True)
    los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                     NptPd=self._Npt_per_decade_integ, unit=True)

    # Volume emission rate, with EBL absorption in the observer frame
    rate = self.get_rate_ic(eng_clu, r3d)
    if self._EBL_model != 'none' and not Cframe:
        absorb = cluster_spectra.get_ebl_absorb(eng_obs.to_value('GeV'), self._redshift, self._EBL_model)
        rate = rate * model_tools.replicate_array(absorb, len(r3d), T=True)

    # Integrate over the energy band, then project along the line of sight
    rate_eint = model_tools.energy_integration(rate, eng_obs, Energy_density=Energy_density)
    rate_proj = model_tools.los_integration_1dfunc(rate_eint, r3d, radius, los)
    rate_proj[radius > self._R_truncation] = 0

    # Physical to angular scale, then intrinsic rate to observed flux
    dN_dSdtdO = (rate_proj * self._D_ang**2 * u.Unit('sr-1')) / (4*np.pi * self._D_lum**2)

    # Homogenize the output unit
    if Energy_density:
        dN_dSdtdO = dN_dSdtdO.to('GeV cm-2 s-1 sr-1')
    else:
        dN_dSdtdO = dN_dSdtdO.to('cm-2 s-1 sr-1')

    return radius, dN_dSdtdO
#==================================================
# Compute gamma ray flux
#==================================================
def get_ic_flux(self, Emin=None, Emax=None, Energy_density=False,
                Rmin=None, Rmax=None,
                type_integral='spherical',
                Rmin_los=None, NR500_los=5.0,
                Cframe=False):
    """
    Compute the inverse Compton emission enclosed within Rmax, in 3d (i.e. spherically
    integrated), or the inverse Compton emmission enclosed within an circular area (i.e.
    cylindrical), and in a given energy band. The minimal energy can be an array to
    flux(>E) and the radius max can be an array to get flux(<R).

    Parameters
    ----------
    - Emin (quantity): the lower bound for IC energy integration
      It can be an array.
    - Emax (quantity): the upper bound for IC energy integration
    - Energy_density (bool): if True, then the energy density is computed. Otherwise,
      the number density is computed.
    - Rmin (quantity): the minimal radius within with the spectrum is computed
    - Rmax (quantity): the maximal radius within with the spectrum is computed.
      It can be an array.
    - type_integral (string): either 'spherical' or 'cylindrical'
    - Rmin_los (quantity): minimal radius at which l.o.s integration starts
      This is used only for cylindrical case
    - NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
      This is used only for cylindrical case
    - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)

    Outputs
    ----------
    - flux (quantity) : the IC flux either in GeV/cm2/s or ph/cm2/s, depending
      on parameter Energy_density
    """
    # Check the type of integral
    ok_list = ['spherical', 'cylindrical']
    if not type_integral in ok_list:
        raise ValueError("This requested integral type (type_integral) is not available")

    # Get the integration limits
    if Rmin_los is None:
        Rmin_los = self._Rmin
    if Rmin is None:
        Rmin = self._Rmin
    if Rmin.to_value('kpc') <= 0:
        raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
    if Rmin.to_value('kpc') < 1e-2:
        if not self._silent:
            print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")
    if Rmax is None:
        Rmax = self._R500
    if Emin is None:
        Emin = self._Epmin/10.0 # default photon energy down to 0.1 minimal proton energy
    if Emax is None:
        Emax = self._Epmax

    # Check if Emin and Rmax are scalar or array (both arrays would make a 2d output)
    if type(Emin.value) == np.ndarray and type(Rmax.value) == np.ndarray:
        raise ValueError('Emin and Rmax cannot both be array simultaneously')

    #----- Case of scalar quantities: single band, single aperture
    if (type(Emin.value) == float or type(Emin.value) == np.float64) and (type(Rmax.value) == float or type(Rmax.value) == np.float64):
        # Get a spectrum
        energy = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
        energy, dN_dEdSdt = self.get_ic_spectrum(energy, Rmin=Rmin, Rmax=Rmax,
                                                 type_integral=type_integral,
                                                 Rmin_los=Rmin_los, NR500_los=NR500_los, Cframe=Cframe)
        # Integrate over it and return
        flux = model_tools.energy_integration(dN_dEdSdt, energy, Energy_density=Energy_density)

    #----- Case of energy array: flux(>Emin) for each requested Emin
    if type(Emin.value) == np.ndarray:
        # Get a spectrum, sampled down to the smallest requested Emin
        energy = model_tools.sampling_array(np.amin(Emin.value)*Emin.unit, Emax,
                                            NptPd=self._Npt_per_decade_integ, unit=True)
        energy, dN_dEdSdt = self.get_ic_spectrum(energy, Rmin=Rmin, Rmax=Rmax,
                                                 type_integral=type_integral, Rmin_los=Rmin_los,
                                                 NR500_los=NR500_los, Cframe=Cframe)
        # Integrate over it and return
        if Energy_density:
            flux = np.zeros(len(Emin))*u.Unit('GeV cm-2 s-1')
        else:
            flux = np.zeros(len(Emin))*u.Unit('cm-2 s-1')
        # Interpolate the spectrum once, then resample each sub-band
        itpl = interpolate.interp1d(energy.value, dN_dEdSdt.value, kind='linear')
        for i in range(len(Emin)):
            eng_i = model_tools.sampling_array(Emin[i], Emax, NptPd=self._Npt_per_decade_integ, unit=True)
            dN_dEdSdt_i = itpl(eng_i.value)*dN_dEdSdt.unit
            flux[i] = model_tools.energy_integration(dN_dEdSdt_i, eng_i, Energy_density=Energy_density)

    #----- Case of radius array (need to use dN/dVdEdt and not get_profile because spherical flux)
    if type(Rmax.value) == np.ndarray:
        # Get energy integration
        eng = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
        # K-correction: the emission rate is evaluated at cluster-frame energies
        if Cframe:
            eng_rf = eng*1.0
        else:
            eng_rf = eng*(1+self._redshift)
        if type_integral == 'spherical':
            Rmax3d = np.amax(Rmax.value)*Rmax.unit
            Rmin3d = Rmin
        if type_integral == 'cylindrical':
            Rmax3d = np.sqrt((NR500_los*self._R500)**2 + (np.amax(Rmax.value)*Rmax.unit)**2)*1.1
            Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)*0.9
        r3d = model_tools.sampling_array(Rmin3d, Rmax3d, NptPd=self._Npt_per_decade_integ, unit=True)
        los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                         NptPd=self._Npt_per_decade_integ, unit=True)
        dN_dEdVdt = self.get_rate_ic(eng_rf, r3d)
        # Apply EBL absorbtion (only in the observer frame)
        if self._EBL_model != 'none' and not Cframe:
            absorb = cluster_spectra.get_ebl_absorb(eng.to_value('GeV'), self._redshift, self._EBL_model)
            dN_dEdVdt = dN_dEdVdt * model_tools.replicate_array(absorb, len(r3d), T=True)
        # Compute energy integal
        dN_dVdt = model_tools.energy_integration(dN_dEdVdt, eng, Energy_density=Energy_density)
        # Define output
        if Energy_density:
            flux = np.zeros(len(Rmax))*u.Unit('GeV cm-2 s-1')
        else:
            flux = np.zeros(len(Rmax))*u.Unit('cm-2 s-1')
        # Case of spherical integral: direct volume integration
        if type_integral == 'spherical':
            itpl = interpolate.interp1d(r3d.to_value('kpc'), dN_dVdt.value, kind='linear')
            for i in range(len(Rmax)):
                rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
                dN_dVdt_i = itpl(rad_i.to_value('kpc'))*dN_dVdt.unit
                lum_i = model_tools.spherical_integration(dN_dVdt_i, rad_i)
                flux[i] = lum_i / (4*np.pi * self._D_lum**2)
        # Case of cylindrical integral
        if type_integral == 'cylindrical':
            # Compute integral over l.o.s.
            radius = model_tools.sampling_array(Rmin, np.amax(Rmax.value)*Rmax.unit,
                                                NptPd=self._Npt_per_decade_integ, unit=True)
            dN_dVdt_proj = model_tools.los_integration_1dfunc(dN_dVdt, r3d, radius, los)
            dN_dVdt_proj[radius > self._R_truncation] = 0
            dN_dSdVdt_proj = dN_dVdt_proj / (4*np.pi * self._D_lum**2)
            itpl = interpolate.interp1d(radius.to_value('kpc'), dN_dSdVdt_proj.value, kind='linear')
            for i in range(len(Rmax)):
                rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
                dN_dSdVdt_proj_i = itpl(rad_i.value)*dN_dSdVdt_proj.unit
                flux[i] = model_tools.trapz_loglog(2*np.pi*rad_i*dN_dSdVdt_proj_i, rad_i)

    # Return with homogeneous units
    if Energy_density:
        flux = flux.to('GeV cm-2 s-1')
    else:
        flux = flux.to('cm-2 s-1')

    return flux
#==================================================
# Compute IC map
#==================================================
def get_ic_map(self, Emin=None, Emax=None,
               Rmin_los=None, NR500_los=5.0,
               Rmin=None, Rmax=None,
               Energy_density=False, Normalize=False,
               Cframe=False):
    """
    Compute an inverse Compton surface-brightness map on the model map grid.
    With Normalize=True the map is divided by the cylindrically integrated
    flux, which turns it into a template in units of sr-1.

    Parameters
    ----------
    - Emin (quantity): the lower bound for IC energy integration
    - Emax (quantity): the upper bound for IC energy integration
    - Rmin_los (Quantity): the radius at which line of sight integration starts
    - NR500_los (float): the integration will stop at NR500_los x R500
    - Rmin, Rmax (quantity): radial range used for the normalization flux;
      only used when Normalize is True
    - Energy_density (bool): if True, the energy flux map is computed; otherwise
      the particle flux map. Irrelevant for the template when Normalize is True.
    - Normalize (bool): if True, the map is normalized by the flux to get a
      template in unit of sr-1
    - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)

    Outputs
    ----------
    ic_map (np.ndarray) : the map in units of sr-1 or brightness
    """
    # Angular separation map (deg) from the cluster center, on the model grid
    head = self.get_map_header()
    ra_map, dec_map = map_tools.get_radec_map(head)
    dist_map = map_tools.greatcircle(ra_map, dec_map,
                                     self._coord.icrs.ra.to_value('deg'),
                                     self._coord.icrs.dec.to_value('deg'))

    # Radial range needed to cover the field of view
    theta_max = np.amax(dist_map)
    theta_min = np.amin(dist_map)
    if theta_min > 10 and theta_max > 10:
        print('!!!!! WARNING: the cluster location is very much offset from the field of view')
    rmax = theta_max*np.pi/180 * self._D_ang
    rmin = theta_min*np.pi/180 * self._D_ang
    if rmin == 0:
        rmin = self._Rmin  # log-space sampling cannot start at zero
    rad_samp = model_tools.sampling_array(rmin, rmax, NptPd=self._Npt_per_decade_integ, unit=True)

    # Line-of-sight projected profile
    r_proj, prof = self.get_ic_profile(rad_samp, Emin=Emin, Emax=Emax,
                                       Energy_density=Energy_density,
                                       Rmin_los=Rmin_los, NR500_los=NR500_los, Cframe=Cframe)

    # Interpolate the profile (as a function of angle, in deg) onto the grid
    theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi
    ic_map = map_tools.profile2map(prof.value, theta_proj, dist_map)*prof.unit

    # Zero the map beyond truncation to kill interpolation ringing
    ic_map[dist_map > self._theta_truncation.to_value('deg')] = 0

    if Normalize:
        # Normalize by the total (cylindrical) flux -> template in sr-1
        if Rmax is None:
            Rmax = self._R_truncation if self._R_truncation is not np.inf else NR500_los*self._R500
        if Rmin is None:
            Rmin = self._Rmin
        flux = self.get_ic_flux(Rmin=Rmin, Rmax=Rmax, type_integral='cylindrical',
                                NR500_los=NR500_los, Emin=Emin, Emax=Emax,
                                Energy_density=Energy_density, Cframe=Cframe)
        ic_map = (ic_map / flux).to('sr-1')
    else:
        target_unit = 'GeV cm-2 s-1 sr-1' if Energy_density else 'cm-2 s-1 sr-1'
        ic_map = ic_map.to(target_unit)

    return ic_map
#==================================================
# Compute IC map - healpix format
#==================================================
def get_ic_hpmap(self, nside=2048, Emin=None, Emax=None,
                 Rmin_los=None, NR500_los=5.0,
                 Rmin=None, Rmax=None,
                 Energy_density=False,
                 Cframe=False,
                 maplonlat=None, output_lonlat=False):
    """
    Compute the inverse Compton map in the (RING) healpix format.

    Parameters
    ----------
    - nside (int): healpix Nside
    - Emin (quantity): the lower bound for IC energy integration.
    - Emax (quantity): the upper bound for IC energy integration
    - Rmin_los (Quantity): the radius at which line of sight integration starts
    - NR500_los (float): the integration will stop at NR500_los x R500
    - Rmin, Rmax (quantity): the radius within with the spectrum is computed
      (default is 1kpc, Rtruncation) for getting the normlization flux.
    - Energy_density (bool): if True, then the energy density is computed. Otherwise,
      the number density is computed.
    - Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
    - maplonlat (2d tuple of np.array): healpix maps of galactic longitude and latitude
      which can be provided to save time in case of repeated computation
    - output_lonlat (bool): use this keyword to also return the lon and lat maps

    Outputs
    ----------
    - ic_map (np.ndarray) : the map in units of sr-1 or brightness
    - if output_lonlat is True, maplon and maplat are also returned
    """
    # Healpix map of the angular separation (deg) from the cluster center
    radius, dist_map, maplon, maplat = model_tools.radius_hpmap(
        self._coord.galactic.l.to_value('deg'),
        self._coord.galactic.b.to_value('deg'),
        self._R_truncation, self._Rmin,
        self._Npt_per_decade_integ,
        nside=nside, maplonlat=maplonlat)

    # Line-of-sight projected profile
    r_proj, prof = self.get_ic_profile(radius, Emin=Emin, Emax=Emax,
                                       Energy_density=Energy_density,
                                       Rmin_los=Rmin_los, NR500_los=NR500_los, Cframe=Cframe)

    # Interpolate the profile (as a function of angle, in deg) onto the sphere
    theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi
    itpl = interpolate.interp1d(theta_proj, prof, kind='cubic', fill_value='extrapolate')
    ic_map = itpl(dist_map)*prof.unit

    # Zero the map beyond truncation to kill interpolation ringing
    ic_map[dist_map > self._theta_truncation.to_value('deg')] = 0

    # Homogenize the output unit
    target_unit = 'GeV cm-2 s-1 sr-1' if Energy_density else 'cm-2 s-1 sr-1'
    ic_map = ic_map.to(target_unit)

    if output_lonlat:
        return ic_map, maplon, maplat
    return ic_map
#==================================================
# Compute synchrotron spectrum
#==================================================
    def get_synchrotron_spectrum(self, frequency=np.logspace(-3,2,100)*u.GHz,
                                 Rmin=None, Rmax=None,
                                 type_integral='spherical',
                                 Rmin_los=None, NR500_los=5.0,
                                 Cframe=False):
        """
        Compute the synchrotron spectrum integrated within [Rmin, Rmax], either
        in 3d (spherically integrated) or within a circular aperture (i.e.
        cylindrically integrated along the line of sight).

        Parameters
        ----------
        - frequency (quantity): the observed photon frequency (scalar or array)
        - Rmin, Rmax (quantity): the radii between which the spectrum is computed
          (defaults are self._Rmin and R500)
        - type_integral (string): either 'spherical' or 'cylindrical'
        - Rmin_los (quantity): minimal radius at which l.o.s integration starts.
          This is used only for the cylindrical case.
        - NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
          This is used only for the cylindrical case.
        - Cframe (bool): computation assumes that we are in the cluster frame
          (no redshift K-correction applied to the photon energy)

        Outputs
        ----------
        - frequency (quantity): the observed photon frequency
        - spectrum (quantity): the flux density spectrum in units of Jy

        Raises
        ----------
        - ValueError: if type_integral is not 'spherical' or 'cylindrical'
        - TypeError: if Rmin is not strictly positive (log-space integration)
        """
        # In case the input is not an array
        frequency = model_tools.check_qarray(frequency, unit='GHz')
        energy = (const.h * frequency).to('eV')

        # K-correction: the emitted (rest-frame) energy is (1+z) x the observed one
        if Cframe:
            energy_rf = energy*1.0
        else:
            energy_rf = energy*(1+self._redshift)

        # Check the type of integral
        ok_list = ['spherical', 'cylindrical']
        if not type_integral in ok_list:
            raise ValueError("This requested integral type (type_integral) is not available")

        # Get the integration limits
        if Rmin is None:
            Rmin = self._Rmin
        if Rmax is None:
            Rmax = self._R500
        if Rmin_los is None:
            Rmin_los = self._Rmin
        if Rmin.to_value('kpc') <= 0:
            raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
        if Rmin.to_value('kpc') < 1e-2:
            if not self._silent:
                print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")

        # Compute the integral: spherical case is a direct volume integration
        if type_integral == 'spherical':
            rad = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
            dN_dEdVdt = self.get_rate_synchrotron(energy_rf, rad)
            dN_dEdt = model_tools.spherical_integration(dN_dEdVdt, rad)

        # Compute the integral: cylindrical case projects along the l.o.s. first.
        # The 0.9/1.1 factors pad the 3d grid so interpolation never hits its edges.
        if type_integral == 'cylindrical':
            Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
            Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
            r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1, NptPd=self._Npt_per_decade_integ, unit=True)
            los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                             NptPd=self._Npt_per_decade_integ, unit=True)
            r2d = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
            dN_dEdVdt = self.get_rate_synchrotron(energy_rf, r3d)
            dN_dEdt = model_tools.cylindrical_integration(dN_dEdVdt, energy, r3d, r2d, los,
                                                          Rtrunc=self._R_truncation)

        # From intrinsic luminosity to flux
        dN_dEdSdt = dN_dEdt / (4*np.pi * self._D_lum**2)

        # Photon rate to flux density: E^2 dN/dE divided by nu gives Jy
        return frequency, (dN_dEdSdt * energy**2 / frequency).to('Jy')
#==================================================
# Compute synchrotron profile
#==================================================
def get_synchrotron_profile(self, radius=np.logspace(0,4,100)*u.kpc,
freq0=1*u.GHz,
Rmin_los=None, NR500_los=5.0,
Cframe=False):
"""
Compute the synchrotron emission profile at frequency freq0.
Parameters
----------
- radius (quantity): the projected 2d radius in units homogeneous to kpc, as a 1d array
- freq0 (quantity): the frequency at which the profile is computed
- Rmin_los (Quantity): the radius at which line of sight integration starts
- NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
- Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
Outputs
----------
- radius (quantity): the projected 2d radius in unit of kpc
- sed (np.ndarray) : the spectrum in units of Jy/sr
"""
# In case the input is not an array
radius = model_tools.check_qarray(radius, unit='kpc')
# Get the integration limits
if Rmin_los is None:
Rmin_los = self._Rmin
Rmin = np.amin(radius.to_value('kpc'))*u.kpc
Rmax = np.amax(radius.to_value('kpc'))*u.kpc
# Define energy and K correction
eng0 = (freq0 * const.h).to('eV')
if Cframe:
eng0_rf = eng0*1.0
else:
eng0_rf = eng0*(1+self._redshift)
# Define array for integration
Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1, NptPd=self._Npt_per_decade_integ, unit=True)
los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
NptPd=self._Npt_per_decade_integ, unit=True)
dN_dVdt_E = self.get_rate_synchrotron(eng0_rf, r3d).flatten()
# Compute integral over l.o.s.
dN_dVdt_E_proj = model_tools.los_integration_1dfunc(dN_dVdt_E, r3d, radius, los)
dN_dVdt_E_proj[radius > self._R_truncation] = 0
# Convert to physical to angular scale
dN_dtdO_E = dN_dVdt_E_proj * self._D_ang**2 * u.Unit('sr-1')
# From intrinsic luminosity to flux
dN_dSdtdO_E = dN_dtdO_E / (4*np.pi * self._D_lum**2)
# return
sed = (dN_dSdtdO_E * eng0**2/freq0).to('Jy sr-1')
return radius, sed
#==================================================
# Compute synchrotron flux
#==================================================
def get_synchrotron_flux(self, freq0=1*u.GHz,
Rmin=None, Rmax=None,
type_integral='spherical',
Rmin_los=None, NR500_los=5.0,
Cframe=False):
"""
Compute the synchrotron emission enclosed within Rmax, in 3d (i.e. spherically
integrated), or the synchrotron emmission enclosed within a circular area (i.e.
cylindrical), and at a given frequency. The radius max can be an array to get flux(<R).
Parameters
----------
- freq0 (quantity): the frequency at which the profile is computed
- Rmin (quantity): the minimal radius within with the spectrum is computed
- Rmax (quantity): the maximal radius within with the spectrum is computed.
It can be an array.
- type_integral (string): either 'spherical' or 'cylindrical'
- Rmin_los (quantity): minimal radius at which l.o.s integration starts
This is used only for cylindrical case
- NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
This is used only for cylindrical case
- Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
Outputs
----------
- flux (quantity) : the synchrotron flux in Jy
"""
# Check the type of integral
ok_list = ['spherical', 'cylindrical']
if not type_integral in ok_list:
raise ValueError("This requested integral type (type_integral) is not available")
# Get the integration limits
if Rmin_los is None:
Rmin_los = self._Rmin
if Rmin is None:
Rmin = self._Rmin
if Rmax is None:
Rmax = self._R500
if Rmin.to_value('kpc') <= 0:
raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
if Rmin.to_value('kpc') < 1e-2:
if not self._silent:
print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")
#----- Case of scalar quantities
if type(Rmax.value) == float or type(Rmax.value) == np.float64:
freq0, flux = self.get_synchrotron_spectrum(freq0, Rmin=Rmin, Rmax=Rmax,
type_integral=type_integral, Rmin_los=Rmin_los,
NR500_los=NR500_los, Cframe=Cframe)
#----- Case of radius array (need to use dN/dVdEdt and not get_profile because spherical flux)
if type(Rmax.value) == np.ndarray:
# Get frequency sampling
eng0 = (freq0 * const.h).to('eV')
if Cframe:
eng0_rf = eng0*1.0
else:
eng0_rf = eng0*(1+self._redshift)
if type_integral == 'spherical':
Rmax3d = np.amax(Rmax.value)*Rmax.unit
Rmin3d = Rmin
if type_integral == 'cylindrical':
Rmax3d = np.sqrt((NR500_los*self._R500)**2 + (np.amax(Rmax.value)*Rmax.unit)**2)*1.1
Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)*0.9
r3d = model_tools.sampling_array(Rmin3d, Rmax3d, NptPd=self._Npt_per_decade_integ, unit=True)
los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
NptPd=self._Npt_per_decade_integ, unit=True)
dN_dVdt_E = self.get_rate_synchrotron(eng0_rf, r3d).flatten()
# Define output
flux = np.zeros(len(Rmax))*u.Unit('Jy')
# Case of spherical integral: direct volume integration
itpl = interpolate.interp1d(r3d.to_value('kpc'), dN_dVdt_E.value, kind='linear')
if type_integral == 'spherical':
for i in range(len(Rmax)):
rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
dN_dVdt_E_i = itpl(rad_i.to_value('kpc'))*dN_dVdt_E.unit
lum_i = model_tools.spherical_integration(dN_dVdt_E_i, rad_i) * eng0**2/freq0
flux[i] = lum_i / (4*np.pi * self._D_lum**2)
# Case of cylindrical integral
if type_integral == 'cylindrical':
# Compute integral over l.o.s.
radius = model_tools.sampling_array(Rmin, np.amax(Rmax.value)*Rmax.unit,
NptPd=self._Npt_per_decade_integ, unit=True)
dN_dVdt_E_proj = model_tools.los_integration_1dfunc(dN_dVdt_E, r3d, radius, los)
dN_dVdt_E_proj[radius > self._R_truncation] = 0
dN_dSdVdt_E_proj = dN_dVdt_E_proj / (4*np.pi * self._D_lum**2)
itpl = interpolate.interp1d(radius.to_value('kpc'), dN_dSdVdt_E_proj.value, kind='linear')
for i in range(len(Rmax)):
rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
dN_dSdVdt_E_proj_i = itpl(rad_i.value)*dN_dSdVdt_E_proj.unit
flux[i] = model_tools.trapz_loglog(2*np.pi*rad_i*dN_dSdVdt_E_proj_i, rad_i) * eng0**2/freq0
return flux.to('Jy')
#==================================================
# Compute synchrotron map
#==================================================
def get_synchrotron_map(self, freq0=1*u.GHz,
Rmin_los=None, NR500_los=5.0,
Rmin=None, Rmax=None,
Normalize=False,
Cframe=False):
"""
Compute the synchrotron map. The map is normalized so that the integral
of the map over the cluster volume is 1 (up to Rmax=5R500).
Parameters
----------
- freq0 (quantity): the frequency at wich we work
- Rmin_los (Quantity): the radius at which line of sight integration starts
- NR500_los (float): the integration will stop at NR500_los x R500
- Rmin, Rmax (quantity): the radius within with the spectrum is computed
(default is 1kpc, Rtruncation) for getting the normlization flux.
Has no effect if Normalized is False
- Normalize (bool): if True, the map is normalized by the flux to get a
template in unit of sr-1
- Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
Outputs
----------
synchrotron_map (np.ndarray) : the map in units of sr-1 or brightness
"""
# Get the header
header = self.get_map_header()
# Get a R.A-Dec. map
ra_map, dec_map = map_tools.get_radec_map(header)
# Get a cluster distance map (in deg)
dist_map = map_tools.greatcircle(ra_map, dec_map, self._coord.icrs.ra.to_value('deg'),
self._coord.icrs.dec.to_value('deg'))
# Define the radius used fo computing the profile
theta_max = np.amax(dist_map) # maximum angle from the cluster
theta_min = np.amin(dist_map) # minimum angle from the cluster (~0 if cluster within FoV)
if theta_min > 10 and theta_max > 10:
print('!!!!! WARNING: the cluster location is very much offset from the field of view')
rmax = theta_max*np.pi/180 * self._D_ang
rmin = theta_min*np.pi/180 * self._D_ang
if rmin == 0: rmin = self._Rmin
radius = model_tools.sampling_array(rmin, rmax, NptPd=self._Npt_per_decade_integ, unit=True)
# Project the integrand
r_proj, profile = self.get_synchrotron_profile(radius, freq0=freq0,
Rmin_los=Rmin_los, NR500_los=NR500_los, Cframe=Cframe)
# Convert to angle and interpolate onto a map
theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi # degrees
synchrotron_map = map_tools.profile2map(profile.value, theta_proj, dist_map)*profile.unit
# Avoid numerical residual ringing from interpolation
synchrotron_map[dist_map > self._theta_truncation.to_value('deg')] = 0
# Compute the normalization: to return a map in sr-1, i.e. by computing the total flux
if Normalize:
if Rmax is None:
if self._R_truncation is not np.inf:
Rmax = self._R_truncation
else:
Rmax = NR500_los*self._R500
if Rmin is None:
Rmin = self._Rmin
flux = self.get_synchrotron_flux(Rmin=Rmin, Rmax=Rmax, type_integral='cylindrical',
NR500_los=NR500_los, freq0=freq0, Cframe=Cframe)
synchrotron_map = synchrotron_map / flux
synchrotron_map = synchrotron_map.to('sr-1')
else:
synchrotron_map = synchrotron_map.to('Jy sr-1')
return synchrotron_map
#==================================================
# Compute synchrotron map - healpix format
#==================================================
def get_synchrotron_hpmap(self, nside=2048,
freq0=1*u.GHz,
Rmin_los=None, NR500_los=5.0,
Rmin=None, Rmax=None,
Cframe=False,
maplonlat=None, output_lonlat=False):
"""
Compute the synchrotron map in the (RING) healpix format.
Parameters
----------
- nside (int): healpix Nside
- freq0 (quantity): the frequency at wich we work
- Rmin_los (Quantity): the radius at which line of sight integration starts
- NR500_los (float): the integration will stop at NR500_los x R500
- Rmin, Rmax (quantity): the radius within with the spectrum is computed
(default is 1kpc, Rtruncation) for getting the normlization flux.
- Cframe (bool): computation assumes that we are in the cluster frame (no redshift effect)
- maplonlat (2d tuple of np.array): healpix maps of galactic longitude and latitude
which can be provided to save time in case of repeated computation
- output_lonlat (bool): use this keyword to also return the lon and lat maps
Outputs
----------
synchrotron_map (np.ndarray) : the map in units of sr-1 or brightness
"""
# Get a healpy radius map
radius, dist_map, maplon, maplat = model_tools.radius_hpmap(self._coord.galactic.l.to_value('deg'),
self._coord.galactic.b.to_value('deg'),
self._R_truncation, self._Rmin,
self._Npt_per_decade_integ,
nside=nside, maplonlat=maplonlat)
# Project the integrand
r_proj, profile = self.get_synchrotron_profile(radius, freq0=freq0,
Rmin_los=Rmin_los, NR500_los=NR500_los, Cframe=Cframe)
# Convert to angle and interpolate onto a map
theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi # degrees
itpl = interpolate.interp1d(theta_proj, profile, kind='cubic', fill_value='extrapolate')
synchrotron_map = itpl(dist_map)*profile.unit
# Avoid numerical residual ringing from interpolation
synchrotron_map[dist_map > self._theta_truncation.to_value('deg')] = 0
# Return the result
synchrotron_map = synchrotron_map.to('Jy sr-1')
if output_lonlat:
return synchrotron_map, maplon, maplat
else:
return synchrotron_map
#==================================================
# Compute SZ spectrum
#==================================================
    def get_sz_spectrum(self, frequency=np.logspace(1,3,100)*u.GHz, Compton_only=False,
                        Rmin=None, Rmax=None,
                        type_integral='spherical',
                        Rmin_los=None, NR500_los=5.0):
        """
        Compute the thermal SZ emission enclosed within [Rmin,Rmax], in 3d (i.e.
        spherically integrated), or the SZ emission enclosed within a circular
        area (i.e. cylindrically integrated along the line of sight).

        Parameters
        ----------
        - frequency (quantity): the observed photon frequency (scalar or array)
        - Compton_only (bool): if True, return the integrated Compton parameter
          (in kpc2) instead of the spectrum; the frequency input is then irrelevant
        - Rmin, Rmax (quantity): the radii between which the spectrum is computed
          (defaults are self._Rmin and R500)
        - type_integral (string): either 'spherical' or 'cylindrical'
        - Rmin_los (quantity): minimal radius at which l.o.s integration starts.
          This is used only for the cylindrical case.
        - NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
          This is used only for the cylindrical case.

        Outputs
        ----------
        - frequency (quantity): the observed photon frequency
        - output (quantity): the spectrum in Jy, or the integrated Compton
          parameter in kpc2 when Compton_only is True

        Raises
        ----------
        - ValueError: if type_integral is not 'spherical' or 'cylindrical'
        - TypeError: if Rmin is not strictly positive (log-space integration)
        """
        # In case the input is not an array
        frequency = model_tools.check_qarray(frequency, unit='GHz')

        # Check the type of integral
        ok_list = ['spherical', 'cylindrical']
        if not type_integral in ok_list:
            raise ValueError("This requested integral type (type_integral) is not available")

        # Get the integration limits
        if Rmin is None:
            Rmin = self._Rmin
        if Rmax is None:
            Rmax = self._R500
        if Rmin_los is None:
            Rmin_los = self._Rmin
        if Rmin.to_value('kpc') <= 0:
            raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
        if Rmin.to_value('kpc') < 1e-2:
            if not self._silent:
                print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")

        # Compute the integral: spherical case is a direct volume integration
        if type_integral == 'spherical':
            rad = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
            dE_dtdVdfdO_f = self.get_rate_sz(frequency, rad, Compton_only=Compton_only)
            dE_dtdfdO_f = model_tools.spherical_integration(dE_dtdVdfdO_f, rad)

        # Compute the integral: cylindrical case projects along the l.o.s. first.
        # The 0.9/1.1 factors pad the 3d grid so interpolation never hits its edges.
        if type_integral == 'cylindrical':
            Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
            Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
            r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1, NptPd=self._Npt_per_decade_integ, unit=True)
            los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                             NptPd=self._Npt_per_decade_integ, unit=True)
            r2d = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
            dE_dtdVdfdO_f = self.get_rate_sz(frequency, r3d, Compton_only=Compton_only)
            dE_dtdfdO_f = model_tools.cylindrical_integration(dE_dtdVdfdO_f, frequency, r3d, r2d, los,
                                                              Rtrunc=self._R_truncation)

        # return
        if Compton_only:
            output = dE_dtdfdO_f.to('kpc2')
        else:
            # Below is because for SZ we want \int S_SZ dOmega, not \int S_SZ dS
            dE_dtdf_f = dE_dtdfdO_f / self._D_ang**2 * u.sr
            output = dE_dtdf_f.to('Jy')

        return frequency, output
#==================================================
# Compute SZ profile
#==================================================
def get_sz_profile(self, radius=np.logspace(0,4,100)*u.kpc,
freq0=100*u.GHz, Compton_only=False,
Rmin_los=None, NR500_los=5.0):
"""
Get the SZ parameter profile.
Parameters
----------
- radius (quantity): the physical 2d radius in units homogeneous to kpc, as a 1d array
- freq0 (quantity): the frequency at which the profile is computed.
- Compton (bool): if set to true, return the Compton-y parameter.
- Rmin_los (Quantity): the radius at which line of sight integration starts
- NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
- Compton_only (bool): Output the Compton parameter instead of the spectrum. In the case of
Compton only, the frequency input does not matter
Outputs
----------
- radius (quantity): the projected 2d radius in unit of kpc
- output : the Compton parameter or brightness profile
Note
----------
The pressure profile is truncated at N R500 along the line-of-sight.
"""
# In case the input is not an array
radius = model_tools.check_qarray(radius, unit='kpc')
# Get the integration limits
if Rmin_los is None:
Rmin_los = self._Rmin
Rmin = np.amin(radius.to_value('kpc'))*u.kpc
Rmax = np.amax(radius.to_value('kpc'))*u.kpc
# Define array for integration
Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1, NptPd=self._Npt_per_decade_integ, unit=True)
los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500, NptPd=self._Npt_per_decade_integ,
unit=True)
dE_dtdVdfdO_f = self.get_rate_sz(freq0, r3d, Compton_only=Compton_only).flatten()
# Compute integral over l.o.s.
dE_dtdSdfdO_f = model_tools.los_integration_1dfunc(dE_dtdVdfdO_f, r3d, radius, los)
dE_dtdSdfdO_f[radius > self._R_truncation] = 0
# return
if Compton_only:
output = dE_dtdSdfdO_f.to_value('')*u.adu
else:
output = dE_dtdSdfdO_f.to('Jy sr-1')
return radius, output
#==================================================
# Compute SZ flux
#==================================================
def get_sz_flux(self, freq0=100*u.GHz, Compton_only=False,
Rmin=None, Rmax=None,
type_integral='spherical',
Rmin_los=None, NR500_los=5.0):
"""
Compute the SZ emission enclosed within Rmax, in 3d (i.e. spherically
integrated), or the SZ emmission enclosed within a circular area (i.e.
cylindrical), and at a given frequency (or in Compton unit). The
radius max can be an array to get flux(<R).
Parameters
----------
- freq0 (quantity): the frequency at which the profile is computed
- Compton_only (bool): Output the Compton parameter instead of the spectrum. In the case of
Compton only, the frequency input does not matter
- Rmin (quantity): the minimal radius within with the spectrum is computed
- Rmax (quantity): the maximal radius within with the spectrum is computed.
It can be an array.
- type_integral (string): either 'spherical' or 'cylindrical'
- Rmin_los (quantity): minimal radius at which l.o.s integration starts
This is used only for cylindrical case
- NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
This is used only for cylindrical case
Outputs
----------
- flux (quantity) : the synchrotron flux in Jy or kpc^2 (for Compton)
"""
# Check the type of integral
ok_list = ['spherical', 'cylindrical']
if not type_integral in ok_list:
raise ValueError("This requested integral type (type_integral) is not available")
# Get the integration limits
if Rmin_los is None:
Rmin_los = self._Rmin
if Rmin is None:
Rmin = self._Rmin
if Rmax is None:
Rmax = self._R500
if Rmin.to_value('kpc') <= 0:
raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
if Rmin.to_value('kpc') < 1e-2:
if not self._silent:
print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")
#----- Case of scalar quantities
if type(Rmax.value) == float or type(Rmax.value) == np.float64:
freq0, flux = self.get_sz_spectrum(freq0, Compton_only=Compton_only, Rmin=Rmin, Rmax=Rmax,
type_integral=type_integral, Rmin_los=Rmin_los,
NR500_los=NR500_los)
#----- Case of radius array (need to use dN/dVdEdt and not get_profile because spherical flux)
elif type(Rmax.value) == np.ndarray:
# Get frequency sampling
if type_integral == 'spherical':
Rmax3d = np.amax(Rmax.value)*Rmax.unit
Rmin3d = Rmin
if type_integral == 'cylindrical':
Rmax3d = np.sqrt((NR500_los*self._R500)**2 + (np.amax(Rmax.value)*Rmax.unit)**2)*1.1
Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)*0.9
r3d = model_tools.sampling_array(Rmin3d, Rmax3d, NptPd=self._Npt_per_decade_integ, unit=True)
los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
NptPd=self._Npt_per_decade_integ, unit=True)
# Increase numerical precision by adding a point at R_truncation
if np.amax(r3d) > self._R_truncation:
r3d = r3d.insert(0, self._R_truncation)
r3d.sort()
if np.amax(los) > self._R_truncation:
los = los.insert(0, self._R_truncation)
los.sort()
dE_dtdVdfdO_f = self.get_rate_sz(freq0, r3d, Compton_only=Compton_only).flatten()
# Define output
if Compton_only:
flux = np.zeros(len(Rmax))*u.Unit('kpc2')
else:
flux = np.zeros(len(Rmax))*u.Unit('Jy')
# Case of spherical integral: direct volume integration
itpl = interpolate.interp1d(r3d.to_value('kpc'), dE_dtdVdfdO_f.value, kind='linear')
if type_integral == 'spherical':
for i in range(len(Rmax)):
# Avoid ringing from integration
Rmax_i = np.amin([Rmax[i].to_value('kpc'), self._R_truncation.to_value('kpc')])*u.kpc
rad_i = model_tools.sampling_array(Rmin, Rmax_i, NptPd=self._Npt_per_decade_integ, unit=True)
dE_dtdVdfdO_f_i = itpl(rad_i.to_value('kpc'))*dE_dtdVdfdO_f.unit
if Compton_only:
flux[i] = model_tools.spherical_integration(dE_dtdVdfdO_f_i, rad_i)
else:
flux[i] = model_tools.spherical_integration(dE_dtdVdfdO_f_i, rad_i) / self._D_ang**2*u.sr
# Case of cylindrical integral
if type_integral == 'cylindrical':
# Compute integral over l.o.s.
radius = model_tools.sampling_array(Rmin, np.amax(Rmax.value)*Rmax.unit,
NptPd=self._Npt_per_decade_integ, unit=True)
dE_dtdVdfdO_f_proj = model_tools.los_integration_1dfunc(dE_dtdVdfdO_f, r3d, radius, los)
dE_dtdVdfdO_f_proj[radius > self._R_truncation] = 0
itpl = interpolate.interp1d(radius.to_value('kpc'), dE_dtdVdfdO_f_proj.value, kind='linear')
# Avoid ringing from integration
for i in range(len(Rmax)):
Rmax_i = np.amin([Rmax[i].to_value('kpc'), self._R_truncation.to_value('kpc')])*u.kpc
rad_i = model_tools.sampling_array(Rmin, Rmax_i, NptPd=self._Npt_per_decade_integ, unit=True)
dE_dtdVdfdO_f_proj_i = itpl(rad_i.value)*dE_dtdVdfdO_f_proj.unit
if Compton_only:
flux[i] = model_tools.trapz_loglog(2*np.pi*rad_i*dE_dtdVdfdO_f_proj_i, rad_i)
else:
flux[i] = model_tools.trapz_loglog(2*np.pi*rad_i*dE_dtdVdfdO_f_proj_i, rad_i) / self._D_ang**2*u.sr
else:
raise('Bug: Rmax.value not recognized.')
# output
if Compton_only:
output = flux.to('kpc2')
else:
output = flux.to('Jy')
return output
#==================================================
# Compute SZ map
#==================================================
def get_sz_map(self, freq0=100*u.GHz, Compton_only=False,
Rmin_los=None, NR500_los=5.0,
Rmin=None, Rmax=None,
Normalize=False):
"""
Compute the SZ map. The map is normalized so that the integral
of the map over the cluster volume is 1 (up to Rmax=5R500).
Parameters
----------
- freq0 (quantity): the frequency at wich we work
Has no effect if Normalized is True
- Compton_only (bool): Output the Compton parameter instead of the spectrum. In the case of
Compton only, the frequency input does not matter
- Rmin_los (Quantity): the radius at which line of sight integration starts
- NR500_los (float): the integration will stop at NR500_los x R500
- Rmin, Rmax (quantity): the radius within with the spectrum is computed
(default is 1kpc, Rtruncation) for getting the normlization flux.
Has no effect if Normalized is False
- Normalize (bool): if True, the map is normalized by the flux to get a
template in unit of sr-1
Outputs
----------
sz_map (np.ndarray) : the map in units of sr-1 or brightness, or Compton
"""
# Get the header
header = self.get_map_header()
# Get a R.A-Dec. map
ra_map, dec_map = map_tools.get_radec_map(header)
# Get a cluster distance map (in deg)
dist_map = map_tools.greatcircle(ra_map, dec_map, self._coord.icrs.ra.to_value('deg'),
self._coord.icrs.dec.to_value('deg'))
# Define the radius used fo computing the profile
theta_max = np.amax(dist_map) # maximum angle from the cluster
theta_min = np.amin(dist_map) # minimum angle from the cluster (~0 if cluster within FoV)
if theta_min > 10 and theta_max > 10:
print('!!!!! WARNING: the cluster location is very much offset from the field of view')
rmax = theta_max*np.pi/180 * self._D_ang
rmin = theta_min*np.pi/180 * self._D_ang
if rmin == 0: rmin = self._Rmin
radius = model_tools.sampling_array(rmin, rmax, NptPd=self._Npt_per_decade_integ, unit=True)
# Project the integrand
r_proj, profile = self.get_sz_profile(radius, freq0=freq0, Compton_only=Compton_only,
Rmin_los=Rmin_los, NR500_los=NR500_los)
# Convert to angle and interpolate onto a map
theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi # degrees
sz_map = map_tools.profile2map(profile.value, theta_proj, dist_map)*profile.unit
# Avoid numerical residual ringing from interpolation
sz_map[dist_map > self._theta_truncation.to_value('deg')] = 0
# Compute the normalization: to return a map in sr-1, i.e. by computing the total flux
if Normalize:
if Rmax is None:
if self._R_truncation is not np.inf:
Rmax = self._R_truncation
else:
Rmax = NR500_los*self._R500
if Rmin is None:
Rmin = self._Rmin
flux = self.get_sz_flux(Rmin=Rmin, Rmax=Rmax, type_integral='cylindrical', NR500_los=NR500_los,
freq0=freq0, Compton_only=Compton_only)
if Compton_only:
sz_map = sz_map.to_value('adu') / (flux/self._D_ang**2*u.sr)
sz_map = sz_map.to('sr-1')
else:
sz_map = sz_map / flux
sz_map = sz_map.to('sr-1')
else:
if Compton_only:
sz_map = sz_map.to('adu')
else:
sz_map = sz_map.to('Jy sr-1')
return sz_map
#==================================================
# Compute SZ map - healpix format
#==================================================
def get_sz_hpmap(self, nside=2048, freq0=100*u.GHz, Compton_only=False,
Rmin_los=None, NR500_los=5.0,
Rmin=None, Rmax=None,
maplonlat=None, output_lonlat=False):
"""
Compute the SZ map projected onto a (RING) healpix map.
Parameters
----------
- nside (int): healpix Nside
- freq0 (quantity): the frequency at wich we work
- Compton_only (bool): Output the Compton parameter instead of the spectrum. In the case of
Compton only, the frequency input does not matter
- Rmin_los (Quantity): the radius at which line of sight integration starts
- NR500_los (float): the integration will stop at NR500_los x R500
- Rmin, Rmax (quantity): the radius within with the spectrum is computed
(default is 1kpc, Rtruncation) for getting the normlization flux.
- maplonlat (2d tuple of np.array): healpix maps of galactic longitude and latitude
which can be provided to save time in case of repeated computation
- output_lonlat (bool): use this keyword to also return the lon and lat maps
Outputs
----------
- sz_map (healpix map) : the map in units of brightness, or Compton
- if output_lonlat is True, maplon and maplat are also returned
"""
# Get a healpy radius map
radius, dist_map, maplon, maplat = model_tools.radius_hpmap(self._coord.galactic.l.to_value('deg'),
self._coord.galactic.b.to_value('deg'),
self._R_truncation, self._Rmin,
self._Npt_per_decade_integ,
nside=nside, maplonlat=maplonlat)
# Project the integrand
r_proj, profile = self.get_sz_profile(radius, freq0=freq0, Compton_only=Compton_only,
Rmin_los=Rmin_los, NR500_los=NR500_los)
# Convert to angle and interpolate onto a map
theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi # degrees
itpl = interpolate.interp1d(theta_proj, profile, kind='cubic', fill_value='extrapolate')
sz_map = itpl(dist_map)*profile.unit
# Avoid numerical residual ringing from interpolation
sz_map[dist_map > self._theta_truncation.to_value('deg')] = 0
# Return the results
if Compton_only:
sz_map = sz_map.to('adu')
else:
sz_map = sz_map.to('Jy sr-1')
if output_lonlat:
return sz_map, maplon, maplat
else:
return sz_map
#==================================================
# Compute Xray spectrum
#==================================================
    def get_xray_spectrum(self, energy=np.linspace(0.1,50,100)*u.keV,
                          Rmin=None, Rmax=None,
                          type_integral='spherical',
                          Rmin_los=None, NR500_los=5.0,
                          output_type='C',
                          nH=0.0*u.cm**-2,
                          model='APEC',
                          resp_file=None,
                          data_file=None,
                          Cframe=False):
        """
        Compute the X-ray spectrum enclosed within [Rmin,Rmax], in 3d (i.e.
        spherically integrated), or enclosed within a circular area (i.e.
        cylindrically integrated). The emission is computed for a single mean
        temperature (emission-weighted profiles are not used here).

        Parameters
        ----------
        - energy (quantity): the physical energy of photons
        - Rmin, Rmax (quantity): the radii between which the spectrum is computed
          (defaults are self._Rmin and R500)
        - type_integral (string): either 'spherical' or 'cylindrical'
        - Rmin_los (quantity): minimal radius at which l.o.s integration starts.
          This is used only for the cylindrical case.
        - NR500_los (float): the line-of-sight integration will stop at NR500_los x R500.
          This is used only for the cylindrical case.
        - output_type (str): type of output
          S == energy counts in erg/s/cm^2/sr
          C == counts in ph/s/cm^2/sr
          R == count rate in ph/s/sr (accounting for instrumental response)
        - nH (quantity): hydrogen column density (homogeneous to cm**-2)
        - model (str): the xspec emission model (e.g. 'APEC')
        - resp_file (str): instrument response file passed to xspec (for 'R' output)
        - data_file (str): data file passed to xspec
        - Cframe (bool): computation assumes that we are in the cluster frame
          (no redshift effect)

        Outputs
        ----------
        - energy (quantity): the photon energy at the center of each bin
        - output (quantity): the spectrum, units depending on output_type

        Note
        ----------
        NOTE(review): output_type is not validated here; an unexpected value
        leaves 'output' undefined and raises a NameError at return. Consider
        validating against ['S', 'C', 'R'] as done in get_xray_profile.
        """
        # In case the input is not an array
        energy = model_tools.check_qarray(energy, unit='keV')

        # Check the type of integral
        ok_list = ['spherical', 'cylindrical']
        if not type_integral in ok_list:
            raise ValueError("This requested integral type (type_integral) is not available")

        # Get the integration limits
        if Rmin is None:
            Rmin = self._Rmin
        if Rmax is None:
            Rmax = self._R500
        if Rmin_los is None:
            Rmin_los = self._Rmin
        if Rmin.to_value('kpc') <= 0:
            raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
        if Rmin.to_value('kpc') < 1e-2:
            if not self._silent:
                print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")

        # Mean molecular weights, needed to convert n_e^2 into the xspec EM (n_e n_p)
        mu_gas,mu_e,mu_p,mu_alpha = cluster_global.mean_molecular_weight(Y=self._helium_mass_fraction,
                                                                         Z=self._metallicity_sol*self._abundance)

        # Get a mean temperature and the integrated squared density (emission measure)
        if type_integral == 'spherical':
            rad = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
            rad, temperature = self.get_temperature_gas_profile(rad)
            rad, n_e = self.get_density_gas_profile(rad)
            # Volume-averaged temperature within the sphere of radius Rmax
            Tmean = model_tools.spherical_integration(temperature, rad) / (4.0/3*np.pi*Rmax**3)
            N2int = model_tools.spherical_integration(n_e**2*mu_e/mu_p, rad)
        if type_integral == 'cylindrical':
            Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
            Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
            r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1, NptPd=self._Npt_per_decade_integ, unit=True)
            los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                             NptPd=self._Npt_per_decade_integ, unit=True)
            r2d = model_tools.sampling_array(Rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
            rad, temperature = self.get_temperature_gas_profile(r3d)
            # NaN values (x/x != 1 only for NaN) are set to zero before integration
            temperature[temperature/temperature != 1] = 0
            rad, n_e = self.get_density_gas_profile(r3d)
            temperature_proj = model_tools.los_integration_1dfunc(temperature, r3d, r2d, los)
            temperature_pproj = model_tools.trapz_loglog(2*np.pi*r2d*temperature_proj, r2d)
            # Cylinder-averaged temperature (volume = 2*Rmax3d * pi*Rmax^2)
            Tmean = temperature_pproj/(2*Rmax3d * np.pi*Rmax**2)
            n_e2_proj = model_tools.los_integration_1dfunc(n_e**2*mu_e/mu_p, r3d, r2d, los)
            N2int = model_tools.trapz_loglog(2*np.pi*r2d*n_e2_proj, r2d)
        if not self._silent:
            print(('Mean temperature used to compute the spectrum:', Tmean))

        # Get the spectrum normalized to 1 cm-5; redshift is zeroed in the cluster frame
        if Cframe:
            z_xspec = 0.0
        else:
            z_xspec = self._redshift
        dSB, dNph, dR, ectr, epot = cluster_xspec.xray_spectrum(nH.to_value('cm-2')*1e-22,
                                                                Tmean.to_value('keV'),
                                                                self._abundance,
                                                                z_xspec,
                                                                emin=np.amin(energy.to_value('keV')),
                                                                emax=np.amax(energy.to_value('keV')),
                                                                nbin=len(energy),
                                                                file_ana='./xspec_analysis.txt',
                                                                file_out='./xspec_analysis_output.txt',
                                                                model=model,
                                                                resp_file=resp_file,
                                                                data_file=data_file,
                                                                cleanup=True, logspace=True)

        # Normalization: standard xspec convention, norm = 1e-14/(4 pi D_A^2 (1+z)^2) * EM
        xspec_norm = (1e-14/(4*np.pi*self._D_ang**2*(1+self._redshift)**2) * N2int).to_value('cm-5')

        # return, selecting the requested output flavor
        if output_type == 'S':
            output = dSB * xspec_norm * u.Unit('erg s-1 cm-2 keV-1')
        if output_type == 'C':
            output = dNph * xspec_norm * u.Unit('s-1 cm-2 keV-1')
        if output_type == 'R':
            output = dR * xspec_norm * u.Unit('s-1 keV-1')

        return ectr*u.keV, output
#==================================================
# Compute Xray profile
#==================================================

def get_xray_profile(self, radius=np.logspace(0,4,100)*u.kpc,
                     Rmin_los=None, NR500_los=5.0,
                     output_type='C',
                     Cframe=False):
    """
    Compute the X-ray surface brightness profile. An XSPEC table file is
    needed as output_dir+'/XSPEC_table.txt'; the energy band is defined there.

    Parameters
    ----------
    - radius (quantity): projected 2d radius, homogeneous to kpc, as a 1d array
    - Rmin_los (quantity): radius at which the line-of-sight integration starts
    - NR500_los (float): the l.o.s. integration stops at NR500_los x R500
    - output_type (str): 'S' (energy counts, erg/s/cm^2/sr),
      'C' (counts, ph/s/cm^2/sr) or 'R' (count rate, ph/s/sr,
      accounting for the instrumental response)
    - Cframe (bool): if True, compute in the cluster rest frame
      (no redshift effect)

    Outputs
    ----------
    - radius (quantity): the projected 2d radius in kpc
    - output: the brightness profile, depending on output_type

    Note
    ----------
    The profile is truncated at R_truncation along the line of sight.
    """
    # Sanitize the input radius into a kpc-homogeneous quantity array
    radius = model_tools.check_qarray(radius, unit='kpc')

    # Only three output flavours are supported
    if output_type not in ['S', 'C', 'R']:
        raise ValueError("Available output_type are S, C and R.")

    # Radial integration bounds
    if Rmin_los is None:
        Rmin_los = self._Rmin
    Rmin = np.amin(radius.to_value('kpc'))*u.kpc
    Rmax = np.amax(radius.to_value('kpc'))*u.kpc

    # Sampling arrays: padded 3d radius and line of sight
    Rmax3d = np.sqrt((NR500_los*self._R500)**2 + Rmax**2)
    Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)
    r3d = model_tools.sampling_array(Rmin3d*0.9, Rmax3d*1.1,
                                     NptPd=self._Npt_per_decade_integ, unit=True)
    los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                     NptPd=self._Npt_per_decade_integ, unit=True)

    # Emission rate per unit volume, then projected along the l.o.s.
    rate3d = self.get_rate_xray(r3d, output_type=output_type, Cframe=Cframe).flatten()
    rate_proj = model_tools.los_integration_1dfunc(rate3d, r3d, radius, los)
    rate_proj[radius > self._R_truncation] = 0

    # Physical to angular scale, then intrinsic emission to observed flux
    dN_dtdO = rate_proj * self._D_ang**2 * u.Unit('sr-1')
    dN_dSdtdO = dN_dtdO / (4*np.pi * self._D_lum**2)

    # Express the result in the units matching the requested output
    unit_choice = {'S': 'erg s-1 cm-2 sr-1',
                   'C': 's-1 cm-2 sr-1',
                   'R': 's-1 sr-1'}
    return radius, dN_dSdtdO.to(unit_choice[output_type])
#==================================================
# Compute Xray flux
#==================================================

def get_xray_flux(self, Rmin=None, Rmax=None,
                  type_integral='spherical',
                  Rmin_los=None, NR500_los=5.0,
                  output_type='C',
                  Cframe=False):
    """
    Compute the Xray emission enclosed within Rmax, in 3d (i.e. spherically
    integrated), or the Xray emission enclosed within a circular area (i.e.
    cylindrical), in a given band depending on the Xspec file. Rmax can be
    an array to get flux(<R).

    Parameters
    ----------
    - Rmin (quantity): the minimal radius within which the flux is computed
    - Rmax (quantity): the maximal radius within which the flux is computed.
      It can be an array.
    - type_integral (string): either 'spherical' or 'cylindrical'
    - Rmin_los (quantity): minimal radius at which l.o.s integration starts
      (cylindrical case only)
    - NR500_los (float): the line-of-sight integration stops at
      NR500_los x R500 (cylindrical case only)
    - output_type (str): 'S' (erg/s/cm^2), 'C' (ph/s/cm^2) or
      'R' (ph/s, accounting for the instrumental response)
    - Cframe (bool): computation assumes the cluster frame (no redshift effect)

    Outputs
    ----------
    - output (quantity): the Xray flux in erg s-1 cm-2, s-1 cm-2 or s-1
      (depending on output_type)
    """
    # Check the type of integral
    ok_list = ['spherical', 'cylindrical']
    if type_integral not in ok_list:
        raise ValueError("This requested integral type (type_integral) is not available")

    # Check output type
    output_list = ['S', 'C', 'R']
    if output_type not in output_list:
        raise ValueError("Available output_type are S, C and R.")

    # Get the integration limits
    if Rmin_los is None:
        Rmin_los = self._Rmin
    if Rmin is None:
        Rmin = self._Rmin
    if Rmax is None:
        Rmax = self._R500
    if Rmin.to_value('kpc') <= 0:
        raise TypeError("Rmin cannot be 0 (or less than 0) because integrations are in log space.")
    if Rmin.to_value('kpc') < 1e-2:
        if not self._silent:
            print("WARNING: the requested value of Rmin is very small. Rmin~kpc is expected")

    #----- Case of scalar quantities: promote to a 1-element array
    scalar_flag = False
    if type(Rmax.value) == float or type(Rmax.value) == np.float64:
        scalar_flag = True
        Rmax = np.tile(Rmax, [1])  # replicate Rmax to make it an array

    #----- Case of radius array (need dN/dVdEdt and not get_profile because spherical flux)
    if type(Rmax.value) == np.ndarray:
        # Get the radial sampling
        if type_integral == 'spherical':
            Rmax3d = np.amax(Rmax.value)*Rmax.unit
            Rmin3d = Rmin
        if type_integral == 'cylindrical':
            Rmax3d = np.sqrt((NR500_los*self._R500)**2 + (np.amax(Rmax.value)*Rmax.unit)**2)*1.1
            Rmin3d = np.sqrt(Rmin_los**2 + Rmin**2)*0.9
        r3d = model_tools.sampling_array(Rmin3d, Rmax3d, NptPd=self._Npt_per_decade_integ, unit=True)
        los = model_tools.sampling_array(Rmin_los, NR500_los*self._R500,
                                         NptPd=self._Npt_per_decade_integ, unit=True)
        dN_dVdt = self.get_rate_xray(r3d, output_type=output_type, Cframe=Cframe).flatten()

        # Define the output array with the units matching the requested output
        if output_type == 'S':
            flux = np.zeros(len(Rmax))*u.Unit('erg s-1 cm-2')
        if output_type == 'C':
            flux = np.zeros(len(Rmax))*u.Unit('s-1 cm-2')
        if output_type == 'R':
            flux = np.zeros(len(Rmax))*u.Unit('s-1')

        # Case of spherical integral: direct volume integration
        itpl = interpolate.interp1d(r3d.to_value('kpc'), dN_dVdt.value, kind='linear')
        if type_integral == 'spherical':
            for i in range(len(Rmax)):
                rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
                dN_dVdt_i = itpl(rad_i.to_value('kpc'))*dN_dVdt.unit
                lum_i = model_tools.spherical_integration(dN_dVdt_i, rad_i)
                flux[i] = lum_i / (4*np.pi * self._D_lum**2)

        # Case of cylindrical integral: project along the l.o.s. first
        if type_integral == 'cylindrical':
            # Compute integral over l.o.s.
            radius = model_tools.sampling_array(Rmin, np.amax(Rmax.value)*Rmax.unit,
                                                NptPd=self._Npt_per_decade_integ, unit=True)
            dN_dVdt_proj = model_tools.los_integration_1dfunc(dN_dVdt, r3d, radius, los)
            dN_dVdt_proj[radius > self._R_truncation] = 0
            dN_dSdVdt_proj = dN_dVdt_proj / (4*np.pi * self._D_lum**2)
            itpl = interpolate.interp1d(radius.to_value('kpc'), dN_dSdVdt_proj.value, kind='linear')
            for i in range(len(Rmax)):
                rad_i = model_tools.sampling_array(Rmin, Rmax[i], NptPd=self._Npt_per_decade_integ, unit=True)
                dN_dSdVdt_proj_i = itpl(rad_i.value)*dN_dSdVdt_proj.unit
                flux[i] = model_tools.trapz_loglog(2*np.pi*rad_i*dN_dSdVdt_proj_i, rad_i)

    # Define output
    if scalar_flag:
        flux = flux[0]  # return a scalar when the input Rmax was scalar
    if output_type == 'S':
        output = flux.to('erg s-1 cm-2')
    if output_type == 'C':
        output = flux.to('s-1 cm-2')
    if output_type == 'R':
        output = flux.to('s-1')

    # BUGFIX: previously the raw 'flux' was returned while the
    # unit-homogenized 'output' was computed and discarded (dead code);
    # return the converted quantity as documented.
    return output
#==================================================
# Compute Xray map
#==================================================

def get_xray_map(self, Rmin_los=None, NR500_los=5.0,
                 Rmin=None, Rmax=None,
                 Normalize=False,
                 output_type='C',
                 Cframe=False):
    """
    Compute the X-ray map on the grid defined by the map header. When
    Normalize is True, the map is divided by the cylindrically integrated
    flux so that it becomes a template in units of sr-1.

    Parameters
    ----------
    - Rmin_los (quantity): radius at which the l.o.s. integration starts
    - NR500_los (float): the integration stops at NR500_los x R500
    - Rmin, Rmax (quantity): radii used for the normalization flux;
      no effect when Normalize is False
    - Normalize (bool): if True, normalize the map by the flux to get a
      template in unit of sr-1
    - output_type (str): 'S' (erg/s/cm^2/sr), 'C' (ph/s/cm^2/sr) or
      'R' (ph/s/sr, accounting for the instrumental response)
    - Cframe (bool): computation in the cluster rest frame (no redshift effect)

    Outputs
    ----------
    xray_map (np.ndarray): the map, in units of sr-1 or in brightness
    """
    # Only three output flavours are supported
    if output_type not in ['S', 'C', 'R']:
        raise ValueError("Available output_type are S, C and R.")

    # Build the angular-distance map (degrees) from the map header
    header = self.get_map_header()
    ra_map, dec_map = map_tools.get_radec_map(header)
    dist_map = map_tools.greatcircle(ra_map, dec_map, self._coord.icrs.ra.to_value('deg'),
                                     self._coord.icrs.dec.to_value('deg'))

    # Radial range to sample: closest to farthest map pixel
    theta_max = np.amax(dist_map)  # maximum angle from the cluster
    theta_min = np.amin(dist_map)  # ~0 if the cluster lies within the FoV
    if theta_min > 10 and theta_max > 10:
        print('!!!!! WARNING: the cluster location is very much offset from the field of view')
    rmax = theta_max*np.pi/180 * self._D_ang
    rmin = theta_min*np.pi/180 * self._D_ang
    if rmin == 0: rmin = self._Rmin
    radius = model_tools.sampling_array(rmin, rmax, NptPd=self._Npt_per_decade_integ, unit=True)

    # Line-of-sight projected profile
    r_proj, profile = self.get_xray_profile(radius, Rmin_los=Rmin_los, NR500_los=NR500_los,
                                            output_type=output_type, Cframe=Cframe)

    # Interpolate the profile (in angle) onto the distance map
    theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi   # degrees
    xray_map = map_tools.profile2map(profile.value, theta_proj, dist_map)*profile.unit

    # Kill numerical ringing from the interpolation beyond truncation
    xray_map[dist_map > self._theta_truncation.to_value('deg')] = 0

    if Normalize:
        # Normalize by the total cylindrical flux to get a sr-1 template
        if Rmax is None:
            # NOTE(review): identity test against np.inf — assumes
            # R_truncation is set to the np.inf object itself when
            # untruncated; confirm against the class initialization.
            if self._R_truncation is not np.inf:
                Rmax = self._R_truncation
            else:
                Rmax = NR500_los*self._R500
        if Rmin is None:
            Rmin = self._Rmin
        flux = self.get_xray_flux(Rmin=Rmin, Rmax=Rmax, type_integral='cylindrical',
                                  NR500_los=NR500_los, output_type=output_type, Cframe=Cframe)
        xray_map = (xray_map / flux).to('sr-1')
    else:
        # Keep physical brightness units matching the requested output
        unit_choice = {'S': 'erg s-1 cm-2 sr-1',
                       'C': 's-1 cm-2 sr-1',
                       'R': 's-1 sr-1'}
        xray_map = xray_map.to(unit_choice[output_type])

    return xray_map
#==================================================
# Compute Xray map - healpix format
#==================================================

def get_xray_hpmap(self, nside=2048,
                   Rmin_los=None, NR500_los=5.0,
                   Rmin=None, Rmax=None,
                   output_type='C',
                   Cframe=False,
                   maplonlat=None, output_lonlat=False):
    """
    Compute the X-ray map projected onto a (RING ordered) healpix grid.

    Parameters
    ----------
    - nside (int): healpix Nside parameter
    - Rmin_los (quantity): radius at which the l.o.s. integration starts
    - NR500_los (float): the integration stops at NR500_los x R500
    - Rmin, Rmax (quantity): accepted for API symmetry with get_xray_map;
      not used in this computation
    - output_type (str): 'S' (erg/s/cm^2/sr), 'C' (ph/s/cm^2/sr) or
      'R' (ph/s/sr, accounting for the instrumental response)
    - Cframe (bool): computation in the cluster rest frame (no redshift effect)
    - maplonlat (2d tuple of np.array): precomputed healpix galactic
      longitude/latitude maps, to save time for repeated calls
    - output_lonlat (bool): also return the longitude and latitude maps

    Outputs
    ----------
    - xray_map (np.ndarray): the map in units of sr-1 or brightness
    - maplon, maplat: returned only when output_lonlat is True
    """
    # Only three output flavours are supported
    if output_type not in ['S', 'C', 'R']:
        raise ValueError("Available output_type are S, C and R.")

    # Healpix radius sampling and angular distance map around the cluster
    radius, dist_map, maplon, maplat = model_tools.radius_hpmap(self._coord.galactic.l.to_value('deg'),
                                                                self._coord.galactic.b.to_value('deg'),
                                                                self._R_truncation, self._Rmin,
                                                                self._Npt_per_decade_integ,
                                                                nside=nside, maplonlat=maplonlat)

    # Line-of-sight projected profile
    r_proj, profile = self.get_xray_profile(radius, Rmin_los=Rmin_los, NR500_los=NR500_los,
                                            output_type=output_type, Cframe=Cframe)

    # Interpolate the profile (in angle) onto the healpix distance map
    theta_proj = (r_proj/self._D_ang).to_value('')*180.0/np.pi   # degrees
    itpl = interpolate.interp1d(theta_proj, profile, kind='cubic', fill_value='extrapolate')
    xray_map = itpl(dist_map)*profile.unit

    # Kill numerical ringing from the interpolation beyond truncation
    xray_map[dist_map > self._theta_truncation.to_value('deg')] = 0

    # Homogeneous output units for each flavour
    unit_choice = {'S': 'erg s-1 cm-2 sr-1',
                   'C': 's-1 cm-2 sr-1',
                   'R': 's-1 sr-1'}
    xray_map = xray_map.to(unit_choice[output_type])

    if output_lonlat:
        return xray_map, maplon, maplat
    return xray_map
|
remi-adamREPO_NAMEminotPATH_START.@minot_extracted@minot-master@minot@model_obs.py@.PATH_END.py
|
{
"filename": "priors.py",
"repo_name": "esheldon/ngmix",
"repo_path": "ngmix_extracted/ngmix-master/ngmix/priors/priors.py",
"type": "Python"
}
|
"""
Convention is that all priors should have peak ln(prob)==0. This
helps use in priors for LM fitting.
"""
import numpy as np
from ..gexceptions import GMixRangeError
from .random import make_rng
from ..defaults import LOWVAL
class PriorBase(object):
    """
    Base class for all priors.

    Parameters
    ----------
    rng: np.random.RandomState
        Random number generator to use; required.
    bounds: 2-tuple of floats or None
        Allowed range of the parameter; None means unbounded.

    attributes
    ----------
    bounds: 2-tuple of floats or None
        Allowed range of the parameter.
    rng: np.random.RandomState
        The RNG.

    methods
    -------
    has_bounds()
        True when non-None bounds are set, False otherwise.
    """

    def __init__(self, rng, bounds=None):
        # an RNG must always be supplied; normalize it through make_rng
        assert rng is not None, 'rng is a required argument'
        self.bounds = bounds
        self.rng = make_rng(rng=rng)

    def has_bounds(self):
        """
        Return True when a non-None bounds attribute is set.
        """
        return getattr(self, "bounds", None) is not None
class FlatPrior(PriorBase):
    """
    Uniform (flat) prior on the interval [minval, maxval].

    Parameters
    ----------
    minval: float
        Lower edge of the allowed range.
    maxval: float
        Upper edge of the allowed range.
    rng: np.random.RandomState
        Random number generator to use.
    """

    def __init__(self, minval, maxval, rng):
        super().__init__(rng=rng)
        self.minval = minval
        self.maxval = maxval

    def _ensure_in_range(self, val):
        # shared range check for the scalar evaluators
        if val < self.minval or val > self.maxval:
            raise GMixRangeError(
                "value %s out of range: [%s,%s]" % (val, self.minval, self.maxval)
            )

    def get_prob_scalar(self, val):
        """
        Return 1 when val is in [minval, maxval]; raise GMixRangeError otherwise.

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        self._ensure_in_range(val)
        return 1.0

    def get_lnprob_scalar(self, val):
        """
        Return 0.0 when val is in [minval, maxval]; raise GMixRangeError otherwise.

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        self._ensure_in_range(val)
        return 0.0

    def get_prob_array(self, vals):
        """
        Return an array of ones when all vals are in range; raise
        GMixRangeError otherwise.

        Parameters
        ----------
        vals: array
            The locations at which to evaluate
        """
        bad, = np.where((vals < self.minval) | (vals > self.maxval))
        if bad.size > 0:
            raise GMixRangeError(
                "values were out of range: [%s,%s]" % (self.minval, self.maxval)
            )
        return vals * 0 + 1.0

    def get_lnprob_array(self, vals):
        """
        Return scalar 0.0 when all vals are in range (note: unlike
        get_prob_array, no array is returned); raise GMixRangeError otherwise.

        Parameters
        ----------
        vals: array
            The locations at which to evaluate
        """
        bad, = np.where((vals < self.minval) | (vals > self.maxval))
        if bad.size > 0:
            raise GMixRangeError(
                "values were out of range: [%s,%s]" % (self.minval, self.maxval)
            )
        return 0.0

    def get_fdiff(self, val):
        """
        Return 0.0 (sqrt(-2 ln p) for LM fitters) when val is in range;
        raise GMixRangeError otherwise.

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        self._ensure_in_range(val)
        return 0.0

    def sample(self, nrand=None):
        """
        Draw uniform samples on [minval, maxval].

        Parameters
        ----------
        nrand: int or None
            Number of samples; a scalar is returned when None.

        Returns
        -------
        samples: scalar or array of shape (nrand,)
        """
        want_scalar = nrand is None
        if want_scalar:
            nrand = 1

        draws = self.rng.uniform(size=nrand)
        draws = self.minval + (self.maxval - self.minval) * draws

        return draws[0] if want_scalar else draws
class TwoSidedErf(PriorBase):
    """
    Prior that is ~1 in the middle and rolls off to zero at both ends
    through error functions.

    A limitation seems to be the accuracy of the erf implementation.

    Parameters
    ----------
    minval: float
        Location where p(x) = 0.5 on the lower roll-off.
    width_at_min: float
        Width of the lower 0 -> 1 transition.
    maxval: float
        Location where p(x) = 0.5 on the upper roll-off.
    width_at_max: float
        Width of the upper 1 -> 0 transition.
    rng: np.random.RandomState
        Random number generator to use.
    """

    def __init__(self, minval, width_at_min, maxval, width_at_max, rng):
        super().__init__(rng=rng)

        self.minval = minval
        self.width_at_min = width_at_min
        self.maxval = maxval
        self.width_at_max = width_at_max

    def get_prob_scalar(self, val):
        """
        Probability at val: sum of the two erf roll-offs.

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        from math import erf

        upper = 0.5 * erf((self.maxval - val) / self.width_at_max)
        lower = 0.5 * erf((val - self.minval) / self.width_at_min)
        return upper + lower

    def get_lnprob_scalar(self, val):
        """
        Log probability at val; LOWVAL where the probability is <= 0.

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        p = self.get_prob_scalar(val)
        return np.log(p) if p > 0.0 else LOWVAL

    def get_prob_array(self, vals):
        """
        Probability evaluated element-wise over vals.

        Parameters
        ----------
        vals: array
            The locations at which to evaluate
        """
        vals = np.array(vals, ndmin=1, dtype="f8", copy=False)
        return np.array([self.get_prob_scalar(v) for v in vals])

    def get_lnprob_array(self, vals):
        """
        Log probability over vals; LOWVAL where the probability is <= 0.

        Parameters
        ----------
        vals: array
            The locations at which to evaluate
        """
        p = self.get_prob_array(vals)

        lnp = np.zeros(p.size) + LOWVAL
        ok, = np.where(p > 0.0)
        if ok.size > 0:
            lnp[ok] = np.log(p[ok])
        return lnp

    def get_fdiff(self, val):
        """
        sqrt(-2 ln p) ~ (data - mode)/err for LM fitters, scalar or
        element-wise depending on the input.

        Parameters
        ----------
        val: number or array
            The location(s) at which to evaluate
        """
        if isinstance(val, np.ndarray):
            return self._get_fdiff_array(val)
        return self._get_fdiff_scalar(val)

    def _get_fdiff_array(self, vals):
        # element-wise application of the scalar version
        vals = np.array(vals, ndmin=1, dtype="f8", copy=False)
        return np.array([self._get_fdiff_scalar(v) for v in vals])

    def _get_fdiff_scalar(self, val):
        # note: the *sign* of (model - data) is lost by this construction
        chi2 = -2 * self.get_lnprob_scalar(val)
        if chi2 < 0.0:
            chi2 = 0.0
        return np.sqrt(chi2)

    def sample(self, nrand=None):
        """
        Rejection-sample the prior. The proposal range only extends
        5 widths past each side.

        Parameters
        ----------
        nrand: int or None
            Number of samples; a scalar is returned when None.

        Returns
        -------
        samples: scalar or array of shape (nrand,)
        """
        rng = self.rng

        want_scalar = nrand is None
        if want_scalar:
            nrand = 1

        lo = self.minval - 5.0 * self.width_at_min
        hi = self.maxval + 5.0 * self.width_at_max

        out = np.zeros(nrand)
        ngood = 0
        nleft = nrand
        while ngood < nrand:
            cand = rng.uniform(low=lo, high=hi, size=nleft)
            accept_p = self.get_prob_array(cand)
            height = rng.uniform(size=nleft)

            keep, = np.where(height < accept_p)
            if keep.size > 0:
                out[ngood:ngood + keep.size] = cand[keep]
                ngood += keep.size
                nleft -= keep.size

        if want_scalar:
            out = out[0]
        return out
class Normal(PriorBase):
    """
    Gaussian prior with mean and sigma.

    The interface is kept consistent with LogNormal. The probability
    functions omit the Gaussian normalization factor, so the peak value
    is 1 and the peak ln(prob) is 0.

    Parameters
    ----------
    mean: float
        Mean of the Gaussian.
    sigma: float
        Standard deviation of the Gaussian.
    bounds: 2-tuple of floats or None
        Allowed range of the parameter; None means unbounded.
    rng: np.random.RandomState
        Random number generator to use.

    attributes
    ----------
    mean: float
        Mean of the Gaussian.
    sigma: float
        Standard deviation of the Gaussian.
    """

    def __init__(self, mean, sigma, rng, bounds=None):
        super().__init__(rng=rng, bounds=bounds)

        self.mean = mean
        self.sigma = sigma
        # cache 1/sigma and 1/sigma^2 for the evaluators
        self.sinv = 1.0 / sigma
        self.s2inv = 1.0 / sigma ** 2
        self.ndim = 1

    def get_lnprob(self, val):
        """
        Return -0.5*((val - mean)/sigma)**2; works on scalars and arrays.

        Parameters
        ----------
        val: number
            Location at which to evaluate
        """
        d = self.mean - val
        return -0.5 * d * d * self.s2inv

    # scalar and array versions share a single implementation
    get_lnprob_scalar = get_lnprob
    get_lnprob_array = get_lnprob

    def get_prob(self, val):
        """
        Return exp(-0.5*((val - mean)/sigma)**2), without the
        normalization factor; works on scalars and arrays.

        Parameters
        ----------
        val: number
            Location at which to evaluate
        """
        return np.exp(self.get_lnprob(val))

    get_prob_array = get_prob

    def get_prob_scalar(self, val):
        """
        Scalar-only unnormalized probability, using math.exp.

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        from math import exp

        d = self.mean - val
        return exp(-0.5 * d * d * self.s2inv)

    def get_fdiff(self, val):
        """
        Return (val - mean)/sigma for use with LM fitters.

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        return (val - self.mean) * self.sinv

    def sample(self, nrand=None, size=None):
        """
        Draw Gaussian samples.

        Parameters
        ----------
        nrand: int or None
            Number of samples; a scalar is returned when both nrand and
            size are None.

        Returns
        -------
        samples: scalar or array of shape (nrand,)
        """
        if size is None and nrand is not None:
            # keep the old `size` API while accepting the newer `nrand`
            size = nrand

        return self.rng.normal(loc=self.mean, scale=self.sigma, size=size,)
class LMBounds(PriorBase):
    """
    Simple bounds holder for the leastsqbound version of LM.

    get_fdiff is always zero; only the bounds are handed to the minimizer.

    Parameters
    ----------
    minval: float
        Lower bound.
    maxval: float
        Upper bound.
    rng: np.random.RandomState
        Random number generator to use.

    attributes
    ----------
    mean: float
        Mean of the corresponding uniform distribution.
    sigma: float
        Standard deviation of the corresponding uniform distribution.
    """

    def __init__(self, minval, maxval, rng):
        super().__init__(rng)

        self.bounds = (minval, maxval)
        # moments of a uniform distribution over the bounds
        self.mean = (minval + maxval) / 2.0
        self.sigma = (maxval - minval) * 0.28  # exact is 1/sqrt(12) ~ 0.28867513459

    def get_fdiff(self, val):
        """
        Always zero (shaped like val), for use with LM fitters.

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        return 0.0 * val

    def sample(self, nrand=None):
        """
        Draw uniform samples within the bounds.

        Parameters
        ----------
        nrand: int or None
            Number of samples; a scalar is returned when None.

        Returns
        -------
        samples: scalar or array of shape (nrand,)
        """
        low, high = self.bounds
        return self.rng.uniform(low=low, high=high, size=nrand)
class Bounded1D(PriorBase):
    """
    Wrap a 1-d pdf so that samples are restricted to the given bounds.

    Parameters
    ----------
    pdf: object
        Any object with a `sample(n)` method.
    bounds: 2-tuple of floats
        (low, high) allowed interval, with low < high.

    attributes
    ----------
    pdf: object
        The wrapped PDF object.
    """

    def __init__(self, pdf, bounds):
        # note: PriorBase.__init__ is intentionally not called; no rng
        # is needed here, the wrapped pdf does the sampling
        self.pdf = pdf
        self.set_limits(bounds)

    def set_limits(self, limits):
        """
        Validate and store the limits.

        Parameters
        ----------
        limits: sequence
            2-element sequence (low, high) with low < high.
        """
        try:
            valid = len(limits) == 2
        except TypeError:
            valid = False

        if valid is False:
            raise ValueError(
                "expected bounds to be 2-element sequence, got %s" % (limits,)
            )
        if limits[0] >= limits[1]:
            raise ValueError(
                "bounds[0] must be less than bounds[1], got: %s" % (limits,)
            )

        self.limits = limits
        self.bounds = limits

    def sample(self, nrand=None, size=None):
        """
        Rejection-sample the wrapped pdf until every draw falls inside
        the bounds.

        Parameters
        ----------
        nrand: int or None
            Number of samples; a scalar is returned when both nrand and
            size are None.

        Returns
        -------
        samples: scalar or array of shape (nrand,)
        """
        if size is None and nrand is not None:
            # keep the old `size` API while accepting the newer `nrand`
            size = nrand

        low, high = self.bounds
        ntot = 1 if size is None else size

        out = np.zeros(ntot)
        ngood = 0
        nleft = ntot
        while nleft > 0:
            cand = self.pdf.sample(nleft)

            keep, = np.where((cand > low) & (cand < high))
            if keep.size > 0:
                out[ngood:ngood + keep.size] = cand[keep]
                ngood += keep.size
                nleft -= keep.size

        if size is None:
            out = out[0]
        return out


# backwards-compatible alias so the old name keeps working
LimitPDF = Bounded1D
class LogNormal(PriorBase):
    """
    Lognormal distribution prior.

    Parameters
    ----------
    mean: float
        <x> in linear space. The mean in log(x) is then
        <log(x)> = log(mean) - 0.5*log( 1 + sigma**2/mean**2 )
    sigma: float
        The linear-space standard deviation, so that
        var(log(x)) = log( 1 + sigma**2/mean**2 )
    shift: float, optional
        Added to samples from the underlying log-normal, and subtracted
        from locations before evaluating the PDF.
    rng: np.random.RandomState
        Random number generator to use.

    attributes
    ----------
    mean, sigma: float
        Linear-space mean and standard deviation.
    logmean, logvar, logsigma, logivar: float
        Log-space mean, variance, standard deviation and inverse variance.
    mode, log_mode: float
        Linear- and log-space mode.
    lnprob_max: float
        ln(prob) at the mode; subtracted so the peak ln(prob) is 0.
    """

    def __init__(self, mean, sigma, rng, shift=None):
        super().__init__(rng=rng)

        if mean <= 0:
            raise ValueError("mean %s is < 0" % mean)

        self.shift = shift
        self.mean = mean
        self.sigma = sigma

        # standard lognormal moment relations
        logmean = np.log(self.mean) - 0.5 * np.log(
            1 + self.sigma ** 2 / self.mean ** 2
        )
        logvar = np.log(1 + self.sigma ** 2 / self.mean ** 2)
        logsigma = np.sqrt(logvar)
        logivar = 1.0 / logvar

        self.logmean = logmean
        self.logvar = logvar
        self.logsigma = logsigma
        self.logivar = logivar

        log_mode = self.logmean - self.logvar
        self.mode = np.exp(log_mode)
        chi2 = self.logivar * (log_mode - self.logmean) ** 2

        # subtract mode to make max 0.0
        self.lnprob_max = -0.5 * chi2 - log_mode

        self.log_mode = log_mode

    def get_lnprob_scalar(self, val):
        """
        Log-probability at val (peak value is 0).

        Parameters
        ----------
        val: number
            The location at which to evaluate

        Raises
        ------
        GMixRangeError
            When (val - shift) <= 0.
        """
        if self.shift is not None:
            val = val - self.shift

        if val <= 0:
            raise GMixRangeError("values of val must be > 0")

        logval = np.log(val)
        chi2 = self.logivar * (logval - self.logmean) ** 2

        # subtract ln(prob) at the mode so the maximum is 0.0
        return -0.5 * chi2 - logval - self.lnprob_max

    def get_lnprob_array(self, vals):
        """
        Log-probability for an array of values (peak value is 0).

        Parameters
        ----------
        vals: array
            The locations at which to evaluate

        Raises
        ------
        GMixRangeError
            When any (val - shift) <= 0.
        """
        # np.asarray: avoids the numpy>=2 error raised by
        # np.array(..., copy=False) when a copy is actually required
        vals = np.asarray(vals, dtype="f8")
        if self.shift is not None:
            vals = vals - self.shift

        w, = np.where(vals <= 0)
        if w.size > 0:
            raise GMixRangeError("values must be > 0")

        logvals = np.log(vals)
        chi2 = self.logivar * (logvals - self.logmean) ** 2

        # subtract ln(prob) at the mode so the maximum is 0.0
        return -0.5 * chi2 - logvals - self.lnprob_max

    def get_prob_scalar(self, val):
        """
        Probability at val (1.0 at the mode).

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        return np.exp(self.get_lnprob_scalar(val))

    def get_prob_array(self, vals):
        """
        Probability for an array of values (1.0 at the mode).

        Parameters
        ----------
        vals: array
            The locations at which to evaluate
        """
        return np.exp(self.get_lnprob_array(vals))

    def get_fdiff(self, val):
        """
        sqrt(-2 ln p) ~ (data - mode)/err for use with LM fitters.

        Parameters
        ----------
        val: number
            The location at which to evaluate
        """
        chi2 = -2 * self.get_lnprob_scalar(val)
        if chi2 < 0.0:
            chi2 = 0.0
        return np.sqrt(chi2)

    def sample(self, nrand=None):
        """
        Draw samples from the LogNormal.

        Parameters
        ----------
        nrand: int or None
            Number of samples; a scalar is returned when None.

        Returns
        -------
        samples: scalar or array of shape (nrand,)
        """
        z = self.rng.normal(size=nrand)
        r = np.exp(self.logmean + self.logsigma * z)
        if self.shift is not None:
            r += self.shift
        return r

    def sample_brute(self, nrand=None, maxval=None):
        """
        Rejection-sample the distribution; used to cross-check `sample`.

        Parameters
        ----------
        nrand: int or None
            Number of samples; a scalar is returned when None.
        maxval: float or None
            Upper edge of the proposal range; defaults to mean + 10*sigma.

        Returns
        -------
        samples: scalar or array of shape (nrand,)
        """
        rng = self.rng

        if maxval is None:
            maxval = self.mean + 10 * self.sigma

        is_scalar = nrand is None
        if is_scalar:
            nrand = 1

        samples = np.zeros(nrand)
        ngood = 0
        nleft = nrand
        while ngood < nrand:
            # NOTE(review): .rand is the legacy RandomState API; a
            # numpy Generator rng would not provide it — confirm rng type
            rvals = maxval * rng.rand(nleft)
            if self.shift is not None:
                rvals += self.shift

            # proposal heights in [0, 1), the max of the normalized prob
            h = rng.uniform(size=nleft)
            pvals = self.get_prob_array(rvals)

            w, = np.where(h < pvals)
            if w.size > 0:
                samples[ngood:ngood + w.size] = rvals[w]
                ngood += w.size
                nleft -= w.size

        if is_scalar:
            samples = samples[0]
        return samples

    def _calc_fdiff(self, pars):
        """
        fdiff for fitting the distribution parameters [mean, sigma, amp]
        to the (x, y) data stored by `fit`.
        """
        try:
            ln = LogNormal(pars[0], pars[1], rng=self.rng)
            model = ln.get_prob_array(self._fitx) * pars[2]
        except (GMixRangeError, ValueError):
            return self._fity * 0 - np.inf
        return model - self._fity

    def fit(self, x, y):
        """
        Fit the distribution parameters to the input x and y.

        Parameters
        ----------
        x: array-like
            The x-values for the fit.
        y: array-like
            The y-values for the fit, usually p(x).

        Returns
        -------
        res: dict
            Best-fit parameters and other fit information.
        """
        from ..fitting.leastsqbound import run_leastsq

        rng = self.rng

        self._fitx = x
        self._fity = y

        for i in range(4):
            # BUGFIX: jitter the starting guess by +/-10% per retry; the
            # original used low=0.1, high=0.1 which produced the constant
            # 0.1 and made every retry use the identical guess
            f1, f2, f3 = 1.0 + rng.uniform(low=-0.1, high=0.1, size=3)
            guess = np.array([x.mean() * f1, x.std() * f2, y.mean() * f3])
            res = run_leastsq(self._calc_fdiff, guess, 0,)
            if res["flags"] == 0:
                break

        return res
class Sinh(PriorBase):
    """
    A sinh distribution with a mean and scale.

    Currently only supports "fdiff" style usage as a prior,
    e.g. for LM.

    Parameters
    ----------
    mean: float
        The location where fdiff evaluates to zero.
    scale: float
        The value such that fdiff at `mean` +/- `scale` is +/-1.
    rng: np.random.RandomState
        A random number generator (RNG) to use.

    attributes
    ----------
    mean: float
        The location where fdiff evaluates to zero.
    scale: float
        The value such that fdiff at `mean` +/- `scale` is +/-1.
    """

    def __init__(self, mean, scale, rng):
        super().__init__(rng=rng)
        self.mean = mean
        self.scale = scale

    def get_fdiff(self, val):
        """
        For use with an LM fitter: computes sinh((model-data)/width).

        Parameters
        ----------
        val: number
            The location at which to evaluate.
        """
        return np.sinh((val - self.mean) / self.scale)

    def sample(self, nrand=None):
        """
        Sample uniformly around the mean, within +/- one scale length.

        Parameters
        ----------
        nrand: int or None
            The number of samples.  When None a single scalar sample is
            drawn.  Default is None.

        Returns
        -------
        samples: scalar or array-like
            Array with shape (`nrand`,), or a scalar when `nrand` is None.
        """
        scalar_requested = nrand is None
        num = 1 if scalar_requested else nrand

        draws = self.rng.uniform(
            low=self.mean - self.scale,
            high=self.mean + self.scale,
            size=num,
        )

        return draws[0] if scalar_requested else draws
class TruncatedGaussian(PriorBase):
    """
    A Gaussian truncated to the interval [minval, maxval].

    Parameters
    ----------
    mean: float
        The mean of the Gaussian.
    sigma: float
        The standard deviation of the Gaussian.
    minval: float
        The minimum of the distribution.
    maxval: float
        The maximum of the distribution.
    rng: np.random.RandomState
        A random number generator (RNG) to use.

    attributes
    ----------
    mean: float
        The mean of the Gaussian.
    sigma: float
        The standard deviation of the Gaussian.
    minval: float
        The minimum of the distribution.
    maxval: float
        The maximum of the distribution.
    """

    def __init__(self, mean, sigma, minval, maxval, rng):
        super().__init__(rng=rng)
        self.mean = mean
        self.sigma = sigma
        # precomputed 1/sigma**2 and 1/sigma for lnprob/fdiff evaluations
        self.ivar = 1.0 / sigma ** 2
        self.sinv = 1.0 / sigma
        self.minval = minval
        self.maxval = maxval

    def get_lnprob_scalar(self, val):
        """
        Log probability at a point; raises GMixRangeError when the point
        is outside [minval, maxval].

        Parameters
        ----------
        val: number
            The location at which to evaluate.
        """
        if val < self.minval or val > self.maxval:
            raise GMixRangeError("value out of range")

        offset = val - self.mean
        return -0.5 * offset * offset * self.ivar

    def get_lnprob_array(self, val):
        """
        Log probability for an array of points; entries outside
        (minval, maxval) are set to -inf.

        Parameters
        ----------
        val: array
            The locations at which to evaluate.
        """
        lnp = np.zeros(val.size)
        lnp -= np.inf

        inside, = np.where((val > self.minval) & (val < self.maxval))
        if inside.size > 0:
            offset = val[inside] - self.mean
            lnp[inside] = -0.5 * offset * offset * self.ivar

        return lnp

    def get_fdiff(self, val):
        """
        Compute sqrt(-2ln(p)) ~ (data - mode)/err for use with an LM
        fitter; raises GMixRangeError outside [minval, maxval].

        Parameters
        ----------
        val: number
            The location at which to evaluate.
        """
        if val < self.minval or val > self.maxval:
            raise GMixRangeError("value out of range")

        return (val - self.mean) * self.sinv

    def sample(self, nrand=None):
        """
        Sample by rejection: draw from the untruncated Gaussian and keep
        only draws falling inside (minval, maxval).

        Parameters
        ----------
        nrand: int or None
            The number of samples.  When None a single scalar sample is
            drawn.  Default is None.

        Returns
        -------
        samples: scalar or array-like
            Array with shape (`nrand`,), or a scalar when `nrand` is None.
        """
        rng = self.rng

        scalar_requested = nrand is None
        num = 1 if scalar_requested else nrand

        vals = np.zeros(num)
        nfilled = 0
        nremain = num

        while nfilled < num:
            draws = rng.normal(loc=self.mean, scale=self.sigma, size=nremain)

            keep, = np.where((draws > self.minval) & (draws < self.maxval))
            nkeep = keep.size
            if nkeep > 0:
                vals[nfilled:nfilled + nkeep] = draws[keep]
                nfilled += nkeep
                nremain -= nkeep

        return vals[0] if scalar_requested else vals
|
esheldonREPO_NAMEngmixPATH_START.@ngmix_extracted@ngmix-master@ngmix@priors@priors.py@.PATH_END.py
|
{
"filename": "tf_planck2018_lite.md",
"repo_name": "alessiospuriomancini/cosmopower",
"repo_path": "cosmopower_extracted/cosmopower-main/docs/tutorial/likelihoods/tf_planck2018_lite.md",
"type": "Markdown"
}
|
The notebook [tf_planck2018_lite.ipynb](https://github.com/alessiospuriomancini/cosmopower/blob/main/cosmopower/notebooks/likelihoods_notebooks/tf_planck2018_lite.ipynb) shows an example of how to run a complete inference pipeline with power spectra sourced from ``CosmoPower``. The notebook runs a version of the _Planck_ 2018 ``lite`` likelihood rewritten to be fully implemented in [TensorFlow](https://www.tensorflow.org/): [tf_planck2018_lite.py](https://github.com/alessiospuriomancini/cosmopower/blob/main/cosmopower/likelihoods/tf_planck2018_lite). The ``lite`` version of the _Planck_ likelihood is pre-marginalised over a set of nuisance parameters. This TensorFlow version of the _Planck_ lite likelihood, provided as part of ``CosmoPower``, is an adaptation for TensorFlow of the [planck-lite-py](https://github.com/heatherprince/planck-lite-py) likelihood written by H. Prince and J. Dunkley.
If you use ``tf_planck2018_lite``, _in addition_ to the ``CosmoPower`` [release paper](https://arxiv.org/abs/2106.03846) please also cite [Prince & Dunkley (2019)](https://arxiv.org/abs/1909.05869) and [Planck (2018)](https://arxiv.org/abs/1907.12875).
The notebook [tf_planck2018_lite.ipynb](https://github.com/alessiospuriomancini/cosmopower/blob/main/cosmopower/notebooks/likelihoods_notebooks/tf_planck2018_lite.ipynb) can also be run on Colab [](https://colab.research.google.com/drive/1TUDp1MWe0nU79JJXlsHBuMszWVpOLg7S?usp=sharing)
# ``tf_planck2018_lite`` instantiation
Here we will simply show how to instantiate the ``tf_planck2018_lite`` likelihood, referring to the [tf_planck2018_lite.ipynb](https://github.com/alessiospuriomancini/cosmopower/blob/main/cosmopower/notebooks/likelihoods_notebooks/tf_planck2018_lite.ipynb) notebook for a more detailed example of how to run it for inference.
The ``tf_planck2018_lite`` likelihood requires emulators for the TT, TE, EE power spectra. In the [tf_planck2018_lite.ipynb](https://github.com/alessiospuriomancini/cosmopower/blob/main/cosmopower/notebooks/likelihoods_notebooks/tf_planck2018_lite.ipynb) notebook we use the pre-trained models from the ``CosmoPower`` [release paper](https://arxiv.org/abs/2106.03846), available [in the ``CosmoPower`` repository](https://github.com/alessiospuriomancini/cosmopower/blob/main/cosmopower/trained_models/CP_paper/CMB).
To create an instance of ``tf_planck2018_lite``, we import ``CosmoPower`` and remember to input:
- a path to the ``tf_planck2018_lite`` likelihood. It will be used to access the _Planck_ data;
- parameters of the analysis, as well as their priors;
- the ``CosmoPower`` emulators.
```python
import cosmopower as cp
# CosmoPower emulators
tt_emu_model = cp.cosmopower_NN(restore=True,
restore_filename='cmb_TT_NN')
te_emu_model = cp.cosmopower_PCAplusNN(restore=True,
restore_filename='cmb_TE_PCAplusNN')
ee_emu_model = cp.cosmopower_NN(restore=True,
restore_filename='cmb_EE_NN')
# path to the tf_planck2018_lite likelihood
tf_planck2018_lite_path = '/path/to/cosmopower/likelihoods/tf_planck2018_lite/'
# parameters of the analysis, and their priors
parameters_and_priors = {'omega_b': [0.001, 0.04, 'uniform'],
'omega_cdm': [0.005, 0.99, 'uniform'],
'h': [0.2, 1.0, 'uniform'],
'tau_reio': [0.01, 0.8, 'uniform'],
'n_s': [0.9, 1.1, 'uniform'],
'ln10^{10}A_s': [1.61, 3.91, 'uniform'],
'A_planck': [1.0, 0.01, 'gaussian'],
}
# instantiation
tf_planck = cp.tf_planck2018_lite(parameters=parameters_and_priors,
tf_planck2018_lite_path=tf_planck2018_lite_path,
tt_emu_model=tt_emu_model,
te_emu_model=te_emu_model,
ee_emu_model=ee_emu_model
)
```
|
alessiospuriomanciniREPO_NAMEcosmopowerPATH_START.@cosmopower_extracted@cosmopower-main@docs@tutorial@likelihoods@tf_planck2018_lite.md@.PATH_END.py
|
{
"filename": "_array.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/histogram/error_y/_array.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ArrayValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``histogram.error_y.array`` data property."""

    def __init__(self, plotly_name="array", parent_name="histogram.error_y", **kwargs):
        # pull the defaults out of kwargs first so callers may override them
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(ArrayValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@histogram@error_y@_array.py@.PATH_END.py
|
{
"filename": "_weight.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/title/font/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for ``histogram2dcontour.colorbar.title.font.weight``."""

    def __init__(
        self,
        plotly_name="weight",
        parent_name="histogram2dcontour.colorbar.title.font",
        **kwargs,
    ):
        # pull the defaults out of kwargs first so callers may override them
        edit_type = kwargs.pop("edit_type", "colorbars")
        extras = kwargs.pop("extras", ["normal", "bold"])
        upper = kwargs.pop("max", 1000)
        lower = kwargs.pop("min", 1)
        super(WeightValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            extras=extras,
            max=upper,
            min=lower,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram2dcontour@colorbar@title@font@_weight.py@.PATH_END.py
|
{
"filename": "moster13.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/empirical_models/smhm_models/moster13.py",
"type": "Python"
}
|
"""
Module containing classes used to model the mapping between
stellar mass and halo mass based on Moster et al. (2013).
"""
from __future__ import division, print_function, absolute_import, unicode_literals
from ..component_model_templates import PrimGalpropModel
from ...sim_manager import sim_defaults
__all__ = ['Moster13SmHm']
class Moster13SmHm(PrimGalpropModel):
    """ Stellar-to-halo-mass relation based on
    Moster et al. (2013), arXiv:1205.5807.
    """

    def __init__(self, **kwargs):
        """
        Parameters
        ----------
        prim_haloprop_key : string, optional
            String giving the column name of the primary halo property
            governing stellar mass.
            Default is set in the `~halotools.empirical_models.model_defaults`
            module.

        scatter_model : object, optional
            Class governing stochasticity of stellar mass. Default scatter is
            log-normal, implemented by the `LogNormalScatterModel` class.

        scatter_abscissa : array_like, optional
            Array of values giving the abscissa at which
            the level of scatter will be specified by the input ordinates.
            Default behavior will result in constant scatter at a level set in
            the `~halotools.empirical_models.model_defaults` module.

        scatter_ordinates : array_like, optional
            Array of values defining the level of scatter at the input
            abscissa.
            Default behavior will result in constant scatter at a level set in
            the `~halotools.empirical_models.model_defaults` module.
        """
        super(Moster13SmHm, self).__init__(
            galprop_name='stellar_mass', **kwargs)

        # Hubble parameter assumed by Moster+13; used to convert between
        # their h=0.704 units and the h=1 units used throughout halotools
        self.littleh = 0.704

        self.publications = ['arXiv:0903.4682', 'arXiv:1205.5807']

    def mean_stellar_mass(self, **kwargs):
        """ Return the stellar mass of a central galaxy as a function
        of the input table.

        Parameters
        ----------
        prim_haloprop : array, optional
            Array of mass-like variable upon which occupation statistics are
            based.  If ``prim_haloprop`` is not passed, then the ``table``
            keyword argument must be passed.

        table : object, optional
            Data table storing halo catalog.  If ``table`` is not passed,
            then the ``prim_haloprop`` keyword argument must be passed.

        redshift : float or array, optional
            Redshift of the halo hosting the galaxy.
            Default is set in `~halotools.sim_manager.sim_defaults`.
            If passing an array, must be of the same length as
            the ``prim_haloprop`` or ``table`` argument.

        Returns
        -------
        mstar : array_like
            Array containing stellar masses living in the input table.

        Notes
        ------
        The parameter values in Moster+13 were fit to data assuming h=0.704,
        but all halotools inputs are in h=1 units. Thus we transform the
        input halo mass to h=0.704 units, evaluate using the Moster
        parameters, and transform back to h=1 units before returning.
        """
        # Retrieve the array storing the mass-like variable
        if 'table' in kwargs:
            mass = kwargs['table'][self.prim_haloprop_key]
        elif 'prim_haloprop' in kwargs:
            mass = kwargs['prim_haloprop']
        else:
            raise KeyError("Must pass one of the following keyword arguments to mean_occupation:\n"
                "``table`` or ``prim_haloprop``")

        if 'redshift' in kwargs:
            redshift = kwargs['redshift']
        else:
            # fall back to the instance attribute when present, otherwise
            # to the package-wide default redshift
            redshift = getattr(self, 'redshift', sim_defaults.default_redshift)

        # convert mass from h=1 to h=0.704
        mass = mass / self.littleh

        # evolution factor (1 - a), with a the scale factor, that
        # multiplies the redshift-dependent terms of each parameter
        scale_factor = 1. / (1 + redshift)
        evo = 1 - scale_factor

        m1 = self.param_dict['m10'] + self.param_dict['m11'] * evo
        n = self.param_dict['n10'] + self.param_dict['n11'] * evo
        beta = self.param_dict['beta10'] + self.param_dict['beta11'] * evo
        gamma = self.param_dict['gamma10'] + self.param_dict['gamma11'] * evo

        # Eqn 2 of Moster+13
        mass_ratio = mass / (10. ** m1)
        mstar = (2. * n * mass) / (mass_ratio ** (-beta) + mass_ratio ** gamma)

        # mstar was computed in h=0.704 units; convert back to h=1 units
        return mstar * self.littleh ** 2

    def retrieve_default_param_dict(self):
        """ Method returns a dictionary of all model parameters
        set to the values in Table 1 of Moster et al. (2013).

        Returns
        -------
        d : dict
            Dictionary containing parameter values.
        """
        # All calculations are done internally using the same h=0.7 units
        # as in Behroozi et al. (2010), so the parameter values here are
        # the same as in Table 1, even though the mean_stellar_mass method
        # accepts and returns arguments in h=1 units.
        return {
            'm10': 11.590,
            'm11': 1.195,
            'n10': 0.0351,
            'n11': -0.0247,
            'beta10': 1.376,
            'beta11': -0.826,
            'gamma10': 0.608,
            'gamma11': 0.329,
        }
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@empirical_models@smhm_models@moster13.py@.PATH_END.py
|
{
"filename": "make_fBM_moments.py",
"repo_name": "Astroua/TurbuStat",
"repo_path": "TurbuStat_extracted/TurbuStat-master/Examples/paper_plots/make_fBM_moments.py",
"type": "Python"
}
|
'''
Make moment maps from the fBM cubes.
Only generating from the repeated cubes with density and velocity indices
of -4.
'''
from spectral_cube import SpectralCube
from astropy.table import Table, Column
import numpy as np
import astropy.units as u
from astropy import constants as cc
import os
import matplotlib.pyplot as plt
from itertools import product
from turbustat.moments import Moments
# data_path = "/Volumes/Travel_Data/Turbulence/fBM_cubes/"
data_path = os.path.expanduser("~/MyRAID/Astrostat/TurbuStat_Paper/fBM_cubes/")

# spectral indices of the fBM velocity and density fields on disk
vel_inds = np.array([3.3, 11 / 3., 4.])
dens_inds = np.array([2.5, 3., 11 / 3., 4.])

# number of repeated realizations per index combination
reps = range(5)

# cube side length in pixels; part of the on-disk file name
cube_size = 256

# NOTE(review): `spec_ext` is never appended to below -- presumably a
# leftover accumulator; confirm before removing
spec_ext = []

# for dens, vel, rep in product(dens_inds, vel_inds, reps):
# for dens, vel in product(dens_inds[::-1], vel_inds[::-1]):
for rep in reps:
    # only the repeated cubes with density and velocity indices of -4
    # are processed here (see the module docstring)
    dens = 4.
    vel = 4.

    # NOTE(review): `name` is built but never used below -- looks like a
    # label intended for later plotting/saving; confirm
    name = "fBM_density_{0:.2f}_velocity_{1:.2f}_rep_{2}"\
        .format(np.abs(dens), np.abs(vel), rep)

    filename = "fBM_density_{0:.2f}_velocity_{1:.2f}_rep_{2}_size_{3}.fits"\
        .format(np.abs(dens), np.abs(vel), rep, cube_size)

    cube = SpectralCube.read(os.path.join(data_path, filename))

    # compute the moment maps and their uncertainty maps
    moments = Moments(cube)
    moments.make_moments()
    moments.make_moment_errors()
|
AstrouaREPO_NAMETurbuStatPATH_START.@TurbuStat_extracted@TurbuStat-master@Examples@paper_plots@make_fBM_moments.py@.PATH_END.py
|
{
"filename": "time_regression.py",
"repo_name": "quatrope/astroalign",
"repo_path": "astroalign_extracted/astroalign-master/benchmarks/time_regression.py",
"type": "Python"
}
|
# MIT License
# Copyright (c) 2016-2019 Martin Beroiz, Juan B. Cabral, Bruno Sanchez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# IMPORTS
# =============================================================================
import sys
import os
import timeit
import datetime as dt
import argparse
from collections import OrderedDict
import numpy as np
import astroalign as aa
from sklearn.model_selection import ParameterGrid
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
import pandas as pd
import joblib
import tqdm
test_path = os.path.abspath(os.path.dirname(aa.__file__))
sys.path.insert(0, test_path)
from tests.test_align import simulate_image_pair # noqa
# =============================================================================
# CONSTANTS
# =============================================================================

# default square-image sizes in pixels; min/max bound the benchmark range
SIZES = (256, 512, 768, 1024)

# number of simulated stars per image
STARS = 10000

# lambda parameter of the Poisson noise added to the images
NOISE = 1000

# default pixel increment between consecutive image sizes
STEP = 10

# the statement that timeit measures for every image pair
STATEMENT = "aa.register(source, target)"

# timeit repetitions per image pair
REPEATS = 50

# random image pairs generated per size
COMB_NUMBER = 10

# default figure size in inches for the plot command
DEFAULT_SIZE = (8, 8)
# =============================================================================
# FUNCTIONS
# =============================================================================
def get_images(size, stars, noise, seed):
    """Build a (source, target) pair of simulated square images.

    Parameters
    ----------
    size : int
        Side length of the square images, in pixels.
    stars : int
        Number of simulated stars per image.
    noise : int
        Lambda parameter of the Poisson noise.
    seed : int or None
        When not None, seeds numpy's global RNG for reproducibility.
    """
    if seed is not None:
        np.random.seed(seed)

    side = (size, size)
    pair = simulate_image_pair(shape=side, num_stars=stars, noise_level=noise)
    source, target = pair[:2]
    return source, target
def get_parameters(min_size, max_size, step_size, stars,
                   noise, seed, comb_number, repeats):
    """Create the list of benchmark-case dictionaries.

    Every size in the range appears ``comb_number`` times, each with its
    own image seed so cases can be executed in parallel reproducibly.
    """
    n_samples = int((max_size - min_size) / step_size)
    sizes = np.linspace(min_size, max_size, n_samples, dtype=int)

    cases = list(ParameterGrid({
        "size": sizes, "stars": [stars],
        "noise": [noise], "repeats": [repeats]})) * comb_number

    # a dedicated RandomState so the per-image seeds are deterministic
    # even when the cases later run in parallel
    rng = np.random.RandomState(seed)
    per_image_seeds = rng.randint(1_000_000, size=len(cases))

    for idx, case in enumerate(cases):
        case["idx"] = idx
        case["seed"] = seed
        case["min_size"] = min_size
        case["max_size"] = max_size
        case["step_size"] = step_size
        case["images_seed"] = per_image_seeds[idx]

    return cases
def _test(idx, min_size, max_size, step_size, size,
          stars, noise, seed, repeats, images_seed):
    """Run one benchmark case: time ``aa.register`` on one image pair.

    Returns an OrderedDict row containing the case parameters, the number
    of timeit loops, the minimum per-loop time and every raw repeat time.
    """
    # create the two images
    source, target = get_images(
        size=size, stars=stars, noise=noise, seed=images_seed)

    # create the timer; STATEMENT references aa/source/target by name
    test_globals = {"aa": aa, "source": source, "target": target}
    timer = timeit.Timer(stmt=STATEMENT, globals=test_globals)

    # find the number of loops automatically, as `python -m timeit` does
    loops = timer.autorange()[0]

    # create a copy of the params to be returned as the result row
    result = OrderedDict({
        "idx": idx, "min_size": min_size, "max_size": max_size,
        "step_size": step_size, "size": size, "noise": noise,
        "stars": stars, "seed": seed, "images_seed": images_seed,
        "repeats": repeats, "loops": loops})

    # execute the timeit measurement `repeats` times
    times = timer.repeat(repeats, loops)

    # store the times into the result; "time" is the minimum per-loop
    # time, the usual timeit statistic (a lower bound on the true cost)
    result["time"] = np.min(np.array(times) / loops)
    for tidx, time in enumerate(times):
        result[f"time_{tidx}"] = time

    return result
def benchmark(min_size=min(SIZES), max_size=max(SIZES), step_size=STEP,
              stars=STARS, noise=NOISE, seed=None, repeats=REPEATS,
              n_jobs=-1, comb_number=COMB_NUMBER):
    """Run every benchmark case in parallel and gather the timings.

    Returns a pandas.DataFrame with one row per case, as produced by
    ``_test``.
    """
    cases = get_parameters(
        min_size=min_size, max_size=max_size, step_size=step_size,
        repeats=repeats, stars=stars, noise=noise, seed=seed,
        comb_number=comb_number)

    # fan the cases out over n_jobs workers, with a progress bar
    with joblib.Parallel(n_jobs=n_jobs) as parallel:
        rows = parallel(
            joblib.delayed(_test)(**case) for case in tqdm.tqdm(cases))

    return pd.DataFrame(rows)
def describe(results):
    """Summarize a benchmark results frame.

    Parameters
    ----------
    results : pandas.DataFrame
        Output of :func:`benchmark`; must contain the columns
        ``repeats``, ``time``, ``size`` and ``loops``.

    Returns
    -------
    tuple
        ``(repetitions, resume)`` where ``repetitions`` is the number of
        timeit repetitions used (taken from the first row) and ``resume``
        is ``DataFrame.describe()`` over ``time``, ``size`` and ``loops``.
    """
    repetitions = results["repeats"].values[0]
    resume = results.loc[:, ["time", "size", "loops"]].describe()
    return repetitions, resume
def plot(results, ax):
    """Scatter benchmark time vs. image size and overlay a linear fit.

    Parameters
    ----------
    results : pandas.DataFrame
        Benchmark data with at least ``size`` and ``time`` columns.
    ax : matplotlib.axes.Axes
        Axes to draw into; returned to allow chaining.
    """
    df = results[["size", "time"]]
    df.plot.scatter(x='size', y='time', c='LightBlue', ax=ax, marker=".")

    # linear regression of time against size; sklearn expects a 2D X
    x = df["size"].values.reshape((-1, 1))
    y = df["time"].values

    linear = LinearRegression().fit(x, y)
    y_pred = linear.predict(x)

    # goodness-of-fit metrics reported in the axes title
    mqe = mean_squared_error(y, y_pred)
    r2 = r2_score(y, y_pred)

    ax.plot(x, y_pred, color='DarkBlue', linewidth=2)

    ax.set_title(
        "Linear regression between size and time "
        f"\n$mse={mqe:.3f}$ - $R^2={r2:.3f}$")
    ax.set_xlabel("Size")
    ax.set_ylabel("Seconds")

    return ax
# =============================================================================
# CLI MAIN
# =============================================================================
class CLI:
    """Command line interface for the astroalign time benchmark.

    Subcommands:

    - ``benchmark``: run the timing benchmark and store a CSV.
    - ``describe``: print a summary of a stored results CSV.
    - ``plot``: plot a stored results CSV with a linear-regression overlay.
    """

    def __init__(self):
        self._parser = argparse.ArgumentParser(
            description="Astroalign time benchmark tool based on timeit")

        # with no subcommand selected, just print the usage message
        self._parser.set_defaults(
            callback=lambda ns: self.parser.print_usage())

        self._parser.add_argument(
            '--version', action='version', version='%(prog)s 2019.10')

        subparsers = self._parser.add_subparsers()

        # =====================================================================
        # benchmark subparser
        # =====================================================================

        benchmark = subparsers.add_parser(
            "benchmark",
            help="Execute and collect the regression benchmark of astroalign")
        benchmark.set_defaults(callback=self.benchmark_command)

        benchmark.add_argument(
            "--max", dest="max_size", type=int, default=max(SIZES),
            help=("The size in pixels of the bigger square image. "
                  f"(defaults={max(SIZES)})."))

        benchmark.add_argument(
            "--min", dest="min_size", type=int, default=min(SIZES),
            # BUG FIX: the help text used to display max(SIZES) as the
            # default, although the actual default is min(SIZES)
            help=("The size in pixels of the smallest square image. "
                  f"(defaults={min(SIZES)})."))

        benchmark.add_argument(
            "--step", dest="step_size", type=int, default=STEP,
            help=f"The size between every image (defaults={STEP}).")

        benchmark.add_argument(
            "--stars", dest="stars", type=int, default=STARS,
            help=("The total numbers of stars in the image "
                  f"(defaults={STARS})."))

        benchmark.add_argument(
            "--noise", dest="noise", type=int, default=NOISE,
            help=f"lambda parameter for poisson noise (default={NOISE})")

        benchmark.add_argument(
            "--number", dest="comb_number", type=int, default=10,
            help=("How many random images pairs must be created for one "
                  f"size (default={COMB_NUMBER})."))

        benchmark.add_argument(
            "--seed", dest="seed", type=int, default=None,
            help=("Random seed used to initialize the pseudo-random number "
                  "generator. if seed is None, then random-state will try to "
                  "read data from /dev/urandom (or the Windows analogue) if "
                  "available or seed from the clock otherwise "
                  "(default=None)."))

        benchmark.add_argument(
            "--repeats", dest="repeats", type=int, default=REPEATS,
            help=("How many measurements must be taken for every image pair. "
                  "The final 'time' is the lower bound of all the times. "
                  "Docs: https://docs.python.org/3.7/library/timeit.html"))

        benchmark.add_argument(
            "--jobs", dest="n_jobs", type=int, default=-1,
            help=("The number of CPU to run the benchmars. "
                  "-1 uses all the available CPUS (default=-1)"))

        benchmark.add_argument(
            "--out", "-o", dest="out", required=True,
            type=argparse.FileType('w'),
            help="Output file path. The data was stored in CSV format")

        # =====================================================================
        # describe subparser
        # =====================================================================

        describe = subparsers.add_parser(
            "describe",
            help="Show a resume and (optionally) of the benchmark results")
        describe.set_defaults(callback=self.describe_command)

        describe.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the time benchmark data in CSV format")

        # =====================================================================
        # plot subparser
        # =====================================================================

        plot = subparsers.add_parser(
            "plot", help="Show three boxplots of a given results")
        plot.set_defaults(callback=self.plot_command)

        plot.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the time benchmark data in CSV format")

        plot.add_argument(
            "--size", dest="size", nargs=2, type=float,
            help=("The size of the entire figure in inches in the format "
                  f"'width height' (default={DEFAULT_SIZE})."))

        plot.add_argument(
            "--out", "-o", dest="out",
            help=("A file to store the generated plot. "
                  "By default the default matplotlib backend shows the plot"))

    def parse_and_run(self, *args, **kwargs):
        """Parse argv and dispatch to the selected subcommand callback."""
        ns = self._parser.parse_args(*args, **kwargs)
        return ns.callback(ns)

    def plot_command(self, ns):
        """Handle ``plot``: draw the results plot, show it or save it."""
        import matplotlib.pyplot as plt

        results = pd.read_csv(ns.file)
        size = ns.size if ns.size else DEFAULT_SIZE

        fig, ax = plt.subplots()
        fig.set_size_inches(*size)

        plot(results, ax)
        fig.suptitle("")

        plt.tight_layout()
        if ns.out is None:
            print(f"Showing plot for data stored in '{ns.file.name}'...")
            # NOTE(review): FigureCanvas.set_window_title is deprecated in
            # recent matplotlib (use fig.canvas.manager.set_window_title);
            # left unchanged to preserve behavior on older versions
            fig.canvas.set_window_title(f"{self.parser.prog} - {ns.file.name}")
            plt.show()
        else:
            print(
                f"Storing plot for data in '{ns.file.name}' -> '{ns.out}'...")
            plt.savefig(ns.out)
        print("DONE!")

    def describe_command(self, ns):
        """Handle ``describe``: print a summary of a stored results CSV."""
        results = pd.read_csv(ns.file)
        repetitions, resume = describe(results)
        print(f"Executed: {len(results)} cases")
        print(f"\twith {repetitions} repetitions \n")
        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

    def benchmark_command(self, ns):
        """Handle ``benchmark``: run the benchmark and store the CSV."""
        if ns.step_size <= 0:
            self._parser.error(f"'step' must be > 0. Found {ns.step_size}")

        now = dt.datetime.now
        print(
            f"[{now()}] Starting benchmark for astroalign {aa.__version__}...")
        print("")

        results = benchmark(
            max_size=ns.max_size, min_size=ns.min_size, step_size=ns.step_size,
            stars=ns.stars, noise=ns.noise, seed=ns.seed,
            repeats=ns.repeats, n_jobs=ns.n_jobs, comb_number=ns.comb_number)

        repetitions, resume = describe(results)
        print(f"[{now()}] Executed: {len(results)} cases")
        print(f"\twith {repetitions} repetitions \n")
        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

        results.to_csv(ns.out, index=False)
        print(f"[{now()}] Data stored in '{ns.out.name}'")

    @property
    def parser(self):
        """The underlying :class:`argparse.ArgumentParser`."""
        return self._parser
# =============================================================================
# MAIN
# =============================================================================
if __name__ == "__main__":
    # build the CLI and dispatch to the selected subcommand callback
    parser = CLI()
    parser.parse_and_run()
|
quatropeREPO_NAMEastroalignPATH_START.@astroalign_extracted@astroalign-master@benchmarks@time_regression.py@.PATH_END.py
|
{
"filename": "_vertexnormalsepsilon.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/isosurface/lighting/_vertexnormalsepsilon.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VertexnormalsepsilonValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``isosurface.lighting.vertexnormalsepsilon``."""

    def __init__(
        self,
        plotly_name="vertexnormalsepsilon",
        parent_name="isosurface.lighting",
        **kwargs
    ):
        # pull the defaults out of kwargs first so callers may override them
        edit_type = kwargs.pop("edit_type", "calc")
        upper = kwargs.pop("max", 1)
        lower = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(VertexnormalsepsilonValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=upper,
            min=lower,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@isosurface@lighting@_vertexnormalsepsilon.py@.PATH_END.py
|
{
"filename": "_node.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/sankey/_node.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Node(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "sankey"
_path_str = "sankey.node"
_valid_props = {
"align",
"color",
"colorsrc",
"customdata",
"customdatasrc",
"groups",
"hoverinfo",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"label",
"labelsrc",
"line",
"pad",
"thickness",
"x",
"xsrc",
"y",
"ysrc",
}
# align
# -----
@property
def align(self):
"""
Sets the alignment method used to position the nodes along the
horizontal axis.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['justify', 'left', 'right', 'center']
Returns
-------
Any
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# color
# -----
@property
def color(self):
"""
Sets the `node` color. It can be a single value, or an array
for specifying color for each `node`. If `node.color` is
omitted, then the default `Plotly` color palette will be cycled
through to have a variety of colors. These defaults are not
fully opaque, to allow some visibility of what is beneath the
node.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
    """
    Source reference on Chart Studio Cloud for the `color` values.

    The 'colorsrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["colorsrc"]

@colorsrc.setter
def colorsrc(self, value):
    self["colorsrc"] = value

# customdata
# ----------
@property
def customdata(self):
    """
    Extra data assigned to each node.

    The 'customdata' property is an array that may be given as a
    tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["customdata"]

@customdata.setter
def customdata(self, value):
    self["customdata"] = value

# customdatasrc
# -------------
@property
def customdatasrc(self):
    """
    Source reference on Chart Studio Cloud for `customdata`.

    The 'customdatasrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["customdatasrc"]

@customdatasrc.setter
def customdatasrc(self, value):
    self["customdatasrc"] = value
# groups
# ------
@property
def groups(self):
    """
    Groups of nodes; each group is an array holding the indices of
    the nodes it contains.  Multiple groups can be specified.

    The 'groups' property is an info array given as a 2D list whose
    entries 'groups[i][j]' are numbers (int or float).

    Returns
    -------
    list
    """
    return self["groups"]

@groups.setter
def groups(self, value):
    self["groups"] = value

# hoverinfo
# ---------
@property
def hoverinfo(self):
    """
    Which trace information appears when hovering nodes.  With `none`
    or `skip`, nothing is displayed upon hovering; with `none`, click
    and hover events are still fired.

    The 'hoverinfo' property is an enumeration: one of
    ['all', 'none', 'skip'].

    Returns
    -------
    Any
    """
    return self["hoverinfo"]

@hoverinfo.setter
def hoverinfo(self, value):
    self["hoverinfo"] = value
# hoverlabel
# ----------
@property
def hoverlabel(self):
    """
    The 'hoverlabel' property is an instance of Hoverlabel that may
    be specified as:

      - An instance of :class:`plotly.graph_objs.sankey.node.Hoverlabel`
      - A dict of string/value properties that will be passed to the
        Hoverlabel constructor

    Supported dict properties: `align`/`alignsrc` (horizontal
    alignment of multi-line hover text), `bgcolor`/`bgcolorsrc`
    (background color of the hover labels), `bordercolor`/
    `bordercolorsrc` (border color of the hover labels), `font`
    (font used in hover labels) and `namelength`/`namelengthsrc`
    (default trace-name length in the hover labels: -1 shows the
    whole name, 0-3 show the first 0-3 characters, and an integer >3
    shows the whole name if it fits, otherwise truncates to
    `namelength - 3` characters and adds an ellipsis).

    Returns
    -------
    plotly.graph_objs.sankey.node.Hoverlabel
    """
    return self["hoverlabel"]

@hoverlabel.setter
def hoverlabel(self, value):
    self["hoverlabel"] = value
# hovertemplate
# -------------
@property
def hovertemplate(self):
    """
    Template string used to render the hover box; overrides
    `hoverinfo`.

    Variables are inserted using %{variable}, for example "y: %{y}",
    as well as %{xother}, {%_xother}, {%_xother_}, {%xother_} (the
    "xother" forms apply when showing info for several points with
    different x positions; a leading/trailing underscore adds a space
    on that side).  Numbers are formatted with d3-format syntax
    %{variable:d3-format}, e.g. "Price: %{y:$.2f}" (see
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format), and dates
    with d3-time-format syntax %{variable|d3-time-format}, e.g.
    "Day: %{2019-01-01|%A}" (see https://github.com/d3/d3-time-
    format/tree/v2.2.3#locale_format).  Available variables are the
    event-data attributes described at
    https://plotly.com/javascript/plotlyjs-events/#event-data, every
    `arrayOk: true` attribute, the link-object arrays `sourceLinks`
    and `targetLinks`, and `value` and `label`.  Anything inside an
    `<extra>` tag is displayed in the secondary box, e.g.
    "<extra>{fullData.name}</extra>"; use an empty `<extra></extra>`
    to hide the secondary box completely.

    The 'hovertemplate' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["hovertemplate"]

@hovertemplate.setter
def hovertemplate(self, value):
    self["hovertemplate"] = value

# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
    """
    Source reference on Chart Studio Cloud for `hovertemplate`.

    The 'hovertemplatesrc' property must be given as a string or as
    a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["hovertemplatesrc"]

@hovertemplatesrc.setter
def hovertemplatesrc(self, value):
    self["hovertemplatesrc"] = value
# label
# -----
@property
def label(self):
    """
    The shown name of the node.

    The 'label' property is an array that may be given as a tuple,
    list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["label"]

@label.setter
def label(self, value):
    self["label"] = value

# labelsrc
# --------
@property
def labelsrc(self):
    """
    Source reference on Chart Studio Cloud for `label`.

    The 'labelsrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["labelsrc"]

@labelsrc.setter
def labelsrc(self, value):
    self["labelsrc"] = value
# line
# ----
@property
def line(self):
    """
    The 'line' property is an instance of Line that may be specified
    as:

      - An instance of :class:`plotly.graph_objs.sankey.node.Line`
      - A dict of string/value properties that will be passed to the
        Line constructor

    Supported dict properties: `color`/`colorsrc` (color of the
    `line` around each `node`) and `width`/`widthsrc` (width in px
    of the `line` around each `node`).

    Returns
    -------
    plotly.graph_objs.sankey.node.Line
    """
    return self["line"]

@line.setter
def line(self, value):
    self["line"] = value
# pad
# ---
@property
def pad(self):
    """
    Padding (in px) between the `nodes`.

    The 'pad' property is a number: an int or float in the interval
    [0, inf].

    Returns
    -------
    int|float
    """
    return self["pad"]

@pad.setter
def pad(self, value):
    self["pad"] = value

# thickness
# ---------
@property
def thickness(self):
    """
    Thickness (in px) of the `nodes`.

    The 'thickness' property is a number: an int or float in the
    interval [1, inf].

    Returns
    -------
    int|float
    """
    return self["thickness"]

@thickness.setter
def thickness(self, value):
    self["thickness"] = value
# x
# -
@property
def x(self):
    """
    Normalized horizontal position of the node.

    The 'x' property is an array that may be given as a tuple, list,
    numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["x"]

@x.setter
def x(self, value):
    self["x"] = value

# xsrc
# ----
@property
def xsrc(self):
    """
    Source reference on Chart Studio Cloud for `x`.

    The 'xsrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["xsrc"]

@xsrc.setter
def xsrc(self, value):
    self["xsrc"] = value

# y
# -
@property
def y(self):
    """
    Normalized vertical position of the node.

    The 'y' property is an array that may be given as a tuple, list,
    numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["y"]

@y.setter
def y(self, value):
    self["y"] = value

# ysrc
# ----
@property
def ysrc(self):
    """
    Source reference on Chart Studio Cloud for `y`.

    The 'ysrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["ysrc"]

@ysrc.setter
def ysrc(self, value):
    self["ysrc"] = value
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Plain-text property summaries consumed by plotly's generated
    # documentation/help machinery; the string content mirrors the
    # individual property docstrings and is kept verbatim.
    return """\
        align
            Sets the alignment method used to position the nodes
            along the horizontal axis.
        color
            Sets the `node` color. It can be a single value, or an
            array for specifying color for each `node`. If
            `node.color` is omitted, then the default `Plotly`
            color palette will be cycled through to have a variety
            of colors. These defaults are not fully opaque, to
            allow some visibility of what is beneath the node.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        customdata
            Assigns extra data to each node.
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        groups
            Groups of nodes. Each group is defined by an array with
            the indices of the nodes it contains. Multiple groups
            can be specified.
        hoverinfo
            Determines which trace information appear when hovering
            nodes. If `none` or `skip` are set, no information is
            displayed upon hovering. But, if `none` is set, click
            and hover events are still fired.
        hoverlabel
            :class:`plotly.graph_objects.sankey.node.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Variables `sourceLinks` and
            `targetLinks` are arrays of link objects.Finally, the
            template string has access to variables `value` and
            `label`. Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        label
            The shown name of the node.
        labelsrc
            Sets the source reference on Chart Studio Cloud for
            `label`.
        line
            :class:`plotly.graph_objects.sankey.node.Line` instance
            or dict with compatible properties
        pad
            Sets the padding (in px) between the `nodes`.
        thickness
            Sets the thickness (in px) of the `nodes`.
        x
            The normalized horizontal position of the node.
        xsrc
            Sets the source reference on Chart Studio Cloud for
            `x`.
        y
            The normalized vertical position of the node.
        ysrc
            Sets the source reference on Chart Studio Cloud for
            `y`.
        """
def __init__(
    self,
    arg=None,
    align=None,
    color=None,
    colorsrc=None,
    customdata=None,
    customdatasrc=None,
    groups=None,
    hoverinfo=None,
    hoverlabel=None,
    hovertemplate=None,
    hovertemplatesrc=None,
    label=None,
    labelsrc=None,
    line=None,
    pad=None,
    thickness=None,
    x=None,
    xsrc=None,
    y=None,
    ysrc=None,
    **kwargs,
):
    """
    Construct a new Node object.

    The nodes of the Sankey plot.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or an
        instance of :class:`plotly.graph_objs.sankey.Node`
    align, color, colorsrc, customdata, customdatasrc, groups,
    hoverinfo, hoverlabel, hovertemplate, hovertemplatesrc, label,
    labelsrc, line, pad, thickness, x, xsrc, y, ysrc
        Initial values for the correspondingly named Node properties;
        see each property's docstring for the accepted values.
        ``None`` values are ignored.

    Returns
    -------
    Node
    """
    super(Node, self).__init__("node")

    # Internal fast path used when re-parenting an existing object.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Validate arg
    # ------------
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        # Shallow-copy so the pops below do not mutate the caller's dict.
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.sankey.Node
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Node`"""
        )

    # Handle skip_invalid
    # -------------------
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate data dict with properties
    # ----------------------------------
    # Each property value is taken from ``arg`` (popped, so leftovers
    # flow into _process_kwargs below) unless an explicit keyword
    # argument overrides it; ``None`` values are skipped entirely.
    explicit = dict(
        align=align,
        color=color,
        colorsrc=colorsrc,
        customdata=customdata,
        customdatasrc=customdatasrc,
        groups=groups,
        hoverinfo=hoverinfo,
        hoverlabel=hoverlabel,
        hovertemplate=hovertemplate,
        hovertemplatesrc=hovertemplatesrc,
        label=label,
        labelsrc=labelsrc,
        line=line,
        pad=pad,
        thickness=thickness,
        x=x,
        xsrc=xsrc,
        y=y,
        ysrc=ysrc,
    )
    for prop, override in explicit.items():
        candidate = arg.pop(prop, None)
        if override is not None:
            candidate = override
        if candidate is not None:
            self[prop] = candidate

    # Process unknown kwargs
    # ----------------------
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    # ------------------
    self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@sankey@_node.py@.PATH_END.py
|
{
"filename": "test_preprocess.py",
"repo_name": "quatrope/feets",
"repo_path": "feets_extracted/feets-master/tests/test_preprocess.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
"""All feets preprocess tests"""
# =============================================================================
# IMPORTS
# =============================================================================
from feets import preprocess
import numpy as np
# =============================================================================
# NOISE
# =============================================================================


def test_remove_noise():
    """An obvious outlier with a large error estimate must be dropped."""
    rng = np.random.RandomState(42)
    time = np.arange(5)
    mag = rng.rand(5)
    mag[-1] = np.mean(mag) + 100 * np.std(mag)
    error = np.zeros(5)
    error[-1] = 10

    ptime, pmag, perror = preprocess.remove_noise(time, mag, error)

    # The last sample (the noisy one) is removed; everything else is kept.
    for processed, original in ((ptime, time), (pmag, mag), (perror, error)):
        assert len(processed) == len(original) - 1
        np.testing.assert_array_equal(processed, original[:-1])


def test_remove_noise_low_error():
    """An outlier whose error estimate is small must be kept."""
    rng = np.random.RandomState(42)
    time = np.arange(5)
    mag = rng.rand(5)
    mag[-1] = np.mean(mag) + 100 * np.std(mag)
    error = np.zeros(5)

    ptime, pmag, perror = preprocess.remove_noise(time, mag, error)

    for processed, original in ((ptime, time), (pmag, mag), (perror, error)):
        assert len(processed) == len(original)
        np.testing.assert_array_equal(processed, original)


def test_remove_noise_no_outlier():
    """A clean light curve passes through unchanged."""
    rng = np.random.RandomState(42)
    time = np.arange(5)
    mag = rng.rand(5)
    error = np.zeros(5)

    ptime, pmag, perror = preprocess.remove_noise(time, mag, error)

    for processed, original in ((ptime, time), (pmag, mag), (perror, error)):
        assert len(processed) == len(original)
        np.testing.assert_array_equal(processed, original)
# =============================================================================
# ALIGN
# =============================================================================


def test_align():
    """Aligning a shuffled copy of the same series recovers matching data."""
    rng = np.random.RandomState(42)
    time = np.arange(5)
    mag = rng.rand(5)
    error = rng.rand(5)

    time2 = np.arange(5)
    rng.shuffle(time2)
    mag2 = mag[time2]
    error2 = error[time2]

    atime, amag, amag2, aerror, aerror2 = preprocess.align(
        time, time2, mag, mag2, error, error2
    )

    np.testing.assert_array_equal(amag, amag2)
    assert any(np.array_equal(amag, m) for m in (mag, mag2))
    assert any(np.array_equal(amag2, m) for m in (mag, mag2))
    np.testing.assert_array_equal(aerror, aerror2)
    assert any(np.array_equal(aerror, e) for e in (error, error2))
    assert any(np.array_equal(aerror2, e) for e in (error, error2))


def test_align_different_len():
    """Series of unequal length align on their shared time stamps."""
    rng = np.random.RandomState(42)
    time = np.arange(5)
    mag = rng.rand(5)
    error = rng.rand(5)

    time2 = np.arange(6)
    rng.shuffle(time2)
    mag2 = np.hstack((mag, rng.rand(1)))[time2]
    error2 = np.hstack((error, rng.rand(1)))[time2]

    atime, amag, amag2, aerror, aerror2 = preprocess.align(
        time, time2, mag, mag2, error, error2
    )

    np.testing.assert_array_equal(amag, amag2)
    np.testing.assert_array_equal(aerror, aerror2)
|
quatropeREPO_NAMEfeetsPATH_START.@feets_extracted@feets-master@tests@test_preprocess.py@.PATH_END.py
|
{
"filename": "test_metadata.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/utils/metadata/tests/test_metadata.py",
"type": "Python"
}
|
from collections import OrderedDict, defaultdict
from dataclasses import dataclass
import numpy as np
import pytest
from astropy.io import fits
from astropy.utils import metadata
from astropy.utils.metadata import (
MergeConflictError,
MetaData,
common_dtype,
enable_merge_strategies,
merge,
)
class OrderedDictSubclass(OrderedDict):
    """Trivial OrderedDict subclass used to verify meta preserves type."""
class MetaBaseTest:
    """Shared test suite for classes exposing a ``meta`` attribute.

    Subclasses must define ``test_class`` (the class under test) and
    ``args`` (positional arguments for its constructor).
    """

    def test_none(self):
        # With no meta supplied, an empty dict-like meta is created.
        d = self.test_class(*self.args)
        assert isinstance(d.meta, dict)
        assert len(d.meta) == 0

    @pytest.mark.parametrize(
        "meta",
        ([{"a": 1}, OrderedDict([("a", 1)]), OrderedDictSubclass([("a", 1)])]),
    )
    def test_mapping_init(self, meta):
        # Mapping inputs are accepted and their concrete type is preserved.
        d = self.test_class(*self.args, meta=meta)
        assert type(d.meta) == type(meta)
        assert d.meta["a"] == 1

    @pytest.mark.parametrize("meta", (["ceci n'est pas un meta", 1.2, [1, 2, 3]]))
    def test_non_mapping_init(self, meta):
        # Non-mapping inputs must be rejected at construction time.
        with pytest.raises(TypeError):
            self.test_class(*self.args, meta=meta)

    @pytest.mark.parametrize(
        "meta",
        ([{"a": 1}, OrderedDict([("a", 1)]), OrderedDictSubclass([("a", 1)])]),
    )
    def test_mapping_set(self, meta):
        # Same checks as test_mapping_init, via the meta keyword.
        d = self.test_class(*self.args, meta=meta)
        assert type(d.meta) == type(meta)
        assert d.meta["a"] == 1

    @pytest.mark.parametrize("meta", (["ceci n'est pas un meta", 1.2, [1, 2, 3]]))
    def test_non_mapping_set(self, meta):
        with pytest.raises(TypeError):
            d = self.test_class(*self.args, meta=meta)

    def test_meta_fits_header(self):
        # A FITS header is mapping-like and therefore a valid meta value.
        header = fits.header.Header()
        header.set("observer", "Edwin Hubble")
        header.set("exptime", "3600")
        d = self.test_class(*self.args, meta=header)
        assert d.meta["OBSERVER"] == "Edwin Hubble"
class ExampleData:
    # Class-level MetaData descriptor; each instance gets its own mapping.
    meta = MetaData()

    def __init__(self, meta=None):
        self.meta = meta


class TestMetaExampleData(MetaBaseTest):
    # Run the shared MetaBaseTest suite against a plain class.
    test_class = ExampleData
    args = ()


@dataclass
class ExampleDataclass:
    # MetaData descriptor used as a dataclass default.
    meta: MetaData = MetaData()  # noqa: RUF009


class TestMetaExampleDataclass(MetaBaseTest):
    # Run the shared suite against a mutable dataclass.
    test_class = ExampleDataclass
    args = ()


@dataclass(frozen=True)
class ExampleFrozenDataclass:
    meta: MetaData = MetaData()  # noqa: RUF009


class TestMetaExampleFrozenDataclass(MetaBaseTest):
    # Run the shared suite against a frozen dataclass.
    test_class = ExampleFrozenDataclass
    args = ()
def test_metadata_default():
    """The default meta container is an OrderedDict."""
    data = ExampleDataclass()
    data.meta["a"] = 1
    assert isinstance(data.meta, OrderedDict)


def test_metadata_default_factory():
    """Test the default_factory argument to MetaData."""

    class ExampleData:
        meta = MetaData(default_factory=defaultdict)

        def __init__(self, meta=None):
            self.meta = meta

    instance = ExampleData()
    assert isinstance(instance.meta, defaultdict)
    assert not instance.meta


def test_metadata_merging_conflict_exception():
    """Regression test for issue #3294.

    Ensure that an exception is raised when a metadata conflict exists
    and ``metadata_conflicts='error'`` has been set.
    """
    left = ExampleData()
    right = ExampleData()
    left.meta["somekey"] = {"x": 1, "y": 1}
    right.meta["somekey"] = {"x": 1, "y": 999}
    with pytest.raises(MergeConflictError):
        merge(left.meta, right.meta, metadata_conflicts="error")
def test_metadata_merging():
    """Exercise merge() across nested dicts, ndarrays and mixed types."""
    # Recursive merge
    meta1 = {
        "k1": {
            "k1": [1, 2],
            "k2": 2,
        },
        "k2": 2,
        "k4": (1, 2),
    }
    meta2 = {
        "k1": {"k1": [3]},
        "k3": 3,
        "k4": (3,),
    }
    out = merge(meta1, meta2, metadata_conflicts="error")
    assert out == {
        "k1": {
            "k2": 2,
            "k1": [1, 2, 3],
        },
        "k2": 2,
        "k3": 3,
        "k4": (1, 2, 3),
    }

    # Merge two ndarrays
    meta1 = {"k1": np.array([1, 2])}
    meta2 = {"k1": np.array([3])}
    out = merge(meta1, meta2, metadata_conflicts="error")
    assert np.all(out["k1"] == np.array([1, 2, 3]))

    # Merge list and np.ndarray
    meta1 = {"k1": [1, 2]}
    meta2 = {"k1": np.array([3])}
    # BUG FIX: the original omitted this merge() call, so the assertion
    # below silently re-checked the stale `out` from the previous case.
    out = merge(meta1, meta2, metadata_conflicts="error")
    assert np.all(out["k1"] == np.array([1, 2, 3]))

    # Can't merge two scalar types
    meta1 = {"k1": 1}
    meta2 = {"k1": 2}
    with pytest.raises(MergeConflictError):
        merge(meta1, meta2, metadata_conflicts="error")

    # Conflicting shape
    meta1 = {"k1": np.array([1, 2])}
    meta2 = {"k1": np.array([[3]])}
    with pytest.raises(MergeConflictError):
        merge(meta1, meta2, metadata_conflicts="error")

    # Conflicting array type
    meta1 = {"k1": np.array([1, 2])}
    meta2 = {"k1": np.array(["3"])}
    with pytest.raises(MergeConflictError):
        merge(meta1, meta2, metadata_conflicts="error")

    # Conflicting array type with 'silent' merging: right side wins
    meta1 = {"k1": np.array([1, 2])}
    meta2 = {"k1": np.array(["3"])}
    out = merge(meta1, meta2, metadata_conflicts="silent")
    assert np.all(out["k1"] == np.array(["3"]))
def test_metadata_merging_new_strategy():
    """Custom MergeStrategy subclasses can be enabled temporarily."""
    original_merge_strategies = list(metadata.MERGE_STRATEGIES)

    class MergeNumbersAsList(metadata.MergeStrategy):
        """
        Scalar float or int values are joined in a list.
        """

        types = ((int, float), (int, float))

        @classmethod
        def merge(cls, left, right):
            return [left, right]

    class MergeConcatStrings(metadata.MergePlus):
        """
        Scalar string values are concatenated
        """

        types = (str, str)
        enabled = False

    # BUG FIX: restore the module-level strategy registry in a finally
    # block, so a failing assertion cannot leak the custom strategies
    # into unrelated tests.
    try:
        # Normally can't merge two scalar types
        meta1 = {"k1": 1, "k2": "a"}
        meta2 = {"k1": 2, "k2": "b"}

        # Enable new merge strategy
        with enable_merge_strategies(MergeNumbersAsList, MergeConcatStrings):
            assert MergeNumbersAsList.enabled
            assert MergeConcatStrings.enabled
            out = merge(meta1, meta2, metadata_conflicts="error")
        assert out["k1"] == [1, 2]
        assert out["k2"] == "ab"
        assert not MergeNumbersAsList.enabled
        assert not MergeConcatStrings.enabled

        # Confirm the default enabled=False behavior
        with pytest.raises(MergeConflictError):
            merge(meta1, meta2, metadata_conflicts="error")

        # Enable all MergeStrategy subclasses
        with enable_merge_strategies(metadata.MergeStrategy):
            assert MergeNumbersAsList.enabled
            assert MergeConcatStrings.enabled
            out = merge(meta1, meta2, metadata_conflicts="error")
        assert out["k1"] == [1, 2]
        assert out["k2"] == "ab"
        assert not MergeNumbersAsList.enabled
        assert not MergeConcatStrings.enabled
    finally:
        metadata.MERGE_STRATEGIES = original_merge_strategies
def test_common_dtype_string():
    """The common dtype of string arrays takes the widest itemsize."""
    u3 = np.array(["123"])
    u4 = np.array(["1234"])
    b3 = np.array([b"123"])
    b5 = np.array([b"12345"])
    cases = (
        ((u3, u4), "U4"),  # unicode + unicode -> widest unicode
        ((b5, u4), "U5"),  # bytes + unicode -> unicode, widest size
        ((b3, b5), "S5"),  # bytes + bytes -> widest bytes
    )
    for arrays, suffix in cases:
        assert common_dtype(list(arrays)).endswith(suffix)


def test_common_dtype_basic():
    """Numeric dtypes promote; mixing numbers and strings conflicts."""
    i8 = np.array(1, dtype=np.int64)
    f8 = np.array(1, dtype=np.float64)
    u3 = np.array("123")

    with pytest.raises(MergeConflictError):
        common_dtype([i8, u3])

    assert common_dtype([i8, i8]).endswith("i8")
    assert common_dtype([i8, f8]).endswith("f8")
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@utils@metadata@tests@test_metadata.py@.PATH_END.py
|
{
"filename": "irafpar.py",
"repo_name": "iraf-community/pyraf",
"repo_path": "pyraf_extracted/pyraf-main/pyraf/irafpar.py",
"type": "Python"
}
|
"""irafpar.py -- parse IRAF .par files and create lists of IrafPar objects
R. White, 2000 January 7
"""
import copy
import glob
import os
import re
import types
from .tools import minmatch, irafutils, taskpars, basicpar
from .tools.irafglobals import INDEF, Verbose, yes, no
from .tools.basicpar import (warning, _StringMixin, IrafPar, IrafParS,
_cmdlineFlag)
# also import basicpar.IrafPar* class names for cached scripts
from .tools.basicpar import (IrafParB, IrafParI, IrafParR, IrafParAB,
IrafParAI, IrafParAR, IrafParAS)
from . import iraf
# -----------------------------------------------------
# IRAF parameter factory
# -----------------------------------------------------

# List-style parameter type codes that map to IrafParLS
_string_list_types = ('*struct', '*s', '*f', '*i')


def IrafParFactory(fields, strict=0):
    """IRAF parameter factory

    fields is a list of the comma-separated fields (as in the .par file).
    Each entry is a string or None (indicating that field was omitted.)

    Set the strict parameter to a non-zero value to do stricter parsing
    (to find errors in the input.)"""

    # Sanity check: the first three fields (name, type, mode) are required.
    if len(fields) < 3 or None in fields[0:3]:
        raise SyntaxError("At least 3 fields must be given")
    # Renamed from `type` to avoid shadowing the builtin.
    par_type = fields[1]

    # Handle special PyRAF/IRAF types, otherwise default to the standard types
    if par_type in _string_list_types:
        return IrafParLS(fields, strict)
    elif par_type in ("*gcur", "gcur"):
        return IrafParGCur(fields, strict)
    elif par_type in ("*imcur", "imcur"):
        return IrafParImCur(fields, strict)
    elif par_type in ("*ukey", "ukey"):
        return IrafParUKey(fields, strict)
    elif par_type == "pset":
        return IrafParPset(fields, strict)
    else:
        return basicpar.parFactory(fields, strict)
# -----------------------------------------------------
# make an IrafPar variable (another factory function,
# using more descriptive notation for characteristics)
# -----------------------------------------------------

# dictionary mapping verbose types to short par-file types
# Used by makeIrafPar to translate descriptive datatype names into the
# short type codes found in IRAF .par files.
_typedict = {
    'string': 's',
    'char': 's',
    'file': 'f',
    'struct': 'struct',
    'int': 'i',
    'bool': 'b',
    'real': 'r',
    'double': 'd',
    'gcur': 'gcur',
    'imcur': 'imcur',
    'ukey': 'ukey',
    'pset': 'pset',
}
def makeIrafPar(init_value,
                datatype=None,
                name="<anonymous>",
                mode="h",
                array_size=None,
                list_flag=0,
                min=None,
                max=None,
                enum=None,
                prompt="",
                strict=0,
                filename=None):
    """Create an IrafPar variable

    init_value  initial value (a scalar, or a flat sequence for arrays;
                an existing IrafPar is returned unchanged)
    datatype    verbose type name; must be a key of _typedict
    name        parameter name
    mode        parameter mode string (e.g. "h" for hidden)
    array_size  None for a scalar, or an int / tuple of ints for arrays
    list_flag   if true, make this a list parameter ('*' type)
    min, max    range limits; the min slot doubles as the enum slot when
                enum is given and min is None
    enum        choice list
    prompt      prompt string
    strict      passed through to the parameter parser
    filename    deprecated; ignored except for a warning

    Raises ValueError for a missing datatype, a mismatched array initial
    value, or a value rejected by the parameter parser.
    """
    # Deprecation note - after 1.6 is released, remove the arg and this note
    if (filename is not None and len(filename) > 0 and
            filename != 'string_proc'):
        warning("Use of filename arg in makeIrafPar is rather deprecated\n" +
                ", filename = \'" + filename + "'",
                level=-1)

    # if init_value is already an IrafPar, just return it
    # XXX Could check parameters to see if they are ok
    if isinstance(init_value, IrafPar):
        return init_value

    # XXX Enhance this to determine datatype from init_value if it is omitted
    # XXX Could use _typedict.get(datatype,datatype) to allow short types to be used
    if datatype is None:
        raise ValueError("datatype must be specified")

    shorttype = _typedict[datatype]
    if array_size is None:
        shape = None
    else:
        shorttype = "a" + shorttype
        # array_size can be an integer or a tuple
        # get a tuple shape and make array_size the
        # combined size of all dimensions
        try:
            shape = tuple(array_size)
        except TypeError:
            shape = (array_size,)
        array_size = 1
        for d in shape:
            array_size = array_size * d
    if list_flag:
        shorttype = "*" + shorttype

    # messy stuff -- construct strings like we would read
    # from .par file for this parameter
    if shape is None:
        # scalar parameter
        fields = [name, shorttype, mode, init_value, min, max, prompt]
        if fields[4] is None:
            # the min slot doubles as the enumeration list
            fields[4] = enum
    else:
        # N-dimensional array parameter
        fields = [
            name,
            shorttype,
            mode,
            str(len(shape)),  # number of dims
        ]
        for d in shape:
            fields.extend([
                d,  # dimension
                "1"
            ])  # apparently always 1
        if min is None:
            fields.extend([enum, max, prompt])
        else:
            fields.extend([min, max, prompt])
        if init_value is not None:
            if len(init_value) != array_size:
                raise ValueError("Initial value list does not match array "
                                 f"size for parameter `{name}'")
            for iv in init_value:
                fields.append(iv)
        else:
            fields = fields + array_size * [None]
    # everything is handed to the parser as strings, as if read from a file
    for i in range(len(fields)):
        if fields[i] is not None:
            fields[i] = str(fields[i])
    try:
        return IrafParFactory(fields, strict=strict)
    except ValueError as e:
        # chain the original error explicitly so the root cause survives
        errmsg = f"Bad value for parameter `{name}'\n{str(e)}"
        raise ValueError(errmsg) from e
# -----------------------------------------------------
# IRAF pset parameter class
# -----------------------------------------------------
class IrafParPset(IrafParS):
    """IRAF pset parameter class

    A pset parameter's value names another task (or a .par file) whose
    parameter list is logically attached to the owning task's list.
    """

    def __init__(self, fields, strict=0):
        IrafParS.__init__(self, fields, strict)
        # omitted pset parameters default to null string
        if self.value is None:
            self.value = ""

    def get(self,
            field=None,
            index=None,
            lpar=0,
            prompt=1,
            native=0,
            mode="h"):
        """Return pset value (IrafTask object)

        Psets are scalar, so any index is an error.  With field set,
        the requested parameter field is returned; with lpar set, the
        raw string value is returned instead of a task object.
        """
        if index:
            raise SyntaxError("Parameter " + self.name +
                              " is pset, cannot use index")
        if field:
            return self._getField(field)
        if lpar:
            return str(self.value)
        # assume there are no query or indirection pset parameters
        # see if parameter value has .par extension, if so, it is a file name
        f = self.value.split('.')
        if len(f) > 1 and f[-1] == 'par':
            # must be a file name
            from .iraffunctions import IrafTaskFactory
            irf_val = iraf.Expand(self.value)
            return IrafTaskFactory(taskname=irf_val.split(".")[0],
                                   value=irf_val)
        else:
            # must be a task name
            if self.value:
                # The normal case here is that the value is a task name string
                # so we get & return that task.  There is a quirky case where
                # in some CL scripts (e.g. ccdproc.cl), the CL script writers
                # use this place as a temporary spot to store values; handle
                # that by falling back to the pset's own name.
                if self.value.startswith('<') and self.value.endswith(
                        '>') and self.name in self.value:
                    # don't lookup task for self.value, it is something like:
                    # "<IrafCLTask ccdproc (mscsrc$ccdproc.cl) Pkg: mscred Bin: mscbin$>"
                    return iraf.getTask(self.name)
                    # this is only a safe assumption to make in a PSET
                else:
                    return iraf.getTask(self.value)
            else:
                return iraf.getTask(self.name)
# -----------------------------------------------------
# IRAF list parameter base class
# -----------------------------------------------------
class IrafParL(_StringMixin, IrafPar):
    """IRAF list parameter base class

    A list parameter ('*' type) yields successive values: from the file
    named by its value when the value is non-null, otherwise from the
    subclass-specific _getNextValue() hook (prompt, cursor read, etc.).
    """

    def __init__(self, fields, strict=0):
        IrafPar.__init__(self, fields, strict)
        # Bookkeeping slots are stored directly in __dict__ (presumably to
        # bypass attribute handling in the par classes -- TODO confirm).
        # filehandle for input file
        self.__dict__['fh'] = None
        # lines used to store input when not reading from a tty
        self.__dict__['lines'] = None
        # flag indicating error message has been printed if file does not exist
        # message only gets printed once for each file
        self.__dict__['errMsg'] = 0
        # omitted list parameters default to null string
        if self.value is None:
            self.value = ""

    # --------------------------------------------
    # public methods
    # --------------------------------------------

    def set(self, value, field=None, index=None, check=1):
        """Set value of this parameter from a string or other value.
        Field is optional parameter field (p_prompt, p_minimum, etc.)
        Index is optional array index (zero-based). Set check=0 to
        assign the value without checking to see if it is within
        the min-max range or in the choice list."""
        if index is not None:
            raise SyntaxError("Parameter " + self.name + " is not an array")
        if field:
            self._setField(value, field, check=check)
        else:
            if check:
                self.value = self.checkValue(value)
            else:
                self.value = self._coerceValue(value)
            self.setChanged()
            # close file if it is open -- a new value invalidates any
            # buffered input from the previous file
            if self.fh:
                try:
                    self.fh.close()
                except OSError:
                    pass
                self.fh = None
            self.lines = None
            self.errMsg = 0

    def get(self,
            field=None,
            index=None,
            lpar=0,
            prompt=1,
            native=0,
            mode="h"):
        """Return value of this parameter as a string (or in native format
        if native is non-zero.)

        Each call consumes the next value: a line from the associated
        file when the value names one, otherwise whatever the subclass's
        _getNextValue() produces.  Raises EOFError when the file is
        exhausted.
        """
        if field:
            return self._getField(field, native=native, prompt=prompt)
        if lpar:
            if self.value is None and native == 0:
                return ""
            else:
                return self.value
        # assume there are no query or indirection list parameters
        if index is not None:
            raise SyntaxError("Parameter " + self.name + " is not an array")
        if self.value:
            # non-null value means we're reading from a file
            try:
                if not self.fh:
                    self.fh = open(iraf.Expand(self.value), errors="ignore")
                    if self.fh.isatty():
                        # tty input is read line-by-line on demand
                        self.lines = None
                    else:
                        # read lines in a block
                        # reverse to make pop more convenient & faster
                        self.lines = self.fh.readlines()
                        self.lines.reverse()
                if self.lines is None:
                    value = self.fh.readline()
                elif self.lines:
                    value = self.lines.pop()
                else:
                    value = ''
                if not value:
                    # EOF -- raise exception
                    raise EOFError(f"EOF from list parameter `{self.name}'")
                if value[-1:] == "\n":
                    value = value[:-1]
            except OSError as e:
                if not self.errMsg:
                    warning(f"Unable to read values for list parameter "
                            f"`{self.name}' from file `{self.value}'\n{str(e)}",
                            level=-1)
                    # only print message one time
                    self.errMsg = 1
                # fall back on default behavior if file is not readable
                value = self._getNextValue()
        else:
            # if self.value is null, use the special _getNextValue method
            # (which should always return a string)
            if prompt:
                value = self._getNextValue()
            else:
                return self.value
        if native:
            return self._coerceValue(value)
        else:
            return value

    # --------------------------------------------
    # private methods
    # --------------------------------------------

    # Use _getNextValue() method to implement a particular type

    def _getNextValue(self):
        """Return a string with next value"""
        raise RuntimeError("Bug: base class IrafParL cannot be used directly")

    def _getPFilename(self, native, prompt):
        """Get p_filename field for this parameter (returns filename)"""
        # XXX is this OK? should we check for self.value==None?
        return self.value

    def _getPType(self):
        """Get underlying datatype for this parameter

        Strip off '*' from list params
        """
        return self.type[1:]
# -----------------------------------------------------
# IRAF string list parameter class
# -----------------------------------------------------
class IrafParLS(IrafParL):
    """IRAF string list parameter class"""

    def _getNextValue(self):
        """Prompt the user and return the next string value"""
        # remember the current state so a failed/odd prompt cannot
        # clobber the parameter (this may be called because the
        # filename itself was in error)
        previous_value = self.value
        previous_err = self.errMsg
        try:
            # clear the value so no default shows up in the prompt
            self.value = None
            self.getWithPrompt()
            return self.value
        finally:
            # always restore the original state
            self.value = previous_value
            self.errMsg = previous_err
# -----------------------------------------------------
# IRAF cursor parameter class
# -----------------------------------------------------
class IrafParCursor(IrafParL):
    """Base class for cursor parameters"""

    def _coerceOneValue(self, value, strict=0):
        """Coerce one value; another cursor par stands in for its filename"""
        if not isinstance(value, IrafParCursor):
            return IrafParL._coerceOneValue(self, value, strict)
        return value.p_filename
# -----------------------------------------------------
# IRAF gcur (graphics cursor) parameter class
# -----------------------------------------------------
class IrafParGCur(IrafParCursor):
    """IRAF graphics cursor parameter class"""

    def _getNextValue(self):
        """Return next graphics cursor value"""
        from . import gki  # lazy import - reduce circular imports on startup
        return gki.kernel.gcur()
# -----------------------------------------------------
# IRAF imcur (image display cursor) parameter class
# -----------------------------------------------------
class IrafParImCur(IrafParCursor):
    """IRAF image display cursor parameter class"""

    def _getNextValue(self):
        """Return next image display cursor value"""
        from . import irafimcur  # lazy import - reduce circular imports on startup
        return irafimcur.imcur()
# -----------------------------------------------------
# IRAF ukey (user typed key) parameter class
# -----------------------------------------------------
class IrafParUKey(IrafParL):
    """IRAF user typed key parameter class"""

    def _getNextValue(self):
        """Return next typed character"""
        from . import irafukey  # lazy import - reduce circular imports on startup
        return irafukey.ukey()
# -----------------------------------------------------
# IRAF parameter list synchronized to disk file
# -----------------------------------------------------
from . import filecache
class ParCache(filecache.FileCache):
    """Parameter cache that updates from .par file when necessary"""

    def __init__(self, filename, parlist, strict=0):
        # parlist (when given) supplies the initial parameters; the .par
        # file takes over on subsequent updates (see updateValue)
        self.initparlist = parlist
        # special filename used by cl2py
        if filename is None or filename == 'string_proc':
            filename = ''
        try:
            filecache.FileCache.__init__(self, filename)
        except OSError:
            # whoops, couldn't open that file
            # start with a null file instead unless strict is set
            if strict:
                raise
            filename = ''
            filecache.FileCache.__init__(self, filename)

    def getValue(self):
        """Return the cached (pars, pardict, psetlist) triple"""
        return self.pars, self.pardict, self.psetlist

    def newValue(self):
        """Called to create initial value"""
        # initparlist dominates .par file during initialization
        if self.initparlist is not None:
            self.pars = self.initparlist
        elif self.filename:
            # _readpar is defined elsewhere in this module; presumably it
            # parses the .par file into a list of IrafPar objects
            self.pars = _readpar(self.filename)
        else:
            # create empty list if no filename is specified
            self.pars = []
        # build auxiliary attributes from pars list
        self._buildFromPars()

    def updateValue(self):
        """Initialize parameter list from parameter file"""
        if self.filename:
            # .par file dominates initparlist on update
            self.pars = _readpar(self.filename)
        elif self.initparlist is not None:
            self.pars = self.initparlist
        else:
            # create empty list if no filename is specified
            self.pars = []
        # build auxiliary attributes from pars list
        self._buildFromPars()

    def _buildFromPars(self):
        """Rebuild pardict and psetlist from the pars list"""
        # build minmatch dictionary of all parameters, including
        # those in psets
        self.pardict = minmatch.MinMatchDict()
        psetlist = []
        for p in self.pars:
            self.pardict.add(p.name, p)
            if isinstance(p, IrafParPset):
                psetlist.append(p)
        # add mode, $nargs to parameter list if not already present
        if not self.pardict.has_exact_key("mode"):
            p = makeIrafPar("al", name="mode", datatype="string", mode="h")
            self.pars.append(p)
            self.pardict.add(p.name, p)
        if not self.pardict.has_exact_key("$nargs"):
            p = makeIrafPar(0, name="$nargs", datatype="int", mode="h")
            self.pars.append(p)
            self.pardict.add(p.name, p)
        # save the list of pset parameters
        # Defer adding the parameters until later because saved parameter
        # sets may not be defined yet when restoring from save file.
        self.psetlist = psetlist
# -----------------------------------------------------
# IRAF parameter list class
# -----------------------------------------------------
# Note that all methods are mixed case and all attributes are private
# (start with __) to avoid conflicts with parameter names
class IrafParList(taskpars.TaskPars):
    """List of Iraf parameters"""

    def __init__(self, taskname, filename="", parlist=None):
        """Create a parameter list for task taskname

        If parlist is specified, uses it as a list of IrafPar objects.
        Else if filename is specified, reads a .par file.
        If neither is specified, generates a default list.
        """
        self.__pars = []
        self.__hasPsets = False
        self.__psets2merge = None  # is a list when populated
        self.__psetLock = False
        self.__filename = filename
        self.__name = taskname
        self.__filecache = ParCache(filename, parlist)
        # initialize parameter list
        self.Update()

    def Update(self):
        """Check to make sure this list is in sync with parameter file"""
        self.__pars, self.__pardict, self.__psets2merge = \
            self.__filecache.get()
        if self.__psets2merge:
            self.__addPsetParams()

    def setFilename(self, filename):
        """Change filename and create ParCache object

        Retains current parameter values until an unlearn is done
        """
        # accept an open filehandle in place of a name
        if hasattr(filename, 'name') and hasattr(filename, 'read'):
            filename = filename.name
        if isinstance(filename, str):
            root, ext = os.path.splitext(filename)
            if ext != ".par":
                # Only .par files are used as basis for parameter cache -- see if there
                # is one
                # Note that parameters specified in CL scripts are automatically updated
                # when the script changes
                filename = root + ".par"
                if not os.path.exists(filename):
                    filename = ""
        else:
            filename = ""
        if self.__filename != filename:
            if filename:
                # it is an error if this file does not exist
                self.__filecache = ParCache(filename, None, strict=1)
            else:
                # for null filename, default parameter list is fixed
                self.__filecache = ParCache(filename, self.__pars)
            self.__filename = filename

    def __addPsetParams(self):
        """Merge pset parameters into the parameter lists.

        Developer note - the original intention of this may have been to ensure
        that the pset par which appears in this list is NOT a copy of the
        original par (from the pset) but a reference to the same object, and if
        so, that would make things work smoothly, but it was found in Feb of
        2013 that this is not happening correctly, and may be an unsafe plan.
        Therefore the code was changed to allow clients to access both copies;
        see getParObjects() and any related code. """
        # return immediately if they have already been added or
        # if we are in the midst of a recursive call tree
        if self.__psetLock or self.__psets2merge is None:
            return

        # otherwise, merge in any PSETs
        if len(self.__psets2merge) > 0:
            self.__hasPsets = True  # never reset
        self.__psetLock = True  # prevent us from coming in recursively

        # Work from the pset's pardict because then we get
        # parameters from nested psets too
        for p in self.__psets2merge:
            # silently ignore parameters from psets that already are defined
            psetdict = p.get().getParDict()
            for pname in psetdict.keys():
                if not self.__pardict.has_exact_key(pname):
                    self.__pardict.add(pname, psetdict[pname])

        # back to normal state
        self.__psets2merge = None
        self.__psetLock = False

    def addParam(self, p):
        """Add a parameter to the list"""
        if not isinstance(p, IrafPar):
            # type(p).__name__ is the class name for instances and the type
            # name otherwise; the former Python-2 types.InstanceType check
            # would raise AttributeError under Python 3
            t = type(p)
            tname = t.__name__
            raise TypeError("Parameter must be of type IrafPar (value: " +
                            tname + ", type: " + str(t) + ", object: " +
                            repr(p) + ")")
        elif self.__pardict.has_exact_key(p.name):
            if p.name in ["$nargs", "mode"]:
                # allow substitution of these default parameters
                self.__pardict[p.name] = p
                # scan from the end, where these special pars normally live;
                # the for/else only raises after the whole list was searched
                for i in range(len(self.__pars)):
                    j = -i - 1
                    if self.__pars[j].name == p.name:
                        self.__pars[j] = p
                        return
                else:
                    raise RuntimeError(f"Bug: parameter `{p.name}' is in "
                                       "dictionary __pardict but not in "
                                       "list __pars??")
            raise ValueError(f"Parameter named `{p.name}' is already defined")
        # add it just before the mode and $nargs parameters (if present)
        j = -1
        for i in range(len(self.__pars)):
            j = -i - 1
            if self.__pars[j].name not in ["$nargs", "mode"]:
                break
        else:
            j = -len(self.__pars) - 1
        self.__pars.insert(len(self.__pars) + j + 1, p)
        self.__pardict.add(p.name, p)
        if isinstance(p, IrafParPset):
            # parameters from this pset will be added too
            if self.__psets2merge is None:
                # add immediately
                self.__psets2merge = [p]
                self.__addPsetParams()
            else:
                # just add to the pset list
                self.__psets2merge.append(p)
                # can't call __addPsetParams here as we may now be inside a call

    def isConsistent(self, other):
        """Compare two IrafParLists for consistency

        Returns true if lists are consistent, false if inconsistent.
        Only checks immutable param characteristics (name & type).
        Allows hidden parameters to be in any order, but requires
        non-hidden parameters to be in identical order.
        """
        if not isinstance(other, self.__class__):
            if Verbose > 0:
                print(f'Comparison list is not a {self.__class__.__name__}')
            return 0
        # compare minimal set of parameter attributes
        thislist = self._getConsistentList()
        otherlist = other._getConsistentList()
        if thislist == otherlist:
            return 1
        else:
            if Verbose > 0:
                _printVerboseDiff(thislist, otherlist)
            return 0

    def _getConsistentList(self):
        """Return simplified parameter dictionary used for consistency check

        Dictionary is keyed by param name, with value of type and
        (for non-hidden parameters) sequence number.
        """
        dpar = {}
        j = 0
        hflag = -1
        for par in self.__pars:
            if par.mode == "h":
                dpar[par.name] = (par.type, hflag)
            else:
                dpar[par.name] = (par.type, j)
                j = j + 1
        return dpar

    def _dlen(self):
        """ For diagnostic use only: return length of class attr name dict. """
        return len(self.__dict__)

    def clearFlags(self):
        """Clear all status flags for all parameters"""
        for p in self.__pars:
            p.setFlags(0)

    def setAllFlags(self):
        """Set all status flags to indicate parameters were set on cmdline"""
        for p in self.__pars:
            p.setCmdline()

    # parameters are accessible as attributes

    def __getattr__(self, name):
        # DBG: id(self), len(self.__dict__), "__getattr__ for: "+str(name)
        # underscored names are never parameters
        if name and name[0] == '_':
            raise AttributeError(name)
        try:
            return self.getValue(name, native=1)
        except SyntaxError as e:
            raise AttributeError(str(e))

    def __setattr__(self, name, value):
        # DBG: id(self), len(self.__dict__), "__setattr__ for: "+str(name)+", value: "+str(value)[0:20]
        # hidden Python parameters go into the standard dictionary
        # (hope there are none of these in IRAF tasks)
        if name and name[0] == '_':
            object.__setattr__(self, name, value)
        else:
            self.setParam(name, value)

    def __len__(self):
        return len(self.__pars)

    # public accessor functions for attributes

    def hasPar(self, param):
        """Test existence of parameter named param"""
        if self.__psets2merge:
            self.__addPsetParams()
        param = irafutils.untranslateName(param)
        return param in self.__pardict

    def getFilename(self):
        """Return the .par filename backing this list (may be '')"""
        return self.__filename

    def getParList(self, docopy=0):
        """Return the list of IrafPar objects (a deep copy if docopy)"""
        if docopy:
            # return copy of the list if docopy flag set
            pars = copy.deepcopy(self.__pars)
            for p in pars:
                p.setFlags(0)
            return pars
        else:
            # by default return the list itself
            return self.__pars

    def getParDict(self):
        """Return the min-match dictionary of parameters (including psets)"""
        if self.__psets2merge:
            self.__addPsetParams()
        return self.__pardict

    def getParObject(self, param):
        """ Returns an IrafPar object matching the name given (param).

        This looks only at the "top level" (which includes
        any duplicated PSET pars via __addPsetParams), but does not look
        down into PSETs.  Note the difference between this and getParObjects
        in their different return types. """
        if self.__psets2merge:
            self.__addPsetParams()
        try:
            param = irafutils.untranslateName(param)
            return self.__pardict[param]
        except KeyError as e:
            raise e.__class__("Error in parameter '" + param + "' for task " +
                              self.__name + "\n" + str(e))

    def getParObjects(self, param, typecheck=True):
        """
        Returns all IrafPar objects matching the string name given (param),
        in the form of a dict like:

            { scopename : <IrafPar instance>, ... }

        where scopename is '' if par was found as a regular par in this list,
        or, where scopename is psetname if the par was found inside a PSET.
        It is possible that some dict values will actually be the same object
        in memory (see docs for __addPsetParams).

        This _will_ raise a KeyError if the given param name was not
        found at the "top level" (a regular par inside this par list)
        even if it is also in a PSET.

        typecheck: If multiple par objects are found, and typecheck is set to
        True, only the first (e.g. top level) will be returned if those
        par objects have a different value for their .type attribute.
        Otherwise all par objects found are returned in the dict.

        Note the difference between this and getParObject in their
        different return types.
        """
        # Notes:
        # To accomplish the parameter setting (e.g. setParam) this calls up
        # all possible exact-name-matching pars in this par list, whether
        # they be on the "top" level with that name (param), or down in some
        # PSET with that name (param). If we are simply an IRAFish task, then
        # this is fine as we can assume the task likely does not have a par of
        # its own and a PSET par, both of which have the same name. Thus any
        # such case will acquire a copy of the PSET par at the top level. See
        # discussion of this in __addPsetParams().
        # BUT, if we are a CL script (e.g. mscombine.cl), we could have local
        # vars which happen to have the same names as PSET pars. This is an
        # issue that we need to handle and be aware of (see typecheck arg).
        if self.__psets2merge:
            self.__addPsetParams()

        param = irafutils.untranslateName(param)
        retval = {}

        # First find the single "top-level" matching par
        try:
            pobj = self.__pardict[param]
            retval[''] = pobj
        except KeyError as e:
            raise e.__class__("Error in parameter '" + param + "' for task " +
                              self.__name + "\n" + str(e))

        # Next, see if there are any pars by this name inside any PSETs
        if not self.__hasPsets:
            return retval

        # There is a PSET in here somewhere...
        allpsets = [p for p in self.__pars if isinstance(p, IrafParPset)]
        for pset in allpsets:
            # Search the pset's pars.  We definitely do NOT want a copy,
            # we need the originals to edit.
            its_task = pset.get()
            its_plist = its_task.getParList(docopy=0)
            # assume full paramname given (no min-matching inside of PSETs)
            matching_pars = [pp for pp in its_plist if pp.name == param]
            if len(matching_pars) > 1:
                raise RuntimeError('Unexpected multiple matches for par: ' +
                                   param + ', are: ' +
                                   str([p.name for p in matching_pars]))
            # found one with that name; add it to outgoing dict
            if len(matching_pars) > 0:
                addit = True
                if typecheck and '' in retval:
                    # in this case we already found a top-level and we've been
                    # asked to make sure to return only same-type matches
                    addit = matching_pars[0].type == retval[
                        ''].type  # attr is a char
                if addit:
                    retval[pset.name] = matching_pars[0]
        return retval

    def getAllMatches(self, param):
        """Return list of all parameter names that may match param"""
        if param == "":
            return list(self.__pardict.keys())
        else:
            return self.__pardict.getallkeys(param, [])

    def getValue(self, param, native=0, prompt=1, mode="h"):
        """Return value for task parameter 'param' (with min-match)

        If native is non-zero, returns native format for value.  Default is
        to return a string.
        If prompt is zero, does not prompt for parameter.  Default is to
        prompt for query parameters.
        """
        par = self.getParObject(param)
        value = par.get(native=native, mode=mode, prompt=prompt)
        if isinstance(value, str) and value and value[0] == ")":
            # parameter indirection: ')task.param'
            try:
                task = iraf.getTask(self.__name)
                value = task.getParam(value[1:], native=native, mode="h")
            except KeyError:
                # if task is not known, use generic function to get param
                value = iraf.clParGet(value[1:],
                                      native=native,
                                      mode="h",
                                      prompt=prompt)
        return value

    def setParam(self, param, value, scope='', check=0, idxHint=None):
        """Set task parameter 'param' to value (with minimum-matching).

        scope, idxHint, and check are included for use as a task object
        but they are currently ignored."""
        # set every matching par -- top level and same-typed PSET copies
        matches_dict = self.getParObjects(param)
        for par_obj in matches_dict.values():
            par_obj.set(value)

    def setParList(self, *args, **kw):
        """Set value of multiple parameters from list"""
        # first undo translations that were applied to keyword names
        for key in list(kw.keys()):
            okey = key
            key = irafutils.untranslateName(key)
            if okey != key:
                value = kw[okey]
                del kw[okey]
                kw[key] = value

        # then expand all keywords to their full names and add to fullkw
        fullkw = {}
        dupl_pset_pars = []
        for key in kw.keys():
            # recall, kw is just simple { namestr: valstr, ... }
            try:
                # find par obj for this key
                # (read docs for getParObjects - note the 's')
                results_dict = self.getParObjects(key)
                # results_dict is of form: { psetname : <IrafPar instance> }
                # where results_dict may be (and most often is) empty string ''.
                # if no KeyError, then there exists a top-level entry ('')
                if '' not in results_dict:
                    raise RuntimeError('No top-level match; expected KeyError')
                # assume results_dict[''].name.startswith(key) or .name==key
                # recall that key might be shortened version of par's .name
                param = (results_dict[''].name, ''
                        )  # this means (paramname, [unused])
                results_dict.pop('')
                # if there are others, then they are pars with the same name
                # but located down inside a PSET. So we save them for further
                # handling down below.
                for psetname in results_dict:
                    if not results_dict[psetname].name.startswith(key):
                        raise RuntimeError('PSET name non-match; par name: ' +
                                           key + '; got: ' +
                                           results_dict[psetname].name)
                    dupl_pset_pars.append(
                        (psetname, results_dict[psetname].name, key))
            except KeyError as e:
                # Perhaps it is pset.param ? This would occur if the caller
                # used kwargs like gemcube(..., geofunc.axis1 = 1, ...)
                # (see help call #3454 for Mark Sim.)
                i = key.find('.')
                if i <= 0:
                    raise e
                # recall that key[:i] might be shortened version of par's .name
                param = (self.getParObject(key[:i]).name, key[i + 1:])
                # here param is (pset name, par name)
            if param in fullkw:
                msg_full_pname = param[0]
                if param[1]:
                    msg_full_pname = '.'.join(param)
                # at this point, msg_full_pname is fully qualified
                raise SyntaxError("Multiple values given for parameter " +
                                  msg_full_pname + " in task " + self.__name)
            # Add it
            fullkw[param] = kw[key]

        # At this point, an example of fullkw might be:
        # {('extname', ''): 'mef', ('long', ''): no, ('ccdtype', ''): ''}
        # NOTE that the keys to this dict are EITHER in the form
        # (top level par name, '') -OR- (pset name, par name)
        # CDS June2014 - this is ugly - love to change this soon...

        # Now add any duplicated pars that were found, both up at top level and
        # down inside a PSET (saved as dupl_pset_pars list). The top level
        # version has already been added to fullkw, so we add the PSET version.
        for par_tup in dupl_pset_pars:
            # par_tup is of form:
            # (pset name (full), par name (full), par name (short/given), )
            if par_tup[0:2] not in fullkw:
                # use par_tup[2]; its the given kw arg w/out the
                # identifying pset name
                fullkw[par_tup[0:2]] = kw[par_tup[2]]

        # Now add positional parameters to the keyword list, checking
        # for duplicates
        ipar = 0
        for value in args:
            while ipar < len(self.__pars):
                if self.__pars[ipar].mode != "h":
                    break
                ipar = ipar + 1
            else:
                # executed if we run out of non-hidden parameters
                raise SyntaxError("Too many positional parameters for task " +
                                  self.__name)
            # at this point, ipar is set to index of next found non-hidden
            # par in self.__pars
            param = (self.__pars[ipar].name, '')
            if param in fullkw:
                # uh-oh, it was already in our fullkw list, but now we got a
                # positional value for it (occurs in _ccdtool; help call #5901)
                msg_full_pname = param[0]
                if param[1]:
                    msg_full_pname = '.'.join(param)
                msg_val_from_kw = fullkw[param]
                msg_val_from_pos = value
                # let's say we only care if the 2 values are, in fact, different
                if msg_val_from_kw != msg_val_from_pos:
                    raise SyntaxError('Both a positional value ("' +
                                      str(msg_val_from_pos) +
                                      '") and a keyword value ("' +
                                      str(msg_val_from_kw) +
                                      '") were given for parameter "' +
                                      msg_full_pname + '" in task "' +
                                      self.__name + '"')
                # else:, we'll now just overwite the old value with the same new value
            fullkw[param] = value
            ipar = ipar + 1

        # Now set all keyword parameters ...
        # clear changed flags and set cmdline flags for arguments
        self.clearFlags()
        # Count number of positional parameters set on cmdline
        # Note that this counts positional parameters set through
        # keywords in $nargs -- that is different from IRAF, which
        # counts only non-keyword parameters. That is a bug in IRAF.
        nargs = 0
        for key, value in fullkw.items():
            param, tail = key
            p = self.getParObject(param)
            if tail:
                # is pset parameter - get parameter object from its task
                p = p.get().getParObject(tail)
                # what if *this* p is a IrafParPset ? skip for now,
                # since we think no one is doubly nesting PSETs
            p.set(value)
            p.setFlags(_cmdlineFlag)
            if p.mode != "h":
                nargs = nargs + 1

        # Number of arguments on command line, $nargs, is used by some IRAF
        # tasks (e.g. imheader).
        self.setParam('$nargs', nargs)

    def eParam(self):
        """Edit parameters in the GUI parameter editor"""
        from . import epar
        epar.epar(self)

    def tParam(self):
        """Edit parameters in the terminal parameter editor"""
        from . import tpar
        tpar.tpar(self)

    def lParam(self, verbose=0):
        """Print the parameter listing to stdout"""
        print(self.lParamStr(verbose=verbose))

    def lParamStr(self, verbose=0):
        """List the task parameters"""
        retval = []
        # Do the non-hidden parameters first
        for i in range(len(self.__pars)):
            p = self.__pars[i]
            if p.mode != 'h':
                if Verbose > 0 or p.name != '$nargs':
                    retval.append(p.pretty(verbose=verbose or Verbose > 0))
        # Now the hidden parameters
        for i in range(len(self.__pars)):
            p = self.__pars[i]
            if p.mode == 'h':
                if Verbose > 0 or p.name != '$nargs':
                    retval.append(p.pretty(verbose=verbose or Verbose > 0))
        return '\n'.join(retval)

    def dParam(self, taskname="", cl=1):
        """Dump the task parameters in executable form

        Default is to write CL version of code; if cl parameter is
        false, writes Python executable code instead.
        """
        if taskname and taskname[-1:] != ".":
            taskname = taskname + "."
        for i in range(len(self.__pars)):
            p = self.__pars[i]
            if p.name != '$nargs':
                print(f"{taskname}{p.dpar(cl=cl)}")
        if cl:
            print("# EOF")

    def saveParList(self, filename=None, comment=None):
        """Write .par file data to filename (string or filehandle)"""
        if filename is None:
            filename = self.__filename
        if not filename:
            raise ValueError("No filename specified to save parameters")
        # but not if user turned off parameter writes
        writepars = int(iraf.envget("writepars", 1))
        if writepars < 1:
            msg = "No parameters written to disk."
            print(msg)
            return msg
        # ok, go ahead and write 'em - set up file
        if hasattr(filename, 'write'):
            fh = filename
        else:
            absFileName = iraf.Expand(filename)
            absDir = os.path.dirname(absFileName)
            if len(absDir) and not os.path.isdir(absDir):
                os.makedirs(absDir)
            fh = open(absFileName, 'w')
        nsave = len(self.__pars)
        if comment:
            fh.write('# ' + comment + '\n')
        for par in self.__pars:
            if par.name == '$nargs':
                # $nargs is synthetic and never saved
                nsave = nsave - 1
            else:
                fh.write(par.save() + '\n')
        if fh != filename:
            # we opened the file ourselves, so close it and report the name
            # (previously this branch reported "(unknown)", losing the name)
            fh.close()
            return f"{nsave:d} parameters written to {filename}"
        elif hasattr(fh, 'name'):
            return f"{nsave:d} parameters written to {fh.name}"
        else:
            return f"{nsave:d} parameters written"

    def __getinitargs__(self):
        """Return parameters for __init__ call in pickle"""
        return (self.__name, self.__filename, self.__pars)

    #
    # These two methods were set to do nothing (they were previously
    # needed for pickle) but having them this way makes PY3K deepcopy
    # fail in an extremely difficult to diagnose way.
    #
    # def __getstate__(self):
    #     """Return additional state for pickle"""
    #     # nothing beyond init
    #     return None
    #
    # def __setstate__(self, state):
    #     """Restore additional state from pickle"""
    #     pass

    def __str__(self):
        s = '<IrafParList ' + self.__name + ' (' + self.__filename + ') ' + \
            str(len(self.__pars)) + ' parameters>'
        return s

    # these methods are provided so an IrafParList can be treated like
    # an IrafTask object by epar (and other modules)

    def getDefaultParList(self):
        return self.getParList()

    def getName(self):
        return self.__name

    def getPkgname(self):
        return ''

    def run(self, *args, **kw):
        pass
def _printVerboseDiff(list1, list2):
    """Describe, on stdout, how two parameter lists differ."""
    positional1, hidden1 = _extractDiffInfo(list1)
    positional2, hidden2 = _extractDiffInfo(list2)
    # Report parameters whose hidden/positional status flipped first; this
    # also prunes them so the per-group comparisons below stay clean.
    _printHiddenDiff(positional1, hidden1, positional2, hidden2)
    # Then compare each group against its counterpart.
    _printDiff(positional1, positional2, 'positional')
    _printDiff(hidden1, hidden2, 'hidden')
def _extractDiffInfo(alist):
    """Split a {name: (type, order)} mapping into (positional, hidden) dicts.

    Hidden parameters are marked with an order value of -1.
    """
    hidden_flag = -1
    positional = {name: info for name, info in alist.items()
                  if info[1] != hidden_flag}
    hidden = {name: info for name, info in alist.items()
              if info[1] == hidden_flag}
    return (positional, hidden)
def _printHiddenDiff(pd1, hd1, pd2, hd2):
    """Report parameters hidden in one list but positional in the other.

    Matching entries are deleted from the dicts in place so that later
    positional-vs-positional and hidden-vs-hidden comparisons skip them.
    """
    # Iterate over snapshots of the keys since we mutate while scanning.
    for name in list(pd1):
        if name in hd2:
            print(f"Parameter `{name}' is hidden in list 2 but not list 1")
            del pd1[name]
            del hd2[name]
    for name in list(pd2):
        if name in hd1:
            print(f"Parameter `{name}' is hidden in list 1 but not list 2")
            del pd2[name]
            del hd1[name]
def _printDiff(pd1, pd2, label):
    """Print differences between two same-kind parameter groups.

    ``pd1``/``pd2`` map names to (type, order) tuples; ``label`` is a word
    like 'positional' or 'hidden' used in the messages.  Parameters present
    in only one dict are reported and deleted in place; parameters present
    in both are then compared for type and (when no extras existed) order.
    """
    if pd1 == pd2:
        return
    noextra = 1
    k1 = sorted(pd1.keys())
    k2 = sorted(pd2.keys())
    if k1 != k2:
        # parameter name lists differ
        i1 = 0
        i2 = 0
        noextra = 0
        # walk both sorted key lists in parallel
        while i1 < len(k1) and i2 < len(k2):
            key1 = k1[i1]
            key2 = k2[i2]
            if key1 == key2:
                i1 = i1 + 1
                i2 = i2 + 1
            else:
                # one or both parameters missing
                if key1 not in pd2:
                    print(f"Extra {label} parameter `{key1}' "
                          f"(type `{pd1[key1][0]}') in list 1")
                    # delete the extra parameter
                    del pd1[key1]
                    i1 = i1 + 1
                if key2 not in pd1:
                    print(f"Extra {label} parameter `{key2}' "
                          f"(type `{pd2[key2][0]}') in list 2")
                    del pd2[key2]
                    i2 = i2 + 1
        # other parameters must be missing
        while i1 < len(k1):
            key1 = k1[i1]
            # bug fix: this print was missing its f-string prefix and
            # emitted the literal text "{label}"/"{key1}"
            print(f"Extra {label} parameter `{key1}' "
                  f"(type `{pd1[key1][0]}') in list 1")
            del pd1[key1]
            i1 = i1 + 1
        while i2 < len(k2):
            key2 = k2[i2]
            print(f"Extra {label} parameter `{key2}' "
                  f"(type `{pd2[key2][0]}') in list 2")
            del pd2[key2]
            i2 = i2 + 1
    # remaining parameters are in both lists
    # check for differing order or type, but ignore order if there
    # were extra parameters
    for key in pd1:
        if pd1[key] != pd2[key]:
            mm = []
            type1, order1 = pd1[key]
            type2, order2 = pd2[key]
            if noextra and order1 != order2:
                mm.append("order disagreement")
            if type1 != type2:
                mm.append(f"type disagreement (`{type1}' vs. `{type2}')")
            print(f"Parameter `{key}': {', '.join(mm)}")
# The dictionary of all special-use par files found on disk.
# Each key is a tuple of (taskName, pkgName).
# Each value is a list of path names.
# None means "not yet scanned"; _updateSpecialParFileDict() fills it lazily.
_specialUseParFileDict = None

# For TASKMETA lines in par files, e.g.: '# TASKMETA: task=display package=tv'
# Group 1 captures the task name, group 2 the package name.
_re_taskmeta = \
    re.compile(r'^# *TASKMETA *: *task *= *([^ ]*) *package *= *([^ \n]*)')
def _updateSpecialParFileDict(dirToCheck=None, strict=False):
    """ Search the disk in the given path (or .) for special-purpose parameter
    files.  These can have any name, end in .par, and have metadata comments
    which identify their associated task.  This function simply fills or
    adds to our _specialUseParFileDict dictionary.  If strict is True then
    any .par file found is expected to have our TASKMETA tag. """
    global _specialUseParFileDict
    # Default state is that dictionary is created but empty
    if _specialUseParFileDict is None:
        _specialUseParFileDict = {}
    # If the caller gave us a dirToCheck, use only it, otherwise check the
    # usual places (which calls us recursively).
    if dirToCheck is None:
        # Check the auxiliary par dir (environment overrides the CL variable)
        uparmAux = iraf.envget("uparm_aux", "")
        if 'UPARM_AUX' in os.environ:
            uparmAux = os.environ['UPARM_AUX']
        if len(uparmAux) > 0:
            _updateSpecialParFileDict(dirToCheck=uparmAux, strict=True)
        # If the _updateSpecialParFileDict processing is found to be
        # taking too long, we could easily add a global flag here like
        # _alreadyCheckedUparmAux = True
        # Also check the current directory
        _updateSpecialParFileDict(dirToCheck=os.getcwd())
        # For performance, note that there is nothing yet in place to stop us
        # from rereading a large dir of par files every time this is called
        return  # we've done enough
    # Do a glob in the given dir
    flist = glob.glob(dirToCheck + "/*.par")
    if len(flist) <= 0:
        return
    # At this point, we have files.  For each, figure out the task and
    # package it is for, and add its pathname to the dict.
    for supfname in flist:
        buf = []
        try:
            with open(supfname, errors="ignore") as supfile:
                buf = supfile.readlines()
        except OSError:
            # unreadable file falls through to the warning below (buf empty)
            pass
        if len(buf) < 1:
            warning("Unable to read special use parameter file: " + supfname,
                    level=-1)
            continue
        # get task and pkg names, and verify this is a correct file
        tupKey = None
        for line in buf:
            mo = _re_taskmeta.match(line)
            if mo:
                # the syntax is right, get the task and pkg names
                tupKey = (mo.group(1), mo.group(2))
                break  # only one TASKMETA line per file
        if tupKey:
            if tupKey in _specialUseParFileDict:
                supflist = _specialUseParFileDict[tupKey]
                # avoid duplicate path entries for the same file
                if supfname not in supflist:
                    _specialUseParFileDict[tupKey].append(supfname)
            else:
                _specialUseParFileDict[tupKey] = [
                    supfname,
                ]
        # If it does not have the TASKMETA line, then it is likely a regular
        # IRAF .par file.  How it got here we don't know, but it got dropped
        # here somehow and warning the user continuously about this would be
        # very annoying, so be quiet about it.
def newSpecialParFile(taskName, pkgName, pathName):
    """ Register a freshly created special-use par file in our cache.

    Called when a new special-purpose parameter file has just been written,
    so the lookup dictionary stays current without re-scanning the disk.
    Someday, after we gauge usage, this could be changed to re-scan and add
    the entry only if not yet present. """
    global _specialUseParFileDict
    # lazy init - only search the disk here when absolutely necessary
    if _specialUseParFileDict is None:
        _updateSpecialParFileDict()
    known = _specialUseParFileDict.setdefault((taskName, pkgName), [])
    if pathName not in known:
        known.append(pathName)
def haveSpecialVersions(taskName, pkgName):
    """ Return True when special-purpose parameter files exist for the task.

    Always re-scans the usual locations first (we may have moved into a new
    work area with as-yet-unseen .par files); the first call also creates
    the dictionary, which can take some disk-reading time. """
    global _specialUseParFileDict
    _updateSpecialParFileDict()
    return (taskName, pkgName) in _specialUseParFileDict
def getSpecialVersionFiles(taskName, pkgName):
    """ Return a (possibly empty) list of path names for special versions of
    parameter files for the given task.  Triggers lazy initialization. """
    global _specialUseParFileDict
    # haveSpecialVersions() performs the disk scan / lazy init side effect.
    if not haveSpecialVersions(taskName, pkgName):
        return []
    return _specialUseParFileDict[(taskName, pkgName)]
# -----------------------------------------------------
# Read IRAF .par file and return list of parameters
# -----------------------------------------------------

# Parameter file is basically comma-separated fields, but
# with some messy variations involving embedded quotes
# and the ability to break the final field across lines.

# First define regular expressions used in parsing

# Patterns that match a quoted string with embedded \" or \'
# From Friedl, Mastering Regular Expressions, p. 176.
#
# Modifications:
# - I'm using the "non-capturing" parentheses (?:...) where
#   possible; I only capture the part of the string between
#   the quotes.
# - Match leading white space and optional trailing comma.
# - Pick up any non-whitespace between the closing quote and
#   the comma or end-of-line (which is a syntax error.)
#   Any matched string gets captured into djunk or sjunk
#   variable, depending on which quotes were matched.
whitespace = r'[ \t]*'
optcomma = r',?'
noncommajunk = r'[^,]*'
double = whitespace + r'"(?P<double>[^"\\]*(?:\\.[^"\\]*)*)"' + \
    whitespace + r'(?P<djunk>[^,]*)' + optcomma
single = whitespace + r"'(?P<single>[^'\\]*(?:\\.[^'\\]*)*)'" + \
    whitespace + r'(?P<sjunk>[^,]*)' + optcomma

# Comma-terminated string that doesn't start with quote
# Match explanation:
# - match leading white space
# - if end-of-string then done with capture
# - elif lookahead == comma then done with capture
# - else match not-[comma | blank | quote] followed
#   by string of non-commas; then done with capture
# - match trailing comma if present
#
# Trailing blanks do get captured (which I think is
# the right thing to do)
comma = whitespace + r"(?P<comma>$|(?=,)|(?:[^, \t'" + r'"][^,]*))' + optcomma

# Combined pattern: each field is an unquoted, double-quoted, or
# single-quoted alternative
field = '(?:' + comma + ')|(?:' + double + ')|(?:' + single + ')'
_re_field = re.compile(field, re.DOTALL)

# Pattern that matches trailing backslashes at end of line
_re_bstrail = re.compile(r'\\*$')

# clean up unnecessary global variables
del whitespace, field, comma, optcomma, noncommajunk, double, single
def _readpar(filename, strict=0):
    """Read IRAF .par file and return list of parameters.

    ``strict`` is forwarded to warning()/IrafParFactory and controls
    whether syntax oddities are fatal or merely reported.
    """
    global _re_field, _re_bstrail
    # param_dict is only used to detect duplicate names; order is kept
    # in param_list, which is what gets returned.
    param_dict = {}
    param_list = []
    with open(os.path.expanduser(filename), errors="ignore") as fh:
        lines = fh.readlines()
    # reverse order of lines so we can use pop method
    lines.reverse()
    while lines:
        # strip whitespace (including newline) off both ends
        line = lines.pop().strip()
        # skip comments and blank lines
        # "..." is weird line that occurs in cl.par
        if len(line) > 0 and line[0] != '#' and line != "...":
            # Append next line if this line ends with continuation character.
            while line[-1:] == "\\":
                # odd number of trailing backslashes means this is continuation
                if (len(_re_bstrail.search(line).group()) % 2 == 1):
                    try:
                        line = line[:-1] + lines.pop().rstrip()
                    except IndexError:
                        raise SyntaxError(filename +
                                          ": Continuation on last line\n" +
                                          line)
                else:
                    break
            flist = []
            i1 = 0
            # scan the line field by field with _re_field
            while len(line) > i1:
                mm = _re_field.match(line, i1)
                if mm is None:
                    # Failure occurs only for unmatched leading quote.
                    # Append more lines to get quotes to match.  (Probably
                    # want to restrict this behavior to only the prompt
                    # field.)
                    while mm is None:
                        try:
                            nline = lines.pop()
                        except IndexError:
                            # serious error, run-on quote consumed entire file
                            sline = line.split('\n')
                            raise SyntaxError(filename +
                                              ": Unmatched quote\n" + sline[0])
                        line = line + '\n' + nline.rstrip()
                        mm = _re_field.match(line, i1)
                if mm.group('comma') is not None:
                    g = mm.group('comma')
                    # completely omitted field (,,)
                    if g == "":
                        g = None
                    # check for trailing quote in unquoted string
                    elif g[-1:] == '"' or g[-1:] == "'":
                        warning(
                            filename + "\n" + line + "\n" +
                            "Unquoted string has trailing quote", strict)
                elif mm.group('double') is not None:
                    if mm.group('djunk'):
                        warning(
                            filename + "\n" + line + "\n" +
                            "Non-blank follows quoted string", strict)
                    g = mm.group('double')
                elif mm.group('single') is not None:
                    if mm.group('sjunk'):
                        warning(
                            filename + "\n" + line + "\n" +
                            "Non-blank follows quoted string", strict)
                    g = mm.group('single')
                else:
                    raise SyntaxError(
                        filename + "\n" + line + "\n" + "Huh? mm.groups()=" +
                        repr(mm.groups()) + "\n" +
                        "Bug: doesn't match single, double or comma??")
                flist.append(g)
                # move match pointer
                i1 = mm.end()
            try:
                par = IrafParFactory(flist, strict=strict)
            except KeyboardInterrupt:
                raise
            except Exception as exc:
                # XXX Shouldn't catch all exceptions here -- this could
                # XXX screw things up
                if Verbose:
                    import traceback
                    traceback.print_exc()
                raise SyntaxError(filename + "\n" + line + "\n" + str(flist) +
                                  "\n" + str(exc))
            if par.name in param_dict:
                warning(
                    filename + "\n" + line + "\n" + "Duplicate parameter " +
                    par.name, strict)
            else:
                param_dict[par.name] = par
                param_list.append(par)
    return param_list
|
iraf-communityREPO_NAMEpyrafPATH_START.@pyraf_extracted@pyraf-main@pyraf@irafpar.py@.PATH_END.py
|
{
"filename": "shift_test.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/tests/shift_test.py",
"type": "Python"
}
|
import collections
import pytest
import numpy as np
import pyarrow as pa
import vaex.shift
import vaex.ml
def chunk_iter(chunks, chunk_size):
    """Yield (i1, i2, sliced_chunks) windows of at most chunk_size rows.

    ``chunks`` maps column names to array-like objects supporting
    ``len()`` and ``.slice(offset, length)``.
    """
    total = len(next(iter(chunks.values())))
    for start in range(0, total, chunk_size):
        stop = min(total, start + chunk_size)
        window = {name: array.slice(start, stop - start)
                  for name, array in chunks.items()}
        yield start, stop, window
def eat_chunks(iter):
    """Drain a chunk iterator; return ([(i1, i2), ...], {name: flat list})."""
    offsets = []
    flattened = collections.defaultdict(list)
    for start, stop, chunks in iter:
        print(start, stop, chunks)
        offsets.append((start, stop))
        for column, values in chunks.items():
            flattened[column].extend(vaex.array_types.tolist(values))
    return offsets, flattened
def test_chunk_prepend(chunk_size=2):
    """chunk_prepend injects leading values; total length stays the same."""
    x = pa.array([0, 1, 2, None, 4])
    y = pa.array([0, 1, 2, None, 4])
    xp = pa.array([99, 88])
    # prepending [99, 88] pushes the original tail values out of the window
    xexpected = pa.array([99, 88, 0, 1, 2])
    i = chunk_iter(dict(x=x, y=y), chunk_size)
    offsets, chunks = eat_chunks(vaex.shift.chunk_prepend(i, {'x': xp}, chunk_size))
    assert offsets == [(0, 2), (2, 4), (4, 5)]
    assert chunks['x'] == xexpected.tolist()
def test_chunk_append(chunk_size=2):
    """chunk_append adds trailing values; leading values fall out of the window."""
    x = pa.array([0, 1, 2, None, 4])
    y = pa.array([0, 1, None, 9, 16])
    xappend = pa.array([99, 88])
    xexpected = pa.array([2, None, 4, 99, 88])
    i = chunk_iter(dict(x=x, y=y), chunk_size)
    offsets, chunks = eat_chunks(vaex.shift.chunk_append(i, {'x': xappend}, chunk_size))
    # assert offsets == [(0, 2), (2, 4), (4, 5)] TODO FIX
    assert chunks['x'] == xexpected.tolist()
def test_chunk_eat(chunk_size=2):
    """chunk_eat(i, 3) consumes the first 3 rows; offsets restart from 0."""
    x = pa.array([0, 1, 2, None, 4])
    y = pa.array([0, 1, None, 9, 16])
    i = chunk_iter(dict(x=x, y=y), chunk_size)
    offsets, chunks = eat_chunks(vaex.shift.chunk_eat(i, 3))
    assert chunks['x'] == x[3:].tolist()
    assert chunks['y'] == y[3:].tolist()
    assert offsets == [(0, 1), (1, 2)]
@pytest.mark.parametrize("length", list(range(1, 5)))
@pytest.mark.parametrize("chunk_size", [2, 5, 10])
def test_chunk_trim(length, chunk_size):
    """chunk_trim keeps only the first ``length`` rows, for any chunking."""
    x = pa.array([0, 1, 2, None, 4])
    y = pa.array([0, 1, None, 9, 16])
    i = chunk_iter(dict(x=x, y=y), chunk_size)
    offsets, chunks = eat_chunks(vaex.shift.chunk_trim(i, length))
    assert chunks['x'] == x[:length].tolist()
    assert chunks['y'] == y[:length].tolist()
    assert len(chunks['x']) == length
    # first offset window ends at the chunk boundary or the trim point
    assert offsets[0] == (0, min(chunk_size, length))
    if len(offsets) > 1:
        assert offsets[1] == (2, min(4, length))
def test_sliding_matrix(chunk_size=2):
    """sliding_matrix(prev, current, next, window, offset) builds per-row
    windows; masked slots are represented here by -1 in the expectations.

    Also checks that computing only a middle chunk (passing the neighbouring
    chunks explicitly) matches the corresponding rows of the full result.
    """
    # removed unused fixtures ``y`` and ``xappend`` that were never referenced
    x = pa.array([0, 1, 2, None, 4])
    # window of 2, offset 0
    xexpected = np.array([[0, 1], [1, 2], [2, -1], [-1, 4], [4, -1]])
    xexpected = np.ma.array(xexpected, mask=xexpected == -1)
    xresult = vaex.shift.sliding_matrix(None, x, None, 2, 0)
    assert xresult.tolist() == xexpected.tolist()
    xresult = vaex.shift.sliding_matrix(x[:2], x[2:4], x[4:], 2, 0)
    assert xresult.tolist() == xexpected[2:4].tolist()
    # with 3 elements
    xexpected3 = np.array([[0, 1, 2], [1, 2, -1], [2, -1, 4], [-1, 4, -1], [4, -1, -1]])
    xexpected3 = np.ma.array(xexpected3, mask=xexpected3 == -1)
    xresult3 = vaex.shift.sliding_matrix(None, x, None, 3, 0)
    assert xresult3.tolist() == xexpected3.tolist()
    xresult3 = vaex.shift.sliding_matrix(x[:2], x[2:4], x[4:], 3, 0)
    assert xresult3.tolist() == xexpected3[2:4].tolist()
    # using offset 1
    xexpected = np.array([[-1, 0], [0, 1], [1, 2], [2, -1], [-1, 4]])
    xexpected = np.ma.array(xexpected, mask=xexpected == -1)
    xresult = vaex.shift.sliding_matrix(None, x, None, 2, 1)
    assert xresult.tolist() == xexpected.tolist()
    xresult = vaex.shift.sliding_matrix(x[:2], x[2:4], x[4:], 2, 1)
    assert xresult.tolist() == xexpected[2:4].tolist()
    # offset 2
    xexpected = np.array([[-1, -1], [-1, 0], [0, 1], [1, 2], [2, -1]])
    xexpected = np.ma.array(xexpected, mask=xexpected == -1)
    xresult = vaex.shift.sliding_matrix(None, x, None, 2, 2)
    assert xresult.tolist() == xexpected.tolist()
    xresult = vaex.shift.sliding_matrix(x[:2], x[2:4], x[4:], 2, 2)
    assert xresult.tolist() == xexpected[2:4].tolist()
    # offset 1 and 3 elements
    # NOTE: a None entry makes the expectation array dtype=object; the
    # element-wise ``== -1`` mask still works and mirrors the original test.
    xexpected3 = np.array([[None, 0, 1], [0, 1, 2], [1, 2, -1], [2, -1, 4], [-1, 4, -1]])
    xexpected3 = np.ma.array(xexpected3, mask=xexpected3 == -1)
    xresult3 = vaex.shift.sliding_matrix(None, x, None, 3, 1)
    assert xresult3.tolist() == xexpected3.tolist()
    xresult3 = vaex.shift.sliding_matrix(x[:2], x[2:4], x[4:], 3, 1)
    assert xresult3.tolist() == xexpected3[2:4].tolist()
    # offset 2 and 3 elements
    xexpected3 = np.array([[None, None, 0], [None, 0, 1], [0, 1, 2], [1, 2, -1], [2, -1, 4]])
    xexpected3 = np.ma.array(xexpected3, mask=xexpected3 == -1)
    xresult3 = vaex.shift.sliding_matrix(None, x, None, 3, 2)
    assert xresult3.tolist() == xexpected3.tolist()
    xresult3 = vaex.shift.sliding_matrix(x[:2], x[2:4], x[4:], 3, 2)
    assert xresult3.tolist() == xexpected3[2:4].tolist()
@pytest.mark.parametrize("virtual", [False, True])
def test_shift_basics(df_factory, virtual, rebuild_dataset):
    """Basic positive/negative shifts, chained shifts, fill_value, and
    shifts past the dataframe length; also survives dataset rebuild."""
    x = [0, 1, 2, None, 4]
    y = [0, 1, None, 9, 16]
    df = df_factory(x=x, y=y)
    if virtual:
        # make x a virtual column to exercise that code path too
        df['x'] = df.x + 0
    dfp1 = df.shift(1, ['x'])
    dfn1 = df.shift(-1, ['x'])
    assert dfp1.x.tolist() == [None, 0, 1, 2, None]
    assert dfp1.y.tolist() == [0, 1, None, 9, 16]
    assert dfn1.x.tolist() == [1, 2, None, 4, None]
    assert dfn1.y.tolist() == [0, 1, None, 9, 16]
    assert dfp1.shift(1).x.tolist() == [None, None, 0, 1, 2]
    assert dfp1.shift(-1).x.tolist() == [0, 1, 2, None, None]
    assert dfp1.shift(-1, fill_value=99).x.tolist() == [0, 1, 2, None, 99]
    assert dfn1.shift(1).x.tolist() == [None, 1, 2, None, 4]
    assert dfn1.shift(-1).x.tolist() == [2, None, 4, None, None]
    assert dfn1.shift(-1, fill_value=99).x.tolist() == [2, None, 4, None, 99]
    # shifting by >= the length blanks everything out
    assert df.shift(4).x.tolist() == [None, None, None, None, 0]
    assert df.shift(5).x.tolist() == [None, None, None, None, None]
    assert df.shift(6).x.tolist() == [None, None, None, None, None]
    assert df.shift(-4).x.tolist() == [4, None, None, None, None]
    assert df.shift(-5).x.tolist() == [None, None, None, None, None]
    assert df.shift(-6).x.tolist() == [None, None, None, None, None]
    # shifted state must survive a dataset rebuild + state restore
    dfp1_rebuild = vaex.from_dataset(rebuild_dataset(dfp1.dataset))
    dfp1_rebuild.state_set(dfp1.state_get())
    assert dfp1_rebuild.x.tolist() == dfp1.x.tolist()
    # assert rebuild_dataset(df.shift(1).hashed()) == df.shift(1).hashed()
@pytest.mark.parametrize("length", list(range(1, 3)))
@pytest.mark.parametrize("i1", list(range(1, 3)))
def test_shift_slice(df_factory, i1, length):
    """Slicing a shifted dataframe equals slicing the shifted expectation."""
    x = [0, 1, 2, None, 4]
    y = [0, 1, None, 9, 16]
    df = df_factory(x=x, y=y)
    dfp1 = df.shift(1, ['x'])
    dfn1 = df.shift(-1, ['x'])
    i2 = i1 + length + 1
    assert dfp1[i1:i2].x.tolist() == [None, 0, 1, 2, None][i1:i2]
    assert dfp1[i1:i2].y.tolist() == [0, 1, None, 9, 16][i1:i2]
    assert dfn1[i1:i2].x.tolist() == [1, 2, None, 4, None][i1:i2]
def test_shift_basics_trim(df_factory):
    """With trim=True, rows with no shifted counterpart are dropped,
    shortening the dataframe instead of filling with missing values."""
    x = [0, 1, 2, None, 4]
    y = [0, 1, None, 9, 16]
    df = df_factory(x=x, y=y)
    dfp1 = df.shift(1, ['x'], trim=True)
    dfn1 = df.shift(-1, ['x'], trim=True)
    assert dfp1.x.tolist() == [0, 1, 2, None]
    assert dfp1.y.tolist() == [1, None, 9, 16]
    assert dfn1.x.tolist() == [1, 2, None, 4]
    assert dfn1.y.tolist() == [0, 1, None, 9]
    # chained trimmed shifts shrink further
    assert dfp1.shift(1, trim=True).x.tolist() == [0, 1, 2]
    assert dfp1.shift(-1, trim=True).x.tolist() == [1, 2, None]
def test_shift_range(df_factory):
    """A (start, end) shift produces per-row lists of shifted values."""
    x = [0, 1, 2, 3, 4]
    xm1 = [1, 2, 3, 4, None]
    y = [0, 1, None, 9, 16]
    df = df_factory(x=x, y=y)
    df['x1'] = df['x']
    df['x2'] = df['x']
    df.shift(0, ['x1'], inplace=True)
    df.shift(-1, ['x2'], inplace=True)
    assert df.x1.tolist() == x
    assert df.x2.tolist() == xm1
    assert df.func.stack([df.x1, df.x2]).tolist() == [[0, 1], [1, 2], [2, 3], [3, 4], [4, None]]
    # a range shift is equivalent to stacking the individual shifts
    df = df_factory(x=x, y=y)
    df.shift((0, 2), 'x', inplace=True)
    assert df.x.tolist() == [[0, 1], [1, 2], [2, 3], [3, 4], [4, None]]
    # trim with range
    df = df_factory(x=x, y=y)
    df.shift((0, 3), 'x', inplace=True, trim=True)
    assert df.x.tolist() == [[0, 1, 2], [1, 2, 3], [2, 3, 4]]
def test_shift_filtered(df_factory):
    """Shifting a filtered dataframe behaves like shifting the kept rows:
    the 99/88 sentinel rows are filtered away first."""
    x = [0, 99, 1, 99, 2, 99, None, 99, 4, 99]
    y = [0, 88, 1, 88, None, 88, 9, 88, 16, 88]
    assert len(x) == len(y)
    df = df0 = df_factory(x=x, y=y)
    # keep rows where x != 99, treating missing x as "keep"
    df = df[((df.x != 99) | df.x.ismissing()).fillna(True)]
    dfp1 = df.shift(1, ['x'])
    dfn1 = df.shift(-1, ['x'])
    assert dfp1.x.tolist() == [None, 0, 1, 2, None]
    assert dfp1.y.tolist() == [0, 1, None, 9, 16]
    assert dfn1.x.tolist() == [1, 2, None, 4, None]
    assert dfn1.y.tolist() == [0, 1, None, 9, 16]
    assert dfp1.shift(1).x.tolist() == [None, None, 0, 1, 2]
    assert dfp1.shift(-1).x.tolist() == [0, 1, 2, None, None]
    assert dfp1.shift(-1, fill_value=99).x.tolist() == [0, 1, 2, None, 99]
    assert dfn1.shift(1).x.tolist() == [None, 1, 2, None, 4]
    assert dfn1.shift(-1).x.tolist() == [2, None, 4, None, None]
    assert dfn1.shift(-1, fill_value=99).x.tolist() == [2, None, 4, None, 99]
    assert df.shift(4).x.tolist() == [None, None, None, None, 0]
    assert df.shift(5).x.tolist() == [None, None, None, None, None]
    assert df.shift(6).x.tolist() == [None, None, None, None, None]
    assert df.shift(-4).x.tolist() == [4, None, None, None, None]
    assert df.shift(-5).x.tolist() == [None, None, None, None, None]
    assert df.shift(-6).x.tolist() == [None, None, None, None, None]
def test_shift_string(df_factory):
    """Shifting works on string columns, including a string fill_value."""
    x = np.arange(4)
    s = pa.array(['aap', None, 'noot', 'mies'])
    df = df_factory(x=x, s=s)
    assert df.shift(1).s.tolist() == [None, 'aap', None, 'noot']
    assert df.shift(-1).s.tolist() == [None, 'noot', 'mies', None]
    assert df.shift(1, ['s'], fill_value='VAEX').s.tolist() == ['VAEX', 'aap', None, 'noot']
    assert df.shift(-1, ['s'], fill_value='VAEX').s.tolist() == [None, 'noot', 'mies', 'VAEX']
def test_shift_virtual(df_factory):
    """Shifting interacts correctly with virtual columns: only the columns
    asked for are shifted, while dependent/underlying columns keep their
    original values."""
    x = [0, 1, 2, None, 4]
    y = [0, 1, None, 9, 16]
    xsp1 = [None, 0, 1, 2, None]
    xsn1 = [1, 2, None, 4, None]
    df = df_factory(x=x, y=y)
    # # a is a virtual column that depends on x, but we don't shift a
    df['a'] = df.x + 0
    df['b'] = df.a
    dfs = df.shift(1, ['x'])
    assert dfs.x.tolist() == xsp1
    assert dfs.a.tolist() == x
    assert dfs.y.tolist() == y
    dfs = df.shift(-1, ['x'])
    assert dfs.x.tolist() == xsn1
    assert dfs.a.tolist() == x
    assert dfs.y.tolist() == y
    # a is a virtual column that depends on x, we shift a, but we don't shift x
    # we expect, a: __x_shifted, x: __x
    df = df_factory(x=x, y=y)
    df['a'] = df.x + 0
    dfs = df.shift(1, ['a'])
    assert dfs.x.tolist() == x
    assert dfs.a.tolist() == xsp1
    assert dfs.y.tolist() == y
    dfs = df.shift(-1, ['a'])
    assert dfs.x.tolist() == x
    assert dfs.a.tolist() == xsn1
    assert dfs.y.tolist() == y
    # same, but now we also have a reference to a, which we also do not shift
    df = df_factory(x=x, y=y)
    df['a'] = df.x + 0
    df['b'] = df.a + 0
    dfs = df.shift(1, ['a'])
    assert dfs.x.tolist() == x
    assert dfs.a.tolist() == xsp1
    assert dfs.b.tolist() == x
    assert dfs.y.tolist() == y
    dfs = df.shift(-1, ['a'])
    assert dfs.x.tolist() == x
    assert dfs.a.tolist() == xsn1
    assert dfs.b.tolist() == x
    assert dfs.y.tolist() == y
def test_shift_dataset(chunk_size=2):
    """DatasetShifted exposes shifted columns under new names via its
    chunk iterator, leaving the original columns untouched."""
    x = np.arange(5)
    y = x**2
    ds = vaex.dataset.DatasetArrays(x=x, y=y)
    # start=end=0 means no actual shift: x_shift mirrors x
    dss = vaex.shift.DatasetShifted(ds, column_mapping={'x': 'x_shift'}, start=0, end=0)
    offsets, chunks = eat_chunks(dss.chunk_iterator({'x'}, chunk_size=chunk_size))
    assert chunks == {'x': x.tolist()}
    offsets, chunks = eat_chunks(dss.chunk_iterator({'x_shift'}, chunk_size=chunk_size))
    assert chunks == {'x_shift': x.tolist()}
    offsets, chunks = eat_chunks(dss.chunk_iterator({'x_shift', 'x'}, chunk_size=chunk_size))
    assert chunks == {'x': x.tolist(), 'x_shift': x.tolist()}
    # shift by one: first value becomes missing
    xs = [None] + x[:-1].tolist()
    dss = vaex.shift.DatasetShifted(ds, column_mapping={'x': 'x_shift'}, start=1, end=1)
    offsets, chunks = eat_chunks(dss.chunk_iterator({'x'}, chunk_size=chunk_size))
    assert chunks == {'x': x.tolist()}
    offsets, chunks = eat_chunks(dss.chunk_iterator({'x_shift'}, chunk_size=chunk_size))
    assert chunks == {'x_shift': xs}
    offsets, chunks = eat_chunks(dss.chunk_iterator({'x_shift', 'x', 'y'}, chunk_size=chunk_size))
    assert chunks == {'x': x.tolist(), 'x_shift': xs, 'y': y.tolist()}
    # two columns shifted
    dss = vaex.shift.DatasetShifted(ds, column_mapping={'x': 'x_shift', 'y': 'y_shift'}, start=1, end=1)
    dss_range = vaex.shift.DatasetShifted(ds, column_mapping={'x': 'x_shift', 'y': 'y_shift'}, start=1, end=2)
    offsets, chunks = eat_chunks(dss.chunk_iterator({'x_shift'}, chunk_size=chunk_size))
    assert chunks == {'x_shift': xs}
    assert dss.shape('x_shift') == dss.shape('x')
    # a single shift needs no mask; a range shift does
    assert not dss.is_masked('x_shift')
    assert dss_range.is_masked('x_shift')
@pytest.mark.parametrize("chunk_number", [0.5, 1, 2.5, 5.5])
@pytest.mark.parametrize("period", list(range(-3, 4)))
def test_shift_large_dataset(chunk_number, period):
    """Shifting must be correct across internal chunk boundaries."""
    chunk_size = 1024**2  # same value at _chunk_iterator()
    values = np.random.random(int(chunk_number * chunk_size))
    df = vaex.from_arrays(x=values)
    shifted = df.shift(period).values.reshape(-1)
    if period < 0:
        # head of the result is the tail of the input; vacated slots missing
        assert np.all(shifted[:period] == values[-period:])
        assert shifted[period:].tolist() == [None] * (-period)
    elif period > 0:
        assert np.all(shifted[period:] == values[:-period])
        assert shifted[:period].tolist() == [None] * period
    else:
        assert np.all(shifted == values)
@pytest.mark.parametrize("periods", [-1, 1, 2, -2])
def test_diff(df_factory, periods):
    """df.diff(periods) matches pandas DataFrame.diff for numeric data."""
    x = [0, 1, 2, 3, 4.0]
    df = df_factory(x=x)
    dfp = df.to_pandas_df(array_type='numpy')
    df = df.diff(periods, fill_value=np.nan)
    dfp = dfp.diff(periods)
    result = df['x'].to_numpy()
    expected = dfp['x'].to_numpy()
    # NaN positions must agree; the remaining values must match exactly
    assert np.all(np.isnan(result) == np.isnan(expected))
    mask = ~np.isnan(result)
    assert result[mask].tolist() == expected[mask].tolist()
def test_diff_list():
    """df.diff with an explicit column list matches pandas on those columns."""
    periods = 2
    x = np.arange(10, dtype='f8')
    y = x**2
    df = vaex.from_arrays(x=x, y=y)
    dfp = df.to_pandas_df(array_type='numpy')
    df = df.diff(periods, fill_value=np.nan, column=['x', 'y'])
    dfp = dfp.diff(periods)
    result = df['x'].to_numpy()
    expected = dfp['x'].to_numpy()
    # NaN positions must agree; the remaining values must match exactly
    assert np.all(np.isnan(result) == np.isnan(expected))
    mask = ~np.isnan(result)
    assert result[mask].tolist() == expected[mask].tolist()
@pytest.mark.parametrize("chunk_number", [0.5, 1, 2.5, 5.5])
def test_diff_large_dataset(chunk_number):
    """diff must be correct across internal chunk boundaries; compare
    against numpy.diff on the raw values."""
    chunk_size = 1024**2  # same value at _chunk_iterator()
    v=np.random.random(int(chunk_number*chunk_size))
    df = vaex.from_arrays(x=v)
    w = df.diff().values.reshape(-1)
    assert np.all(w[1:]==np.diff(v))
    # the first slot has no predecessor, hence missing
    assert w[:1].tolist()==[None]
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@tests@shift_test.py@.PATH_END.py
|
{
"filename": "_familysrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnel/textfont/_familysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``familysrc`` property of ``funnel.textfont``."""

    def __init__(
        self, plotly_name="familysrc", parent_name="funnel.textfont", **kwargs
    ):
        # Default edit_type is "none"; callers may override via kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnel@textfont@_familysrc.py@.PATH_END.py
|
{
"filename": "_text.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/contourcarpet/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``text`` data array of ``contourcarpet``."""

    def __init__(self, plotly_name="text", parent_name="contourcarpet", **kwargs):
        # Default edit_type is "calc"; callers may override via kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@contourcarpet@_text.py@.PATH_END.py
|
{
"filename": "_tickformatstopdefaults.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/histogram2d/colorbar/_tickformatstopdefaults.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``tickformatstopdefaults`` compound property of
    ``histogram2d.colorbar``; values are applied as defaults to each
    Tickformatstop object."""

    # NOTE: py2-targeted generated code, hence the explicit super(...) form
    # and no trailing comma after **kwargs.
    def __init__(
        self,
        plotly_name="tickformatstopdefaults",
        parent_name="histogram2d.colorbar",
        **kwargs
    ):
        super(TickformatstopdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
            data_docs=kwargs.pop(
                "data_docs",
                """
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@histogram2d@colorbar@_tickformatstopdefaults.py@.PATH_END.py
|
{
"filename": "viewer.py",
"repo_name": "glue-viz/glue",
"repo_path": "glue_extracted/glue-main/glue/viewers/image/viewer.py",
"type": "Python"
}
|
import os
from astropy.wcs import WCS
from glue.core.subset import roi_to_subset_state
from glue.core.coordinates import Coordinates, LegacyCoordinates
from glue.core.coordinate_helpers import dependent_axes
from glue.core.data_region import RegionData
from glue.viewers.matplotlib.viewer import SimpleMatplotlibViewer
from glue.viewers.scatter.layer_artist import ScatterLayerArtist, ScatterRegionLayerArtist
from glue.viewers.image.layer_artist import ImageLayerArtist, ImageSubsetLayerArtist
from glue.viewers.image.compat import update_image_viewer_state
from glue.viewers.image.state import ImageViewerState
from glue.viewers.image.frb_artist import imshow
from glue.viewers.image.composite_array import CompositeArray
__all__ = ['MatplotlibImageMixin', 'SimpleImageViewer']
def get_identity_wcs(naxis):
    """Build a trivial ``naxis``-dimensional WCS: axes named 'X', reference
    value 0 at reference pixel 1, and unit pixel scale."""
    identity = WCS(naxis=naxis)
    identity.wcs.ctype = naxis * ['X']
    identity.wcs.crval = naxis * [0.]
    identity.wcs.crpix = naxis * [1.]
    identity.wcs.cdelt = naxis * [1.]
    return identity
# Script snippet with .format() placeholders ({x_att_axis}, {x_ticklabel_size},
# ...) — presumably appended to exported plotting scripts; TODO confirm caller.
EXTRA_FOOTER = """
# Set tick label size - for now tick_params (called lower down) doesn't work
# properly, but these lines won't be needed in future.
ax.coords[{x_att_axis}].set_ticklabel(size={x_ticklabel_size})
ax.coords[{y_att_axis}].set_ticklabel(size={y_ticklabel_size})
""".strip()
class MatplotlibImageMixin(object):
def setup_callbacks(self):
    """Wire up state callbacks and install the composite image artist."""
    self._wcs_set = False
    self._changing_slice_requires_wcs_update = None
    self.axes.set_adjustable('datalim')
    # any change of the plotted attributes or reference data resets the WCS
    self.state.add_callback('x_att', self._set_wcs)
    self.state.add_callback('y_att', self._set_wcs)
    self.state.add_callback('slices', self._on_slice_change)
    self.state.add_callback('reference_data', self._set_wcs, echo_old=True)
    # single composite artist that blends all image layers
    self.axes._composite = CompositeArray()
    self.axes._composite_image = imshow(self.axes, self.axes._composite, aspect='auto',
                                        origin='lower', interpolation='nearest')
    self._set_wcs()
def update_x_ticklabel(self, *event):
    """Apply the state's x tick label size (overloaded here for WCSAxes)."""
    # Default to coordinate 0 unless the WCS is set up and an x attribute
    # is selected, in which case map the attribute's data axis onto the
    # matching WCSAxes coordinate index.
    axis = 0
    if getattr(self, '_wcs_set', False) and self.state.x_att is not None:
        axis = self.state.reference_data.ndim - self.state.x_att.axis - 1
    self.axes.coords[axis].set_ticklabel(size=self.state.x_ticklabel_size)
    self.redraw()
def update_y_ticklabel(self, *event):
    """Apply the state's y tick label size (overloaded here for WCSAxes)."""
    # Default to coordinate 1 unless the WCS is set up and a y attribute
    # is selected, in which case map the attribute's data axis onto the
    # matching WCSAxes coordinate index.
    axis = 1
    if getattr(self, '_wcs_set', False) and self.state.y_att is not None:
        axis = self.state.reference_data.ndim - self.state.y_att.axis - 1
    self.axes.coords[axis].set_ticklabel(size=self.state.y_ticklabel_size)
    self.redraw()
def _update_axes(self, *args):
    """Sync axis labels with the selected world attributes, then redraw."""
    state = self.state
    for att, label_name in ((state.x_att_world, 'x_axislabel'),
                            (state.y_att_world, 'y_axislabel')):
        if att is not None:
            setattr(state, label_name, att.label)
    self.axes.figure.canvas.draw_idle()
def add_data(self, data):
    """Add a dataset to the viewer, resetting the WCS for the first layer."""
    result = super(MatplotlibImageMixin, self).add_data(data)
    # If this is the first layer (or the first after all layers were
    # removed), set the WCS for the axes.
    if len(self.layers) == 1:
        self._set_wcs()
    return result
def _update_data_numerical(self, *args, **kwargs):
    super()._update_data_numerical(*args, **kwargs)
    # values changed in place, so force downstream state updates even
    # though the reference data object itself is unchanged
    self.state._reference_data_changed(force=True)
def _on_slice_change(self, event=None):
    # Only rebuild the WCS when the slice axes are coupled to it (flag is
    # computed in _set_wcs); keep the current limits either way.
    if self._changing_slice_requires_wcs_update:
        self._set_wcs(relim=False)
def _set_wcs(self, before=None, after=None, relim=True):
if self.state.x_att is None or self.state.y_att is None or self.state.reference_data is None:
return
# A callback event for reference_data is triggered if the choices change
# but the actual selection doesn't - so we avoid resetting the WCS in
# this case.
if after is not None and before is after:
return
ref_coords = getattr(self.state.reference_data, 'coords', None)
if ref_coords is None or isinstance(ref_coords, LegacyCoordinates):
self.axes.reset_wcs(slices=self.state.wcsaxes_slice,
wcs=get_identity_wcs(self.state.reference_data.ndim))
else:
self.axes.reset_wcs(slices=self.state.wcsaxes_slice, wcs=ref_coords)
# Reset the axis labels to match the fact that the new axes have no labels
self.state.x_axislabel = ''
self.state.y_axislabel = ''
self._update_appearance_from_settings()
self._update_axes()
self.update_x_ticklabel()
self.update_y_ticklabel()
if relim:
self.state.reset_limits()
# Determine whether changing slices requires changing the WCS
if ref_coords is None or type(ref_coords) is Coordinates:
self._changing_slice_requires_wcs_update = False
else:
ix = self.state.x_att.axis
iy = self.state.y_att.axis
x_dep = list(dependent_axes(ref_coords, ix))
y_dep = list(dependent_axes(ref_coords, iy))
if ix in x_dep:
x_dep.remove(ix)
if iy in x_dep:
x_dep.remove(iy)
if ix in y_dep:
y_dep.remove(ix)
if iy in y_dep:
y_dep.remove(iy)
self._changing_slice_requires_wcs_update = bool(x_dep or y_dep)
self._wcs_set = True
def apply_roi(self, roi, override_mode=None):
# Force redraw to get rid of ROI. We do this because applying the
# subset state below might end up not having an effect on the viewer,
# for example there may not be any layers, or the active subset may not
# be one of the layers. So we just explicitly redraw here to make sure
# a redraw will happen after this method is called.
self.redraw()
if len(self.layers) == 0:
return
if self.state.x_att is None or self.state.y_att is None or self.state.reference_data is None:
return
subset_state = roi_to_subset_state(roi,
x_att=self.state.x_att,
y_att=self.state.y_att)
self.apply_subset_state(subset_state, override_mode=override_mode)
def _scatter_artist(self, axes, state, layer=None, layer_state=None):
if len(self._layer_artist_container) == 0:
raise Exception("Can only add a scatter plot overlay once an image is present")
return ScatterLayerArtist(axes, state, layer=layer, layer_state=None)
def _region_artist(self, axes, state, layer=None, layer_state=None):
if len(self._layer_artist_container) == 0:
raise Exception("Can only add a region plot overlay once an image is present")
return ScatterRegionLayerArtist(axes, state, layer=layer, layer_state=None)
def get_data_layer_artist(self, layer=None, layer_state=None):
if isinstance(layer, RegionData):
cls = self._region_artist
elif layer.ndim == 1:
cls = self._scatter_artist
else:
cls = ImageLayerArtist
return self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
def get_subset_layer_artist(self, layer=None, layer_state=None):
if isinstance(layer.data, RegionData):
cls = self._region_artist
elif layer.ndim == 1:
cls = self._scatter_artist
else:
cls = ImageSubsetLayerArtist
return self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
@staticmethod
def update_viewer_state(rec, context):
return update_image_viewer_state(rec, context)
def show_crosshairs(self, x, y):
if getattr(self, '_crosshairs', None) is not None:
self._crosshairs.remove()
self._crosshairs, = self.axes.plot([x], [y], '+', ms=12,
mfc='none', mec='#d32d26',
mew=1, zorder=100)
self.axes.figure.canvas.draw_idle()
def hide_crosshairs(self):
if getattr(self, '_crosshairs', None) is not None:
self._crosshairs.remove()
self._crosshairs = None
self.axes.figure.canvas.draw_idle()
def _script_header(self):
imports = []
imports.append('import matplotlib.pyplot as plt')
imports.append('from glue.viewers.matplotlib.mpl_axes import init_mpl')
imports.append('from glue.viewers.image.composite_array import CompositeArray')
imports.append('from glue.viewers.image.frb_artist import imshow')
imports.append('from glue.viewers.matplotlib.mpl_axes import set_figure_colors')
script = ""
script += "fig, ax = init_mpl(wcs=True)\n"
script += f"ax.set_aspect('{self.state.aspect}')\n"
script += '\ncomposite = CompositeArray()\n'
script += f"image = imshow(ax, composite, origin='lower', interpolation='nearest', aspect='{self.state.aspect}')\n\n"
dindex = self.session.data_collection.index(self.state.reference_data)
script += f"ref_data = data_collection[{dindex}]\n"
if isinstance(self.state.reference_data.coords, (LegacyCoordinates, type(None))):
imports.append('from glue.viewers.image.viewer import get_identity_wcs')
ref_wcs = "get_identity_wcs(ref_data.ndim)"
else:
ref_wcs = "ref_data.coords"
script += f"ax.reset_wcs(slices={self.state.wcsaxes_slice}, wcs={ref_wcs})\n"
script += "# for the legend\n"
script += "legend_handles = []\n"
script += "legend_labels = []\n"
script += "legend_handler_dict = dict()\n\n"
return imports, script
def _script_footer(self):
imports, script = super(MatplotlibImageMixin, self)._script_footer()
options = dict(x_att_axis=0 if self.state.x_att is None else self.state.reference_data.ndim - self.state.x_att.axis - 1,
y_att_axis=1 if self.state.y_att is None else self.state.reference_data.ndim - self.state.y_att.axis - 1,
x_ticklabel_size=self.state.x_ticklabel_size,
y_ticklabel_size=self.state.y_ticklabel_size)
return [], EXTRA_FOOTER.format(**options) + os.linesep * 2 + script
class SimpleImageViewer(MatplotlibImageMixin, SimpleMatplotlibViewer):
    """A SimpleMatplotlibViewer with WCS-aware image display mixed in."""

    _state_cls = ImageViewerState

    def __init__(self, *args, **kwargs):
        # Image viewers always need WCSAxes, so force the flag regardless
        # of what the caller passed.
        kwargs.update(wcs=True)
        super().__init__(*args, **kwargs)
        # Explicitly initialize the mixin's callbacks/artists — presumably
        # not done by the base-class __init__ chain.
        MatplotlibImageMixin.setup_callbacks(self)
|
glue-vizREPO_NAMEgluePATH_START.@glue_extracted@glue-main@glue@viewers@image@viewer.py@.PATH_END.py
|
{
"filename": "freedman2020.py",
"repo_name": "ggalloni/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/H0/freedman2020.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import H0
# Docstring is kept verbatim: cobaya renders likelihood docstrings into the
# documentation. The redundant ``pass`` is dropped (the docstring already
# forms the class body).
class freedman2020(H0):
    r"""
    Local $H_0$ measurement from \cite{Freedman:2020dne}.
    """
|
ggalloniREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@H0@freedman2020.py@.PATH_END.py
|
{
"filename": "hull_demo.py",
"repo_name": "itseez/opencv",
"repo_path": "opencv_extracted/opencv-master/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py",
"type": "Python"
}
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
def thresh_callback(val):
    """Trackbar callback: detect edges, compute convex hulls, and display.

    Reads the module-global ``src_gray`` and renders the contours and their
    convex hulls (same random color per contour pair) into the 'Contours'
    window.
    """
    # Canny with the conventional 1:2 low:high threshold ratio
    edges = cv.Canny(src_gray, val, val * 2)
    contours, _ = cv.findContours(edges, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    # One convex hull per detected contour
    hulls = [cv.convexHull(contour) for contour in contours]
    # Black canvas matching the edge map, 3 channels for colored drawing
    canvas = np.zeros((edges.shape[0], edges.shape[1], 3), dtype=np.uint8)
    for idx in range(len(contours)):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        cv.drawContours(canvas, contours, idx, color)
        cv.drawContours(canvas, hulls, idx, color)
    cv.imshow('Contours', canvas)
# Load source image
parser = argparse.ArgumentParser(description='Code for Convex Hull tutorial.')
parser.add_argument('--input', help='Path to input image.', default='stuff.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
# Convert image to gray and blur it
# (src_gray is read as a module global by thresh_callback)
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
src_gray = cv.blur(src_gray, (3,3))
# Create Window
source_window = 'Source'
cv.namedWindow(source_window)
cv.imshow(source_window, src)
max_thresh = 255
thresh = 100 # initial threshold
# Trackbar drives thresh_callback; draw once with the initial threshold
cv.createTrackbar('Canny thresh:', source_window, thresh, max_thresh, thresh_callback)
thresh_callback(thresh)
cv.waitKey()
|
itseezREPO_NAMEopencvPATH_START.@opencv_extracted@opencv-master@samples@python@tutorial_code@ShapeDescriptors@hull@hull_demo.py@.PATH_END.py
|
{
"filename": "componentbuilder.py",
"repo_name": "AishwaryaC26/RIS-Vis",
"repo_path": "RIS-Vis_extracted/RIS-Vis-main/app/componentbuilder.py",
"type": "Python"
}
|
import dash_bootstrap_components as dbc
from dash import dcc, html
import elementstyling
from datetime import date, timedelta
import pandas as pd
import plotly.express as px
import os, ast
from dotenv import load_dotenv
load_dotenv()
time_constant = int(os.environ["TIME_CONSTANT"])
## Componentbuilder.py facilitates easily building graph/form components
'''
The graph component contains 2 modals: an expand modal (used to enlarge the graph) & the description modal (used to display a description of the graph)
card_title: string that will be displayed on top of the card
open_expand_button_id: id of expand button on main page to open modal
close_expand_button_id: id of expand button on modal to close modal
question_button_id: id of button to open description modal
modal_body_id: id of expand-modal body
modal_id: id of expand-modal
graph_div_id: id of div component where graph will be located
question_modal_id: id of modal for description
card_description: text that will go in description modal (defaults to empty string)
element_styling: styling of the component (defaults to None)
animate: set to True for map components only (defaults to False)
'''
def build_graph_component(card_title, open_expand_button_id, close_expand_button_id, question_button_id, modal_body_id, modal_id, graph_div_id, question_modal_id, card_description = "", element_styling = None, animate = False):
    """Build a dbc.Card containing a graph plus expand & description modals.

    See the module-level notes above for the meaning of each id argument.
    ``animate=True`` embeds a dcc.Graph directly (used for map components);
    otherwise a plain Div placeholder is filled in by callbacks.
    """
    if animate:
        graph_comp = dbc.Spinner([html.Div(dcc.Graph(id=graph_div_id, style={"height": "100%"}), id = f"""{graph_div_id}_div""")], color="primary")
    else:
        graph_comp = dbc.Spinner([html.Div(id=graph_div_id)], color="primary")
    # Header row (title + question/expand buttons), then the two modals,
    # then the spinner-wrapped graph itself.
    data_graph = dbc.Card([dbc.Row([dbc.Col(html.H4(card_title, className="card-title"), width = 6),
            dbc.Col(html.Div([ dbc.Button(html.I(className="fas fa-question", style={'fontSize': '30px'}), id=question_button_id, n_clicks=0, style = elementstyling.IMG_STYLING),
            dbc.Button(html.I(className="fas fa-expand", style={'fontSize': '30px'}), id=open_expand_button_id, n_clicks=0, style = elementstyling.IMG_STYLING),
            ], style = {"float": "right"}), width = 6)]),
            # Fullscreen "expand" modal; body is populated via modal_body_id
            dbc.Modal(
            [
                dbc.ModalHeader([html.H4(card_title, className="card-title"),
                dbc.Button(html.I(className="fas fa-expand", style={'fontSize': '30px'}), id=close_expand_button_id, n_clicks=0, style = {"backgroundColor": "transparent"})], close_button=False),
                dbc.ModalBody(id = modal_body_id),
            ],
            id=modal_id,
            is_open=False,
            fullscreen=True,
            keyboard=False,
            backdrop="static",
            ),
            # Static description modal showing card_description
            dbc.Modal(
            [
                dbc.ModalHeader([html.H4("Description", className="card-title")]),
                dbc.ModalBody(html.H5(card_description))
            ],
            id=question_modal_id,
            is_open=False,
            size="xl",
            keyboard=False,
            backdrop="static",
            ),
            graph_comp], body=True, style=element_styling)
    return data_graph
'''
This method builds a graph component with ONLY an expand modal
card_title_id: id of card title
open_expand_button_id: id of button to open expand modal
close_expand_button_id: id of button to close expand modal
modal_body_id: id of modal body
modal_id: id of modal
graph_div_id: id of div component that will contain graph
full_card_id: id of card component
element_styling: styling of graph card (defaults to None)
'''
def build_graph_component_noq(card_title_id, open_expand_button_id, close_expand_button_id, modal_body_id, modal_id, graph_div_id, full_card_id, element_styling = None):
    """Build a graph card with only an expand modal (no description modal).

    Unlike build_graph_component, the title is set via a component id
    (card_title_id) rather than a literal string, and the whole card gets
    full_card_id so it can be shown/hidden by callbacks.
    """
    data_graph = dbc.Card([dbc.Row([dbc.Col(html.H4(id = card_title_id, className="card-title"), width = 6),
            dbc.Col(html.Div([dbc.Button(html.I(className="fas fa-expand", style={'fontSize': '30px'}), id=open_expand_button_id, n_clicks=0, style = elementstyling.IMG_STYLING),
            ], style = {"float": "right"}), width = 6)]),
            # Fullscreen expand modal; body filled by callbacks via modal_body_id
            dbc.Modal(
            [
                dbc.ModalHeader([html.H6(className="card-title"),
                dbc.Button(html.I(className="fas fa-expand", style={'fontSize': '30px'}), id=close_expand_button_id, n_clicks=0, style = {"backgroundColor": "transparent"})], close_button=False),
                dbc.ModalBody(id = modal_body_id),
            ],
            id=modal_id,
            is_open=False,
            fullscreen=True,
            keyboard=False,
            backdrop="static",
            ),
            html.Div(id=graph_div_id)], id = full_card_id, body=True, style=element_styling)
    return data_graph
'''
The form component contains 1 modal: a description modal (used to display a description of the form/overall page)
card_title: string that will be displayed on top of the card
dropdowns: list formatted as [label description, dropdown options, dropdown id] for each dropdown in the form
dateranges: list formatted as [label description, datepicker id] for each datepicker in the form
submitid: id of form submit button
question_button_id: id of description modal button
question_modal_id: id of description modal
card_description: description that will be placed in description modal
element_styling: styling of card component (defaults to None)
card_coords: when description component should include a map with locations of various stations, card_coords is a list formatted as:
- [station, latitude, longitude]
'''
def build_form_component(card_title, dropdowns, dateranges, submitid, question_button_id, question_modal_id, card_description = "", element_styling = None, card_coords = None,):
    """Build a dbc.Card form with dropdowns, date pickers and a submit button.

    ``dropdowns`` entries are [label, options, id]; ``dateranges`` entries are
    [label, id]. If ``card_coords`` is given, the description modal also shows
    a station-location map (see get_map_component).
    """
    # Description modal body: optional station map appended after the text
    modal_body = html.H5([card_description, html.Br(), html.Br(), "Station Locations:", get_map_component(card_coords)]) if card_coords else \
        html.H5([card_description, html.Br(), html.Br()])
    form = []
    for drop in dropdowns:
        desc, options, id = drop[0], drop[1], drop[2]
        # First option is the default selection
        curr_dropdown = html.Div(
        [
            dbc.Label(desc),
            dcc.Dropdown(
                options,
                options[0],
                id=id,
                clearable=False,
            ),
        ],
        )
        form.append(curr_dropdown)
        form.append(html.Br())
    for dater in dateranges:
        desc, id = dater[0], dater[1]
        # Default range: a 5-day window ending time_constant days ago
        daterange = html.Div(
        [ dbc.Row([dbc.Label(desc)]),
            dcc.DatePickerRange(
            id=id,
            min_date_allowed=date(1900, 1, 1),
            max_date_allowed=date.today(),
            #start_date=date.today() - timedelta(10),
            #end_date=date.today(),
            start_date=date.today() - timedelta(time_constant),
            end_date=date.today() - timedelta(time_constant -5)
            )
        ],
        )
        form.append(daterange)
        form.append(html.Br())
    form.append(dbc.Button("Submit", color="primary",
                            className="mb-3", id = submitid))
    return dbc.Card([dbc.Row([dbc.Col(html.H4(card_title, className="card-title"), width = 9),
            dbc.Col(html.Div([ dbc.Button(html.I(className="fas fa-question", style={'fontSize': '30px'}), id=question_button_id, n_clicks=0, style = elementstyling.IMG_STYLING),
            ], style = {"float": "right"}), width = 3)]),
            dbc.Modal(
            [
                dbc.ModalHeader([html.H4("Description", className="card-title")]),
                dbc.ModalBody(modal_body)
            ],
            id=question_modal_id,
            is_open=False,
            size="xl",
            keyboard=False,
            backdrop="static",
            ), dbc.Form(form)], body=True, style = element_styling)
'''
Builds map component given a list of coordinates formatted as:
[[station #1 name, station #1 latitude, station #1 longitude], [station #2 name...]
'''
def get_map_component(coords):
    """Build a dcc.Graph mapping station locations on a USGS imagery basemap.

    Parameters
    ----------
    coords : list of [station_name, latitude, longitude]; latitude/longitude
        may be numbers or strings (e.g. straight from a database query).

    Returns
    -------
    dcc.Graph with a dark-themed scatter_mapbox figure, or None if ``coords``
    is empty or None.
    """
    if not coords:
        return None
    df = pd.DataFrame(coords, columns=['Station', 'Latitude', 'Longitude'])
    # Coerce coordinates to float on the DataFrame. (The previous version
    # mutated the caller's list in place, and its comment claimed an int
    # conversion — it was float.)
    df['Latitude'] = df['Latitude'].astype(float)
    df['Longitude'] = df['Longitude'].astype(float)
    fig = px.scatter_mapbox(df, lat="Latitude", lon="Longitude", color="Station", zoom=3, height=245)
    # Blank mapbox style with a USGS imagery raster layer (no token needed)
    fig.update_layout(
        mapbox_style="white-bg",
        mapbox_layers=[
            {
                "below": 'traces',
                "sourcetype": "raster",
                "sourceattribution": "United States Geological Survey",
                "source": [
                    "https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer/tile/{z}/{y}/{x}"
                ]
            }
        ])
    fig.update_traces(marker=dict(size=20))
    fig.update_layout(margin=dict(l=5, r=5, b=5, t=20))
    fig.update_layout(template='plotly_dark')
    return dcc.Graph(figure=fig)
|
AishwaryaC26REPO_NAMERIS-VisPATH_START.@RIS-Vis_extracted@RIS-Vis-main@app@componentbuilder.py@.PATH_END.py
|
{
"filename": "flux_ovd.py",
"repo_name": "dazhiUBC/SCUBA2_MF",
"repo_path": "SCUBA2_MF_extracted/SCUBA2_MF-main/blank/flux_ovd.py",
"type": "Python"
}
|
import pandas as pd
from MF import *
from astropy.coordinates import SkyCoord
ntrials = 10000 # number of mock maps
def read_ca(fname):
    """Read a space-delimited source catalogue.

    Returns (flux, err, coords) where coords is a SkyCoord built from the
    'ra' (hourangle) and 'dec' (deg) columns.
    """
    table = pd.read_csv(fname, delimiter=' ')
    positions = SkyCoord(ra=table['ra'], dec=table['dec'], unit=(u.hourangle, u.deg))
    return table['flux'], table['err'], positions
def read_sim(fname):
    """Read a simulated (mock-map) catalogue: returns (flux, SkyCoord)."""
    table = pd.read_csv(fname, delimiter=' ')
    positions = SkyCoord(ra=table['ra'], dec=table['dec'], unit=(u.hourangle, u.deg))
    return table['flux'], positions
def read_spu(fname):
    """Read a spurious-source catalogue: returns (spu_flux, SkyCoord)."""
    table = pd.read_csv(fname, delimiter=' ')
    positions = SkyCoord(ra=table['ra'], dec=table['dec'], unit=(u.hourangle, u.deg))
    return table['spu_flux'], positions
# read the actual catalog
f_my, e_my, c_my = read_ca('../sources_4C23_850_cal_crop_MF.dat')
# define the position of the HzRGs
rg = SkyCoord(ra=316.811766,dec=23.529172, unit= u.deg)
# calculate the (catalog & simulation) counts as function of flux in both inner and outer region, the flux bin is for every 2mJy
fsimin = []
fsimout = []
fsim = []
# 5 flux bins of 2 mJy spanning 2-12 mJy
flux_bin = np.linspace(2,12,6)
flux = np.zeros(5)
# the mean value for each bin
for i in range(5):
    flux[i] = 0.5*flux_bin[i]+0.5*flux_bin[i+1]
# The number counts for each mock
for i in range(ntrials):
    # Per-mock histograms: all sources, inner (<=4') and outer (>4')
    # regions relative to the radio galaxy position rg
    fnc_sim = np.zeros(5)
    fnc_simin = np.zeros(5)
    fnc_simout = np.zeros(5)
    f_sim, c_sim = read_sim('mock_850/mock_map'+str(i)+'_rec.dat')
    for j in range(len(c_sim)):
        for k in range(5):
            if flux_bin[k]<=f_sim[j]<flux_bin[k+1]:
                fnc_sim[k] = fnc_sim[k]+1
        if c_sim[j].separation(rg).arcmin<=4:
            for k in range(5):
                if flux_bin[k]<=f_sim[j]<flux_bin[k+1]:
                    fnc_simin[k] = fnc_simin[k]+1
        else:
            for k in range(5):
                if flux_bin[k]<=f_sim[j]<flux_bin[k+1]:
                    fnc_simout[k] = fnc_simout[k]+1
    fsim.append(fnc_sim)
    fsimin.append(fnc_simin)
    fsimout.append(fnc_simout)
# The catalog number counts separate the inner and outer region
fn = np.zeros(5)
fnin = np.zeros(5)
fnout = np.zeros(5)
for i in range(len(c_my)):
    # Same binning as the mocks: total, inner (<=4'), outer (>4')
    for j in range(5):
        if flux_bin[j]<=f_my[i]<flux_bin[j+1]:
            fn[j] = fn[j]+1
    if c_my[i].separation(rg).arcmin<=4:
        for j in range(5):
            if flux_bin[j]<=f_my[i]<flux_bin[j+1]:
                fnin[j] = fnin[j]+1
    else:
        for j in range(5):
            if flux_bin[j]<=f_my[i]<flux_bin[j+1]:
                fnout[j] = fnout[j]+1
# due to the low number (0) problem of the bright end, use the average of 20 maps
fsim = np.array(fsim)
fsimin = np.array(fsimin)
fsimout = np.array(fsimout)
# Cache the per-mock histograms for later reuse
np.save('flux/all.npy', np.array(fsim))
np.save('flux/in.npy', np.array(fsimin))
np.save('flux/out.npy', np.array(fsimout))
#f_min = []
#f_sin = [] it is not gaussian distribution, therefore standard deviation not accurate
#f_mout = []
#f_sout = []
#fin = []
#fout = []
#fin_min = [] # 68 and 32
#fin_max = []
#fout_min = []
#fout_max = []
# Overdensity samples (one per group of 20 mocks) and their summary
# statistics: median plus 15.9/84.1 percentiles (approx. +/-1 sigma)
ov = []
ov_in = []
ov_out = []
ov_med = []
ov_in_med = []
ov_out_med = []
ov_84 = []
ov_in84 = []
ov_out84 = []
ov_16 = []
ov_in16 = []
ov_out16 = []
# Average mocks in groups of 20, then overdensity = (observed - mock)/mock
for i in range(int(ntrials/20)):
    fsimm = np.nanmean(fsim[i*20:(i+1)*20],axis=0)
    fsiminm = np.nanmean(fsimin[i*20:(i+1)*20],axis=0) # simulation inner region mean value
    fsimoutm = np.nanmean(fsimout[i*20:(i+1)*20],axis=0)
    #fin.append(fsiminm)
    #fout.append(fsimoutm)
    ov.append((fn-fsimm)/fsimm)
    ov_in.append((fnin-fsiminm)/fsiminm)
    ov_out.append((fnout-fsimoutm)/fsimoutm)
ov = np.array(ov)
ov_in = np.array(ov_in)
ov_out = np.array(ov_out)
# statistic
for i in range(5):
    #f_min.append(np.median(fin[:,i]))
    #f_sin.append(np.nanstd(fin[:,i]))
    #f_mout.append(np.median(fout[:,i]))
    #f_sout.append(np.nanstd(fout[:,i]))
    #fin_min.append(np.nanpercentile(fin[:,i],32))
    #fin_max.append(np.nanpercentile(fin[:,i],68))
    #fout_min.append(np.nanpercentile(fout[:,i],32))
    #fout_max.append(np.nanpercentile(fout[:,i],68))
    ov_med.append(np.median(ov[:,i]))
    ov_in_med.append(np.median(ov_in[:,i]))
    ov_out_med.append(np.median(ov_out[:,i]))
    ov_84.append(np.nanpercentile(ov[:,i],84.1))
    ov_in84.append(np.nanpercentile(ov_in[:,i],84.1))
    ov_out84.append(np.nanpercentile(ov_out[:,i],84.1))
    ov_16.append(np.nanpercentile(ov[:,i],15.9))
    ov_in16.append(np.nanpercentile(ov_in[:,i],15.9))
    ov_out16.append(np.nanpercentile(ov_out[:,i],15.9))
ov_med = np.array(ov_med)
ov_in_med = np.array(ov_in_med)
ov_out_med = np.array(ov_out_med)
ov_84 = np.array(ov_84)
ov_in84 = np.array(ov_in84)
ov_out84 = np.array(ov_out84)
ov_16 = np.array(ov_16)
ov_in16 = np.array(ov_in16)
ov_out16 = np.array(ov_out16)
# make correction due to 20 maps
#ov_84 = np.sqrt(20)*(ov_84-ov_med)/(ov_med+1)**2/fn + ov_med
#ov_in84 = np.sqrt(20)*(ov_in84-ov_in_med)/(ov_in_med+1)**2/fnin + ov_in_med
#ov_out84 = np.sqrt(20)*(ov_out84-ov_out_med)/(ov_out_med+1)**2/fnout + ov_out_med
#ov_16 = np.sqrt(20)*(ov_16-ov_med)/(ov_med+1)**2/fn + ov_med
#ov_in16 = np.sqrt(20)*(ov_in16-ov_in_med)/(ov_in_med+1)**2/fnin + ov_in_med
#ov_out16 = np.sqrt(20)*(ov_out16-ov_out_med)/(ov_out_med+1)**2/fnout + ov_out_med
# plot as a function of flux
# Line plot: medians with shaded 15.9-84.1 percentile bands
plt.plot(flux,ov_med, color='tab:blue',label=r"Overdensity")
plt.plot(flux,ov_in_med, color='tab:green',label=r"Overdensity($\leq$4')")
plt.plot(flux,ov_out_med,color= 'tab:red',label="Overdensity(>4')" )
plt.scatter(flux,ov_med, color='tab:blue')
plt.scatter(flux,ov_in_med, color='tab:green')#,label="Overdensity(<4')")
plt.scatter(flux,ov_out_med,color= 'tab:red')#,label="Overdensity(>4')" )
plt.fill_between(flux,ov_16,ov_84,color='tab:blue',alpha=0.1 )
plt.fill_between(flux,ov_in16,ov_in84,color='tab:green',alpha=0.1 )
plt.fill_between(flux,ov_out16,ov_out84,color='tab:red',alpha=0.1 )
plt.ylim(-0.6,7.8)
plt.legend()
plt.xlabel('Flux (mJy)')
plt.ylabel('Overdensity')
plt.savefig('plot/ovd_f.pdf',bbox_inches='tight')
plt.savefig('plot/ovd_f.eps',bbox_inches='tight')
plt.close()
# bar
plt.bar(flux,ov_med,1.6,color='tab:blue',alpha = 0.6, label=r"Overdensity") # bar demonstration
plt.bar(flux-0.4,ov_in_med,0.8,color= 'tab:green',alpha = 0.6, label=r"Overdensity($\leq$4')" )
plt.bar(flux+0.4,ov_out_med,0.8,color= 'tab:red',alpha = 0.6, label="Overdensity(>4')" )
#plt.tick_params(labelsize = 15)
plt.legend()
plt.xlabel('Flux (mJy)')#,fontsize=15)
plt.ylabel('Overdensity')#,fontsize=15)
plt.savefig('plot/ovd_f_bar.pdf',bbox_inches='tight')
plt.savefig('plot/ovd_f_bar.eps',bbox_inches='tight')
plt.close()
|
dazhiUBCREPO_NAMESCUBA2_MFPATH_START.@SCUBA2_MF_extracted@SCUBA2_MF-main@blank@flux_ovd.py@.PATH_END.py
|
{
"filename": "polar_bar.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/pie_and_polar_charts/polar_bar.py",
"type": "Python"
}
|
"""
=======================
Bar chart on polar axis
=======================
Demo of bar plot on a polar axis.
"""
import matplotlib.pyplot as plt
import numpy as np
# Fixing random state for reproducibility
np.random.seed(19680801)
# Compute pie slices
N = 20
# endpoint=False avoids duplicating the 0 / 2*pi angle
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
# Map radii (0..10) onto the colormap's 0..1 range
colors = plt.cm.viridis(radii / 10.)
ax = plt.subplot(projection='polar')
ax.bar(theta, radii, width=width, bottom=0.0, color=colors, alpha=0.5)
plt.show()
# %%
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.bar` / `matplotlib.pyplot.bar`
# - `matplotlib.projections.polar`
#
# .. tags::
#
# plot-type: pie
# plot-type: bar
# level: beginner
# purpose: showcase
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@pie_and_polar_charts@polar_bar.py@.PATH_END.py
|
{
"filename": "subscriber.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/bridge/rest/subscriber.py",
"type": "Python"
}
|
#####################################################################################
#
# Copyright (c) typedef int GmbH
# SPDX-License-Identifier: EUPL-1.2
#
#####################################################################################
import json
from functools import partial
from twisted.internet.defer import inlineCallbacks
from twisted.web.http_headers import Headers
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
from autobahn.wamp.types import SubscribeOptions
from txaio import make_logger
class MessageForwarder(ApplicationSession):
    """WAMP session that forwards subscribed events to HTTP endpoints.

    For each entry in ``config.extra["subscriptions"]`` (a dict with at
    least ``topic`` and ``url`` keys, optionally ``match``), subscribes to
    the topic and sends a JSON payload ``{"args": ..., "kwargs": ...}`` to
    the associated URL using the configured HTTP ``method`` (default POST).
    If ``expectedcode`` is configured, any other response code raises an
    ApplicationError.
    """

    log = make_logger()

    def __init__(self, *args, **kwargs):
        # Allow a treq-compatible web transport to be injected (e.g. for
        # tests); fall back to treq itself.
        self._webtransport = kwargs.pop("webTransport", None)
        if not self._webtransport:
            import treq
            self._webtransport = treq
        super(MessageForwarder, self).__init__(*args, **kwargs)

    @inlineCallbacks
    def onJoin(self, details):
        """Subscribe to every configured topic once the session joins."""
        subscriptions = self.config.extra["subscriptions"]
        debug = self.config.extra.get("debug", False)
        method = self.config.extra.get("method", "POST")
        expectedCode = self.config.extra.get("expectedcode")

        @inlineCallbacks
        def on_event(url, *args, **kwargs):
            # Forward the WAMP event as a compact JSON HTTP request body.
            headers = Headers({b"Content-Type": [b"application/json"]})
            body = json.dumps({
                "args": args,
                "kwargs": kwargs
            },
                              sort_keys=False,
                              separators=(',', ':'),
                              ensure_ascii=False)
            # http://treq.readthedocs.org/en/latest/api.html#treq.request
            res = yield self._webtransport.request(method,
                                                   url.encode('utf8'),
                                                   data=body.encode('utf8'),
                                                   headers=headers)
            if expectedCode:
                # was: "if not res.code == expectedCode" — same behavior,
                # clearer comparison
                if res.code != expectedCode:
                    raise ApplicationError("Request returned {}, not the expected {}".format(res.code, expectedCode))
            if debug:
                content = yield self._webtransport.text_content(res)
                self.log.debug(content)

        for s in subscriptions:
            # Assert that there's "topic" and "url" entries
            assert "topic" in s
            assert "url" in s
            yield self.subscribe(partial(on_event, s["url"]),
                                 s["topic"],
                                 options=SubscribeOptions(match=s.get("match", "exact")))
            self.log.debug("MessageForwarder subscribed to {topic}", topic=s["topic"])
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@bridge@rest@subscriber.py@.PATH_END.py
|
{
"filename": "_labelfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2dcontour/contours/_labelfont.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LabelfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated compound validator for
    ``histogram2dcontour.contours.labelfont``; the data_docs string below is
    emitted verbatim by the plotly code generator."""
    def __init__(
        self,
        plotly_name="labelfont",
        parent_name="histogram2dcontour.contours",
        **kwargs,
    ):
        super(LabelfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Labelfont"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size
            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2dcontour@contours@_labelfont.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "nickhand/pyRSD",
"repo_path": "pyRSD_extracted/pyRSD-master/pyRSD/__init__.py",
"type": "Python"
}
|
"""
pyRSD
``pyRSD`` is a collection of algorithms to compute the redshift space matter
power spectra using perturbation theory and the redshift space distortion (RSD)
model based on a distribution function velocity moments approach
for all features of ``pyRSD``, you need to import one of the
following subpackages:
Subpackages
-----------
data
Simulation data.
rsd
RSD power spectra.
pygcl
Python bindings for a C++ "General Cosmology Library"
"""
# save the absolute path of the package and data directories
import os.path as _osp
import sys
import os
pkg_dir = _osp.abspath(_osp.dirname(__file__))
data_dir = _osp.join(pkg_dir, 'data')
# Read the Docs sets this env var during doc builds
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# every module uses numpy
import numpy
try:
    from pyRSD import pygcl
except Exception as msg:
    if on_rtd:
        # On Read the Docs the compiled extension is unavailable; degrade
        # gracefully so docs can still build.
        pygcl = None
    else:
        # Re-raise with the full traceback embedded, since pygcl is required
        import traceback
        tb = traceback.format_exc()
        raise ImportError("Cannot use package without pygcl\n%s" %tb)
def get_data_files():
    """
    Returns the path of data files, which are installed to the package directory.

    The returned dict maps configuration keys (hyrec tables and the BBN
    abundance file) to absolute paths under ``<package>/data/class``.
    """
    import os
    class_dir = os.path.join(os.path.dirname(__file__), 'data', 'class')
    hyrec_dir = os.path.join(class_dir, 'hyrec')
    return dict(
        Alpha_inf_hyrec_file=os.path.join(hyrec_dir, 'Alpha_inf.dat'),
        R_inf_hyrec_file=os.path.join(hyrec_dir, 'R_inf.dat'),
        two_photon_tables_hyrec_file=os.path.join(hyrec_dir, 'two_photon_tables.dat'),
        sBBN_file=os.path.join(class_dir, 'bbn', 'sBBN.dat'),
    )
def _init():
    """Point the pygcl/CLASS static file paths at the bundled data files."""
    r = get_data_files()
    # setting static variables with swig is tricky.
    # see http://www.swig.org/Doc3.0/SWIGDocumentation.html#Python_nn20
    from .gcl import cvar
    cvar.ClassEngine_Alpha_inf_hyrec_file = r['Alpha_inf_hyrec_file']
    cvar.ClassEngine_R_inf_hyrec_file = r['R_inf_hyrec_file']
    cvar.ClassEngine_two_photon_tables_hyrec_file = r['two_photon_tables_hyrec_file']
    cvar.ClassEngine_sBBN_file = r['sBBN_file']
# Configure data-file paths only when the compiled extension imported
# (skipped on Read the Docs, where pygcl is None)
if pygcl is not None:
    _init(); del _init
from .version import __version__
|
nickhandREPO_NAMEpyRSDPATH_START.@pyRSD_extracted@pyRSD-master@pyRSD@__init__.py@.PATH_END.py
|
{
"filename": "test_cb_ssl.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/test/functests/cbtests/test_cb_ssl.py",
"type": "Python"
}
|
###############################################################################
#
# Copyright (c) typedef int GmbH. Licensed under EUPLv1.2.
#
###############################################################################
from __future__ import print_function
from __future__ import absolute_import
import random
from functools import partial
from os.path import join
from tempfile import mkdtemp
from subprocess import check_call
from psutil import Process
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp import types
from autobahn.wamp.exception import ApplicationError
from autobahn.twisted.component import Component
from twisted.internet.defer import Deferred, FirstError, inlineCallbacks
from twisted.internet.process import ProcessExitedAlready
from twisted.internet.ssl import CertificateOptions
from twisted.python import log
from OpenSSL import crypto
import pytest
import treq
from ..helpers import *
@inlineCallbacks
def test_verification(crypto_crossbar, request, self_signed_cert):
    """
    Run a session with my own cert.
    """
    privkey, certfile = self_signed_cert
    # load our self-signed cert as the only certificate-authority
    with open(certfile, 'r') as crt:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, crt.read())
    options = CertificateOptions(caCerts=[cert])
    d = functest_session(url=u"wss://localhost:6464/tls_ws", realm=u"auth_realm", ssl=options)
    # Race the connection against a 5-second timeout; d must win
    results = yield DeferredList([d, sleep(5)], fireOnOneCallback=True, fireOnOneErrback=True)
    assert d.called, "timed out without connecting successfully"
@inlineCallbacks
def test_verification_fails(reactor, crypto_crossbar, request, self_signed_cert):
    """
    TLS fails to a self-signed cert.

    The server presents a self-signed certificate and we connect with the
    default trust roots, so the TLS handshake must fail.
    """
    tls_client = Component(
        transports=u"wss://localhost:6464/tls_ws",
        is_fatal=lambda _: True,
    )
    d = tls_client.start(reactor)
    # Bug fix: the assert used to live inside the try block, so its
    # AssertionError was swallowed by `except Exception` and the test passed
    # even when the connection (wrongly) succeeded. Record the outcome and
    # assert after the except.
    succeeded = False
    try:
        session = yield d
        succeeded = True
    except Exception as e:
        print("failed (we wanted this): {}".format(e))
    assert not succeeded, "Connection should fail due to certificate error"
@pytest.mark.parametrize(
    'close_style', (
        # XXX FIXME not working for some reason ...
        # 'transport.sendClose',
        # 'transport.close',
        'session.leave',
    )
)
@inlineCallbacks
def test_client_close(crypto_crossbar, request, self_signed_cert, close_style):
    """
    is sendClose() sufficient to actually-close underlying transport?
    """
    (privkey, certfile) = self_signed_cert
    # load our self-signed cert as the only certificate-authority
    with open(certfile, 'r') as crt:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, crt.read())
    options = CertificateOptions(caCerts=[cert])
    # snapshot of this process' open sockets *before* we connect, so the
    # later counts measure only the connections created in this test
    existing = Process().connections()
    sessions = []
    for x in range(10):
        session = yield functest_session(
            url=u"wss://localhost:6464/tls_ws",
            realm=u"auth_realm",
            ssl=options,
        )
        sessions.append(session)
    yield sleep(1)  # overkill? let sessions start for-sure
    started = Process().connections()
    # each of the 10 sessions should account for exactly one new socket
    assert len(started) - len(existing) == 10
    for session in sessions:
        assert session._transport is not None
        # close every session using the style under test
        if close_style == 'session.leave':
            yield session.leave()
        elif close_style == 'transport.close':
            yield session._transport.close()
        elif close_style == 'transport.sendClose':
            session._transport.sendClose()
        else:
            raise RuntimeError("Unknown close_style from paramtrize")
    yield sleep(1)  # overkill, but make sure connections can close
    finished = Process().connections()
    # all sockets we opened must be gone again, proving the close style
    # really tore down the underlying transport
    assert len(finished) == len(existing)
@inlineCallbacks
def test_untrusted_selfsigned(crypto_crossbar, request, self_signed_cert):
    """
    Confirm we *don't* connect to untrusted server.

    Unlike test_verification, no ``ssl=`` options are passed, so the
    platform's default trust roots apply -- which do not include our
    self-signed cert.
    """
    (privkey, certfile) = self_signed_cert
    # load our self-signed cert as the only certificate-authority
    with open(certfile, 'r') as crt:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, crt.read())
    options = CertificateOptions(caCerts=[cert])
    # letting the defaults go through, which should mean we don't trust this connection
    d = functest_session(url=u"wss://localhost:6464/tls_ws", realm=u"auth_realm")
    timeout = sleep(5)
    results = yield DeferredList([d, timeout], fireOnOneCallback=True, fireOnOneErrback=True)
    # results is a 2-tuple: (result, index of the Deferred that fired);
    # index 1 means the timeout won, i.e. we never connected (what we want).
    # BUG FIX: was ``results[1] is 1`` -- identity comparison with an int
    # literal only works via CPython's small-int cache and is a
    # SyntaxWarning on Python >= 3.8; value equality is what is meant.
    assert results[1] == 1, "shouldn't have connected successfully"
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@test@functests@cbtests@test_cb_ssl.py@.PATH_END.py
|
{
"filename": "file_io_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/lib/io/file_io_test.py",
"type": "Python"
}
|
# This Python file uses the following encoding: utf-8
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Testing File IO operations in file_io.py."""
import os.path
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
class PathLike(object):
    """Minimal ``os.PathLike`` shim for Python versions before 3.6."""

    def __init__(self, name):
        # The wrapped path string; exposed unchanged by both protocols below.
        self.name = name

    def __fspath__(self):
        return self.name

    # str(p) and os.fspath(p) are the same operation for this shim.
    __str__ = __fspath__
# Decorator that runs a test method twice: once joining paths as plain
# strings and once wrapping the joined path in PathLike, so both path
# flavors accepted by file_io are exercised.
run_all_path_types = parameterized.named_parameters(
    ("str", file_io.join),
    ("pathlike", lambda *paths: PathLike(file_io.join(*paths))))
class FileIoTest(test.TestCase, parameterized.TestCase):
def setUp(self):
    # Fresh scratch directory under the test temp dir; every test in this
    # class creates its files and subdirectories beneath self._base_dir.
    self._base_dir = file_io.join(self.get_temp_dir(), "base_dir")
    file_io.create_dir(self._base_dir)
def tearDown(self):
    # Remove everything a test created so test cases stay independent.
    file_io.delete_recursively(self._base_dir)
def testEmptyFilename(self):
    # Constructing a FileIO with an empty path succeeds lazily; the
    # NotFoundError only surfaces once we actually try to read.
    f = file_io.FileIO("", mode="r")
    with self.assertRaises(errors.NotFoundError):
        _ = f.read()
def testJoinUrlLike(self):
    """file_io.join joins url-like filesystems with '/' on all platform."""
    for fs in ("ram://", "gcs://", "file://"):
        expected = fs + "exists/a/b/c.txt"
        # Components may be fully split, attached to the scheme, or
        # partially pre-joined -- every variant must give the same result.
        self.assertEqual(file_io.join(fs, "exists", "a", "b", "c.txt"), expected)
        self.assertEqual(file_io.join(fs + "exists", "a", "b", "c.txt"), expected)
        self.assertEqual(file_io.join(fs, "exists/a", "b", "c.txt"), expected)
        self.assertEqual(file_io.join(fs, "exists", "a", "b/c.txt"), expected)
def testJoinFilesystem(self):
    """file_io.join respects the os.path.join behavior for native filesystems."""
    # The no-separator case does not depend on ``sep``; previously it was
    # asserted (identically) on every loop iteration -- check it once.
    self.assertEqual(os.path.join("a", "b", "c"), file_io.join("a", "b", "c"))
    # os.sep is always "/" or "\\", so a set also removes the duplicate.
    for sep in {"/", "\\", os.sep}:
        # Separator at the front, in the middle, attached to a component,
        # and trailing -- file_io.join must match os.path.join in each case.
        self.assertEqual(
            os.path.join(sep + "a", "b", "c"), file_io.join(sep + "a", "b", "c"))
        self.assertEqual(
            os.path.join("a", sep + "b", "c"), file_io.join("a", sep + "b", "c"))
        self.assertEqual(
            os.path.join("a", "b", sep + "c"), file_io.join("a", "b", sep + "c"))
        self.assertEqual(
            os.path.join("a", "b", "c" + sep), file_io.join("a", "b", "c" + sep))
@run_all_path_types
def testFileDoesntExist(self, join):
file_path = join(self._base_dir, "temp_file")
self.assertFalse(file_io.file_exists(file_path))
with self.assertRaises(errors.NotFoundError):
_ = file_io.read_file_to_string(file_path)
@run_all_path_types
def testWriteToString(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testAtomicWriteStringToFile(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.atomic_write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testAtomicWriteStringToFileOverwriteFalse(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.atomic_write_string_to_file(file_path, "old", overwrite=False)
with self.assertRaises(errors.AlreadyExistsError):
file_io.atomic_write_string_to_file(file_path, "new", overwrite=False)
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("old", file_contents)
file_io.delete_file(file_path)
file_io.atomic_write_string_to_file(file_path, "new", overwrite=False)
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("new", file_contents)
@run_all_path_types
def testReadBinaryMode(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
with file_io.FileIO(file_path, mode="rb") as f:
self.assertEqual(b"testing", f.read())
@run_all_path_types
def testWriteBinaryMode(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, "wb").write("testing")
with file_io.FileIO(file_path, mode="r") as f:
self.assertEqual("testing", f.read())
def testAppend(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("begin\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a1\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a2\n")
with file_io.FileIO(file_path, mode="r") as f:
file_contents = f.read()
self.assertEqual("begin\na1\na2\n", file_contents)
def testMultipleFiles(self):
file_prefix = file_io.join(self._base_dir, "temp_file")
for i in range(5000):
f = file_io.FileIO(file_prefix + str(i), mode="w+")
f.write("testing")
f.flush()
self.assertEqual("testing", f.read())
f.close()
def testMultipleWrites(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("line1\n")
f.write("line2")
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("line1\nline2", file_contents)
def testFileWriteBadMode(self):
file_path = file_io.join(self._base_dir, "temp_file")
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="r").write("testing")
def testFileReadBadMode(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="w").read()
@run_all_path_types
def testFileDelete(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_io.delete_file(file_path)
self.assertFalse(file_io.file_exists(file_path))
def testFileDeleteFail(self):
file_path = file_io.join(self._base_dir, "temp_file")
with self.assertRaises(errors.NotFoundError):
file_io.delete_file(file_path)
def testGetMatchingFiles(self):
dir_path = file_io.join(self._base_dir, "temp_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt", "file*.txt"]
for name in files:
file_path = file_io.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
expected_match = [file_io.join(dir_path, name) for name in files]
self.assertItemsEqual(
file_io.get_matching_files(file_io.join(dir_path, "file*.txt")),
expected_match)
self.assertItemsEqual(file_io.get_matching_files(tuple()), [])
files_subset = [
file_io.join(dir_path, files[0]),
file_io.join(dir_path, files[2])
]
self.assertItemsEqual(
file_io.get_matching_files(files_subset), files_subset)
file_io.delete_recursively(dir_path)
self.assertFalse(file_io.file_exists(file_io.join(dir_path, "file3.txt")))
def testGetMatchingFilesWhenParentDirContainsParantheses(self):
dir_path = file_io.join(self._base_dir, "dir_(special)")
file_io.create_dir(dir_path)
files = ["file1.txt", "file(2).txt"]
for name in files:
file_path = file_io.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
expected_match = [file_io.join(dir_path, name) for name in files]
glob_pattern = file_io.join(dir_path, "*")
self.assertItemsEqual(
file_io.get_matching_files(glob_pattern), expected_match)
@run_all_path_types
def testCreateRecursiveDir(self, join):
dir_path = join(self._base_dir, "temp_dir/temp_dir1/temp_dir2")
file_io.recursive_create_dir(dir_path)
file_io.recursive_create_dir(dir_path) # repeat creation
file_path = file_io.join(str(dir_path), "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
file_io.delete_recursively(file_io.join(self._base_dir, "temp_dir"))
self.assertFalse(file_io.file_exists(file_path))
@run_all_path_types
def testCopy(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = join(self._base_dir, "copy_file")
file_io.copy(file_path, copy_path)
self.assertTrue(file_io.file_exists(copy_path))
f = file_io.FileIO(file_path, mode="r")
self.assertEqual("testing", f.read())
self.assertEqual(7, f.tell())
def testCopyOverwrite(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = file_io.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
file_io.copy(file_path, copy_path, overwrite=True)
self.assertTrue(file_io.file_exists(copy_path))
self.assertEqual("testing", file_io.FileIO(file_path, mode="r").read())
def testCopyOverwriteFalse(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = file_io.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
with self.assertRaises(errors.AlreadyExistsError):
file_io.copy(file_path, copy_path, overwrite=False)
@run_all_path_types
def testRename(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = join(self._base_dir, "rename_file")
file_io.rename(file_path, rename_path)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwrite(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = file_io.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
file_io.rename(file_path, rename_path, overwrite=True)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwriteFalse(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = file_io.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
with self.assertRaises(errors.AlreadyExistsError):
file_io.rename(file_path, rename_path, overwrite=False)
self.assertTrue(file_io.file_exists(rename_path))
self.assertTrue(file_io.file_exists(file_path))
def testDeleteRecursivelyFail(self):
fake_dir_path = file_io.join(self._base_dir, "temp_dir")
with self.assertRaises(errors.NotFoundError):
file_io.delete_recursively(fake_dir_path)
@run_all_path_types
def testIsDirectory(self, join):
dir_path = join(self._base_dir, "test_dir")
# Failure for a non-existing dir.
self.assertFalse(file_io.is_directory(dir_path))
file_io.create_dir(dir_path)
self.assertTrue(file_io.is_directory(dir_path))
file_path = join(str(dir_path), "test_file")
file_io.FileIO(file_path, mode="w").write("test")
# False for a file.
self.assertFalse(file_io.is_directory(file_path))
# Test that the value returned from `stat()` has `is_directory` set.
file_statistics = file_io.stat(dir_path)
self.assertTrue(file_statistics.is_directory)
@run_all_path_types
def testListDirectory(self, join):
dir_path = join(self._base_dir, "test_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = join(str(dir_path), name)
file_io.FileIO(file_path, mode="w").write("testing")
subdir_path = join(str(dir_path), "sub_dir")
file_io.create_dir(subdir_path)
subdir_file_path = join(str(subdir_path), "file4.txt")
file_io.FileIO(subdir_file_path, mode="w").write("testing")
dir_list = file_io.list_directory(dir_path)
self.assertItemsEqual(files + ["sub_dir"], dir_list)
def testListDirectoryFailure(self):
dir_path = file_io.join(self._base_dir, "test_dir")
with self.assertRaises(errors.NotFoundError):
file_io.list_directory(dir_path)
def _setupWalkDirectories(self, dir_path):
# Creating a file structure as follows
# test_dir -> file: file1.txt; dirs: subdir1_1, subdir1_2, subdir1_3
# subdir1_1 -> file: file3.txt
# subdir1_2 -> dir: subdir2
file_io.create_dir(dir_path)
file_io.FileIO(
file_io.join(dir_path, "file1.txt"), mode="w").write("testing")
sub_dirs1 = ["subdir1_1", "subdir1_2", "subdir1_3"]
for name in sub_dirs1:
file_io.create_dir(file_io.join(dir_path, name))
file_io.FileIO(
file_io.join(dir_path, "subdir1_1/file2.txt"),
mode="w").write("testing")
file_io.create_dir(file_io.join(dir_path, "subdir1_2/subdir2"))
@run_all_path_types
def testWalkInOrder(self, join):
dir_path_str = file_io.join(self._base_dir, "test_dir")
dir_path = join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path_str)
# Now test the walk (in_order = True)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=True):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [dir_path_str] + [
file_io.join(dir_path_str, item) for item in
["subdir1_1", "subdir1_2", "subdir1_2/subdir2", "subdir1_3"]
])
self.assertEqual(dir_path_str, all_dirs[0])
self.assertLess(
all_dirs.index(file_io.join(dir_path_str, "subdir1_2")),
all_dirs.index(file_io.join(dir_path_str, "subdir1_2/subdir2")))
self.assertItemsEqual(all_subdirs[1:5], [[], ["subdir2"], [], []])
self.assertItemsEqual(all_subdirs[0],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file1.txt"], ["file2.txt"], [], [], []])
self.assertLess(
all_files.index(["file1.txt"]), all_files.index(["file2.txt"]))
def testWalkPostOrder(self):
dir_path = file_io.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = False)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [
file_io.join(dir_path, item) for item in
["subdir1_1", "subdir1_2/subdir2", "subdir1_2", "subdir1_3"]
] + [dir_path])
self.assertEqual(dir_path, all_dirs[4])
self.assertLess(
all_dirs.index(file_io.join(dir_path, "subdir1_2/subdir2")),
all_dirs.index(file_io.join(dir_path, "subdir1_2")))
self.assertItemsEqual(all_subdirs[0:4], [[], [], ["subdir2"], []])
self.assertItemsEqual(all_subdirs[4],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file2.txt"], [], [], [], ["file1.txt"]])
self.assertLess(
all_files.index(["file2.txt"]), all_files.index(["file1.txt"]))
def testWalkFailure(self):
dir_path = file_io.join(self._base_dir, "test_dir")
# Try walking a directory that wasn't created.
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [])
self.assertItemsEqual(all_subdirs, [])
self.assertItemsEqual(all_files, [])
@run_all_path_types
def testStat(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_statistics = file_io.stat(file_path)
os_statistics = os.stat(str(file_path))
self.assertEqual(7, file_statistics.length)
self.assertEqual(
int(os_statistics.st_mtime), int(file_statistics.mtime_nsec / 1e9))
self.assertFalse(file_statistics.is_directory)
def testReadLine(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.readline())
self.assertEqual("testing2\n", f.readline())
self.assertEqual("testing3\n", f.readline())
self.assertEqual("\n", f.readline())
self.assertEqual("testing5", f.readline())
self.assertEqual("", f.readline())
def testRead(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.read(9))
self.assertEqual("testing2\n", f.read(9))
self.assertEqual("t", f.read(1))
self.assertEqual("esting3\n\ntesting5", f.read())
def testReadErrorReacquiresGil(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
with self.assertRaises(errors.InvalidArgumentError):
# At present, this is sufficient to convince ourselves that the change
# fixes the problem. That is, this test will seg fault without the change,
# and pass with it. Unfortunately, this is brittle, as it relies on the
# Python layer to pass the argument along to the wrapped C++ without
# checking the argument itself.
f.read(-2)
def testTell(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
self.assertEqual(27, f.tell())
self.assertEqual("\n", f.readline())
self.assertEqual(28, f.tell())
self.assertEqual("testing5", f.readline())
self.assertEqual(36, f.tell())
self.assertEqual("", f.readline())
self.assertEqual(36, f.tell())
def testSeek(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(18)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(0)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(-1)
with self.assertRaises(TypeError):
f.seek()
# TODO(jhseu): Delete after position deprecation.
with self.assertRaises(TypeError):
f.seek(offset=0, position=0)
f.seek(position=9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
def testSeekFromWhat(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(9, 1)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9, 0)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(-f.size(), 2)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(0, 3)
def testReadingIterator(self):
file_path = file_io.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
with file_io.FileIO(file_path, mode="r+") as f:
f.write("".join(data))
actual_data = []
for line in f:
actual_data.append(line)
self.assertSequenceEqual(actual_data, data)
def testReadlines(self):
file_path = file_io.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
f = file_io.FileIO(file_path, mode="r+")
f.write("".join(data))
f.flush()
lines = f.readlines()
self.assertSequenceEqual(lines, data)
def testUTF8StringPath(self):
file_path = file_io.join(self._base_dir, "UTF8测试_file")
file_io.write_string_to_file(file_path, "testing")
with file_io.FileIO(file_path, mode="rb") as f:
self.assertEqual(b"testing", f.read())
def testEof(self):
"""Test that reading past EOF does not raise an exception."""
file_path = file_io.join(self._base_dir, "temp_file")
f = file_io.FileIO(file_path, mode="r+")
content = "testing"
f.write(content)
f.flush()
self.assertEqual(content, f.read(len(content) + 1))
@run_all_path_types
def testUTF8StringPathExists(self, join):
file_path = join(self._base_dir, "UTF8测试_file_exist")
file_io.write_string_to_file(file_path, "testing")
v = file_io.file_exists(file_path)
self.assertEqual(v, True)
def testFilecmp(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
file2 = file_io.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is another sentence\n" * 100)
file3 = file_io.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, u"This is another sentence\n" * 100)
self.assertFalse(file_io.filecmp(file1, file2))
self.assertTrue(file_io.filecmp(file2, file3))
def testFilecmpSameSize(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
file2 = file_io.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is b sentence\n" * 100)
file3 = file_io.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, u"This is b sentence\n" * 100)
self.assertFalse(file_io.filecmp(file1, file2))
self.assertTrue(file_io.filecmp(file2, file3))
def testFilecmpBinary(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.FileIO(file1, "wb").write("testing\n\na")
file2 = file_io.join(self._base_dir, "file2")
file_io.FileIO(file2, "wb").write("testing\n\nb")
file3 = file_io.join(self._base_dir, "file3")
file_io.FileIO(file3, "wb").write("testing\n\nb")
file4 = file_io.join(self._base_dir, "file4")
file_io.FileIO(file4, "wb").write("testing\n\ntesting")
self.assertFalse(file_io.filecmp(file1, file2))
self.assertFalse(file_io.filecmp(file1, file4))
self.assertTrue(file_io.filecmp(file2, file3))
def testFileCrc32(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
crc1 = file_io.file_crc32(file1)
file2 = file_io.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is another sentence\n" * 100)
crc2 = file_io.file_crc32(file2)
file3 = file_io.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, "This is another sentence\n" * 100)
crc3 = file_io.file_crc32(file3)
self.assertTrue(crc1 != crc2)
self.assertEqual(crc2, crc3)
def testFileCrc32WithBytes(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
crc1 = file_io.file_crc32(file1, block_size=24)
file2 = file_io.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is another sentence\n" * 100)
crc2 = file_io.file_crc32(file2, block_size=24)
file3 = file_io.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, "This is another sentence\n" * 100)
crc3 = file_io.file_crc32(file3, block_size=-1)
self.assertTrue(crc1 != crc2)
self.assertEqual(crc2, crc3)
def testFileCrc32Binary(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.FileIO(file1, "wb").write("testing\n\n")
crc1 = file_io.file_crc32(file1)
file2 = file_io.join(self._base_dir, "file2")
file_io.FileIO(file2, "wb").write("testing\n\n\n")
crc2 = file_io.file_crc32(file2)
file3 = file_io.join(self._base_dir, "file3")
file_io.FileIO(file3, "wb").write("testing\n\n\n")
crc3 = file_io.file_crc32(file3)
self.assertTrue(crc1 != crc2)
self.assertEqual(crc2, crc3)
def testFileSeekableWithZip(self):
# Note: Test case for GitHub issue 27276, issue only exposed in python 3.7+.
filename = file_io.join(self._base_dir, "a.npz")
np.savez_compressed(filename, {"a": 1, "b": 2})
with gfile.GFile(filename, "rb") as f:
info = np.load(f, allow_pickle=True) # pylint: disable=unexpected-keyword-arg
_ = [i for i in info.items()]
def testHasAtomicMove(self):
self.assertTrue(file_io.has_atomic_move("/a/b/c"))
def testGetRegisteredSchemes(self):
expected = ["", "file", "ram"]
actual = file_io.get_registered_schemes()
# Be flexible about additional schemes that may sometimes be registered when
# this test is run, while still verifying each scheme appears just once.
maybe_expected = ["gs", "hypercomputer"]
for scheme in maybe_expected:
if scheme in actual:
expected.append(scheme)
self.assertCountEqual(expected, actual)
def testReadWriteWithEncoding(self):
file_path = file_io.join(self._base_dir, "temp_file")
with open(file_path, mode="w", encoding="cp932") as f:
f.write("今日はいい天気")
with file_io.FileIO(file_path, mode="r", encoding="cp932") as f:
self.assertEqual(f.read(), "今日はいい天気")
with file_io.FileIO(file_path, mode="w", encoding="cp932") as f:
f.write("今日はいい天気")
with open(file_path, mode="r", encoding="cp932") as f:
self.assertEqual(f.read(), "今日はいい天気")
if __name__ == "__main__":
test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@lib@io@file_io_test.py@.PATH_END.py
|
{
"filename": "module.py",
"repo_name": "pyro-ppl/numpyro",
"repo_path": "numpyro_extracted/numpyro-master/numpyro/contrib/module.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
from copy import deepcopy
from functools import partial
import jax
from jax import random
import jax.numpy as jnp
from jax.tree_util import register_pytree_node
import numpyro
import numpyro.distributions as dist
from numpyro.primitives import mutable as numpyro_mutable
__all__ = [
"flax_module",
"haiku_module",
"random_flax_module",
"random_haiku_module",
]
def flax_module(
    name, nn_module, *args, input_shape=None, apply_rng=None, mutable=None, **kwargs
):
    """
    Declare a :mod:`~flax` style neural network inside a
    model so that its parameters are registered for optimization via
    :func:`~numpyro.primitives.param` statements.

    Given a flax ``nn_module``, in flax to evaluate the module with
    a given set of parameters, we use: ``nn_module.apply(params, x)``.
    In a NumPyro model, the pattern will be::

        net = flax_module("net", nn_module)
        y = net(x)

    or with dropout layers::

        net = flax_module("net", nn_module, apply_rng=["dropout"])
        rng_key = numpyro.prng_key()
        y = net(x, rngs={"dropout": rng_key})

    :param str name: name of the module to be registered.
    :param flax.linen.Module nn_module: a `flax` Module which has .init and .apply methods
    :param args: optional arguments to initialize flax neural network
        as an alternative to `input_shape`
    :param tuple input_shape: shape of the input taken by the
        neural network.
    :param list apply_rng: A list to indicate which extra rng _kinds_ are needed for
        ``nn_module``. For example, when ``nn_module`` includes dropout layers, we
        need to set ``apply_rng=["dropout"]``. Defaults to None, which means no extra
        rng key is needed. Please see
        `Flax Linen Intro <https://flax.readthedocs.io/en/latest/notebooks/linen_intro.html#Invoking-Modules>`_
        for more information in how Flax deals with stochastic layers like dropout.
    :param list mutable: A list to indicate mutable states of ``nn_module``. For example,
        if your module has BatchNorm layer, we will need to define ``mutable=["batch_stats"]``.
        See the above `Flax Linen Intro` tutorial for more information.
    :param kwargs: optional keyword arguments to initialize flax neural network
        as an alternative to `input_shape`
    :return: a callable with bound parameters that takes an array
        as an input and returns the neural network transformed output
        array.
    """
    try:
        import flax  # noqa: F401
    except ImportError as e:
        raise ImportError(
            "Looking like you want to use flax to declare "
            "nn modules. This is an experimental feature. "
            "You need to install `flax` to be able to use this feature. "
            "It can be installed with `pip install flax`."
        ) from e
    module_key = name + "$params"
    # Returns the previously-registered value on a later model trace,
    # or None on the very first trace (triggering initialization below).
    nn_params = numpyro.param(module_key)
    if mutable:
        nn_state = numpyro_mutable(name + "$state")
        assert nn_state is None or isinstance(nn_state, dict)
        # params and mutable state are created together, so either both
        # already exist or neither does.
        assert (nn_state is None) == (nn_params is None)
    if nn_params is None:
        # feed in dummy data to init params
        args = (jnp.ones(input_shape),) if input_shape is not None else args
        rng_key = numpyro.prng_key()
        # split rng_key into a dict of rng_kind: rng_key
        rngs = {}
        if apply_rng:
            assert isinstance(apply_rng, list)
            for kind in apply_rng:
                rng_key, subkey = random.split(rng_key)
                rngs[kind] = subkey
        rngs["params"] = rng_key
        # unfreeze so the variables behave as a plain (mutable) dict pytree
        nn_vars = flax.core.unfreeze(nn_module.init(rngs, *args, **kwargs))
        if "params" not in nn_vars:
            raise ValueError(
                "Your nn_module does not have any parameter. Currently, it is not"
                " supported in NumPyro. Please make a github issue if you need"
                " that feature."
            )
        nn_params = nn_vars["params"]
        if mutable:
            # everything except "params" is mutable state (e.g. batch_stats)
            nn_state = {k: v for k, v in nn_vars.items() if k != "params"}
            assert set(mutable) == set(nn_state)
            numpyro_mutable(name + "$state", nn_state)
        # make sure that nn_params keep the same order after unflatten
        params_flat, tree_def = jax.tree.flatten(nn_params)
        nn_params = jax.tree.unflatten(tree_def, params_flat)
        numpyro.param(module_key, nn_params)

    def apply_with_state(params, *args, **kwargs):
        # merge optimized params with the current mutable state, then
        # write the updated state back in place after the forward pass
        params = {"params": params, **nn_state}
        out, new_state = nn_module.apply(params, mutable=mutable, *args, **kwargs)
        nn_state.update(**new_state)
        return out

    def apply_without_state(params, *args, **kwargs):
        return nn_module.apply({"params": params}, *args, **kwargs)

    apply_fn = apply_with_state if mutable else apply_without_state
    # bind the (possibly freshly-initialized) params into the returned callable
    return partial(apply_fn, nn_params)
def haiku_module(name, nn_module, *args, input_shape=None, apply_rng=False, **kwargs):
    """
    Declare a :mod:`~haiku` style neural network inside a
    model so that its parameters are registered for optimization via
    :func:`~numpyro.primitives.param` statements.

    Given a haiku ``nn_module``, in haiku to evaluate the module with
    a given set of parameters, we use: ``nn_module.apply(params, None, x)``.
    In a NumPyro model, the pattern will be::

        net = haiku_module("net", nn_module)
        y = net(x)  # or y = net(rng_key, x)

    or with dropout layers::

        net = haiku_module("net", nn_module, apply_rng=True)
        rng_key = numpyro.prng_key()
        y = net(rng_key, x)

    :param str name: name of the module to be registered.
    :param nn_module: a `haiku` Module which has .init and .apply methods
    :type nn_module: haiku.Transformed or haiku.TransformedWithState
    :param args: optional arguments to initialize flax neural network
        as an alternative to `input_shape`
    :param tuple input_shape: shape of the input taken by the
        neural network.
    :param bool apply_rng: A flag to indicate if the returned callable requires
        an rng argument (e.g. when ``nn_module`` includes dropout layers). Defaults
        to False, which means no rng argument is needed. If this is True, the signature
        of the returned callable ``nn = haiku_module(..., apply_rng=True)`` will be
        ``nn(rng_key, x)`` (rather than ``nn(x)``).
    :param kwargs: optional keyword arguments to initialize flax neural network
        as an alternative to `input_shape`
    :return: a callable with bound parameters that takes an array
        as an input and returns the neural network transformed output
        array.
    """
    try:
        import haiku as hk  # noqa: F401
    except ImportError as e:
        raise ImportError(
            "Looking like you want to use haiku to declare "
            "nn modules. This is an experimental feature. "
            "You need to install `haiku` to be able to use this feature. "
            "It can be installed with `pip install dm-haiku`."
        ) from e

    if not apply_rng:
        # Callers will not pass an rng key at apply time, so strip the rng
        # argument from the transformed apply function.
        nn_module = hk.without_apply_rng(nn_module)

    module_key = name + "$params"
    # None here means the parameter has not been registered yet; it is
    # initialized in the branch below on the first trace.
    nn_params = numpyro.param(module_key)
    # TransformedWithState modules carry extra mutable state (e.g. batch stats)
    # that is tracked separately via a numpyro mutable site.
    with_state = isinstance(nn_module, hk.TransformedWithState)
    if with_state:
        nn_state = numpyro_mutable(name + "$state")
        assert nn_state is None or isinstance(nn_state, dict)
        # Params and state must be registered together (both or neither).
        assert (nn_state is None) == (nn_params is None)
    if nn_params is None:
        args = (jnp.ones(input_shape),) if input_shape is not None else args
        # feed in dummy data to init params
        rng_key = numpyro.prng_key()
        if with_state:
            nn_params, nn_state = nn_module.init(rng_key, *args, **kwargs)
            nn_state = dict(nn_state)
            numpyro_mutable(name + "$state", nn_state)
        else:
            nn_params = nn_module.init(rng_key, *args, **kwargs)
        # haiku init returns an immutable dict
        nn_params = hk.data_structures.to_mutable_dict(nn_params)
        # we cast it to a mutable one to be able to set priors for parameters
        # make sure that nn_params keep the same order after unflatten
        params_flat, tree_def = jax.tree.flatten(nn_params)
        nn_params = jax.tree.unflatten(tree_def, params_flat)
        numpyro.param(module_key, nn_params)

    def apply_with_state(params, *args, **kwargs):
        # Thread the mutable state through apply and write updates back into
        # the registered state dict (shared via closure).
        out, new_state = nn_module.apply(params, nn_state, *args, **kwargs)
        nn_state.update(**new_state)
        return out

    apply_fn = apply_with_state if with_state else nn_module.apply
    # Bind the (possibly freshly initialized) parameters to the apply function.
    return partial(apply_fn, nn_params)
# register an "empty" parameter which only stores its shape
# so that the optimizer can skip optimize this parameter, while
# it still provides shape information for priors
ParamShape = namedtuple("ParamShape", ["shape"])
# Registered as a pytree with no children and the shape as auxiliary data, so
# jax tree operations treat a ParamShape leaf as carrying no trainable values.
register_pytree_node(
    ParamShape, lambda x: ((None,), x.shape), lambda shape, x: ParamShape(shape)
)
def _update_params(params, new_params, prior, prefix=""):
    """Recursively replace leaf entries of ``new_params`` with prior samples.

    ``params`` is mutated in place: every leaf that receives a prior is
    replaced by a :class:`ParamShape` placeholder so only its shape is kept.
    ``prior`` may be a distribution, a dict keyed by dot-joined parameter
    paths, or a callable ``prior(flat_name, shape) -> Distribution``.
    """
    for key, value in params.items():
        full_name = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            # An inner submodule: a dict-valued entry cannot itself take a prior.
            assert not isinstance(prior, dict) or full_name not in prior
            _update_params(value, new_params[key], prior, prefix=full_name)
            continue
        if isinstance(prior, dict) and full_name not in prior:
            # This leaf keeps its point estimate; no prior was requested for it.
            continue
        if isinstance(value, ParamShape):
            shape = value.shape
        else:
            shape = jnp.shape(value)
            params[key] = ParamShape(shape)
        if isinstance(prior, dict):
            d = prior[full_name]
        elif callable(prior) and not isinstance(prior, dist.Distribution):
            d = prior(full_name, shape)
        else:
            d = prior
        batch_shape = shape[: len(shape) - d.event_dim]
        # XXX: here we set all dimensions of prior to event dimensions.
        new_params[key] = numpyro.sample(
            full_name, d.expand(batch_shape).to_event()
        )
def random_flax_module(
    name,
    nn_module,
    prior,
    *args,
    input_shape=None,
    apply_rng=None,
    mutable=None,
    **kwargs,
):
    """
    A primitive to place a prior over the parameters of the Flax module `nn_module`.

    .. note::
        Parameters of a Flax module are stored in a nested dict. For example,
        the module `B` defined as follows::

            class A(flax.linen.Module):
                @flax.linen.compact
                def __call__(self, x):
                    return nn.Dense(1, use_bias=False, name='dense')(x)

            class B(flax.linen.Module):
                @flax.linen.compact
                def __call__(self, x):
                    return A(name='inner')(x)

        has parameters `{'inner': {'dense': {'kernel': param_value}}}`. In the argument
        `prior`, to specify `kernel` parameter, we join the path to it using dots:
        `prior={"inner.dense.kernel": param_prior}`.

    :param str name: name of NumPyro module
    :param flax.linen.Module: the module to be registered with NumPyro
    :param prior: a NumPyro distribution or a Python dict with parameter names as keys and
        respective distributions as values. For example::

            net = random_flax_module("net",
                                     flax.linen.Dense(features=1),
                                     prior={"bias": dist.Cauchy(), "kernel": dist.Normal()},
                                     input_shape=(4,))

        Alternatively, we can use a callable. For example the following are equivalent::

            prior=(lambda name, shape: dist.Cauchy() if name == "bias" else dist.Normal())
            prior={"bias": dist.Cauchy(), "kernel": dist.Normal()}

    :type prior: dict, ~numpyro.distributions.Distribution or callable
    :param args: optional arguments to initialize flax neural network
        as an alternative to `input_shape`
    :param tuple input_shape: shape of the input taken by the neural network.
    :param list apply_rng: A list to indicate which extra rng _kinds_ are needed for
        ``nn_module``. For example, when ``nn_module`` includes dropout layers, we
        need to set ``apply_rng=["dropout"]``. Defaults to None, which means no extra
        rng key is needed. Please see
        `Flax Linen Intro <https://flax.readthedocs.io/en/latest/notebooks/linen_intro.html#Invoking-Modules>`_
        for more information in how Flax deals with stochastic layers like dropout.
    :param list mutable: A list to indicate mutable states of ``nn_module``. For example,
        if your module has BatchNorm layer, we will need to define ``mutable=["batch_stats"]``.
        See the above `Flax Linen Intro` tutorial for more information.
    :param kwargs: optional keyword arguments to initialize flax neural network
        as an alternative to `input_shape`
    :returns: a sampled module

    **Example**

    .. doctest::

        # NB: this example is ported from https://github.com/ctallec/pyvarinf/blob/master/main_regression.ipynb
        >>> import numpy as np; np.random.seed(0)
        >>> import tqdm
        >>> from flax import linen as nn
        >>> from jax import jit, random
        >>> import numpyro
        >>> import numpyro.distributions as dist
        >>> from numpyro.contrib.module import random_flax_module
        >>> from numpyro.infer import Predictive, SVI, TraceMeanField_ELBO, autoguide, init_to_feasible
        ...
        >>> class Net(nn.Module):
        ...     n_units: int
        ...
        ...     @nn.compact
        ...     def __call__(self, x):
        ...         x = nn.Dense(self.n_units)(x[..., None])
        ...         x = nn.relu(x)
        ...         x = nn.Dense(self.n_units)(x)
        ...         x = nn.relu(x)
        ...         mean = nn.Dense(1)(x)
        ...         rho = nn.Dense(1)(x)
        ...         return mean.squeeze(), rho.squeeze()
        ...
        >>> def generate_data(n_samples):
        ...     x = np.random.normal(size=n_samples)
        ...     y = np.cos(x * 3) + np.random.normal(size=n_samples) * np.abs(x) / 2
        ...     return x, y
        ...
        >>> def model(x, y=None, batch_size=None):
        ...     module = Net(n_units=32)
        ...     net = random_flax_module("nn", module, dist.Normal(0, 0.1), input_shape=())
        ...     with numpyro.plate("batch", x.shape[0], subsample_size=batch_size):
        ...         batch_x = numpyro.subsample(x, event_dim=0)
        ...         batch_y = numpyro.subsample(y, event_dim=0) if y is not None else None
        ...         mean, rho = net(batch_x)
        ...         sigma = nn.softplus(rho)
        ...         numpyro.sample("obs", dist.Normal(mean, sigma), obs=batch_y)
        ...
        >>> n_train_data = 5000
        >>> x_train, y_train = generate_data(n_train_data)
        >>> guide = autoguide.AutoNormal(model, init_loc_fn=init_to_feasible)
        >>> svi = SVI(model, guide, numpyro.optim.Adam(5e-3), TraceMeanField_ELBO())
        >>> n_iterations = 3000
        >>> svi_result = svi.run(random.PRNGKey(0), n_iterations, x_train, y_train, batch_size=256)
        >>> params, losses = svi_result.params, svi_result.losses
        >>> n_test_data = 100
        >>> x_test, y_test = generate_data(n_test_data)
        >>> predictive = Predictive(model, guide=guide, params=params, num_samples=1000)
        >>> y_pred = predictive(random.PRNGKey(1), x_test[:100])["obs"].copy()
        >>> assert losses[-1] < 3000
        >>> assert np.sqrt(np.mean(np.square(y_test - y_pred))) < 1
    """
    nn = flax_module(
        name,
        nn_module,
        *args,
        input_shape=input_shape,
        apply_rng=apply_rng,
        mutable=mutable,
        **kwargs,
    )
    # `nn` is a functools.partial with the point-estimate parameters bound as
    # its first positional argument (see `flax_module`).
    params = nn.args[0]
    new_params = deepcopy(params)
    # Sample sites for the parameters are namespaced under `name`.
    with numpyro.handlers.scope(prefix=name):
        _update_params(params, new_params, prior)
    # Re-bind the sampled parameters in place of the point estimates.
    nn_new = partial(nn.func, new_params, *nn.args[1:], **nn.keywords)
    return nn_new
def random_haiku_module(
    name, nn_module, prior, *args, input_shape=None, apply_rng=False, **kwargs
):
    """
    A primitive to place a prior over the parameters of the Haiku module `nn_module`.

    :param str name: name of NumPyro module
    :param nn_module: the module to be registered with NumPyro
    :type nn_module: haiku.Transformed or haiku.TransformedWithState
    :param prior: a NumPyro distribution, a Python dict mapping flattened
        parameter names (path components joined by dots) to distributions, or a
        callable ``prior(flat_name, shape) -> Distribution``. For example::

            net = random_haiku_module("net",
                                      haiku.transform(lambda x: hk.Linear(1)(x)),
                                      prior={"linear.b": dist.Cauchy(), "linear.w": dist.Normal()},
                                      input_shape=(4,))

    :type prior: dict, ~numpyro.distributions.Distribution or callable
    :param args: optional arguments to initialize the neural network
        as an alternative to `input_shape`
    :param tuple input_shape: shape of the input taken by the neural network.
    :param bool apply_rng: set to True when the returned callable needs an rng
        argument (e.g. when ``nn_module`` includes dropout layers); its
        signature is then ``nn(rng_key, x)`` rather than ``nn(x)``. Defaults to
        False.
    :param kwargs: optional keyword arguments to initialize the neural network
        as an alternative to `input_shape`
    :returns: a sampled module
    """
    # Register the module so its point-estimate parameters exist; the result is
    # a functools.partial with the parameter dict bound as the first argument.
    bound = haiku_module(
        name, nn_module, *args, input_shape=input_shape, apply_rng=apply_rng, **kwargs
    )
    point_params = bound.args[0]
    sampled_params = deepcopy(point_params)
    # Replace every parameter covered by `prior` with a sampled value,
    # namespacing the sample sites under `name`.
    with numpyro.handlers.scope(prefix=name):
        _update_params(point_params, sampled_params, prior)
    return partial(bound.func, sampled_params, *bound.args[1:], **bound.keywords)
|
pyro-pplREPO_NAMEnumpyroPATH_START.@numpyro_extracted@numpyro-master@numpyro@contrib@module.py@.PATH_END.py
|
{
"filename": "analysis.ipynb",
"repo_name": "tanner-trickle/EXCEED-DM",
"repo_path": "EXCEED-DM_extracted/EXCEED-DM-main/examples/16/analysis.ipynb",
"type": "Jupyter Notebook"
}
|
# Example 16 Analysis
We will plot the absorption rate as a function of mass, as well as the reach on $g_{aee}$.
## Packages
```python
import numpy as np
# some personal preferences for nice plots
%run "../../utilities/output_parser/plotter.ipynb"
# helpful functions for processing output
import sys
sys.path.append("../../utilities/output_parser")
import EXDMDataHandler
from EXDMDataHandler import EXDMData
```
## Data
```python
data = EXDMData(filename = './output/EXDM_out_example_16.hdf5')
ripped_filename = './Xe_event_rate_pe.csv'
```
## Results
### Rate
```python
def get_ripped_data(filename, col):
    """Read one column of digitized data from a CSV-like file.

    Lines starting with 'S' (series headers emitted by the plot digitizer)
    are skipped, as are rows whose entry in column ``col`` is missing or not
    parseable as a float.

    :param filename: path to the CSV file
    :param col: zero-based index of the column to extract
    :return: 1D numpy array of the parsed float values
    """
    data = []
    with open(filename) as f:
        for line in f:
            if line.startswith('S'):
                continue
            fields = line.split(',')
            try:
                data.append(float(fields[col]))
            except (ValueError, IndexError):
                # Non-numeric entry or short row: skip it. A bare `except:`
                # here would also swallow KeyboardInterrupt/SystemExit.
                pass
    return np.array(data)
rescaled_rate_masses_log_keV = get_ripped_data(ripped_filename, 0)
rescaled_rate = get_ripped_data(ripped_filename, 1)
```
```python
[ masses_eV, abs_rate ] = data.get_absorption_rates(g2=(10**(-12))**2, expt_T_year=1)
```
```python
save_fig = False
fig, axes = plt.subplots(nrows=1, ncols=1,
figsize=(7*1.1, 7))
log_mX_min = 2 - 3
log_mX_max = 6 - 3
log_events_min = -6
log_events_max = 3
set_custom_tick_options(axes)
set_custom_axes(axes, 'x', log_mX_min, log_mX_max,
ax_type = 'log',
label = r'$m_\phi \, [\mathrm{keV}]$')
set_custom_axes(axes, 'y', log_events_min, log_events_max,
ax_type = 'log',
label = r'$R \left[ \mathrm{kg}^{-1} \mathrm{day}^{-1} \right]$',
show_first = False)
axes.plot(
np.log10(masses_eV/10**3),
np.log10(abs_rate),
label = 'computed'
)
axes.plot(
rescaled_rate_masses_log_keV,
np.log10((0.4/0.3)*rescaled_rate),
linestyle = '--',
label = 'data'
)
axes.legend(loc = 'lower right', fontsize = 30, frameon = False)
fig.tight_layout()
axes.text(-0.75, -5.5, r'$g_{aee} = 10^{-12}$', fontsize = 30)
if save_fig:
plt.savefig('./output/Xe_ps_rate_compare.pdf',
bbox_inches='tight', pad_inches = 0.075)
plt.show()
```
------------------------------------------------------------
NameError Traceback (most recent call last)
Cell In[4], line 22
13 set_custom_axes(axes, 'x', log_mX_min, log_mX_max,
14 ax_type = 'log',
15 label = r'$m_\phi \, [\mathrm{keV}]$')
16 set_custom_axes(axes, 'y', log_events_min, log_events_max,
17 ax_type = 'log',
18 label = r'$R \left[ \mathrm{kg}^{-1} \mathrm{day}^{-1} \right]$',
19 show_first = False)
21 axes.plot(
---> 22 np.log10(masses_eV/10**3),
23 np.log10(abs_rate),
24 label = 'computed'
25 )
27 axes.plot(
28 rescaled_rate_masses_log_keV,
29 np.log10((0.4/0.3)*rescaled_rate),
30 linestyle = '--',
31 label = 'data'
32 )
34 axes.legend(loc = 'lower right', fontsize = 30, frameon = False)
NameError: name 'masses_eV' is not defined

### Reach
```python
[ masses_eV, g_constraint ] = data.get_abs_g_constraint(expt_M_kg=10*10**3)
```
```python
save_fig = False
fig, axes = plt.subplots(nrows=1, ncols=1,
figsize=(7*1.1, 7))
log_mX_min = 2
log_mX_max = 6
log_g_min = -16
log_g_max = -6
set_custom_tick_options(axes)
set_custom_axes(axes, 'x', log_mX_min, log_mX_max,
ax_type = 'log',
label = r'$m_\phi \, [\mathrm{eV}]$')
set_custom_axes(axes, 'y', log_g_min, log_g_max,
ax_type = 'log',
label = r'$d_M \, [\mathrm{GeV}^{-1}]$',
show_first = False)
axes.plot(
np.log10(masses_eV),
np.log10(g_constraint),
)
fig.tight_layout()
if save_fig:
plt.savefig('./output/Xe_d_M_constraint.pdf',
bbox_inches='tight', pad_inches = 0.075)
plt.show()
```

```python
```
|
tanner-trickleREPO_NAMEEXCEED-DMPATH_START.@EXCEED-DM_extracted@EXCEED-DM-main@examples@16@analysis.ipynb@.PATH_END.py
|
{
"filename": "ImageEnhance.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pillow/py2/PIL/ImageEnhance.py",
"type": "Python"
}
|
#
# The Python Imaging Library.
# $Id$
#
# image enhancement classes
#
# For a background, see "Image Processing By Interpolation and
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
# at http://www.graficaobscura.com/interp/index.html
#
# History:
# 1996-03-23 fl Created
# 2009-06-16 fl Fixed mean calculation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from . import Image, ImageFilter, ImageStat
class _Enhance(object):
    """Shared blend logic for the enhancer classes.

    Subclasses set ``self.image`` (the original) and ``self.degenerate``
    (the fully de-enhanced endpoint); ``enhance`` interpolates between them.
    """

    def enhance(self, factor):
        """
        Returns an enhanced image.

        :param factor: A floating point value controlling the enhancement.
            Factor 1.0 always returns a copy of the original image, lower
            factors mean less color (brightness, contrast, etc), and higher
            values more. There are no restrictions on this value.
        :rtype: :py:class:`~PIL.Image.Image`
        """
        blended = Image.blend(self.degenerate, self.image, factor)
        return blended
class Color(_Enhance):
    """Adjust image color balance.

    This class can be used to adjust the colour balance of an image, in a
    manner similar to the controls on a colour TV set. An enhancement factor
    of 0.0 gives a black and white image. A factor of 1.0 gives the original
    image.
    """

    def __init__(self, image):
        self.image = image
        # Round-trip through a grayscale mode (keeping alpha when present)
        # to build the fully desaturated endpoint of the blend.
        self.intermediate_mode = "LA" if "A" in image.getbands() else "L"
        self.degenerate = image.convert(self.intermediate_mode).convert(image.mode)
class Contrast(_Enhance):
    """Adjust image contrast.

    This class can be used to control the contrast of an image, similar to
    the contrast control on a TV set. An enhancement factor of 0.0 gives a
    solid grey image. A factor of 1.0 gives the original image.
    """

    def __init__(self, image):
        self.image = image
        # A flat image at the mean luminance is the zero-contrast endpoint.
        # int(x + 0.5) rounds half up (round() would use banker's rounding).
        gray_level = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
        flat = Image.new("L", image.size, gray_level).convert(image.mode)
        if "A" in image.getbands():
            flat.putalpha(image.getchannel("A"))
        self.degenerate = flat
class Brightness(_Enhance):
    """Adjust image brightness.

    This class can be used to control the brightness of an image. An
    enhancement factor of 0.0 gives a black image. A factor of 1.0 gives
    the original image.
    """

    def __init__(self, image):
        self.image = image
        # Solid black (preserving alpha) is the zero-brightness endpoint.
        black = Image.new(image.mode, image.size, 0)
        if "A" in image.getbands():
            black.putalpha(image.getchannel("A"))
        self.degenerate = black
class Sharpness(_Enhance):
    """Adjust image sharpness.

    This class can be used to adjust the sharpness of an image. An
    enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives
    the original image, and a factor of 2.0 gives a sharpened image.
    """

    def __init__(self, image):
        self.image = image
        # A smoothed copy (preserving alpha) is the zero-sharpness endpoint.
        smoothed = image.filter(ImageFilter.SMOOTH)
        if "A" in image.getbands():
            smoothed.putalpha(image.getchannel("A"))
        self.degenerate = smoothed
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pillow@py2@PIL@ImageEnhance.py@.PATH_END.py
|
{
"filename": "test_simulation.py",
"repo_name": "smirik/resonances",
"repo_path": "resonances_extracted/resonances-main/tests/resonances/test_simulation.py",
"type": "Python"
}
|
import numpy as np
import rebound
import tests.tools as tools
import shutil
from pathlib import Path
import pytest
import resonances
@pytest.fixture(autouse=True)
def run_around_tests():
    """Ensure the cache directory used by the tests exists before each test."""
    Path('cache/tests').mkdir(parents=True, exist_ok=True)
    yield
    # Cleanup is intentionally disabled so artifacts can be inspected after a run.
    # shutil.rmtree('cache/tests')
def test_init():
    """Smoke test: Simulation can be constructed and basic attributes set."""
    resonances.config.set('integration.tmax', 100)
    sim = resonances.Simulation()
    sim.Nout = 10
    sim.tmax_yrs = 100 / (2 * np.pi)
    # NOTE(review): no assertions — this only verifies construction and
    # attribute assignment do not raise.
def test_solar_system():
    """The solar-system setup wraps a rebound simulation with ten particles."""
    sim = tools.create_test_simulation_for_solar_system()
    assert isinstance(sim.sim, rebound.Simulation)
    assert len(sim.sim.particles) == 10
def test_add_body():
    """Bodies can be added by orbital elements or asteroid number/name;
    invalid input raises with a specific message."""
    sim = tools.create_test_simulation_for_solar_system()
    elem = tools.get_3body_elements_sample()
    mmr = resonances.ThreeBody('4J-2S-1')
    sim.add_body(elem, mmr)
    assert 10 == len(sim.sim.particles)  # because it is not added to rebound simulation yet
    assert 1 == len(sim.bodies)
    assert sim.bodies[0].initial_data['a'] == elem['a']
    assert sim.bodies[0].mmrs[0].coeff[0] == mmr.coeff[0]
    assert 5 == sim.bodies[0].mmrs[0].index_of_planets[0]
    assert 6 == sim.bodies[0].mmrs[0].index_of_planets[1]
    sim.add_body(6, mmr)  # add from astdys
    assert 2 == len(sim.bodies)
    sim.add_body('7', mmr)  # add from astdys
    assert 3 == len(sim.bodies)
    sim.add_body(1, '4J-2S-1')
    assert 4 == len(sim.bodies)
    elem['mass'] = 2.0
    sim.add_body(elem, '5J-2S-2')
    assert 5 == len(sim.bodies)
    assert 2.0 == sim.bodies[4].initial_data['mass']
    # pytest.raises fails the test when no exception is raised, unlike the
    # previous try/raise/except pattern: there, the manually raised
    # AssertionError was itself caught by `except Exception` and its message
    # matched, so the test silently passed even if add_body never raised.
    with pytest.raises(Exception, match='You can add body only by its number or all orbital elements'):
        sim.add_body(None, '5J-2S-2')
    with pytest.raises(Exception, match='You have to provide at least one resonance'):
        sim.add_body(2, [])
def test_add_bodies_to_simulation():
    """A registered body can be pushed into the underlying rebound simulation."""
    sim = tools.create_test_simulation_for_solar_system()
    tools.add_test_asteroid_to_simulation(sim)
    first_body = sim.bodies[0]
    sim.add_body_to_simulation(first_body)
    assert len(sim.sim.particles) == 11
def test_run():
    """End-to-end run: integrate one asteroid, then check time series,
    filtered data, and output artifacts (csv, png, summary)."""
    sim = tools.create_test_simulation_for_solar_system()
    tools.add_test_asteroid_to_simulation(sim)
    mmr = sim.bodies[0].mmrs[0]
    sim.save = 'all'
    sim.plot = 'all'
    sim.save_summary = True
    sim.run()
    # Time series lengths match the number of output points of the test setup.
    assert 10 == len(sim.bodies[0].angles[mmr.to_s()])
    assert 10 == len(sim.bodies[0].axis)
    assert 10 == len(sim.bodies[0].ecc)
    assert sim.bodies[0].statuses[mmr.to_s()] is not None
    assert sim.bodies[0].axis_filtered is not None
    assert sim.bodies[0].angles_filtered[mmr.to_s()] is not None
    # Artifacts produced by save='all', plot='all', save_summary=True.
    assert Path(f'cache/tests/data-asteroid-{mmr.to_s()}.csv').exists() is True
    assert Path(f'cache/tests/asteroid_{mmr.to_s()}.png').exists() is True
    assert Path('cache/tests/summary.csv').exists() is True
@pytest.fixture
def saving_fixtures():
    # Each entry pairs save/plot modes with the expected decisions, in order:
    # [save body with status 2, save body with status -1,
    #  plot body with status 2, plot body with status -1]
    # (statuses are assigned in test_shall_save_and_plot_body).
    return [
        {'save': 'all', 'plot': 'all', 'files': [True, True, True, True]},
        {'save': 'nonzero', 'plot': 'nonzero', 'files': [True, True, True, True]},
        {'save': 'all', 'plot': 'resonant', 'files': [True, True, True, False]},
        {'save': 'resonant', 'plot': 'all', 'files': [True, False, True, True]},
        {'save': 'resonant', 'plot': 'resonant', 'files': [True, False, True, False]},
        {'save': 'all', 'plot': None, 'files': [True, True, False, False]},
        {'save': None, 'plot': 'all', 'files': [False, False, True, True]},
        {'save': None, 'plot': None, 'files': [False, False, False, False]},
        {'save': 'resonant', 'plot': None, 'files': [True, False, False, False]},
        {'save': None, 'plot': 'resonant', 'files': [False, False, True, False]},
    ]
def test_shall_save_and_plot_body(saving_fixtures):
    """shall_save/shall_plot decisions follow the save/plot mode table."""
    for saving_fixture in saving_fixtures:
        sim = tools.create_test_simulation_for_solar_system(save=saving_fixture['save'], plot=saving_fixture['plot'])
        elem = tools.get_3body_elements_sample()
        mmr = resonances.ThreeBody('4J-2S-1')
        sim.add_body(elem, mmr, name='asteroid')
        sim.add_body(elem, mmr, name='asteroid2')
        # Body 0 gets status 2, body 1 gets status -1 (see fixture ordering).
        sim.bodies[0].statuses[mmr.to_s()] = 2
        sim.bodies[1].statuses[mmr.to_s()] = -1
        assert saving_fixture['files'][0] == sim.shall_save_body_in_mmr(sim.bodies[0], mmr)
        assert saving_fixture['files'][1] == sim.shall_save_body_in_mmr(sim.bodies[1], mmr)
        assert saving_fixture['files'][2] == sim.shall_plot_body_in_mmr(sim.bodies[0], mmr)
        assert saving_fixture['files'][3] == sim.shall_plot_body_in_mmr(sim.bodies[1], mmr)
def test_saving_summary():
    """summary.csv is written only when save_summary=True."""
    sim = tools.create_test_simulation_for_solar_system(save=True, save_summary=True)
    tools.add_test_asteroid_to_simulation(sim)
    sim.run()
    assert Path('cache/tests/summary.csv').exists() is True
    # Remove artifacts so the second run starts from a clean directory.
    shutil.rmtree('cache/tests')
    del sim
    sim = tools.create_test_simulation_for_solar_system(save=True, save_summary=False)
    tools.add_test_asteroid_to_simulation(sim)
    sim.run()
    assert Path('cache/tests/summary.csv').exists() is False
def test_get_simulation_summary():
    """The summary dataframe has one row per body with the expected columns."""
    sim = tools.create_test_simulation_for_solar_system(save=True)
    tools.add_test_asteroid_to_simulation(sim)
    sim.run()
    df = sim.get_simulation_summary()
    assert 1 == len(df)
    assert 14 == len(df.columns)
    assert 'asteroid' == df['name'].iloc[0]
    assert '4J-2S-1+0+0-1' == df['mmr'].iloc[0]
def test_list_of_planets():
    """The solar-system setup lists ten bodies (Sun, eight planets, Pluto)."""
    sim = tools.create_test_simulation_for_solar_system(save=True)
    assert len(sim.list_of_planets()) == 10
@pytest.fixture
def planets_and_indexes():
    # Pairs of planet-name lists and their expected particle indexes.
    return [[['Jupiter', 'Saturn'], [5, 6]], [['Mercury', 'Venus', 'Mars'], [1, 2, 4]]]
def test_get_index_of_planets(planets_and_indexes):
    """get_index_of_planets maps planet names to their particle indexes."""
    sim = tools.create_test_simulation_for_solar_system()
    for names, expected_indexes in planets_and_indexes:
        found_indexes = sim.get_index_of_planets(names)
        assert all(a == b for a, b in zip(expected_indexes, found_indexes))
def test_process_status():
    """process_status honours the body's status for every save mode."""
    sim = tools.create_test_simulation_for_solar_system()
    body = resonances.Body()
    mmr = resonances.create_mmr('4J-2S-1')
    # (status, save mode, expected decision) — same sequence as the original
    # explicit checks, expressed as a table.
    cases = [
        (2, 'all', True),
        (2, 'resonant', True),
        (2, 'nonzero', True),
        (2, 'candidates', False),
        (0, 'all', True),
        (0, 'resonant', False),
        (0, 'nonzero', False),
        (0, 'candidates', False),
        (-2, 'all', True),
        (-2, 'resonant', False),
        (-2, 'nonzero', True),
        (-2, 'candidates', True),
        (0, None, False),
        (0, 'resonant', False),
        (0, 'nonzero', False),
        (0, 'candidates', False),
    ]
    for status, mode, expected in cases:
        body.statuses[mmr.to_s()] = status
        sim.save = mode
        assert sim.process_status(body, mmr, sim.save) is expected
|
smirikREPO_NAMEresonancesPATH_START.@resonances_extracted@resonances-main@tests@resonances@test_simulation.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/ultralytics/cfg/models/README.md",
"type": "Markdown"
}
|
## Models
Welcome to the [Ultralytics](https://www.ultralytics.com/) Models directory! Here you will find a wide variety of pre-configured model configuration files (`*.yaml`s) that can be used to create custom YOLO models. The models in this directory have been expertly crafted and fine-tuned by the Ultralytics team to provide the best performance for a wide range of object detection and image segmentation tasks.
These model configurations cover a wide range of scenarios, from simple object detection to more complex tasks like instance segmentation and object tracking. They are also designed to run efficiently on a variety of hardware platforms, from CPUs to GPUs. Whether you are a seasoned machine learning practitioner or just getting started with YOLO, this directory provides a great starting point for your custom model development needs.
To get started, simply browse through the models in this directory and find one that best suits your needs. Once you've selected a model, you can use the provided `*.yaml` file to train and deploy your custom YOLO model with ease. See full details at the Ultralytics [Docs](https://docs.ultralytics.com/models/), and if you need help or have any questions, feel free to reach out to the Ultralytics team for support. So, don't wait, start creating your custom YOLO model now!
### Usage
Model `*.yaml` files may be used directly in the [Command Line Interface (CLI)](https://docs.ultralytics.com/usage/cli/) with a `yolo` command:
```bash
# Train a YOLO11n model using the coco8 dataset for 100 epochs
yolo task=detect mode=train model=yolo11n.yaml data=coco8.yaml epochs=100
```
They may also be used directly in a Python environment, and accept the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above:
```python
from ultralytics import YOLO
# Initialize a YOLO11n model from a YAML configuration file
model = YOLO("model.yaml")
# If a pre-trained model is available, use it instead
# model = YOLO("model.pt")
# Display model information
model.info()
# Train the model using the COCO8 dataset for 100 epochs
model.train(data="coco8.yaml", epochs=100)
```
## Pre-trained Model Architectures
Ultralytics supports many model architectures. Visit [Ultralytics Models](https://docs.ultralytics.com/models/) to view detailed information and usage. Any of these models can be used by loading their configurations or pretrained checkpoints if available.
## Contribute New Models
Have you trained a new YOLO variant or achieved state-of-the-art performance with specific tuning? We'd love to showcase your work in our Models section! Contributions from the community in the form of new models, architectures, or optimizations are highly valued and can significantly enrich our repository.
By contributing to this section, you're helping us offer a wider array of model choices and configurations to the community. It's a fantastic way to share your knowledge and expertise while making the Ultralytics YOLO ecosystem even more versatile.
To get started, please consult our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) for step-by-step instructions on how to submit a Pull Request (PR) 🛠️. Your contributions are eagerly awaited!
Let's join hands to extend the range and capabilities of the Ultralytics YOLO models 🙏!
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@ultralytics@cfg@models@README.md@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "emerge-erc/ALminer",
"repo_path": "ALminer_extracted/ALminer-main/setup.py",
"type": "Python"
}
|
from setuptools import setup, find_packages

# Load __version__ from alminer/version.py by executing it into a dict,
# so the package itself does not have to be importable at build time.
version = {}
with open('alminer/version.py') as v:
    exec(v.read(), version)

# The README doubles as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name='alminer',
    version=version['__version__'],
    author='Aida Ahmadi',
    author_email='aahmadi@strw.leidenuniv.nl',
    description='ALminer: ALMA archive mining and visualization toolkit',
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(include=['alminer']),
    url='https://github.com/emerge-erc/ALminer',
    project_urls={
        "Bug Tracker": "https://github.com/emerge-erc/ALminer/issues"
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    license='MIT',
    # NOTE(review): astroquery is pinned to a git HEAD, so builds are not
    # reproducible — consider pinning to a released version.
    install_requires=['numpy>=1.15', 'pandas>1.0', 'matplotlib>=3.3.0', 'pyvo>=1.2.1',
                      'astropy>=3.1.2', 'astroquery @ git+https://git@github.com/astropy/astroquery'],
    python_requires='>=3.6'
)
|
emerge-ercREPO_NAMEALminerPATH_START.@ALminer_extracted@ALminer-main@setup.py@.PATH_END.py
|
{
"filename": "test_particle.py",
"repo_name": "hannorein/REBOUND",
"repo_path": "REBOUND_extracted/REBOUND-main/rebound/tests/test_particle.py",
"type": "Python"
}
|
import rebound
import unittest
import math
import warnings
class TestParticleWarning(unittest.TestCase):
    """Warnings for massive test particles with testparticle_type=0."""

    def test_testparticle0_with_mass(self):
        """A massive test particle warns on every integrate() call unless
        testparticle_type=1 or testparticle_hidewarnings=1."""
        sim = rebound.Simulation()
        sim.add(m=1)
        sim.N_active = sim.N
        # This particle is beyond N_active, i.e. a test particle with mass.
        sim.add(m=1,a=1)
        sim.testparticle_type = 1
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            sim.integrate(1)
        # type 1: no warning expected.
        self.assertEqual(0,len(w))
        sim.testparticle_type = 0
        # Warning occurs each time integrate is called
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            sim.integrate(2)
            sim.integrate(3)
        self.assertEqual(2,len(w))
        sim.testparticle_hidewarnings = 1
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            sim.integrate(4)
        # hidewarnings suppresses the warning again.
        self.assertEqual(0,len(w))
class TestParticleInSimulation(unittest.TestCase):
def setUp(self):
    # Fresh simulation for each test.
    self.sim = rebound.Simulation()
def tearDown(self):
    # Drop the reference so the simulation can be freed between tests.
    self.sim = None
def test_adding(self):
    """Particles can be added by mass/cartesian state; mixing cartesian and
    orbital parameters raises ValueError."""
    self.sim.add()
    self.sim.add(m=1.)
    self.sim.add(m=1.,x=1, vy=1)
    self.sim.add(x=2)
    self.sim.add(x=3, r=1.)
    # Mixing a cartesian coordinate (x) with an orbital element (a) is invalid.
    with self.assertRaises(ValueError):
        self.sim.add(x=4,a=1)
    p4 = self.sim.particles[4]
    ind = p4.index
    self.assertEqual(ind,4)
def test_masses(self):
self.sim.add(m=1.)
self.sim.add(m=1.e-3, a=1.)
self.sim.add(m=1.e-6, a=2.)
self.sim.add(m=0., a=3.)
ps = self.sim.particles
self.assertAlmostEqual(ps[0].m,1.,delta=1e-15)
self.assertAlmostEqual(ps[1].m,1.e-3,delta=1e-15)
self.assertAlmostEqual(ps[2].m,1.e-6,delta=1e-15)
self.assertAlmostEqual(ps[3].m,0.,delta=1e-15)
def test_jacobi_masses(self):
self.sim.add(m=1.)
self.sim.add(m=1.e-3, a=1., jacobi_masses=True)
self.sim.add(m=1.e-6, a=2., jacobi_masses=True)
self.sim.add(m=0., a=3., jacobi_masses=True)
ps = self.sim.particles
self.assertAlmostEqual(ps[0].m,1.,delta=1e-15)
self.assertAlmostEqual(ps[1].m,1.e-3,delta=1e-15)
self.assertAlmostEqual(ps[2].m,1.e-6,delta=1e-15)
self.assertAlmostEqual(ps[3].m,0.,delta=1e-15)
self.assertAlmostEqual(ps[1].vy,1.000499875062461,delta=1e-15)
self.assertAlmostEqual(ps[2].vy,0.7081066347613374,delta=1e-15)
self.assertAlmostEqual(ps[3].vy,0.5783504759643414,delta=1e-15)
o = self.sim.orbits(jacobi_masses=True)
self.assertAlmostEqual(o[0].a,1.,delta=1e-15)
self.assertAlmostEqual(o[1].a,2.,delta=1e-15)
self.assertAlmostEqual(o[2].a,3.,delta=1e-15)
self.assertAlmostEqual(o[0].e,0.,delta=1e-15)
self.assertAlmostEqual(o[1].e,0.,delta=1e-15)
self.assertAlmostEqual(o[2].e,0.,delta=1e-15)
def test_adding_orbits(self):
self.sim.add(m=1.)
self.sim.add(a=1.)
with self.assertRaises(ValueError):
self.sim.add(e=0.1)
with self.assertRaises(ValueError):
self.sim.add(a=1.,e=0.1,omega=0.1,pomega=0.1)
self.sim.add(a=2.,e=0.1,inc=0.1,pomega=0.1)
self.sim.add(a=2.,e=0.1,inc=-0.1,pomega=0.1)
self.sim.add(a=2.,e=0.1,inc=0.1,theta=0.1)
self.sim.add(a=2.,e=0.1,inc=-0.1,theta=0.1)
self.sim.add(a=2.,e=0.1,inc=0.1,l=0.1)
self.sim.add(a=2.,e=0.1,inc=-0.1,l=0.1)
self.sim.add(P=2.,e=0.1,inc=-2.1,pomega=0.1)
self.sim.add(P=2.,e=0.1,inc=-2.1,pomega=0.1,f=0.2)
self.sim.add(P=2.,e=0.1,inc=-2.1,pomega=0.1,T=0.2)
self.sim.add(P=2.,e=0.1,inc=-2.1,pomega=0.1,theta=0.2)
with self.assertRaises(ValueError):
self.sim.add(a=2.,e=0.1,f=0.1,M=0.1)
with self.assertRaises(ValueError):
self.sim.add(a=2.,e=0.1,f=0.1,l=0.1)
with self.assertRaises(ValueError):
self.sim.add(a=2.,e=0.1,f=0.1,theta=0.1)
with self.assertRaises(ValueError):
self.sim.add(a=3.,e=1.)
with self.assertRaises(ValueError):
self.sim.add(a=3.,e=1.1)
with self.assertRaises(ValueError):
self.sim.add(a=3.,P=1.1)
with self.assertRaises(ValueError):
self.sim.add(a=3.,e=-0.1)
self.sim.add(a=-3.,e=1.4)
with self.assertRaises(ValueError):
self.sim.add(a=-3.,e=0.9)
self.sim.add(a=-3.,e=1.4,f=0.1)
with self.assertRaises(ValueError):
self.sim.add(a=-3.,e=1.4,f=3.1)
def test_sim_orbits(self):
self.sim.add(m=1.)
self.sim.add(m=1.e-3, a=1.,e=0.2,inc=0.3)
self.sim.add(m=1.e-3, a=2.,e=0.2,inc=0.3)
ps = self.sim.particles
self.assertAlmostEqual(ps[1].a,1.,delta=1e-15)
self.assertAlmostEqual(ps[1].e,0.2,delta=1e-15)
self.assertAlmostEqual(ps[1].inc,0.3,delta=1e-15)
self.assertAlmostEqual(ps[2].a,2.,delta=1e-15)
self.assertAlmostEqual(ps[2].e,0.2,delta=1e-15)
self.assertAlmostEqual(ps[2].inc,0.3,delta=1e-15)
def test_orbits(self):
self.sim.add(m=1.)
self.sim.add(a=1.)
p = self.sim.particles
with self.assertRaises(ValueError):
p[0].orbit()
o = p[1].orbit()
string = o.__str__()
self.assertGreater(len(string),20)
self.assertAlmostEqual(o.a,1.,delta=1e-15)
self.assertAlmostEqual(o.e,0.,delta=1e-15)
self.assertAlmostEqual(o.f,0.,delta=1e-15)
self.assertAlmostEqual(o.inc,0.,delta=1e-15)
self.assertAlmostEqual(p[1].a,1.,delta=1e-15)
self.assertAlmostEqual(p[1].e,0.,delta=1e-15)
self.assertAlmostEqual(p[1].f,0.,delta=1e-15)
self.assertAlmostEqual(p[1].inc,0.,delta=1e-15)
self.assertAlmostEqual(p[1].d,1.,delta=1e-15)
self.assertAlmostEqual(p[1].v,1.,delta=1e-15)
self.assertAlmostEqual(p[1].h,1.,delta=1e-15)
self.assertAlmostEqual(p[1].P,math.pi*2.,delta=1e-15)
self.assertAlmostEqual(p[1].n,1.,delta=1e-15)
self.assertAlmostEqual(p[1].omega,0.,delta=1e-15)
self.assertAlmostEqual(p[1].pomega,0.,delta=1e-15)
self.assertAlmostEqual(p[1].Omega,0.,delta=1e-15)
self.assertAlmostEqual(p[1].M,0.,delta=1e-15)
self.assertAlmostEqual(p[1].l,0.,delta=1e-15)
self.assertAlmostEqual(p[1].theta,0.,delta=1e-15)
self.assertAlmostEqual(p[1].T,0.,delta=1e-15)
def test_orbits_errors(self):
self.sim.add()
self.sim.add(x=1)
with self.assertRaises(ValueError):
self.sim.particles[1].orbit()
def test_orbits_errors2(self):
self.sim.add(m=1)
p1 = rebound.Particle(simulation=self.sim, a=1,m=0.1)
self.sim.add(p1)
with self.assertRaises(ValueError):
self.sim.particles[1].orbit(primary=p1)
def test_orbits_errors3(self):
p1 = rebound.Particle(m=1.,x=1.,vy=0.4)
p2 = rebound.Particle(m=1.,x=4.,vy=2.4)
with self.assertRaises(ValueError):
p2.orbit()
with self.assertRaises(ValueError):
p2.orbit(primary=p1)
p2.orbit(primary=p1,G=1.)
class TestParticleOperators(unittest.TestCase):
    """Arithmetic operators on rebound.Particle.

    Every operator acts component-wise on position, velocity and mass.
    The in-place variants are checked against a copy so the expected
    values can still be computed from the untouched original.
    """

    # Attributes every particle operator acts on component-wise.
    _FIELDS = ("x", "y", "z", "vx", "vy", "vz", "m")

    def _assert_componentwise(self, result, left, right, combine):
        """Assert result.attr == combine(left.attr, right.attr) for all fields."""
        for field in self._FIELDS:
            self.assertEqual(
                getattr(result, field),
                combine(getattr(left, field), getattr(right, field)))

    def _assert_scaled(self, result, source, transform):
        """Assert result.attr == transform(source.attr) for all fields."""
        for field in self._FIELDS:
            self.assertEqual(getattr(result, field),
                             transform(getattr(source, field)))

    def test_sub(self):
        p1 = rebound.Particle(m=1.1, x=1.2, vy=1.3)
        p2 = rebound.Particle(m=1.4, x=1.6, vy=1.8)
        self._assert_componentwise(p1 - p2, p1, p2, lambda a, b: a - b)

    def test_add(self):
        p1 = rebound.Particle(m=1.1, x=1.2, vy=1.3)
        p2 = rebound.Particle(m=1.4, x=1.6, vy=1.8)
        self._assert_componentwise(p1 + p2, p1, p2, lambda a, b: a + b)

    def test_mul(self):
        p1 = rebound.Particle(m=1.1, x=1.2, vy=1.3)
        self._assert_scaled(2. * p1, p1, lambda v: 2. * v)

    def test_div(self):
        p1 = rebound.Particle(m=1.2, x=1.4, vy=1.8)
        self._assert_scaled(p1 / 2., p1, lambda v: v / 2.)

    def test_truediv(self):
        p1 = rebound.Particle(m=1.2, x=1.4, vy=1.8)
        # Dividing by an int must behave exactly like float division.
        self._assert_scaled(p1 / 2, p1, lambda v: v / 2.)

    def test_isub(self):
        p1 = rebound.Particle(m=1.1, x=1.2, vy=1.3)
        p2 = rebound.Particle(m=1.4, x=1.6, vy=1.8)
        p3 = p1.copy()
        p3 -= p2
        self._assert_componentwise(p3, p1, p2, lambda a, b: a - b)

    def test_iadd(self):
        p1 = rebound.Particle(m=1.1, x=1.2, vy=1.3)
        p2 = rebound.Particle(m=1.4, x=1.6, vy=1.8)
        p3 = p1.copy()
        p3 += p2
        self._assert_componentwise(p3, p1, p2, lambda a, b: a + b)

    def test_imul(self):
        p1 = rebound.Particle(m=1.1, x=1.2, vy=1.3)
        p2 = p1.copy()
        p2 *= 2.
        self._assert_scaled(p2, p1, lambda v: 2. * v)

    def test_idiv(self):
        p1 = rebound.Particle(m=1.2, x=1.4, vy=1.8)
        p2 = p1.copy()
        p2 /= 2.
        self._assert_scaled(p2, p1, lambda v: v / 2.)

    def test_itruediv(self):
        p1 = rebound.Particle(m=1.2, x=1.4, vy=1.8)
        p2 = p1.copy()
        p2 /= 2
        self._assert_scaled(p2, p1, lambda v: v / 2.)
class TestParticleCopy(unittest.TestCase):
    """Copy semantics: copies must be decoupled from their originals."""

    def test_copy(self):
        """copy() returns a detached snapshot of a simulation particle."""
        sim = rebound.Simulation()
        sim.add(m=1.)
        sim.add(m=1., e=0.2, a=1)
        duplicate = sim.particles[1].copy()
        # The copy starts out identical to the in-simulation particle...
        for attribute in ("x", "vx", "m"):
            self.assertEqual(getattr(sim.particles[1], attribute),
                             getattr(duplicate, attribute))
        # ...but mutating the simulation particle leaves the copy alone.
        sim.particles[1].m = 0.01
        self.assertNotEqual(sim.particles[1].m, duplicate.m)

    def test_copy2(self):
        """Copy-constructed particles are independent, also once added."""
        sim = rebound.Simulation()
        sim.add(m=1.)
        p1 = rebound.Particle(simulation=sim, m=1., e=0.2, a=1)
        p2 = rebound.Particle(p1)  # copy constructor
        p1.m = 2.
        p2.m = 3.
        sim.add(p1)
        sim.add(p2)
        # Standalone particles and their in-simulation copies all keep
        # their own masses.
        self.assertEqual(p1.m, 2.)
        self.assertEqual(p2.m, 3.)
        self.assertEqual(sim.particles[1].m, 2.)
        self.assertEqual(sim.particles[2].m, 3.)
class TestParticleNotInSimulation(unittest.TestCase):
    """Particles constructed without an owning simulation."""

    def test_create(self):
        """Cartesian init works standalone; orbital elements do not."""
        particle_a = rebound.Particle()
        particle_b = rebound.Particle(x=1)
        # Orbital elements need a primary, so this must fail:
        with self.assertRaises(ValueError):
            rebound.Particle(a=1)


if __name__ == "__main__":
    unittest.main()
|
hannoreinREPO_NAMEREBOUNDPATH_START.@REBOUND_extracted@REBOUND-main@rebound@tests@test_particle.py@.PATH_END.py
|
{
"filename": "_z.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/isosurface/caps/_z.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Z(_BaseTraceHierarchyType):
    """Settings for the cap surface normal to the z axis of an
    isosurface trace: ``fill`` (cap fill ratio in [0, 1]) and ``show``
    (bool).  Auto-generated plotly graph-object class."""
    # class properties
    # --------------------
    _parent_path_str = "isosurface.caps"
    _path_str = "isosurface.caps.z"
    # Only these keys are accepted by __setitem__/validation.
    _valid_props = {"fill", "show"}
    # fill
    # ----
    @property
    def fill(self):
        """
        Sets the fill ratio of the `caps`. The default fill value of
        the `caps` is 1 meaning that they are entirely shaded. On the
        other hand Applying a `fill` ratio less than one would allow
        the creation of openings parallel to the edges.
        The 'fill' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
        Returns
        -------
        int|float
        """
        return self["fill"]
    @fill.setter
    def fill(self, val):
        self["fill"] = val
    # show
    # ----
    # NOTE(review): the generated docstring below talks about `slices`;
    # the property itself is a bool on the caps — confirm against the
    # upstream plotly schema/generator.
    @property
    def show(self):
        """
        Sets the fill ratio of the `slices`. The default fill value of
        the z `slices` is 1 meaning that they are entirely shaded. On
        the other hand Applying a `fill` ratio less than one would
        allow the creation of openings parallel to the edges.
        The 'show' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["show"]
    @show.setter
    def show(self, val):
        self["show"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        fill
            Sets the fill ratio of the `caps`. The default fill
            value of the `caps` is 1 meaning that they are entirely
            shaded. On the other hand Applying a `fill` ratio less
            than one would allow the creation of openings parallel
            to the edges.
        show
            Sets the fill ratio of the `slices`. The default fill
            value of the z `slices` is 1 meaning that they are
            entirely shaded. On the other hand Applying a `fill`
            ratio less than one would allow the creation of
            openings parallel to the edges.
        """
    def __init__(self, arg=None, fill=None, show=None, **kwargs):
        """
        Construct a new Z object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.isosurface.caps.Z`
        fill
            Sets the fill ratio of the `caps`. The default fill
            value of the `caps` is 1 meaning that they are entirely
            shaded. On the other hand Applying a `fill` ratio less
            than one would allow the creation of openings parallel
            to the edges.
        show
            Sets the fill ratio of the `slices`. The default fill
            value of the z `slices` is 1 meaning that they are
            entirely shaded. On the other hand Applying a `fill`
            ratio less than one would allow the creation of
            openings parallel to the edges.
        Returns
        -------
        Z
        """
        super(Z, self).__init__("z")
        # Internal fast path: when a parent is handed in, attach to it
        # and skip the normal argument validation below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping keys below does not mutate the
            # caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.isosurface.caps.Z
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.caps.Z`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in arg.
        _v = arg.pop("fill", None)
        _v = fill if fill is not None else _v
        if _v is not None:
            self["fill"] = _v
        _v = arg.pop("show", None)
        _v = show if show is not None else _v
        if _v is not None:
            self["show"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@isosurface@caps@_z.py@.PATH_END.py
|
{
"filename": "loggingconfig.py",
"repo_name": "StingraySoftware/stingray",
"repo_path": "stingray_extracted/stingray-main/stingray/loggingconfig.py",
"type": "Python"
}
|
import logging
logger = None
class CustomFormatter(logging.Formatter):
    """Console log formatter that colorizes records by severity.

    The shared record layout is wrapped in ANSI escape codes so that
    warnings render yellow, errors red, critical bold red, and
    debug/info grey.  Records with an unknown level number fall back to
    plain ``logging.Formatter(None)`` behavior (the bare message),
    matching the original implementation.
    """

    # ANSI escape sequences for each severity color.
    grey = "\x1b[38;20m"
    yellow = "\x1b[33;20m"
    red = "\x1b[31;20m"
    bold_red = "\x1b[31;1m"
    reset = "\x1b[0m"

    # Record layout shared by every level (renamed from ``format`` so it
    # no longer shadows the ``format()`` method defined below).
    fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"

    # Per-level format strings: the shared layout wrapped in color codes.
    FORMATS = {
        logging.DEBUG: grey + fmt + reset,
        logging.INFO: grey + fmt + reset,
        logging.WARNING: yellow + fmt + reset,
        logging.ERROR: red + fmt + reset,
        logging.CRITICAL: bold_red + fmt + reset,
    }

    # Build one Formatter per level once, instead of constructing a new
    # Formatter for every record as the previous implementation did.
    _FORMATTERS = {level: logging.Formatter(f) for level, f in FORMATS.items()}

    def format(self, record):
        """Format ``record`` using the color scheme of its level."""
        formatter = self._FORMATTERS.get(record.levelno)
        if formatter is None:
            # Unknown/custom level: logging.Formatter(None) default.
            formatter = logging.Formatter(self.FORMATS.get(record.levelno))
        return formatter.format(record)
def setup_logger():
    """Return the module-wide logger, creating and configuring it once.

    The first call attaches a colorized stream handler (CustomFormatter)
    and sets the level to WARNING; every later call returns the same
    logger object unchanged.
    """
    global logger
    if logger is None:
        configured = logging.getLogger(__name__)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(CustomFormatter())
        configured.addHandler(stream_handler)
        configured.setLevel(logging.WARNING)
        logger = configured
    return logger
|
StingraySoftwareREPO_NAMEstingrayPATH_START.@stingray_extracted@stingray-main@stingray@loggingconfig.py@.PATH_END.py
|
{
"filename": "colormaps.py",
"repo_name": "psheehan/pdspy",
"repo_path": "pdspy_extracted/pdspy-master/pdspy/plotting/colormaps.py",
"type": "Python"
}
|
import matplotlib.colors as colors
# Rainbow1 colormap.
# Segment data follows matplotlib's LinearSegmentedColormap convention:
# each channel lists anchor points (x, y_below, y_above); here both y
# values are equal everywhere, so the channel interpolates linearly
# between anchors.
rainbow1_cdict = {'red': ((0, 1, 1),
                          (0.15, 0, 0),
                          (0.45, 0, 0),
                          (0.5, 0, 0),
                          (0.55, 1, 1),
                          (0.75, 1, 1),
                          (0.875, 1, 1),
                          (1, 0, 0)),
                  'green': ((0, 1, 1),
                            (0.15, 0, 0),
                            (0.45, 1, 1),
                            (0.5, 1, 1),
                            (0.55, 1, 1),
                            (0.75, 0, 0),
                            (0.875, 0, 0),
                            (1, 0, 0)),
                  'blue': ((0, 1, 1),
                           (0.15, 1, 1),
                           (0.45, 1, 1),
                           (0.5, 0, 0),
                           (0.55, 0, 0),
                           (0.75, 0, 0),
                           (0.875, 1, 1),
                           (1, 0, 0))}
# Bug fix: this colormap was previously registered under the name
# 'rainbow3' (copy-paste from the block below); it now carries its own
# name so cmap.name identifies it correctly.
rainbow1 = colors.LinearSegmentedColormap('rainbow1', rainbow1_cdict, 256)
# Rainbow3 colormap.
# Anchor table: (position, red, green, blue).  It is expanded below into
# matplotlib's (x, y_below, y_above) segment-data form with both y
# values equal, i.e. plain linear interpolation between anchors.
_rainbow3_anchors = (
    (0, 0, 0, 0),
    (0.15, 0, 0, 1),
    (0.45, 0, 1, 1),
    (0.5, 0, 1, 0),
    (0.55, 1, 1, 0),
    (0.75, 1, 0, 0),
    (0.875, 1, 0, 1),
    (1, 1, 1, 1),
)
rainbow3_cdict = {
    channel: tuple((row[0], row[1 + ichan], row[1 + ichan])
                   for row in _rainbow3_anchors)
    for ichan, channel in enumerate(('red', 'green', 'blue'))
}
rainbow3 = colors.LinearSegmentedColormap('rainbow3', rainbow3_cdict, 256)
|
psheehanREPO_NAMEpdspyPATH_START.@pdspy_extracted@pdspy-master@pdspy@plotting@colormaps.py@.PATH_END.py
|
{
"filename": "_domain.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/polar/_domain.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DomainValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator wiring `layout.polar.domain` to its Domain class."""

    def __init__(self, plotly_name="domain", parent_name="layout.polar", **kwargs):
        # Callers may override the data class and its docs via **kwargs;
        # pop them first so they are not forwarded twice.
        data_class_str = kwargs.pop("data_class_str", "Domain")
        data_docs = kwargs.pop(
            "data_docs",
            """
            column
                If there is a layout grid, use the domain for
                this column in the grid for this polar subplot
                .
            row
                If there is a layout grid, use the domain for
                this row in the grid for this polar subplot .
            x
                Sets the horizontal domain of this polar
                subplot (in plot fraction).
            y
                Sets the vertical domain of this polar subplot
                (in plot fraction).
            """,
        )
        super(DomainValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@polar@_domain.py@.PATH_END.py
|
{
"filename": "plot_param_contour_2stream_fixed_T_int.py",
"repo_name": "Jingxuan97/nemesispy",
"repo_path": "nemesispy_extracted/nemesispy-main/nemesispy/data/gcm/wasp43b_vivien/plot_param_contour_2stream_fixed_T_int.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Plot the distributions of best fit parameters of the 2-Stream Guillot profile
with fixed T_int from the fit to the WASP-43b GCM.
Parameters are {kappa,gamma,f}
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Read GCM data
from nemesispy.data.gcm.wasp43b_vivien.process_wasp43b_gcm_vivien import (
nlon,nlat,xlon,xlat,npv,pv,\
tmap,h2omap,comap,co2map,ch4map,hemap,h2map,vmrmap,\
tmap_mod,h2omap_mod,comap_mod,co2map_mod,ch4map_mod,\
hemap_mod,h2map_mod,vmrmap_mod,phase_grid,\
kevin_phase_by_wave,kevin_wave_by_phase,\
pat_phase_by_wave,pat_wave_by_phase)
### 2-Stream Guillot profile with fixed T=100 K parameters
# Names of the fitted parameters, in the column order of the text file.
param_name_2stream_fixed_T = [r'log $\gamma$',r'log $\kappa$',r'$f$']
nparam = len(param_name_2stream_fixed_T)
## Load the best fitting 1D model parameters
params = np.loadtxt('best_fit_params_2_stream_Guillot_fixed_T_int.txt',
    unpack=True,delimiter=',')
# After the transpose, params[i] holds the nparam parameters of grid
# point i = ilon*nlat + ilat (see the unpacking loop below).
params = params.T
## Assign the parameters to their (longitude,latitude) grid positions
best_params = np.zeros((nlon,nlat,nparam))
for ilon in range(nlon):
    for ilat in range(nlat):
        best_params[ilon,ilat,:] = params[ilon*nlat+ilat,:]
## Plot the best fitting 1D parameters
# set up foreshortened latitude coordinates
fs = np.sin(xlat/180*np.pi)*90
x,y = np.meshgrid(xlon,fs,indexing='ij')
xticks = np.array([-180, -150, -120, -90, -60, -30, 0, 30, 60, 90, 120,
    150, 180])
# move the y ticks to the foreshortened location
yticks_loc = np.sin(np.array([-60, -30, 0, 30, 60])/180*np.pi)*90
yticks_label = np.array([-60, -30, 0, 30, 60])
## Set up multiplot
fig,axs = plt.subplots(
    nrows=5,ncols=1,
    sharex=True,sharey=True,
    figsize=(8.3,11.7),
    dpi=400
    )
# One filled-contour panel per fitted parameter.
for iparam,name in enumerate(param_name_2stream_fixed_T):
    # contour plot
    z_param = best_params[:,:,iparam]
    im = axs[iparam].contourf(x,y,z_param,
        levels=20,
        cmap='magma',
        vmin=z_param.min(),
        vmax=z_param.max()
        )
    cbar = fig.colorbar(im,ax=axs[iparam])
    # axis setting
    axs[iparam].set_xticks(xticks)
    axs[iparam].set_yticks(yticks_loc,yticks_label)
    axs[iparam].set_title('{}'.format(name),#fontsize='small'
        )
# Only nparam (= 3) of the 5 stacked panels are used; hide the rest.
axs[3].axis('off')
axs[4].axis('off')
fig.tight_layout()
plt.savefig('figures/param_contour_2stream_fixed_T_int.pdf',dpi=400)
|
Jingxuan97REPO_NAMEnemesispyPATH_START.@nemesispy_extracted@nemesispy-main@nemesispy@data@gcm@wasp43b_vivien@plot_param_contour_2stream_fixed_T_int.py@.PATH_END.py
|
{
"filename": "_text.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattercarpet/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `text` property of `scattercarpet` traces."""

    def __init__(self, plotly_name="text", parent_name="scattercarpet", **kwargs):
        # Defaults may be overridden by the caller through **kwargs.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        super(TextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattercarpet@_text.py@.PATH_END.py
|
{
"filename": "_elliptic_envelope.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/covariance/_elliptic_envelope.py",
"type": "Python"
}
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
import numpy as np
from ..base import OutlierMixin, _fit_context
from ..metrics import accuracy_score
from ..utils._param_validation import Interval
from ..utils.validation import check_is_fitted
from ._robust_covariance import MinCovDet
class EllipticEnvelope(OutlierMixin, MinCovDet):
    """An object for detecting outliers in a Gaussian distributed dataset.
    Read more in the :ref:`User Guide <outlier_detection>`.
    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.
    assume_centered : bool, default=False
        If True, the support of robust location and covariance estimates
        is computed, and a covariance estimate is recomputed from it,
        without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.
    support_fraction : float, default=None
        The proportion of points to be included in the support of the raw
        MCD estimate. If None, the minimum value of support_fraction will
        be used within the algorithm: `(n_samples + n_features + 1) / 2 * n_samples`.
        Range is (0, 1).
    contamination : float, default=0.1
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. Range is (0, 0.5].
    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling
        the data. Pass an int for reproducible results across multiple function
        calls. See :term:`Glossary <random_state>`.
    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated robust location.
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated robust covariance matrix.
    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)
    support_ : ndarray of shape (n_samples,)
        A mask of the observations that have been used to compute the
        robust estimates of location and shape.
    offset_ : float
        Offset used to define the decision function from the raw scores.
        We have the relation: ``decision_function = score_samples - offset_``.
        The offset depends on the contamination parameter and is defined in
        such a way we obtain the expected number of outliers (samples with
        decision function < 0) in training.
        .. versionadded:: 0.20
    raw_location_ : ndarray of shape (n_features,)
        The raw robust estimated location before correction and re-weighting.
    raw_covariance_ : ndarray of shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.
    raw_support_ : ndarray of shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.
    dist_ : ndarray of shape (n_samples,)
        Mahalanobis distances of the training set (on which :meth:`fit` is
        called) observations.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.
    ShrunkCovariance : Covariance estimator with shrinkage.
    Notes
    -----
    Outlier detection from covariance estimation may break or not
    perform well in high-dimensional settings. In particular, one will
    always take care to work with ``n_samples > n_features ** 2``.
    References
    ----------
    .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
       minimum covariance determinant estimator" Technometrics 41(3), 212
       (1999)
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import EllipticEnvelope
    >>> true_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
    ...                                                  cov=true_cov,
    ...                                                  size=500)
    >>> cov = EllipticEnvelope(random_state=0).fit(X)
    >>> # predict returns 1 for an inlier and -1 for an outlier
    >>> cov.predict([[0, 0],
    ...              [3, 3]])
    array([ 1, -1])
    >>> cov.covariance_
    array([[0.7411..., 0.2535...],
           [0.2535..., 0.3053...]])
    >>> cov.location_
    array([0.0813... , 0.0427...])
    """
    # Parameter validation: inherit MinCovDet's constraints and bound
    # contamination to the half-open interval (0, 0.5].
    _parameter_constraints: dict = {
        **MinCovDet._parameter_constraints,
        "contamination": [Interval(Real, 0, 0.5, closed="right")],
    }
    def __init__(
        self,
        *,
        store_precision=True,
        assume_centered=False,
        support_fraction=None,
        contamination=0.1,
        random_state=None,
    ):
        super().__init__(
            store_precision=store_precision,
            assume_centered=assume_centered,
            support_fraction=support_fraction,
            random_state=random_state,
        )
        self.contamination = contamination
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the EllipticEnvelope model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        super().fit(X)
        # Choose offset_ so that the `contamination` fraction of training
        # samples ends up with a negative decision function value.
        self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination)
        return self
    def decision_function(self, X):
        """Compute the decision function of the given observations.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        Returns
        -------
        decision : ndarray of shape (n_samples,)
            Decision function of the samples.
            It is equal to the shifted Mahalanobis distances.
            The threshold for being an outlier is 0, which ensures a
            compatibility with other outlier detection algorithms.
        """
        check_is_fitted(self)
        negative_mahal_dist = self.score_samples(X)
        return negative_mahal_dist - self.offset_
    def score_samples(self, X):
        """Compute the negative Mahalanobis distances.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        Returns
        -------
        negative_mahal_distances : array-like of shape (n_samples,)
            Opposite of the Mahalanobis distances.
        """
        check_is_fitted(self)
        return -self.mahalanobis(X)
    def predict(self, X):
        """
        Predict labels (1 inlier, -1 outlier) of X according to fitted model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and +1 for inliers.
        """
        values = self.decision_function(X)
        # Default everything to outlier (-1), then flip the samples with a
        # non-negative decision value to inlier (+1).
        is_inlier = np.full(values.shape[0], -1, dtype=int)
        is_inlier[values >= 0] = 1
        return is_inlier
    def score(self, X, y, sample_weight=None):
        """Return the mean accuracy on the given test data and labels.
        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            True labels for X.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.
        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) w.r.t. y.
        """
        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@covariance@_elliptic_envelope.py@.PATH_END.py
|
{
"filename": "dolphin.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/shapes_and_collections/dolphin.py",
"type": "Python"
}
|
"""
========
Dolphins
========
This example shows how to draw, and manipulate shapes given vertices
and nodes using the `~.path.Path`, `~.patches.PathPatch` and
`~matplotlib.transforms` classes.
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from matplotlib.patches import Circle, PathPatch
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
# Fixing random state for reproducibility
np.random.seed(19680801)
# 50 random points in polar coordinates; radius in [0, 1) keeps them
# inside the unit circle (not uniform by area, which is fine here).
r = np.random.rand(50)
t = np.random.rand(50) * np.pi * 2.0
x = r * np.cos(t)
y = r * np.sin(t)
fig, ax = plt.subplots(figsize=(6, 6))
circle = Circle((0, 0), 1, facecolor='none',
                edgecolor=(0, 0.8, 0.8), linewidth=3, alpha=0.5)
ax.add_patch(circle)
# Random background image, clipped to the circle outline below.
im = plt.imshow(np.random.random((100, 100)),
                origin='lower', cmap=cm.winter,
                interpolation='spline36',
                extent=(-1, 1, -1, 1))
im.set_clip_path(circle)
plt.plot(x, y, 'o', color=(0.9, 0.9, 1.0), alpha=0.8)
# Dolphin from OpenClipart library by Andy Fitzsimon
#   <cc:License rdf:about="http://web.resource.org/cc/PublicDomain">
#     <cc:permits rdf:resource="http://web.resource.org/cc/Reproduction"/>
#     <cc:permits rdf:resource="http://web.resource.org/cc/Distribution"/>
#     <cc:permits rdf:resource="http://web.resource.org/cc/DerivativeWorks"/>
#   </cc:License>
# SVG-style path data: each command letter (M = moveto, C = cubic Bezier
# segment) is followed by "x,y" coordinate pairs; parsed by the loop below.
dolphin = """
M -0.59739425,160.18173 C -0.62740401,160.18885 -0.57867129,160.11183
-0.57867129,160.11183 C -0.57867129,160.11183 -0.5438361,159.89315
-0.39514638,159.81496 C -0.24645668,159.73678 -0.18316813,159.71981
-0.18316813,159.71981 C -0.18316813,159.71981 -0.10322971,159.58124
-0.057804323,159.58725 C -0.029723983,159.58913 -0.061841603,159.60356
-0.071265813,159.62815 C -0.080250183,159.65325 -0.082918513,159.70554
-0.061841203,159.71248 C -0.040763903,159.7194 -0.0066711426,159.71091
0.077336307,159.73612 C 0.16879567,159.76377 0.28380306,159.86448
0.31516668,159.91533 C 0.3465303,159.96618 0.5011127,160.1771
0.5011127,160.1771 C 0.63668998,160.19238 0.67763022,160.31259
0.66556395,160.32668 C 0.65339985,160.34212 0.66350443,160.33642
0.64907098,160.33088 C 0.63463742,160.32533 0.61309688,160.297
0.5789627,160.29339 C 0.54348657,160.28968 0.52329693,160.27674
0.50728856,160.27737 C 0.49060916,160.27795 0.48965803,160.31565
0.46114204,160.33673 C 0.43329696,160.35786 0.4570711,160.39871
0.43309565,160.40685 C 0.4105108,160.41442 0.39416631,160.33027
0.3954995,160.2935 C 0.39683269,160.25672 0.43807996,160.21522
0.44567915,160.19734 C 0.45327833,160.17946 0.27946869,159.9424
-0.061852613,159.99845 C -0.083965233,160.0427 -0.26176109,160.06683
-0.26176109,160.06683 C -0.30127962,160.07028 -0.21167141,160.09731
-0.24649368,160.1011 C -0.32642366,160.11569 -0.34521187,160.06895
-0.40622293,160.0819 C -0.467234,160.09485 -0.56738444,160.17461
-0.59739425,160.18173
"""
vertices = []
codes = []
parts = dolphin.split()
i = 0
# Map SVG path commands to matplotlib Path codes.
code_map = {
    'M': Path.MOVETO,
    'C': Path.CURVE4,
    'L': Path.LINETO,
}
# Walk the token stream: each command letter is followed by exactly as
# many "x,y" pairs as its Path code requires.
while i < len(parts):
    path_code = code_map[parts[i]]
    npoints = Path.NUM_VERTICES_FOR_CODE[path_code]
    codes.extend([path_code] * npoints)
    vertices.extend([[*map(float, y.split(','))]
                     for y in parts[i + 1:][:npoints]])
    i += npoints + 1
vertices = np.array(vertices)
# Shift the SVG y-coordinates (all near 160) down to the origin.
vertices[:, 1] -= 160
dolphin_path = Path(vertices, codes)
dolphin_patch = PathPatch(dolphin_path, facecolor=(0.6, 0.6, 0.6),
                          edgecolor=(0.0, 0.0, 0.0))
ax.add_patch(dolphin_patch)
# Second dolphin: the same vertices rotated by 60 degrees about the origin.
vertices = Affine2D().rotate_deg(60).transform(vertices)
dolphin_path2 = Path(vertices, codes)
dolphin_patch2 = PathPatch(dolphin_path2, facecolor=(0.5, 0.5, 0.5),
                           edgecolor=(0.0, 0.0, 0.0))
ax.add_patch(dolphin_patch2)
plt.show()
# %%
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.path`
# - `matplotlib.path.Path`
# - `matplotlib.patches`
# - `matplotlib.patches.PathPatch`
# - `matplotlib.patches.Circle`
# - `matplotlib.axes.Axes.add_patch`
# - `matplotlib.transforms`
# - `matplotlib.transforms.Affine2D`
# - `matplotlib.transforms.Affine2D.rotate_deg`
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@shapes_and_collections@dolphin.py@.PATH_END.py
|
{
"filename": "extinction.py",
"repo_name": "pcubillos/pyratbay",
"repo_path": "pyratbay_extracted/pyratbay-master/pyratbay/pyrat/extinction.py",
"type": "Python"
}
|
# Copyright (c) 2021-2022 Patricio Cubillos
# Pyrat Bay is open-source software under the GPL-2.0 license (see LICENSE)
import os
import ctypes
import multiprocessing as mp
import numpy as np
import scipy.interpolate as sip
from .. import tools as pt
from .. import constants as pc
from .. import io as io
from ..lib import _extcoeff as ec
def exttable(pyrat):
    """
    Handle the extinction-coefficient table: read it from file when one
    already exists, otherwise compute it and write it to file.
    """
    ex = pyrat.ex
    # Collect the (unique) species that have line-transition isotopes:
    has_isotopes = pyrat.iso.imol >= 0
    if not np.any(has_isotopes):
        ex.nspec = 0
    else:
        imols = pyrat.iso.imol[np.where(has_isotopes)]
        ex.species = pyrat.mol.name[np.unique(imols)]
        ex.nspec = len(ex.species)
    # Nothing to do when no table file was requested:
    if ex.extfile is None:
        pyrat.log.head("\nNo extinction-coefficient table requested.")
        return
    if pt.isfile(ex.extfile) == 1:
        # Table file(s) already exist, load them:
        pyrat.log.head("\nReading extinction-coefficient table file(s):")
        for extfile in ex.extfile:
            pyrat.log.head(f" '{extfile}'.")
        read_extinction(pyrat)
    else:
        # Compute a new table and save it to the first file name:
        pyrat.log.head(
            "\nGenerating new extinction-coefficient table file:"
            f"\n '{pyrat.ex.extfile[0]}'."
        )
        calc_extinction(pyrat)
def read_extinction(pyrat):
    """
    Read extinction-coefficient tables from files.

    Load the tabulated opacity file(s) listed in pyrat.ex.extfile, check
    that all files share consistent temperature/pressure/wavenumber grids
    (to within 1%), and concatenate their per-species opacities.  If the
    tabulated wavenumber sampling differs from the requested spectral
    sampling, adopt the tabulated sampling and update the dependent
    spectral quantities (oversampled grid, stellar flux, filters).

    Parameters
    ----------
    pyrat: Pyrat object
        Updated in place (pyrat.ex attributes, possibly pyrat.spec).
    """
    ex = pyrat.ex
    log = pyrat.log
    spec = pyrat.spec
    # Reset table attributes before accumulating over the input files:
    ex.species, ex.etable = [], []
    ex.ntemp = 0
    ex.temp = ex.press = ex.wn = None
    for extfile in ex.extfile:
        edata = io.read_opacity(extfile)
        efile = os.path.basename(extfile)
        # Array sizes:
        nspec, ntemp, nlayers, nwave = edata[0]
        # Every file after the first must match the established grid shape:
        size_mismatch = (
            ex.ntemp > 0 and
            (ntemp != ex.ntemp or nlayers != ex.nlayers or nwave != ex.nwave)
        )
        if size_mismatch:
            log.error(
                f"Shape of the extinction-coefficient file '{efile}' "
                "does not match with previous file shapes."
            )
        ex.ntemp, ex.nlayers, ex.nwave = ntemp, nlayers, nwave
        # Species, temperature (K), pressure (barye), and wavenumber (cm-1):
        species, temp, press, wn = edata[1]
        # Grids must agree with previously read files to within 1 percent:
        if ex.temp is not None and np.any(np.abs(1-temp/ex.temp) > 0.01):
            log.error(
                f"Tabulated temperature array of file '{efile}' "
                "does not match with the previous temperature arrays."
            )
        if ex.press is not None and np.any(np.abs(1-press/ex.press) > 0.01):
            log.error(
                f"Tabulated pressure array of file '{efile}' "
                "does not match with the previous pressure arrays."
            )
        if ex.wn is not None and np.any(np.abs(1-wn/ex.wn) > 0.01):
            log.error(
                f"Tabulated wavelength array of file '{efile}' "
                "does not match with the previous wavelength arrays."
            )
        ex.temp, ex.press, ex.wn = temp, press, wn
        ex.species += list(species)
        # Extinction-coefficient table (cm2 molecule-1):
        ex.etable.append(edata[2])
    ex.species = np.array(ex.species)
    ex.nspec = len(ex.species)
    # Stack the per-file tables along the species axis:
    ex.etable = np.vstack(ex.etable)
    if np.ndim(ex.etable) == 3:
        ex.etable = np.expand_dims(ex.etable, axis=0)
    log.msg(
        f"File(s) have {ex.nspec} molecules, {ex.ntemp} "
        f"temperature samples, {ex.nlayers} layers, and {ex.nwave} "
        "wavenumber samples.",
        indent=2,
    )
    # Set tabulated temperature extrema:
    ex.tmin = np.amin(ex.temp)
    ex.tmax = np.amax(ex.temp)
    # Pretty-printed arrays for the log:
    with np.printoptions(precision=1):
        str_temp = str(ex.temp)
    with np.printoptions(formatter={'float':'{: .2f}'.format}, threshold=100):
        str_wn = str(ex.wn)
    with np.printoptions(formatter={'float':'{:.3e}'.format}):
        str_press = str(ex.press/pc.bar)
    log.msg(
        f"Species names: {ex.species}\n"
        f"Temperatures (K):\n {str_temp}\n"
        f"Pressure layers (bar):\n{str_press}\n"
        f"Wavenumber array (cm-1):\n {str_wn}",
        indent=2,
    )
    # New wavelength sampling:
    if ex.nwave != spec.nwave or np.sum(np.abs(ex.wn-spec.wn)) > 0:
        # Clip the tabulated grid to the requested spectral range:
        wn_mask = (ex.wn >= spec.wnlow) & (ex.wn <= spec.wnhigh)
        if spec.wnlow <= ex.wn[0]:
            spec.wnlow = ex.wn[0]
        if spec.wnhigh >= ex.wn[-1]:
            spec.wnhigh = ex.wn[-1]
        # Constant-step tabulated grid --> uniform sampling rate:
        if len(set(np.ediff1d(ex.wn))) == 1:
            spec.wnstep = ex.wn[1] - ex.wn[0]
            spec.resolution = None
            #spec.wnlow = ex.wn[0]
            sampling_text = f'sampling rate = {spec.wnstep:.2f} cm-1'
        else:
            # Otherwise, assume constant resolving power R = wn/delta-wn,
            # derived from the ratio of the last two wavenumber samples:
            g = ex.wn[-2]/ex.wn[-1]
            # Assuming no one would care to set a R with more than 5 decimals:
            spec.resolution = np.round(0.5*(1+g)/(1-g), decimals=5)
            #spec.wnhigh = 2 * ex.wn[-1]/(1+g)
            sampling_text = f'R = {spec.resolution:.1f}'
        # Update wavenumber sampling:
        spec.wn = ex.wn = ex.wn[wn_mask]
        ex.etable = ex.etable[:,:,:,wn_mask]
        spec.nwave = ex.nwave = len(ex.wn)
        # Keep wavenumber oversampling factor:
        spec.ownstep = spec.wnstep / spec.wnosamp
        spec.onwave = (spec.nwave - 1) * spec.wnosamp + 1
        spec.own = np.linspace(spec.wn[0], spec.wn[-1], spec.onwave)
        # Update interpolated stellar spectrum:
        if pyrat.phy.starflux is not None:
            # Extrapolate with the edge values outside the stellar grid:
            fill_value = (pyrat.phy.starflux[0], pyrat.phy.starflux[-1])
            sinterp = sip.interp1d(
                pyrat.phy.starwn, pyrat.phy.starflux,
                bounds_error=False,
                fill_value=fill_value,
            )
            spec.starflux = sinterp(spec.wn)
        # Update observational variables:
        pyrat.set_filters()
        log.warning(
            "Wavenumber sampling from extinction-coefficient "
            "table does not match the input wavenumber sampling. Adopting "
            f"tabulated array with {ex.nwave} samples, {sampling_text}, and "
            f"ranges [{spec.wnlow:.2f}, {spec.wnhigh:.2f}] cm-1."
        )
def calc_extinction(pyrat):
    """
    Compute the extinction-coefficient (per species) for a tabulated grid of
    temperatures and pressures, over a wavenumber array.

    The (temperature, layer) grid points are distributed round-robin over
    pyrat.ncpu worker processes that fill a shared-memory table, which is
    finally written to the first file in pyrat.ex.extfile.

    Parameters
    ----------
    pyrat: Pyrat object
        Updated in place (pyrat.ex.temp, z, etable, ...).
    """
    ex = pyrat.ex
    spec = pyrat.spec
    log = pyrat.log
    # Temperature boundaries check:
    if ex.tmin < pyrat.lt.tmin:
        log.error(
            'Requested extinction-coefficient table temperature '
            f'(tmin={ex.tmin:.1f} K) below the lowest available TLI '
            f'temperature ({pyrat.lt.tmin:.1f} K).'
        )
    if ex.tmax > pyrat.lt.tmax:
        log.error(
            'Requested extinction-coefficient table temperature '
            f'(tmax={ex.tmax:.1f} K) above the highest available TLI '
            f'temperature ({pyrat.lt.tmax:.1f} K).'
        )
    # Create the temperature array (uniform steps of ex.tstep):
    ex.ntemp = int((ex.tmax-ex.tmin)/ex.tstep) + 1
    ex.temp = np.linspace(ex.tmin, ex.tmin + (ex.ntemp-1)*ex.tstep, ex.ntemp)
    with np.printoptions(formatter={'float':'{:.1f}'.format}):
        log.msg(f"Temperature sample (K):\n {ex.temp}", indent=2)
    # Evaluate the partition function at the given temperatures:
    log.msg("Interpolate partition function.", indent=2)
    ex.z = np.zeros((pyrat.iso.niso, ex.ntemp), np.double)
    for i in range(pyrat.lt.ndb):  # For each Database
        for j in range(pyrat.lt.db[i].niso):  # For each isotope in DB
            zinterp = sip.interp1d(
                pyrat.lt.db[i].temp, pyrat.lt.db[i].z[j], kind='slinear',
            )
            ex.z[pyrat.lt.db[i].iiso+j] = zinterp(ex.temp)
    # Allocate wavenumber, pressure, and isotope arrays:
    ex.wn = spec.wn
    ex.nwave = spec.nwave
    ex.press = pyrat.atm.press
    ex.nlayers = pyrat.atm.nlayers
    # Allocate extinction-coefficient array:
    log.msg("Calculate extinction coefficient.", indent=2)
    size = ex.nspec * ex.ntemp * ex.nlayers * ex.nwave
    # Shared-memory buffer so all worker processes write into one table:
    sm_ect = mp.Array(ctypes.c_double, np.zeros(size, np.double))
    ex.etable = np.ctypeslib.as_array(
        sm_ect.get_obj()).reshape((ex.nspec, ex.ntemp, ex.nlayers, ex.nwave))
    # Multi-processing extinction calculation (in C):
    processes = []
    # Round-robin assignment of (temperature,layer) grid points to CPUs:
    indices = np.arange(ex.ntemp*ex.nlayers) % pyrat.ncpu  # CPU indices
    for i in range(pyrat.ncpu):
        grid = True
        add = False
        args = (pyrat, np.where(indices==i)[0], grid, add)
        # 'fork' start method so the children inherit the pyrat state:
        proc = mp.get_context('fork').Process(target=extinction, args=args)
        processes.append(proc)
        proc.start()
    for proc in processes:
        proc.join()
    # Store values in file:
    io.write_opacity(
        ex.extfile[0], ex.species, ex.temp, ex.press, ex.wn, ex.etable)
    log.head(
        f"Extinction-coefficient table written to file: '{ex.extfile[0]}'.",
        indent=2,
    )
def extinction(pyrat, indices, grid=False, add=False):
    """
    Python multiprocessing wrapper for the extinction-coefficient (EC)
    calculation function for the atmospheric layers or EC grid.

    Parameters
    ----------
    pyrat: Pyrat Object
    indices: 1D integer list
        The indices of the atmospheric layer or EC grid to calculate.
    grid: Bool
        If True, compute EC per species for EC grid.
        If False, compute EC for atmospheric layer.
    add: Bool
        If True, co-add EC contribution (cm-1) from all species.
        If False, keep EC contribution (cm2 molec-1) from each species separated.

    Returns
    -------
    When both grid and add are False, returns the extinction coefficient
    of the (single) requested layer; otherwise results are written into
    pyrat.ex.etable or pyrat.ex.ec and nothing is returned.
    """
    atm = pyrat.atm
    if add:  # Total extinction coefficient spectrum (cm-1)
        extinct_coeff = np.zeros((1, pyrat.spec.nwave))
    else:  # Opacity spectrum per molecule (cm2 molecule-1)
        extinct_coeff = np.zeros((pyrat.ex.nspec, pyrat.spec.nwave))
    if pyrat.iso.iext is None:
        # Get species indices in opacity table for the isotopes:
        pyrat.iso.iext = np.zeros(pyrat.iso.niso, int)
        for i in range(pyrat.iso.niso):
            if pyrat.iso.imol[i] != -1:
                pyrat.iso.iext[i] = np.where(
                    pyrat.ex.species == pyrat.mol.name[pyrat.iso.imol[i]])[0][0]
            else:
                pyrat.iso.iext[i] = -1  # Isotope not in atmosphere
                # FINDME: find patch for this case in ec.extinction()
    # Turn off verb of all processes except the first:
    verb = pyrat.log.verb
    pyrat.log.verb = (0 in indices) * verb
    for i,index in enumerate(indices):
        # Grid indices run over (temperature, layer) in row-major order:
        ilayer = index % atm.nlayers  # Layer index
        pressure = atm.press[ilayer]  # Layer pressure
        molq = atm.q[ilayer]  # Molecular abundance
        density = atm.d[ilayer]  # Molecular density
        if grid:  # Take from grid
            itemp = int(index / atm.nlayers)  # Temp. index in EC table
            temp = pyrat.ex.temp[itemp]
            ziso = pyrat.ex.z[:,itemp]  # Isotopic ratio
            pyrat.log.msg(
                "Extinction-coefficient table: "
                f"layer {ilayer+1:3d}/{atm.nlayers}, "
                f"iteration {i+1:3d}/{len(indices)}.",
                indent=2,
            )
        else:  # Take from atmosphere
            temp = atm.temp [ilayer]
            ziso = pyrat.iso.z [:,ilayer]  # Isotopic ratio
            pyrat.log.msg(
                f"Calculating extinction at layer {ilayer+1:3d}/{atm.nlayers} "
                f"(T={temp:6.1f} K, p={pressure/pc.bar:.1e} bar).",
                indent=2,
            )
        # Calculate extinction-coefficient in C:
        extinct_coeff[:] = 0.0
        # Tell the C routine whether the spectrum needs interpolation
        # from the oversampled grid:
        interpolate_flag = int(
            pyrat.spec.resolution is not None
            or pyrat.spec.wnstep is not None
        )
        ec.extinction(
            extinct_coeff,
            pyrat.voigt.profile, pyrat.voigt.size, pyrat.voigt.index,
            pyrat.voigt.lorentz, pyrat.voigt.doppler,
            pyrat.spec.wn, pyrat.spec.own, pyrat.spec.odivisors,
            density, molq, pyrat.mol.radius, pyrat.mol.mass,
            pyrat.iso.imol, pyrat.iso.mass, pyrat.iso.ratio,
            ziso, pyrat.iso.iext,
            pyrat.lt.wn, pyrat.lt.elow, pyrat.lt.gf, pyrat.lt.isoid,
            pyrat.voigt.cutoff, pyrat.ex.ethresh, pressure, temp,
            verb-10, int(add),
            interpolate_flag,
        )
        # Store output:
        if grid:  # Into grid
            pyrat.ex.etable[:, itemp, ilayer] = extinct_coeff
        elif add:  # Into ex.ec array for atmosphere
            pyrat.ex.ec[ilayer:ilayer+1] = extinct_coeff
        else:  # return single-layer EC of given layer
            return extinct_coeff
def get_ec(pyrat, layer):
    """
    Compute per-species extinction coefficient at requested layer.

    Parameters
    ----------
    pyrat: Pyrat object
    layer: Integer
        Index of the atmospheric layer to evaluate.

    Returns
    -------
    exc: 2D float ndarray
        Extinction coefficient (cm-1) per species, shape (nspec, nwave).
    label: List of strings
        Names of the species in exc.
    """
    if pyrat.ex.extfile is not None:
        # Interpolate in temperature from the tabulated opacities:
        exc = np.zeros((pyrat.ex.nspec, pyrat.spec.nwave))
        label = []
        temp = pyrat.atm.temp[layer]
        itemps = np.where(pyrat.ex.temp <= temp)[0]
        # Lower index of the bracketing temperature interval.  Clamp so
        # that both itemp and itemp+1 are valid: the previous guard
        # compared itemp against len(ex.temp), which np.where can never
        # return, allowing an out-of-bounds itemp+1 at the top boundary.
        itemp = itemps[-1] if len(itemps) > 0 else 0
        if itemp == len(pyrat.ex.temp) - 1:
            itemp -= 1
        for i in range(pyrat.ex.nspec):
            imol = np.where(pyrat.mol.name == pyrat.ex.species[i])[0][0]
            label.append(pyrat.mol.name[imol])
            etable = pyrat.ex.etable[i,:,layer,:]
            # Linear interpolation in temperature:
            exc[i] = ((etable[itemp ] * (pyrat.ex.temp[itemp+1] - temp) +
                       etable[itemp+1] * (temp - pyrat.ex.temp[itemp] ) ) /
                      (pyrat.ex.temp[itemp+1] - pyrat.ex.temp[itemp]) )
            # Convert from cm2 molecule-1 to cm-1:
            exc[i] *= pyrat.atm.d[layer, imol]
    # Line-by-line:
    else:
        exc = extinction(pyrat, [layer], grid=False, add=False)
        label = []
        for i in range(pyrat.ex.nspec):
            imol = np.where(pyrat.mol.name == pyrat.ex.species[i])[0][0]
            exc[i] *= pyrat.atm.d[layer,imol]
            label.append(pyrat.mol.name[imol])
    return exc, label
|
pcubillosREPO_NAMEpyratbayPATH_START.@pyratbay_extracted@pyratbay-master@pyratbay@pyrat@extinction.py@.PATH_END.py
|
{
"filename": "testFun.py",
"repo_name": "oliverphilcox/HADES",
"repo_path": "HADES_extracted/HADES-master/hades/testFun.py",
"type": "Python"
}
|
from flipper import *
import numpy as np
from hades.params import BICEP
# Module-level parameter object: the functions below read their default
# arguments from these BICEP settings at import time.
a=BICEP()
def low_dust_estimator2(map,map_id,lMin=a.lMin,lMax=a.lMax,FWHM=a.FWHM,noise_power=a.noise_power,\
    slope=a.slope,factor=None,rot=0.,delensing_fraction=a.delensing_fraction,useTensors=a.useTensors,debiasAmplitude=True):
    """Use modified KK14 estimators to compute polarisation hexadecapole amplitude and angle via Afs,Afc parameters.
    This uses the noise model in hades.NoisePower.noise_model.
    A is computed recursively, since the S/N ratio depends on it (weakly)

    Inputs: map (in power-space)
    map_size = width of map in degrees
    lMin,lMax= fitting range of l
    slope -> fiducial C_l map slope
    rot -> optional angle for pre-rotation of power-space map in degrees.
    delensing_fraction -> efficiency of delensing (0.1-> 90% removed)
    factor -> expected amplitude factor (to speed convergence)
    useTensors -> Boolean whether to include r = 0.1 tensor modes from IGWs
    debiasAmplitude -> Boolean whether to subtract noise+lensing C_l for estimation of A

    Outputs:
    A,fs,fc, Afs, Afc from estimators. NB these are corrected for any map pre-rotation.
    If factor is None, the recursively estimated monopole amplitude is also returned.
    """
    from hades.NoisePower import lensed_Cl,r_Cl,noise_model
    def Cl_total_noise_func(l):
        """This is total C_l noise from lensing, B-modes and experimental noise"""
        Cl_lens_func=lensed_Cl(delensing_fraction=delensing_fraction)
        if useTensors:
            # Also include r = 0.1 inflationary gravitational-wave B-modes:
            Cl_r_func=r_Cl()
            return Cl_lens_func(l)+Cl_r_func(l)+noise_model(l,FWHM=FWHM,noise_power=noise_power)
        else:
            return Cl_lens_func(l)+noise_model(l,FWHM=FWHM,noise_power=noise_power)
    if factor==None:
        # If no estimate for monopole amplitude, compute this recursively (needed in SNR)
        # A only weakly depends on the SNR so convergence is usually quick
        N=0
        if a.f_dust>0.:
            Afactor=1e-12*a.f_dust**2. # initial estimate
        else:
            Afactor=1e-16 # some arbitrary small value
        while N<20: # max no. iterations
            # Restrict to pixels inside the [lMin, lMax] fitting annulus:
            goodPix=np.where((map.modLMap.ravel()>lMin)&(map.modLMap.ravel()<lMax)) # correct pixels
            lMap=map.modLMap.ravel()[goodPix] # mod(l)
            PowerMap=map.powerMap.ravel()[goodPix] # B-mode (biased) power
            OtherClMap=Cl_total_noise_func(lMap) # extra C_l power
            if debiasAmplitude:
                debiasedPowerMap=PowerMap-OtherClMap # power estimate only due to dust
            else:
                debiasedPowerMap=PowerMap
            fiducialClMap=lMap**(-slope) # fiducial Cl
            SNMap = (Afactor*fiducialClMap)/(Afactor*fiducialClMap+OtherClMap) # SN ratio
            # Compute estimate for A (signal-to-noise weighted average)
            Anum=np.sum(debiasedPowerMap*(SNMap**2.)/fiducialClMap)
            Aden=np.sum(SNMap**2.)
            # Now record output
            lastFactor=Afactor
            Afactor=Anum/Aden
            # Stop if approximate convergence reached (1% relative change)
            if np.abs((Afactor-lastFactor)/Afactor)<0.01:
                break
            N+=1
        if N==20:
            print 'Map %s did not converge with slope: %.3f, Afactor %.3e, last factor: %.3e' %(map_id,slope,Afactor,lastFactor)
        finalFactor=Afactor
    else:
        finalFactor=factor # just use the A estimate from input
    # Now compute A,Afs,Afc (recompute A s.t. all best estimators use same SNR)
    goodPix=np.where((map.modLMap.ravel()>lMin)&(map.modLMap.ravel()<lMax)) # pixels in correct range
    # Pre-rotate the angle map by 'rot' degrees before forming cos/sin(4*theta):
    angMap=(map.thetaMap.ravel()[goodPix]+rot*np.ones_like(map.thetaMap.ravel()[goodPix]))*np.pi/180. # angle in radians
    cosMap=np.cos(4.*angMap) # map of cos(4theta)
    sinMap=np.sin(4.*angMap)
    lMap=map.modLMap.ravel()[goodPix] # |ell|
    PowerMap=map.powerMap.ravel()[goodPix] # B-mode biased power
    OtherClMap=Cl_total_noise_func(lMap) # extra C_l power from lensing + noise (+ tensor modes)
    if debiasAmplitude:
        debiasedPowerMap=PowerMap-OtherClMap # power estimate only due to dust
    else:
        debiasedPowerMap=PowerMap
    fiducialClMap=lMap**(-slope) # fiducial Cl
    SNmap=(finalFactor*fiducialClMap)/(finalFactor*fiducialClMap+OtherClMap) # signal-to-noise ratio
    # Now compute estimates for A, Afs, Afc
    Anum=np.sum(debiasedPowerMap*(SNmap**2.)/fiducialClMap) # noise-debiased
    Aden=np.sum(SNmap**2.)
    Afcnum=np.sum(debiasedPowerMap*cosMap*(SNmap**2.)/fiducialClMap) # cosine coeff - these are not debiased (else messes up corrections)
    Afcden=np.sum((SNmap*cosMap)**2.)
    Afsnum=np.sum(debiasedPowerMap*sinMap*(SNmap**2.)/fiducialClMap) # sine coeff - these are not debiased (else messes up corrections)
    Afsden=np.sum((SNmap*sinMap)**2.)
    A=Anum/Aden
    Afc=Afcnum/Afcden
    Afs=Afsnum/Afsden
    fs=Afs/A # fs,fc are less reliable since have error from both A and Afs
    fc=Afc/A
    # Now correct for the map rotation
    #rot_rad=rot*np.pi/180. # angle of rotation in radians
    #fs_corr=fs*np.cos(rot_rad*4.)-fc*np.sin(rot_rad*4.)
    #fc_corr=fs*np.sin(rot_rad*4.)+fc*np.cos(rot_rad*4.)
    #Afs_corr=Afs*np.cos(rot_rad*4.)-Afc*np.sin(rot_rad*4.)
    #Afc_corr=Afs*np.sin(rot_rad*4.)+Afc*np.cos(rot_rad*4.)
    if factor==None:
        return A,fs,fc,Afs,Afc,finalFactor # to save finalFactor if different to A
    else:
        return A,fs,fc,Afs,Afc
|
oliverphilcoxREPO_NAMEHADESPATH_START.@HADES_extracted@HADES-master@hades@testFun.py@.PATH_END.py
|
{
"filename": "catalog.py",
"repo_name": "lwa-project/lsl",
"repo_path": "lsl_extracted/lsl-main/lsl/catalog.py",
"type": "Python"
}
|
"""
LWA astronomical source catalogs.
"""
import os
import math
import abc
import collections
try:
Mapping = collections.abc.Mapping
except AttributeError:
# Catch for < Py3.10
Mapping = collections.Mapping
from lsl import astro
from lsl import transform
from lsl.common.data_access import DataAccess
from lsl.misc import telemetry
telemetry.track_module()
__version__ = '0.2'
__all__ = ['CatalogEntry', 'Catalog', 'LWA_Catalog', 'PSR_Catalog', 'PKS_Catalog',
'PKS90_Catalog', 'C3C_Catalog', 'C4C_Catalog', 'F2FGL_Catalog',
'CatalogFactory']
__author__ = 'D. L. Wood'
__maintainer__ = 'Jayce Dowell'
class CatalogEntry(object):
    """
    One source entry in a catalog.

    Attributes:
      * name - The source name.
      * position - The source equatorial J2000 position as an object
        of type transform.CelestialPosition.
      * alias_list - A list of strings providing alternate names for
        the source.
    """

    # Limit the per-instance attributes (no __dict__):
    __slots__ = ('name', 'position', 'alias_list')

    def __init__(self, name, position):
        """
        Create a catalog entry with an (initially empty) alias list.
        """
        self.name = name
        self.position = position
        self.alias_list = []

    def __repr__(self):
        """
        Low level string representation.
        """
        qualname = "%s.%s" % (type(self).__module__, type(self).__name__)
        return "%s(%s,%s)" % (qualname, repr(self.name), repr(self.position))
class Catalog(Mapping):
    """
    Astronomical source catalog.

    This is an abstract class; derived classes must provide a
    parse_file() method which populates the catalog object with
    information from file or other source.

    Catalog instances support the read-only collections.Mapping
    interface, i.e. the read-only methods of the dict built-in type,
    keyed by primary source name.
    """

    # Python-2 era metaclass declaration, kept for compatibility; the
    # ABCMeta metaclass is in fact inherited from Mapping under Python 3.
    __metaclass__ = abc.ABCMeta

    def __init__(self, name):
        """
        Create a source catalog and populate it via parse_file().
        """
        self.name = name
        self.source_map = {}
        self.alias_map = {}
        # Concrete subclasses fill the two maps here:
        self.parse_file()

    @abc.abstractmethod
    def parse_file(self):
        """
        Read catalog information from file into internal data
        structures.
        """
        pass

    @staticmethod
    def get_directory():
        """
        Returns the path to the catalog data file directory.
        """
        return 'catalog'

    def __repr__(self):
        """
        Low level string representation.
        """
        return "%s.Catalog(%s)" % (type(self).__module__, repr(self.name))

    def __len__(self):
        """
        Return the number of sources in the catalog.
        """
        return len(self.source_map)

    def __getitem__(self, key):
        """
        Access source by subscript name. Raises KeyError if the source
        is not in the catalog.
        """
        entry = self.lookup(key)
        if entry is None:
            raise KeyError(f"name {key} not in catalog")
        return entry

    def __iter__(self):
        """
        Return an iterator over the primary names in the catalog.
        """
        return iter(self.source_map)

    def lookup(self, name):
        """
        Lookup a source in the catalog.

        Param: name - The primary name or alias of the source.

        Returns: An object of type CatalogEntry giving the source information,
        or None if the name is not found in the catalog.
        """
        try:
            return self.source_map[name]
        except KeyError:
            # Fall back to the alias map; .get() yields None when absent:
            return self.alias_map.get(name)
class LWA_Catalog(Catalog):
    """
    Specific definition for LWA observation source catalogue data file.
    """

    def __init__(self):
        """
        Create a LWA catalog instance.
        """
        Catalog.__init__(self, 'LWA')

    def parse_file(self):
        """
        Read the source catalog data file and populate the source and
        alias maps.

        Raises RuntimeError if a non-comment line does not follow the
        expected fixed-column format.
        """
        # open data file
        fileName = os.path.join(self.get_directory(), 'lwa_catalog.dat')
        with DataAccess.open(fileName, 'r') as catFile:
            # read source info
            lineNum = 0
            for line in catFile:
                lineNum += 1
                # Skip comment and blank lines:
                if line.startswith('#') or line.isspace():
                    continue
                try:
                    # Fixed-column format: name, then sexagesimal RA/dec:
                    name = line[0:8]
                    raHours = int(line[9:11], 10)
                    raMinutes = int(line[12:14], 10)
                    raSeconds = float(line[15:20])
                    decSign = line[21]
                    decDegrees = int(line[22:24], 10)
                    decMinutes = int(line[25:27], 10)
                    decSeconds = float(line[28:32])
                except ValueError as err:
                    # Fixed message: previously misspelled and carried a
                    # stray trailing ']'; also chain the original error.
                    raise RuntimeError(f"file {fileName}, line {lineNum} incorrectly formatted: '{line}': {err}") from err
                name = name.rstrip()
                ra = astro.hms(raHours, raMinutes, raSeconds)
                # astro.dms takes a boolean 'negative declination' flag:
                sign = decSign == '-'
                dec = astro.dms(sign, decDegrees, decMinutes, decSeconds)
                entry = CatalogEntry(name, transform.CelestialPosition((ra, dec), name=name))
                self.source_map[name] = entry
                # Remaining columns are whitespace-separated aliases:
                aliasList = line[34:].split()
                for alias in aliasList:
                    entry.alias_list.append(alias)
                    self.alias_map[alias] = entry
class PSR_Catalog(Catalog):
    """
    Specific definition for ATNF Pulsar (PSRCAT) catalog.

    Data file is psrcat.db which can be retrieved from:
    <http://www.atnf.csiro.au/research/pulsar/psrcat/download.html>
    """

    def __init__(self):
        """
        Create an instance of the PSR catalog.
        """
        Catalog.__init__(self, 'PSR')

    def parse_file(self):
        """
        Read a source catalog data file.

        The PSRCAT database is a sequence of keyword records ('PSRB',
        'PSRJ', 'RAJ', 'DECJ', ...) terminated by '@-' lines; one source
        is added per record.  Records with missing or malformed positions
        are skipped.
        """
        debug = False
        # open data file
        fileName = os.path.join(self.get_directory(), 'psrcat.db')
        with DataAccess.open(fileName, 'r') as catFile:
            # Per-record accumulator state; reset after every '@-' line:
            psrb = None
            psrj = None
            ra = None
            dec = None
            bad = False
            for line in catFile:
                if line.startswith('PSRB'):
                    psrb = line.split()[1]
                if line.startswith('PSRJ'):
                    psrj = line.split()[1]
                if line.startswith('RAJ'):
                    rastr = line.split()[1]
                    sp = rastr.split(':')
                    if len(sp) == 3:
                        (raHours, raMinutes, raSeconds) = sp
                    elif len(sp) == 2:
                        (raHours, raMinutes) = sp
                        raSeconds = 0.0
                    else:
                        if debug:
                            print(f"Bad format for RAJ line: {line}")
                        bad = True
                        # Bug fix: without this 'continue' the int()
                        # conversions below ran with raHours/raMinutes
                        # unset (or stale from a previous record).  The
                        # DECJ branch already had the equivalent guard.
                        continue
                    raHours = int(raHours)
                    raMinutes = int(raMinutes)
                    raSeconds = float(raSeconds)
                    try:
                        ra = astro.hms(raHours, raMinutes, raSeconds)
                    except Exception:
                        # Narrowed from a bare 'except:' so that
                        # KeyboardInterrupt/SystemExit still propagate.
                        if debug:
                            print(f"PSRCAT: Bad RA for {psrj}: {rastr}")
                        bad = True
                if line.startswith('DECJ'):
                    decstr = line.split()[1]
                    sp = decstr.split(':')
                    if len(sp) == 3:
                        (decDegrees, decMinutes, decSeconds) = sp
                    elif len(sp) == 2:
                        (decDegrees, decMinutes) = sp
                        decSeconds = 0.0
                    else:
                        if debug:
                            print(f"PSRCAT: Bad format for DECJ line: {line}")
                        bad = True
                        continue
                    if decDegrees.startswith('-'):
                        decDegrees = decDegrees[1:]
                        sign = True
                    else:
                        sign = False
                    decDegrees = int(decDegrees)
                    decMinutes = int(decMinutes)
                    decSeconds = float(decSeconds)
                    try:
                        dec = astro.dms(sign, decDegrees, decMinutes, decSeconds)
                    except Exception:
                        if debug:
                            print(f"PSRCAT: Bad DEC for {psrj}: {decstr}")
                        bad = True
                if line.startswith('@-'):
                    # New source, save current source info.
                    # Prefer the B1950 name, with the J2000 name as alias:
                    if psrb is not None:
                        name = psrb
                        alias = psrj
                    else:
                        name = psrj
                        alias = None
                    if ra is None or dec is None:
                        # These pulsars don't have RAJ, DECJ
                        # I think they may all have ecliptic positions
                        # which should be converted to ra,dec but I'm
                        # going to ignore them for now. -- paulr
                        #print "PSRCAT: No position for pulsar ",name
                        bad = True
                    # Add source to list if good.
                    if not bad:
                        sourcePos = astro.equ_posn(ra, dec)
                        entry = CatalogEntry(name, transform.CelestialPosition(sourcePos, name=name))
                        self.source_map[name] = entry
                        if debug:
                            print('Added ', name)
                        if alias is not None:
                            alias = alias.rstrip()
                            self.alias_map[alias] = entry
                            entry.alias_list.append(alias)
                            if debug:
                                print('Alias : ', alias.rstrip())
                    # Clear out vars for next source
                    psrb = None
                    psrj = None
                    ra = None
                    dec = None
                    bad = False
class PKS_Catalog(Catalog):
    """
    Specific definition for PKS source catalog.
    """

    def __init__(self):
        """
        Create a PKS catalog instance.
        """
        Catalog.__init__(self, 'PKS')

    def parse_file(self):
        """
        Read a source catalog data file.
        """
        # open data file
        fileName = os.path.join(self.get_directory(), 'pkscat.txt')
        with DataAccess.open(fileName, 'r') as catFile:
            # read source info, one fixed-column record per line
            for lineNum, line in enumerate(catFile, 1):
                # Skip blank separator lines:
                if line == '\n':
                    continue
                try:
                    name = line[0:8]
                    alias = line[12:19]
                    raHours = int(line[22:24])
                    raMinutes = int(line[25:27])
                    raSeconds = float(line[28:32])
                    decSign = line[33]
                    decDegrees = int(line[34:36])
                    decMinutes = int(line[37:39])
                    decSeconds = int(line[40:42])
                except ValueError:
                    raise RuntimeError(f"file {fileName}, line {lineNum} incorectly formated [{line}]")
                ra = astro.hms(raHours, raMinutes, raSeconds)
                # astro.dms takes a boolean 'negative declination' flag:
                negative = (decSign == '-')
                dec = astro.dms(negative, decDegrees, decMinutes, decSeconds)
                sourcePos = astro.equ_posn(ra, dec)
                # precess coordinates from B1950
                entry = CatalogEntry(name, transform.CelestialPosition(sourcePos, epoch='B1950', name=name))
                self.source_map[name] = entry
                # Register the alias column when it is filled in:
                alias = alias.rstrip()
                if alias:
                    self.alias_map[alias] = entry
                    entry.alias_list.append(alias)
class PKS90_Catalog(Catalog):
    """
    Specific definition for PKS90 source catalogue data file.
    """

    def __init__(self):
        """
        Create a PKS90 catalog instance.
        """
        Catalog.__init__(self, 'PKS90')

    def parse_file(self):
        """
        Read a source catalog data file.
        """
        # open data file
        fileName = os.path.join(self.get_directory(), 'PKS90.txt')
        with DataAccess.open(fileName, 'r') as catFile:
            # read source info, one fixed-column record per line
            for lineNum, line in enumerate(catFile, 1):
                # Skip blank separator lines:
                if line == '\n':
                    continue
                try:
                    name = line[0:10]
                    alias0 = line[139:148]
                    alias1 = line[168:175]
                    raHours = int(line[10:12])
                    raMinutes = int(line[13:15])
                    raSeconds = float(line[16:20])
                    decSign = line[23]
                    decDegrees = int(line[24:26])
                    decMinutes = int(line[27:29])
                    decSeconds = float(line[30:34])
                except ValueError:
                    raise RuntimeError(f"file {fileName}, line {lineNum} incorectly formated [{line}]")
                ra = astro.hms(raHours, raMinutes, raSeconds)
                # astro.dms takes a boolean 'negative declination' flag:
                negative = (decSign == '-')
                dec = astro.dms(negative, decDegrees, decMinutes, decSeconds)
                sourcePos = astro.equ_posn(ra, dec)
                entry = CatalogEntry(name, transform.CelestialPosition(sourcePos, name=name))
                self.source_map[name] = entry
                # Register whichever of the two alias columns are filled in:
                for rawAlias in (alias0, alias1):
                    if len(rawAlias.strip()):
                        cleaned = rawAlias.rstrip()
                        entry.alias_list.append(cleaned)
                        self.alias_map[cleaned] = entry
class C3C_Catalog(Catalog):
    """
    Specific definition for Cambridge 3C source catalogue data file.
    """

    def __init__(self):
        """
        Create a 3C catalog instance.
        """
        Catalog.__init__(self, '3C')

    def parse_file(self):
        """
        Read a source catalog data file.
        """
        # open data file
        fileName = os.path.join(self.get_directory(), '3c.dat')
        with DataAccess.open(fileName, 'r') as catFile:
            # read source info, one fixed-column record per line
            for lineNum, line in enumerate(catFile, 1):
                try:
                    name = line[0:3]
                    raHours = int(line[12:14])
                    raMinutes = int(line[15:17])
                    raSeconds = float(line[18:22])
                    decSign = line[27]
                    decDegrees = int(line[28:30])
                    decMinutes = float(line[31:35])
                except ValueError:
                    raise RuntimeError(f"file {fileName} line {lineNum} incorectly formated [{line}]")
                name = ('3C' + name.strip())
                ra = astro.hms(raHours, raMinutes, raSeconds)
                # Split fractional arcminutes into whole minutes + seconds:
                (decSeconds, decMinutes) = math.modf(decMinutes)
                negative = (decSign == '-')
                dec = astro.dms(negative, decDegrees, int(decMinutes), (60 * decSeconds))
                sourcePos = astro.equ_posn(ra, dec)
                # precess coordinates from B1950
                entry = CatalogEntry(name, transform.CelestialPosition(sourcePos, epoch='B1950', name=name))
                self.source_map[name] = entry
class C4C_Catalog(Catalog):
    """
    Specific definition for Cambridge 4C source catalogue data file.
    """

    def __init__(self):
        """
        Create a 4C catalog instance.
        """
        Catalog.__init__(self, '4C')

    def parse_file(self):
        """
        Read a source catalog data file.
        """
        # open data file
        fileName = os.path.join(self.get_directory(), '4c.dat')
        with DataAccess.open(fileName, 'r') as catFile:
            # read source info, one fixed-column record per line
            for lineNum, line in enumerate(catFile, 1):
                try:
                    name = line[0:8]
                    raHours = int(line[10:12])
                    raMinutes = int(line[13:15])
                    raSeconds = float(line[16:20])
                    decSign = line[22]
                    decDegrees = int(line[23:25])
                    decMinutes = float(line[26:30])
                    alias = line[64:-1]
                except ValueError:
                    raise RuntimeError(f"file {fileName}, line {lineNum} incorectly formated [{line}]")
                name = ('4C' + name.strip())
                # Drop surrounding whitespace and any trailing '*' markers:
                alias = alias.strip().rstrip('*')
                ra = astro.hms(raHours, raMinutes, raSeconds)
                # Split fractional arcminutes into whole minutes + seconds:
                (decSeconds, decMinutes) = math.modf(decMinutes)
                negative = (decSign == '-')
                dec = astro.dms(negative, decDegrees, int(decMinutes), (60 * decSeconds))
                sourcePos = astro.equ_posn(ra, dec)
                # precess coordinates from B1950
                entry = CatalogEntry(name, transform.CelestialPosition(sourcePos, epoch='B1950', name=name))
                self.source_map[name] = entry
                # Leading whitespace was already stripped above, so a
                # final rstrip() is equivalent to the original strip():
                alias = alias.rstrip()
                if alias:
                    entry.alias_list.append(alias)
                    self.alias_map[alias] = entry
class Fermi_LAT_Catalog(Catalog):
    """
    Base definition for the Fermi LAT point source catalogs.
    """

    def __init__(self, name, filename):
        """
        Create a Fermi LAT catalog instance backed by the given FITS file.
        """
        self._filename = filename
        Catalog.__init__(self, name)

    def parse_file(self):
        """
        Read a source catalog data file.
        """
        from astropy.io import fits as astrofits
        # FITS columns that may carry alternate designations for a source:
        aliasFields = ('0FGL_NAME', '1FGL_NAME', '2FGL_NAME',
                       'ASSOC_FGL', 'ASSOC_GAM1', 'ASSOC_GAM2', 'ASSOC_GAM3',
                       'ASSOC1', 'ASSOC2')
        # open data file
        fileName = os.path.join(self.get_directory(), self._filename)
        with DataAccess.open(fileName, 'rb') as fh:
            catFile = astrofits.open(fh)
            # read source info
            sourceTable = catFile['LAT_POINT_SOURCE_CATALOG'].data
            for row in sourceTable:
                # Spaces in names are normalized to underscores:
                name = str(row.field('Source_Name')).replace(' ', '_')
                ra = float(row.field('RAJ2000'))
                dec = float(row.field('DEJ2000'))
                entry = CatalogEntry(name,
                    transform.CelestialPosition((ra, dec), name=name))
                self.source_map[name] = entry
                for fieldname in aliasFields:
                    # Not every catalog release carries every column:
                    try:
                        alias = str(row.field(fieldname))
                    except KeyError:
                        continue
                    if len(alias):
                        alias = alias.replace(' ', '_')
                        self.alias_map[alias] = entry
                        entry.alias_list.append(alias)
class F2FGL_Catalog(Fermi_LAT_Catalog):
    """
    Specific definition for Fermi LAT 2-year point source catalog.
    """

    def __init__(self):
        # Delegate to the base class with the 2FGL data file:
        super().__init__('2FGL', 'gll_psc_v08.fit')
class F3FGL_Catalog(Fermi_LAT_Catalog):
    """
    Specific definition for Fermi LAT 4-year point source catalog.
    """

    def __init__(self):
        # Delegate to the base class with the 3FGL data file:
        super().__init__('3FGL', 'gll_psc_v16.fit')
class F4FGL_Catalog(Fermi_LAT_Catalog):
    """
    Specific definition for Fermi LAT 8-year point source catalog.
    """

    def __init__(self):
        # Delegate to the base class with the 4FGL data file:
        super().__init__('4FGL', 'gll_psc_v22.fit')
class F4FGLDR4_Catalog(Fermi_LAT_Catalog):
    """
    Specific definition for Fermi LAT 14-year point source catalog.
    """

    def __init__(self):
        # Delegate to the base class with the 4FGL-DR4 data file:
        super().__init__('4FGL-DR4', 'gll_psc_v33.fit')
class CatalogFactory(object):
    """
    Get catalog objects by name.  Caches the catalog data so that
    the data file is parsed only once per session.
    """

    # a mapping of catalog names to classes
    catalog_class_map = \
        {
            'LWA'     : LWA_Catalog,
            'PSR'     : PSR_Catalog,
            'PKS'     : PKS_Catalog,
            'PKS90'   : PKS90_Catalog,
            '3C'      : C3C_Catalog,
            '4C'      : C4C_Catalog,
            '2FGL'    : F2FGL_Catalog,
            '3FGL'    : F3FGL_Catalog,
            '4FGL'    : F4FGL_Catalog,
            '4FGL-DR4': F4FGLDR4_Catalog,
        }

    # a mapping of catalog names to instances (the cache)
    catalog_instance_map = {}

    @classmethod
    def get_catalog(klass, name):
        """
        Returns a Catalog object representing the catalog
        given by name.

        Raises ValueError if name is not a known catalog.
        """
        # check parameters; membership test directly on the dict
        # (no need to materialize the key list)
        if name not in klass.catalog_class_map:
            raise ValueError(f"unknown catalog '{name}'")
        # try to find an already created instance
        # if not found, create a new instance and cache
        try:
            catalog = klass.catalog_instance_map[name]
        except KeyError:
            catalogClass = klass.catalog_class_map[name]
            catalog = catalogClass()
            klass.catalog_instance_map[name] = catalog
        return catalog

    @classmethod
    def get_names(klass):
        """
        Return a list of known catalog names.
        """
        # Iterating the dict yields its keys directly:
        return list(klass.catalog_class_map)
|
lwa-projectREPO_NAMElslPATH_START.@lsl_extracted@lsl-main@lsl@catalog.py@.PATH_END.py
|
{
"filename": "FAQ.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/restricted/abseil-cpp/FAQ.md",
"type": "Markdown"
}
|
# Abseil FAQ
## Is Abseil the right home for my utility library?
Most often the answer to the question is "no." As both the [About
Abseil](https://abseil.io/about/) page and our [contributing
guidelines](https://github.com/abseil/abseil-cpp/blob/master/CONTRIBUTING.md#contribution-guidelines)
explain, Abseil contains a variety of core C++ library code that is widely used
at [Google](https://www.google.com/). As such, Abseil's primary purpose is to be
used as a dependency by Google's open source C++ projects. While we do hope that
Abseil is also useful to the C++ community at large, this added constraint also
means that we are unlikely to accept a contribution of utility code that isn't
already widely used by Google.
## How do I set the C++ dialect used to build Abseil?
The short answer is that whatever mechanism you choose, you need to make sure
that you set this option consistently at the global level for your entire
project. If, for example, you want to set the C++ dialect to C++17, with
[Bazel](https://bazel.build/) as the build system and `gcc` or `clang` as the
compiler, there are several ways to do this:
* Pass `--cxxopt=-std=c++17` on the command line (for example, `bazel build
--cxxopt=-std=c++17 ...`)
* Set the environment variable `BAZEL_CXXOPTS` (for example,
`BAZEL_CXXOPTS=-std=c++17`)
* Add `build --cxxopt=-std=c++17` to your [`.bazelrc`
file](https://docs.bazel.build/versions/master/guide.html#bazelrc)
If you are using CMake as the build system, you'll need to add a line like
`set(CMAKE_CXX_STANDARD 17)` to your top level `CMakeLists.txt` file. If you
are developing a library designed to be used by other clients, you should
instead leave `CMAKE_CXX_STANDARD` unset and configure the minimum C++ standard
required by each of your library targets via `target_compile_features`. See the
[CMake build
instructions](https://github.com/abseil/abseil-cpp/blob/master/CMake/README.md)
for more information.
For a longer answer to this question and to understand why some other approaches
don't work, see the answer to ["What is ABI and why don't you recommend using a
pre-compiled version of
Abseil?"](#what-is-abi-and-why-dont-you-recommend-using-a-pre-compiled-version-of-abseil)
## What is ABI and why don't you recommend using a pre-compiled version of Abseil?
For the purposes of this discussion, you can think of
[ABI](https://en.wikipedia.org/wiki/Application_binary_interface) as the
compiled representation of the interfaces in code. This is in contrast to
[API](https://en.wikipedia.org/wiki/Application_programming_interface), which
you can think of as the interfaces as defined by the code itself. [Abseil has a
strong promise of API compatibility, but does not make any promise of ABI
compatibility](https://abseil.io/about/compatibility). Let's take a look at what
this means in practice.
You might be tempted to do something like this in a
[Bazel](https://bazel.build/) `BUILD` file:
```
# DON'T DO THIS!!!
cc_library(
name = "my_library",
srcs = ["my_library.cc"],
copts = ["-std=c++17"], # May create a mixed-mode compile!
deps = ["@com_google_absl//absl/strings"],
)
```
Applying `-std=c++17` to an individual target in your `BUILD` file is going to
compile that specific target in C++17 mode, but it isn't going to ensure the
Abseil library is built in C++17 mode, since the Abseil library itself is a
different build target. If your code includes an Abseil header, then your
program may contain conflicting definitions of the same
class/function/variable/enum, etc. As a rule, all compile options that affect
the ABI of a program need to be applied to the entire build on a global basis.
C++ has something called the [One Definition
Rule](https://en.wikipedia.org/wiki/One_Definition_Rule) (ODR). C++ doesn't
allow multiple definitions of the same class/function/variable/enum, etc. ODR
violations sometimes result in linker errors, but linkers do not always catch
violations. Uncaught ODR violations can result in strange runtime behaviors or
crashes that can be hard to debug.
If you build the Abseil library and your code using different compile options
that affect ABI, there is a good chance you will run afoul of the One Definition
Rule. Examples of GCC compile options that affect ABI include (but aren't
limited to) language dialect (e.g. `-std=`), optimization level (e.g. `-O2`),
code generation flags (e.g. `-fexceptions`), and preprocessor defines
(e.g. `-DNDEBUG`).
If you use a pre-compiled version of Abseil, (for example, from your Linux
distribution package manager or from something like
[vcpkg](https://github.com/microsoft/vcpkg)) you have to be very careful to
ensure ABI compatibility across the components of your program. The only way you
can be sure your program is going to be correct regarding ABI is to ensure
you've used the exact same compile options as were used to build the
pre-compiled library. This does not mean that Abseil cannot work as part of a
Linux distribution since a knowledgeable binary packager will have ensured that
all packages have been built with consistent compile options. This is one of the
reasons we warn against - though do not outright reject - using Abseil as a
pre-compiled library.
Another possible way that you might run afoul of ABI issues is if you accidentally
include two versions of Abseil in your program. Multiple versions of Abseil can
end up within the same binary if your program uses the Abseil library and
another library also transitively depends on Abseil (resulting in what is
sometimes called the diamond dependency problem). In cases such as this you must
structure your build so that all libraries use the same version of Abseil.
[Abseil's strong promise of API compatibility between
releases](https://abseil.io/about/compatibility) means the latest "HEAD" release
of Abseil is almost certainly the right choice if you are doing as we recommend
and building all of your code from source.
For these reasons we recommend you avoid pre-compiled code and build the Abseil
library yourself in a consistent manner with the rest of your code.
## What is "live at head" and how do I do it?
From Abseil's point-of-view, "live at head" means that every Abseil source
release (which happens on an almost daily basis) is either API compatible with
the previous release, or comes with an automated tool that you can run over code
to make it compatible. In practice, the need to use an automated tool is
extremely rare. This means that upgrading from one source release to another
should be a routine practice that can and should be performed often.
We recommend you update to the [latest commit in the `master` branch of
Abseil](https://github.com/abseil/abseil-cpp/commits/master) as often as
possible. Not only will you pick up bug fixes more quickly, but if you have good
automated testing, you will catch and be able to fix any [Hyrum's
Law](https://www.hyrumslaw.com/) dependency problems on an incremental basis
instead of being overwhelmed by them and having difficulty isolating them if you
wait longer between updates.
If you are using the [Bazel](https://bazel.build/) build system and its
[external dependencies](https://docs.bazel.build/versions/master/external.html)
feature, updating the
[`http_archive`](https://docs.bazel.build/versions/master/repo/http.html#http_archive)
rule in your
[`WORKSPACE`](https://docs.bazel.build/versions/master/be/workspace.html) for
`com_google_abseil` to point to the [latest commit in the `master` branch of
Abseil](https://github.com/abseil/abseil-cpp/commits/master) is all you need to
do. For example, on February 11, 2020, the latest commit to the master branch
was `98eb410c93ad059f9bba1bf43f5bb916fc92a5ea`. To update to this commit, you
would add the following snippet to your `WORKSPACE` file:
```
http_archive(
name = "com_google_absl",
urls = ["https://github.com/abseil/abseil-cpp/archive/98eb410c93ad059f9bba1bf43f5bb916fc92a5ea.zip"], # 2020-02-11T18:50:53Z
strip_prefix = "abseil-cpp-98eb410c93ad059f9bba1bf43f5bb916fc92a5ea",
sha256 = "aabf6c57e3834f8dc3873a927f37eaf69975d4b28117fc7427dfb1c661542a87",
)
```
To get the `sha256` of this URL, run `curl -sL --output -
https://github.com/abseil/abseil-cpp/archive/98eb410c93ad059f9bba1bf43f5bb916fc92a5ea.zip
| sha256sum -`.
You can commit the updated `WORKSPACE` file to your source control every time
you update, and if you have good automated testing, you might even consider
automating this.
One thing we don't recommend is using GitHub's `master.zip` files (for example
[https://github.com/abseil/abseil-cpp/archive/master.zip](https://github.com/abseil/abseil-cpp/archive/master.zip)),
which are always the latest commit in the `master` branch, to implement live at
head. Since these `master.zip` URLs are not versioned, you will lose build
reproducibility. In addition, some build systems, including Bazel, will simply
cache this file, which means you won't actually be updating to the latest
release until your cache is cleared or invalidated.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@restricted@abseil-cpp@FAQ.md@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/README.md",
"type": "Markdown"
}
|
<div align="center">
<a href="http://pyro.ai"> <img width="220px" height="220px" src="docs/source/_static/img/pyro_logo_with_text.png"></a>
</div>
-----------------------------------------
[](https://github.com/pyro-ppl/pyro/actions)
[](https://coveralls.io/github/pyro-ppl/pyro?branch=dev)
[](https://pypi.python.org/pypi/pyro-ppl)
[](http://pyro-ppl.readthedocs.io/en/stable/?badge=dev)
[](https://bestpractices.coreinfrastructure.org/projects/3056)
[Getting Started](http://pyro.ai/examples) |
[Documentation](http://docs.pyro.ai/) |
[Community](http://forum.pyro.ai/) |
[Contributing](https://github.com/pyro-ppl/pyro/blob/master/CONTRIBUTING.md)
Pyro is a flexible, scalable deep probabilistic programming library built on PyTorch. Notably, it was designed with these principles in mind:
- **Universal**: Pyro is a universal PPL - it can represent any computable probability distribution.
- **Scalable**: Pyro scales to large data sets with little overhead compared to hand-written code.
- **Minimal**: Pyro is agile and maintainable. It is implemented with a small core of powerful, composable abstractions.
- **Flexible**: Pyro aims for automation when you want it, control when you need it. This is accomplished through high-level abstractions to express generative and inference models, while allowing experts easy-access to customize inference.
Pyro was originally developed at Uber AI and is now actively maintained by community contributors, including a dedicated team at the [Broad Institute](https://www.broadinstitute.org/).
In 2019, Pyro [became](https://www.linuxfoundation.org/press-release/2019/02/pyro-probabilistic-programming-language-becomes-newest-lf-deep-learning-project/) a project of the Linux Foundation, a neutral space for collaboration on open source software, open standards, open data, and open hardware.
For more information about the high level motivation for Pyro, check out our [launch blog post](http://eng.uber.com/pyro).
For additional blog posts, check out work on [experimental design](https://eng.uber.com/oed-pyro-release/) and
[time-to-event modeling](https://eng.uber.com/modeling-censored-time-to-event-data-using-pyro/) in Pyro.
## Installing
### Installing a stable Pyro release
**Install using pip:**
```sh
pip install pyro-ppl
```
**Install from source:**
```sh
git clone git@github.com:pyro-ppl/pyro.git
cd pyro
git checkout master # master is pinned to the latest release
pip install .
```
**Install with extra packages:**
To install the dependencies required to run the probabilistic models included in the `examples`/`tutorials` directories, please use the following command:
```sh
pip install pyro-ppl[extras]
```
Make sure that the models come from the same release version of the [Pyro source code](https://github.com/pyro-ppl/pyro/releases) as you have installed.
### Installing Pyro dev branch
For recent features you can install Pyro from source.
**Install Pyro using pip:**
```sh
pip install git+https://github.com/pyro-ppl/pyro.git
```
or, with the `extras` dependency to run the probabilistic models included in the `examples`/`tutorials` directories:
```sh
pip install git+https://github.com/pyro-ppl/pyro.git#egg=project[extras]
```
**Install Pyro from source:**
```sh
git clone https://github.com/pyro-ppl/pyro
cd pyro
pip install . # pip install .[extras] for running models in examples/tutorials
```
## Running Pyro from a Docker Container
Refer to the instructions [here](docker/README.md).
## Citation
If you use Pyro, please consider citing:
```
@article{bingham2019pyro,
author = {Eli Bingham and
Jonathan P. Chen and
Martin Jankowiak and
Fritz Obermeyer and
Neeraj Pradhan and
Theofanis Karaletsos and
Rohit Singh and
Paul A. Szerlip and
Paul Horsfall and
Noah D. Goodman},
title = {Pyro: Deep Universal Probabilistic Programming},
journal = {J. Mach. Learn. Res.},
volume = {20},
pages = {28:1--28:6},
year = {2019},
url = {http://jmlr.org/papers/v20/18-403.html}
}
```
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@README.md@.PATH_END.py
|
{
"filename": "latex.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/formatters/latex.py",
"type": "Python"
}
|
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from io import StringIO
from pygments.formatter import Formatter
from pygments.lexer import Lexer, do_insertions
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
    """Escape *text* so it renders literally inside a LaTeX Verbatim
    environment, replacing each LaTeX-active character with the matching
    ``\\<commandprefix>Z..{}`` macro."""
    cp = commandprefix
    # A single translate() pass maps every special character at once,
    # which sidesteps the re-replacement problem a chain of replace()
    # calls would have (the macro text itself contains backslashes and
    # braces).  The \x00-\x02 entries reproduce the original's
    # placeholder behavior for those control characters exactly.
    table = str.maketrans({
        '\\':   rf'\{cp}Zbs{{}}',
        '\x00': rf'\{cp}Zbs{{}}',
        '{':    rf'\{cp}Zob{{}}',
        '\x01': rf'\{cp}Zob{{}}',
        '}':    rf'\{cp}Zcb{{}}',
        '\x02': rf'\{cp}Zcb{{}}',
        '^':    rf'\{cp}Zca{{}}',
        '_':    rf'\{cp}Zus{{}}',
        '&':    rf'\{cp}Zam{{}}',
        '<':    rf'\{cp}Zlt{{}}',
        '>':    rf'\{cp}Zgt{{}}',
        '#':    rf'\{cp}Zsh{{}}',
        '%':    rf'\{cp}Zpc{{}}',
        '$':    rf'\{cp}Zdl{{}}',
        '-':    rf'\{cp}Zhy{{}}',
        "'":    rf'\{cp}Zsq{{}}',
        '"':    rf'\{cp}Zdq{{}}',
        '~':    rf'\{cp}Zti{{}}',
    })
    return text.translate(table)
# %-interpolation template for a complete standalone LaTeX document,
# used when the formatter's ``full`` option is set.
# Keys: docclass, encoding, preamble, styledefs, title, code.
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
# %-interpolation template (keys: ``cp`` = command prefix, ``styles`` =
# per-class \@namedef lines) that defines the magic \PY macros described
# in the comment above, the \PYZxx character escapes, and the
# backwards-compatibility aliases.
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
    """Return the short class name for *ttype*, walking up the token
    hierarchy until an entry in STANDARD_TYPES is found and appending
    the names of the unmatched subtypes to it."""
    suffix = ''
    name = STANDARD_TYPES.get(ttype)
    # Climb toward the root Token type until a standard abbreviation
    # exists, accumulating the skipped subtype names in front of suffix.
    while name is None:
        suffix = ttype[-1] + suffix
        ttype = ttype.parent
        name = STANDARD_TYPES.get(ttype)
    return name + suffix
class LatexFormatter(Formatter):
    r"""
    Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
    standard packages.

    Without the `full` option, code is formatted as one ``Verbatim``
    environment, like this:

    .. sourcecode:: latex

        \begin{Verbatim}[commandchars=\\\{\}]
            \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
                \PY{k}{pass}
        \end{Verbatim}

    Wrapping can be disabled using the `nowrap` option.

    The special command used here (``\PY``) and all the other macros it needs
    are output by the `get_style_defs` method.

    With the `full` option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The `get_style_defs()` method of a `LatexFormatter` returns a string
    containing ``\def`` commands defining the macros needed inside the
    ``Verbatim`` environments.

    Additional options accepted:

    `nowrap`
        If set to ``True``, don't wrap the tokens at all, not even inside a
        ``\begin{Verbatim}`` environment. This disables most other options
        (default: ``False``).

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `full`
        Tells the formatter to output a "full" document, i.e. a complete
        self-contained document (default: ``False``).

    `title`
        If `full` is true, the title that should be used to caption the
        document (default: ``''``).

    `docclass`
        If the `full` option is enabled, this is the document class to use
        (default: ``'article'``).

    `preamble`
        If the `full` option is enabled, this can be further preamble commands,
        e.g. ``\usepackage`` (default: ``''``).

    `linenos`
        If set to ``True``, output line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `verboptions`
        Additional options given to the Verbatim environment (see the *fancyvrb*
        docs for possible values) (default: ``''``).

    `commandprefix`
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: ``'PY'``).

        .. versionadded:: 0.7
        .. versionchanged:: 0.10
           The default is now ``'PY'`` instead of ``'C'``.

    `texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
        in comment tokens is not escaped so that LaTeX can render it (default:
        ``False``).

        .. versionadded:: 1.2

    `mathescape`
        If set to ``True``, enables LaTeX math mode escape in comments. That
        is, ``'$...$'`` inside a comment will trigger math mode (default:
        ``False``).

        .. versionadded:: 1.2

    `escapeinside`
        If set to a string of length 2, enables escaping to LaTeX. Text
        delimited by these 2 characters is read as LaTeX code and
        typeset accordingly. It has no effect in string literals. It has
        no effect in comments if `texcomments` or `mathescape` is
        set. (default: ``''``).

        .. versionadded:: 2.0

    `envname`
        Allows you to pick an alternative environment name replacing Verbatim.
        The alternate environment still has to support Verbatim's option syntax.
        (default: ``'Verbatim'``).

        .. versionadded:: 2.0
    """
    name = 'LaTeX'
    aliases = ['latex', 'tex']
    filenames = ['*.tex']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.docclass = options.get('docclass', 'article')
        self.preamble = options.get('preamble', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        # abs() guards against negative values passed in the options.
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.verboptions = options.get('verboptions', '')
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.commandprefix = options.get('commandprefix', 'PY')
        self.texcomments = get_bool_opt(options, 'texcomments', False)
        self.mathescape = get_bool_opt(options, 'mathescape', False)
        self.escapeinside = options.get('escapeinside', '')
        # escapeinside must be exactly two characters (left and right
        # delimiter); any other length silently disables the feature.
        if len(self.escapeinside) == 2:
            self.left = self.escapeinside[0]
            self.right = self.escapeinside[1]
        else:
            self.escapeinside = ''
        self.envname = options.get('envname', 'Verbatim')

        self._create_stylesheet()

    def _create_stylesheet(self):
        # Build two lookup tables from the current style:
        #   ttype2name: token type -> short class name (e.g. 'k', 'nf')
        #   cmd2def:    short class name -> LaTeX macro body for that style
        t2n = self.ttype2name = {Token: ''}
        c2d = self.cmd2def = {}
        cp = self.commandprefix

        def rgbcolor(col):
            # Convert an 'rrggbb' hex string into the 'r,g,b' float
            # triple expected by \textcolor[rgb]{...}; white when unset.
            if col:
                return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
                                 for i in (0, 2, 4)])
            else:
                return '1,1,1'

        for ttype, ndef in self.style:
            name = _get_ttype_name(ttype)
            cmndef = ''
            if ndef['bold']:
                cmndef += r'\let\$$@bf=\textbf'
            if ndef['italic']:
                cmndef += r'\let\$$@it=\textit'
            if ndef['underline']:
                cmndef += r'\let\$$@ul=\underline'
            if ndef['roman']:
                cmndef += r'\let\$$@ff=\textrm'
            if ndef['sans']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['mono']:
                # NOTE(review): 'mono' maps to \textsf (sans) here, same
                # as 'sans'; \texttt would be the expected monospace
                # face -- confirm against upstream before changing.
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['color']:
                cmndef += (r'\def\$$@tc##1{{\textcolor[rgb]{{{}}}{{##1}}}}'.format(rgbcolor(ndef['color'])))
            if ndef['border']:
                cmndef += (r'\def\$$@bc##1{{{{\setlength{{\fboxsep}}{{\string -\fboxrule}}'
                           r'\fcolorbox[rgb]{{{}}}{{{}}}{{\strut ##1}}}}}}'.format(rgbcolor(ndef['border']),
                                                                                  rgbcolor(ndef['bgcolor'])))
            elif ndef['bgcolor']:
                cmndef += (r'\def\$$@bc##1{{{{\setlength{{\fboxsep}}{{0pt}}'
                           r'\colorbox[rgb]{{{}}}{{\strut ##1}}}}}}'.format(rgbcolor(ndef['bgcolor'])))
            # Token types with no styling at all get no macro.
            if cmndef == '':
                continue
            # '$$' is a placeholder for the command prefix.
            cmndef = cmndef.replace('$$', cp)
            t2n[ttype] = name
            c2d[name] = cmndef

    def get_style_defs(self, arg=''):
        """
        Return the command sequences needed to define the commands
        used to format text in the verbatim environment. ``arg`` is ignored.
        """
        cp = self.commandprefix
        styles = []
        for name, definition in self.cmd2def.items():
            styles.append(rf'\@namedef{{{cp}@tok@{name}}}{{{definition}}}')
        return STYLE_TEMPLATE % {'cp': self.commandprefix,
                                 'styles': '\n'.join(styles)}

    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            # Buffer the highlighted code so it can be interpolated into
            # DOC_TEMPLATE at the end.
            realoutfile = outfile
            outfile = StringIO()

        if not self.nowrap:
            outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
            if self.linenos:
                start, step = self.linenostart, self.linenostep
                outfile.write(',numbers=left' +
                              (start and ',firstnumber=%d' % start or '') +
                              (step and ',stepnumber=%d' % step or ''))
            if self.mathescape or self.texcomments or self.escapeinside:
                # Restore the usual TeX catcodes for $, ^ and _ inside
                # the environment so escaped math markup works.
                outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
                              '\\catcode`\\_=8\\relax}')
            if self.verboptions:
                outfile.write(',' + self.verboptions)
            outfile.write(']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in range(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, cp)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, cp)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    # Escape everything except text between the left and
                    # right escape delimiters, which is passed through.
                    text = value
                    value = ''
                    while text:
                        a, sep1, text = text.partition(self.left)
                        if sep1:
                            b, sep2, text = text.partition(self.right)
                            if sep2:
                                value += escape_tex(a, cp) + b
                            else:
                                value += escape_tex(a + sep1 + b, cp)
                        else:
                            value += escape_tex(a, cp)
                else:
                    value = escape_tex(value, cp)
            elif ttype not in Token.Escape:
                value = escape_tex(value, cp)
            # Collect style classes from the most specific token type up
            # to (but excluding) the root Token type.
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                # Wrap each physical line separately so the newline
                # stays outside the \PY{...}{...} command.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write(f"\\{cp}{{{styleval}}}{{{line}}}")
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write(f"\\{cp}{{{styleval}}}{{{spl[-1]}}}")
            else:
                outfile.write(value)

        if not self.nowrap:
            outfile.write('\\end{' + self.envname + '}\n')

        if self.full:
            encoding = self.encoding or 'utf8'
            # map known existings encodings from LaTeX distribution
            encoding = {
                'utf_8': 'utf8',
                'latin_1': 'latin1',
                'iso_8859_1': 'latin1',
            }.get(encoding.replace('-', '_'), encoding)
            realoutfile.write(DOC_TEMPLATE %
                dict(docclass=self.docclass,
                     preamble=self.preamble,
                     title=self.title,
                     encoding=encoding,
                     styledefs=self.get_style_defs(),
                     code=outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
    """
    This lexer takes one lexer as argument, the lexer for the language
    being formatted, and the left and right delimiters for escaped text.

    First everything is scanned using the language lexer to obtain
    strings and comments. All other consecutive tokens are merged and
    the resulting text is scanned for escaped segments, which are given
    the Token.Escape type. Finally text that is not escaped is scanned
    again with the language lexer.
    """
    def __init__(self, left, right, lang, **options):
        # left/right: the two escape delimiter strings; lang: the
        # language lexer whose output is post-processed.
        self.left = left
        self.right = right
        self.lang = lang
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # find and remove all the escape tokens (replace with an empty string)
        # this is very similar to DelegatingLexer.get_tokens_unprocessed.
        buffered = ''
        insertions = []
        insertion_buf = []
        for i, t, v in self._find_safe_escape_tokens(text):
            if t is None:
                # Unescaped text: flush pending escape tokens at the
                # current buffer offset, then accumulate for re-lexing.
                if insertion_buf:
                    insertions.append((len(buffered), insertion_buf))
                    insertion_buf = []
                buffered += v
            else:
                insertion_buf.append((i, t, v))
        if insertion_buf:
            insertions.append((len(buffered), insertion_buf))
        # Re-lex the stripped text and splice the escape tokens back in
        # at their recorded offsets.
        return do_insertions(insertions,
                             self.lang.get_tokens_unprocessed(buffered))

    def _find_safe_escape_tokens(self, text):
        """ find escape tokens that are not in strings or comments """
        for i, t, v in self._filter_to(
            self.lang.get_tokens_unprocessed(text),
            lambda t: t in Token.Comment or t in Token.String
        ):
            if t is None:
                # Merged non-string/comment run: scan it for escapes,
                # shifting sub-offsets by the run's start position.
                for i2, t2, v2 in self._find_escape_tokens(v):
                    yield i + i2, t2, v2
            else:
                # String/comment content passes through untyped (None)
                # so it is later re-lexed verbatim.
                yield i, None, v

    def _filter_to(self, it, pred):
        """ Keep only the tokens that match `pred`, merge the others together """
        buf = ''
        idx = 0
        for i, t, v in it:
            if pred(t):
                if buf:
                    yield idx, None, buf
                    buf = ''
                yield i, t, v
            else:
                if not buf:
                    # Remember where this merged run started.
                    idx = i
                buf += v
        if buf:
            yield idx, None, buf

    def _find_escape_tokens(self, text):
        """ Find escape tokens within text, give token=None otherwise """
        index = 0
        while text:
            a, sep1, text = text.partition(self.left)
            if a:
                yield index, None, a
                index += len(a)
            if sep1:
                b, sep2, text = text.partition(self.right)
                if sep2:
                    yield index + len(sep1), Token.Escape, b
                    index += len(sep1) + len(b) + len(sep2)
                else:
                    # Unterminated escape: emit the left delimiter as an
                    # error token and keep scanning the remainder.
                    yield index, Token.Error, sep1
                    index += len(sep1)
                    text = b
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@formatters@latex.py@.PATH_END.py
|
{
"filename": "_stream.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolargl/_stream.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Compound validator wiring the ``stream`` property of
    # ``scatterpolargl`` traces to the generated ``Stream`` data class.

    def __init__(self, plotly_name="stream", parent_name="scatterpolargl", **kwargs):
        # Pull the overridable defaults out of **kwargs before
        # forwarding everything to the base class.
        data_class_str = kwargs.pop("data_class_str", "Stream")
        data_docs = kwargs.pop(
            "data_docs",
            """
            maxpoints
                Sets the maximum number of points to keep on
                the plots from an incoming stream. If
                `maxpoints` is set to 50, only the newest 50
                points will be displayed on the plot.
            token
                The stream id number links a data trace on a
                plot with a stream. See https://chart-
                studio.plotly.com/settings for more details.
""",
        )
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolargl@_stream.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.