metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "galaxy-merger-rate.ipynb",
"repo_name": "nanograv/holodeck",
"repo_path": "holodeck_extracted/holodeck-main/notebooks/devs/sams/galaxy-merger-rate.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
import holodeck as holo
import holodeck.sams
from holodeck import utils, plot
from holodeck.constants import MSOL, GYR
```
## Illustris Galaxy Merger Rate [Rodriguez-Gomez+2015]
```python
gmr = holo.sams.GMR_Illustris()
```
Reproduce Rodriguez-Gomez+2015 Fig.6, upper panel
```python
# Reproduce Rodriguez-Gomez+2015 Fig.6 (upper panel): galaxy merger rate
# versus descendant total mass, one panel per redshift, one curve per
# mass ratio.
redz_list = [0.1, 1.0, 2.0]
mrat_list = [1e-3, 1e-2, 1e-1, 0.25]
# Mass cutoff (in Msol) per mass-ratio curve; below it the curve is drawn faded.
mcuts = [5e10, 5e9, 5e8, 0.0]
mtot = np.logspace(8, 12, 101) * MSOL
fig, axes = plot.figax(figsize=[15, 5], ncols=3, wspace=0.0, ylim=[1e-2, 1e4], sharey=True)
for ii, (ax, redz) in enumerate(zip(axes, redz_list)):
    for jj, mrat in enumerate(mrat_list):
        rate = gmr(mtot, mrat, redz)
        # Faint curve over the full mass range; reuse its color for the
        # solid curve drawn only above the mass cutoff.
        hh, = ax.plot(mtot/MSOL, rate * GYR, alpha=0.25)
        sel = (mtot/MSOL > mcuts[jj])
        hh, = ax.plot(mtot[sel]/MSOL, rate[sel] * GYR, color=hh.get_color(), label=fr"$q_\star \approx 1/{1/mrat:.0f}$")
    if ii == 0:
        ax.legend(loc='center left', fontsize=12)
    elif ii == 1:
        # FIX: raw string -- '\s', '\,' and '\o' are invalid escape sequences
        # in a plain string literal (SyntaxWarning on modern Python); the
        # rendered label text is unchanged.
        ax.set(xlabel=r'Descendent (Total) Mass ($M_\star \, [M_\odot]$)')
plt.show()
```
Reproduce Rodriguez-Gomez+2015 Fig.6, lower panel
```python
# Reproduce Rodriguez-Gomez+2015 Fig.6 (lower panel): galaxy merger rate
# versus mass ratio, one panel per redshift, one curve per descendant mass.
redz_list = [0.1, 1.0, 2.0]
mtot_list = [1e11, 1e10, 1e9]
# Mass-ratio cutoff per total-mass curve; below it the curve is drawn faded.
qcuts = [2e-4, 2e-3, 2e-2]
mrat = np.logspace(-4, 0, 101)
fig, axes = plot.figax(figsize=[15, 5], ncols=3, wspace=0.0, ylim=[2e-3, 1e5], sharey=True)
for ii, (ax, redz) in enumerate(zip(axes, redz_list)):
    for jj, mtot in enumerate(mtot_list):
        rate = gmr(mtot*MSOL, mrat, redz)
        sel = (mrat > qcuts[jj])
        # Faint curve over the full ratio range; solid curve above the cutoff
        # reuses the same color.
        hh, = ax.plot(mrat, rate * GYR, alpha=0.25)
        ax.plot(mrat[sel], rate[sel] * GYR, color=hh.get_color(), label=fr"$M_\star \approx 10^{{{np.log10(mtot):.0f}}} \, M_\odot$")
    if ii == 0:
        ax.legend(loc='lower left', fontsize=12)
    elif ii == 1:
        # FIX: raw string -- '\s' is an invalid escape sequence in a plain
        # string literal (SyntaxWarning on modern Python); the rendered label
        # text is unchanged.
        ax.set(xlabel=r'Mass Ratio ($q_\star$)')
plt.show()
```
|
nanogravREPO_NAMEholodeckPATH_START.@holodeck_extracted@holodeck-main@notebooks@devs@sams@galaxy-merger-rate.ipynb@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/isosurface/_hoverlabel.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `hoverlabel` compound property of `isosurface` traces.

    Auto-generated plotly validator: delegates all validation to
    `CompoundValidator`, supplying the target data class name and the
    per-attribute documentation shown in error messages / docstrings.
    """

    def __init__(self, plotly_name="hoverlabel", parent_name="isosurface", **kwargs):
        # `data_class_str` and `data_docs` may be overridden by the caller via
        # kwargs; `pop` removes them so they are not forwarded twice.
        super(HoverlabelValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            align
                Sets the horizontal alignment of the text
                content within hover label box. Has an effect
                only if the hover label text spans more two or
                more lines
            alignsrc
                Sets the source reference on Chart Studio Cloud
                for  align .
            bgcolor
                Sets the background color of the hover labels
                for this trace
            bgcolorsrc
                Sets the source reference on Chart Studio Cloud
                for  bgcolor .
            bordercolor
                Sets the border color of the hover labels for
                this trace.
            bordercolorsrc
                Sets the source reference on Chart Studio Cloud
                for  bordercolor .
            font
                Sets the font used in hover labels.
            namelength
                Sets the default length (in number of
                characters) of the trace name in the hover
                labels for all traces. -1 shows the whole name
                regardless of length. 0-3 shows the first 0-3
                characters, and an integer >3 will show the
                whole name if it is less than that many
                characters, but if it is longer, will truncate
                to `namelength - 3` characters and add an
                ellipsis.
            namelengthsrc
                Sets the source reference on Chart Studio Cloud
                for  namelength .
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@isosurface@_hoverlabel.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "ggalloni/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# cobaya documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 3 19:28:54 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import ast
import yaml
project = "cobaya"
sys.path.insert(-1, os.path.abspath(f'../{project}'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints', # must be loaded *after* napoleon
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.graphviz',
'sphinxcontrib.programoutput',
'sphinx_rtd_theme',
'sphinxcontrib.jquery'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates', 'theme_customisation']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# Extract __year__/__author__/__version__ from the package's __init__.py by
# parsing its AST instead of importing it, so the docs build does not require
# the package's runtime dependencies.
fields = {"__year__": None, "__author__": None, "__version__": None}
with open(f"../{project}/__init__.py") as f:
    tree = ast.parse(f.read())
for node in ast.walk(tree):
    # FIX: only consider plain `NAME = <literal>` assignments.  Assignment
    # targets such as `obj.attr = ...` or tuple unpacking are not ast.Name
    # nodes and have no `.id`, which previously raised AttributeError.
    if (isinstance(node, ast.Assign) and isinstance(node.targets[0], ast.Name)
            and node.targets[0].id in fields):
        fields[node.targets[0].id] = ast.literal_eval(node.value)
copyright = fields["__year__"] + " " + fields["__author__"]
author = fields["__author__"]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = fields["__version__"]
# The full version, including alpha/beta/rc tags.
release = version
from cobaya.tools import get_available_internal_classes
from cobaya.likelihood import Likelihood
from cobaya.theory import Theory
from cobaya.sampler import Sampler
# Config of the inheritance diagram
from sphinx.ext import inheritance_diagram
inheritance_graph_attrs = dict(rankdir="LR", size='""')
# change inheritance diagram to pull all internal component classes
bases = (Likelihood, Theory, Sampler)
old_names = inheritance_diagram.InheritanceGraph.class_name
old_import = inheritance_diagram.import_classes
def import_classes(name, currmodule):
    """Resolve *name* for the inheritance diagram.

    The pseudo-names 'likelihoods', 'samplers' and 'theorys' expand to every
    internal cobaya component of that kind; any other name is handled by the
    stock sphinx resolver.
    """
    if name not in ('likelihoods', 'samplers', 'theorys'):
        return old_import(name, currmodule)
    # Strip the trailing 's' to get the component kind.
    return list(get_available_internal_classes(name[:-1], True))
def class_name(self, cls, parts, aliases):
    """Return the display name used for *cls* in the inheritance diagram.

    Internal cobaya components (subclasses of Likelihood/Theory/Sampler, but
    not those bases themselves) get their qualified component name, shortened
    to the last dotted segment for private/relative/likelihood-prefixed
    names; everything else uses sphinx's default naming.
    """
    if not (issubclass(cls, bases) and cls not in bases):
        return old_names(self, cls, parts, aliases)
    label = cls.get_qualified_class_name()
    # str.startswith accepts a tuple of prefixes.
    if label.startswith(('_', '.', 'likelihood.')):
        label = label.split('.')[-1]
    return label
inheritance_diagram.import_classes = import_classes
inheritance_diagram.InheritanceGraph.class_name = class_name
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"logo_only": False, "style_external_links": True}
# For the readthedocs theme, check
# https://github.com/rtfd/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/theme.conf
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'cobaya v0.1 (alpha)'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = "../img/cobaya_clear.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "../img/cobaya.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['theme_customisation']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
html_use_index = False
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'cobayadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cobaya.tex', 'Cobaya Documentation',
'Jesus Torrado, Antony Lewis, etc', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cobaya', 'cobaya Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cobaya', 'cobaya Documentation',
author, 'cobaya', 'A code for Bayesian analysis.',
'Miscellaneous'),
]
pdf_documents = [(master_doc, 'cobaya', 'Cobaya Documentation', author)]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Options for autodoc
autodoc_member_order = 'bysource'
# Mock modules, so that readthedocs works!
autodoc_mock_imports = ['getdist']
|
ggalloniREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@docs@conf.py@.PATH_END.py
|
{
"filename": "bnre.py",
"repo_name": "mackelab/sbi",
"repo_path": "sbi_extracted/sbi-main/sbi/inference/trainers/nre/bnre.py",
"type": "Python"
}
|
# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
# under the Apache License Version 2.0, see <https://www.apache.org/licenses/>
from typing import Callable, Dict, Optional, Union
import torch
from torch import Tensor, nn, ones
from torch.distributions import Distribution
from sbi.inference.trainers.nre.nre_a import NRE_A
from sbi.sbi_types import TensorboardSummaryWriter
from sbi.utils.sbiutils import del_entries
class BNRE(NRE_A):
    def __init__(
        self,
        prior: Optional[Distribution] = None,
        classifier: Union[str, Callable] = "resnet",
        device: str = "cpu",
        logging_level: Union[int, str] = "warning",
        summary_writer: Optional[TensorboardSummaryWriter] = None,
        show_progress_bars: bool = True,
    ):
        r"""Balanced neural ratio estimation (BNRE)[1].

        BNRE is a variation of NRE aiming to produce more conservative posterior
        approximations.

        [1] Delaunoy, A., Hermans, J., Rozet, F., Wehenkel, A., & Louppe, G..
        Towards Reliable Simulation-Based Inference with Balanced Neural Ratio
        Estimation.
        NeurIPS 2022. https://arxiv.org/abs/2208.13624

        Args:
            prior: A probability distribution that expresses prior knowledge about the
                parameters, e.g. which ranges are meaningful for them. If `None`, the
                prior must be passed to `.build_posterior()`.
            classifier: Classifier trained to approximate likelihood ratios. If it is
                a string, use a pre-configured network of the provided type (one of
                linear, mlp, resnet). Alternatively, a function that builds a custom
                neural network can be provided. The function will be called with the
                first batch of simulations $(\theta, x)$, which can thus be used for
                shape inference and potentially for z-scoring. It needs to return a
                PyTorch `nn.Module` implementing the classifier.
            device: Training device, e.g., "cpu", "cuda" or "cuda:{0, 1, ...}".
            logging_level: Minimum severity of messages to log. One of the strings
                INFO, WARNING, DEBUG, ERROR and CRITICAL.
            summary_writer: A tensorboard `SummaryWriter` to control, among others, log
                file location (default is `<current working directory>/logs`.)
            show_progress_bars: Whether to show a progressbar during simulation and
                sampling.
        """
        # Forward every constructor argument (minus self/__class__) to NRE_A.
        kwargs = del_entries(locals(), entries=("self", "__class__"))
        super().__init__(**kwargs)

    def train(
        self,
        regularization_strength: float = 100.0,
        training_batch_size: int = 200,
        learning_rate: float = 5e-4,
        validation_fraction: float = 0.1,
        stop_after_epochs: int = 20,
        max_num_epochs: int = 2**31 - 1,
        clip_max_norm: Optional[float] = 5.0,
        resume_training: bool = False,
        discard_prior_samples: bool = False,
        retrain_from_scratch: bool = False,
        show_train_summary: bool = False,
        dataloader_kwargs: Optional[Dict] = None,
    ) -> nn.Module:
        r"""Return classifier that approximates the ratio $p(\theta,x)/p(\theta)p(x)$.

        Args:
            regularization_strength: The multiplicative coefficient applied to the
                balancing regularizer ($\lambda$).
            training_batch_size: Training batch size.
            learning_rate: Learning rate for Adam optimizer.
            validation_fraction: The fraction of data to use for validation.
            stop_after_epochs: The number of epochs to wait for improvement on the
                validation set before terminating training.
            max_num_epochs: Maximum number of epochs to run. If reached, we stop
                training even when the validation loss is still decreasing. Otherwise,
                we train until validation loss increases (see also `stop_after_epochs`).
            clip_max_norm: Value at which to clip the total gradient norm in order to
                prevent exploding gradients. Use None for no clipping.
            resume_training: Can be used in case training time is limited, e.g. on a
                cluster. If `True`, the split between train and validation set, the
                optimizer, the number of epochs, and the best validation log-prob will
                be restored from the last time `.train()` was called.
            discard_prior_samples: Whether to discard samples simulated in round 1, i.e.
                from the prior. Training may be sped up by ignoring such less targeted
                samples.
            retrain_from_scratch: Whether to retrain the conditional density
                estimator for the posterior from scratch each round.
            show_train_summary: Whether to print the number of epochs and validation
                loss and leakage after the training.
            dataloader_kwargs: Additional or updated kwargs to be passed to the training
                and validation dataloaders (like, e.g., a collate_fn)

        Returns:
            Classifier that approximates the ratio $p(\theta,x)/p(\theta)p(x)$.
        """
        # NOTE(review): a stale docstring entry for `exclude_invalid_x` was
        # removed here -- that parameter is not part of this signature.
        kwargs = del_entries(locals(), entries=("self", "__class__"))
        # Route the regularization strength to the loss function via
        # `loss_kwargs`, as expected by the parent trainer's `train()`.
        kwargs["loss_kwargs"] = {
            "regularization_strength": kwargs.pop("regularization_strength")
        }
        return super().train(**kwargs)

    def _loss(
        self, theta: Tensor, x: Tensor, num_atoms: int, regularization_strength: float
    ) -> Tensor:
        r"""Returns the binary cross-entropy loss for the trained classifier.

        The classifier takes as input a $(\theta,x)$ pair. It is trained to predict 1
        if the pair was sampled from the joint $p(\theta,x)$, and to predict 0 if the
        pair was sampled from the marginals $p(\theta)p(x)$.

        (Docstring made raw so that `\theta` is not mangled by the `\t` escape.)
        """
        assert theta.shape[0] == x.shape[0], "Batch sizes for theta and x must match."
        batch_size = theta.shape[0]
        logits = self._classifier_logits(theta, x, num_atoms)
        likelihood = torch.sigmoid(logits).squeeze()
        # Alternating pairs where there is one sampled from the joint and one
        # sampled from the marginals. The first element is sampled from the
        # joint p(theta, x) and is labelled 1. The second element is sampled
        # from the marginals p(theta)p(x) and is labelled 0. And so on.
        labels = ones(2 * batch_size, device=self._device)  # two atoms
        labels[1::2] = 0.0
        # Binary cross entropy to learn the likelihood (AALR-specific)
        bce = nn.BCELoss()(likelihood, labels)
        # Balancing regularizer: pushes the mean predicted probability of each
        # joint/marginal pair towards summing to 1 (Eq. in Delaunoy+2022).
        regularizer = (
            (torch.sigmoid(logits[0::2]) + torch.sigmoid(logits[1::2]) - 1)
            .mean()
            .square()
        )
        return bce + regularization_strength * regularizer
|
mackelabREPO_NAMEsbiPATH_START.@sbi_extracted@sbi-main@sbi@inference@trainers@nre@bnre.py@.PATH_END.py
|
{
"filename": "sparse_bcoo_bcsr_test.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/tests/sparse_bcoo_bcsr_test.py",
"type": "Python"
}
|
# Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from functools import partial
import itertools
import math
import operator
import random
import unittest
from absl.testing import absltest
import jax
from jax import jit
from jax import lax
from jax import vmap
from jax._src import test_util as jtu
from jax._src import xla_bridge
from jax._src.lax.lax import remaining
from jax._src.util import unzip2
from jax.experimental import sparse
from jax.experimental.sparse import bcoo as sparse_bcoo
from jax.experimental.sparse import bcsr as sparse_bcsr
from jax.experimental.sparse import test_util as sptu
from jax.experimental.sparse import util as sparse_util
import jax.numpy as jnp
import jax.random
from jax.util import split_list
import numpy as np
jax.config.parse_flags_with_absl()
# Pairs of array shapes with equal total element counts (e.g. (6,) vs (2, 3)).
# Presumably consumed by reshape-compatibility tests -- confirm against the
# (not visible here) test methods that reference this constant.
COMPATIBLE_SHAPE_PAIRS = [
    [(), ()],
    [(), (1,)],
    [(3,), (1, 3)],
    [(3, 1), (3,)],
    [(6,), (2, 3)],
    [(3, 2), (6,)],
    [(2, 3), (1, 6)],
    [(2, 4), (4, 1, 2)],
    [(3, 4, 5), (2, 6, 5)],
    [(2,), (2,)],
]
def _generate_batched_dot_general_properties(
    shapes=((5,), (2, 3), (2, 3, 4), (2, 3, 4, 4)), sparse_format="bcoo"
) -> sptu.BatchedDotGeneralProperties:
  """Generator of properties for bcoo_dot_general tests.

  Note: this is a generator function -- it *yields* a stream of
  ``sptu.BatchedDotGeneralProperties``, despite the scalar return annotation.
  For each base shape and sparse layout, it enumerates subsets of batch and
  contracting dimensions, then applies deterministic pseudo-random axis
  permutations to the lhs/rhs shapes.
  """
  # Fixed seed: the sampled permutations are identical on every run, keeping
  # the parameterized test set stable.
  rng = random.Random(0)
  if sparse_format not in ['bcoo', 'bcsr']:
    raise ValueError(f"Sparse format {sparse_format} not supported.")
  for shape in shapes:
    for layout in sptu.iter_sparse_layouts(shape):
      # BCSR layouts require exactly two sparse dimensions; skip the rest.
      if sparse_format == "bcsr" and layout.n_sparse != 2:
        continue
      # Partition the axis indices into (batch, sparse, dense) groups.
      subsets = split_list(range(len(shape)), [layout.n_batch, layout.n_sparse])
      for batch_dims in sptu.iter_subsets(range(layout.n_batch)):
        for contracting_dims in sptu.iter_subsets(
            remaining(range(layout.n_batch + layout.n_sparse), batch_dims)
        ):
          # We want coverage of permutations without generating hundreds of thousands of test cases;
          # we do this by deterministic pseudo-random sampling instead of iterating.
          rhs_permute = rng.sample(range(len(shape)), len(shape))
          # The lhs permutation shuffles only *within* each of the
          # (batch, sparse, dense) groups, preserving the layout structure.
          lhs_permute = list(
              itertools.chain.from_iterable(
                  rng.sample(subset, len(subset)) for subset in subsets
              )
          )
          yield sptu.BatchedDotGeneralProperties(
              lhs_shape=tuple(shape[p] for p in lhs_permute),
              rhs_shape=tuple(shape[p] for p in rhs_permute),
              n_batch=layout.n_batch,
              n_dense=layout.n_dense,
              # Map the chosen dimension indices through each permutation to
              # build dot_general's ((contracting), (batch)) dimension numbers.
              dimension_numbers=(
                  (
                      [lhs_permute.index(d) for d in contracting_dims],
                      [rhs_permute.index(d) for d in contracting_dims],
                  ),
                  (
                      [lhs_permute.index(d) for d in batch_dims],
                      [rhs_permute.index(d) for d in batch_dims],
                  ),
              ),
          )
def _generate_bcoo_dot_general_sampled_properties(
    shapes=((5,), (2, 3), (2, 3, 4), (2, 3, 4, 4))
) -> sptu.BatchedDotGeneralProperties:
  """Generator of properties for bcoo_dot_general_sampled tests.

  Note: this is a generator function -- it *yields* a stream of
  ``sptu.BatchedDotGeneralProperties``.  Unlike the batched variant above,
  both lhs and rhs axis orders are fully shuffled, and sparse layouts are
  enumerated over the *output* shape of the contraction.
  """
  # Fixed seed keeps the sampled permutations (and so the test set) stable.
  rng = random.Random(0)
  for shape in shapes:
    for batch_dims in sptu.iter_subsets(range(len(shape))):
      for contracting_dims in sptu.iter_subsets(
          remaining(range(len(shape)), batch_dims)
      ):
        # We want coverage of permutations without generating hundreds of thousands of test cases;
        # we do this by deterministic pseudo-random sampling instead of iterating.
        lhs_permute = rng.sample(range(len(shape)), len(shape))
        rhs_permute = rng.sample(range(len(shape)), len(shape))
        lhs_shape = tuple(shape[p] for p in lhs_permute)
        rhs_shape = tuple(shape[p] for p in rhs_permute)
        # Translate the chosen dims through each permutation into
        # dot_general's ((contracting), (batch)) dimension numbers.
        dimension_numbers = (
            (
                [lhs_permute.index(d) for d in contracting_dims],
                [rhs_permute.index(d) for d in contracting_dims],
            ),
            (
                [lhs_permute.index(d) for d in batch_dims],
                [rhs_permute.index(d) for d in batch_dims],
            ),
        )
        # Compute the contraction's output shape abstractly (no FLOPs) so we
        # can enumerate sparse layouts of the *result*.
        out = jax.eval_shape(partial(lax.dot_general, dimension_numbers=dimension_numbers),
                             jax.ShapeDtypeStruct(lhs_shape, 'float32'), jax.ShapeDtypeStruct(rhs_shape, 'float32'))
        for layout in sptu.iter_sparse_layouts(out.shape):
          yield sptu.BatchedDotGeneralProperties(
              lhs_shape=lhs_shape,
              rhs_shape=rhs_shape,
              n_batch=layout.n_batch,
              n_dense=layout.n_dense,
              dimension_numbers=dimension_numbers,
          )
all_dtypes = jtu.dtypes.integer + jtu.dtypes.floating + jtu.dtypes.complex
def _is_required_cuda_version_satisfied(cuda_version):
  """Whether the active backend reports a CUDA version >= `cuda_version`.

  Returns False when the platform version is unknown or the backend is ROCm;
  otherwise compares the final whitespace-separated token of the platform
  version string (parsed as an int) against `cuda_version`.
  """
  platform_version = xla_bridge.get_backend().platform_version
  tokens = platform_version.split()
  if platform_version == "<unknown>" or "rocm" in tokens:
    return False
  return int(tokens[-1]) >= cuda_version
class BCOOTest(sptu.SparseTestCase):
def gpu_matmul_warning_context(self, msg):
  """Context manager asserting a CuSparseEfficiencyWarning matching `msg`.

  When the `jax_bcoo_cusparse_lowering` config flag is off, no warning is
  expected, so a no-op nullcontext is returned instead.
  """
  if jax.config.jax_bcoo_cusparse_lowering:
    return self.assertWarnsRegex(sparse.CuSparseEfficiencyWarning, msg)
  return contextlib.nullcontext()
def test_repr(self):
  """Checks BCOO.__repr__ for valid, invalid, and traced matrices."""
  x = sparse.BCOO.fromdense(jnp.arange(5, dtype='float32'))
  self.assertEqual(repr(x), "BCOO(float32[5], nse=4)")
  y = sparse.BCOO.fromdense(jnp.arange(6, dtype='float32').reshape(2, 3), n_batch=1)
  self.assertEqual(repr(y), "BCOO(float32[2, 3], nse=3, n_batch=1)")
  y = sparse.BCOO.fromdense(jnp.arange(6, dtype='float32').reshape(2, 3), n_batch=1, n_dense=1)
  self.assertEqual(repr(y), "BCOO(float32[2, 3], nse=1, n_batch=1, n_dense=1)")
  # Corrupt the indices so the repr falls back to the <invalid> form.
  M_invalid = sparse.BCOO.fromdense(jnp.arange(6, dtype='float32').reshape(2, 3))
  M_invalid.indices = jnp.array([])
  self.assertEqual(repr(M_invalid), "BCOO(<invalid>)")
  # Under jit, the repr should show the tracer wrapping the abstract BCOO.
  @jit
  def f(x):
    self.assertEqual(repr(x), "DynamicJaxprTracer[BCOO(float32[5], nse=4)]")
  f(x)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=all_dtypes,
)
def test_empty(self, shape, dtype, n_batch, n_dense):
  """sparse.empty produces an all-zero BCOO with nse=0 and correct metadata."""
  M = sparse.empty(shape, dtype=dtype, n_batch=n_batch, n_dense=n_dense)
  self.assertIsInstance(M, sparse.BCOO)
  self.assertEqual(M.nse, 0)
  self.assertEqual(M.n_batch, n_batch)
  self.assertEqual(M.n_dense, n_dense)
  self.assertEqual(M.dtype, dtype)
  self.assertArraysEqual(M.todense(), jnp.empty(shape, dtype))
@jtu.sample_product(
    [
        dict(n_batch=layout.n_batch, n_dense=layout.n_dense)
        for layout in sptu.iter_sparse_layouts((3, 3))
    ],
    N=[3, 5],
    M=[None, 4],
    k=[-3, -1, 0, 2, 4],
    dtype=all_dtypes,
)
def test_eye(self, N, M, k, dtype, n_batch, n_dense):
  """sparse.eye matches jnp.eye in values, metadata, and stored-element count."""
  mat = sparse.eye(N, M, k, dtype=dtype, n_batch=n_batch, n_dense=n_dense)
  expected = jnp.eye(N, M, k, dtype=dtype)
  # The reference nse comes from densifying then re-sparsifying jnp.eye.
  expected_nse = sparse.BCOO.fromdense(expected, n_batch=n_batch, n_dense=n_dense).nse
  self.assertIsInstance(mat, sparse.BCOO)
  self.assertEqual(mat.n_batch, n_batch)
  self.assertEqual(mat.n_dense, n_dense)
  self.assertEqual(mat.dtype, dtype)
  self.assertEqual(mat.nse, expected_nse)
  self.assertArraysEqual(mat.todense(), expected)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=all_dtypes,
)
def test_bcoo_dense_round_trip(self, shape, dtype, n_batch, n_dense):
  """dense -> BCOO -> dense round trip is the identity (incl. jit/vmap/grad)."""
  n_sparse = len(shape) - n_batch - n_dense
  rng = sptu.rand_sparse(self.rng())
  M = rng(shape, dtype)
  nse = sparse.util._count_stored_elements(M, n_batch=n_batch, n_dense=n_dense)
  def round_trip(M):
    return sparse.BCOO.fromdense(M, nse=nse, n_batch=n_batch, n_dense=n_dense).todense()
  args_maker = lambda: [M]
  ident = lambda x: x
  self._CheckAgainstNumpy(ident, round_trip, args_maker)
  self._CompileAndCheck(round_trip, args_maker)
  self._CheckBatchingSparse(ident, round_trip, args_maker, bdims=self._random_bdims(n_batch))
  if jnp.issubdtype(dtype, jnp.floating):
    # For n_sparse != 0, we can't use an identity because output zeros must not
    # be dependent on input zeros. This mimics the code in count_stored_elements().
    def expected(M):
      if n_sparse == 0: return M
      mask = (M != 0).any(range(M.ndim - n_dense, M.ndim), keepdims=True)
      return jnp.where(mask, M, 0)
    self._CheckGradsSparse(expected, round_trip, args_maker)
def test_bcoo_fromdense_sorted_and_unique_indices(self):
  """sort_indices() restores fromdense's sorted order; uniqueness is kept."""
  rng = self.rng()
  rng_sparse = sptu.rand_sparse(rng)
  mat = sparse.BCOO.fromdense(rng_sparse((5, 6), np.float32))
  # Scramble the stored-element order while keeping data/indices paired.
  perm = rng.permutation(mat.nse)
  mat_unsorted = sparse.BCOO((mat.data[perm], mat.indices[perm]),
                             shape=mat.shape,
                             unique_indices=mat.unique_indices)
  mat_resorted = mat_unsorted.sort_indices()
  with self.subTest('sorted indices'):
    self.assertArraysEqual(mat.indices, mat_resorted.indices)
    self.assertArraysEqual(mat.data, mat_resorted.data)
  with self.subTest('unique indices'):
    # Permuting entries must not clear the unique_indices flag.
    self.assertTrue(mat.unique_indices)
    self.assertTrue(mat_unsorted.unique_indices)
    self.assertTrue(mat_resorted.unique_indices)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
    assume_unique=[True, False, None],
)
def test_bcoo_extract(self, shape, dtype, n_batch, n_dense, assume_unique):
  """bcoo_extract pulls dense values at a BCOO's stored indices."""
  rng = sptu.rand_sparse(self.rng())
  def args_maker():
    x = rng(shape, dtype)
    x_bcoo = sparse.bcoo_fromdense(x, n_batch=n_batch, n_dense=n_dense)
    # Unique indices are required for this test when assume_unique == True.
    self.assertTrue(x_bcoo.unique_indices)
    return x_bcoo, x
  # Extracting at the sparse matrix's own indices should reproduce the
  # dense array's values, i.e. behave like the identity on the dense arg.
  dense_op = lambda _, x: x
  sparse_op = partial(sparse.bcoo_extract, assume_unique=assume_unique)
  self._CheckAgainstDense(dense_op, sparse_op, args_maker)
  self._CheckBatchingSparse(dense_op, sparse_op, args_maker, bdims=2 * self._random_bdims(n_batch))
def test_bcoo_extract_duplicate_indices(self):
    """_bcoo_extract treats repeated indices per the assume_unique flag."""
    vals = jnp.array([1, 3, 9, 27, 81, 243])
    idx = jnp.array([[0], [5], [0], [3], [2], [3]])
    dense = sparse.BCOO((vals, idx), shape=(6,)).todense()
    # With assume_unique=True every stored entry reads the (summed) dense value.
    extracted_unique = sparse_bcoo._bcoo_extract(idx, dense, assume_unique=True)
    self.assertArraysEqual(extracted_unique, jnp.array([10, 3, 10, 270, 81, 270]))
    # Without it, later duplicates are zeroed out instead.
    extracted_dups = sparse_bcoo._bcoo_extract(idx, dense, assume_unique=False)
    self.assertArraysEqual(extracted_dups, jnp.array([10, 3, 0, 270, 81, 0]))
def test_bcoo_extract_duplicate_indices_n_sparse_0(self):
    """Duplicate handling in _bcoo_extract when there are no sparse dims."""
    vals = jnp.arange(6).reshape(3, 2)
    idx = jnp.empty((3, 2, 0), dtype=int)
    dense = sparse.BCOO((vals, idx), shape=(3,)).todense()
    # Each batch sums its two entries; assume_unique lets both copies read it.
    self.assertArraysEqual(
        sparse_bcoo._bcoo_extract(idx, dense, assume_unique=True),
        jnp.array([[1, 1], [5, 5], [9, 9]]))
    # Otherwise only the first copy per batch is kept; the rest are zeroed.
    self.assertArraysEqual(
        sparse_bcoo._bcoo_extract(idx, dense, assume_unique=False),
        jnp.array([[1, 0], [5, 0], [9, 0]]))
def test_bcoo_extract_batching(self):
    """Regression test for https://github.com/jax-ml/jax/issues/9431."""
    extract = sparse_bcoo._bcoo_extract
    indices = jnp.zeros((4, 1, 1), dtype=int)
    mat = jnp.arange(4.).reshape((4, 1))

    # Batch over the indices only.
    manual = jnp.vstack([extract(ix, mat[0]) for ix in indices])
    batched = vmap(extract, in_axes=(0, None))(indices, mat[0])
    self.assertArraysEqual(manual, batched)

    # Batch over the dense matrix only.
    manual = jnp.vstack([extract(indices[0], row) for row in mat])
    batched = vmap(extract, in_axes=(None, 0))(indices[0], mat)
    self.assertArraysEqual(manual, batched)

    # Batch over both operands in lockstep.
    manual = jnp.vstack([extract(ix, row) for ix, row in zip(indices, mat)])
    batched = vmap(extract, in_axes=0)(indices, mat)
    self.assertArraysEqual(manual, batched)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.floating,
)
def test_bcoo_extract_ad(self, shape, dtype, n_batch, n_dense):
    """Forward- and reverse-mode jacobians of _bcoo_extract agree."""
    rng = sptu.rand_sparse(self.rng())
    M = rng(shape, dtype)
    nse = sparse.util._count_stored_elements(M, n_batch=n_batch,
                                             n_dense=n_dense)
    data, indices = sparse_bcoo._bcoo_fromdense(M, nse=nse, n_batch=n_batch, n_dense=n_dense)
    # Differentiate extraction w.r.t. the dense matrix at fixed indices.
    extract = partial(sparse_bcoo._bcoo_extract, indices)
    j1 = jax.jacfwd(extract)(M)
    j2 = jax.jacrev(extract)(M)
    hess = jax.hessian(extract)(M)
    self.assertArraysAllClose(j1, j2)
    # Jacobian maps dense entries to stored entries; the hessian carries
    # one additional copy of M's shape.
    self.assertEqual(j1.shape, data.shape + M.shape)
    self.assertEqual(hess.shape, data.shape + 2 * M.shape)
def test_bcoo_extract_zero_nse(self):
    """Regression test for https://github.com/jax-ml/jax/issues/13653."""
    # Layout (n_batch, n_sparse, n_dense) = (1, 0, 0) with nse = 2.
    self._CompileAndCheck(
        sparse_bcoo._bcoo_extract,
        lambda: (jnp.zeros((3, 2, 0), dtype='int32'), jnp.arange(3)))
    # Layout (n_batch, n_sparse, n_dense) = (0, 0, 1) with nse = 2.
    self._CompileAndCheck(
        sparse_bcoo._bcoo_extract,
        lambda: (jnp.zeros((2, 0), dtype='int32'), jnp.arange(3)))
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.numeric,
)
def test_bcoo_transpose(self, shape, dtype, n_batch, n_dense):
    """bcoo_transpose matches lax.transpose for layout-preserving permutations."""
    n_sparse = len(shape) - n_batch - n_dense
    rng = self.rng()
    sprng = sptu.rand_bcoo(rng, n_batch=n_batch, n_dense=n_dense)
    # Permute only within each of the batch/sparse/dense axis groups, so the
    # BCOO layout (n_batch, n_dense) is preserved by the transpose.
    permutation = np.concatenate([
        rng.permutation(range(n_batch)),
        rng.permutation(range(n_batch, n_batch + n_sparse)),
        rng.permutation(range(n_batch + n_sparse, len(shape))),
    ]).astype(int)
    args_maker = lambda: [sprng(shape, dtype)]
    dense_func = partial(lax.transpose, permutation=permutation)
    sparse_func = partial(sparse.bcoo_transpose, permutation=permutation)
    self._CheckAgainstDense(dense_func, sparse_func, args_maker)
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(dense_func, sparse_func, args_maker)
    self._CheckBatchingSparse(dense_func, sparse_func, args_maker, bdims=self._random_bdims(n_batch))
def test_bcoo_transpose_indices_sorted(self):
    """transpose tracks whether the sparse-dim indices remain sorted."""
    rng = self.rng()
    make_sparse = sptu.rand_sparse(rng)
    n_batch, n_dense = 2, 2
    shape = (2, 3, 4, 5, 6, 7, 8)
    mat = sparse.BCOO.fromdense(make_sparse(shape, np.float32),
                                n_dense=n_dense, n_batch=n_batch)

    # Permuting only the batch and dense axes leaves sparse indices sorted.
    mat_T_indices_sorted = mat.transpose(axes=(1, 0, 2, 3, 4, 6, 5))
    self.assertTrue(mat_T_indices_sorted.indices_sorted)

    # Swapping two sparse axes invalidates the sorted-index flag.
    mat_T_indices_unsorted = mat.transpose(axes=(0, 1, 3, 2, 4, 5, 6))
    self.assertFalse(mat_T_indices_unsorted.indices_sorted)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape, min_n_batch=1)
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
def test_bcoo_todense_partial_batch(self, shape, dtype, n_batch, n_dense):
    """_bcoo_todense broadcasts size-1 leading batch dims of data/indices."""
    rng = sptu.rand_sparse(self.rng())
    M = rng(shape, dtype)
    nse = sparse.util._count_stored_elements(M, n_batch=n_batch,
                                             n_dense=n_dense)
    data, indices = sparse_bcoo._bcoo_fromdense(M, nse=nse, n_batch=n_batch, n_dense=n_dense)
    # A broadcasted (size-1) batch of indices must behave like explicit copies.
    M1 = sparse_bcoo._bcoo_todense(data, indices[:1], spinfo=sparse_util.SparseInfo(M.shape))
    M2 = sparse_bcoo._bcoo_todense(data, jnp.stack(shape[0] * [indices[0]]), spinfo=sparse_util.SparseInfo(M.shape))
    self.assertAllClose(M1, M2)
    # Same property for a broadcasted batch of data.
    M3 = sparse_bcoo._bcoo_todense(data[:1], indices, spinfo=sparse_util.SparseInfo(M.shape))
    M4 = sparse_bcoo._bcoo_todense(jnp.stack(shape[0] * [data[0]]), indices, spinfo=sparse_util.SparseInfo(M.shape))
    self.assertAllClose(M3, M4)
@jtu.sample_product(
    props=_generate_batched_dot_general_properties(),
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
@jax.default_matmul_precision("float32")
def test_bcoo_dot_general(
    self, dtype: np.dtype, props: sptu.BatchedDotGeneralProperties
):
    """bcoo_dot_general agrees with lax.dot_general (values, grads, batching)."""
    rng = jtu.rand_default(self.rng())
    sprng = sptu.rand_bcoo(self.rng(), n_batch=props.n_batch, n_dense=props.n_dense)
    args_maker = lambda: [sprng(props.lhs_shape, dtype),
                          rng(props.rhs_shape, dtype)]
    dense_fun = partial(lax.dot_general, dimension_numbers=props.dimension_numbers)
    sparse_fun = partial(sparse.bcoo_dot_general, dimension_numbers=props.dimension_numbers)

    tol = {np.float64: 1E-12, np.complex128: 1E-12,
           np.float32: 1E-5, np.complex64: 1E-5}

    self._CheckAgainstDense(dense_fun, sparse_fun, args_maker, tol=tol)
    if jnp.issubdtype(dtype, jnp.floating) and props.n_dense == 0:
        # Dense dimensions are not yet fully supported in reverse mode, so
        # gradients are only checked when n_dense == 0; there both modes apply.
        # (The former `modes = ['fwd'] if props.n_dense != 0 else ['fwd', 'rev']`
        # was dead code: the guard above already excludes n_dense != 0.)
        self._CheckGradsSparse(dense_fun, sparse_fun, args_maker,
                               modes=['fwd', 'rev'], atol=tol, rtol=tol)
    self._CheckBatchingSparse(dense_fun, sparse_fun, args_maker, atol=tol, rtol=tol,
                              bdims=self._random_bdims(props.n_batch, len(props.rhs_shape)))
@jtu.sample_product(
    [
        dict(
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            lhs_contracting=lhs_contracting,
            rhs_contracting=rhs_contracting,
        )
        for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [
            [(5,), (5,), [0], [0]],
            [(5,), (5, 7), [0], [0]],
            [(5,), (7, 5), [0], [1]],
            [(5, 7), (5,), [0], [0]],
            [(7, 5), (5,), [1], [0]],
            [(3, 5), (2, 5), [1], [1]],
            [(3, 5), (5, 2), [1], [0]],
            [(5, 3), (2, 5), [0], [1]],
            [(5, 3), (5, 2), [0], [0]],
        ]
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
@jax.default_matmul_precision("float32")
@jtu.run_on_devices("gpu")
def test_bcoo_dot_general_cusparse(
    self, lhs_shape, rhs_shape, dtype, lhs_contracting, rhs_contracting
):
    """1D/2D sparse-dense products on GPU match the dense reference."""
    rng = jtu.rand_small(self.rng())
    rng_sparse = sptu.rand_sparse(self.rng())
    def args_maker():
        lhs = rng_sparse(lhs_shape, dtype)
        rhs = rng(rhs_shape, dtype)
        nse = sparse.util._count_stored_elements(lhs, n_batch=0, n_dense=0)
        # int32 indices are used for the GPU sparse kernels.
        lhs_bcoo = sparse_bcoo.bcoo_fromdense(lhs, nse=nse, index_dtype=jnp.int32)
        return lhs_bcoo, lhs, rhs
    dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))
    def f_dense(lhs_bcoo, lhs, rhs):
        return lax.dot_general(lhs, rhs, dimension_numbers=dimension_numbers)
    def f_sparse(lhs_bcoo, lhs, rhs):
        return sparse_bcoo.bcoo_dot_general(lhs_bcoo, rhs,
                                            dimension_numbers=dimension_numbers)
    self._CompileAndCheck(f_sparse, args_maker)
    self._CheckAgainstNumpy(f_dense, f_sparse, args_maker)
@jtu.sample_product(
    [
        dict(
            n_batch=n_batch,
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            lhs_contracting=lhs_contracting,
            rhs_contracting=rhs_contracting,
        )
        for n_batch, lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [
            [1, (1, 2, 3), (3, 2), [2], [0]],
            [1, (1, 3, 2), (3, 2), [1], [0]],
            [1, (1, 3, 2), (4, 3), [1], [1]],
            [1, (4, 2, 3), (3, 5), [2], [0]],
            [1, (4, 2, 3), (2, 5), [1], [0]],
            [1, (4, 2, 3), (5, 3), [2], [1]],
        ]
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
@jax.default_matmul_precision("float32")
@jtu.run_on_devices("gpu")
def test_bcoo_batched_matmat_cusparse(
    self,
    n_batch,
    lhs_shape,
    rhs_shape,
    dtype,
    lhs_contracting,
    rhs_contracting,
):
    """Batched sparse-dense matmul on GPU matches the dense reference."""
    rng = jtu.rand_small(self.rng())
    rng_sparse = sptu.rand_sparse(self.rng())
    def args_maker():
        lhs = rng_sparse(lhs_shape, dtype)
        rhs = rng(rhs_shape, dtype)
        nse = sparse.util._count_stored_elements(lhs, n_batch=n_batch,
                                                 n_dense=0)
        # int32 indices are used for the GPU sparse kernels.
        lhs_bcoo = sparse_bcoo.bcoo_fromdense(lhs, n_batch=n_batch, nse=nse,
                                              index_dtype=jnp.int32)
        return lhs_bcoo, lhs, rhs
    dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))
    def f_dense(lhs_bcoo, lhs, rhs):
        return lax.dot_general(lhs, rhs, dimension_numbers=dimension_numbers)
    def f_sparse(lhs_bcoo, lhs, rhs):
        return sparse_bcoo.bcoo_dot_general(lhs_bcoo, rhs,
                                            dimension_numbers=dimension_numbers)
    # TODO(tianjianlu): In some cases, this fails python_should_be_executing.
    # self._CompileAndCheck(f_sparse, args_maker)
    self._CheckAgainstNumpy(f_dense, f_sparse, args_maker)
@jtu.sample_product(
    [
        dict(
            n_batch=n_batch,
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            lhs_contracting=lhs_contracting,
            rhs_contracting=rhs_contracting,
        )
        for n_batch, lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [
            [1, (1, 2, 3), (3), [2], [0]],
            [1, (1, 2), (3, 2), [1], [1]],
        ]
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
@jtu.run_on_devices("gpu")
def test_bcoo_batched_matmat_default_lowering(
    self,
    n_batch,
    lhs_shape,
    rhs_shape,
    dtype,
    lhs_contracting,
    rhs_contracting,
):
    """Batch-mode cases that take the default (non-cusparse) GPU lowering."""
    rng = jtu.rand_small(self.rng())
    rng_sparse = sptu.rand_sparse(self.rng())
    lhs = rng_sparse(lhs_shape, dtype)
    rhs = rng(rhs_shape, dtype)
    nse = sparse.util._count_stored_elements(lhs, n_batch=n_batch,
                                             n_dense=0)
    lhs_bcoo = sparse_bcoo.bcoo_fromdense(
        lhs, n_batch=n_batch, nse=nse, index_dtype=jnp.int32
    )
    dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))
    matmat_expected = lax.dot_general(lhs, rhs,
                                      dimension_numbers=dimension_numbers)
    sp_matmat = jit(partial(sparse_bcoo.bcoo_dot_general,
                            dimension_numbers=dimension_numbers))
    # TODO(jakevdp): uncomment once batching is supported again.
    # with self.gpu_matmul_warning_context(
    #     "bcoo_dot_general GPU lowering currently does not support this batch-mode computation.*"):
    matmat_default_lowering_fallback = sp_matmat(lhs_bcoo, rhs)
    self.assertArraysEqual(matmat_expected, matmat_default_lowering_fallback)
@jtu.run_on_devices("gpu")
def test_bcoo_dot_general_oob_and_unsorted_indices_cusparse(self):
    """Tests bcoo dot general with out-of-bound and unsorted indices."""
    rhs = jnp.ones((5, 3), dtype=jnp.float32)

    # It creates out-of-bound indices when nse > nnz.
    lhs_mat_dense = jnp.array([[1, 0, 2, 3, 0], [0, 0, 0, 4, 0]],
                              dtype=jnp.float32)
    lhs_mat_bcoo = sparse.BCOO.fromdense(lhs_mat_dense, nse=7)
    rng = self.rng()
    perm = rng.permutation(lhs_mat_bcoo.nse)
    # Shuffle the stored entries to obtain unsorted indices.
    lhs_mat_bcoo_unsorted = sparse.BCOO(
        (lhs_mat_bcoo.data[perm], lhs_mat_bcoo.indices[perm]),
        shape=lhs_mat_dense.shape)

    dimension_numbers_2d = (([1], [0]), ([], []))
    sp_matmat = jit(partial(sparse_bcoo.bcoo_dot_general,
                            dimension_numbers=dimension_numbers_2d))

    matmat_expected = lax.dot_general(lhs_mat_dense, rhs,
                                      dimension_numbers=dimension_numbers_2d)
    with self.subTest(msg="2D"):
        # Unsorted indices force a fallback off the cusparse lowering,
        # which is surfaced as a warning; the result must still be exact.
        with self.gpu_matmul_warning_context(
            "bcoo_dot_general GPU lowering requires matrices with sorted indices*"):
            matmat_unsorted_fallback = sp_matmat(lhs_mat_bcoo_unsorted, rhs)
        self.assertArraysEqual(matmat_expected, matmat_unsorted_fallback)

    # Same scenario with a 1D (vector) sparse operand.
    lhs_vec_dense = jnp.array([0, 1, 0, 2, 0], dtype=jnp.float32)
    lhs_vec_bcoo = sparse.BCOO.fromdense(lhs_vec_dense, nse=5)
    rng = self.rng()
    perm = rng.permutation(lhs_vec_bcoo.nse)
    lhs_vec_bcoo_unsorted = sparse.BCOO(
        (lhs_vec_bcoo.data[perm], lhs_vec_bcoo.indices[perm]),
        shape=lhs_vec_dense.shape, indices_sorted=False)

    dimension_numbers_1d = (([0], [0]), ([], []))
    sp_vecmat = jit(partial(sparse_bcoo.bcoo_dot_general,
                            dimension_numbers=dimension_numbers_1d))

    vecmat_expected = lax.dot_general(lhs_vec_dense, rhs,
                                      dimension_numbers=dimension_numbers_1d)

    with self.subTest(msg="1D"):
        with self.gpu_matmul_warning_context(
            "bcoo_dot_general GPU lowering requires matrices with sorted indices*"):
            vecmat_unsorted_fallback = sp_vecmat(lhs_vec_bcoo_unsorted, rhs)
        self.assertArraysEqual(vecmat_expected, vecmat_unsorted_fallback)
@jtu.sample_product(
    props=_generate_batched_dot_general_properties(),
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
@jax.default_matmul_precision("float32")
def test_bcoo_rdot_general(
    self, dtype: np.dtype, props: sptu.BatchedDotGeneralProperties
):
    """dot_general with the sparse operand on the right-hand side."""
    rng = jtu.rand_default(self.rng())
    sprng = sptu.rand_bcoo(self.rng(), n_batch=props.n_batch, n_dense=props.n_dense)
    # Dense lhs, sparse rhs: the dimension numbers are reversed to match.
    args_maker = lambda: [rng(props.rhs_shape, dtype),
                          sprng(props.lhs_shape, dtype)]
    dimension_numbers = tuple(d[::-1] for d in props.dimension_numbers)
    sparse_fun = partial(sparse.bcoo_dot_general, dimension_numbers=dimension_numbers)
    dense_fun = partial(lax.dot_general, dimension_numbers=dimension_numbers)

    tol = {np.float64: 1E-12, np.complex128: 1E-12,
           np.float32: 1E-5, np.complex64: 1E-5}

    self._CheckAgainstDense(dense_fun, sparse_fun, args_maker, tol=tol)
    if jnp.issubdtype(dtype, jnp.floating):
        # Dense dimensions not yet fully supported in reverse mode.
        modes = ['fwd'] if props.n_dense != 0 else ['fwd', 'rev']
        self._CheckGradsSparse(dense_fun, sparse_fun, args_maker, modes=modes, atol=tol, rtol=tol)
@jtu.sample_product(
    [
        dict(
            n_batch=n_batch,
            n_dense=n_dense,
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            dimension_numbers=dimension_numbers,
        )
        for lhs_shape, rhs_shape, dimension_numbers, n_batch, n_dense in [
            ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 1, 0),
            ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 2, 0),
            ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 1, 0),
            ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 2, 0),
            (
                (3, 4, 2, 4),
                (3, 4, 3, 2),
                (([2], [3]), ([0, 1], [0, 1])),
                2,
                0,
            ),
            (
                (3, 4, 2, 4),
                (3, 4, 3, 2),
                (([2], [3]), ([0, 1], [0, 1])),
                2,
                1,
            ),
        ]
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
@jax.default_matmul_precision("float32")
def test_bcoo_dot_general_partial_batch(
    self, lhs_shape, rhs_shape, dtype, dimension_numbers, n_batch, n_dense
):
    """dot_general with size-1 (broadcasted) batches of data and/or indices."""
    rng = jtu.rand_small(self.rng())
    rng_sparse = sptu.rand_sparse(self.rng())

    X = rng_sparse(lhs_shape, dtype)
    nse = sparse.util._count_stored_elements(X, n_batch=n_batch,
                                             n_dense=n_dense)
    data, indices = sparse_bcoo._bcoo_fromdense(X, nse=nse, n_batch=n_batch, n_dense=n_dense)
    Y = rng(rhs_shape, dtype)

    def f_dense(X, Y):
        return lax.dot_general(X, Y, dimension_numbers=dimension_numbers)

    def f_sparse(data, indices, Y):
        return sparse_bcoo._bcoo_dot_general(data, indices, Y, lhs_spinfo=sparse_util.SparseInfo(X.shape),
                                             dimension_numbers=dimension_numbers, preferred_element_type=None)

    # Try all four combinations of full vs. broadcast-batched data/indices.
    for data, indices in itertools.product([data, data[:1]], [indices, indices[:1]]):
        X = sparse_bcoo._bcoo_todense(data, indices, spinfo=sparse_util.SparseInfo(X.shape))
        self.assertAllClose(f_dense(X, Y), f_sparse(data, indices, Y))
@jtu.sample_product(
    props=_generate_bcoo_dot_general_sampled_properties(),
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
@jax.default_matmul_precision("float32")
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_bcoo_dot_general_sampled(self, props, dtype):
    """bcoo_dot_general_sampled equals dense dot_general followed by extract."""
    rng = jtu.rand_default(self.rng())
    sprng = sptu.rand_bcoo(self.rng(), n_batch=props.n_batch, n_dense=props.n_dense)
    # Determine the output shape abstractly to sample output indices from it.
    out = jax.eval_shape(partial(lax.dot_general, dimension_numbers=props.dimension_numbers),
                         jax.ShapeDtypeStruct(props.lhs_shape, dtype),
                         jax.ShapeDtypeStruct(props.rhs_shape, dtype))
    args_maker = lambda: [rng(props.lhs_shape, dtype), rng(props.rhs_shape, dtype),
                          sprng(out.shape, dtype).indices]

    def dense_fun(lhs, rhs, indices):
        AB = lax.dot_general(lhs, rhs, dimension_numbers=props.dimension_numbers)
        return sparse_bcoo._bcoo_extract(indices, AB)

    def sparse_fun(lhs, rhs, indices):
        return sparse.bcoo_dot_general_sampled(
            lhs, rhs, indices, dimension_numbers=props.dimension_numbers)

    self._CheckAgainstDense(dense_fun, sparse_fun, args_maker)
    if jnp.issubdtype(dtype, jnp.floating):
        # Note: forward mode fails for some sparse layouts.
        # TODO(jakevdp) fix forward-mode autodiff & enable tests here.
        self._CheckGradsSparse(dense_fun, sparse_fun, args_maker, modes=['rev'], argnums=[0, 1])
@jtu.sample_product(
    [
        {
            "xshape": xshape,
            "yshape": yshape,
            "lhs_contract": lhs_contract,
            "rhs_contract": rhs_contract,
        }
        for (xshape, yshape, lhs_contract, rhs_contract) in [
            [(4, 3), (4, 5), (0,), (0,)],
            [(3, 4), (4, 5), (1,), (0,)],
            [(4, 3), (5, 4), (0,), (1,)],
            [(3, 4), (5, 4), (1,), (1,)],
            [(3,), (3,), (), ()],
            [(3,), (5,), (), ()],
            [(5,), (3,), (), ()],
            [(5,), (5,), (), ()],
        ]
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
    n_batch=[0, 1, 2],
)
@jax.default_matmul_precision("float32")
def test_bcoo_dot_general_sampled_fast_cases(
    self, xshape, yshape, lhs_contract, rhs_contract, n_batch, dtype):
    """Matmul/outer-product shapes exercising the fast sampled code paths."""
    rng = jtu.rand_default(self.rng())
    sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch)
    dimension_numbers = ((lhs_contract, rhs_contract), ([], []))

    # Output indices are sampled from the abstract output shape.
    out_shape = jax.eval_shape(partial(lax.dot_general, dimension_numbers=dimension_numbers),
                               jax.ShapeDtypeStruct(xshape, dtype), jax.ShapeDtypeStruct(yshape, dtype))

    args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype),
                          sprng(out_shape.shape, out_shape.dtype).indices]

    def f1(x, y, indices):
        # Reference: dense product followed by extraction at the indices.
        mat_full = lax.dot_general(x, y, dimension_numbers=dimension_numbers)
        return sparse_bcoo._bcoo_extract(indices, mat_full)

    def f2(x, y, indices):
        return sparse.bcoo_dot_general_sampled(x, y, indices, dimension_numbers=dimension_numbers)

    self._CheckAgainstNumpy(f1, f2, args_maker, tol=sptu.MATMUL_TOL)
    self._CompileAndCheck(f2, args_maker, tol=sptu.MATMUL_TOL)
@jtu.sample_product(
    [
        dict(
            n_batch=n_batch,
            n_dense=n_dense,
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            dimension_numbers=dimension_numbers,
        )
        for lhs_shape, rhs_shape, dimension_numbers, n_batch, n_dense in [
            ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 1, 0),
            ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 1, 1),
            ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0])), 2, 0),
            ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 1, 0),
            ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 1, 1),
            ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1])), 2, 0),
            (
                (3, 4, 2, 4),
                (3, 4, 3, 2),
                (([2], [3]), ([0, 1], [0, 1])),
                2,
                0,
            ),
            (
                (3, 4, 2, 4),
                (3, 4, 3, 2),
                (([2], [3]), ([0, 1], [0, 1])),
                2,
                1,
            ),
        ]
    ],
    dtype=jtu.dtypes.floating,
)
@jax.default_matmul_precision("float32")
def test_bcoo_dot_general_sampled_ad(self, lhs_shape, rhs_shape, dtype, dimension_numbers, n_batch, n_dense):
    """Jacobians of sampled dot_general match the dense+extract reference."""
    rng = jtu.rand_default(self.rng())
    sprng = sptu.rand_sparse(self.rng())
    out_shape = lax.dot_general(
        jnp.zeros(lhs_shape),
        jnp.zeros(rhs_shape),
        dimension_numbers=dimension_numbers,
    ).shape

    lhs = rng(lhs_shape, dtype)
    rhs = rng(rhs_shape, dtype)
    # Sample output indices from a random sparse matrix of the output shape.
    indices = sparse.BCOO.fromdense(sprng(out_shape, dtype),
                                    n_batch=n_batch, n_dense=n_dense).indices

    def dense_fun(lhs, rhs, indices):
        AB = lax.dot_general(lhs, rhs, dimension_numbers=dimension_numbers)
        return sparse_bcoo._bcoo_extract(indices, AB)

    def sparse_fun(lhs, rhs, indices):
        return sparse.bcoo_dot_general_sampled(
            lhs, rhs, indices, dimension_numbers=dimension_numbers
        )

    jf_dense = jax.jacfwd(dense_fun)(lhs, rhs, indices)
    jf_sparse = jax.jacfwd(sparse_fun)(lhs, rhs, indices)
    jr_dense = jax.jacrev(dense_fun)(lhs, rhs, indices)
    jr_sparse = jax.jacrev(sparse_fun)(lhs, rhs, indices)

    # fwd and rev modes of the sparse op must agree with each other and
    # with the dense reference.
    self.assertAllClose(jf_sparse, jf_dense)
    self.assertAllClose(jr_sparse, jr_dense)
    self.assertAllClose(jf_sparse, jr_sparse)
@jtu.sample_product(
    [
        dict(
            lhs_n_batch=lhs_n_batch,
            rhs_n_batch=rhs_n_batch,
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            dimension_numbers=dimension_numbers,
        )
        for lhs_shape, lhs_n_batch, rhs_shape, rhs_n_batch, dimension_numbers in [
            # (batched) outer products (no contraction)
            ((5,), 0, (6,), 0, (([], []), ([], []))),
            ((3, 5), 0, (2, 4), 0, (([], []), ([], []))),
            ((3, 5), 1, (3, 4), 1, (([], []), ([0], [0]))),
            # (batched) vector-vector products
            ((5,), 0, (5,), 0, (([0], [0]), ([], []))),
            ((7,), 0, (7,), 0, (([0], [0]), ([], []))),
            ((5, 7), 1, (7,), 0, (([1], [0]), ([], []))),
            ((2, 3, 4), 2, (2, 4), 1, (([2], [1]), ([0], [0]))),
            ((2, 3, 4), 2, (2, 4), 1, (([2], [1]), ([], []))),
            ((2, 3, 4), 2, (3, 4), 1, (([2], [1]), ([1], [0]))),
            ((2, 3, 4), 2, (3, 4), 1, (([2], [1]), ([], []))),
            # (batched) matrix-vector products
            ((5, 7), 0, (7,), 0, (([1], [0]), ([], []))),
            ((2, 3, 4), 1, (4,), 0, (([2], [0]), ([], []))),
            ((2, 3, 4), 1, (2, 4), 1, (([2], [1]), ([0], [0]))),
            ((3, 2, 4), 1, (3, 4), 1, (([2], [1]), ([0], [0]))),
            ((2, 3, 4), 0, (2,), 0, (([0], [0]), ([], []))),
            # (batched) matrix-matrix products
            ((5, 7), 0, (7, 3), 0, (([1], [0]), ([], []))),
            ((2, 3, 4), 1, (4, 3), 0, (([2], [0]), ([], []))),
            ((2, 3, 4), 1, (2, 4, 3), 1, (([2], [1]), ([0], [0]))),
            # more general operations
            (
                (2, 3, 4, 3),
                1,
                (2, 4, 3, 4),
                1,
                (([2, 3], [1, 2]), ([0], [0])),
            ),
            (
                (2, 3, 4, 3, 1),
                2,
                (3, 2, 3, 4),
                2,
                (([2, 3], [3, 2]), ([0, 1], [1, 0])),
            ),
        ]
    ],
    swap=[True, False],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
@jax.default_matmul_precision("float32")
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_bcoo_spdot_general(self, lhs_shape, lhs_n_batch, rhs_shape, rhs_n_batch, dtype, swap, dimension_numbers):
    """Sparse-sparse dot_general against the dense reference."""
    if swap:
        # Also exercise the mirrored case by swapping operand roles.
        dimension_numbers = tuple(d[::-1] for d in dimension_numbers)
        lhs_shape, rhs_shape = rhs_shape, lhs_shape
        lhs_n_batch, rhs_n_batch = rhs_n_batch, lhs_n_batch

    lhs_n_sparse = len(lhs_shape) - lhs_n_batch
    rhs_batch = dimension_numbers[1][1]
    lhs_contracting = dimension_numbers[0][0]
    # Unused rhs batch dims combined with unused lhs sparse dims is
    # unsupported and must raise (asserted below).
    should_error = (rhs_n_batch > len(rhs_batch) and lhs_n_sparse > len(lhs_contracting))

    sprng = sptu.rand_bcoo(self.rng())
    args_maker = lambda: [sprng(lhs_shape, dtype, n_batch=lhs_n_batch),
                          sprng(rhs_shape, dtype, n_batch=rhs_n_batch)]

    def f_dense(x, y):
        return lax.dot_general(x, y, dimension_numbers=dimension_numbers)

    def f_sparse(xsp, ysp):
        return sparse.bcoo_dot_general(xsp, ysp, dimension_numbers=dimension_numbers)

    if should_error:
        with self.assertRaisesRegex(ValueError, ".*cannot have unused batch dims on rhs with unused sparse dims on lhs."):
            f_sparse(*args_maker())
    else:
        tol = {"float32": 1E-5, "complex64": 1E-5, "float64": 1E-14, "complex128": 1E-14}
        self._CheckAgainstDense(f_dense, f_sparse, args_maker, tol=tol)
        self._CheckBatchingSparse(f_dense, f_sparse, args_maker, tol=tol)
        if jnp.issubdtype(dtype, jnp.floating):
            self._CheckGradsSparse(f_dense, f_sparse, args_maker, modes=['fwd'])
@jtu.sample_product(lhs_shape=[(5,), (4, 5)], rhs_shape=[(5,), (5, 4)])
@jax.default_matmul_precision("float32")
def test_bcoo_spdot_general_nse(self, lhs_shape, rhs_shape):
    """Sparse-sparse matmul reports nse = min(lhs.nse * rhs.nse, out.size)."""
    make_bcoo = sptu.rand_bcoo(self.rng())
    lhs = make_bcoo(lhs_shape, jnp.float32)
    rhs = make_bcoo(rhs_shape, jnp.float32)
    result = lhs @ rhs
    # Values must match the dense product...
    self.assertArraysAllClose(result.todense(), lhs.todense() @ rhs.todense())
    # ...and nse is the product of the operand nse, capped at the output size.
    self.assertEqual(result.nse, min(lhs.nse * rhs.nse, result.size))
@jtu.ignore_warning(message="bcoo_dot_general cusparse/hipsparse lowering not available")
def test_bcoo_spdot_general_ad_bug(self):
    """Regression test for https://github.com/jax-ml/jax/issues/10163."""
    A_indices = jnp.array([[0, 1], [0, 2], [1, 1], [1, 2], [1, 0]])
    A_values = jnp.array([-2.0, 1.0, -1.0, 0.5, 2.0])
    A_shape = (2, 3)

    B_indices = jnp.array([[0, 2], [2, 1], [0, 3], [1, 3], [1, 0], [0, 0]])
    B_values = jnp.array([10.0, 100.0, 1000.0, -5.0, -50.0, -500.0])
    B_shape = (3, 4)

    def make_A(v):
        return sparse.BCOO((v, A_indices), shape=A_shape)

    def make_B(v):
        return sparse.BCOO((v, B_indices), shape=B_shape)

    # Three equivalent products: sparse@sparse, sparse@dense, dense@dense.
    def sp_sp_product(v1, v2):
        return (make_A(v1) @ make_B(v2)).todense()

    def sp_de_product(v1, v2):
        return make_A(v1) @ make_B(v2).todense()

    def de_de_product(v1, v2):
        return make_A(v1).todense() @ make_B(v2).todense()

    # Forward-mode jacobians w.r.t. B's values must all agree.
    reference = jax.jacfwd(de_de_product, argnums=1)(A_values, B_values)
    self.assertAllClose(jax.jacfwd(sp_sp_product, argnums=1)(A_values, B_values), reference)
    self.assertAllClose(jax.jacfwd(sp_de_product, argnums=1)(A_values, B_values), reference)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(), (5,), (5, 8), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.numeric,
)
def test_bcoo_slice(self, shape, dtype, n_batch, n_dense):
    """bcoo_slice matches lax.slice and keeps the sparse layout tight."""
    rng = self.rng()
    sprng = sptu.rand_bcoo(rng, n_batch=n_batch, n_dense=n_dense)
    args_maker = lambda: [sprng(shape, dtype)]

    # Draw random (start, limit) pairs per axis, sorted so start <= limit.
    slices = rng.randint(0, np.array(shape) + 1, (2, len(shape))).T
    slices.sort(1)
    start_indices, limit_indices = unzip2(slices)
    strides = list(rng.randint(1, 4, len(shape)))
    kwds = dict(start_indices=start_indices, limit_indices=limit_indices, strides=strides)

    dense_func = partial(lax.slice, **kwds)
    sparse_func = partial(sparse.bcoo_slice, **kwds)

    self._CheckAgainstDense(dense_func, sparse_func, args_maker)
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(dense_func, sparse_func, args_maker)

    mat, = args_maker()
    out = sparse_func(mat)

    # Array layout is the same
    self.assertEqual(mat.n_batch, out.n_batch)
    self.assertEqual(mat.n_sparse, out.n_sparse)
    self.assertEqual(mat.n_dense, out.n_dense)

    # Unnecessary padding eliminated
    max_nse = math.prod(out.shape[out.n_batch: out.n_batch + out.n_sparse])
    self.assertLessEqual(out.nse, max_nse)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(), (5,), (5, 8), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.numeric,
)
def test_bcoo_dynamic_slice(self, shape, dtype, n_batch, n_dense):
    """bcoo_dynamic_slice matches lax.dynamic_slice, layout preserved."""
    rng = self.rng()
    sprng = sptu.rand_bcoo(rng, n_batch=n_batch, n_dense=n_dense)
    args_maker = lambda: [sprng(shape, dtype)]
    rng = self.rng()

    # Note: test out-of-range start indices
    start_indices = rng.randint(-max(shape, default=0), max(shape, default=0), len(shape))
    slice_sizes = rng.randint(0, shape, len(shape))
    kwds = dict(start_indices=start_indices, slice_sizes=slice_sizes)
    dense_func = partial(lax.dynamic_slice, **kwds)
    sparse_func = partial(sparse.bcoo_dynamic_slice, **kwds)

    self._CheckAgainstDense(dense_func, sparse_func, args_maker)
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(dense_func, sparse_func, args_maker)

    mat, = args_maker()
    out = sparse_func(mat)

    # Array layout is the same
    self.assertEqual(mat.n_batch, out.n_batch)
    self.assertEqual(mat.n_sparse, out.n_sparse)
    self.assertEqual(mat.n_dense, out.n_dense)

    # Unnecessary padding eliminated
    max_nse = math.prod(out.shape[out.n_batch: out.n_batch + out.n_sparse])
    self.assertLessEqual(out.nse, max_nse)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=n_batch, n_dense=n_dense, idx=idx)
        for shape, idx in [
            [(5,), np.index_exp[:]],
            [(5,), np.index_exp[4]],
            [(5,), np.index_exp[::2]],
            [(5,), np.index_exp[1::2]],
            [(5,), 1],
            [(3, 4), np.index_exp[1]],
            [(3, 4), np.index_exp[1, 2]],
            [(3, 4), np.index_exp[np.array([1, 2])]],
            [(3, 4), np.index_exp[np.array([[1], [2]]), 0]],
            [(3, 4), np.index_exp[np.array([[1], [2]]), 1:]],
            [(3, 4), np.index_exp[np.array([True, False, True])]],
            [(3, 4), np.index_exp[:2, np.array([True, False, True, False])]],
            [(3, 4), np.index_exp[None, 0, np.array([[2]])]],
            [(3, 4, 5), np.index_exp[2]],
            [(3, 4, 5), np.index_exp[:, 2]],
        ]
        for n_batch in range(len(shape) + 1)
        for n_dense in [0]  # TODO(jakevdp): add tests with n_dense
    ],
    dtype=jtu.dtypes.numeric,
)
def test_bcoo_getitem(self, shape, dtype, n_batch, n_dense, idx):
    """BCOO __getitem__ matches NumPy-style indexing on the dense array."""
    sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)
    args_maker = lambda: [sprng(shape, dtype)]
    # The same indexing expression is applied to both sparse and dense inputs.
    fun = lambda x: x[idx]

    self._CheckAgainstDense(fun, fun, args_maker)
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(fun, fun, args_maker)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=n_batch, n_dense=n_dense)
        for shape in [(2,), (3, 4), (5, 6, 2)]
        for n_batch in range(len(shape) + 1)
        for n_dense in [0]  # TODO(jakevdp): add tests with n_dense
    ],
    dtype=jtu.dtypes.numeric,
)
def test_bcoo_iter(self, shape, dtype, n_batch, n_dense):
    """Iterating a BCOO matrix matches iterating its dense counterpart."""
    sprng = sptu.rand_sparse(self.rng())
    args_maker = lambda: [sprng(shape, dtype)]
    # `list` consumes the iterator protocol of both representations.
    self._CheckAgainstDense(list, list, args_maker)
@jtu.sample_product(
    [
        dict(
            shape=shape,
            n_batch=layout.n_batch,
            n_dense=layout.n_dense,
            nse=nse,
        )
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
        for nse in [None, math.prod(shape) - 1]
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
    remove_zeros=[True, False],
)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_bcoo_sum_duplicates(self, shape, dtype, n_batch, n_dense, nse, remove_zeros):
    """sum_duplicates merges repeated indices without changing the dense value."""
    sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)

    def args_maker():
        # Create a matrix with duplicate indices
        M = sprng(shape, dtype)
        new_indices = jnp.concatenate([M.indices, M.indices], axis=n_batch)
        new_data = jnp.concatenate([M.data, M.data], axis=n_batch)
        return [sparse.BCOO((new_data, new_indices), shape=M.shape)]

    dense_fun = lambda x: x

    def sparse_fun(x):
        out = x.sum_duplicates(nse=nse, remove_zeros=remove_zeros)
        # The result must always carry unique indices; a requested nse
        # must be honored exactly.
        self.assertTrue(out.unique_indices)
        if nse:
            self.assertEqual(out.nse, nse)
        return out

    # Without a specified nse the output size is data-dependent, so the
    # jit path is only checked when nse is given.
    self._CheckAgainstDense(dense_fun, sparse_fun, args_maker, check_jit=(nse is not None))
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(dense_fun, sparse_fun, args_maker)
    if nse is not None:
        self._CheckBatchingSparse(dense_fun, sparse_fun, args_maker)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
def test_bcoo_sort_indices(self, shape, dtype, n_batch, n_dense):
    """sort_indices restores lexicographic order without changing values."""
    rng_sparse = sptu.rand_sparse(self.rng(), rand_method=jtu.rand_some_zero)
    M = sparse.BCOO.fromdense(rng_sparse(shape, dtype), n_batch=n_batch, n_dense=n_dense)
    # Deliberately unsort the indices by reversing along the nse axis.
    M.indices = M.indices[..., ::-1, :]
    M.indices_sorted = False
    M_sorted = M.sort_indices()
    self.assertArraysEqual(M.todense(), M_sorted.todense())
    self.assertEqual(M.unique_indices, M_sorted.unique_indices)
    self.assertEqual(True, M_sorted.indices_sorted)

    indices = M_sorted.indices
    if indices.size > 0:
        # Per batch, lexsort of already-sorted index rows must yield the
        # identity permutation (an iota along the last axis).
        flatind = indices.reshape(-1, *indices.shape[-2:]).transpose(0, 2, 1)
        sorted = jax.vmap(jnp.lexsort)(flatind[:, ::-1])
        self.assertArraysEqual(sorted, lax.broadcasted_iota(sorted.dtype, sorted.shape, sorted.ndim - 1))
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape, min_n_batch=1)
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
def test_bcoo_sort_indices_batching(self, shape, dtype, n_batch, n_dense):
    """sort_indices composes with vmap over every batch axis."""
    rng_sparse = sptu.rand_sparse(self.rng(), rand_method=jtu.rand_some_zero)
    M = sparse.BCOO.fromdense(rng_sparse(shape, dtype), n_batch=n_batch, n_dense=n_dense)
    # Deliberately unsort the indices by reversing along the nse axis.
    M.indices = M.indices[..., ::-1, :]
    M.indices_sorted = False

    identity = lambda M: M
    sort_ind = lambda M: M.sort_indices()
    # Wrap both functions in one vmap layer per batch dimension.
    for b in range(n_batch):
        identity = jax.vmap(identity, in_axes=b)
        sort_ind = jax.vmap(sort_ind, in_axes=b)
    M_sorted = sort_ind(M)
    M_expected = identity(M)
    self.assertArraysEqual(M_expected.todense(), M_sorted.todense())
    self.assertEqual(M.unique_indices, M_sorted.unique_indices)
    self.assertEqual(True, M_sorted.indices_sorted)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.floating,
)
def test_bcoo_sort_indices_ad(self, shape, dtype, n_batch, n_dense):
    """Forward- and reverse-mode Jacobians of sort_indices must agree."""
    rng_sparse = sptu.rand_sparse(self.rng(), rand_method=jtu.rand_some_zero)
    M = sparse.BCOO.fromdense(rng_sparse(shape, dtype), n_batch=n_batch, n_dense=n_dense)
    M.indices = M.indices[..., ::-1, :]

    def sort_indices(data):
        # Differentiate with respect to the data buffer only; indices are fixed.
        return sparse.BCOO((data, M.indices), shape=M.shape).sort_indices().data

    data_dot_fwd = jax.jacfwd(sort_indices)(M.data)
    data_dot_rev = jax.jacrev(sort_indices)(M.data)
    self.assertAllClose(data_dot_fwd, data_dot_rev)
def test_bcoo_sort_indices_broadcasted(self):
    """sort_indices must preserve broadcasted (size-1) batch dimensions."""
    rng_index = jtu.rand_int(self.rng(), low=0, high=10)
    rng_data = jtu.rand_default(self.rng())
    # Construct matrix with three broadcasted batch dimensions.
    indices = rng_index((1, 3, 1, 10, 2), dtype='int32')
    data = rng_data((1, 1, 4, 10, 3), dtype='int32')
    shape = (2, 3, 4, 5, 4, 3)
    mat = sparse.BCOO((data, indices), shape=shape)
    # Sorting must not materialize broadcasted batch dims; data may broadcast
    # up to the elementwise max of the indices/data batch shapes.
    indices_shape_out = indices.shape
    data_shape_out = (*map(max, indices.shape[:3], data.shape[:3]), *data.shape[3:])
    mat_sorted = sparse.bcoo_sort_indices(mat)
    assert mat_sorted.indices.shape == indices_shape_out
    assert mat_sorted.data.shape == data_shape_out
    self.assertArraysEqual(mat.todense(), mat_sorted.todense())
    # The same shape invariants must hold under jit.
    mat_sorted_jit = jit(sparse.bcoo_sort_indices)(mat)
    assert mat_sorted_jit.indices.shape == indices_shape_out
    assert mat_sorted_jit.data.shape == data_shape_out
    self.assertArraysEqual(mat.todense(), mat_sorted_jit.todense())
def test_bcoo_sum_duplicates_inferred_nse(self):
    """sum_duplicates() should infer a reduced nse after duplicate indices appear."""
    mat = sparse.BCOO.fromdense(jnp.diag(jnp.arange(4)))
    self.assertEqual(mat.nse, 3)
    # Adding the transpose of a diagonal matrix stores each diagonal index twice.
    doubled = mat + mat.T
    self.assertEqual(doubled.nse, 6)
    deduped = doubled.sum_duplicates()
    self.assertEqual(deduped.nse, 3)
    self.assertArraysEqual(doubled.todense(), deduped.todense())
def test_bcoo_sum_duplicates_remove_zeros(self):
    """remove_zeros controls whether explicitly-stored zeros are dropped."""
    vals = jnp.array([0, 1, 0, 0])
    idx = jnp.array([[0], [1], [2], [3]])
    mat = sparse.BCOO((vals, idx), shape=(4,))
    self.assertEqual(mat.nse, 4)
    # With remove_zeros=True only the single nonzero entry is retained.
    pruned = mat.sum_duplicates(remove_zeros=True)
    self.assertArraysEqual(mat.todense(), pruned.todense())
    self.assertEqual(pruned.nse, 1)
    # With remove_zeros=False the explicitly-stored zeros are kept.
    kept = mat.sum_duplicates(remove_zeros=False)
    self.assertArraysEqual(mat.todense(), kept.todense())
    self.assertEqual(kept.nse, mat.nse)
def test_bcoo_sum_duplicates_padding(self):
    """sum_duplicates must round-trip a matrix containing padding entries.

    Regression test for https://github.com/jax-ml/jax/issues/8163
    """
    size = 3
    vals = jnp.array([1, 0, 0])
    # Entries whose index equals `size` are out-of-bounds padding.
    idx = jnp.array([1, size, size])[:, None]
    mat = sparse.BCOO((vals, idx), shape=(3,))
    result = mat.sum_duplicates(nse=mat.nse)
    self.assertArraysEqual(mat.todense(), result.todense())
    self.assertArraysEqual(mat.indices, result.indices)
    self.assertArraysEqual(mat.data, result.data)
@jtu.sample_product(
    [
        dict(
            shape=shape,
            n_batch=layout.n_batch,
            n_dense=layout.n_dense,
            axes=axes,
        )
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
        for naxes in range(len(shape))
        for axes in itertools.combinations(range(len(shape)), naxes)
    ],
    dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
def test_bcoo_reduce_sum(self, shape, dtype, n_batch, n_dense, axes):
    """bcoo_reduce_sum must match a dense sum over every subset of axes."""
    sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)
    args_maker = lambda: [sprng(shape, dtype)]
    sparse_fun = partial(sparse.bcoo_reduce_sum, axes=axes)
    # A plain lambda suffices; the original wrapped it in a no-op
    # functools.partial with no bound arguments.
    dense_fun = lambda x: x.sum(axes)
    tol = {np.float64: 1E-14}
    self._CheckAgainstDense(dense_fun, sparse_fun, args_maker, tol=tol)
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(dense_fun, sparse_fun, args_maker)
@jtu.sample_product(
    [
        dict(
            shape=shape,
            dimensions=dimensions,
            n_batch=layout.n_batch,
            n_dense=layout.n_dense,
        )
        for shape, dimensions in [
            [(1,), (0,)],
            [(1,), (-1,)],
            [(2, 1, 4), (1,)],
            [(2, 1, 3, 1), (1,)],
            [(2, 1, 3, 1), (1, 3)],
            [(2, 1, 3, 1), (3,)],
        ]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.numeric,
)
def test_bcoo_squeeze(self, shape, dtype, dimensions, n_batch, n_dense):
    """bcoo_squeeze must match lax.squeeze when removing unit dimensions."""
    sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)
    args_maker = lambda: [sprng(shape, dtype)]
    dense_func = partial(lax.squeeze, dimensions=dimensions)
    sparse_func = partial(sparse.bcoo_squeeze, dimensions=dimensions)
    self._CheckAgainstDense(dense_func, sparse_func, args_maker)
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(dense_func, sparse_func, args_maker)
@jtu.sample_product(
    [
        dict(batch_shapes=shapes, batch_perm=perm)
        for shapes in COMPATIBLE_SHAPE_PAIRS
        for perm in itertools.permutations(range(len(shapes[0])))
    ],
    [
        dict(sparse_shapes=shapes, sparse_perm=perm)
        for shapes in COMPATIBLE_SHAPE_PAIRS
        for perm in itertools.permutations(range(len(shapes[0])))
    ],
    [
        dict(dense_shapes=shapes, dense_perm=perm)
        for shapes in [[(), ()]]  # TODO(jakevdp) add support for dense shapes
        for perm in itertools.permutations(range(len(shapes[0])))
    ],
    dtype=jtu.dtypes.numeric,
)
def test_bcoo_reshape(
    self,
    batch_shapes,
    sparse_shapes,
    dense_shapes,
    batch_perm,
    sparse_perm,
    dense_perm,
    dtype,
):
    """bcoo_reshape must match lax.reshape when dims stay within their class."""
    # Sparse reshapes cannot mix between sparse, dense, and batch dimensions.
    shape = (*batch_shapes[0], *sparse_shapes[0], *dense_shapes[0])
    new_sizes = (*batch_shapes[1], *sparse_shapes[1], *dense_shapes[1])
    n_batch = len(batch_shapes[0])
    n_sparse = len(sparse_shapes[0])
    n_dense = len(dense_shapes[0])
    # Offset each permutation so it acts only within its own class of dims.
    dimensions = (
        *batch_perm,
        *(dim + n_batch for dim in sparse_perm),
        *(dim + n_batch + n_sparse for dim in dense_perm),
    )
    rng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)
    args_maker = lambda: [rng(shape, dtype)]
    sparse_func = partial(sparse.bcoo_reshape, new_sizes=new_sizes, dimensions=dimensions)
    dense_func = partial(lax.reshape, new_sizes=new_sizes, dimensions=dimensions)
    self._CheckAgainstDense(dense_func, sparse_func, args_maker)
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(dense_func, sparse_func, args_maker)
def test_bcoo_reshape_error(self):
    """Reshapes that mix dimension classes or hit broadcasted batches must raise."""
    mat = sparse.BCOO.fromdense(jnp.ones((2, 2, 3)), n_batch=1)
    # Mixing the batch dim into the sparse dims is disallowed.
    with self.assertRaisesRegex(ValueError, ".*cannot mix batch and sparse dimensions.*"):
        mat.reshape(3, 2, 2)
    # Truncating the data to a size-1 batch creates a broadcasted batch dim.
    bcast_mat = sparse.BCOO((mat.data[:1], mat.indices), shape=mat.shape)
    with self.assertRaisesRegex(NotImplementedError, "reshape of arrays with broadcasted batch dimensions."):
        bcast_mat.reshape(2, 3, 2)
@jtu.sample_product(
    [
        dict(
            shape=shape,
            dimensions=dimensions,
            n_batch=layout.n_batch,
            n_dense=layout.n_dense,
        )
        for shape in [(3,), (3, 4), (3, 4, 5)]
        for dimensions in sptu.iter_subsets(range(len(shape)))
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.numeric,
)
def test_bcoo_rev(self, shape, dtype, n_batch, n_dense, dimensions):
    """bcoo_rev must match lax.rev for every subset of dimensions."""
    sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)
    args_maker = lambda: [sprng(shape, dtype)]
    dense_func = partial(lax.rev, dimensions=dimensions)
    sparse_func = partial(sparse.bcoo_rev, dimensions=dimensions)
    self._CheckAgainstDense(dense_func, sparse_func, args_maker)
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(dense_func, sparse_func, args_maker)
def test_bcsr_matmul_with_out_of_bounds_data(self):
    """BCSR matmul must ignore stored entries beyond the indptr extent.

    Simple regression test of a failure mode for cuSparse.
    """
    vals = jnp.array([1, 2, 3, 4], dtype='float32')
    cols = jnp.array([0, 1, 2, 3])
    # indptr ends at 3, so the final stored value lies out of bounds.
    rowptr = jnp.array([0, 1, 3, 3])
    mat = sparse.BCSR((vals, cols, rowptr), shape=(3, 4))
    vec = jnp.array([1, 2, 3, 4], dtype='float32')
    sparse_result = jax.jit(operator.matmul)(mat, vec)
    dense_result = jax.jit(operator.matmul)(mat.todense(), vec)
    self.assertAllClose(sparse_result, dense_result)
@jtu.sample_product(
    [
        dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
        for lhs_shape, rhs_shape in [
            [(3, 4), (4,)],
            [(3, 4), (4, 5)],
            [(3, 4), (2, 4, 5)],
        ]
    ],
    lhs_dtype=all_dtypes,
    rhs_dtype=all_dtypes,
)
@jax.default_matmul_precision("float32")
@jtu.ignore_warning(category=sparse.CuSparseEfficiencyWarning)
def test_bcsr_matmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
    """BCSR @ dense must match dense @ dense across dtype combinations."""
    # Note: currently, batch dimensions in matmul must correspond to batch
    # dimensions in the sparse representation.
    n_batch_lhs = max(0, len(lhs_shape) - 2)
    rng = jtu.rand_default(self.rng())
    sprng = sptu.rand_bcsr(self.rng())
    args_maker = lambda: [sprng(lhs_shape, lhs_dtype, n_batch=n_batch_lhs),
                          jnp.array(rng(rhs_shape, rhs_dtype))]
    tol = {np.float64: 1E-7, np.complex128: 1E-6,
           np.float32: 2E-6, np.complex64: 2E-6}
    with jtu.strict_promotion_if_dtypes_match([lhs_dtype, rhs_dtype]):
        self._CheckAgainstDense(operator.matmul, operator.matmul, args_maker,
                                tol=tol)
@jtu.sample_product(
    [
        dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
        for lhs_shape, rhs_shape in [
            [(3,), (3,)],
            [(3, 4), (4,)],
            [(4,), (4, 5)],
            [(3, 4), (4, 5)],
            [(3, 4), (2, 4, 5)],
            [(2, 3, 4), (4, 5)],
            [(2, 3, 4), (2, 4, 5)],
        ]
    ],
    lhs_dtype=all_dtypes,
    rhs_dtype=all_dtypes,
)
@jax.default_matmul_precision("float32")
@jtu.ignore_warning(category=sparse.CuSparseEfficiencyWarning)
def test_bcoo_matmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
    """dense @ sparse and sparse @ dense must both match dense @ dense."""
    if (jtu.test_device_matches(["cuda"]) and
        _is_required_cuda_version_satisfied(12000)):
        raise unittest.SkipTest("Triggers a bug in cuda-12 b/287344632")
    # Note: currently, batch dimensions in matmul must correspond to batch
    # dimensions in the sparse representation.
    n_batch_lhs = max(0, len(lhs_shape) - 2)
    n_batch_rhs = max(0, len(rhs_shape) - 2)
    rng = jtu.rand_default(self.rng())
    sprng = sptu.rand_bcoo(self.rng())
    args_maker_de_sp = lambda: [jnp.array(rng(lhs_shape, lhs_dtype)),
                                sprng(rhs_shape, rhs_dtype, n_batch=n_batch_rhs)]
    args_maker_sp_de = lambda: [sprng(lhs_shape, lhs_dtype, n_batch=n_batch_lhs),
                                jnp.array(rng(rhs_shape, rhs_dtype))]
    tol = {np.float64: 1E-7, np.complex128: 1E-7,
           np.float32: 1E-6, np.complex64: 1E-6}
    with jtu.strict_promotion_if_dtypes_match([lhs_dtype, rhs_dtype]):
        self._CheckAgainstDense(operator.matmul, operator.matmul, args_maker_de_sp, tol=tol)
        self._CheckAgainstDense(operator.matmul, operator.matmul, args_maker_sp_de, tol=tol)
@jtu.sample_product(
    [
        dict(
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            n_batch=layout.n_batch,
            n_dense=layout.n_dense,
        )
        for lhs_shape, rhs_shape in [
            [(3,), ()],
            [(3,), (1,)],
            [(3,), (3,)],
            [(3, 4), ()],
            [(3, 4), (4,)],
            [(3, 4), (3, 1)],
            [(3, 4), (3, 4)],
            [(3, 4, 5), (4, 5)],
            [(3, 4, 5), (3, 1, 1)],
            [(3, 4, 5), (1, 4, 1)],
        ]
        for layout in sptu.iter_sparse_layouts(lhs_shape)
    ],
    lhs_dtype=all_dtypes,
    rhs_dtype=all_dtypes,
)
@jax.numpy_rank_promotion(
    "allow"
)  # This test explicitly exercises implicit rank promotion.
def test_bcoo_mul_dense(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, n_batch, n_dense):
    """Elementwise sparse*dense (both operand orders) must match dense*dense."""
    rng = jtu.rand_default(self.rng())
    sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)
    args_maker_sp_de = lambda: [sprng(lhs_shape, lhs_dtype), jnp.array(rng(rhs_shape, rhs_dtype))]
    args_maker_de_sp = lambda: [jnp.array(rng(rhs_shape, rhs_dtype)), sprng(lhs_shape, lhs_dtype)]
    tol = {np.float64: 1E-13, np.complex128: 1E-13,
           np.float32: 1E-6, np.complex64: 1E-6}
    with jtu.strict_promotion_if_dtypes_match([lhs_dtype, rhs_dtype]):
        self._CheckAgainstDense(operator.mul, operator.mul, args_maker_de_sp, tol=tol)
        self._CheckAgainstDense(operator.mul, operator.mul, args_maker_sp_de, tol=tol)
@jtu.sample_product(
    [
        dict(
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            lhs_n_batch=lhs_n_batch,
            rhs_n_batch=rhs_n_batch,
            n_dense=n_dense,
        )
        # TODO(jakevdp): add broadcasted shapes (from bcoo_mul_dense) once sparse-sparse mul
        # supports inputs of differing rank.
        for lhs_shape, rhs_shape in [
            [(3,), (1,)],
            [(3,), (3,)],
            [(3, 4), (1, 1)],
            [(3, 4), (1, 4)],
            [(3, 4), (3, 1)],
            [(3, 4), (3, 4)],
            [(3, 4, 5), (1, 4, 5)],
            [(3, 4, 5), (3, 1, 1)],
            [(3, 4, 5), (1, 4, 1)],
        ]
        # TODO(jakevdp): add tests for batch & dense dimensions.
        for lhs_n_batch in range(len(lhs_shape) + 1)
        for rhs_n_batch in range(len(lhs_shape) + 1)
        for n_dense in range(
            len(lhs_shape) + 1 - max(lhs_n_batch, rhs_n_batch)
        )
    ],
    lhs_dtype=all_dtypes,
    rhs_dtype=all_dtypes,
)
def test_bcoo_mul_sparse(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, lhs_n_batch, rhs_n_batch, n_dense):
    """Elementwise sparse*sparse must match the corresponding dense product."""
    sprng = sptu.rand_bcoo(self.rng(), n_dense=n_dense)
    args_maker = lambda: [sprng(lhs_shape, lhs_dtype, n_batch=lhs_n_batch),
                          sprng(rhs_shape, rhs_dtype, n_batch=rhs_n_batch)]
    tol = {np.float64: 1E-13, np.complex128: 1E-13,
           np.float32: 1E-5, np.complex64: 1E-5}
    with jtu.strict_promotion_if_dtypes_match([lhs_dtype, rhs_dtype]):
        self._CheckAgainstDense(operator.mul, operator.mul, args_maker, tol=tol)
def test_bcoo_mul_sparse_with_duplicates(self):
    """Elementwise sparse*sparse must be correct when stored indices repeat.

    Regression test for https://github.com/jax-ml/jax/issues/8888
    """
    idx = jnp.array([[0, 1, 0, 0, 1, 1],
                     [1, 0, 1, 2, 0, 2]]).T
    vals = jnp.array([1, 2, 3, 4, 5, 6])
    mat = sparse.BCOO((vals, idx), shape=(3, 3))
    dense = mat.todense()
    self.assertArraysEqual((mat * mat).todense(), dense * dense)
@jtu.sample_product(
    [
        dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
        for shape in [(), (3,), (3, 5), (3, 5, 4)]
        for layout in sptu.iter_sparse_layouts(shape)
    ],
    dtype=all_dtypes,
)
def test_bcoo_broadcast_in_dim(self, shape, dtype, n_batch, n_dense):
    """None-indexing must match dense behavior and update n_batch correctly."""
    rng = sptu.rand_sparse(self.rng())
    x = jnp.array(rng(shape, dtype))
    xsp = sparse.BCOO.fromdense(x, n_batch=n_batch, n_dense=n_dense)
    # A leading new axis always becomes an additional batch dimension.
    self.assertEqual(xsp[None].n_batch, xsp.n_batch + 1)
    self.assertArraysEqual(xsp[None].todense(), x[None])
    if len(shape) >= 1:
        # A new axis grows n_batch only when inserted within the batch dims.
        self.assertEqual(xsp[:, None].n_batch, xsp.n_batch if xsp.n_batch < 1 else xsp.n_batch + 1)
        self.assertArraysEqual(xsp[:, None].todense(), x[:, None])
        self.assertArraysEqual(xsp[:, None, None].todense(), x[:, None, None])
    if len(shape) >= 2:
        self.assertEqual(xsp[:, :, None].n_batch, xsp.n_batch if xsp.n_batch < 2 else xsp.n_batch + 1)
        self.assertArraysEqual(xsp[:, :, None].todense(), x[:, :, None])
        self.assertArraysEqual(xsp[:, None, :, None].todense(), x[:, None, :, None])
@jtu.sample_product(
    [
        dict(
            shape=shape,
            n_batch=layout.n_batch,
            n_dense=layout.n_dense,
            dimension=dimension,
        )
        for shape in [(3,), (3, 5), (3, 5, 4)]
        for layout in sptu.iter_sparse_layouts(shape)
        for dimension in range(
            len(shape) - layout.n_dense
        )  # Concatenation of dense dimensions not implemented.
    ],
    dtype=all_dtypes,
)
def test_bcoo_concatenate(self, shape, dtype, n_batch, n_dense, dimension):
    """bcoo_concatenate must match lax.concatenate along each dimension."""
    sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)
    args_maker = lambda: [[sprng(shape, dtype) for i in range(3)]]
    dense_func = partial(lax.concatenate, dimension=dimension)
    sparse_func = partial(sparse.bcoo_concatenate, dimension=dimension)
    self._CheckAgainstDense(dense_func, sparse_func, args_maker)
    if jnp.issubdtype(dtype, jnp.floating):
        self._CheckGradsSparse(dense_func, sparse_func, args_maker)
@jtu.sample_product(
    lhs_shape=[(1, 1, 5), (1, 1, 10)],
    rhs_shape=[(1, 1, 5), (1, 1, 10)],
    padding=["SAME", "VALID", [(3, 3)]],
    dtype=jtu.dtypes.inexact,
    format=["sp-de", "de-sp", "sp-sp"],
)
@jax.default_matmul_precision("float32")
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_bcoo_conv_general_dilated(self, lhs_shape, rhs_shape, dtype, padding, format):
    """bcoo_conv_general_dilated must match the dense convolution."""
    kwds = dict(window_strides=(1,), padding=padding)
    sparse_fun = partial(sparse.bcoo_conv_general_dilated, **kwds)
    dense_fun = partial(lax.conv_general_dilated, **kwds)
    sprng = sptu.rand_bcoo(self.rng(), n_batch=2, n_dense=0)
    rng = jtu.rand_default(self.rng())

    def args_maker():
        # `format` selects which operands are sparse: "sp-de", "de-sp", or "sp-sp".
        lhs = (sprng if format.startswith('sp') else rng)(lhs_shape, dtype)
        rhs = (sprng if format.endswith('sp') else rng)(rhs_shape, dtype)
        return lhs, rhs

    tol = {np.float32: 1E-5, np.complex64: 1E-5, np.float64: 1E-14, np.complex128: 1E-14}
    self._CheckAgainstDense(dense_fun, sparse_fun, args_maker, tol=tol)
def test_bcoo_vmap_shape(self, shape=(2, 3, 4, 5), dtype=np.float32):
    """BCOO shape metadata must survive nested vmap of fromdense/todense."""
    # This test checks that BCOO shape metadata interacts correctly with vmap.
    rng = sptu.rand_sparse(self.rng())
    M = rng(shape, dtype)

    def make_bcoo(M):
        return sparse_bcoo._bcoo_fromdense(M, nse=math.prod(M.shape[:-1]), n_dense=1)

    todense = partial(sparse_bcoo._bcoo_todense, spinfo=sparse_util.SparseInfo(shape))
    # Nest vmap three levels deep and check the round trip at each level.
    for _ in range(3):
        make_bcoo = jax.vmap(make_bcoo)
        Msp_data, Msp_indices = make_bcoo(M)
        Msp_dense = todense(Msp_data, Msp_indices)
        self.assertEqual(Msp_dense.shape, M.shape)
        self.assertArraysEqual(Msp_dense, M)
@jtu.sample_product(
    [
        dict(
            shape=shape,
            n_batch=layout.n_batch,
            n_dense=layout.n_dense,
            n_batch_out=layout_out.n_batch,
            n_dense_out=layout_out.n_dense,
        )
        for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
        for layout in sptu.iter_sparse_layouts(shape)
        for layout_out in sptu.iter_sparse_layouts(shape)
    ],
    dtype=jtu.dtypes.integer,
)
def test_bcoo_update_layout(self, shape, dtype, n_batch, n_batch_out, n_dense, n_dense_out):
    """bcoo_update_layout must preserve values and honor on_inefficient."""
    rng = sptu.rand_sparse(self.rng())
    mat = sparse.BCOO.fromdense(rng(shape, dtype), n_batch=n_batch, n_dense=n_dense)
    kwds = dict(n_batch=n_batch_out, n_dense=n_dense_out)
    # TODO(jakevdp): in case of length-0 or length-1 shapes errors/warnings will not be raised.
    if n_dense_out > n_dense or n_batch_out > n_batch:
        # Growing batch/dense dims is inefficient: error by default and on
        # 'error'; warn on 'warn'; then silence it for the final conversion.
        with self.assertRaises(sparse.SparseEfficiencyError):
            sparse.bcoo_update_layout(mat, **kwds)
        with self.assertRaises(sparse.SparseEfficiencyError):
            sparse.bcoo_update_layout(mat, **kwds, on_inefficient='error')
        with self.assertWarns(sparse.SparseEfficiencyWarning):
            sparse.bcoo_update_layout(mat, **kwds, on_inefficient='warn')
        kwds['on_inefficient'] = None
    mat_new = sparse.bcoo_update_layout(mat, **kwds)
    self.assertEqual(mat_new.n_batch, n_batch_out)
    self.assertEqual(mat_new.n_dense, n_dense_out)
    self.assertArraysEqual(mat.todense(), mat_new.todense())
def test_bcoo_update_layout_method(self, shape=(2, 3, 4)):
    """The update_layout method must forward to sparse.bcoo_update_layout."""
    # simple test to make sure update_layout method properly forwards.
    rng = sptu.rand_sparse(self.rng())
    original = sparse.BCOO.fromdense(rng((2, 3, 4), 'float32'), n_batch=1, n_dense=1)
    relaid = original.update_layout(n_batch=0, n_dense=0)
    self.assertEqual(relaid.n_batch, 0)
    self.assertEqual(relaid.n_dense, 0)
    self.assertArraysEqual(original.todense(), relaid.todense())
def test_bcoo_bad_fillvals(self):
    """Operations must ignore padding entries with out-of-range indices."""
    # Extra values have 100 rather than zero. This lets us check that logic is
    # properly ignoring these indices.
    data = jnp.array([1, 2, 3, 100, 100])
    indices = jnp.array([1, 2, 3, 5, 5])[:, None]
    x_sp = sparse.BCOO((data, indices), shape=(5,))
    x_de = x_sp.todense()
    data = jnp.array([3, 2, 100, 100])
    indices = jnp.array([2, 3, 5, 5])[:, None]
    y_sp = sparse.BCOO((data, indices), shape=(5,))
    y_de = y_sp.todense()
    # Densification drops the out-of-bounds (index 5) entries.
    self.assertArraysEqual(x_de, jnp.array([0, 1, 2, 3, 0]))
    self.assertArraysEqual(y_de, jnp.array([0, 0, 3, 2, 0]))
    self.assertArraysEqual(x_sp.sum_duplicates().todense(), x_de)
    self.assertArraysEqual(y_sp.sum_duplicates().todense(), y_de)
    # reduce_sum:
    self.assertArraysEqual(x_sp.sum(), x_de.sum())
    # bcoo_dot_general
    self.assertArraysEqual(x_sp @ y_de, x_de @ y_de)
    # bcoo_rdot_general
    self.assertArraysEqual(x_de @ y_sp, x_de @ y_de)
    # bcoo_spdot_general
    self.assertArraysEqual((x_sp @ y_sp).todense(), x_de @ y_de)
    self.assertArraysEqual((y_sp @ x_sp).todense(), y_de @ x_de)
# TODO(tianjianlu): Unify the testing for BCOOTest and BCSRTest.
class BCSRTest(sptu.SparseTestCase):
    """Tests of the batched compressed-sparse-row (BCSR) format."""

    @jtu.sample_product(
        [
            dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
            for shape in [(5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
            for layout in sptu.iter_bcsr_layouts(shape)
        ],
        dtype=all_dtypes,
    )
    def test_bcsr_dense_round_trip(self, shape, dtype, n_batch, n_dense):
        """BCSR.fromdense followed by todense must reproduce the input."""
        n_sparse = len(shape) - n_batch - n_dense
        rng = sptu.rand_sparse(self.rng())
        M = rng(shape, dtype)
        nse = sparse.util._count_stored_elements(M, n_batch=n_batch, n_dense=n_dense)

        def round_trip(M):
            return sparse.BCSR.fromdense(M, nse=nse, n_batch=n_batch, n_dense=n_dense).todense()

        args_maker = lambda: [M]
        ident = lambda x: x
        self._CheckAgainstNumpy(ident, round_trip, args_maker)
        self._CompileAndCheck(round_trip, args_maker)
        self._CheckBatchingSparse(ident, round_trip, args_maker, bdims=self._random_bdims(n_batch))
        if jnp.issubdtype(dtype, jnp.floating):
            # For n_sparse != 0, we can't use an identity because output zeros must not
            # be dependent on input zeros. This mimics the code in count_stored_elements().
            def expected(M):
                if n_sparse == 0: return M
                mask = (M != 0).any(range(M.ndim - n_dense, M.ndim), keepdims=True)
                return jnp.where(mask, M, 0)

            self._CheckGradsSparse(expected, round_trip, args_maker)

    @jtu.sample_product(
        [
            dict(shape=shape, n_batch=n_batch)
            for shape in [(5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
            for n_batch in range(len(shape) - 1)
        ],
        dtype=jtu.dtypes.floating + jtu.dtypes.complex,
    )
    def test_bcsr_bcoo_round_trip(self, shape, n_batch, dtype):
        """BCSR -> BCOO -> BCSR conversion must preserve values and indptr."""
        n_sparse = 2
        n_dense = len(shape) - n_sparse - n_batch
        rng = self.rng()
        sprng = sptu.rand_bcsr(rng, n_batch=n_batch, n_dense=n_dense)
        M_bcsr = sprng(shape, dtype)
        self.assertIsInstance(M_bcsr, sparse.BCSR)
        M_dense = M_bcsr.todense()
        M_bcoo = M_bcsr.to_bcoo()
        self.assertIsInstance(M_bcoo, sparse.BCOO)
        self.assertAllClose(M_dense, M_bcoo.todense())
        M_bcsr2 = sparse.BCSR.from_bcoo(M_bcoo)
        self.assertAllClose(M_dense, M_bcsr2.todense())
        self.assertArraysEqual(M_bcsr.indptr, M_bcsr2.indptr)
        # TODO(jakevdp): This will only be true in general when M_bcsr.indices is sorted.
        # self.assertSparseArraysEquivalent(M_bcsr, M_bcsr2)

    @jtu.sample_product(
        [
            dict(shape=shape, n_batch=n_batch)
            for shape in [(5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
            for n_batch in range(len(shape) - 1)
        ],
        dtype=jtu.dtypes.floating + jtu.dtypes.complex,
    )
    def test_bcsr_extract(self, shape, dtype, n_batch):
        """bcsr_extract must recover the data buffer produced by fromdense."""
        n_dense = len(shape) - n_batch - 2
        rng = sptu.rand_sparse(self.rng())
        M = rng(shape, dtype)
        nse = sparse.util._count_stored_elements(M, n_batch=n_batch,
                                                 n_dense=n_dense)
        data, indices, indptr = sparse_bcsr._bcsr_fromdense(
            M, nse=nse, n_batch=n_batch, n_dense=n_dense)
        data2 = sparse.bcsr_extract(indices, indptr, M)
        self.assertArraysEqual(data, data2)
        args_maker_bcsr_extract = lambda: [indices, indptr, M]
        self._CompileAndCheck(sparse.bcsr_extract, args_maker_bcsr_extract)

    @jtu.sample_product(
        props=_generate_batched_dot_general_properties(
            shapes=((2, 3), (2, 3, 4), (2, 3, 4, 4)), sparse_format="bcsr"
        ),
        dtype=jtu.dtypes.floating + jtu.dtypes.complex,
    )
    @jax.default_matmul_precision("float32")
    def test_bcsr_dot_general(
        self, dtype: np.dtype, props: sptu.BatchedDotGeneralProperties
    ):
        """bcsr_dot_general must match lax.dot_general in values, grads, vmap."""
        rng = jtu.rand_default(self.rng())
        sprng = sptu.rand_bcsr(self.rng(), n_batch=props.n_batch, n_dense=props.n_dense)
        args_maker = lambda: [sprng(props.lhs_shape, dtype),
                              rng(props.rhs_shape, dtype)]
        dense_fun = partial(lax.dot_general,
                            dimension_numbers=props.dimension_numbers)
        sparse_fun = partial(sparse.bcsr_dot_general,
                             dimension_numbers=props.dimension_numbers)
        tol = {np.float64: 1E-12, np.complex128: 1E-12,
               np.float32: 1E-5, np.complex64: 1E-5}
        self._CheckAgainstDense(dense_fun, sparse_fun, args_maker, tol=tol)
        if jnp.issubdtype(dtype, jnp.floating) and props.n_dense == 0:
            # Dense dimensions not yet fully supported in reverse mode.
            # NOTE(review): the guard above already ensures n_dense == 0, so
            # this conditional always selects ['fwd', 'rev'].
            modes = ['fwd'] if props.n_dense != 0 else ['fwd', 'rev']
            self._CheckGradsSparse(dense_fun, sparse_fun, args_maker, modes=modes, atol=tol, rtol=tol)
        self._CheckBatchingSparse(dense_fun, sparse_fun, args_maker, atol=tol, rtol=tol,
                                  bdims=self._random_bdims(props.n_batch, len(props.rhs_shape)))

    @jtu.sample_product(
        [
            dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
            for shape in [(3, 5), (3, 5, 4)]
            for layout in sptu.iter_bcsr_layouts(shape)
        ],
        dtype=all_dtypes,
    )
    def test_bcsr_broadcast_in_dim(self, shape, dtype, n_batch, n_dense):
        """None-indexing a BCSR array must add a batch dim and match dense."""
        rng = sptu.rand_sparse(self.rng())
        x = jnp.array(rng(shape, dtype))
        xsp = sparse.BCSR.fromdense(x, n_batch=n_batch, n_dense=n_dense)
        self.assertEqual(xsp[None].n_batch, xsp.n_batch + 1)
        self.assertArraysEqual(xsp[None].todense(), x[None])
        if n_batch == 1:
            self.assertEqual(xsp[:, None].n_batch, xsp.n_batch + 1)
            self.assertArraysEqual(xsp[:, None].todense(), x[:, None])

    @jtu.sample_product(
        [
            dict(
                shape=shape,
                n_batch=layout.n_batch,
                n_dense=layout.n_dense,
                dimension=dimension,
            )
            for shape in [(3, 5), (3, 5, 4)]
            for layout in sptu.iter_sparse_layouts(shape)
            for dimension in range(
                len(shape) - layout.n_dense
            )  # Concatenation of dense dimensions not implemented.
        ],
        dtype=all_dtypes,
    )
    def test_bcsr_concatenate(self, shape, dtype, n_batch, n_dense, dimension):
        """Concatenation must match lax.concatenate.

        NOTE(review): despite the name, this exercises the BCOO helpers
        (rand_bcoo / bcoo_concatenate) — presumably pending unification of
        the BCOO and BCSR test suites; verify this is intentional.
        """
        sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)
        args_maker = lambda: [[sprng(shape, dtype) for i in range(3)]]
        dense_func = partial(lax.concatenate, dimension=dimension)
        sparse_func = partial(sparse.bcoo_concatenate, dimension=dimension)
        self._CheckAgainstDense(dense_func, sparse_func, args_maker)
        if jnp.issubdtype(dtype, jnp.floating):
            self._CheckGradsSparse(dense_func, sparse_func, args_maker)

    def test_bcoo_spdot_abstract_eval_bug(self):
        """Sparse-sparse matmul with broadcasted batch dims must compile."""
        # Regression test for https://github.com/jax-ml/jax/issues/21921
        lhs = sparse.BCOO(
            (jnp.float32([[1]]), lax.broadcasted_iota(jnp.int32, (10, 1, 1), 0)),
            shape=(10, 10))
        rhs = sparse.BCOO(
            (jnp.float32([1]), jnp.int32([[3]])),
            shape=(10,))
        args_maker = lambda: [lhs, rhs]

        def func(lhs, rhs):
            return (lhs @ rhs).todense()

        self._CompileAndCheck(func, args_maker)
if __name__ == "__main__":
    # Run the suite with JAX's test loader when invoked directly.
    absltest.main(testLoader=jtu.JaxTestLoader())
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@sparse_bcoo_bcsr_test.py@.PATH_END.py
|
{
"filename": "plot_op.py",
"repo_name": "DedalusProject/dedalus",
"repo_path": "dedalus_extracted/dedalus-master/dedalus/tools/plot_op.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from ..core.future import Future
from ..core.operators import Convert
class Node:
    """A labeled node in an operator tree, positioned for plotting."""

    def __init__(self, label, level):
        # Text displayed at the node when plotted.
        self.label = label
        # Depth in the tree; the horizontal position is assigned later.
        self.level = level
        self.position = 0.

    def __repr__(self):
        return self.label
class Leaf(Node):
    """A terminal node; leaves are laid out left-to-right in creation order."""

    # Class-wide tally of leaves created so far.
    count = 0.

    def __init__(self, *args):
        super().__init__(*args)
        # Place this leaf at 1, 2, 3, ... based on the running count.
        self.position = Leaf.count + 1.
        Leaf.count += 1.
class Tree:
    """Tree of Nodes/Leaves mirroring an operator expression, for plotting."""

    def __init__(self, operator, skip_convert=True):
        # Define node and branch containers
        self.nodes = []
        self.branches = defaultdict(list)
        # Build tree
        self.skip_convert = skip_convert
        root = self.build(operator, 0)
        # Set positions
        self.set_position(root)

    def build(self, arg, level):
        """Recursively build the node for *arg* at *level* and return it."""
        # Recursively construct nodes and add branches
        if isinstance(arg, Future):
            if isinstance(arg, Convert) and self.skip_convert:
                # Skip Convert wrappers: attach their operand at the same level.
                return self.build(arg.args[0], level)
            else:
                node = Node(arg.name, level)
                for a in arg.args:
                    self.branches[node].append(self.build(a, level+1))
        else:
            # Non-Future arguments become leaves labeled by their str().
            node = Leaf(str(arg), level)
        # Add node
        self.nodes.append(node)
        return node

    def set_position(self, node):
        """Recursively assign horizontal positions and return this node's."""
        # Set node positions to mean of sub node positions
        if not isinstance(node, Leaf):
            sub_pos = [self.set_position(sub) for sub in self.branches[node]]
            node.position = np.mean(sub_pos)
        return node.position
def plot_operator(operator, fontsize=8, figsize=8, opsize=0.3, saveas=None, skip_convert=True):
    """Draw an operator expression tree with matplotlib.

    Parameters
    ----------
    operator : operator expression whose tree is plotted
    fontsize : font size for node labels
    figsize : figure width and height, in inches
    opsize : node circle radius, in data coordinates
    saveas : filename to save the figure to (at 200 dpi), if given
    skip_convert : whether to skip Convert operators when building the tree
    """
    # Create tree
    tree = Tree(operator, skip_convert=skip_convert)
    # Create figure
    fig = plt.figure(figsize=(figsize, figsize))
    fig.clear()
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
    # Store node positions in lists
    x = []
    y = []
    for node in tree.nodes:
        # Add node positions (deeper levels plotted lower)
        x.append(node.position)
        y.append(-node.level)
        s = node.label
        # Plot branches to sub nodes
        for sub in tree.branches[node]:
            sx = sub.position
            sy = -sub.level
            plt.plot([x[-1], sx], [y[-1], sy], '-k', alpha=0.5, zorder=0)
        # Plot circle at node (one fill color for leaves, another for operators)
        if isinstance(node, Leaf):
            fc = '#9CBA7F'
        else:
            fc = '#B4CDCD'
        c = plt.Circle((x[-1], y[-1]), radius=opsize, fc=fc, ec='k', zorder=1)
        fig.gca().add_artist(c)
        # Plot node label
        plt.text(x[-1], y[-1], s, fontsize=fontsize, zorder=2,
                 verticalalignment='center', horizontalalignment='center')
    # Set limits: pad by 10% and force square axes, then hide them
    plt.axis(pad(*plt.axis(), pad=0.1, square=True))
    plt.axis('off')
    # Save
    if saveas:
        plt.savefig(saveas, dpi=200)
def pad(xmin, xmax, ymin, ymax, pad=0., square=False):
    """Expand axis limits by a fractional margin.

    Parameters
    ----------
    xmin, xmax, ymin, ymax : current axis limits
    pad : fractional enlargement of each half-range
    square : if True, use the larger half-range for both axes

    Returns
    -------
    list : new [xmin, xmax, ymin, ymax]
    """
    cx = (xmin + xmax) / 2.
    cy = (ymin + ymax) / 2.
    rx = (xmax - xmin) / 2.
    ry = (ymax - ymin) / 2.
    if square:
        rx = ry = max(rx, ry)
    rx *= (1. + pad)
    ry *= (1. + pad)
    return [cx - rx, cx + rx, cy - ry, cy + ry]
|
DedalusProjectREPO_NAMEdedalusPATH_START.@dedalus_extracted@dedalus-master@dedalus@tools@plot_op.py@.PATH_END.py
|
{
"filename": "SiGaps_18.ipynb",
"repo_name": "Echelle/AO_bonding_paper",
"repo_path": "AO_bonding_paper_extracted/AO_bonding_paper-master/notebooks/SiGaps_18.ipynb",
"type": "Jupyter Notebook"
}
|
### This IPython Notebook performs a fit and generates a figure of the spectrum of sample VG09-12, in the mesh region with a 49 +/- 6 nm gap. This version is modified to fit for two gaps, updated with new data from February 2015.
The filename of the figure is **[TBD].pdf**.
Author: Michael Gully-Santiago, `gully@astro.as.utexas.edu`
Date: February 16, 2015
```
%pylab inline
import emcee
import triangle
import pandas as pd
import seaborn as sns
from astroML.decorators import pickle_results
```
Populating the interactive namespace from numpy and matplotlib
```
# Configure seaborn styling for paper figures.
sns.set_context("paper", font_scale=2.0, rc={"lines.linewidth": 2.5})
sns.set(style="ticks")  # NOTE(review): may reset the context set above — confirm intended order
```
```
from etalon import *  # presumably provides T_gap_Si, sellmeier_Si, T_gap_Si_withFF_fast — confirm
np.random.seed(78704)  # fixed seed for reproducibility
```
```
# Load the measured spectra; rows are indexed by wavelength, columns by position.
df = pd.read_csv('../data/VG09_12_gap_20150206.csv', index_col=0)
df.head()
```
<div style="max-height:1000px;max-width:1500px;overflow:auto;">
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>VG09-12_1_0</th>
<th>VG09-12_1_2</th>
<th>VG09-12_1_4</th>
<th>VG09-12_1_6</th>
<th>VG09-12_1_8</th>
<th>VG09-12_1_10</th>
<th>VG09-12_1_12</th>
<th>VG09-12_1_14</th>
<th>VG09-12_1_16</th>
<th>VG09-12_1_18</th>
<th>...</th>
<th>VG09-12_1_32</th>
<th>VG09-12_1_34</th>
<th>VG09-12_1_36</th>
<th>VG09-12_1_38</th>
<th>VG09-12_1_40</th>
<th>VG09-12_1_42</th>
<th>VG09-12_1_44</th>
<th>VG09-12_1_46</th>
<th>VG09-12_1_48</th>
<th>VG09-12_1_50</th>
</tr>
<tr>
<th>wavelength</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>1775</th>
<td> 1.880152</td>
<td> 1.641429</td>
<td> 0.976230</td>
<td> 0.964145</td>
<td> 0.976460</td>
<td> 0.979872</td>
<td> 0.987270</td>
<td> 0.986529</td>
<td> 1.070819</td>
<td> 1.002500</td>
<td>...</td>
<td> 1.002960</td>
<td> 1.002864</td>
<td> 1.003209</td>
<td> 1.003811</td>
<td> 0.965806</td>
<td> 0.918209</td>
<td> 1.876167</td>
<td> 1.881628</td>
<td> 1.883350</td>
<td> 1.883947</td>
</tr>
<tr>
<th>1765</th>
<td> 1.879758</td>
<td> 1.642331</td>
<td> 0.974838</td>
<td> 0.964183</td>
<td> 0.976321</td>
<td> 0.978982</td>
<td> 0.986934</td>
<td> 0.985437</td>
<td> 1.070772</td>
<td> 1.002522</td>
<td>...</td>
<td> 1.002708</td>
<td> 1.002673</td>
<td> 1.003248</td>
<td> 1.003872</td>
<td> 0.966022</td>
<td> 0.917161</td>
<td> 1.875934</td>
<td> 1.881610</td>
<td> 1.883944</td>
<td> 1.884051</td>
</tr>
<tr>
<th>1755</th>
<td> 1.880479</td>
<td> 1.643144</td>
<td> 0.973691</td>
<td> 0.964016</td>
<td> 0.975474</td>
<td> 0.978551</td>
<td> 0.986437</td>
<td> 0.984843</td>
<td> 1.070936</td>
<td> 1.002670</td>
<td>...</td>
<td> 1.002381</td>
<td> 1.002297</td>
<td> 1.003009</td>
<td> 1.003320</td>
<td> 0.965691</td>
<td> 0.917684</td>
<td> 1.876912</td>
<td> 1.882451</td>
<td> 1.884022</td>
<td> 1.884329</td>
</tr>
<tr>
<th>1745</th>
<td> 1.880220</td>
<td> 1.643559</td>
<td> 0.973281</td>
<td> 0.963117</td>
<td> 0.974474</td>
<td> 0.977558</td>
<td> 0.986889</td>
<td> 0.984795</td>
<td> 1.070851</td>
<td> 1.002239</td>
<td>...</td>
<td> 1.002931</td>
<td> 1.002059</td>
<td> 1.002659</td>
<td> 1.003579</td>
<td> 0.965511</td>
<td> 0.916989</td>
<td> 1.876531</td>
<td> 1.882771</td>
<td> 1.884626</td>
<td> 1.884169</td>
</tr>
<tr>
<th>1735</th>
<td> 1.880693</td>
<td> 1.643988</td>
<td> 0.973618</td>
<td> 0.963056</td>
<td> 0.973207</td>
<td> 0.976588</td>
<td> 0.987083</td>
<td> 0.984501</td>
<td> 1.070377</td>
<td> 1.001896</td>
<td>...</td>
<td> 1.002166</td>
<td> 1.002575</td>
<td> 1.003052</td>
<td> 1.003299</td>
<td> 0.965231</td>
<td> 0.917656</td>
<td> 1.876819</td>
<td> 1.882872</td>
<td> 1.884520</td>
<td> 1.884540</td>
</tr>
</tbody>
</table>
<p>5 rows × 26 columns</p>
</div>
```
# Introduce the real data.
x = df.index.values
N = len(x)
# Define T_DSP for the model: transmission with zero gap (reference), presumably.
T_DSP = T_gap_Si(x, 0.0)
# Refractive index of Si at each wavelength (Sellmeier relation).
n1 = sellmeier_Si(x)
# Define per-point measurement uncertainty.
yerr = 0.0002*np.ones(N)
yerr[(x > 1350) & (x < 1420)] = 0.0005  # higher noise in this region
iid_cov = np.diag(yerr ** 2)
# Select the spectrum of interest,
# normalized by the measured DSP Si wafer.
y = df['VG09-12_1_26'].values
```
Define the likelihood.
```
def lnlike(d, f, lna, lns):
    """Gaussian-process log-likelihood of the gap model.

    Parameters are the gap depth ``d``, fill factor ``f``, and the log
    amplitude ``lna`` / log length-scale ``lns`` of a squared-exponential
    kernel describing correlated residuals.  Reads the module-level data
    ``x``, ``y``, ``yerr`` (via ``iid_cov``), ``n1`` and ``T_DSP``.

    Returns -inf when the covariance matrix is not positive definite.
    """
    amp = np.exp(lna)
    scale = np.exp(lns)
    # Squared-exponential kernel over wavelength, added to the iid noise.
    dx = x[:, None] - x[None, :]
    C = iid_cov + amp ** 2 * np.exp(-0.5 * dx ** 2 / scale ** 2)
    sgn, logdet = np.linalg.slogdet(C)
    if sgn <= 0:
        # Non-positive-definite covariance: reject this parameter point.
        return -np.inf
    resid = y - T_gap_Si_withFF_fast(x, d, f, n1) / T_DSP
    # Multivariate-normal log density (up to an additive constant).
    return -0.5 * (resid @ np.linalg.solve(C, resid) + logdet)
```
Define the prior.
```
def lnprior(d, f, lna, lns):
    """Flat (uniform) log-prior over a bounded box.

    Returns 0.0 when every parameter lies strictly inside its allowed
    range, and -inf otherwise (which vetoes the sample in lnprob).
    """
    in_bounds = (
        0 < d < 100
        and 0.0 < f < 1.0
        and -12 < lna < -2
        and 0 < lns < 10
    )
    return 0.0 if in_bounds else -np.inf
```
Combine likelihood and prior to obtain the posterior.
```
def lnprob(p):
    """Log-posterior = log-prior + log-likelihood for parameter vector p.

    Short-circuits on an out-of-bounds prior so the (expensive)
    likelihood is never evaluated for rejected points.
    """
    prior = lnprior(*p)
    if np.isfinite(prior):
        return prior + lnlike(*p)
    return -np.inf
```
Set up emcee.
```
@pickle_results('SiGaps_18_0gap.pkl')
def hammer_time(ndim, nwalkers, d_Guess, f_Guess, a_Guess, s_Guess, nburnins, ntrials):
    """Run the emcee ensemble sampler: burn-in, reset, then production run.

    The @pickle_results decorator caches the returned sampler to
    'SiGaps_18_0gap.pkl', so repeated calls load from disk instead of
    re-running the MCMC.

    Parameters: problem dimensionality and walker count, initial guesses
    for (d, f, a, s) — note a and s are sampled in log space — and the
    number of burn-in and production steps.  Returns the sampler object.
    """
    # Initialize the walkers
    p0 = np.array([d_Guess, f_Guess, np.log(a_Guess), np.log(s_Guess)])
    # Scatter walkers in a small (1%) Gaussian ball around the guess.
    pos = [p0 + 1.0e-2*p0 * np.random.randn(ndim) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
    # Burn-in phase; these samples are discarded by reset() below.
    pos, lp, state = sampler.run_mcmc(pos, nburnins)
    sampler.reset()
    # Production run, starting from the burned-in walker positions.
    pos, lp, state = sampler.run_mcmc(pos, ntrials)
    return sampler
```
Set up the initial conditions.
```
np.random.seed(78704)   # fixed seed: reproducible walker initialization
ndim, nwalkers = 4, 32  # 4 parameters (d, f, ln a, ln s), 32 walkers
d_Guess = 15.0          # gap-depth guess (nm)
f_Guess = 0.95          # fill-factor guess (dimensionless, in (0, 1))
a_Guess = 0.0016        # GP amplitude guess (sampled as ln a)
s_Guess = 500.0         # GP length-scale guess (sampled as ln s)
nburnins = 300          # burn-in steps (discarded)
ntrials = 9000          # production steps (kept)
```
Run the burn-in phase. Run the full MCMC. Pickle the results.
```
sampler = hammer_time(ndim, nwalkers, d_Guess, f_Guess, a_Guess, s_Guess, nburnins, ntrials)
```
@pickle_results: using precomputed results from 'SiGaps_18_0gap.pkl'
Linearize $a$ and $s$ for easy inspection of the values.
```
# Walker trajectories, shape (nwalkers, nsteps, ndim), for trace plots.
chain = sampler.chain
# Copy the flattened samples so the transform below doesn't mutate the sampler.
samples_lin = copy(sampler.flatchain)
# Columns 2 and 3 hold ln(a) and ln(s); exponentiate to linear a and s.
samples_lin[:, 2:] = np.exp(samples_lin[:, 2:])
```
```
# Trace plots: one panel per parameter, every walker overplotted in
# translucent black to reveal mixing and convergence.
fig, axes = plt.subplots(4, 1, figsize=(5, 6), sharex=True)
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.98,
                    wspace=0.0, hspace=0.05)
[a.plot(np.arange(chain.shape[1]), chain[:, :, i].T, "k", alpha=0.5)
 for i, a in enumerate(axes)]
[a.set_ylabel("${0}$".format(l)) for a, l in zip(axes, ["d", "f", "\ln a", "\ln s"])]
axes[-1].set_xlim(0, chain.shape[1])
axes[-1].set_xlabel("iteration");
```

Make a triangle corner plot.
```
fig = triangle.corner(samples_lin,
labels=map("${0}$".format, ["d", "f", "a", "s"]),
quantiles=[0.16, 0.84])
```
Quantiles:
[(0.16, 3.9775785423732453), (0.84, 31.600191065182045)]
Quantiles:
[(0.16, 0.03449491294438941), (0.84, 0.7088700630482242)]
Quantiles:
[(0.16, 0.0010558971716583014), (0.84, 0.0096825717902464681)]
Quantiles:
[(0.16, 311.06441219384976), (0.84, 4062.0138060398622)]

```
fig = triangle.corner(samples_lin[:,0:2],
labels=map("${0}$".format, ["d", "f"]),
quantiles=[0.16, 0.84])
plt.savefig("VG0912_0gap_corner.pdf")
```
Quantiles:
[(0.16, 3.9775785423732453), (0.84, 31.600191065182045)]
Quantiles:
[(0.16, 0.03449491294438941), (0.84, 0.7088700630482242)]
/Users/gully/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_pdf.py:2264: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
different = bool(ours != theirs)

```
# Summarize each parameter as (median, +1sigma, -1sigma) from the
# 16th/50th/84th percentiles of the linearized posterior samples.
d_mcmc, f_mcmc, a_mcmc, s_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                                     zip(*np.percentile(samples_lin, [16, 50, 84],
                                                        axis=0)))
d_mcmc, f_mcmc, a_mcmc, s_mcmc
```
((10.677500418273841, 20.922690646908205, 6.6999218759005954),
(0.24973230076548836, 0.45913776228273584, 0.21523738782109894),
(0.0026829125467427973, 0.0069996592435036704, 0.0016270153750844959),
(956.47471519625674, 3105.5390908436057, 645.41030300240698))
```
print "{:.0f}^{{+{:.0f}}}_{{-{:.0f}}}".format(*d_mcmc)
print "{:.3f}^{{+{:.3f}}}_{{-{:.3f}}}".format(*f_mcmc)
```
11^{+21}_{-7}
0.250^{+0.459}_{-0.215}
```
plt.figure(figsize=(4,3))
# Posterior predictive: for 60 random posterior samples, draw one GP
# realization around the deterministic gap model and overplot it faintly.
for d, f, a, s in samples_lin[np.random.randint(len(samples_lin), size=60)]:
    off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
    C = iid_cov + off_diag_terms
    fit = T_gap_Si_withFF_fast(x, d, f, n1)/T_DSP
    vec = np.random.multivariate_normal(fit, C)
    plt.plot(x, vec,"-b", alpha=0.06)
# Measured spectrum with its error bars.
plt.step(x, y,color="k", label='Measurement')
plt.errorbar(x, y,yerr=yerr, color="k")
# Reference model curve at fixed d = 14 nm, f = 1.0 for comparison.
fit = T_gap_Si_withFF_fast(x, 14, 1.0, n1)/T_DSP
fit_label = 'Model with $d={:.0f}$ nm, $f={:.2f}$'.format(14, 1.0)
plt.plot(x, fit, '--', color=sns.xkcd_rgb["pale red"], alpha=1.0, label=fit_label)
# Off-screen dummy line so the faint GP draws get a visible legend entry.
plt.plot([-10, -9], [-10, -9],"-b", alpha=0.45, label='Draws from GP')
plt.plot([0, 5000], [1.0, 1.0], '-.k', alpha=0.5)
plt.fill_between([1200, 1250], 2.0, 0.0, hatch='\\', alpha=0.4, color='k', label='Si absorption cutoff')
plt.xlabel('$\lambda$ (nm)');
plt.ylabel('$T_{gap}$');
plt.xlim(1200, 1900);
plt.ylim(0.96, 1.019);
plt.legend(loc='lower right')
plt.savefig("VG0912_0gap.pdf", bbox_inches='tight')
```

```
y.mean()
```
0.99980907280714904
```
y.std()
```
0.00084827408755901135
```
# Select posterior samples with fill factor f > 0.8, report what fraction
# of all samples they are, then take the 99.5th percentile of gap depth d
# within that subset as an upper limit on the gap size.
ids = samples_lin[:,1] > 0.8
ids.sum()/(len(ids)*1.0)
np.percentile(samples_lin[ids, 0], [99.5])
```
array([ 13.23949635])
The VG09-12 off-mesh spectrum rules out gaps greater than 13 nm over $>80\%$ of the measurement area.
|
EchelleREPO_NAMEAO_bonding_paperPATH_START.@AO_bonding_paper_extracted@AO_bonding_paper-master@notebooks@SiGaps_18.ipynb@.PATH_END.py
|
{
"filename": "_tickfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/heatmapgl/colorbar/_tickfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "heatmapgl.colorbar"
_path_str = "heatmapgl.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
"Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
"Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# lineposition
# ------------
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
# shadow
# ------
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# style
# -----
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
# textcase
# --------
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
# variant
# -------
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
# weight
# ------
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmapgl.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmapgl.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmapgl.colorbar.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("lineposition", None)
_v = lineposition if lineposition is not None else _v
if _v is not None:
self["lineposition"] = _v
_v = arg.pop("shadow", None)
_v = shadow if shadow is not None else _v
if _v is not None:
self["shadow"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("style", None)
_v = style if style is not None else _v
if _v is not None:
self["style"] = _v
_v = arg.pop("textcase", None)
_v = textcase if textcase is not None else _v
if _v is not None:
self["textcase"] = _v
_v = arg.pop("variant", None)
_v = variant if variant is not None else _v
if _v is not None:
self["variant"] = _v
_v = arg.pop("weight", None)
_v = weight if weight is not None else _v
if _v is not None:
self["weight"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@heatmapgl@colorbar@_tickfont.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/splom/legendgrouptitle/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "splom.legendgrouptitle"
_path_str = "splom.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
"Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
"Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# lineposition
# ------------
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
# shadow
# ------
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# style
# -----
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
# textcase
# --------
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
# variant
# -------
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
# weight
# ------
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.splom.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.splom.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("lineposition", None)
_v = lineposition if lineposition is not None else _v
if _v is not None:
self["lineposition"] = _v
_v = arg.pop("shadow", None)
_v = shadow if shadow is not None else _v
if _v is not None:
self["shadow"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("style", None)
_v = style if style is not None else _v
if _v is not None:
self["style"] = _v
_v = arg.pop("textcase", None)
_v = textcase if textcase is not None else _v
if _v is not None:
self["textcase"] = _v
_v = arg.pop("variant", None)
_v = variant if variant is not None else _v
if _v is not None:
self["variant"] = _v
_v = arg.pop("weight", None)
_v = weight if weight is not None else _v
if _v is not None:
self["weight"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@splom@legendgrouptitle@_font.py@.PATH_END.py
|
{
"filename": "vol-points.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/doc/source/cookbook/vol-points.py",
"type": "Python"
}
|
import numpy as np

import yt
from yt.units import kpc
from yt.visualization.volume_rendering.api import PointSource

# Volume-render the IsolatedGalaxy dataset with random points overlaid.
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
sc = yt.create_scene(ds)

np.random.seed(1234567)  # reproducible point positions/colors
npoints = 1000

# Random particle positions
vertices = np.random.random([npoints, 3]) * 200 * kpc

# Random colors (RGBA rows)
colors = np.random.random([npoints, 4])

# Set alpha value to something that produces a good contrast with the volume
# rendering
colors[:, 3] = 0.1

points = PointSource(vertices, colors=colors)
sc.add_source(points)

sc.camera.width = 300 * kpc

# sigma_clip brightens the image by clipping the transfer-function output.
sc.save(sigma_clip=5)
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@doc@source@cookbook@vol-points.py@.PATH_END.py
|
{
"filename": "model.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/cffi/py2/cffi/model.py",
"type": "Python"
}
|
import types
import weakref
from .lock import allocate_lock
from .error import CDefError, VerificationError, VerificationMissing
# type qualifiers
Q_CONST = 0x01
Q_RESTRICT = 0x02
Q_VOLATILE = 0x04
def qualify(quals, replace_with):
    """Prefix *replace_with* with the C qualifier keywords set in *quals*.

    Qualifiers are applied in the order const, volatile, __restrict, each
    prepended in front of the left-stripped text, so a later qualifier
    ends up outermost (e.g. ' volatile const x').
    """
    # (flag, keyword) pairs, in the original emission order.
    # __restrict is used because it is understood by both gcc and msvc;
    # other compilers need a #define in _cffi_include.h (and its copies).
    for flag, keyword in ((Q_CONST, ' const '),
                          (Q_VOLATILE, ' volatile '),
                          (Q_RESTRICT, ' __restrict ')):
        if quals & flag:
            replace_with = keyword + replace_with.lstrip()
    return replace_with
class BaseTypeByIdentity(object):
    """Base of all model types; instances compare by identity.

    Each subclass sets ``c_name_with_marker``, the C name with a single
    '&' marking where a declarator (variable name, '*', '[n]', ...)
    would be inserted.
    """
    is_array_type = False
    is_raw_function = False

    def get_c_name(self, replace_with='', context='a C file', quals=0):
        """Return the C declaration text, substituting *replace_with*
        (optionally qualified by *quals*) at the '&' marker.

        Raises VerificationError if the name still contains '$'
        (a placeholder for an unknown/anonymous type name).
        """
        result = self.c_name_with_marker
        assert result.count('&') == 1
        # some logic duplication with ffi.getctype()... :-(
        replace_with = replace_with.strip()
        if replace_with:
            if replace_with.startswith('*') and '&[' in result:
                # pointer-to-array needs parentheses: e.g. 'int (*x)[5]'
                replace_with = '(%s)' % replace_with
            elif not replace_with[0] in '[(':
                # separate a plain identifier from the type name
                replace_with = ' ' + replace_with
        replace_with = qualify(quals, replace_with)
        result = result.replace('&', replace_with)
        if '$' in result:
            raise VerificationError(
                "cannot generate '%s' in %s: unknown type name"
                % (self._get_c_name(), context))
        return result

    def _get_c_name(self):
        # C name with the declarator marker removed.
        return self.c_name_with_marker.replace('&', '')

    def has_c_name(self):
        # '$' marks placeholder names that cannot appear in real C code.
        return '$' not in self._get_c_name()

    def is_integer_type(self):
        return False

    def get_cached_btype(self, ffi, finishlist, can_delay=False):
        """Return the backend type for this model type, building and
        caching it on *ffi* on first use."""
        try:
            BType = ffi._cached_btypes[self]
        except KeyError:
            BType = self.build_backend_type(ffi, finishlist)
            # setdefault guards against a recursive build having already
            # stored an entry for self; both paths must agree.
            BType2 = ffi._cached_btypes.setdefault(self, BType)
            assert BType2 is BType
        return BType

    def __repr__(self):
        return '<%s>' % (self._get_c_name(),)

    def _get_items(self):
        # (name, value) pairs of the attributes that define this type,
        # used by subclasses for structural equality and hashing.
        return [(name, getattr(self, name)) for name in self._attrs_]
class BaseType(BaseTypeByIdentity):
    """Base for model types compared structurally: two instances are
    equal when they have the same class and the same ``_attrs_`` values."""

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self._get_items() == other._get_items()

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Consistent with __eq__: class plus the attribute tuple.
        return hash((self.__class__, tuple(self._get_items())))
class VoidType(BaseType):
    """The C ``void`` type; a single shared instance is exposed below."""

    _attrs_ = ()

    def __init__(self):
        self.c_name_with_marker = 'void&'

    def build_backend_type(self, ffi, finishlist):
        # 'void' is identical everywhere, so fetch it from the global cache.
        return global_cache(self, ffi, 'new_void_type')


# Module-level singleton: every use of plain 'void' shares this instance.
void_type = VoidType()
class BasePrimitiveType(BaseType):
    # Common base for primitive types; only complex primitives override.
    def is_complex_type(self):
        return False
class PrimitiveType(BasePrimitiveType):
    """Model of a known C primitive type.

    ``ALL_PRIMITIVE_TYPES`` maps each accepted type name to a category
    letter: 'c' = character, 'i' = integer, 'f' = float, 'j' = complex.
    """
    _attrs_ = ('name',)

    ALL_PRIMITIVE_TYPES = {
        'char':               'c',
        'short':              'i',
        'int':                'i',
        'long':               'i',
        'long long':          'i',
        'signed char':        'i',
        'unsigned char':      'i',
        'unsigned short':     'i',
        'unsigned int':       'i',
        'unsigned long':      'i',
        'unsigned long long': 'i',
        'float':              'f',
        'double':             'f',
        'long double':        'f',
        'float _Complex':     'j',
        'double _Complex':    'j',
        '_Bool':              'i',
        # the following types are not primitive in the C sense
        'wchar_t':            'c',
        'char16_t':           'c',
        'char32_t':           'c',
        'int8_t':             'i',
        'uint8_t':            'i',
        'int16_t':            'i',
        'uint16_t':           'i',
        'int32_t':            'i',
        'uint32_t':           'i',
        'int64_t':            'i',
        'uint64_t':           'i',
        'int_least8_t':       'i',
        'uint_least8_t':      'i',
        'int_least16_t':      'i',
        'uint_least16_t':     'i',
        'int_least32_t':      'i',
        'uint_least32_t':     'i',
        'int_least64_t':      'i',
        'uint_least64_t':     'i',
        'int_fast8_t':        'i',
        'uint_fast8_t':       'i',
        'int_fast16_t':       'i',
        'uint_fast16_t':      'i',
        'int_fast32_t':       'i',
        'uint_fast32_t':      'i',
        'int_fast64_t':       'i',
        'uint_fast64_t':      'i',
        'intptr_t':           'i',
        'uintptr_t':          'i',
        'intmax_t':           'i',
        'uintmax_t':          'i',
        'ptrdiff_t':          'i',
        'size_t':             'i',
        'ssize_t':            'i',
    }

    def __init__(self, name):
        # Only names listed in ALL_PRIMITIVE_TYPES are accepted.
        assert name in self.ALL_PRIMITIVE_TYPES
        self.name = name
        self.c_name_with_marker = name + '&'

    def is_char_type(self):
        return self.ALL_PRIMITIVE_TYPES[self.name] == 'c'

    def is_integer_type(self):
        return self.ALL_PRIMITIVE_TYPES[self.name] == 'i'

    def is_float_type(self):
        return self.ALL_PRIMITIVE_TYPES[self.name] == 'f'

    def is_complex_type(self):
        return self.ALL_PRIMITIVE_TYPES[self.name] == 'j'

    def build_backend_type(self, ffi, finishlist):
        return global_cache(self, ffi, 'new_primitive_type', self.name)
class UnknownIntegerType(BasePrimitiveType):
    """An integer type (e.g. a typedef) whose exact size and signedness
    are only known after the real compiler has run."""
    _attrs_ = ('name',)

    def __init__(self, name):
        self.name = name
        self.c_name_with_marker = '%s&' % (name,)

    def is_integer_type(self):
        return True

    def build_backend_type(self, ffi, finishlist):
        raise NotImplementedError("integer type '%s' can only be used after "
                                  "compilation" % self.name)
class UnknownFloatType(BasePrimitiveType):
    """A floating-point type whose exact properties are only known after
    the real compiler has run."""
    _attrs_ = ('name', )

    def __init__(self, name):
        self.name = name
        self.c_name_with_marker = '%s&' % (name,)

    def build_backend_type(self, ffi, finishlist):
        raise NotImplementedError("float type '%s' can only be used after "
                                  "compilation" % self.name)
class BaseFunctionType(BaseType):
    """Common base for function types.

    ``args`` holds the argument type models, ``result`` the return type
    model, ``ellipsis`` whether the function is variadic ('...'), and
    ``abi`` an optional calling-convention string (e.g. '__stdcall').
    Subclasses define ``_base_pattern``, the marker pattern for either a
    raw function or a pointer-to-function.
    """
    _attrs_ = ('args', 'result', 'ellipsis', 'abi')

    def __init__(self, args, result, ellipsis, abi=None):
        self.args = args
        self.result = result
        self.ellipsis = ellipsis
        self.abi = abi
        #
        # Build the marker-carrying C name, e.g. 'int(*&)(long, char)'.
        reprargs = [arg._get_c_name() for arg in self.args]
        if self.ellipsis:
            reprargs.append('...')
        reprargs = reprargs or ['void']
        replace_with = self._base_pattern % (', '.join(reprargs),)
        if abi is not None:
            # Insert the ABI right after the opening parenthesis,
            # producing e.g. '(__stdcall *&)(...)'.
            replace_with = replace_with[:1] + abi + ' ' + replace_with[1:]
        self.c_name_with_marker = (
            self.result.c_name_with_marker.replace('&', replace_with))
class RawFunctionType(BaseFunctionType):
    # Models a C type like 'int(int)': the type of a function itself, as
    # opposed to a pointer-to-function.  Backends have no notion of such
    # a type; it only exists temporarily during parsing.
    _base_pattern = '(&)(%s)'
    is_raw_function = True

    def as_function_pointer(self):
        """Return the corresponding pointer-to-function type."""
        return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi)

    def build_backend_type(self, ffi, finishlist):
        raise CDefError("cannot render the type %r: it is a function "
                        "type, not a pointer-to-function type" % (self,))
class FunctionPtrType(BaseFunctionType):
    """Model of a pointer-to-function type, e.g. 'int(*)(int)'."""
    _base_pattern = '(*&)(%s)'

    def build_backend_type(self, ffi, finishlist):
        result_btype = self.result.get_cached_btype(ffi, finishlist)
        arg_btypes = [tp.get_cached_btype(ffi, finishlist)
                      for tp in self.args]
        abi_args = ()
        if self.abi == "__stdcall":
            if not self.ellipsis:    # __stdcall ignored for variadic funcs
                try:
                    abi_args = (ffi._backend.FFI_STDCALL,)
                except AttributeError:
                    # backend without __stdcall support: fall back silently
                    pass
        return global_cache(self, ffi, 'new_function_type',
                            tuple(arg_btypes), result_btype, self.ellipsis,
                            *abi_args)

    def as_raw_function(self):
        """Return the corresponding raw (non-pointer) function type."""
        return RawFunctionType(self.args, self.result, self.ellipsis, self.abi)
class PointerType(BaseType):
    """Model of a C pointer type 'totype *', with optional qualifiers."""
    _attrs_ = ('totype', 'quals')

    def __init__(self, totype, quals=0):
        self.totype = totype
        self.quals = quals
        marker = qualify(quals, " *&")
        # A pointer to an array needs parentheses: 'int(*&)[5]'.
        if totype.is_array_type:
            marker = "(%s)" % (marker.lstrip(),)
        self.c_name_with_marker = totype.c_name_with_marker.replace('&', marker)

    def build_backend_type(self, ffi, finishlist):
        # can_delay=True allows pointers to not-yet-complete structs.
        item_btype = self.totype.get_cached_btype(ffi, finishlist,
                                                  can_delay=True)
        return global_cache(self, ffi, 'new_pointer_type', item_btype)
# Frequently-used pointer types, built once at import time.
voidp_type = PointerType(void_type)

def ConstPointerType(totype):
    # Shortcut for a 'const'-qualified pointer to *totype*.
    return PointerType(totype, Q_CONST)

const_voidp_type = ConstPointerType(void_type)
class NamedPointerType(PointerType):
    """A pointer type known under a typedef name; the typedef name is
    rendered instead of the expanded 'totype *' form."""
    _attrs_ = ('totype', 'name')

    def __init__(self, totype, name, quals=0):
        PointerType.__init__(self, totype, quals)
        self.name = name
        # Override the marker built by PointerType with the typedef name.
        self.c_name_with_marker = '%s&' % (name,)
class ArrayType(BaseType):
    """Model of a C array type 'item[length]'.

    ``length`` may be an int, None (flexible array, 'item[]'), or the
    string '...' (length to be completed by the real compiler).
    """
    _attrs_ = ('item', 'length')
    is_array_type = True

    def __init__(self, item, length):
        self.item = item
        self.length = length
        if length is None:
            suffix = '[]'
        elif length == '...':
            suffix = '[/*...*/]'
        else:
            suffix = '[%s]' % (length,)
        self.c_name_with_marker = self.item.c_name_with_marker.replace(
            '&', '&' + suffix)

    def length_is_unknown(self):
        # True for the '...' placeholder (any string length).
        return isinstance(self.length, str)

    def resolve_length(self, newlength):
        """Return a copy of this array type with a concrete length."""
        return ArrayType(self.item, newlength)

    def build_backend_type(self, ffi, finishlist):
        if self.length_is_unknown():
            raise CDefError("cannot render the type %r: unknown length" %
                            (self,))
        self.item.get_cached_btype(ffi, finishlist)    # force the item BType
        item_ptr = PointerType(self.item).get_cached_btype(ffi, finishlist)
        return global_cache(self, ffi, 'new_array_type', item_ptr, self.length)


# Commonly-needed 'char[]' type.
char_array_type = ArrayType(PrimitiveType('char'), None)
class StructOrUnionOrEnum(BaseTypeByIdentity):
    """Common base for struct, union and enum models (identity-compared).

    ``forcename`` overrides the default 'kind name' rendering, e.g. for
    typedef'ed anonymous structs.
    """
    _attrs_ = ('name',)
    forcename = None

    def build_c_name_with_marker(self):
        if self.forcename:
            name = self.forcename
        else:
            name = '%s %s' % (self.kind, self.name)
        self.c_name_with_marker = name + '&'

    def force_the_name(self, forcename):
        self.forcename = forcename
        self.build_c_name_with_marker()

    def get_official_name(self):
        assert self.c_name_with_marker.endswith('&')
        return self.c_name_with_marker[:-1]
class StructOrUnion(StructOrUnionOrEnum):
    """Model of a C struct or union (``kind`` set by subclasses).

    The backend type is completed either from the parsed field list alone,
    or -- when ``fixedlayout`` is set -- from offsets/sizes measured by
    the real compiler.
    """
    # (fieldofs, fieldsize, totalsize, totalalignment) from the compiler,
    # or None when the backend computes the layout itself.
    fixedlayout = None
    # 0 = not completed, 1 = completion in progress (detects recursion),
    # 2 = fully completed.
    completed = 0
    # True when declared with '...;' and still awaiting the real layout.
    partial = False
    # 0 = not packed; 1 = fully packed (SF_PACKED); any other value is
    # passed through to the backend as the packing limit.
    packed = 0

    def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None):
        self.name = name
        self.fldnames = fldnames
        self.fldtypes = fldtypes
        self.fldbitsize = fldbitsize
        self.fldquals = fldquals
        self.build_c_name_with_marker()

    def anonymous_struct_fields(self):
        # Yield the StructOrUnion types of anonymous members (empty name).
        if self.fldtypes is not None:
            for name, type in zip(self.fldnames, self.fldtypes):
                if name == '' and isinstance(type, StructOrUnion):
                    yield type

    def enumfields(self, expand_anonymous_struct_union=True):
        """Yield (name, type, bitsize, quals) for each field, recursing
        into anonymous nested structs/unions unless told otherwise."""
        fldquals = self.fldquals
        if fldquals is None:
            fldquals = (0,) * len(self.fldnames)
        for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes,
                                              self.fldbitsize, fldquals):
            if (name == '' and isinstance(type, StructOrUnion)
                    and expand_anonymous_struct_union):
                # nested anonymous struct/union
                for result in type.enumfields():
                    yield result
            else:
                yield (name, type, bitsize, quals)

    def force_flatten(self):
        # force the struct or union to have a declaration that lists
        # directly all fields returned by enumfields(), flattening
        # nested anonymous structs/unions.
        names = []
        types = []
        bitsizes = []
        fldquals = []
        for name, type, bitsize, quals in self.enumfields():
            names.append(name)
            types.append(type)
            bitsizes.append(bitsize)
            fldquals.append(quals)
        self.fldnames = tuple(names)
        self.fldtypes = tuple(types)
        self.fldbitsize = tuple(bitsizes)
        self.fldquals = tuple(fldquals)

    def get_cached_btype(self, ffi, finishlist, can_delay=False):
        # Unless delayed, complete the struct immediately rather than via
        # the finishlist.
        BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
                                                     can_delay)
        if not can_delay:
            self.finish_backend_type(ffi, finishlist)
        return BType

    def finish_backend_type(self, ffi, finishlist):
        """Fill in the fields of the already-created backend type."""
        if self.completed:
            if self.completed != 2:
                # completed == 1 means we re-entered while completing.
                raise NotImplementedError("recursive structure declaration "
                                          "for '%s'" % (self.name,))
            return
        BType = ffi._cached_btypes[self]
        #
        self.completed = 1
        #
        if self.fldtypes is None:
            pass    # not completing it: it's an opaque struct
            #
        elif self.fixedlayout is None:
            # Let the backend compute offsets and total size itself.
            fldtypes = [tp.get_cached_btype(ffi, finishlist)
                        for tp in self.fldtypes]
            lst = list(zip(self.fldnames, fldtypes, self.fldbitsize))
            extra_flags = ()
            if self.packed:
                if self.packed == 1:
                    extra_flags = (8,)    # SF_PACKED
                else:
                    extra_flags = (0, self.packed)
            ffi._backend.complete_struct_or_union(BType, lst, self,
                                                  -1, -1, *extra_flags)
            #
        else:
            # Layout measured by the real compiler: verify each field size
            # and resolve '...'-lengths from the measured sizes.
            fldtypes = []
            fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout
            for i in range(len(self.fldnames)):
                fsize = fieldsize[i]
                ftype = self.fldtypes[i]
                #
                if isinstance(ftype, ArrayType) and ftype.length_is_unknown():
                    # fix the length to match the total size
                    BItemType = ftype.item.get_cached_btype(ffi, finishlist)
                    nlen, nrest = divmod(fsize, ffi.sizeof(BItemType))
                    if nrest != 0:
                        self._verification_error(
                            "field '%s.%s' has a bogus size?" % (
                                self.name, self.fldnames[i] or '{}'))
                    ftype = ftype.resolve_length(nlen)
                    self.fldtypes = (self.fldtypes[:i] + (ftype,) +
                                     self.fldtypes[i+1:])
                #
                BFieldType = ftype.get_cached_btype(ffi, finishlist)
                if isinstance(ftype, ArrayType) and ftype.length is None:
                    # flexible array member: occupies no measured space
                    assert fsize == 0
                else:
                    bitemsize = ffi.sizeof(BFieldType)
                    if bitemsize != fsize:
                        self._verification_error(
                            "field '%s.%s' is declared as %d bytes, but is "
                            "really %d bytes" % (self.name,
                                                 self.fldnames[i] or '{}',
                                                 bitemsize, fsize))
                fldtypes.append(BFieldType)
            #
            lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs))
            ffi._backend.complete_struct_or_union(BType, lst, self,
                                                  totalsize, totalalignment)
        self.completed = 2

    def _verification_error(self, msg):
        raise VerificationError(msg)

    def check_not_partial(self):
        # A partial struct with no real layout cannot be used yet.
        if self.partial and self.fixedlayout is None:
            raise VerificationMissing(self._get_c_name())

    def build_backend_type(self, ffi, finishlist):
        self.check_not_partial()
        finishlist.append(self)
        #
        # key=self: structs/unions are cached by identity, not by name.
        return global_cache(self, ffi, 'new_%s_type' % self.kind,
                            self.get_official_name(), key=self)
class StructType(StructOrUnion):
    # 'struct'-flavored StructOrUnion.
    kind = 'struct'
class UnionType(StructOrUnion):
    # 'union'-flavored StructOrUnion.
    kind = 'union'
class EnumType(StructOrUnionOrEnum):
    """Model of a C enum.

    ``enumerators`` and ``enumvalues`` are parallel sequences of names
    and integer values; ``baseinttype`` optionally fixes the underlying
    integer type, otherwise one is guessed in build_baseinttype().
    """
    kind = 'enum'
    partial = False
    partial_resolved = False

    def __init__(self, name, enumerators, enumvalues, baseinttype=None):
        self.name = name
        self.enumerators = enumerators
        self.enumvalues = enumvalues
        self.baseinttype = baseinttype
        self.build_c_name_with_marker()

    def force_the_name(self, forcename):
        StructOrUnionOrEnum.force_the_name(self, forcename)
        if self.forcename is None:
            # Invent an internal '$'-name so the enum can still be cached.
            name = self.get_official_name()
            self.forcename = '$' + name.replace(' ', '_')

    def check_not_partial(self):
        if self.partial and not self.partial_resolved:
            raise VerificationMissing(self._get_c_name())

    def build_backend_type(self, ffi, finishlist):
        self.check_not_partial()
        base_btype = self.build_baseinttype(ffi, finishlist)
        return global_cache(self, ffi, 'new_enum_type',
                            self.get_official_name(),
                            self.enumerators, self.enumvalues,
                            base_btype, key=self)

    def build_baseinttype(self, ffi, finishlist):
        """Pick the underlying integer backend type: the explicit one if
        given, otherwise the smallest of (unsigned) int / (unsigned) long
        that fits all enumerator values."""
        if self.baseinttype is not None:
            return self.baseinttype.get_cached_btype(ffi, finishlist)
        #
        if self.enumvalues:
            smallest_value = min(self.enumvalues)
            largest_value = max(self.enumvalues)
        else:
            import warnings
            try:
                # XXX! The goal is to ensure that the warnings.warn()
                # will not suppress the warning. We want to get it
                # several times if we reach this point several times.
                __warningregistry__.clear()
            except NameError:
                pass
            warnings.warn("%r has no values explicitly defined; "
                          "guessing that it is equivalent to 'unsigned int'"
                          % self._get_c_name())
            smallest_value = largest_value = 0
        if smallest_value < 0:   # needs a signed type
            sign = 1
            candidate1 = PrimitiveType("int")
            candidate2 = PrimitiveType("long")
        else:
            sign = 0
            candidate1 = PrimitiveType("unsigned int")
            candidate2 = PrimitiveType("unsigned long")
        btype1 = candidate1.get_cached_btype(ffi, finishlist)
        btype2 = candidate2.get_cached_btype(ffi, finishlist)
        size1 = ffi.sizeof(btype1)
        size2 = ffi.sizeof(btype2)
        # Range checks use the candidate's bit width (8*size), reserving
        # one bit for the sign when the type is signed.
        if (smallest_value >= ((-1) << (8*size1-1)) and
                largest_value < (1 << (8*size1-sign))):
            return btype1
        if (smallest_value >= ((-1) << (8*size2-1)) and
                largest_value < (1 << (8*size2-sign))):
            return btype2
        raise CDefError("%s values don't all fit into either 'long' "
                        "or 'unsigned long'" % self._get_c_name())
def unknown_type(name, structname=None):
    """Build an opaque StructType standing in for an unknown type *name*."""
    structname = structname if structname is not None else '$%s' % (name,)
    result = StructType(structname, None, None, None)
    result.force_the_name(name)
    result.origin = "unknown_type"
    return result
def unknown_ptr_type(name, structname=None):
    """Build a NamedPointerType to an opaque struct for an unknown
    pointer-like typedef *name*."""
    structname = structname if structname is not None else '$$%s' % (name,)
    opaque = StructType(structname, None, None, None)
    return NamedPointerType(opaque, name)
# Lock protecting lazy creation of the per-backend type caches below.
global_lock = allocate_lock()
# Shared cache of backend types for the C '_cffi_backend' module; values
# are weak so unused BTypes can be garbage-collected.
_typecache_cffi_backend = weakref.WeakValueDictionary()

def get_typecache(backend):
    # returns _typecache_cffi_backend if backend is the _cffi_backend
    # module, or type(backend).__typecache if backend is an instance of
    # CTypesBackend (or some FakeBackend class during tests)
    if isinstance(backend, types.ModuleType):
        return _typecache_cffi_backend
    with global_lock:
        if not hasattr(type(backend), '__typecache'):
            type(backend).__typecache = weakref.WeakValueDictionary()
    return type(backend).__typecache
def global_cache(srctype, ffi, funcname, *args, **kwds):
    """Build (or fetch from ffi._typecache) the backend type produced by
    calling ``ffi._backend.<funcname>(*args)``.

    The cache key defaults to ``(funcname, args)`` but can be overridden
    with the keyword-only argument ``key=`` (used by struct/union/enum
    models, which are cached by identity).  *srctype* is only used in the
    NotImplementedError message.
    """
    key = kwds.pop('key', (funcname, args))
    assert not kwds
    try:
        return ffi._typecache[key]
    except KeyError:
        pass
    try:
        res = getattr(ffi._backend, funcname)(*args)
    except NotImplementedError as e:
        raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e))
    # note that setdefault() on WeakValueDictionary is not atomic
    # and contains a rare bug (http://bugs.python.org/issue19542);
    # we have to use a lock and do it ourselves
    cache = ffi._typecache
    with global_lock:
        res1 = cache.get(key)
        if res1 is None:
            cache[key] = res
            return res
        else:
            # another thread won the race: keep its object
            return res1
def pointer_cache(ffi, BType):
    # Cached 'BType *' pointer type; '?' is a dummy srctype used only in
    # error messages.
    return global_cache('?', ffi, 'new_pointer_type', BType)
def attach_exception_info(e, name):
    """Prefix *name* onto the first argument of exception *e*, in place.

    Only applies when the first argument is a plain string (exact type
    check, so str subclasses are deliberately left alone); otherwise the
    exception is untouched.
    """
    args = e.args
    if args and type(args[0]) is str:
        e.args = ('%s: %s' % (name, args[0]),) + args[1:]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@cffi@py2@cffi@model.py@.PATH_END.py
|
{
"filename": "_namelengthsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/table/hoverlabel/_namelengthsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``table.hoverlabel.namelengthsrc`` property."""

    def __init__(
        self, plotly_name="namelengthsrc", parent_name="table.hoverlabel", **kwargs
    ):
        edit_type = kwargs.pop("edit_type", "none")
        super(NamelengthsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@table@hoverlabel@_namelengthsrc.py@.PATH_END.py
|
{
"filename": "calc_background.py",
"repo_name": "kpicteam/kpic_pipeline",
"repo_path": "kpic_pipeline_extracted/kpic_pipeline-main/examples/calc_background.py",
"type": "Python"
}
|
# EXAMPLE:
# JX: compute master background and bad pixel map from background images
from glob import glob
import os
import sys
from kpicdrp import background
import matplotlib.pyplot as plt
import numpy as np
from kpicdrp.data import Dataset, DetectorFrame
from kpicdrp.caldb import det_caldb
from pipeline_utils import get_filenums, get_filelist
# Prompt for the UT observation date, e.g. "20220723".
obsdate = input("Enter UT Date (e.g.20220723) >>> ")
obsdate = obsdate.strip()
print(obsdate)
main_calibdir = os.path.join("/scr3/kpic/KPIC_Campaign/calibs/", obsdate)
raw_datadir = os.path.join("/scr3/kpic/Data/", obsdate[2:], "spec")
print(raw_datadir)
# Raw frame filename templates; the "_alt" variant covers frames written
# after UT midnight (date + 1).
# NOTE(review): int()+1 does not handle month/year rollover (e.g.
# 220731 -> 220732) -- confirm this is acceptable for the observing runs.
filestr = "nspec"+obsdate[2:]+"_{0:04d}.fits"
filestr_alt = "nspec"+ str(int(obsdate[2:])+1) +"_{0:04d}.fits"
print(filestr, filestr_alt)
bkgddir = os.path.join(main_calibdir, "bkgd_bpmap/") # main data dir
if not os.path.exists(bkgddir):
    os.makedirs(bkgddir)
# check if raw directory exists. If not, create it and pull files over
raw_dir = os.path.join(bkgddir,"raw")
if not os.path.exists(raw_dir):
    os.makedirs(raw_dir)
# read calib config file
sys.path.insert(1, main_calibdir)
from calib_info import calib_info
print('Loaded calibration config file for ' + obsdate)
filenums = get_filenums(calib_info['raw_bkgd_range'])
# Only copy files if there are less files in raw dir than bkgd filenums
# figure out which are raw frame nums
## Hack up here ##
# If passing filestr_alt, it will try both
filelist = get_filelist(raw_dir, filenums, filestr, raw_datadir, filestr_alt=filestr_alt)
# if previous step failed, just grab all files in raw
if len(filelist) == 0:
    filelist = glob(os.path.join(raw_dir,"*.fits"))
print(filelist)
# read in the dataset
raw_dataset = Dataset(filelist=filelist, dtype=DetectorFrame)
# For multiple tint and coadds in one folder
# It will save the master backgrounds and bad pixel maps to bkgddir.
master_bkgds, badpixmaps, unique_tint, unique_coadds = background.process_backgrounds(raw_dataset, save_loc=bkgddir, fileprefix=obsdate, caldb_save_loc=det_caldb)
# Plot the resulting master backgrounds and bad pixels
# (top row: master backgrounds, bottom row: bad-pixel maps, one column
# per unique tint/coadd combination).
plt.figure(1, figsize=(10,10))
for k,(background_med,badpixmap, tint,coadd) in enumerate(zip(master_bkgds,badpixmaps,unique_tint,unique_coadds)):
    print(k)
    plt.subplot(2,len(master_bkgds),k+1)
    plt.title("tint={0} coadd={1}".format(tint,coadd))
    plt.imshow(background_med.data, interpolation="nearest", origin="lower")
    # Clip the color scale to twice the median background level.
    med_val = np.nanmedian(background_med.data)
    plt.clim([0,2*med_val])
    plt.subplot(2,len(master_bkgds),k+len(master_bkgds)+1)
    plt.imshow(badpixmap.data, interpolation="nearest", origin="lower")
plt.subplot(2,len(master_bkgds),1)
plt.ylabel("Master backgrounds")
plt.subplot(2,len(master_bkgds),len(master_bkgds)+1)
plt.ylabel("Bad pix maps")
plt.savefig( os.path.join(bkgddir, 'bkgd_bpmap.png'), dpi=200 )
plt.show()
|
kpicteamREPO_NAMEkpic_pipelinePATH_START.@kpic_pipeline_extracted@kpic_pipeline-main@examples@calc_background.py@.PATH_END.py
|
{
"filename": "prop_load_fftw_wisdom.py",
"repo_name": "ajeldorado/falco-python",
"repo_path": "falco-python_extracted/falco-python-master/falco/proper/prop_load_fftw_wisdom.py",
"type": "Python"
}
|
# Copyright 2019 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
#
# Written by J. Krist - 19 April 2019
import falco.proper as proper
import numpy as np
import os
import pickle
def prop_load_fftw_wisdom( gridsize, nthreads ):
    """Load previously saved pyFFTW wisdom for this grid size and thread
    count, if a matching wisdom file exists in the user's home directory.

    Parameters
    ----------
    gridsize : int
        Wavefront grid dimension the wisdom was generated for.
    nthreads : int
        Number of FFTW threads the wisdom was generated for.

    Side effects
    ------------
    Sets ``proper.fftw_use_wisdom`` to True if a wisdom file was found and
    imported, False otherwise.  No-op when FFTI mode is enabled.
    """
    # FFTI (Intel FFT) mode does not use FFTW wisdom at all.
    # (Fixed: was 'proper.use_ffti == True', a PEP8 E712 comparison.)
    if proper.use_ffti:
        return

    try:
        import pyfftw
    except ImportError:
        raise ImportError("pyfftw is not installed. Stopping.")

    wisdompath = os.path.join( os.path.expanduser('~'), '.proper_{}pix'.format(str(gridsize)) + '{}threads'.format(str(nthreads)) + '_wisdomfile' )

    if os.path.exists(wisdompath):
        pyfftw.forget_wisdom()
        # NOTE: wisdom files are unpickled -- pickle is unsafe on untrusted
        # data, so only load files written by this package itself.
        with open(wisdompath, 'rb') as infile:
            wisdom = pickle.load(infile)
        pyfftw.import_wisdom(wisdom)
        proper.fftw_use_wisdom = True
    else:
        proper.fftw_use_wisdom = False

    return
|
ajeldoradoREPO_NAMEfalco-pythonPATH_START.@falco-python_extracted@falco-python-master@falco@proper@prop_load_fftw_wisdom.py@.PATH_END.py
|
{
"filename": "_uirevision.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/ohlc/_uirevision.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``ohlc.uirevision`` property."""

    def __init__(self, plotly_name="uirevision", parent_name="ohlc", **kwargs):
        edit_type = kwargs.pop("edit_type", "none")
        super(UirevisionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@ohlc@_uirevision.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/indicator/title/__init__.py",
"type": "Python"
}
|
import sys
# On Python < 3.7 (no module-level __getattr__, PEP 562), import the
# submodule eagerly; otherwise defer it until first attribute access.
if sys.version_info < (3, 7):
    from ._font import Font
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@indicator@title@__init__.py@.PATH_END.py
|
{
"filename": "softdtw_variants.py",
"repo_name": "tslearn-team/tslearn",
"repo_path": "tslearn_extracted/tslearn-main/tslearn/metrics/softdtw_variants.py",
"type": "Python"
}
|
import numpy as np
from joblib import Parallel, delayed
from numba import njit
from sklearn.utils import check_random_state
from tslearn.backend import instantiate_backend
from tslearn.utils import (
check_equal_size,
to_time_series,
to_time_series_dataset,
ts_size,
)
from .dtw_variants import dtw, dtw_path
from .soft_dtw_fast import (
_jacobian_product_sq_euc,
_njit_jacobian_product_sq_euc,
_njit_soft_dtw,
_njit_soft_dtw_grad,
_soft_dtw,
_soft_dtw_grad,
)
from .utils import _cdist_generic
__author__ = "Romain Tavenard romain.tavenard[at]univ-rennes2.fr"

# Integer codes passed to the low-level DTW routines for each global
# constraint name (None or "" means no constraint).
GLOBAL_CONSTRAINT_CODE = {None: 0, "": 0, "itakura": 1, "sakoe_chiba": 2}
# Metric names accepted by tslearn / supporting variable-length series.
TSLEARN_VALID_METRICS = ["dtw", "gak", "softdtw", "sax"]
VARIABLE_LENGTH_METRICS = ["dtw", "gak", "softdtw", "sax"]
def _gak(gram, be=None):
    """Backend-generic Global Alignment Kernel value from a Gram matrix.

    Parameters
    ----------
    gram : array-like, shape=(sz1, sz2)
        Gram matrix.
    be : Backend object or string or None
        Backend specifier, resolved through ``instantiate_backend``
        (NumPy, PyTorch, or inferred from the input array).

    Returns
    -------
    float
        Kernel value.
    """
    be = instantiate_backend(be, gram)
    gram = be.array(gram)
    n_rows, n_cols = be.shape(gram)
    # Dynamic program over an (n_rows+1, n_cols+1) accumulator.
    acc = be.zeros((n_rows + 1, n_cols + 1))
    acc[0, 0] = 1.0
    for r in range(n_rows):
        for c in range(n_cols):
            acc[r + 1, c + 1] = gram[r, c] * (
                acc[r, c + 1] + acc[r + 1, c] + acc[r, c]
            )
    return acc[n_rows, n_cols]
@njit(nogil=True)
def _njit_gak(gram):
    """Numba-compiled Global Alignment Kernel value from a Gram matrix.

    Parameters
    ----------
    gram : array-like, shape=(sz1, sz2)
        Gram matrix.

    Returns
    -------
    float
        Kernel value.
    """
    n_rows, n_cols = gram.shape
    # Dynamic program over an (n_rows+1, n_cols+1) accumulator; kept as
    # plain nested loops so numba can compile it.
    acc = np.zeros((n_rows + 1, n_cols + 1))
    acc[0, 0] = 1.0
    for r in range(n_rows):
        for c in range(n_cols):
            acc[r + 1, c + 1] = gram[r, c] * (
                acc[r, c + 1] + acc[r + 1, c] + acc[r, c]
            )
    return acc[n_rows, n_cols]
def _gak_gram(s1, s2, sigma=1.0, be=None):
    """Compute the GAK Gram matrix between two (possibly multivariate)
    time series.

    Parameters
    ----------
    s1 : array-like, shape=(sz1, d)
        A time series.
    s2 : array-like, shape=(sz2, d)
        Another time series.
    sigma : float (default 1.)
        Bandwidth of the internal gaussian kernel used for GAK.
    be : Backend object or string or None
        Backend specifier, resolved through ``instantiate_backend``.

    Returns
    -------
    gram : array-like, shape=(sz1, sz2)
        Gram matrix.
    """
    be = instantiate_backend(be, s1)
    denom = 2 * sigma ** 2
    log_k = -be.cdist(s1, s2, "sqeuclidean") / denom
    log_k = be.array(log_k)
    log_k = log_k - be.log(2 - be.exp(log_k))
    return be.exp(log_k)
def unnormalized_gak(s1, s2, sigma=1.0, be=None):
    r"""Compute the (unnormalized) Global Alignment Kernel (GAK) between
    two (possibly multidimensional) time series.

    The two series may have different lengths but must share the same
    dimensionality.  GAK was originally presented in [1]_.

    Parameters
    ----------
    s1 : array-like, shape=(sz1, d) or (sz1,)
        A time series.
        If shape is (sz1,), the time series is assumed to be univariate.
    s2 : array-like, shape=(sz2, d) or (sz2,)
        Another time series.
        If shape is (sz2,), the time series is assumed to be univariate.
    sigma : float (default 1.)
        Bandwidth of the internal gaussian kernel used for GAK.
    be : Backend object or string or None
        Backend. If `be` is an instance of the class `NumPyBackend` or the string `"numpy"`,
        the NumPy backend is used.
        If `be` is an instance of the class `PyTorchBackend` or the string `"pytorch"`,
        the PyTorch backend is used.
        If `be` is `None`, the backend is determined by the input arrays.
        See our :ref:`dedicated user-guide page <backend>` for more information.

    Returns
    -------
    float
        Kernel value

    Examples
    --------
    >>> unnormalized_gak([1, 2, 3],
    ...                  [1., 2., 2., 3.],
    ...                  sigma=2.)  # doctest: +ELLIPSIS
    15.358...
    >>> unnormalized_gak([1, 2, 3],
    ...                  [1., 2., 2., 3., 4.])  # doctest: +ELLIPSIS
    3.166...

    See Also
    --------
    gak : normalized version of GAK that ensures that k(x,x) = 1
    cdist_gak : Compute cross-similarity matrix using Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """
    be = instantiate_backend(be, s1, s2)
    ts1 = to_time_series(s1, remove_nans=True, be=be)
    ts2 = to_time_series(s2, remove_nans=True, be=be)
    gram = _gak_gram(ts1, ts2, sigma=sigma, be=be)
    # NumPy inputs go through the numba-compiled kernel; other backends
    # use the generic implementation.
    if be.is_numpy:
        return _njit_gak(gram)
    return _gak(gram, be=be)
def gak(s1, s2, sigma=1.0, be=None):
    r"""Compute the normalized Global Alignment Kernel (GAK) between two
    (possibly multidimensional) time series.

    The two series may have different lengths but must share the same
    dimensionality.  GAK was originally presented in [1]_.
    Normalization ensures :math:`k(x,x)=1` for all :math:`x` and
    :math:`k(x,y) \in [0, 1]` for all :math:`x, y`.

    Parameters
    ----------
    s1 : array-like, shape=(sz1, d) or (sz1,)
        A time series.
        If shape is (sz1,), the time series is assumed to be univariate.
    s2 : array-like, shape=(sz2, d) or (sz2,)
        Another time series.
        If shape is (sz2,), the time series is assumed to be univariate.
    sigma : float (default 1.)
        Bandwidth of the internal gaussian kernel used for GAK.
    be : Backend object or string or None
        Backend. If `be` is an instance of the class `NumPyBackend` or the string `"numpy"`,
        the NumPy backend is used.
        If `be` is an instance of the class `PyTorchBackend` or the string `"pytorch"`,
        the PyTorch backend is used.
        If `be` is `None`, the backend is determined by the input arrays.
        See our :ref:`dedicated user-guide page <backend>` for more information.

    Returns
    -------
    float
        Kernel value

    Examples
    --------
    >>> gak([1, 2, 3], [1., 2., 2., 3.], sigma=2.)  # doctest: +ELLIPSIS
    0.839...
    >>> gak([1, 2, 3], [1., 2., 2., 3., 4.])  # doctest: +ELLIPSIS
    0.273...

    See Also
    --------
    cdist_gak : Compute cross-similarity matrix using Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """
    be = instantiate_backend(be, s1, s2)
    s1 = be.array(s1)
    s2 = be.array(s2)
    # Normalize k(x, y) by sqrt(k(x, x) * k(y, y)).
    kxy = unnormalized_gak(s1, s2, sigma=sigma, be=be)
    kxx = unnormalized_gak(s1, s1, sigma=sigma, be=be)
    kyy = unnormalized_gak(s2, s2, sigma=sigma, be=be)
    return kxy / be.sqrt(kxx * kyy)
def cdist_gak(dataset1, dataset2=None, sigma=1.0, n_jobs=None, verbose=0, be=None):
    r"""Compute cross-similarity matrix using Global Alignment kernel (GAK).

    GAK was originally presented in [1]_.

    Parameters
    ----------
    dataset1 : array-like, shape=(n_ts1, sz1, d) or (n_ts1, sz1) or (sz1,)
        A dataset of time series.
        If shape is (n_ts1, sz1), the dataset is composed of univariate time series.
        If shape is (sz1,), the dataset is composed of a unique univariate time series.
    dataset2 : None or array-like, shape=(n_ts2, sz2, d) or (n_ts2, sz2) or (sz2,) (default: None)
        Another dataset of time series.
        If `None`, self-similarity of `dataset1` is returned.
        If shape is (n_ts2, sz2), the dataset is composed of univariate time series.
        If shape is (sz2,), the dataset is composed of a unique univariate time series.
    sigma : float (default 1.)
        Bandwidth of the internal gaussian kernel used for GAK
    n_jobs : int or None, optional (default=None)
        The number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See scikit-learns'
        `Glossary <https://scikit-learn.org/stable/glossary.html#term-n-jobs>`__
        for more details.
    verbose : int, optional (default=0)
        The verbosity level: if non zero, progress messages are printed.
        Above 50, the output is sent to stdout.
        The frequency of the messages increases with the verbosity level.
        If it more than 10, all iterations are reported.
        `Glossary <https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation>`__
        for more details.
    be : Backend object or string or None
        Backend. If `be` is an instance of the class `NumPyBackend` or the string `"numpy"`,
        the NumPy backend is used.
        If `be` is an instance of the class `PyTorchBackend` or the string `"pytorch"`,
        the PyTorch backend is used.
        If `be` is `None`, the backend is determined by the input arrays.
        See our :ref:`dedicated user-guide page <backend>` for more information.

    Returns
    -------
    array-like, shape=(n_ts1, n_ts2)
        Cross-similarity matrix.

    Examples
    --------
    >>> cdist_gak([[1, 2, 2, 3], [1., 2., 3., 4.]], sigma=2.)
    array([[1.        , 0.65629661],
           [0.65629661, 1.        ]])
    >>> cdist_gak([[1, 2, 2], [1., 2., 3., 4.]],
    ...           [[1, 2, 2, 3], [1., 2., 3., 4.], [1, 2, 2, 3]],
    ...           sigma=2.)
    array([[0.71059484, 0.29722877, 0.71059484],
           [0.65629661, 1.        , 0.65629661]])

    See Also
    --------
    gak : Compute Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """  # noqa: E501
    be = instantiate_backend(be, dataset1, dataset2)
    # Pairwise unnormalized GAK values, including the diagonal (needed
    # below for normalization in the self-similarity case).
    unnormalized_matrix = _cdist_generic(
        dist_fun=unnormalized_gak,
        dataset1=dataset1,
        dataset2=dataset2,
        n_jobs=n_jobs,
        verbose=verbose,
        sigma=sigma,
        compute_diagonal=True,
        be=be,
    )
    dataset1 = to_time_series_dataset(dataset1, be=be)
    # Normalize: K[i, j] / sqrt(k(x_i, x_i) * k(y_j, y_j)), implemented
    # as D_left @ K @ D_right with diagonal scaling matrices.
    if dataset2 is None:
        # Self-similarity: the self-kernel values are already on the
        # diagonal of the unnormalized matrix.
        diagonal = be.diag(be.sqrt(1.0 / be.diag(unnormalized_matrix)))
        diagonal_left = diagonal_right = diagonal
    else:
        dataset2 = to_time_series_dataset(dataset2, be=be)
        # Self-kernel values of each dataset, computed in parallel.
        diagonal_left = Parallel(n_jobs=n_jobs, prefer="threads", verbose=verbose)(
            delayed(unnormalized_gak)(dataset1[i], dataset1[i], sigma=sigma, be=be)
            for i in range(len(dataset1))
        )
        diagonal_right = Parallel(n_jobs=n_jobs, prefer="threads", verbose=verbose)(
            delayed(unnormalized_gak)(dataset2[j], dataset2[j], sigma=sigma, be=be)
            for j in range(len(dataset2))
        )
        diagonal_left = be.diag(1.0 / be.sqrt(be.array(diagonal_left)))
        diagonal_right = be.diag(1.0 / be.sqrt(be.array(diagonal_right)))
    return diagonal_left @ unnormalized_matrix @ diagonal_right
def sigma_gak(dataset, n_samples=100, random_state=None, be=None):
    r"""Suggest a bandwidth :math:`\sigma` for the Global Alignment kernel.

    Following the heuristic introduced in [1]_, ``n_samples`` individual
    time points are drawn from the dataset, the median of their pairwise
    Euclidean distances is computed, and that median is scaled by
    :math:`\sqrt{sz}` where `sz` is the (common) series length.

    Parameters
    ----------
    dataset : array-like, shape=(n_ts, sz, d) or (n_ts, sz1) or (sz,)
        A dataset of time series.
        If shape is (n_ts, sz), the dataset is composed of univariate time series.
        If shape is (sz,), the dataset is composed of a unique univariate time series.
    n_samples : int (default: 100)
        Number of samples on which the median distance should be estimated.
    random_state : integer or numpy.RandomState or None (default: None)
        The generator used to draw the samples. If an integer is given, it
        fixes the seed. Defaults to the global numpy random number generator.
    be : Backend object or string or None
        Backend ("numpy", "pytorch", the corresponding backend instances, or
        `None` to infer the backend from the input arrays).
        See our :ref:`dedicated user-guide page <backend>` for more information.

    Returns
    -------
    float
        Suggested bandwidth (:math:`\sigma`) for the Global Alignment kernel.

    Examples
    --------
    >>> dataset = [[1, 2, 2, 3], [1., 2., 3., 4.]]
    >>> sigma_gak(dataset=dataset,
    ...           n_samples=200,
    ...           random_state=0)  # doctest: +ELLIPSIS
    2.0...

    See Also
    --------
    gak : Compute Global Alignment kernel
    cdist_gak : Compute cross-similarity matrix using Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """
    be = instantiate_backend(be, dataset)
    rng = check_random_state(random_state)
    dataset = to_time_series_dataset(dataset, be=be)
    n_ts, sz, d = be.shape(dataset)
    if not check_equal_size(dataset, be=be):
        # Variable-length dataset: restrict every series to the shortest
        # effective length so that every sampled time point is valid.
        sz = be.min([ts_size(ts) for ts in dataset])
    # Sample with replacement only when the pool of individual time points
    # is smaller than the number of requested samples.
    with_replacement = n_ts * sz < n_samples
    sample_indices = rng.choice(n_ts * sz, size=n_samples, replace=with_replacement)
    sampled_points = dataset[:, :sz, :].reshape((-1, d))[sample_indices]
    dists = be.pdist(sampled_points, metric="euclidean")
    return be.median(dists) * be.sqrt(sz)
def gamma_soft_dtw(dataset, n_samples=100, random_state=None, be=None):
    r"""Suggest a :math:`\gamma` value for GAK/Soft-DTW.

    Following the heuristic of [1]_, the suggested value is
    :math:`2 \sigma^2`, where :math:`\sigma` is the bandwidth proposed by
    :func:`sigma_gak` for the same dataset.

    Parameters
    ----------
    dataset : array-like, shape=(n_ts, sz, d) or (n_ts, sz1) or (sz,)
        A dataset of time series.
        If shape is (n_ts, sz), the dataset is composed of univariate time series.
        If shape is (sz,), the dataset is composed of a unique univariate time series.
    n_samples : int (default: 100)
        Number of samples on which the median distance should be estimated.
    random_state : integer or numpy.RandomState or None (default: None)
        The generator used to draw the samples. If an integer is given, it
        fixes the seed. Defaults to the global numpy random number generator.
    be : Backend object or string or None
        Backend ("numpy", "pytorch", the corresponding backend instances, or
        `None` to infer the backend from the input arrays).
        See our :ref:`dedicated user-guide page <backend>` for more information.

    Returns
    -------
    float
        Suggested :math:`\gamma` parameter for the Soft-DTW.

    Examples
    --------
    >>> dataset = [[1, 2, 2, 3], [1., 2., 3., 4.]]
    >>> gamma_soft_dtw(dataset=dataset,
    ...                n_samples=200,
    ...                random_state=0)  # doctest: +ELLIPSIS
    8.0...

    See Also
    --------
    sigma_gak : Compute sigma parameter for Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """
    be = instantiate_backend(be, dataset)
    sigma = sigma_gak(
        dataset=dataset, n_samples=n_samples, random_state=random_state, be=be
    )
    return 2.0 * sigma**2
def soft_dtw(ts1, ts2, gamma=1.0, be=None, compute_with_backend=False):
    r"""Compute the Soft-DTW similarity measure between two time series.

    Soft-DTW [1]_ replaces the hard minimum of DTW by a soft-min of
    parameter :math:`\gamma` (see our
    :ref:`user-guide page on DTW and its variants<dtw>`):

    .. math::

        \text{soft-DTW}_{\gamma}(X, Y) =
            \min_{\pi}{}^\gamma \sum_{(i, j) \in \pi} \|X_i, Y_j\|^2

    In the limit case :math:`\gamma = 0`, the soft-min becomes a hard min
    and the value equals the square of the DTW similarity measure.

    Parameters
    ----------
    ts1 : array-like, shape=(sz1, d) or (sz1,)
        A time series. If shape is (sz1,), the series is assumed univariate.
    ts2 : array-like, shape=(sz2, d) or (sz2,)
        Another time series. If shape is (sz2,), the series is assumed univariate.
    gamma : float (default 1.)
        Gamma parameter for Soft-DTW.
    be : Backend object or string or None
        Backend ("numpy", "pytorch", the corresponding backend instances, or
        `None` to infer the backend from the input arrays).
        See our :ref:`dedicated user-guide page <backend>` for more information.
    compute_with_backend : bool, default=False
        This parameter has no influence when the NumPy backend is used.
        With another backend, if `True`, the computation is carried out with
        that backend (e.g. keeping PyTorch autograd graphs intact); if
        `False`, a conversion to NumPy may be used to accelerate it.

    Returns
    -------
    float
        Similarity

    Examples
    --------
    >>> soft_dtw([1, 2, 2, 3],
    ...          [1., 2., 3., 4.],
    ...          gamma=1.)  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    -0.89...
    >>> soft_dtw([1, 2, 3, 3],
    ...          [1., 2., 2.1, 3.2],
    ...          gamma=0.01)  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    0.089...

    See Also
    --------
    cdist_soft_dtw : Cross similarity matrix between time series datasets

    References
    ----------
    .. [1] M. Cuturi, M. Blondel "Soft-DTW: a Differentiable Loss Function for
       Time-Series," ICML 2017.
    """
    be = instantiate_backend(be, ts1, ts2)
    ts1 = be.array(ts1)
    ts2 = be.array(ts2)
    if gamma == 0.0:
        # Hard-min limit: soft-DTW degenerates to the squared DTW score.
        return dtw(ts1, ts2, be=be) ** 2
    # Truncate any padding before building the pairwise distance matrix.
    dist_matrix = SquaredEuclidean(ts1[: ts_size(ts1)], ts2[: ts_size(ts2)], be=be)
    sdtw = SoftDTW(
        dist_matrix,
        gamma=gamma,
        be=be,
        compute_with_backend=compute_with_backend,
    )
    return sdtw.compute()
def soft_dtw_alignment(ts1, ts2, gamma=1.0, be=None, compute_with_backend=False):
    r"""Compute Soft-DTW between two time series together with the
    soft-alignment matrix.

    Soft-DTW [1]_ replaces the hard minimum of DTW by a soft-min of
    parameter :math:`\gamma` (see our
    :ref:`user-guide page on DTW and its variants<dtw>`):

    .. math::

        \text{soft-DTW}_{\gamma}(X, Y) =
            \min_{\pi}{}^\gamma \sum_{(i, j) \in \pi} \|X_i, Y_j\|^2

    In the limit case :math:`\gamma = 0`, the soft-min becomes a hard min
    and the value equals the square of the DTW similarity measure.

    Parameters
    ----------
    ts1 : array-like, shape=(sz1, d) or (sz1,)
        A time series. If shape is (sz1,), the series is assumed univariate.
    ts2 : array-like, shape=(sz2, d) or (sz2,)
        Another time series. If shape is (sz2,), the series is assumed univariate.
    gamma : float (default 1.)
        Gamma parameter for Soft-DTW.
    be : Backend object or string or None
        Backend ("numpy", "pytorch", the corresponding backend instances, or
        `None` to infer the backend from the input arrays).
        See our :ref:`dedicated user-guide page <backend>` for more information.
    compute_with_backend : bool, default=False
        This parameter has no influence when the NumPy backend is used.
        With another backend, if `True`, the computation is carried out with
        that backend (e.g. keeping PyTorch autograd graphs intact); if
        `False`, a conversion to NumPy may be used to accelerate it.

    Returns
    -------
    array-like, shape=(sz1, sz2)
        Soft-alignment matrix
    float
        Similarity

    Examples
    --------
    >>> a, dist = soft_dtw_alignment([1, 2, 2, 3],
    ...                              [1., 2., 3., 4.],
    ...                              gamma=1.)  # doctest: +ELLIPSIS
    >>> dist
    -0.89...
    >>> a  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    array([[1.00...e+00, 1.88...e-01, 2.83...e-04, 4.19...e-11],
           [3.40...e-01, 8.17...e-01, 8.87...e-02, 3.94...e-05],
           [5.05...e-02, 7.09...e-01, 5.30...e-01, 6.98...e-03],
           [1.37...e-04, 1.31...e-01, 7.30...e-01, 1.00...e+00]])

    See Also
    --------
    soft_dtw : Returns soft-DTW score alone

    References
    ----------
    .. [1] M. Cuturi, M. Blondel "Soft-DTW: a Differentiable Loss Function for
       Time-Series," ICML 2017.
    """
    be = instantiate_backend(be, ts1, ts2)
    ts1 = be.array(ts1)
    ts2 = be.array(ts2)
    if gamma == 0.0:
        # Hard-min limit: the alignment is the binary optimal DTW path and
        # the score is the squared DTW distance.
        path, dist = dtw_path(ts1, ts2, be=be)
        alignment = be.zeros((ts_size(ts1), ts_size(ts2)))
        for i, j in path:
            alignment[i, j] = 1.0
        return alignment, dist**2
    sdtw = SoftDTW(
        SquaredEuclidean(ts1[: ts_size(ts1)], ts2[: ts_size(ts2)], be=be),
        gamma=gamma,
        be=be,
        compute_with_backend=compute_with_backend,
    )
    # grad() must be called after compute(); it yields the soft-alignment.
    score = sdtw.compute()
    return sdtw.grad(), score
def cdist_soft_dtw(dataset1, dataset2=None, gamma=1.0, be=None, compute_with_backend=False):
    r"""Compute cross-similarity matrix using Soft-DTW metric.

    Soft-DTW was originally presented in [1]_ and is
    discussed in more details in our
    :ref:`user-guide page on DTW and its variants<dtw>`.

    Soft-DTW is computed as:

    .. math::

        \text{soft-DTW}_{\gamma}(X, Y) =
            \min_{\pi}{}^\gamma \sum_{(i, j) \in \pi} \|X_i, Y_j\|^2

    where :math:`\min^\gamma` is the soft-min operator of parameter
    :math:`\gamma`.

    In the limit case :math:`\gamma = 0`, :math:`\min^\gamma` reduces to a
    hard-min operator and soft-DTW is defined as the square of the DTW
    similarity measure.

    Parameters
    ----------
    dataset1 : array-like, shape=(n_ts1, sz1, d) or (n_ts1, sz1) or (sz1,)
        A dataset of time series.
        If shape is (n_ts1, sz1), the dataset is composed of univariate time series.
        If shape is (sz1,), the dataset is composed of a unique univariate time series.
    dataset2 : None or array-like, shape=(n_ts2, sz2, d) or (n_ts2, sz2) or (sz2,) (default: None)
        Another dataset of time series. If `None`, self-similarity of
        `dataset1` is returned.
        If shape is (n_ts2, sz2), the dataset is composed of univariate time series.
        If shape is (sz2,), the dataset is composed of a unique univariate time series.
    gamma : float (default 1.)
        Gamma parameter for Soft-DTW.
    be : Backend object or string or None
        Backend. If `be` is an instance of the class `NumPyBackend` or the string `"numpy"`,
        the NumPy backend is used.
        If `be` is an instance of the class `PyTorchBackend` or the string `"pytorch"`,
        the PyTorch backend is used.
        If `be` is `None`, the backend is determined by the input arrays.
        See our :ref:`dedicated user-guide page <backend>` for more information.
    compute_with_backend : bool, default=False
        This parameter has no influence when the NumPy backend is used.
        When a backend different from NumPy is used (cf parameter `be`):
        If `True`, the computation is done with the corresponding backend.
        If `False`, a conversion to the NumPy backend can be used to accelerate the computation.

    Returns
    -------
    array-like, shape=(n_ts1, n_ts2)
        Cross-similarity matrix.

    Examples
    --------
    >>> cdist_soft_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]], gamma=.01)
    array([[-0.01098612,  1.        ],
           [ 1.        ,  0.        ]])
    >>> cdist_soft_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]],
    ...                [[1, 2, 2, 3], [1., 2., 3., 4.]], gamma=.01)
    array([[-0.01098612,  1.        ],
           [ 1.        ,  0.        ]])

    See Also
    --------
    soft_dtw : Compute Soft-DTW
    cdist_soft_dtw_normalized : Cross similarity matrix between time series
        datasets using a normalized version of Soft-DTW

    References
    ----------
    .. [1] M. Cuturi, M. Blondel "Soft-DTW: a Differentiable Loss Function for
       Time-Series," ICML 2017.
    """  # noqa: E501
    be = instantiate_backend(be, dataset1, dataset2)
    dataset1 = to_time_series_dataset(dataset1, dtype=be.float64, be=be)
    if dataset2 is None:
        # Self-similarity: the matrix is symmetric, so the lower triangle
        # is mirrored from the upper one instead of being recomputed.
        dataset2 = dataset1
        self_similarity = True
    else:
        dataset2 = to_time_series_dataset(dataset2, dtype=be.float64, be=be)
        self_similarity = False
    dists = be.empty((dataset1.shape[0], dataset2.shape[0]))
    # Hoist the variable-length truncation out of the double loop: the
    # previous implementation recomputed ts_size(ts2) and the slice for
    # every (i, j) pair, i.e. n_ts1 times per series of dataset2.
    if check_equal_size(dataset1, be=be):
        dataset1_short = list(dataset1)
    else:
        dataset1_short = [ts[: ts_size(ts)] for ts in dataset1]
    if check_equal_size(dataset2, be=be):
        dataset2_short = list(dataset2)
    else:
        dataset2_short = [ts[: ts_size(ts)] for ts in dataset2]
    for i, ts1_short in enumerate(dataset1_short):
        for j, ts2_short in enumerate(dataset2_short):
            if self_similarity and j < i:
                dists[i, j] = dists[j, i]
            else:
                dists[i, j] = soft_dtw(
                    ts1_short,
                    ts2_short,
                    gamma=gamma,
                    be=be,
                    compute_with_backend=compute_with_backend,
                )
    return dists
def cdist_soft_dtw_normalized(dataset1, dataset2=None, gamma=1.0, be=None, compute_with_backend=False):
    r"""Compute cross-similarity matrix using a normalized version of the
    Soft-DTW metric.

    Soft-DTW was originally presented in [1]_ and is
    discussed in more details in our
    :ref:`user-guide page on DTW and its variants<dtw>`.

    Soft-DTW is computed as:

    .. math::

        \text{soft-DTW}_{\gamma}(X, Y) =
            \min_{\pi}{}^\gamma \sum_{(i, j) \in \pi} \|X_i, Y_j\|^2

    where :math:`\min^\gamma` is the soft-min operator of parameter
    :math:`\gamma`.

    In the limit case :math:`\gamma = 0`, :math:`\min^\gamma` reduces to a
    hard-min operator and soft-DTW is defined as the square of the DTW
    similarity measure.

    This normalized version is defined as:

    .. math::

        \text{norm-soft-DTW}_{\gamma}(X, Y) =
            \text{soft-DTW}_{\gamma}(X, Y) -
            \frac{1}{2} \left(\text{soft-DTW}_{\gamma}(X, X) +
            \text{soft-DTW}_{\gamma}(Y, Y)\right)

    and ensures that all returned values are positive and that
    :math:`\text{norm-soft-DTW}_{\gamma}(X, X) = 0`.

    Parameters
    ----------
    dataset1 : array-like, shape=(n_ts1, sz1, d) or (n_ts1, sz1) or (sz1,)
        A dataset of time series.
        If shape is (n_ts1, sz1), the dataset is composed of univariate time series.
        If shape is (sz1,), the dataset is composed of a unique univariate time series.
    dataset2 : None or array-like, shape=(n_ts2, sz2, d) or (n_ts2, sz2) or (sz2,) (default: None)
        Another dataset of time series. If `None`, self-similarity of
        `dataset1` is returned.
        If shape is (n_ts2, sz2), the dataset is composed of univariate time series.
        If shape is (sz2,), the dataset is composed of a unique univariate time series.
    gamma : float (default 1.)
        Gamma parameter for Soft-DTW.
    be : Backend object or string or None
        Backend. If `be` is an instance of the class `NumPyBackend` or the string `"numpy"`,
        the NumPy backend is used.
        If `be` is an instance of the class `PyTorchBackend` or the string `"pytorch"`,
        the PyTorch backend is used.
        If `be` is `None`, the backend is determined by the input arrays.
        See our :ref:`dedicated user-guide page <backend>` for more information.
    compute_with_backend : bool, default=False
        This parameter has no influence when the NumPy backend is used.
        When a backend different from NumPy is used (cf parameter `be`):
        If `True`, the computation is done with the corresponding backend.
        If `False`, a conversion to the NumPy backend can be used to accelerate the computation.

    Returns
    -------
    array-like, shape=(n_ts1, n_ts2)
        Cross-similarity matrix.

    Examples
    --------
    >>> time_series = np.random.randn(10, 15, 1)
    >>> np.all(cdist_soft_dtw_normalized(time_series) >= 0.)
    True
    >>> time_series2 = np.random.randn(4, 15, 1)
    >>> np.all(cdist_soft_dtw_normalized(time_series, time_series2) >= 0.)
    True

    The PyTorch backend can be used to compute gradients:

    >>> import torch
    >>> dataset1 = torch.tensor([[[1.0], [2.0], [3.0]], [[1.0], [2.0], [3.0]]], requires_grad=True)
    >>> dataset2 = torch.tensor([[[3.0], [4.0], [-3.0]], [[3.0], [4.0], [-3.0]]])
    >>> sim_mat = cdist_soft_dtw_normalized(dataset1, dataset2, gamma=1.0, be="pytorch", compute_with_backend=True)
    >>> print(sim_mat)
    tensor([[42.0586, 42.0586],
            [42.0586, 42.0586]], grad_fn=<AddBackward0>)
    >>> sim = sim_mat[0, 0]
    >>> sim.backward()
    >>> print(dataset1.grad)
    tensor([[[-3.5249],
             [-2.2852],
             [ 9.6891]],
    <BLANKLINE>
            [[ 0.0000],
             [ 0.0000],
             [ 0.0000]]])

    See Also
    --------
    soft_dtw : Compute Soft-DTW
    cdist_soft_dtw : Cross similarity matrix between time series
        datasets using the unnormalized version of Soft-DTW

    References
    ----------
    .. [1] M. Cuturi, M. Blondel "Soft-DTW: a Differentiable Loss Function for
       Time-Series," ICML 2017.
    """  # noqa: E501
    be = instantiate_backend(be, dataset1, dataset2)
    dataset1 = to_time_series_dataset(dataset1, be=be)
    if dataset2 is not None:
        dataset2 = to_time_series_dataset(dataset2, be=be)
    dists = cdist_soft_dtw(
        dataset1, dataset2=dataset2, gamma=gamma, be=be, compute_with_backend=compute_with_backend
    )
    if dataset2 is None:
        # Self-similarity: the self-distances are already on the diagonal
        # of the unnormalized matrix, no extra soft_dtw calls needed.
        d_ii = be.diag(dists)
        normalizer = -0.5 * (be.reshape(d_ii, (-1, 1)) + be.reshape(d_ii, (1, -1)))
    else:
        # Cross-similarity: compute soft-DTW(X, X) for each series of both
        # datasets; the column/row vectors broadcast into the full matrix.
        self_dists1 = be.empty((dataset1.shape[0], 1))
        for i, ts1 in enumerate(dataset1):
            ts1_short = ts1[:ts_size(ts1)]
            self_dists1[i, 0] = soft_dtw(
                ts1_short, ts1_short, gamma=gamma, be=be, compute_with_backend=compute_with_backend
            )
        self_dists2 = be.empty((1, dataset2.shape[0]))
        for j, ts2 in enumerate(dataset2):
            ts2_short = ts2[:ts_size(ts2)]
            self_dists2[0, j] = soft_dtw(
                ts2_short, ts2_short, gamma=gamma, be=be, compute_with_backend=compute_with_backend
            )
        normalizer = -0.5 * (self_dists1 + self_dists2)
    dists += normalizer
    return dists
class SoftDTW:
    """Dynamic-programming solver for the Soft-DTW discrepancy.

    Wraps a pairwise distance matrix (or an object that can compute one)
    and exposes the forward pass (`compute`) and backward pass (`grad`)
    of Soft-DTW, dispatching either to numba-compiled NumPy kernels or to
    pure backend operations depending on `be` and `compute_with_backend`.
    """

    def __init__(self, D, gamma=1.0, be=None, compute_with_backend=False):
        """Soft Dynamic Time Warping.
        Parameters
        ----------
        D : array-like, shape=(m, n), dtype=float64 or class computing distances with a method 'compute'
            Distances. An example of class computing distance is 'SquaredEuclidean'.
        gamma: float
            Regularization parameter.
            Lower is less smoothed (closer to true DTW).
        be : Backend object or string or None
            Backend.
        compute_with_backend : bool, default=False
            This parameter has no influence when the NumPy backend is used.
            When a backend different from NumPy is used (cf parameter `be`):
            If `True`, the computation is done with the corresponding backend.
            If `False`, a conversion to the NumPy backend can be used to accelerate the computation.
        Attributes
        ----------
        self.R_: array-like, shape =(m + 2, n + 2)
            Accumulated cost matrix (stored after calling `compute`).
        """
        be = instantiate_backend(be, D)
        self.be = be
        self.compute_with_backend = compute_with_backend
        # Accept either a ready-made distance matrix or a distance object
        # (e.g. SquaredEuclidean) exposing a `compute()` method.
        if hasattr(D, "compute"):
            self.D = D.compute()
        else:
            self.D = D
        self.D = self.be.cast(self.D, dtype=self.be.float64)
        # Allocate memory.
        # We need +2 because we use indices starting from 1
        # and to deal with edge cases in the backward recursion.
        m, n = self.be.shape(self.D)
        self.R_ = self.be.zeros((m + 2, n + 2), dtype=self.be.float64)
        # `grad()` needs the accumulated cost matrix; this flag guards
        # against calling it before `compute()`.
        self.computed = False
        self.gamma = self.be.array(gamma, dtype=self.be.float64)

    def compute(self):
        """Compute soft-DTW by dynamic programming.
        Returns
        -------
        sdtw: float
            soft-DTW discrepancy.
        """
        m, n = self.be.shape(self.D)
        if self.be.is_numpy:
            # Fast path: numba-compiled kernel fills self.R_ in place.
            _njit_soft_dtw(self.D, self.R_, gamma=self.gamma)
        elif not self.compute_with_backend:
            # Non-NumPy backend, but backend computation not requested:
            # run the numba kernel on NumPy conversions of the operands.
            # NOTE(review): this appears to rely on `to_numpy` returning a
            # view that shares memory with self.R_ (true e.g. for CPU
            # torch tensors), so that the in-place fill propagates before
            # the re-wrap below — confirm for all supported backends.
            _njit_soft_dtw(
                self.be.to_numpy(self.D),
                self.be.to_numpy(self.R_),
                gamma=self.be.to_numpy(self.gamma),
            )
            self.R_ = self.be.array(self.R_)
        else:
            # Pure backend computation (keeps e.g. autograd graphs intact).
            _soft_dtw(self.D, self.R_, gamma=self.gamma, be=self.be)
        self.computed = True
        # Soft-DTW value sits at position (m, n) of the padded cost matrix.
        return self.R_[m, n]

    def grad(self):
        """Compute gradient of soft-DTW w.r.t. D by dynamic programming.
        Returns
        -------
        grad: array-like, shape=(m, n)
            Gradient w.r.t. D.
        """
        if not self.computed:
            raise ValueError("Needs to call compute() first.")
        m, n = self.be.shape(self.D)
        # Add an extra row and an extra column to D.
        # Needed to deal with edge cases in the recursion.
        D = self.be.vstack((self.D, self.be.zeros(n)))
        D = self.be.hstack((D, self.be.zeros((m + 1, 1))))
        # Allocate memory.
        # We need +2 because we use indices starting from 1
        # and to deal with edge cases in the recursion.
        E = self.be.zeros((m + 2, n + 2), dtype=self.be.float64)
        if self.be.is_numpy:
            _njit_soft_dtw_grad(D, self.R_, E, gamma=self.gamma)
        elif not self.compute_with_backend:
            # Same NumPy round-trip as in `compute` (see the note there
            # about in-place filling through `to_numpy` views).
            _njit_soft_dtw_grad(
                self.be.to_numpy(D),
                self.be.to_numpy(self.R_),
                self.be.to_numpy(E),
                gamma=self.be.to_numpy(self.gamma),
            )
            self.R_ = self.be.array(self.R_)
        else:
            _soft_dtw_grad(D, self.R_, E, gamma=self.gamma, be=self.be)
        # Strip the padding rows/columns before returning.
        return E[1:-1, 1:-1]
class SquaredEuclidean:
    """Pairwise squared Euclidean distance matrix between two time series.

    Used as the distance object consumed by :class:`SoftDTW`; also exposes
    the Jacobian-vector product needed for the Soft-DTW backward pass.
    """

    def __init__(self, X, Y, be=None, compute_with_backend=False):
        """Squared Euclidean distance.
        Parameters
        ----------
        X: array-like, shape=(m, d)
            First time series.
        Y: array-like, shape=(n, d)
            Second time series.
        be : Backend object or string or None
            Backend.
        compute_with_backend : bool, default=False
            This parameter has no influence when the NumPy backend is used.
            When a backend different from NumPy is used (cf parameter `be`):
            If `True`, the computation is done with the corresponding backend.
            If `False`, a conversion to the NumPy backend can be used to accelerate the computation.
        Examples
        --------
        >>> SquaredEuclidean([1, 2, 2, 3], [1, 2, 3, 4]).compute()
        array([[0., 1., 4., 9.],
               [1., 0., 1., 4.],
               [1., 0., 1., 4.],
               [4., 1., 0., 1.]])
        """
        self.be = instantiate_backend(be, X, Y)
        self.compute_with_backend = compute_with_backend
        # Pass the resolved backend (not the raw `be` argument, which may
        # still be None or a string) so both series are guaranteed to be
        # converted with the same backend as `self.be`.
        self.X = self.be.cast(to_time_series(X, be=self.be), dtype=self.be.float64)
        self.Y = self.be.cast(to_time_series(Y, be=self.be), dtype=self.be.float64)

    def compute(self):
        """Compute distance matrix.
        Returns
        -------
        D: array-like, shape=(m, n)
            Distance matrix.
        """
        return self.be.pairwise_euclidean_distances(self.X, self.Y) ** 2

    def jacobian_product(self, E):
        """Compute the product between the Jacobian
        (a linear map from m x d to m x n) and a matrix E.
        Parameters
        ----------
        E: array-like, shape=(m, n)
            Second time series.
        Returns
        -------
        G: array-like, shape=(m, d)
            Product with Jacobian.
            ([m x d, m x n] * [m x n] = [m x d]).
        """
        G = self.be.zeros_like(self.X, dtype=self.be.float64)
        if self.be.is_numpy:
            # Fast path: numba-compiled kernel fills G in place.
            _njit_jacobian_product_sq_euc(self.X, self.Y, E.astype(np.float64), G)
        elif not self.compute_with_backend:
            # Run the numba kernel on NumPy conversions, then wrap the
            # filled buffer back into the current backend.
            G = self.be.to_numpy(G)
            _njit_jacobian_product_sq_euc(
                self.be.to_numpy(self.X),
                self.be.to_numpy(self.Y),
                self.be.to_numpy(E).astype(np.float64),
                G,
            )
            G = self.be.array(G)
        else:
            # Pure backend computation (keeps e.g. autograd graphs intact).
            _jacobian_product_sq_euc(
                self.X, self.Y, self.be.cast(E, self.be.float64), G
            )
        return G
|
tslearn-teamREPO_NAMEtslearnPATH_START.@tslearn_extracted@tslearn-main@tslearn@metrics@softdtw_variants.py@.PATH_END.py
|
{
"filename": "native.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/styles/native.py",
"type": "Python"
}
|
"""
pygments.styles.native
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "native" vim theme.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
__all__ = ['NativeStyle']
class NativeStyle(Style):
    """
    Pygments version of the "native" vim theme.

    A dark theme: light grey default text on a near-black background,
    with green keywords, orange strings and blue numbers/identifiers.
    """
    name = 'native'
    # Dark grey page background; slightly lighter band for highlighted lines.
    background_color = '#202020'
    highlight_color = '#404040'
    line_number_color = '#aaaaaa'
    styles = {
        # Default text and whitespace on the dark background.
        Token: '#d0d0d0',
        Whitespace: '#666666',
        # Comments: italic grey; preprocessor/special comments stand out in red.
        Comment: 'italic #ababab',
        Comment.Preproc: 'noitalic bold #ff3a3a',
        Comment.Special: 'noitalic bold #e50808 bg:#520000',
        # Keywords and word-operators share the same bold green.
        Keyword: 'bold #6ebf26',
        Keyword.Pseudo: 'nobold',
        Operator.Word: 'bold #6ebf26',
        # Literals: orange strings, blue numbers.
        String: '#ed9d13',
        String.Other: '#ffa500',
        Number: '#51b2fd',
        # Names: cyan builtins/variables, blue classes/functions/namespaces.
        Name.Builtin: '#2fbccd',
        Name.Variable: '#40ffff',
        Name.Constant: '#40ffff',
        Name.Class: 'underline #71adff',
        Name.Function: '#71adff',
        Name.Namespace: 'underline #71adff',
        Name.Exception: '#bbbbbb',
        Name.Tag: 'bold #6ebf26',
        Name.Attribute: '#bbbbbb',
        Name.Decorator: '#ffa500',
        # Generic tokens (diffs, headings, prompts, tracebacks).
        Generic.Heading: 'bold #ffffff',
        Generic.Subheading: 'underline #ffffff',
        Generic.Deleted: '#ff3a3a',
        Generic.Inserted: '#589819',
        Generic.Error: '#ff3a3a',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.EmphStrong: 'bold italic',
        Generic.Prompt: '#aaaaaa',
        Generic.Output: '#cccccc',
        Generic.Traceback: '#ff3a3a',
        # Lexer errors: dark red on a light background.
        Error: 'bg:#e3d2d2 #a61717'
    }
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@styles@native.py@.PATH_END.py
|
{
"filename": "intro.md",
"repo_name": "federicomarulli/CosmoBolognaLib",
"repo_path": "CosmoBolognaLib_extracted/CosmoBolognaLib-master/External/CLASS/doc/input/intro.md",
"type": "Markdown"
}
|
CLASS: Cosmic Linear Anisotropy Solving System
==============================================
Author: Julien Lesgourgues
_This manual is under construction; this is only a provisional version. The definitive version will be made available soon, as well as all the necessary documentation to generate new versions of the manual. Currently the introduction is outdated and the definitions for some specific variables in the header files are missing. There are also some unresolved formatting issues in the documentation for harmonic.c and transfer.c, which will be corrected shortly._
Overall architecture of `class`
==========================================
The seven-module backbone
-------------------------
The purpose of `class` consists in computing some power
spectra for a given set of cosmological parameters. This task can be
decomposed in few steps or modules:
1. compute the evolution of cosmological background quantities.
2. compute the evolution of thermodynamical quantities (ionization
fractions, etc.)
3. compute the evolution of source functions \f$S(k,\eta)\f$ (by
integrating over all perturbations).
4. compute Bessel functions (in order to go from Fourier to harmonic
space).
5. compute transfer functions \f$\Delta_l(k)\f$ (unless one needs only
Fourier spectra \f$P(k)\f$’s and no harmonic spectra \f$C_l\f$’s).
6. compute the primordial spectrum for scalars, tensors, etc.
(straightforward if the input consists in spectral parameters \f$A_s\f$,
\f$n_s\f$, \f$r\f$, ..., but this module will incorporate the option of
integrating over inflationary perturbations).
7. compute power spectra \f$C_l\f$’s and/or \f$P(k)\f$’s.
In `class`, each of these steps is associated with a
structure:
1. `struct background ` for cosmological background,
2. `struct thermodynamics ` for thermodynamics,
3. `struct perturbations ` for source functions,
4. `struct bessels ` for bessel functions,
5. `struct transfer ` for transfer functions,
6. `struct primordial ` for primordial spectra,
7. `struct harmonic ` for output spectra.
A given structure contains “everything concerning one step that the
subsequent steps need to know” (for instance, everything about source
functions that the transfer module needs to know). In particular, each
structure contains one array of tabulated values (background quantities
as a function of time, thermodynamical quantities as a function of
redshift, sources as a function of \f$(k, \eta)\f$, etc.). It also contains
information about the size of this array and the value of the index of
each physical quantity, so that the table can be easily read and
interpolated. Finally, it contains any derived quantity that other
modules might need to know. Hence, the communication from one module A to
another module B consists in passing a pointer to the structure filled
by A, and nothing else.
Each structure is defined and filled in one of the following modules
(and precisely in the order below):
1. `background.c `
2. `thermodynamics.c `
3. `perturbations.c `
4. `bessel.c `
5. `transfer.c `
6. `primordial.c `
7. `harmonic.c `
Each of these modules contains at least three functions:
- *module*\_`init(...)`
- *module*\_`free()`
- *module*\_*something*\_`at`\_*somevalue*`(...)`
The first function allocates and fills each structure. This can be done
provided that the previous structures in the hierarchy have been already
allocated and filled. In summary, calling one of
*module*\_`init(...)` amounts in solving
entirely one of the steps 1 to 7.
The second function deallocates the fields of each structure. This can
be done optionally at the end of the code (or, when the code is embedded
in a sampler, this *must* be done between each execution of
`class`, and especially before calling
*module*\_`init(...)` again with different input
parameters).
The third function is able to interpolate the pre-computed tables. For
instance, `background\_init()` fills a table of background
quantities for discrete values of conformal time \f$\eta\f$, but
`background\_at\_eta(eta, \* values)` will return these
values for any arbitrary \f$\eta\f$.
Note that functions of the type
*module*\_*something*\_`at`\_*somevalue*`(...)`
are the only ones which are called from another module, while functions
of the type *module*\_`init(...)` and
*module*\_`free()` are the only one called by
the main executable. All other functions are for internal use in each
module.
Input
-----
There are two types of input:
1. “precision parameters” (controlling the precision of the output and
the execution time),
2. “input parameters” (cosmological parameters, flags telling to the
code what it should compute, ...)
All “precision parameters” have been grouped in a single structure
`struct precision`. The code contains *no other
arbitrary numerical coefficient*. This structure is initialized
in a simple module `precision.c` by the function
`precision\_init()`. Nothing is allocated dynamically in this
function, so there is no need for a `precision\_free()`
function.
Each “input parameter” refers to one particular step in the computation:
background, thermodynamics, perturbations, etc. Hence they are defined
as part of the corresponding structure. Their values are assigned in a
simple module `input.c`, by a function
`input\_init(...)` which has a pointer towards each structure
in its list of arguments. Hence, when a given function
*module*\_`init(...)` is called, the
corresponding structure already contains input parameters; the function
fills the rest of this structure. The function
`input\_init(...)` does not allocate any field dynamically,
so there is no need for an `input\_free()` function.
Output
------
A simple module `output.c` writes the final results in files.
The name of the files are considered as input parameters making part of
a small structure `struct output`. Like for all other input
parameters, these names are assigned inside the function
`input\_init(...)`. Again this structure contains no
dynamically allocated quantities, so there is no need for an
`output\_free()` function.
Summary
-------
We hope that after this short overview, it is clear for the reader that
the main executable of `class` should consist only in the
following lines (not including comments and error-management lines):
For a given purpose, somebody might be interested only in the
intermediate steps (only background quantities, only the
thermodynamics, only the perturbations and sources, etc.). It is then
straightforward to truncate the full hierarchy of modules 1, ... 7 at
some arbitrary order. We provide several “reduced executables”
`test`\_*module* achieving precisely this.
Note also that if `class` is embedded in a parameter sampler
and only “fast” parameters are varied (i.e., parameters related to the
primordial spectra), then it is only necessary to repeat the following
steps after `output\_init(...)`:
`spectra\_free()
primordial\_free()
input\_init(&ba,&th,&pt,&bs,&tr,&pm,&hr,&op)
primordial\_init(&pt,&pr,&pm)
spectra\_init(&pt,&tr,&pm,&hr)
output\_init(&pt,&tr,&hr,&op)
`
General principles
==================
Flexibility
-----------
Explain allocation of indices, ...
Control of precision
--------------------
Explain precision structure, ...
Control of errors
-----------------
|
federicomarulliREPO_NAMECosmoBolognaLibPATH_START.@CosmoBolognaLib_extracted@CosmoBolognaLib-master@External@CLASS@doc@input@intro.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/layout/xaxis/__init__.py",
"type": "Python"
}
|
import sys

# Plotly's generated graph_objs packages lazy-load their submodules on
# Python >= 3.7 via module-level __getattr__/__dir__ (PEP 562); older
# interpreters fall back to eagerly importing every class in the package.
if sys.version_info < (3, 7):
    from ._rangebreak import Rangebreak
    from ._rangeselector import Rangeselector
    from ._rangeslider import Rangeslider
    from ._tickfont import Tickfont
    from ._tickformatstop import Tickformatstop
    from ._title import Title
    from . import rangeselector
    from . import rangeslider
    from . import title
else:
    from _plotly_utils.importers import relative_import

    # relative_import wires up __all__, __getattr__ and __dir__ so each of
    # the names above is imported only on first attribute access.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".rangeselector", ".rangeslider", ".title"],
        [
            "._rangebreak.Rangebreak",
            "._rangeselector.Rangeselector",
            "._rangeslider.Rangeslider",
            "._tickfont.Tickfont",
            "._tickformatstop.Tickformatstop",
            "._title.Title",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@layout@xaxis@__init__.py@.PATH_END.py
|
{
"filename": "testing.py",
"repo_name": "FRBs/zdm",
"repo_path": "zdm_extracted/zdm-main/zdm/craco/testing.py",
"type": "Python"
}
|
""" Run tests with CRACO FRBs """
######
# first run this to generate surveys and parameter sets, by
# setting NewSurveys=True NewGrids=True
# Then set these to False and run with command line arguments
# to generate *many* outputs
#####
# It should be possible to remove all the matplotlib calls from this
# but in the current implementation it is not removed.
import argparse
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import pandas
from zdm import iteration as it
from zdm.craco import loading
from IPython import embed
# Global plotting configuration.
# NOTE(review): recent matplotlib versions reject None here and require the
# string 'none' to disable interpolation -- confirm intended behaviour.
matplotlib.rcParams["image.interpolation"] = None
# Font configuration (the original file set this identical block twice;
# the duplicate has been removed).
defaultsize = 14
ds = 4
font = {"family": "normal", "weight": "normal", "size": defaultsize}
matplotlib.rc("font", **font)
def main(pargs):
    """Scan one zdm parameter over a 1D grid and plot the likelihood curve.

    For each value in ``linspace(pargs.min, pargs.max, pargs.nstep)`` the
    rate constant ``lC`` is re-minimised, the 2D likelihood is evaluated on
    the CRACO grid, and the total log-likelihood is recorded.  Produces a
    LL-vs-parameter figure (saved to ``pargs.opfile`` or shown interactively)
    plus diagnostic figures ``nterms.png`` and ``pvterms.png``.
    """
    # Load the survey and its z-DM grid for the requested CRACO realisation.
    isurvey, igrid = loading.survey_and_grid(
        survey_name=pargs.survey,
        NFRB=pargs.nFRB,
        iFRB=pargs.iFRB,
        lum_func=pargs.lum_func,
    )
    surveys = [isurvey]
    grids = [igrid]
    # Values of the scanned parameter.
    pvals = np.linspace(pargs.min, pargs.max, pargs.nstep)
    vparams = {}
    vparams[pargs.param] = None
    vparams["lC"] = -0.9  # initial guess; re-fitted inside the loop
    # JXP Fussing
    #vparams["H0"] = 55.
    '''
    tparams = pandas.read_csv('tst_params.csv')
    for key in ['lEmax', 'alpha','gamma','sfr_n','lmean','lsigma','F']:
        vparams[key] = tparams[key][0]
    tmp_dict = {
        'lEmax': 40.5, 'H0': 64.375, 'alpha': 0.2, 'gamma': -0.5,
        'sfr_n': 0.0, 'lmean': 1.7, 'lsigma': 0.3, 'F': 0.11}
    vparams.update(tmp_dict)
    #embed(header='64 of testing')
    '''
    lls = []
    nterms = []  # LL term related to norm (i.e. rates)
    pvterms = []  # LL term related to p(DM,z)/SNR values (the comment here was
    #              a copy-paste of the one above in the original -- review)
    pvvals = []  # per-FRB p-values returned by calc_likelihoods_2D
    wzvals = []  # per-FRB w,z values returned by calc_likelihoods_2D
    for tt, pval in enumerate(pvals):
        vparams[pargs.param] = pval
        # Re-fit only the overall rate constant C for this parameter value.
        C, llC = it.minimise_const_only(vparams, grids, surveys, Verbose=False)
        # Set lC
        vparams["lC"] = C
        igrid.state.FRBdemo.lC = C
        # Grab final LL
        lls_final, nterm, pvterm, lpvals, lwz = it.calc_likelihoods_2D(
            igrid, isurvey, norm=True, psnr=True, dolist=4
        )
        # Hold
        lls.append(lls_final)
        nterms.append(nterm)
        pvterms.append(pvterm)
        pvvals.append(lpvals)
        wzvals.append(lwz)
        print(f"{pargs.param}: pval={pval}, C={C}, lltot={lls_final}")
    # Max
    imx = np.nanargmax(lls)
    print(f"Max LL at {pargs.param}={pvals[imx]}")
    # Plot LL vs parameter value.
    plt.clf()
    ax = plt.gca()
    ax.plot(pvals, lls, "o")
    # Mark NaN likelihoods with red crosses at the plot floor.
    bad = np.isnan(lls)
    nbad = np.sum(bad)
    if nbad > 0:
        ax.plot(pvals[bad], [np.nanmin(lls)] * nbad, "x", color="r")
    ax.set_xlabel(pargs.param)
    ax.set_ylabel("LL")
    # Max
    ax.axvline(pvals[imx], color="g", ls="--", label=f"max={pvals[imx]}")
    ax.legend()
    # Save?
    if pargs.opfile is not None:
        plt.savefig(pargs.opfile)
        print(f"Wrote: {pargs.opfile}")
    else:
        plt.show()
    plt.close()
    # Plot nterm
    plt.clf()
    ax = plt.gca()
    ax.plot(pvals, nterms, "o")
    ax.set_xlabel(pargs.param)
    ax.set_ylabel("nterm")
    plt.savefig("nterms.png")
    plt.close()
    # Plot pvterm
    plt.clf()
    ax = plt.gca()
    ax.plot(pvals, pvterms, "o")
    ax.set_xlabel(pargs.param)
    ax.set_ylabel("pvterm")
    plt.savefig("pvterms.png")
    plt.close()
# command-line arguments here
parser = argparse.ArgumentParser()
parser.add_argument("param", type=str, help="paramter to test on")
parser.add_argument("min", type=float, help="minimum value")
parser.add_argument("max", type=float, help="maximum value")
parser.add_argument(
"--nstep", type=int, default=10, required=False, help="number of steps"
)
parser.add_argument(
"--nFRB", type=int, default=1000, required=False, help="number of FRBs to analyze"
)
parser.add_argument(
"--iFRB",
type=int,
default=0,
required=False,
help="starting number of FRBs to analyze",
)
parser.add_argument(
"-o", "--opfile", type=str, required=False, help="Output file for the data"
)
parser.add_argument(
"--survey",
type=str,
default="CRACO_std_May2022",
required=False,
help="Survey name",
)
parser.add_argument(
"--lum_func",
type=int,
default=0,
required=False,
help="Luminosity function (0=power-law, 1=gamma)",
)
pargs = parser.parse_args()
main(pargs)
"""
# OUT OF DATE TESTS
python test_with_craco.py sfr_n 0.2 2. --nstep 100 --nFRB 1000 --cosmo Planck15 -o CRACO_1000_sfr_n.png
python test_with_craco.py gamma -1.5 -0.8 --nstep 30 --nFRB 1000 --cosmo Planck15 -o CRACO_1000_gamma.png
python test_with_craco.py alpha 0.0 1.0 --nstep 50 --nFRB 1000 --cosmo Planck15 -o CRACO_1000_alpha.png
python test_with_craco.py lEmax 41. 43. --nstep 50 --nFRB 1000 --cosmo Planck15 -o CRACO_1000_lEmax.png
python test_with_craco.py H0 60. 80. --nstep 50 --nFRB 1000 --cosmo Planck15 -o CRACO_1000_H0.png
python test_with_craco.py lmean 1.9 2.5 --nstep 30 --nFRB 1000 --cosmo Planck15 -o CRACO_1000_lmean.png
# OUT OF DATE TESTS
#
python test_with_craco.py alpha 0.0 1.0 --nstep 50 --nFRB 100 --cosmo Planck15 --survey CRAFT/CRACO_1 -o CRACO_100_alpha_anew.png
python test_with_craco.py H0 60.0 80.0 --nstep 50 --nFRB 100 --cosmo Planck15 --survey CRAFT/CRACO_1 -o CRACO_100_H0_Gamma_new.png --lum_func 1
python test_with_craco.py lEmax 41. 43. --nstep 50 --nFRB 100 --cosmo Planck15 --survey CRAFT/CRACO_1 -o CRACO_100_Emax_Gamma_new.png --lum_func 1
python test_with_craco.py H0 60.0 80.0 --nstep 50 --nFRB 100 --cosmo Planck15 --survey CRAFT/CRACO_1 -o CRACO_100_H0_new.png
python test_with_craco.py lEmax 41. 43. --nstep 50 --nFRB 100 --cosmo Planck15 --survey CRAFT/CRACO_1 -o CRACO_100_Emax_new.png
# Newest round
python testing.py lEmax 41. 43. --nstep 50 --nFRB 100 -o MC_Plots/CRACO_100_Emax_new.png
# Gamma
python testing.py H0 60. 80. --nstep 50 --nFRB 100 --survey CRACO_alpha1_Planck18_Gamma -o MC_Plots/CRACO_100_H0_Gamma.png --lum_func 2
python testing.py lEmax 41. 43. --nstep 50 --nFRB 100 --iFRB 100 --survey CRACO_alpha1_Planck18_Gamma -o MC_Plots/CRACO_100_Emax_Gamma.png --lum_func 2
python testing.py alpha 0. 2. --nstep 50 --nFRB 100 --survey CRACO_alpha1_Planck18_Gamma -o MC_Plots/CRACO_100_alpha_Gamma.png --lum_func 2
python testing.py sfr_n 0. 5. --nstep 100 --nFRB 100 --iFRB 100 --survey CRACO_alpha1_Planck18_Gamma -o MC_Plots/CRACO_100_sfr_Gamma.png --lum_func 2
#
python testing.py H0 60. 80. --nstep 50 --nFRB 100 -o MC_Plots/CRACO_100_H0.png --lum_func 2
#
python testing.py F .001 .1 --nstep 100 --nFRB 1000 -o MC_F/Plots/synth_100_F_0.01.png --lum_func 2 --survey ../MC_F/Surveys/F_0.01_survey
python testing.py F .1 .999 --nstep 100 --nFRB 1000 -o MC_F/Plots/synth_100_F_0.7.png --lum_func 2 --survey ../MC_F/Surveys/F_0.7_survey
python testing.py F .1 .999 --nstep 100 --nFRB 1000 -o MC_F/Plots/synth_100_F_0.9.png --lum_func 2 --survey ../MC_F/Surveys/F_0.9_survey
python testing.py F .1 .999 --nstep 100 --nFRB 1000 -o MC_F/Plots/synth_100_F_vanilla.png --lum_func 2 --survey ../MC_F/Surveys/F_vanilla_survey
python testing.py F .001 .1 --nstep 100 --nFRB 1000 -o MC_F/Plots/synth_100_F_dmhost_suppressed_0.01.png --lum_func 2 --survey ../MC_F/Surveys/F_0.01_dmhost_suppressed_survey
python testing.py F .1 .999 --nstep 100 --nFRB 1000 -o MC_F/Plots/synth_100_F_dmhost_suppressed_0.7.png --lum_func 2 --survey ../MC_F/Surveys/F_0.7_dmhost_suppressed_survey
python testing.py F .1 .999 --nstep 100 --nFRB 1000 -o MC_F/Plots/synth_100_F_dmhost_suppressed_0.9.png --lum_func 2 --survey ../MC_F/Surveys/F_0.9_dmhost_suppressed_survey
python testing.py F .1 .999 --nstep 100 --nFRB 1000 -o MC_F/Plots/synth_100_F_dmhost_suppressed_vanilla.png --lum_func 2 --survey ../MC_F/Surveys/F_vanilla_dmhost_suppressed_survey
python testing.py H0 60. 80. --nstep 50 --nFRB 100 -o MC_Plots/CRACO_100_H0_TEST.png --lum_func 2 --survey CRACO_alpha1_Planck18_Gamma
python testing.py H0 60. 80. --nstep 50 --nFRB 100 -o MC_Plots/CRACO_100_H0_TEST.png --lum_func 2 --survey CRACO_alpha1_Planck18_Gamma
python testing.py H0 60. 80. --nstep 50 --nFRB 100 -o MC_Plots/CRACO_100_H0_TEST_F32.png --lum_func 2 --survey ../MC_F/Surveys/F_0.32_survey
# More fussing about with F and related
python testing.py H0 60. 80. --nstep 50 --nFRB 100 -o MC_Plots/CRACO_100_H0_TEST_F32.png --lum_func 2 --survey ../MC_F/Surveys/F_0.32_survey
# Square debugging
python testing.py F 0.1 0.99 --nstep 50 --nFRB 100 -o MC_Plots/CRACO_100_F_TEST_F32.png --lum_func 2 --survey ../MC_F/Surveys/F_0.32_survey --iFRB 100
# Best F: pval=0.2997959183673469, C=3.630489354871595, lltot=-565.4650145414604
python testing.py F 0.1 0.99 --nstep 50 --nFRB 100 -o MC_Plots/CRACO_100_F_TEST_F32_H055.png --lum_func 2 --survey ../MC_F/Surveys/F_0.32_survey --iFRB 100
# Best F: pval=0.8265306122448979, C=3.6174093413949553, lltot=-567.7777429522436
"""
|
FRBsREPO_NAMEzdmPATH_START.@zdm_extracted@zdm-main@zdm@craco@testing.py@.PATH_END.py
|
{
"filename": "test_timestep_thinner.py",
"repo_name": "pynbody/tangos",
"repo_path": "tangos_extracted/tangos-master/tests/test_timestep_thinner.py",
"type": "Python"
}
|
from pytest import fixture
import tangos
from tangos import testing
from tangos.testing import simulation_generator
from tangos.tools import timestep_thinner
@fixture
def fresh_database():
    """Yield a blank in-memory tangos DB populated with a 6-step test sim.

    Timesteps are placed at t = 0.1..0.5 Gyr plus a near-duplicate at
    t = 0.31; each carries 3 halos with ``test_property = halo_number * t``,
    and consecutive steps (after the first) are linked.  The DB is closed
    after the test.
    """
    testing.init_blank_db_for_testing()
    generator = simulation_generator.SimulationGeneratorForTests()
    times = [0.1, 0.2, 0.3, 0.31, 0.4, 0.5]
    for t in times:
        generator.add_timestep(time=t)
        generator.add_objects_to_timestep(3)
        generator.add_properties_to_halos(test_property=lambda i: i * t)
        if t > 0.11:  # skip linking for the very first step
            generator.link_last_halos()
    _assert_everything_present()
    yield
    tangos.core.close_db()
def _assert_everything_present():
    """Sanity-check the raw object counts created by the fresh_database fixture."""
    session = tangos.core.get_default_session()
    assert len(tangos.get_simulation("sim").timesteps) == 6
    assert tangos.get_timestep("sim/ts4").time_gyr == 0.31
    # 6 timesteps x 3 halos each:
    assert session.query(tangos.core.SimulationObjectBase).count() == 18
    assert session.query(tangos.core.HaloProperty).count() == 18
    assert session.query(tangos.core.HaloLink).count() == 30
def _assert_timestep_removed(target_timestep_id):
    """Assert that ts4 (t=0.31) is gone while everything else survived intact."""
    session = tangos.core.get_default_session()
    expected_times = [0.1, 0.2, 0.3, 0.4, 0.5]
    for i, t in enumerate(expected_times):
        ts = tangos.get_simulation("sim").timesteps[i]
        assert ts.time_gyr == t
        assert len(ts.objects.all()) == 3
        assert ts.objects[0]['test_property'] == t
        if i > 0:
            # the step immediately after the removed one loses its backlink
            assert (ts[1].previous is None and ts.time_gyr == 0.4) or \
                (ts[1].previous == tangos.get_simulation("sim").timesteps[i - 1][1])
    # finally check there are no orphan objects
    assert session.query(tangos.core.SimulationObjectBase).count() == 15
    assert (tangos.core.get_default_session().query(tangos.core.SimulationObjectBase)
            .filter_by(timestep_id=target_timestep_id).count() == 0)
    # indirectly check that the haloproperties are also gone, just by counting them:
    assert session.query(tangos.core.HaloProperty).count() == 15
    # and the halo links:
    assert session.query(tangos.core.HaloLink).count() == 18
def test_timestep_thinner_no_thinning(fresh_database):
    """A relative threshold smaller than every gap should remove nothing."""
    tt = timestep_thinner.TimestepThinner()
    tt.parse_command_line(["-r", "0.05", "-f"])
    tt.run_calculation_loop()
    _assert_everything_present()
def test_timestep_thinner_relative(fresh_database):
    """-r 0.5: ts4 sits only 0.01 Gyr after ts3, so it should be thinned out."""
    _assert_everything_present()
    target_ts_id = tangos.get_timestep("sim/ts4").id
    tt = timestep_thinner.TimestepThinner()
    tt.parse_command_line(["-r", "0.5", "-f"])
    tt.run_calculation_loop()
    _assert_timestep_removed(target_ts_id)
def test_timestep_thinner_doesnt_over_thin(fresh_database):
    """Check that when the threshold delta_time is more than all the delta times, we retain
    some timesteps, just not spaced more regularly than delta_time"""
    _assert_everything_present()
    tt = timestep_thinner.TimestepThinner()
    tt.parse_command_line(["0.1999", "-f"])
    tt.run_calculation_loop()
    # survivors are spaced >= 0.1999 Gyr apart: t = 0.1, 0.3, 0.5
    assert [t.extension for t in tangos.get_simulation("sim").timesteps] == ["ts1", "ts3", "ts6"]
def test_timestep_thinner_absolute(fresh_database):
    """An absolute threshold of 0.05 Gyr removes only the near-duplicate ts4."""
    _assert_everything_present()
    target_id = tangos.get_timestep("sim/ts4").id
    tt = timestep_thinner.TimestepThinner()
    tt.parse_command_line(["0.05", "-f"])
    tt.run_calculation_loop()
    _assert_timestep_removed(target_id)
|
pynbodyREPO_NAMEtangosPATH_START.@tangos_extracted@tangos-master@tests@test_timestep_thinner.py@.PATH_END.py
|
{
"filename": "test_var.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/tsa/vector_ar/tests/test_var.py",
"type": "Python"
}
|
"""
Test VAR Model
"""
from statsmodels.compat.pandas import QUARTER_END, assert_index_equal
from statsmodels.compat.python import lrange
from io import BytesIO, StringIO
import os
import sys
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
import pandas as pd
import pytest
from statsmodels.datasets import macrodata
import statsmodels.tools.data as data_util
from statsmodels.tools.sm_exceptions import ValueWarning
from statsmodels.tsa.base.datetools import dates_from_str
import statsmodels.tsa.vector_ar.util as util
from statsmodels.tsa.vector_ar.var_model import VAR, forecast, var_acf
DECIMAL_12 = 12
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
@pytest.fixture()
def bivariate_var_data(reset_randomstate):
    """A bivariate dataset for VAR estimation"""
    # 252 observations of a VAR(2): y_t = 0.2 y_{t-1} + 0.1 y_{t-2} + e_t,
    # seeded via the reset_randomstate fixture.
    e = np.random.standard_normal((252, 2))
    y = np.zeros_like(e)
    y[:2] = e[:2]  # burn-in: first two rows are pure noise
    for i in range(2, 252):
        y[i] = 0.2 * y[i - 1] + 0.1 * y[i - 2] + e[i]
    return y
@pytest.fixture()
def bivariate_var_result(bivariate_var_data):
    """A bivariate VARResults for reuse"""
    mod = VAR(bivariate_var_data)
    # default lag selection; callers only need some fitted results object
    return mod.fit()
class CheckVAR:  # FIXME: not inherited, so these tests are never run!
    """Mixin comparing a fitted VAR (``res1``) against reference results (``res2``).

    ``res2`` exposes per-equation scalars as numbered attributes
    (``rmse_1``, ``rsquared_1``, ``llf_1``, ...), fetched via ``getattr``
    rather than the original ``eval`` string construction.
    """

    # just so pylint will not complain
    res1 = None
    res2 = None

    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)

    def test_neqs(self):
        assert_equal(self.res1.neqs, self.res2.neqs)

    def test_nobs(self):
        assert_equal(self.res1.avobs, self.res2.nobs)

    def test_df_eq(self):
        assert_equal(self.res1.df_eq, self.res2.df_eq)

    def test_rmse(self):
        results = self.res1.results
        for i in range(len(results)):
            # getattr replaces the original eval("self.res2.rmse_" + ...)
            assert_almost_equal(
                results[i].mse_resid ** 0.5,
                getattr(self.res2, "rmse_" + str(i + 1)),
                DECIMAL_6,
            )

    def test_rsquared(self):
        results = self.res1.results
        for i in range(len(results)):
            assert_almost_equal(
                results[i].rsquared,
                getattr(self.res2, "rsquared_" + str(i + 1)),
                DECIMAL_3,
            )

    def test_llf(self):
        results = self.res1.results
        assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)
        for i in range(len(results)):
            assert_almost_equal(
                results[i].llf, getattr(self.res2, "llf_" + str(i + 1)), DECIMAL_2
            )

    def test_aic(self):
        assert_almost_equal(self.res1.aic, self.res2.aic)

    def test_bic(self):
        assert_almost_equal(self.res1.bic, self.res2.bic)

    def test_hqic(self):
        assert_almost_equal(self.res1.hqic, self.res2.hqic)

    def test_fpe(self):
        assert_almost_equal(self.res1.fpe, self.res2.fpe)

    def test_detsig(self):
        assert_almost_equal(self.res1.detomega, self.res2.detsig)

    def test_bse(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def get_macrodata():
    """Return log-differenced realgdp/realcons/realinv as a structured ndarray."""
    data = macrodata.load_pandas().data[["realgdp", "realcons", "realinv"]]
    data = data.to_records(index=False)
    # view as a plain (nobs, 3) float array to do the math ...
    nd = data.view((float, 3), type=np.ndarray)
    nd = np.diff(np.log(nd), axis=0)
    # ... then restore the original structured dtype with named fields
    return nd.ravel().view(data.dtype, type=np.ndarray)
def generate_var():  # FIXME: make a test?
    """Regenerate reference results by running tests/var.R through rpy2.

    NOTE(review): ``pandas.rpy`` was removed from modern pandas; this helper
    is legacy tooling for rebuilding the stored R reference results.
    """
    import pandas.rpy.common as prp
    from rpy2.robjects import r

    r.source("tests/var.R")
    return prp.convert_robj(r["result"], use_pandas=False)
def write_generate_var():  # FIXME: make a test?
    """Dump freshly generated R reference results to vars_results.npz."""
    result = generate_var()
    np.savez("tests/results/vars_results.npz", **result)
class RResults:
    """
    Simple interface with results generated by "vars" package in R.
    """

    def __init__(self):
        # data = np.load(resultspath + 'vars_results.npz')
        from .results.results_var_data import var_results

        data = var_results.__dict__
        # coefficient estimates / standard errors, one column per equation
        self.names = data["coefs"].dtype.names
        self.params = data["coefs"].view((float, len(self.names)), type=np.ndarray)
        self.stderr = data["stderr"].view((float, len(self.names)), type=np.ndarray)
        # impulse responses (plain and orthogonalised)
        self.irf = data["irf"].item()
        self.orth_irf = data["orthirf"].item()
        self.nirfs = int(data["nirfs"][0])
        self.nobs = int(data["obs"][0])
        self.totobs = int(data["totobs"][0])
        # information criteria
        crit = data["crit"].item()
        self.aic = crit["aic"][0]
        self.sic = self.bic = crit["sic"][0]  # R calls BIC "SC"
        self.hqic = crit["hqic"][0]
        self.fpe = crit["fpe"][0]
        self.detomega = data["detomega"][0]
        self.loglike = data["loglike"][0]
        self.nahead = int(data["nahead"][0])
        self.ma_rep = data["phis"]
        self.causality = data["causality"]
# Saved real stdout so it can be restored after the module's tests run.
_orig_stdout = None


def setup_module():
    """Silence the copious print output from these tests by swapping stdout."""
    global _orig_stdout
    _orig_stdout = sys.stdout
    sys.stdout = StringIO()


def teardown_module():
    """Restore the stdout hijacked by setup_module().

    The original file never restored sys.stdout, leaving it pointing at a
    throwaway StringIO for the rest of the test session; pytest invokes
    teardown_module automatically after the module's tests complete.
    """
    global _orig_stdout
    if _orig_stdout is not None:
        sys.stdout = _orig_stdout
        _orig_stdout = None
class CheckIRF:
    """Mixin: compare impulse responses against the R "vars" reference output.

    Subclasses must set ``ref`` (RResults), ``res`` (fitted VARResults),
    ``irf`` (res.irf(...)) and ``k`` (number of equations).
    """

    ref = None
    res = None
    irf = None
    k = None

    # ---------------------------------------------------------------------------
    # IRF tests

    def test_irf_coefs(self):
        self._check_irfs(self.irf.irfs, self.ref.irf)
        self._check_irfs(self.irf.orth_irfs, self.ref.orth_irf)

    def _check_irfs(self, py_irfs, r_irfs):
        # compare each response variable's IRF matrix against R's
        for i, name in enumerate(self.res.names):
            ref_irfs = r_irfs[name].view((float, self.k), type=np.ndarray)
            res_irfs = py_irfs[:, :, i]
            assert_almost_equal(ref_irfs, res_irfs)

    @pytest.mark.matplotlib
    def test_plot_irf(self, close_figures):
        # smoke tests of the various plot() argument combinations
        self.irf.plot()
        self.irf.plot(plot_stderr=False)
        self.irf.plot(impulse=0, response=1)
        self.irf.plot(impulse=0)
        self.irf.plot(response=0)
        self.irf.plot(orth=True)
        self.irf.plot(impulse=0, response=1, orth=True)

    @pytest.mark.matplotlib
    def test_plot_cum_effects(self, close_figures):
        self.irf.plot_cum_effects()
        self.irf.plot_cum_effects(plot_stderr=False)
        self.irf.plot_cum_effects(impulse=0, response=1)
        self.irf.plot_cum_effects(orth=True)
        self.irf.plot_cum_effects(impulse=0, response=1, orth=True)

    @pytest.mark.matplotlib
    def test_plot_figsizes(self):
        assert_equal(self.irf.plot().get_size_inches(), (10, 10))
        assert_equal(self.irf.plot(figsize=(14, 10)).get_size_inches(), (14, 10))
        assert_equal(self.irf.plot_cum_effects().get_size_inches(), (10, 10))
        assert_equal(
            self.irf.plot_cum_effects(figsize=(14, 10)).get_size_inches(),
            (14, 10),
        )
@pytest.mark.smoke
class CheckFEVD:
    """Mixin: smoke tests for forecast-error variance decomposition results.

    Subclasses must set ``fevd`` to a computed FEVD object.
    """

    fevd = None

    # ---------------------------------------------------------------------------
    # FEVD tests

    @pytest.mark.matplotlib
    def test_fevd_plot(self, close_figures):
        self.fevd.plot()

    def test_fevd_repr(self):
        self.fevd

    def test_fevd_summary(self):
        self.fevd.summary()

    def test_fevd_cov(self):
        # test does not crash
        # not implemented
        with pytest.raises(NotImplementedError):
            self.fevd.cov()
class TestVARResults(CheckIRF, CheckFEVD):
    """End-to-end checks of VAR(2) on macro data against R "vars" results."""

    @classmethod
    def setup_class(cls):
        cls.p = 2
        cls.data = get_macrodata()
        cls.model = VAR(cls.data)
        cls.names = cls.model.endog_names
        cls.ref = RResults()
        cls.k = len(cls.ref.names)
        cls.res = cls.model.fit(maxlags=cls.p)
        cls.irf = cls.res.irf(cls.ref.nirfs)
        cls.nahead = cls.ref.nahead
        cls.fevd = cls.res.fevd()

    def test_constructor(self):
        # make sure this works with no names
        ndarr = self.data.view((float, 3), type=np.ndarray)
        model = VAR(ndarr)
        model.fit(self.p)

    def test_names(self):
        assert_equal(self.model.endog_names, self.ref.names)
        model2 = VAR(self.data)
        assert_equal(model2.endog_names, self.ref.names)

    def test_get_eq_index(self):
        # equations are addressable by integer index or by name
        assert type(self.res.names) is list  # noqa: E721
        for i, name in enumerate(self.names):
            idx = self.res.get_eq_index(i)
            idx2 = self.res.get_eq_index(name)
            assert_equal(idx, i)
            assert_equal(idx, idx2)
        with pytest.raises(Exception):
            self.res.get_eq_index("foo")

    @pytest.mark.smoke
    def test_repr(self):
        # just want this to work
        str(self.res)
        repr(self.res)

    def test_params(self):
        assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)

    @pytest.mark.smoke
    def test_cov_params(self):
        # do nothing for now
        self.res.cov_params

    @pytest.mark.smoke
    def test_cov_ybar(self):
        self.res.cov_ybar()

    @pytest.mark.smoke
    def test_tstat(self):
        self.res.tvalues

    @pytest.mark.smoke
    def test_pvalues(self):
        self.res.pvalues

    @pytest.mark.smoke
    def test_summary(self):
        summ = self.res.summary()
        assert "Summary of " in str(summ)

    def test_detsig(self):
        assert_almost_equal(self.res.detomega, self.ref.detomega)

    def test_aic(self):
        assert_almost_equal(self.res.aic, self.ref.aic)

    def test_bic(self):
        assert_almost_equal(self.res.bic, self.ref.bic)

    def test_hqic(self):
        assert_almost_equal(self.res.hqic, self.ref.hqic)

    def test_fpe(self):
        assert_almost_equal(self.res.fpe, self.ref.fpe)

    def test_lagorder_select(self):
        ics = ["aic", "fpe", "hqic", "bic"]
        for ic in ics:
            # Smoke test
            self.model.fit(maxlags=10, ic=ic, verbose=True)
        with pytest.raises(Exception):
            self.model.fit(ic="foo")

    def test_nobs(self):
        assert_equal(self.res.nobs, self.ref.nobs)

    def test_stderr(self):
        assert_almost_equal(self.res.stderr, self.ref.stderr, DECIMAL_4)

    def test_loglike(self):
        assert_almost_equal(self.res.llf, self.ref.loglike)

    def test_ma_rep(self):
        ma_rep = self.res.ma_rep(self.nahead)
        assert_almost_equal(ma_rep, self.ref.ma_rep)

    # --------------------------------------------------
    # Lots of tests to make sure stuff works...need to check correctness

    def test_causality(self):
        causedby = self.ref.causality["causedby"]
        for i, name in enumerate(self.names):
            variables = self.names[:i] + self.names[i + 1 :]
            result = self.res.test_causality(name, variables, kind="f")
            assert_almost_equal(result.pvalue, causedby[i], DECIMAL_4)
            # integer-index form must agree with the name form
            rng = lrange(self.k)
            rng.remove(i)
            result2 = self.res.test_causality(i, rng, kind="f")
            assert_almost_equal(result.pvalue, result2.pvalue, DECIMAL_12)
            # make sure works
            result = self.res.test_causality(name, variables, kind="wald")
        # corner cases
        _ = self.res.test_causality(self.names[0], self.names[1])
        _ = self.res.test_causality(0, 1)
        with pytest.raises(Exception):
            self.res.test_causality(0, 1, kind="foo")

    def test_causality_no_lags(self):
        res = VAR(self.data).fit(maxlags=0)
        with pytest.raises(RuntimeError, match="0 lags"):
            res.test_causality(0, 1)

    @pytest.mark.smoke
    def test_select_order(self):
        result = self.model.fit(10, ic="aic", verbose=True)
        assert isinstance(result.params, np.ndarray)
        result = self.model.fit(10, ic="fpe", verbose=True)
        assert isinstance(result.params, np.ndarray)
        # bug
        model = VAR(self.model.endog)
        model.select_order()

    def test_is_stable(self):
        # may not necessarily be true for other datasets
        assert self.res.is_stable(verbose=True)

    def test_acf(self):
        # test that it works...for now
        acfs = self.res.acf(10)
        # defaults to nlags=lag_order
        acfs = self.res.acf()
        assert len(acfs) == self.p + 1

    def test_acf_2_lags(self):
        # recover the VAR coefficients from the ACF via Yule-Walker-style
        # block equations and check they round-trip
        c = np.zeros((2, 2, 2))
        c[0] = np.array([[0.2, 0.1], [0.15, 0.15]])
        c[1] = np.array([[0.1, 0.9], [0, 0.1]])
        acf = var_acf(c, np.eye(2), 3)
        gamma = np.zeros((6, 6))
        gamma[:2, :2] = acf[0]
        gamma[2:4, 2:4] = acf[0]
        gamma[4:6, 4:6] = acf[0]
        gamma[2:4, :2] = acf[1].T
        gamma[4:, :2] = acf[2].T
        gamma[:2, 2:4] = acf[1]
        gamma[:2, 4:] = acf[2]
        recovered = np.dot(gamma[:2, 2:], np.linalg.inv(gamma[:4, :4]))
        recovered = [recovered[:, 2 * i : 2 * (i + 1)] for i in range(2)]
        recovered = np.array(recovered)
        assert_allclose(recovered, c, atol=1e-7)

    @pytest.mark.smoke
    def test_acorr(self):
        acorrs = self.res.acorr(10)
        assert acorrs.shape == (11, 3, 3)

    @pytest.mark.smoke
    def test_forecast(self):
        self.res.forecast(self.res.endog[-5:], 5)

    @pytest.mark.smoke
    def test_forecast_interval(self):
        y = self.res.endog[: -self.p :]
        point, lower, upper = self.res.forecast_interval(y, 5)

    @pytest.mark.matplotlib
    def test_plot_sim(self, close_figures):
        self.res.plotsim(steps=100)

    @pytest.mark.matplotlib
    def test_plot(self, close_figures):
        self.res.plot()

    @pytest.mark.matplotlib
    def test_plot_acorr(self, close_figures):
        self.res.plot_acorr()

    @pytest.mark.matplotlib
    def test_plot_forecast(self, close_figures):
        self.res.plot_forecast(5)

    def test_reorder(self):
        # manually reorder
        data = self.data.view((float, 3), type=np.ndarray)
        names = self.names
        data2 = np.append(
            np.append(data[:, 2, None], data[:, 0, None], axis=1),
            data[:, 1, None],
            axis=1,
        )
        names2 = []
        names2.append(names[2])
        names2.append(names[0])
        names2.append(names[1])
        res2 = VAR(data2).fit(maxlags=self.p)
        # use reorder function
        res3 = self.res.reorder(["realinv", "realgdp", "realcons"])
        # check if the main results match
        assert_almost_equal(res2.params, res3.params)
        assert_almost_equal(res2.sigma_u, res3.sigma_u)
        assert_almost_equal(res2.bic, res3.bic)
        assert_almost_equal(res2.stderr, res3.stderr)

    def test_pickle(self):
        fh = BytesIO()
        # test wrapped results load save pickle
        del self.res.model.data.orig_endog
        self.res.save(fh)
        fh.seek(0, 0)
        res_unpickled = self.res.__class__.load(fh)
        assert type(res_unpickled) is type(self.res)  # noqa: E721
class E1_Results:
    """
    Results from Lütkepohl (2005) using E2 dataset
    """

    def __init__(self):
        # Lutkepohl p. 120 results
        # I asked the author about these results and there is probably rounding
        # error in the book, so I adjusted these test results to match what is
        # coming out of the Python (double-checked) calculations
        # IRF standard errors for horizons 1..3 (3x3 per horizon)
        self.irf_stderr = np.array(
            [
                [
                    [0.125, 0.546, 0.664],
                    [0.032, 0.139, 0.169],
                    [0.026, 0.112, 0.136],
                ],
                [
                    [0.129, 0.547, 0.663],
                    [0.032, 0.134, 0.163],
                    [0.026, 0.108, 0.131],
                ],
                [
                    [0.084, 0.385, 0.479],
                    [0.016, 0.079, 0.095],
                    [0.016, 0.078, 0.103],
                ],
            ]
        )
        # cumulative-effect standard errors for horizons 1..3
        self.cum_irf_stderr = np.array(
            [
                [
                    [0.125, 0.546, 0.664],
                    [0.032, 0.139, 0.169],
                    [0.026, 0.112, 0.136],
                ],
                [
                    [0.149, 0.631, 0.764],
                    [0.044, 0.185, 0.224],
                    [0.033, 0.140, 0.169],
                ],
                [
                    [0.099, 0.468, 0.555],
                    [0.038, 0.170, 0.205],
                    [0.033, 0.150, 0.185],
                ],
            ]
        )
        # long-run effect standard errors
        self.lr_stderr = np.array(
            [
                [0.134, 0.645, 0.808],
                [0.048, 0.230, 0.288],
                [0.043, 0.208, 0.260],
            ]
        )
# Location of the bundled reference data files (tests/results/).
basepath = os.path.split(__file__)[0]
resultspath = os.path.join(basepath, "results")


def get_lutkepohl_data(name="e2"):
    """Load one of Lütkepohl's example datasets (e1..e6) from results/."""
    path = os.path.join(resultspath, f"{name}.dat")
    return util.parse_lutkepohl_data(path)
def test_lutkepohl_parse():
    """Smoke test: each of the e1..e6 data files parses without error."""
    for idx in range(1, 7):
        get_lutkepohl_data(f"e{idx}")
class TestVARResultsLutkepohl:
    """
    Verify calculations using results from Lütkepohl's book
    """

    @classmethod
    def setup_class(cls):
        cls.p = 2
        sdata, dates = get_lutkepohl_data("e1")
        data = data_util.struct_to_ndarray(sdata)
        # log growth rates, dropping the last 16 obs as in the book
        adj_data = np.diff(np.log(data), axis=0)
        # est = VAR(adj_data, p=2, dates=dates[1:], names=names)
        cls.model = VAR(adj_data[:-16], dates=dates[1:-16], freq=f"B{QUARTER_END}-MAR")
        cls.res = cls.model.fit(maxlags=cls.p)
        cls.irf = cls.res.irf(10)
        cls.lut = E1_Results()

    def test_approx_mse(self):
        # 3.5.18, p. 99
        mse2 = (
            np.array(
                [
                    [25.12, 0.580, 1.300],
                    [0.580, 1.581, 0.586],
                    [1.300, 0.586, 1.009],
                ]
            )
            * 1e-4
        )
        assert_almost_equal(mse2, self.res.forecast_cov(3)[1], DECIMAL_3)

    def test_irf_stderr(self):
        irf_stderr = self.irf.stderr(orth=False)
        # book values start at horizon 1; index 0 is the impact period
        for i in range(1, 1 + len(self.lut.irf_stderr)):
            assert_almost_equal(np.round(irf_stderr[i], 3), self.lut.irf_stderr[i - 1])

    def test_cum_irf_stderr(self):
        stderr = self.irf.cum_effect_stderr(orth=False)
        for i in range(1, 1 + len(self.lut.cum_irf_stderr)):
            assert_almost_equal(np.round(stderr[i], 3), self.lut.cum_irf_stderr[i - 1])

    def test_lr_effect_stderr(self):
        stderr = self.irf.lr_effect_stderr(orth=False)
        orth_stderr = self.irf.lr_effect_stderr(orth=True)
        assert orth_stderr.shape == stderr.shape
        assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)
def test_get_trendorder():
    """get_trendorder maps each trend string to its polynomial order."""
    expected = {"c": 1, "n": 0, "ct": 2, "ctt": 3}
    for trend, order in expected.items():
        assert util.get_trendorder(trend) == order
def test_var_constant():
    # see 2043
    # a series that is constant should make fit() raise, not crash later
    import datetime

    from pandas import DataFrame, DatetimeIndex

    series = np.array([[2.0, 2.0], [1, 2.0], [1, 2.0], [1, 2.0], [1.0, 2.0]])
    data = DataFrame(series)
    d = datetime.datetime.now()
    delta = datetime.timedelta(days=1)
    index = []
    for i in range(data.shape[0]):
        index.append(d)
        d += delta
    data.index = DatetimeIndex(index)
    # with pytest.warns(ValueWarning): #does not silence warning in test output
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=ValueWarning)
        model = VAR(data)
    with pytest.raises(ValueError):
        model.fit(1)
def test_var_trend():
    # see 2271
    data = get_macrodata().view((float, 3), type=np.ndarray)
    model = VAR(data)
    results = model.fit(4)  # , trend = 'c')
    irf = results.irf(10)
    assert irf.irfs.shape == (11, 3, 3)
    data_nc = data - data.mean(0)
    model_nc = VAR(data_nc)
    # Fit once with a trend
    model_nc.fit(4, trend="n")
    with pytest.raises(ValueError):
        # Attempt to change the trend
        # NOTE(review): this refits `model` (fitted with the default trend),
        # not `model_nc` -- presumably intentional, but verify.
        model.fit(4, trend="t")
def test_irf_trend():
    # test for irf with different trend see #1636
    # this is a rough comparison by adding trend or subtracting mean to data
    # to get similar AR coefficients and IRF
    data = get_macrodata().view((float, 3), type=np.ndarray)
    model = VAR(data)
    results = model.fit(4)  # , trend = 'c')
    irf = results.irf(10)
    # demeaned data with no deterministic terms should give similar stderr
    data_nc = data - data.mean(0)
    model_nc = VAR(data_nc)
    results_nc = model_nc.fit(4, trend="n")
    irf_nc = results_nc.irf(10)
    assert_allclose(irf_nc.stderr()[1:4], irf.stderr()[1:4], rtol=0.01)
    trend = 1e-3 * np.arange(len(data)) / (len(data) - 1)
    # for pandas version, currently not used, if data is a pd.DataFrame
    # data_t = pd.DataFrame(data.values + trend[:,None], index=data.index, columns=data.columns)
    data_t = data + trend[:, None]
    model_t = VAR(data_t)
    results_t = model_t.fit(4, trend="ct")
    irf_t = results_t.irf(10)
    assert_allclose(irf_t.stderr()[1:4], irf.stderr()[1:4], rtol=0.03)
class TestVARExtras:
    @classmethod
    def setup_class(cls):
        # annualised (x400) log-difference growth rates of three macro series
        mdata = macrodata.load_pandas().data
        mdata = mdata[["realgdp", "realcons", "realinv"]]
        data = mdata.values
        data = np.diff(np.log(data), axis=0) * 400
        cls.res0 = VAR(data).fit(maxlags=2)
        cls.resl1 = VAR(data).fit(maxlags=1)
        cls.data = data
    def test_process(self, close_figures):
        """Check forecasting/simulation behaviour of the fitted VAR process."""
        res0 = self.res0
        k_ar = res0.k_ar
        fc20 = res0.forecast(res0.endog[-k_ar:], 20)
        mean_lr = res0.mean()
        # long-horizon forecast converges to the long-run process mean
        assert_allclose(mean_lr, fc20[-1], rtol=5e-4)
        ysim = res0.simulate_var(seed=987128)
        assert_allclose(ysim.mean(0), mean_lr, rtol=0.1)
        # initialization does not use long run intercept, see #4542
        assert_allclose(ysim[0], res0.intercept, rtol=1e-10)
        assert_allclose(ysim[1], res0.intercept, rtol=1e-10)
        data = self.data
        resl1 = self.resl1
        # explicit initial_values must be reproduced at the start of the path
        y_sim_init = res0.simulate_var(seed=987128, initial_values=data[-k_ar:])
        y_sim_init_2 = res0.simulate_var(seed=987128, initial_values=data[-1])
        assert_allclose(y_sim_init[:k_ar], data[-k_ar:])
        assert_allclose(y_sim_init_2[0], data[-1])
        assert_allclose(y_sim_init_2[k_ar - 1], data[-1])
        y_sim_init_3 = resl1.simulate_var(seed=987128, initial_values=data[-1])
        assert_allclose(y_sim_init_3[0], data[-1])
        # zero offset -> simulated process has (approximately) zero mean
        n_sim = 900
        ysimz = res0.simulate_var(steps=n_sim, offset=np.zeros((n_sim, 3)), seed=987128)
        zero3 = np.zeros(3)
        assert_allclose(ysimz.mean(0), zero3, atol=0.4)
        # initialization does not use long run intercept, see #4542
        assert_allclose(ysimz[0], zero3, atol=1e-10)
        assert_allclose(ysimz[1], zero3, atol=1e-10)
        # check attributes
        assert_equal(res0.k_trend, 1)
        assert_equal(res0.k_exog_user, 0)
        assert_equal(res0.k_exog, 1)
        assert_equal(res0.k_ar, 2)
        # Smoke test
        res0.irf()
    @pytest.mark.matplotlib
    def test_process_plotting(self, close_figures):
        # Partially a smoke test
        res0 = self.res0
        k_ar = res0.k_ar
        fc20 = res0.forecast(res0.endog[-k_ar:], 20)
        irf = res0.irf()
        res0.plotsim()
        res0.plot_acorr()
        # the plotted forecast lines must reproduce the numeric forecast
        fig = res0.plot_forecast(20)
        fcp = fig.axes[0].get_children()[1].get_ydata()[-20:]
        # Note values are equal, but keep rtol buffer
        assert_allclose(fc20[:, 0], fcp, rtol=1e-13)
        fcp = fig.axes[1].get_children()[1].get_ydata()[-20:]
        assert_allclose(fc20[:, 1], fcp, rtol=1e-13)
        fcp = fig.axes[2].get_children()[1].get_ydata()[-20:]
        assert_allclose(fc20[:, 2], fcp, rtol=1e-13)
        # asymptotic vs Monte Carlo IRF error bands should roughly agree
        fig_asym = irf.plot()
        fig_mc = irf.plot(stderr_type="mc", repl=1000, seed=987128)
        for k in range(3):
            a = fig_asym.axes[1].get_children()[k].get_ydata()
            m = fig_mc.axes[1].get_children()[k].get_ydata()
            # use m as desired because it is larger
            # a is for some irf much smaller than m
            assert_allclose(a, m, atol=0.1, rtol=0.9)
def test_forecast_cov(self):
    """forecast_cov: default equals mse(); method="auto" adds parameter uncertainty."""
    # forecast_cov can include parameter uncertainty if constant-only
    res = self.res0
    covfc1 = res.forecast_cov(3)
    # Without parameter uncertainty the forecast covariance is exactly the MSE.
    assert_allclose(covfc1, res.mse(3), rtol=1e-13)
    # ignore warning, TODO: assert OutputWarning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        covfc2 = res.forecast_cov(3, method="auto")
    # "auto" adds a small parameter-uncertainty term, so only ~5% agreement expected.
    assert_allclose(covfc2, covfc1, rtol=0.05)
    # regression test, TODO: replace with verified numbers (Stata)
    res_covfc2 = np.array(
        [
            [
                [9.45802013, 4.94142038, 37.1999646],
                [4.94142038, 7.09273624, 5.66215089],
                [37.1999646, 5.66215089, 259.61275869],
            ],
            [
                [11.30364479, 5.72569141, 49.28744123],
                [5.72569141, 7.409761, 10.98164091],
                [49.28744123, 10.98164091, 336.4484723],
            ],
            [
                [12.36188803, 6.44426905, 53.54588026],
                [6.44426905, 7.88850029, 13.96382545],
                [53.54588026, 13.96382545, 352.19564327],
            ],
        ]
    )
    assert_allclose(covfc2, res_covfc2, atol=1e-6)
def test_exog(self):
    """A linear trend via trend="ct" and via explicit exog must be equivalent."""
    data = self.res0.model.endog
    nobs = len(data)
    # Three equivalent specifications of constant + linear trend.
    res_trend = VAR(data).fit(maxlags=2, trend="ct")
    time_idx = np.arange(nobs)
    res_exog1 = VAR(data, exog=time_idx).fit(maxlags=2)
    const_and_time = np.arange(nobs)[:, None] ** [0, 1]
    res_exog2 = VAR(data, exog=const_and_time).fit(maxlags=2, trend="n")
    # TODO: intercept differs by 4e-3, others are < 1e-12
    assert_allclose(res_trend.params, res_exog1.params, rtol=5e-3)
    assert_allclose(res_trend.params, res_exog2.params, rtol=5e-3)
    assert_allclose(res_exog1.params, res_exog2.params, rtol=1e-10)
    sims = [
        res.simulate_var(seed=987128)
        for res in (res_trend, res_exog1, res_exog2)
    ]
    for left, right in ((sims[1], sims[0]), (sims[2], sims[0]), (sims[2], sims[1])):
        assert_allclose(left.mean(0), right.mean(0), rtol=1e-12)
    h = 10
    fc1 = res_trend.forecast(res_trend.endog[-2:], h)
    future_idx = np.arange(nobs, nobs + h)
    fc2 = res_exog1.forecast(res_exog1.endog[-2:], h, exog_future=future_idx)
    with pytest.raises(ValueError, match="exog_future only has"):
        short_future = np.arange(nobs, nobs + h // 2)
        res_exog1.forecast(res_exog1.endog[-2:], h, exog_future=short_future)
    future_2col = future_idx[:, None] ** [0, 1]
    fc3 = res_exog2.forecast(res_exog2.endog[-2:], h, exog_future=future_2col)
    for left, right in ((fc2, fc1), (fc3, fc1), (fc3, fc2)):
        assert_allclose(left, right, rtol=1e-12, atol=1e-12)
    fci1 = res_trend.forecast_interval(res_trend.endog[-2:], h)
    fci2 = res_exog1.forecast_interval(
        res_exog1.endog[-2:], h, exog_future=future_idx
    )
    fci3 = res_exog2.forecast_interval(
        res_exog2.endog[-2:], h, exog_future=future_2col
    )
    for left, right in ((fci2, fci1), (fci3, fci1), (fci3, fci2)):
        assert_allclose(left, right, rtol=1e-12, atol=1e-12)
def test_multiple_simulations(self):
    """nsimulations stacks draws; the first must reproduce a single-draw call."""
    results = self.res0
    k_ar = results.k_ar
    neqs = results.neqs
    initial = self.data[-k_ar:]
    single = results.simulate_var(seed=987128, steps=10)
    stacked = results.simulate_var(seed=987128, steps=10, nsimulations=2)
    assert_equal(stacked.shape, (2, 10, neqs))
    assert_allclose(single, stacked[0])
    stacked_init = results.simulate_var(
        seed=987128, steps=10, initial_values=initial, nsimulations=2
    )
    # Every path must start from the supplied initial values.
    for path in range(2):
        assert_allclose(stacked_init[path, :k_ar], initial)
def test_var_cov_params_pandas(bivariate_var_data):
    """cov_params on a DataFrame-backed model returns a labelled DataFrame."""
    frame = pd.DataFrame(bivariate_var_data, columns=["x", "y"])
    res = VAR(frame).fit(2)
    cov = res.cov_params()
    assert isinstance(cov, pd.DataFrame)
    # Index is the product of regressor names and equation names.
    exog_names = ("const", "L1.x", "L1.y", "L2.x", "L2.y")
    expected_index = pd.MultiIndex.from_product((exog_names, ("x", "y")))
    assert_index_equal(cov.index, cov.columns)
    assert_index_equal(cov.index, expected_index)
def test_summaries_exog(reset_randomstate):
    """Exogenous variable names must appear in the summary, with and without lags."""
    frame = pd.DataFrame(np.random.standard_normal((500, 6)))
    frame.columns = [f"endog_{i}" for i in range(2)] + [f"exog_{i}" for i in range(4)]
    frame.index = pd.date_range("1-1-1950", periods=500, freq="MS")
    endog = frame.iloc[:, :2]
    exog = frame.iloc[:, 2:]
    # Check both a lag-free and a lagged specification.
    for lags in (0, 2):
        summ = VAR(endog=endog, exog=exog).fit(maxlags=lags).summary().summary
        for i in range(4):
            assert f"exog_{i}" in summ
def test_whiteness_nlag(reset_randomstate):
    """Regression test for GH 6686: whiteness test rejects nlags <= fitted order."""
    sample = np.random.standard_normal((200, 2))
    res = VAR(sample).fit(maxlags=1, ic=None)
    with pytest.raises(ValueError, match="The whiteness test can only"):
        res.test_whiteness(1)
def test_var_maxlag(reset_randomstate):
    """maxlags=None selects a feasible order; an oversized explicit maxlags raises."""
    sample = np.random.standard_normal((22, 10))
    VAR(sample).fit(maxlags=None, ic="aic")
    with pytest.raises(ValueError, match="maxlags is too large"):
        VAR(sample).fit(maxlags=8, ic="aic")
def test_from_formula():
    """VAR has no formula interface; from_formula must raise NotImplementedError."""
    with pytest.raises(NotImplementedError):
        VAR.from_formula("y ~ x", None)
def test_correct_nobs():
    """Regression test for GH6748: irf_resim output shape when exog is present."""
    raw = macrodata.load_pandas().data
    # Build a quarterly DatetimeIndex from the year/quarter columns.
    parts = raw[["year", "quarter"]].astype(int).astype(str)
    quarterly = dates_from_str(parts["year"] + "Q" + parts["quarter"])
    frame = raw[["realgdp", "realcons", "realinv"]]
    frame.index = pd.DatetimeIndex(quarterly)
    data = np.log(frame).diff().dropna()
    data.index.freq = data.index.inferred_freq
    data_exog = pd.DataFrame(index=data.index)
    data_exog["exovar1"] = np.random.normal(size=data_exog.shape[0])
    # make a VAR model
    results = VAR(endog=data, exog=data_exog).fit(maxlags=1)
    irf = results.irf_resim(
        orth=False, repl=100, steps=10, seed=1, burn=100, cum=False
    )
    assert irf.shape == (100, 11, 3, 3)
@pytest.mark.slow
def test_irf_err_bands():
    """Smoke test: every IRF error-band flavour runs without raising."""
    results = VAR(get_macrodata()).fit(maxlags=2)
    irf = results.irf()
    for band in (irf.err_band_sz1, irf.err_band_sz2, irf.err_band_sz3, irf.errband_mc):
        band()
def test_0_lag(reset_randomstate):
    """Regression test for GH 9412: BIC may legitimately select zero lags."""
    sample = np.random.rand(500, 2)
    results = VAR(sample).fit(maxlags=1, ic="bic", trend="c")
    assert results.params.shape == (1, 2)
    forecasts = results.forecast(sample, steps=5)
    # With zero lags the forecast is the constant repeated for every step.
    assert_allclose(forecasts, np.ones((5, 1)) * results.params)
def test_forecast_wrong_shape_params(reset_randomstate):
    """Regression test for GH 9412: forecast() rejects mis-shaped coefficients."""
    sample = np.random.rand(300, 2)
    fitted = VAR(sample).fit(maxlags=1, ic="aic", trend="c")
    with pytest.raises(ValueError):
        forecast(sample, fitted.params, fitted.params, steps=5)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@tsa@vector_ar@tests@test_var.py@.PATH_END.py
|
{
"filename": "polconv.py",
"repo_name": "rvweeren/lofar_facet_selfcal",
"repo_path": "lofar_facet_selfcal_extracted/lofar_facet_selfcal-main/polconv.py",
"type": "Python"
}
|
"""
This code is based on MockPyStep.py from https://dp3.readthedocs.io/en/latest/steps/PythonStep.html
Polarization conversion implemented by Jurjen de Jong.
To use this script in DPPP/DP3, you can save this file in the same folder as from where you run your DPPP command.
Or you save this python file somewhere you like and run:
export PYTHONPATH=/somewhere/you/like:$PYTHONPATH
"""
from subprocess import check_output
import re
import numpy as np
import shutil
import sys
# Hacky way to figure out the DPPP/DP3 version (important to run this script properly).
DP3name = shutil.which('DP3')
if not DP3name:
    DP3name = shutil.which('DPPP')
try:
    # Raw string so "\." is a literal regex escape, not a Python string escape
    # (the non-raw form triggers an invalid-escape warning on modern Python).
    rgx = r'[0-9]+(\.[0-9]+)+'
    grep_version_string = str(check_output(DP3name + ' --version', shell=True), 'utf-8')
    # Keep only "major.minor" (first 3 characters of the matched version).
    DP3_VERSION = float(re.search(rgx, grep_version_string).group()[0:3])
except (AttributeError, TypeError):
    # AttributeError: the regex did not match the --version output.
    # TypeError: neither DP3 nor DPPP was found on PATH (DP3name is None).
    print('WARNING: grep of DP3 version failed.')
    DP3_VERSION = 0.0  # set to default
if DP3_VERSION > 5.3:
    from dp3 import Fields
try:
    from dppp import DPStep as Step
except ImportError:
    # Newer DP3 releases moved the Step base class around.
    if DP3_VERSION >= 6:
        from dp3.pydp3 import Step
    else:
        from dp3 import Step
class PolConv(Step):
    """
    Convert UV data polarization.
    lin2circ --> convert from linear to circular UV data
    circ2lin --> convert from circular to linear UV data
    ----------------------------------------------------------------
    Text from: George Heald (2010) and Reinout van Weeren (2014) from lin2circ.py code
    Assuming that the definitions follow the IAU ones, which are
    described by Hamaker & Bregman (1996, A&AS, 117, 161).
    In particular, we use the coordinate transformation given in Section 3,
             1    /  1  +i \
    C_A = ------- |         |
          sqrt(2) \  1  -i /
    The Hermitian conjugate of this is,
              1    /  1   1 \
    C+_A = ------- |         |
           sqrt(2) \ -i  +i /
    So V_RL = C_A * V_XY * C+_A
    where V_XY is the visibilities in linear coordinates,
    V_RL is the visibilities in circular coordinates.
    Since the transformation matrices are hermitian, C_A ^-1 = C+_A and C+_A^-1 = C_A.
    So, we have:
    V_XY = C+_A*V_RL*C_A
    ----------------------------------------------------------------
    """

    def __init__(self, parset, prefix):
        """
        Set up the step (constructor). Read the parset here.

        Args:
            parset: Parameter set for the entire pipeline.
            prefix: Prefix for this step.
        """
        super().__init__()
        # Read the two boolean switches. Old and new DP3 Python bindings expose
        # different accessor names (getInt vs get_int), hence the nested fallbacks.
        try:
            self.lin2circ = bool(parset.getInt(prefix + "lin2circ"))
        except RuntimeError:
            self.lin2circ = False
        except AttributeError:
            # DP3 Python bindings have been renamed.
            try:
                self.lin2circ = bool(parset.get_int(prefix + "lin2circ"))
            except RuntimeError:
                self.lin2circ = False
        try:
            self.circ2lin = bool(parset.getInt(prefix + "circ2lin"))
        except RuntimeError:
            self.circ2lin = False
        except AttributeError:
            # DP3 Python bindings have been renamed.
            try:
                self.circ2lin = bool(parset.get_int(prefix + "circ2lin"))
            except RuntimeError:
                self.circ2lin = False
        if self.lin2circ and self.circ2lin:
            # BUG FIX: the second suggested option previously repeated
            # "pystep.lin2circ=0", making both suggestions identical/invalid.
            sys.exit("Cannot do both lin2circ and circ2lin."
                     "\nChoose:"
                     "\npystep.circ2lin=1 pystep.lin2circ=0"
                     "\nOR"
                     "\npystep.circ2lin=0 pystep.lin2circ=1")
        elif not self.lin2circ and not self.circ2lin:
            # BUG FIX: this branch previously printed "Cannot do both ..."
            # even though the problem is that neither conversion was selected.
            sys.exit("Must choose either lin2circ or circ2lin."
                     "\nChoose:"
                     "\npystep.circ2lin=1 pystep.lin2circ=0"
                     "\nOR"
                     "\npystep.circ2lin=0 pystep.lin2circ=1")
        self.fetch_uvw = True

    def get_required_fields(self):
        """Fields this step reads (only meaningful for the DP3 > 5.3 API)."""
        if DP3_VERSION > 5.3:
            return (Fields.DATA | Fields.FLAGS | Fields.WEIGHTS | Fields.UVW)
        else:
            pass

    def get_provided_fields(self):
        """Fields this step adds: none, the data array is modified in place."""
        if DP3_VERSION > 5.3:
            return Fields()
        else:
            pass

    def update_info(self, dpinfo):
        """
        Process metadata. This will be called before any call to process.

        Args:
            dpinfo: DPInfo object with all metadata, see docs in pydp3.cc
        """
        super().update_info(dpinfo)
        # Make sure data is read
        self.info().set_need_vis_data()
        # Make sure data will be written
        self.info().set_write_data()

    def show(self):
        """Print a summary of the step and its settings"""
        print("\nPolConv")
        if self.lin2circ:
            print("\nConverting UV data polarization from linear to circular\n")
        elif self.circ2lin:
            print("\nConverting UV data polarization from circular to linear\n")

    def process(self, dpbuffer):
        """
        Process one time slot of data. This function MUST call process_next_step.

        Args:
            dpbuffer: DPBuffer object which can contain data, flags and weights
                for one time slot.
        """
        # View (copy=False) of the visibilities; last axis holds the 4 correlations.
        data = np.array(dpbuffer.get_data(), copy=False, dtype=np.complex64)
        if self.circ2lin:
            """
            circ2lin
            XX = RR + RL + LR + LL
            XY = iRR - iRL + iLR - iLL
            YX = -iRR - iRL + iLR + iLL
            YY = RR - RL - LR + LL
            """
            newdata = np.transpose(np.array([
                0.5 * (data[:, :, 0] + data[:, :, 1] + data[:, :, 2] + data[:, :, 3]),
                0.5 * (1j * data[:, :, 0] - 1j * data[:, :, 1] + 1j * data[:, :, 2] - 1j * data[:, :, 3]),
                0.5 * (-1j * data[:, :, 0] - 1j * data[:, :, 1] + 1j * data[:, :, 2] + 1j * data[:, :, 3]),
                0.5 * (data[:, :, 0] - data[:, :, 1] - data[:, :, 2] + data[:, :, 3])]),
                (1, 2, 0))
        elif self.lin2circ:
            """
            lin2circ
            RR = XX - iXY + iYX + YY
            RL = XX + iXY + iYX - YY
            LR = XX - iXY - iYX - YY
            LL = XX + iXY - iYX + YY
            """
            newdata = np.transpose(np.array([
                0.5 * (data[:, :, 0] - 1j * data[:, :, 1] + 1j * data[:, :, 2] + data[:, :, 3]),
                0.5 * (data[:, :, 0] + 1j * data[:, :, 1] + 1j * data[:, :, 2] - data[:, :, 3]),
                0.5 * (data[:, :, 0] - 1j * data[:, :, 1] - 1j * data[:, :, 2] - data[:, :, 3]),
                0.5 * (data[:, :, 0] + 1j * data[:, :, 1] - 1j * data[:, :, 2] + data[:, :, 3])]),
                (1, 2, 0))
        # In-place update so the change is visible through the buffer's view.
        data *= 0  # trick to change the UV data
        data += newdata
        # Send processed data to the next step
        if DP3_VERSION > 5.3:
            next_step = self.get_next_step()
            if next_step is not None:
                next_step.process(dpbuffer)
        else:
            self.process_next_step(dpbuffer)

    def finish(self):
        """
        If there is any remaining data, process it. This can be useful if the
        step accumulates multiple time slots.
        """
        if self.circ2lin:
            print('Converted UV data from circular (RR,RL,LR,LL) to linear polarization (XX,XY,YX,YY)')
        elif self.lin2circ:
            print('Converted UV data from linear (XX,XY,YX,YY) to circular polarization (RR,RL,LR,LL)')
        pass
|
rvweerenREPO_NAMElofar_facet_selfcalPATH_START.@lofar_facet_selfcal_extracted@lofar_facet_selfcal-main@polconv.py@.PATH_END.py
|
{
"filename": "_uirevision.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/contourcarpet/_uirevision.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``uirevision`` property of ``contourcarpet`` traces."""

    def __init__(self, plotly_name="uirevision", parent_name="contourcarpet", **kwargs):
        # Fill in defaults only when the caller did not override them; the
        # merged kwargs reach the base class exactly as with pop-and-pass.
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(UirevisionValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@contourcarpet@_uirevision.py@.PATH_END.py
|
{
"filename": "hypothesis_temporary_module_02664ba65877b87ba975fadffb95a5b1cd9b45ce.py",
"repo_name": "ryanvarley/exodata",
"repo_path": "exodata_extracted/exodata-master/exodata/tests/.hypothesis/eval_source/hypothesis_temporary_module_02664ba65877b87ba975fadffb95a5b1cd9b45ce.py",
"type": "Python"
}
|
from hypothesis.utils.conventions import not_set
def accept(f):
    """Wrap *f* (hypothesis-generated) so pytest sees optional keyword arguments."""
    def test_works(self, T_eff=not_set, mu=not_set, g=not_set):
        # Forward everything by keyword, exactly as hypothesis expects.
        forwarded = {"self": self, "T_eff": T_eff, "mu": mu, "g": g}
        return f(**forwarded)
    return test_works
|
ryanvarleyREPO_NAMEexodataPATH_START.@exodata_extracted@exodata-master@exodata@tests@.hypothesis@eval_source@hypothesis_temporary_module_02664ba65877b87ba975fadffb95a5b1cd9b45ce.py@.PATH_END.py
|
{
"filename": "getstarted.ipynb",
"repo_name": "gomesdasilva/ACTIN2",
"repo_path": "ACTIN2_extracted/ACTIN2-master/docs/getstarted.ipynb",
"type": "Jupyter Notebook"
}
|
## Getting started
In this tutorial we will read HARPS spectra, extract and plot three activity indicators and radial velocity for the star HD41248.
Start by importing and initializing `ACTIN`
```python
%matplotlib inline
from actin2 import ACTIN
actin = ACTIN()
```
Get the HARPS 1D fits files of the star HD41248 from the `test` folder
```python
import glob, os
files = glob.glob(os.path.join(os.pardir, "actin2/test/HARPS/HD41248", "*_s1d_A.fits"))
files
```
['../actin2/test/HARPS/HD41248/HARPS.2014-01-24T01:18:06.472_s1d_A.fits',
'../actin2/test/HARPS/HD41248/HARPS.2014-01-16T06:24:23.418_s1d_A.fits',
'../actin2/test/HARPS/HD41248/HARPS.2014-01-24T04:17:29.213_s1d_A.fits',
'../actin2/test/HARPS/HD41248/HARPS.2014-01-21T05:33:32.740_s1d_A.fits',
'../actin2/test/HARPS/HD41248/HARPS.2014-01-21T03:16:16.891_s1d_A.fits',
'../actin2/test/HARPS/HD41248/HARPS.2014-01-16T05:37:46.157_s1d_A.fits']
Check which indices come pre-installed. We start by looking at the full indices table:
```python
actin.IndTable().table
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ind_id</th>
<th>ind_var</th>
<th>ln_id</th>
<th>ln_c</th>
<th>ln_ctr</th>
<th>ln_win</th>
<th>bandtype</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>I_CaII</td>
<td>L1</td>
<td>CaIIK</td>
<td>1.0</td>
<td>3933.664</td>
<td>1.09</td>
<td>tri</td>
</tr>
<tr>
<th>1</th>
<td>I_CaII</td>
<td>L2</td>
<td>CaIIH</td>
<td>1.0</td>
<td>3968.470</td>
<td>1.09</td>
<td>tri</td>
</tr>
<tr>
<th>2</th>
<td>I_CaII</td>
<td>R1</td>
<td>CaIIR1</td>
<td>1.0</td>
<td>3901.070</td>
<td>20.00</td>
<td>sq</td>
</tr>
<tr>
<th>3</th>
<td>I_CaII</td>
<td>R2</td>
<td>CaIIR2</td>
<td>1.0</td>
<td>4001.070</td>
<td>20.00</td>
<td>sq</td>
</tr>
<tr>
<th>4</th>
<td>I_NaI</td>
<td>L1</td>
<td>NaID1</td>
<td>1.0</td>
<td>5895.920</td>
<td>0.50</td>
<td>sq</td>
</tr>
<tr>
<th>5</th>
<td>I_NaI</td>
<td>L2</td>
<td>NaID2</td>
<td>1.0</td>
<td>5889.950</td>
<td>0.50</td>
<td>sq</td>
</tr>
<tr>
<th>6</th>
<td>I_NaI</td>
<td>R1</td>
<td>NaIR1</td>
<td>1.0</td>
<td>5805.000</td>
<td>10.00</td>
<td>sq</td>
</tr>
<tr>
<th>7</th>
<td>I_NaI</td>
<td>R2</td>
<td>NaIR2</td>
<td>1.0</td>
<td>6097.000</td>
<td>20.00</td>
<td>sq</td>
</tr>
<tr>
<th>8</th>
<td>I_Ha16</td>
<td>L1</td>
<td>Ha16</td>
<td>1.0</td>
<td>6562.808</td>
<td>1.60</td>
<td>sq</td>
</tr>
<tr>
<th>9</th>
<td>I_Ha16</td>
<td>R1</td>
<td>HaR1</td>
<td>1.0</td>
<td>6550.870</td>
<td>10.75</td>
<td>sq</td>
</tr>
<tr>
<th>10</th>
<td>I_Ha16</td>
<td>R2</td>
<td>HaR2</td>
<td>1.0</td>
<td>6580.310</td>
<td>8.75</td>
<td>sq</td>
</tr>
<tr>
<th>11</th>
<td>I_Ha06</td>
<td>L1</td>
<td>Ha06</td>
<td>1.0</td>
<td>6562.808</td>
<td>0.60</td>
<td>sq</td>
</tr>
<tr>
<th>12</th>
<td>I_Ha06</td>
<td>R1</td>
<td>HaR1</td>
<td>1.0</td>
<td>6550.870</td>
<td>10.75</td>
<td>sq</td>
</tr>
<tr>
<th>13</th>
<td>I_Ha06</td>
<td>R2</td>
<td>HaR2</td>
<td>1.0</td>
<td>6580.310</td>
<td>8.75</td>
<td>sq</td>
</tr>
<tr>
<th>14</th>
<td>I_HeI</td>
<td>L1</td>
<td>HeI</td>
<td>1.0</td>
<td>5875.620</td>
<td>0.40</td>
<td>sq</td>
</tr>
<tr>
<th>15</th>
<td>I_HeI</td>
<td>R1</td>
<td>HeIR1</td>
<td>1.0</td>
<td>5869.000</td>
<td>5.00</td>
<td>sq</td>
</tr>
<tr>
<th>16</th>
<td>I_HeI</td>
<td>R2</td>
<td>HeIR2</td>
<td>1.0</td>
<td>5881.000</td>
<td>5.00</td>
<td>sq</td>
</tr>
<tr>
<th>17</th>
<td>I_CaI</td>
<td>L1</td>
<td>CaI</td>
<td>1.0</td>
<td>6572.795</td>
<td>0.34</td>
<td>tri</td>
</tr>
<tr>
<th>18</th>
<td>I_CaI</td>
<td>R1</td>
<td>HaR1</td>
<td>1.0</td>
<td>6550.870</td>
<td>10.75</td>
<td>sq</td>
</tr>
<tr>
<th>19</th>
<td>I_CaI</td>
<td>R2</td>
<td>HaR2</td>
<td>1.0</td>
<td>6580.310</td>
<td>8.75</td>
<td>sq</td>
</tr>
</tbody>
</table>
</div>
The available indices are:
```python
actin.IndTable().indices
```
['I_CaI', 'I_CaII', 'I_Ha06', 'I_Ha16', 'I_HeI', 'I_NaI']
We are going to calculate the indices based on the Ca II H&K, H$\alpha$ (using a 0.6 Å central bandpass), and Na I D2 lines, using the respective index IDs as given in `ind_id`:
```python
indices = ['I_CaII', 'I_Ha06', 'I_NaI']
```
Now calculate the indices for the loaded files. The results will be stored in a pandas DataFrame.
```python
df = actin.run(files, indices)
```
100%|██████████| 6/6 [00:02<00:00, 2.94it/s]
See the results headers
```python
df.keys()
```
Index(['obj', 'instr', 'date_obs', 'bjd', 'drs', 'exptime', 'ra', 'dec',
'snr7', 'snr50', 'prog_id', 'pi_coi', 'cal_th_err', 'berv', 'spec_rv',
'snr_med', 'ftype', 'rv_flg', 'rv', 'dvrms', 'ccf_noise', 'fwhm',
'cont', 'ccf_mask', 'drift_noise', 'drift_rv', 'rv_wave_corr', 'rv_err',
'spec_flg', 'file', 'I_CaII', 'I_CaII_err', 'I_CaII_Rneg', 'I_Ha06',
'I_Ha06_err', 'I_Ha06_Rneg', 'I_NaI', 'I_NaI_err', 'I_NaI_Rneg',
'actin_ver'],
dtype='object')
and the full table
```python
df
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>obj</th>
<th>instr</th>
<th>date_obs</th>
<th>bjd</th>
<th>drs</th>
<th>exptime</th>
<th>ra</th>
<th>dec</th>
<th>snr7</th>
<th>snr50</th>
<th>...</th>
<th>I_CaII</th>
<th>I_CaII_err</th>
<th>I_CaII_Rneg</th>
<th>I_Ha06</th>
<th>I_Ha06_err</th>
<th>I_Ha06_Rneg</th>
<th>I_NaI</th>
<th>I_NaI_err</th>
<th>I_NaI_Rneg</th>
<th>actin_ver</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>HD41248</td>
<td>HARPS</td>
<td>2014-01-16T05:37:46.156</td>
<td>2.456674e+06</td>
<td>HARPS_3.7</td>
<td>600.0005</td>
<td>90.136003</td>
<td>-56.16330</td>
<td>12.0</td>
<td>55.6</td>
<td>...</td>
<td>0.126963</td>
<td>0.001206</td>
<td>0.002865</td>
<td>0.107759</td>
<td>0.000652</td>
<td>0.0</td>
<td>0.358022</td>
<td>0.001249</td>
<td>0.0</td>
<td>2.0 beta 8</td>
</tr>
<tr>
<th>1</th>
<td>HD41248</td>
<td>HARPS</td>
<td>2014-01-16T06:24:23.418</td>
<td>2.456674e+06</td>
<td>HARPS_3.7</td>
<td>600.0007</td>
<td>90.139297</td>
<td>-56.16415</td>
<td>12.4</td>
<td>55.6</td>
<td>...</td>
<td>0.138301</td>
<td>0.001236</td>
<td>0.001154</td>
<td>0.105833</td>
<td>0.000653</td>
<td>0.0</td>
<td>0.356299</td>
<td>0.001252</td>
<td>0.0</td>
<td>2.0 beta 8</td>
</tr>
<tr>
<th>2</th>
<td>HD41248</td>
<td>HARPS</td>
<td>2014-01-21T03:16:16.890</td>
<td>2.456679e+06</td>
<td>HARPS_3.7</td>
<td>900.0010</td>
<td>90.138288</td>
<td>-56.16339</td>
<td>26.5</td>
<td>101.3</td>
<td>...</td>
<td>0.145480</td>
<td>0.000697</td>
<td>0.000000</td>
<td>0.105242</td>
<td>0.000374</td>
<td>0.0</td>
<td>0.357987</td>
<td>0.000708</td>
<td>0.0</td>
<td>2.0 beta 8</td>
</tr>
<tr>
<th>3</th>
<td>HD41248</td>
<td>HARPS</td>
<td>2014-01-21T05:33:32.739</td>
<td>2.456679e+06</td>
<td>HARPS_3.7</td>
<td>900.0005</td>
<td>90.135625</td>
<td>-56.16338</td>
<td>24.6</td>
<td>98.0</td>
<td>...</td>
<td>0.141643</td>
<td>0.000732</td>
<td>0.000000</td>
<td>0.105055</td>
<td>0.000386</td>
<td>0.0</td>
<td>0.357836</td>
<td>0.000731</td>
<td>0.0</td>
<td>2.0 beta 8</td>
</tr>
<tr>
<th>4</th>
<td>HD41248</td>
<td>HARPS</td>
<td>2014-01-24T01:18:06.471</td>
<td>2.456682e+06</td>
<td>HARPS_3.7</td>
<td>600.0018</td>
<td>90.136782</td>
<td>-56.16351</td>
<td>15.7</td>
<td>67.6</td>
<td>...</td>
<td>0.139618</td>
<td>0.001047</td>
<td>0.000194</td>
<td>0.106154</td>
<td>0.000542</td>
<td>0.0</td>
<td>0.356930</td>
<td>0.001037</td>
<td>0.0</td>
<td>2.0 beta 8</td>
</tr>
<tr>
<th>5</th>
<td>HD41248</td>
<td>HARPS</td>
<td>2014-01-24T04:17:29.213</td>
<td>2.456682e+06</td>
<td>HARPS_3.7</td>
<td>600.0014</td>
<td>90.138346</td>
<td>-56.16322</td>
<td>16.4</td>
<td>68.2</td>
<td>...</td>
<td>0.137774</td>
<td>0.001003</td>
<td>0.000208</td>
<td>0.107019</td>
<td>0.000551</td>
<td>0.0</td>
<td>0.357135</td>
<td>0.001038</td>
<td>0.0</td>
<td>2.0 beta 8</td>
</tr>
</tbody>
</table>
<p>6 rows × 40 columns</p>
</div>
and plot the results
```python
import matplotlib.pylab as plt
plt.figure(figsize=(6, (len(indices)+1)*2))
plt.subplot(len(indices)+1, 1, 1)
plt.ylabel("RV [m/s]")
plt.errorbar(df.bjd - 2450000, df.rv, df.rv_err, fmt='k.')
for i, index in enumerate(indices):
plt.subplot(len(indices)+1, 1, i+2)
plt.ylabel(index)
plt.errorbar(df.bjd - 2450000, df[index], df[index + "_err"], fmt='k.')
plt.xlabel("BJD $-$ 2450000 [days]")
plt.tight_layout()
```

|
gomesdasilvaREPO_NAMEACTIN2PATH_START.@ACTIN2_extracted@ACTIN2-master@docs@getstarted.ipynb@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/python-package/ut/large/conftest.py",
"type": "Python"
}
|
import os
import tarfile
import zipfile
import shutil
from filelock import FileLock
import yatest.common
def unpack_python(dst_path, py_ver):
    """Extract the bundled CPython distribution for *py_ver* into *dst_path*.

    Raises KeyError if *py_ver* has no bundled archive.
    """
    arch_name = {
        "3.6": "python3.6.tar.gz",
    }[py_ver]
    # Context manager closes the archive even if extraction fails
    # (the original leaked the file handle on error).
    with tarfile.open(yatest.common.binary_path("catboost/python-package/ut/large/pkg/" + arch_name)) as tar:
        tar.extractall(path=dst_path)
def unpack_deps(dst_path, py_ver):
    """Unpack test dependencies (wheels, helper modules, native libs) into *dst_path*.

    Always restores the original working directory, even if extraction fails.
    """
    curdir = os.getcwd()
    try:
        os.makedirs(dst_path, exist_ok=True)
        os.chdir(dst_path)
        deps_dir = "deps"
        if not os.path.exists(deps_dir):
            # Context managers close the archives even on error
            # (the originals leaked file handles if extractall raised).
            with tarfile.open(yatest.common.binary_path("catboost/python-package/ut/large/pkg/deps.tgz")) as tar:
                tar.extractall(path=deps_dir)
        # Start from a clean slate for the unpacked trees.
        for d in "catboost", "libs", "dynlibs":
            if os.path.exists(d):
                shutil.rmtree(d)
        deps = [
            os.path.join(deps_dir, dep) for dep in [
                "attrs-21.2.0-py2.py3-none-any.whl",
                "importlib_metadata-2.1.1-py2.py3-none-any.whl",
                "iniconfig-1.1.1-py2.py3-none-any.whl",
                "joblib-1.0.1-py3-none-any.whl",
                "packaging-20.9-py2.py3-none-any.whl",
                "pathlib2-2.3.6-py2.py3-none-any.whl",
                "plotly-4.14.3-py2.py3-none-any.whl",
                "pluggy-0.13.1-py2.py3-none-any.whl",
                "py-1.10.0-py2.py3-none-any.whl",
                "pyparsing-2.4.7-py2.py3-none-any.whl",
                "pytest-6.1.2-py3-none-any.whl",
                "python_dateutil-2.8.2-py2.py3-none-any.whl",
                "pytz-2021.1-py2.py3-none-any.whl",
                "six-1.16.0-py2.py3-none-any.whl",
                "threadpoolctl-2.2.0-py3-none-any.whl",
                "toml-0.10.2-py2.py3-none-any.whl",
                "zipp-1.2.0-py2.py3-none-any.whl"
            ]
        ]
        if py_ver == "3.6":
            # Binary wheels are version-specific, so only added for a known ABI.
            deps += [
                os.path.join(deps_dir, dep) for dep in [
                    "numpy-1.16.0-cp36-cp36m-manylinux1_x86_64.whl",
                    "pandas-0.24.0-cp36-cp36m-manylinux1_x86_64.whl",
                    "scikit_learn-0.24.2-cp36-cp36m-manylinux2010_x86_64.whl",
                    "scipy-1.5.4-cp36-cp36m-manylinux1_x86_64.whl"
                ]
            ]
        # Wheels are plain zip files; unpack all of them into one site dir.
        for dep in deps:
            with zipfile.ZipFile(dep, "r") as f:
                f.extractall("catboost")
        files = [
            yatest.common.source_path("catboost/pytest/lib/common_helpers.py"),
            yatest.common.source_path("catboost/python-package/ut/large/catboost_pytest_lib.py"),
            yatest.common.source_path("catboost/python-package/ut/large/list_plugin.py"),
        ]
        for f in files:
            shutil.copy(f, "catboost")
        libs = os.path.join(deps_dir, "py" + py_ver + "libs.tgz")
        dynlibs = os.path.join(deps_dir, "py" + py_ver + "dynlibs.tgz")
        with tarfile.open(libs) as tar:
            tar.extractall(path="libs")
        with tarfile.open(dynlibs) as tar:
            tar.extractall(path="dynlibs")
    finally:
        os.chdir(curdir)
def pytest_sessionstart(session):
    """Build and unpack per-version Python test environments, once per test session.

    A file lock guards against concurrent pytest workers; if the environments
    directory already exists, the work was done by another worker.
    """
    test_root = yatest.common.source_path('catboost/python-package/ut/large/')
    python_envs_dir = os.path.join(test_root, 'py_envs')
    python_envs_lock = FileLock(os.path.join(test_root, 'py_envs.lock'))
    with python_envs_lock:
        if os.path.exists(python_envs_dir):
            return
        os.mkdir(python_envs_dir)
        for py_ver in ['3.6']:
            dst_path = os.path.join(python_envs_dir, py_ver)
            whl_dir = yatest.common.source_path("catboost/python-package")
            # Strip the YA* build-system variables so mk_wheel runs standalone.
            mk_wheel_env = os.environ.copy()
            mk_wheel_env_keys = list(mk_wheel_env.keys())
            for key in mk_wheel_env_keys:
                if key.startswith("YA"):
                    del mk_wheel_env[key]
            yatest.common.execute(
                [
                    yatest.common.python_path(),
                    "mk_wheel.py",
                    "-DUSE_SYSTEM_PYTHON=" + py_ver,
                    "-DCATBOOST_OPENSOURCE=yes",
                    "-DCFLAGS=-DCATBOOST_OPENSOURCE=yes",
                    "--host-platform-flag", "CATBOOST_OPENSOURCE=yes",
                    "--host-platform-flag", "CFLAGS=-DCATBOOST_OPENSOURCE=yes",
                    "--build-widget=no",
                    "--build-system", "YA",
                ],
                cwd=whl_dir,
                env=mk_wheel_env,
            )
            whl_file = None
            for f in os.listdir(whl_dir):
                if f.endswith(".whl") and "cp" + py_ver.replace(".", "") in f:
                    whl_file = os.path.join(whl_dir, f)
                    break
            # BUG FIX: previously a missing wheel fell through to
            # zipfile.ZipFile(None) and crashed with an opaque TypeError.
            if whl_file is None:
                raise RuntimeError(
                    "no wheel for python " + py_ver + " found in " + whl_dir
                )
            unpack_deps(dst_path, py_ver)
            with zipfile.ZipFile(whl_file, "r") as f:
                f.extractall(os.path.join(dst_path, "catboost"))
            unpack_python(dst_path, py_ver)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@python-package@ut@large@conftest.py@.PATH_END.py
|
{
"filename": "_axial_vector_mediator_cross_sections.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/hazma/experimental/axial_vector_mediator/_axial_vector_mediator_cross_sections.py",
"type": "Python"
}
|
# from cmath import sqrt, pi, log
from hazma.parameters import charged_pion_mass as mpi
from hazma.parameters import neutral_pion_mass as mpi0
# from ..parameters import fpi
# from ..parameters import qe
from hazma.parameters import muon_mass as mmu
from hazma.parameters import electron_mass as me
from scipy.integrate import quad
class AxialVectorMediatorCrossSections:
    """Annihilation cross sections for DM coupled through an axial-vector mediator.

    NOTE(review): the matrix-element expressions are placeholders (``ret_val = 0.0``)
    pending implementation, so every partial cross section currently returns 0.
    Expects the mixing class to provide ``mx`` and ``ma`` attributes.
    """

    def sigma_xx_to_a_to_ff(self, Q, f):
        """
        Returns the cross section for xbar x to fbar f.

        Parameters
        ----------
        Q : float
            Center of mass energy.
        f : str
            Name of final state fermion: "e" or "mu".
        self : object
            Class containing the vector mediator parameters.

        Returns
        -------
        cross_section : float
            Cross section for xbar + x -> a -> fbar + f.

        Raises
        ------
        ValueError
            If *f* is not "e" or "mu".
        """
        if f == "e":
            mf = me
            # gall = self.gaee
        elif f == "mu":
            mf = mmu
            # gall = self.gamumu
        else:
            # BUG FIX: an unknown fermion name previously fell through and
            # raised an UnboundLocalError on ``mf`` below.
            raise ValueError("final state fermion must be 'e' or 'mu', got %r" % (f,))
        mx = self.mx
        if Q >= 2.0 * mf and Q >= 2.0 * mx:
            # gaxx = self.gaxx
            # ma = self.ma
            # width_a = self.width_a
            ret_val = 0.0
            assert ret_val.imag == 0
            assert ret_val.real >= 0
            return ret_val.real
        else:
            # Below threshold: the process is kinematically forbidden.
            return 0.0

    def dsigma_ds_xx_to_a_to_pi0pipi(self, s, Q):
        """Differential cross section d(sigma)/ds for xbar x -> a -> pi0 pi pi."""
        mx = self.mx
        # Kinematic limits for the two-charged-pion invariant mass squared s.
        if (
            Q > 2.0 * mpi + mpi0
            and Q > 2.0 * mx
            and s > 4.0 * mpi ** 2
            and s < (Q - mpi0) ** 2
        ):
            # gauu = self.gauu
            # gadd = self.gadd
            # gaxx = self.gaxx
            # ma = self.ma
            # width_a = self.width_a
            ret_val = 0.0
            assert ret_val.imag == 0.0
            return ret_val.real
        else:
            return 0.0

    def sigma_xx_to_a_to_pi0pipi(self, Q):
        """Total cross section for xbar x -> a -> pi0 pi pi, integrated over s."""
        if Q > 2.0 * mpi + mpi0 and Q > 2.0 * self.mx:
            s_min = 4.0 * mpi ** 2
            s_max = (Q - mpi0) ** 2
            ret_val = quad(self.dsigma_ds_xx_to_a_to_pi0pipi, s_min, s_max, args=(Q))[0]
            assert ret_val.imag == 0
            assert ret_val.real >= 0
            return ret_val.real
        else:
            return 0.0

    def sigma_xx_to_aa(self, Q):
        """Cross section for xbar x -> a a (mediator pair production)."""
        mx = self.mx
        ma = self.ma
        if Q >= 2.0 * ma and Q >= 2.0 * mx:
            # gaxx = self.gaxx
            ret_val = 0.0
            assert ret_val.imag == 0
            assert ret_val.real >= 0
            return ret_val.real
        else:
            return 0.0

    def annihilation_cross_sections(self, Q):
        """
        Compute the total cross section for two fermions annihilating through a
        vector mediator to mesons and leptons.

        Parameters
        ----------
        Q : float
            Center of mass energy.

        Returns
        -------
        cs : dict
            Partial cross sections keyed by final state plus "total".
        """
        muon_contr = self.sigma_xx_to_a_to_ff(Q, "mu")
        electron_contr = self.sigma_xx_to_a_to_ff(Q, "e")
        pi0pipi_contr = self.sigma_xx_to_a_to_pi0pipi(Q)
        aa_contr = self.sigma_xx_to_aa(Q)
        total = muon_contr + electron_contr + pi0pipi_contr + aa_contr
        cross_secs = {
            "mu mu": muon_contr,
            "e e": electron_contr,
            "pi0 pi pi": pi0pipi_contr,
            "a a": aa_contr,
            "total": total,
        }
        return cross_secs

    def annihilation_branching_fractions(self, Q):
        """
        Compute the branching fractions for two fermions annihilating through
        an axial vector mediator to mesons and leptons.

        Parameters
        ----------
        Q : float
            Center of mass energy.

        Returns
        -------
        bfs : dictionary
            Dictionary of the branching fractions. The keys are 'total',
            'mu mu', 'e e', 'pi0 pi pi', 'a a'.
        """
        # BUG FIX: previously called self.cross_sections(Q), a method this
        # class does not define (it defines annihilation_cross_sections).
        CSs = self.annihilation_cross_sections(Q)
        if CSs["total"] == 0.0:
            return {"mu mu": 0.0, "e e": 0.0, "pi0 pi pi": 0.0, "a a": 0.0}
        else:
            return {
                "mu mu": CSs["mu mu"] / CSs["total"],
                "e e": CSs["e e"] / CSs["total"],
                "pi0 pi pi": CSs["pi0 pi pi"] / CSs["total"],
                "a a": CSs["a a"] / CSs["total"],
            }
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@hazma@experimental@axial_vector_mediator@_axial_vector_mediator_cross_sections.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "hpparvi/PyTransit",
"repo_path": "PyTransit_extracted/PyTransit-master/pytransit/lpf/baselines/__init__.py",
"type": "Python"
}
|
# PyTransit: fast and easy exoplanet transit modelling in Python.
# Copyright (C) 2010-2019 Hannu Parviainen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
hpparviREPO_NAMEPyTransitPATH_START.@PyTransit_extracted@PyTransit-master@pytransit@lpf@baselines@__init__.py@.PATH_END.py
|
{
"filename": "fcnet_g.py",
"repo_name": "Guo-Jian-Wang/colfi",
"repo_path": "colfi_extracted/colfi-master/colfi/fcnet_g.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from . import sequence as seq
from . import nodeframe
import torch
import torch.nn as nn
import numpy as np
#%%ANN + Gaussian - for one data set & one parameter
class MLPGaussian(torch.nn.Module):
    """Fully connected net predicting a Gaussian (mean, sigma) per output parameter.

    The head width is 2 * node_out: one mean and one standard deviation each.
    """

    def __init__(self, node_in=100, node_out=1, hidden_layer=3, nodes=None,
                 activation_func='Softplus'):
        super(MLPGaussian, self).__init__()
        self.node_in = node_in
        self.node_out = node_out
        if nodes is None:
            # Default architecture: node counts decrease from input to head.
            nodes = nodeframe.decreasingNode(
                node_in=node_in, node_out=node_out * 2,
                hidden_layer=hidden_layer, get_allNode=True)
        self.fc = seq.LinearSeq(
            nodes, mainActive=activation_func, finalActive='None',
            mainBN=True, finalBN=False, mainDropout='None',
            finalDropout='None').get_seq()

    def forward(self, x):
        out = self.fc(x)
        n = self.node_out
        mean = out[:, :n].view(-1, n)
        # Softplus keeps the predicted standard deviation strictly positive.
        std = nn.Softplus()(out[:, n:]).view(-1, n)
        return mean, std
def gaussian_loss(params, sigma, target):
    """Negative mean log-likelihood of ``target`` under independent Gaussians.

    See https://en.wikipedia.org/wiki/Normal_distribution

    Parameters
    ----------
    params : torch.Tensor
        Predicted means, shape (batch, n_params).
    sigma : torch.Tensor
        Predicted standard deviations, same shape as ``params``.
    target : torch.Tensor
        True values, same shape as ``params``.

    Returns
    -------
    torch.Tensor
        Scalar loss: batch mean of the negative log-probability, summed over
        the parameter dimension.
    """
    # log N(t; mu, sigma) = -0.5*z^2 - log(sigma) - log(sqrt(2*pi))
    z = (target - params) / sigma
    log_norm = torch.log(torch.sqrt(torch.tensor(2 * np.pi)))
    log_prob = -0.5 * z ** 2 - torch.log(sigma) - log_norm
    # Sum over the parameter dimension, then average over the batch.
    return torch.mean(-torch.sum(log_prob, dim=1))
#%%ANN + Multivariate Gaussian - for one data set & multiple parameters
class MLPMultivariateGaussian(torch.nn.Module):
    """Fully connected network predicting the mean vector and the Cholesky
    factor of the inverse covariance of a multivariate Gaussian
    (one data set, multiple parameters).

    The head emits ``node_out`` means, ``node_out`` (positive) Cholesky
    diagonal entries, and ``(node_out^2 - node_out)/2`` upper off-diagonal
    entries.
    """
    def __init__(self, node_in=100, node_out=2, hidden_layer=3, nodes=None,
                 activation_func='Softplus'):
        super(MLPMultivariateGaussian, self).__init__()
        self.node_in = node_in
        self.node_out = node_out
        if nodes is None:
            # Output width: means + diagonal + strict upper triangle.
            nodes = nodeframe.decreasingNode(node_in=node_in, node_out=node_out*2+(node_out**2-node_out)//2, hidden_layer=hidden_layer, get_allNode=True)
        self.fc = seq.LinearSeq(nodes,mainActive=activation_func,finalActive='None',mainBN=True,finalBN=False,mainDropout='None',finalDropout='None').get_seq()

    def forward(self, x):
        # Returns (params, cholesky_factor):
        #   params          : (batch, node_out, 1) predicted means
        #   cholesky_factor : (batch, node_out, node_out) upper-triangular factor
        x = self.fc(x)
        params = x[:, :self.node_out]
        params = params.view(-1, self.node_out, 1)
        # Diagonal entries must be positive -> Softplus.
        cholesky_diag = nn.Softplus()(x[:, self.node_out:self.node_out*2])
        cholesky_diag = cholesky_diag.view(-1, self.node_out)
        cholesky_factor = torch.diag_embed(cholesky_diag)
        # Remaining outputs fill the strict upper triangle, row-major order
        # as produced by torch.triu_indices(offset=1).
        cholesky_offDiag = x[:, self.node_out*2:]
        cholesky_offDiag = cholesky_offDiag.view(-1, (self.node_out**2-self.node_out)//2)
        upper_index = torch.triu_indices(self.node_out, self.node_out, offset=1)
        cholesky_factor[:, upper_index[0], upper_index[1]] = cholesky_offDiag
        return params, cholesky_factor
#need further research
class MLPMultivariateGaussian_AvgMultiNoise(torch.nn.Module):
    """Variant of ``MLPMultivariateGaussian`` that averages the predicted
    Cholesky factors over ``multi_noise`` realizations stacked in the batch.

    NOTE(review): marked "need further research" by the author; the averaging
    scheme in ``forward`` is experimental.
    """
    def __init__(self, node_in=100, node_out=2, hidden_layer=3, nodes=None,
                 activation_func='Softplus'):
        super(MLPMultivariateGaussian_AvgMultiNoise, self).__init__()
        self.node_in = node_in
        self.node_out = node_out
        if nodes is None:
            # Output width: means + diagonal + strict upper triangle.
            nodes = nodeframe.decreasingNode(node_in=node_in, node_out=node_out*2+(node_out**2-node_out)//2, hidden_layer=hidden_layer, get_allNode=True)
        self.fc = seq.LinearSeq(nodes,mainActive=activation_func,finalActive='None',mainBN=True,finalBN=False,mainDropout='None',finalDropout='None').get_seq()

    #need further research
    def forward(self, x, multi_noise=1):
        x = self.fc(x)
        params = x[:, :self.node_out]
        params = params.view(-1, self.node_out, 1)
        # Diagonal entries must be positive -> Softplus.
        cholesky_diag = nn.Softplus()(x[:, self.node_out:self.node_out*2])
        cholesky_diag = cholesky_diag.view(-1, self.node_out)
        cholesky_factor = torch.diag_embed(cholesky_diag)
        cholesky_offDiag = x[:, self.node_out*2:]
        cholesky_offDiag = cholesky_offDiag.view(-1, (self.node_out**2-self.node_out)//2)
        upper_index = torch.triu_indices(self.node_out, self.node_out, offset=1)
        cholesky_factor[:, upper_index[0], upper_index[1]] = cholesky_offDiag
        if multi_noise>1:
            # Sum the factors over the multi_noise chunks of the batch
            # (assumes the batch is stacked as multi_noise equal chunks),
            # then rescale and broadcast back to the full batch size.
            cholesky_factor_chunk = torch.chunk(cholesky_factor, multi_noise, dim=0)
            cholesky_factor = cholesky_factor_chunk[0]
            for i in range(multi_noise-1):
                cholesky_factor = cholesky_factor + cholesky_factor_chunk[i+1]
            # NOTE(review): torch.sqrt on an integer tensor may fail on older
            # PyTorch versions ("sqrt" not implemented for Long); consider
            # float(multi_noise) ** 0.5 instead -- TODO confirm.
            cholesky_factor = cholesky_factor / torch.sqrt(torch.tensor(multi_noise))
            cholesky_factor = cholesky_factor.repeat(multi_noise, 1, 1)
        return params, cholesky_factor
def multivariateGaussian_loss(params, cholesky_factor, target):
    """Negative mean log-likelihood under a multivariate Gaussian whose
    *inverse* covariance is parameterized by its Cholesky factor
    (see arXiv:2003.05739).

    Parameters
    ----------
    params : torch.Tensor
        Predicted means, shape (batch, n_params, 1).
    cholesky_factor : torch.Tensor
        Cholesky factor of the inverse covariance matrix, shape
        (batch, n_params, n_params).
    target : torch.Tensor
        True values, shape (batch, n_params).

    Returns
    -------
    torch.Tensor
        Scalar loss (batch mean of the negative log-probability).
    """
    n_dim = cholesky_factor.size(-1)
    residual = target.unsqueeze(-1) - params
    # Normalization constant: log sqrt((2*pi)^n)
    log_norm = torch.log(torch.sqrt(torch.tensor(2 * np.pi) ** n_dim))
    # log|det(L)| is the sum of the logs of the diagonal entries of L.
    half_log_det = torch.sum(
        torch.log(torch.diagonal(cholesky_factor, dim1=1, dim2=2)), dim=1)
    # Quadratic form: (L r)^T (L r) = r^T (L^T L) r, taken per batch element.
    lr = torch.matmul(cholesky_factor, residual)
    quad = torch.matmul(lr.transpose(1, 2), lr)[:, 0, 0]
    log_prob = -0.5 * quad + half_log_det - log_norm
    return torch.mean(-log_prob)
#%% multi-branch network + (Multivariate) Gaussian - for multiple data sets & one (multiple) parameter
class MultiBranchMLPGaussian(nn.Module):
    """Multi-branch network predicting means and independent Gaussian sigmas
    (multiple data sets, one or multiple parameters).

    One branch MLP per input data set; the branch outputs are concatenated
    and fed to a trunk MLP whose head emits ``node_out`` means followed by
    ``node_out`` sigmas.

    NOTE(review): ``nodes_in`` has a mutable default list; it is only read
    here, but a tuple default would be safer.
    """
    def __init__(self, nodes_in=[100,100,100], node_out=2, branch_hiddenLayer=1,
                 trunk_hiddenLayer=1, nodes_all=None, activation_func='Softplus'):
        super(MultiBranchMLPGaussian, self).__init__()
        self.nodes_in = nodes_in
        self.node_out = node_out
        if nodes_all is None:
            nodes_all = []
            branches_out = []
            fc_hidden = branch_hiddenLayer*2 + 1
            # fc_hidden = branch_hiddenLayer + trunk_hiddenLayer + 1 #also works, but not necessary
            fc_out = node_out*2
            # Build one decreasing node schedule per branch; the first
            # branch_hiddenLayer+2 entries define that branch's layers.
            for i in range(len(nodes_in)):
                fc_node = nodeframe.decreasingNode(node_in=nodes_in[i], node_out=fc_out, hidden_layer=fc_hidden, get_allNode=True)
                nodes_branch = fc_node[:branch_hiddenLayer+2]
                nodes_all.append(nodes_branch)
                branches_out.append(nodes_branch[-1])
            # Trunk takes the concatenated branch outputs.
            nodes_all.append(nodeframe.decreasingNode(node_in=sum(branches_out), node_out=fc_out, hidden_layer=trunk_hiddenLayer, get_allNode=True))
        self.branch_n = len(nodes_in)
        # Dynamically create self.branch1..self.branchN as submodules.
        for i in range(self.branch_n):
            exec("self.branch%s = seq.LinearSeq(nodes_all[i],mainActive=activation_func,finalActive=activation_func,mainBN=True,\
            finalBN=True,mainDropout='None',finalDropout='None').get_seq()"%(i+1))
        self.trunk = seq.LinearSeq(nodes_all[-1],mainActive=activation_func,finalActive='None',mainBN=True,
                                   finalBN=False,mainDropout='None',finalDropout='None').get_seq()

    def forward(self, x_all):
        # x_all: sequence of input tensors, one per branch.
        x1 = self.branch1(x_all[0])
        x_comb = x1
        for i in range(1, self.branch_n-1+1):
            x_n = eval('self.branch%s(x_all[i])'%(i+1))#Note:i & i+1
            x_comb = torch.cat((x_comb, x_n),1)
        x = self.trunk(x_comb)
        # Split the trunk head: first node_out columns means, rest sigmas.
        params = x[:, :self.node_out]
        params = params.view(-1, self.node_out)
        sigma = nn.Softplus()(x[:, self.node_out:])
        sigma = sigma.view(-1, self.node_out)
        return params, sigma
class MultiBranchMLPMultivariateGaussian(nn.Module):
    """Multi-branch network predicting the mean vector and the Cholesky
    factor of the inverse covariance of a multivariate Gaussian
    (multiple data sets, multiple parameters).

    One branch MLP per input data set; concatenated branch outputs feed a
    trunk MLP whose head emits ``node_out`` means, ``node_out`` positive
    Cholesky diagonal entries, and the strict upper-triangle entries.
    """
    def __init__(self, nodes_in=[100,100,100], node_out=2, branch_hiddenLayer=1,
                 trunk_hiddenLayer=1, nodes_all=None, activation_func='Softplus'):
        super(MultiBranchMLPMultivariateGaussian, self).__init__()
        self.nodes_in = nodes_in
        self.node_out = node_out
        if nodes_all is None:
            #method 1
            nodes_all = []
            branches_out = []
            fc_hidden = branch_hiddenLayer*2 + 1
            # fc_hidden = branch_hiddenLayer + trunk_hiddenLayer + 1 #also works, but not necessary
            fc_out = node_out*2+(node_out**2-node_out)//2
            # One decreasing node schedule per branch.
            for i in range(len(nodes_in)):
                fc_node = nodeframe.decreasingNode(node_in=nodes_in[i], node_out=fc_out, hidden_layer=fc_hidden, get_allNode=True)
                nodes_branch = fc_node[:branch_hiddenLayer+2]
                nodes_all.append(nodes_branch)
                branches_out.append(nodes_branch[-1])
            # Trunk takes the concatenated branch outputs.
            nodes_all.append(nodeframe.decreasingNode(node_in=sum(branches_out), node_out=fc_out, hidden_layer=trunk_hiddenLayer, get_allNode=True))

            # #method 2
            # nodes_all = []
            # branches_out = []
            # fc_hidden = branch_hiddenLayer + trunk_hiddenLayer + 1
            # fc_out = node_out*2+(node_out**2-node_out)//2
            # fc_hidd_node = nodeframe.decreasingNode(node_in=sum(nodes_in), node_out=fc_out, hidden_layer=fc_hidden, get_allNode=False)
            # fc_hidd_node_split = split_nodes(fc_hidd_node[:branch_hiddenLayer+1], weight=[nodes_in[i]/sum(nodes_in) for i in range(len(nodes_in))])
            # for i in range(len(nodes_in)):
            #     branch_node = [nodes_in[i]] + fc_hidd_node_split[i]
            #     nodes_all.append(branch_node)
            #     branches_out.append(branch_node[-1])
            # trunk_node = [sum(branches_out)] + list(fc_hidd_node[branch_hiddenLayer+1:]) + [fc_out]
            # nodes_all.append(trunk_node)

            # #method 3
            # nodes_all = []
            # nodes_comb = []
            # fc_hidden = branch_hiddenLayer + trunk_hiddenLayer + 1
            # fc_out = node_out*2+(node_out**2-node_out)//2
            # for i in range(len(nodes_in)):
            #     fc_node = nodeframe.decreasingNode(node_in=nodes_in[i], node_out=fc_out, hidden_layer=fc_hidden, get_allNode=True)
            #     print(fc_node)
            #     branch_node = fc_node[:branch_hiddenLayer+2]
            #     nodes_all.append(branch_node)
            #     nodes_comb.append(fc_node[branch_hiddenLayer+1:-1])
            # trunk_node = list(np.sum(np.array(nodes_comb), axis=0)) + [fc_out]
            # nodes_all.append(trunk_node)
        self.branch_n = len(nodes_in)
        # Dynamically create self.branch1..self.branchN as submodules.
        for i in range(self.branch_n):
            exec("self.branch%s = seq.LinearSeq(nodes_all[i],mainActive=activation_func,finalActive=activation_func,mainBN=True,\
            finalBN=True,mainDropout='None',finalDropout='None').get_seq()"%(i+1))
        self.trunk = seq.LinearSeq(nodes_all[-1],mainActive=activation_func,finalActive='None',mainBN=True,
                                   finalBN=False,mainDropout='None',finalDropout='None').get_seq()

    def forward(self, x_all):
        # x_all: sequence of input tensors, one per branch.
        x1 = self.branch1(x_all[0])
        x_comb = x1
        for i in range(1, self.branch_n-1+1):
            x_n = eval('self.branch%s(x_all[i])'%(i+1))#Note:i & i+1
            x_comb = torch.cat((x_comb, x_n),1)
        x = self.trunk(x_comb)
        params = x[:, :self.node_out]
        params = params.view(-1, self.node_out, 1)
        # Diagonal entries must be positive -> Softplus.
        cholesky_diag = nn.Softplus()(x[:, self.node_out:self.node_out*2])
        cholesky_diag = cholesky_diag.view(-1, self.node_out)
        cholesky_factor = torch.diag_embed(cholesky_diag)
        # Remaining outputs fill the strict upper triangle.
        cholesky_offDiag = x[:, self.node_out*2:]
        cholesky_offDiag = cholesky_offDiag.view(-1, (self.node_out**2-self.node_out)//2)
        upper_index = torch.triu_indices(self.node_out, self.node_out, offset=1)
        cholesky_factor[:, upper_index[0], upper_index[1]] = cholesky_offDiag
        return params, cholesky_factor
#%% loss functions
def loss_funcs(params_n):
    """Select the loss function for ``params_n`` parameters: the 1-D
    Gaussian loss for a single parameter, otherwise the multivariate
    Gaussian loss."""
    return gaussian_loss if params_n == 1 else multivariateGaussian_loss
#%% Branch network
class Branch(nn.Module):
    """Placeholder for a single branch sub-network; not yet implemented."""

    def __init__(self):
        super(Branch, self).__init__()
|
Guo-Jian-WangREPO_NAMEcolfiPATH_START.@colfi_extracted@colfi-master@colfi@fcnet_g.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/autofit/non_linear/parallel/__init__.py",
"type": "Python"
}
|
from .process import AbstractJob
from .process import AbstractJobResult
from .process import Process
from .sneaky import SneakyJob
from .sneaky import SneakyPool
from .sneaky import SneakierPool
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@autofit@non_linear@parallel@__init__.py@.PATH_END.py
|
{
"filename": "kay.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/retrievers/kay.ipynb",
"type": "Jupyter Notebook"
}
|
# Kay.ai
>[Kay Data API](https://www.kay.ai/) built for RAG 🕵️ We are curating the world's largest datasets as high-quality embeddings so your AI agents can retrieve context on the fly. Latest models, fast retrieval, and zero infra.
This notebook shows you how to retrieve datasets supported by [Kay](https://kay.ai/). You can currently search `SEC Filings` and `Press Releases of US companies`. Visit [kay.ai](https://kay.ai) for the latest data drops. For any questions, join our [discord](https://discord.gg/hAnE4e5T6M) or [tweet at us](https://twitter.com/vishalrohra_)
## Installation
First, install the [`kay` package](https://pypi.org/project/kay/).
```python
!pip install kay
```
You will also need an API key: you can get one for free at [https://kay.ai](https://kay.ai/). Once you have an API key, you must set it as an environment variable `KAY_API_KEY`.
`KayAiRetriever` has a static `.create()` factory method that takes the following arguments:
* `dataset_id: string` required -- A Kay dataset id. This is a collection of data about a particular entity such as companies, people, or places. For example, try `"company"`
* `data_types: List[string]` optional -- This is a category within a dataset based on its origin or format, such as ‘SEC Filings’, ‘Press Releases’, or ‘Reports’ within the “company” dataset. For example, try `["10-K", "10-Q", "PressRelease"]` under the “company” dataset. If left empty, Kay will retrieve the most relevant context across all types.
* `num_contexts: int` optional, defaults to 6 -- The number of document chunks to retrieve on each call to `get_relevant_documents()`
## Examples
### Basic Retriever Usage
```python
# Setup API key
from getpass import getpass
KAY_API_KEY = getpass()
```
········
```python
import os
from langchain_community.retrievers import KayAiRetriever
os.environ["KAY_API_KEY"] = KAY_API_KEY
retriever = KayAiRetriever.create(
dataset_id="company", data_types=["10-K", "10-Q", "PressRelease"], num_contexts=3
)
docs = retriever.invoke(
"What were the biggest strategy changes and partnerships made by Roku in 2023??"
)
```
```python
docs
```
[Document(page_content='Company Name: ROKU INC\nCompany Industry: CABLE & OTHER PAY TELEVISION SERVICES\nArticle Title: Roku Is One of Fast Company\'s Most Innovative Companies for 2023\nText: The company launched several new devices, including the Roku Voice Remote Pro; upgraded its most premium player, the Roku Ultra; and expanded its products with a new line of smart home devices such as video doorbells, lights, and plugs integrated into the Roku ecosystem. Recently, the company announced it will launch Roku-branded TVs this spring to offer more choice and innovation to both consumers and Roku TV partners. Throughout 2022, Roku also updated its operating system (OS), the only OS purpose-built for TV, with more personalization features and enhancements across search, audio, and content discovery, launching The Buzz, Sports, and What to Watch, which provides tailored movie and TV recommendations on the Home Screen Menu. The company also released a new feature for streamers, Photo Streams, that allows customers to display and share photo albums through Roku streaming devices. Additionally, Roku unveiled Shoppable Ads, a new ad innovation that makes shopping on TV streaming as easy as it is on social media. Viewers simply press "OK" with their Roku remote on a shoppable ad and proceed to check out with their shipping and payment details pre-populated from Roku Pay, its proprietary payments platform. Walmart was the exclusive retailer for the launch, a first-of-its-kind partnership.', metadata={'chunk_type': 'text', 'chunk_years_mentioned': [2022, 2023], 'company_name': 'ROKU INC', 'company_sic_code_description': 'CABLE & OTHER PAY TELEVISION SERVICES', 'data_source': 'PressRelease', 'data_source_link': 'https://newsroom.roku.com/press-releases', 'data_source_publish_date': '2023-03-02T09:30:00-04:00', 'data_source_uid': '963d4a81-f58e-3093-af68-987fb1758c15', 'title': "ROKU INC | Roku Is One of Fast Company's Most Innovative Companies for 2023"}),
Document(page_content='Company Name: ROKU INC\nCompany Industry: CABLE & OTHER PAY TELEVISION SERVICES\nArticle Title: Roku Is One of Fast Company\'s Most Innovative Companies for 2023\nText: Finally, Roku grew its content offering with thousands of apps and watching options for users, including content on The Roku Channel, a top five app by reach and engagement on the Roku platform in the U.S. in 2022. In November, Roku released its first feature film, "WEIRD: The Weird Al\' Yankovic Story," a biopic starring Daniel Radcliffe. Throughout the year, The Roku Channel added FAST channels from NBCUniversal and the National Hockey League, as well as an exclusive AMC channel featuring its signature drama "Mad Men." This year, the company announced a deal with Warner Bros. Discovery, launching new channels that will include "Westworld" and "The Bachelor," in addition to 2,000 hours of on-demand content. Read more about Roku\'s journey here . Fast Company\'s Most Innovative Companies issue (March/April 2023) is available online here , as well as in-app via iTunes and on newsstands beginning March 14. About Roku, Inc.\nRoku pioneered streaming to the TV. We connect users to the streaming content they love, enable content publishers to build and monetize large audiences, and provide advertisers with unique capabilities to engage consumers. Roku streaming players and TV-related audio devices are available in the U.S. and in select countries through direct retail sales and licensing arrangements with service operators. Roku TV models are available in the U.S. 
and select countries through licensing arrangements with TV OEM brands.', metadata={'chunk_type': 'text', 'chunk_years_mentioned': [2022, 2023], 'company_name': 'ROKU INC', 'company_sic_code_description': 'CABLE & OTHER PAY TELEVISION SERVICES', 'data_source': 'PressRelease', 'data_source_link': 'https://newsroom.roku.com/press-releases', 'data_source_publish_date': '2023-03-02T09:30:00-04:00', 'data_source_uid': '963d4a81-f58e-3093-af68-987fb1758c15', 'title': "ROKU INC | Roku Is One of Fast Company's Most Innovative Companies for 2023"}),
Document(page_content='Company Name: ROKU INC\nCompany Industry: CABLE & OTHER PAY TELEVISION SERVICES\nArticle Title: Roku\'s New NFL Zone Gives Fans Easy Access to NFL Games Right On Time for 2023 Season\nText: In partnership with the NFL, the new NFL Zone offers viewers an easy way to find where to watch NFL live games Today, Roku (NASDAQ: ROKU ) and the National Football League (NFL) announced the recently launched NFL Zone within the Roku Sports experience to kick off the 2023 NFL season. This strategic partnership between Roku and the NFL marks the first official league-branded zone within Roku\'s Sports experience. Available now, the NFL Zone offers football fans a centralized location to find live and upcoming games, so they can spend less time figuring out where to watch the game and more time rooting for their favorite teams. Users can also tune in for weekly game previews, League highlights, and additional NFL content, all within the zone. This press release features multimedia. View the full release here: In partnership with the NFL, Roku\'s new NFL Zone offers viewers an easy way to find where to watch NFL live games (Photo: Business Wire) "Last year we introduced the Sports experience for our highly engaged sports audience, making it simpler for Roku users to watch sports programming," said Gidon Katz, President, Consumer Experience, at Roku. "As we start the biggest sports season of the year, providing easy access to NFL games and content to our millions of users is a top priority for us. 
We look forward to fans immersing themselves within the NFL Zone and making it their destination to find NFL games.', metadata={'chunk_type': 'text', 'chunk_years_mentioned': [2023], 'company_name': 'ROKU INC', 'company_sic_code_description': 'CABLE & OTHER PAY TELEVISION SERVICES', 'data_source': 'PressRelease', 'data_source_link': 'https://newsroom.roku.com/press-releases', 'data_source_publish_date': '2023-09-12T09:00:00-04:00', 'data_source_uid': '963d4a81-f58e-3093-af68-987fb1758c15', 'title': "ROKU INC | Roku's New NFL Zone Gives Fans Easy Access to NFL Games Right On Time for 2023 Season"})]
### Usage in a chain
```python
OPENAI_API_KEY = getpass()
```
········
```python
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
```
```python
from langchain.chains import ConversationalRetrievalChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo")
qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)
```
```python
questions = [
"What were the biggest strategy changes and partnerships made by Roku in 2023?"
# "Where is Wex making the most money in 2023?",
]
chat_history = []
for question in questions:
result = qa({"question": question, "chat_history": chat_history})
chat_history.append((question, result["answer"]))
print(f"-> **Question**: {question} \n")
print(f"**Answer**: {result['answer']} \n")
```
-> **Question**: What were the biggest strategy changes and partnerships made by Roku in 2023?
**Answer**: In 2023, Roku made a strategic partnership with FreeWheel to bring Roku's leading ad tech to FreeWheel customers. This partnership aimed to drive greater interoperability and automation in the advertising-based video on demand (AVOD) space. Key highlights of this collaboration include streamlined integration of Roku's demand application programming interface (dAPI) with FreeWheel's TV platform, allowing for better inventory quality control and improved publisher yield and revenue. Additionally, publishers can now use Roku platform signals to enable advertisers to target audiences and measure campaign performance without relying on cookies. This partnership also involves the use of data clean room technology to enable the activation of additional data sets for better measurement and monetization for publishers and agencies. These partnerships and strategies aim to support Roku's growth in the AVOD market.
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@retrievers@kay.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/skimage/restoration/__init__.py",
"type": "Python"
}
|
"""Restoration algorithms, e.g., deconvolution algorithms, denoising, etc."""
import lazy_loader as _lazy
__getattr__, __dir__, __all__ = _lazy.attach_stub(__name__, __file__)
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@skimage@restoration@__init__.py@.PATH_END.py
|
{
"filename": "flux.py",
"repo_name": "HenrykHaniewicz/PSRVoid",
"repo_path": "PSRVoid_extracted/PSRVoid-master/flux.py",
"type": "Python"
}
|
# Calculates flux from a known continuum based on one of two formats
import os.path as osp
import mmap
import math
import numpy as np
import argparse
config_file = 'fluxcal.cfg'
config_abs = osp.join( osp.dirname( osp.abspath( __file__ ) ), config_file )
def find_flux_f1( frequency, source ):
    """
    Returns the Format 1 (see .cfg file) flux as a float from a given source.

    Parameters
    ----------
    frequency : float or str
        Frequency (GHz) at which to evaluate the flux.
    source : str
        Name (or alias) of the continuum source to look up.

    Returns
    -------
    float
        Flux extrapolated to `frequency` via the source's spectral index.

    Raises
    ------
    TypeError
        If `source` is not a string.
    ValueError
        If no matching source is found in the config file.
    """
    if not isinstance( source, str ):
        raise TypeError( "Source parsed in must be a string" )
    new_f = float( frequency )
    src = source.encode()
    amp_line = b""
    # Opens cfg file and creates read-only mmap. Then, searches for '%' (start of new source), before then searching for aliases.
    # Once the alias is found, the last recorded '%' line is parsed into memory and the file closes.
    with open( config_abs, 'rb', 0 ) as file, mmap.mmap( file.fileno(), 0, access = mmap.ACCESS_READ ) as s:
        for lin_num, line in enumerate( iter(s.readline, b"") ):
            if line.find( b'%' ) != -1:
                amp_line = line
            if src in line and not line.startswith( b"#" ):
                file.close()
                break
            if line.find( b'Format 2' ) != -1:
                # Reached the Format 2 section without a match; stop searching.
                file.close()
                break
    amp_line = amp_line.decode()
    if amp_line == "":
        raise ValueError( "No source matching name given was found" )
    name, ra, dec, freq, flux, spec = amp_line.split()
    freq, flux, spec = float( freq ), float( flux ), float( spec )
    # Config frequency appears to be stored in MHz; convert to GHz -- TODO confirm against fluxcal.cfg.
    freq /= 1000
    # Use spectral index to calculate flux at a different frequency
    new_flux = flux * (( new_f / freq )**spec)
    return new_flux
def find_source_params_f2( source ):
    """
    Returns a list of Format 2 (see .cfg file) flux parameters for a given continuum source as strings.

    Parameters
    ----------
    source : str
        Name (or alias) of the continuum source to look up.

    Returns
    -------
    tuple of (list of str, list of str)
        Fields 1-2 of the matched config line (presumably the source
        coordinates -- verify against fluxcal.cfg) and fields 3 onward
        (the polynomial coefficients), both as strings.

    Raises
    ------
    TypeError
        If `source` is not a string.
    ValueError
        If no matching source is found in the config file.
    """
    if not isinstance( source, str ):
        raise TypeError( "Source parsed in must be a string" )
    src = source.encode()
    amp_line = b""
    # Opens cfg file and creates read-only mmap. Then, searches for '&' (start of new source), before then searching for aliases.
    # Once the alias is found, the last recorded '&' line is parsed into memory and the file closes.
    with open( config_abs, 'rb', 0 ) as file, mmap.mmap( file.fileno(), 0, access = mmap.ACCESS_READ ) as s:
        for lin_num, line in enumerate( iter(s.readline, b"") ):
            if line.find( b'&' ) != -1:
                amp_line = line
            if src in line and not line.startswith( b"#" ):
                file.close()
                break
    amp_line = amp_line.decode()
    if amp_line == "":
        raise ValueError( "No source matching name given was found" )
    params = amp_line.split()
    return params[1:3], params[3:]
def calculate_flux_f2( frequency, params ):
    """
    Returns the Format 2 (see .cfg file) flux at a given frequency for a set of coefficients.

    Evaluates the polynomial in log10(frequency):
        log10(S) = p0 + p1*log10(f) + p2*log10(f)^2 + ...
    and returns S = 10**log10(S).
    """
    # Turn all inputs to floats if not already
    log_f = math.log10( float( frequency ) )
    coeffs = [ float( c ) for c in params ]
    log_s = coeffs[0]
    for power, coeff in enumerate( coeffs[1:], start = 1 ):
        log_s += coeff * ( log_f**power )
    return 10**log_s
def get_flux( frequency, source, format1 = False ):
    """
    Master method. Returns the flux of a given frequency, source and format.

    If `format1` is True, reads the flux directly from a Format 1 config
    entry; otherwise looks up the Format 2 polynomial coefficients and
    evaluates them at `frequency`.
    """
    if not format1:
        _, params = find_source_params_f2( str( source ) )
        return calculate_flux_f2( frequency, params )
    return find_flux_f1( frequency, str( source ) )
def get_fluxes( frequencies, source, **kwargs ):
    """
    Returns a NumPy array of fluxes, one per frequency in `frequencies`.

    Extra keyword arguments (e.g. `format1`) are forwarded to `get_flux`.
    """
    # Build the list in one pass instead of repeatedly reallocating with
    # np.append (which copies the whole array on every call -- O(n^2)).
    return np.array( [get_flux( f, source, **kwargs ) for f in frequencies] )
if __name__ == "__main__":

    def parser( progname ):
        """
        Initializes argparse and collects the command line arguments.
        Returns a list of input arguments.
        """
        # Initialize the parser
        parser = argparse.ArgumentParser( formatter_class = argparse.RawDescriptionHelpFormatter,
                                          prog = progname,
                                          description = '''\
Flux Calculator
-------------------------------------
Calculates continuum source
flux for calibration
''' )

        # Arguments list
        parser.add_argument( '-f', '--freq', dest = 'frequency', required = True, nargs = 1, help = "Freqency in GHz" )
        parser.add_argument( '-s', '--src', dest = 'source', required = True, nargs = 1, help = "Provide source as a string e.g. \"J1445+0958\"" )
        parser.add_argument( '--format1', dest = 'format1', action = 'store_true', default = False, help = "If true, looks for flux parameters in Format 1 (otherwise, Format 2)" )
        parser.add_argument( '-c', '--compare', dest = 'compare', nargs = '*', default = None, help = "Compares flux at given frequencies with frequency given in -f" )

        args = parser.parse_args()
        return args

    args = parser( 'flux.py' )

    # Flux at the requested frequency (each argparse value is a 1-element list).
    flux = get_flux( args.frequency[0], args.source[0], args.format1 )
    print( "Flux (Jy): ", flux )

    # Optionally compare fluxes at additional frequencies against the primary one.
    if args.compare:
        print( "Freq (GHz)", ", ", "Flux (Jy)", ", ", "% error" )
        for arg in args.compare:
            comp_flux = get_flux( arg, args.source[0], args.format1 )
            err = 100 * ( abs( comp_flux - flux ) / flux )
            print( arg, " ", comp_flux, " ", err, "%" )
|
HenrykHaniewiczREPO_NAMEPSRVoidPATH_START.@PSRVoid_extracted@PSRVoid-master@flux.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2dcontour/colorbar/title/font/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._weight import WeightValidator
from ._variant import VariantValidator
from ._textcase import TextcaseValidator
from ._style import StyleValidator
from ._size import SizeValidator
from ._shadow import ShadowValidator
from ._lineposition import LinepositionValidator
from ._family import FamilyValidator
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._weight.WeightValidator",
"._variant.VariantValidator",
"._textcase.TextcaseValidator",
"._style.StyleValidator",
"._size.SizeValidator",
"._shadow.ShadowValidator",
"._lineposition.LinepositionValidator",
"._family.FamilyValidator",
"._color.ColorValidator",
],
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2dcontour@colorbar@title@font@__init__.py@.PATH_END.py
|
{
"filename": "ad.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/experimental/sparse/ad.py",
"type": "Python"
}
|
# Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Callable, Sequence
import itertools
from typing import Any
import jax
from jax._src import core
from jax import tree_util
from jax._src.api_util import _ensure_index, _ensure_index_tuple
from jax.util import safe_zip
from jax._src.util import split_list, wraps
from jax._src.traceback_util import api_boundary
from jax.experimental.sparse._base import JAXSparse
is_sparse = lambda x: isinstance(x, JAXSparse)
def flatten_fun_for_sparse_ad(fun, argnums: int | tuple[int, ...], args: tuple[Any, ...]):
  """Flatten ``fun`` and ``args`` so that sparse arrays are differentiated
  only through their data buffer.

  Returns ``(fun_flat, argnums_flat, args_flat, postprocess_gradients)``:
  ``fun_flat`` takes the fully flattened buffers, ``argnums_flat`` marks
  which buffers to differentiate (only the first buffer of each sparse
  leaf), and ``postprocess_gradients`` re-wraps the flat gradients into
  the original (sparse) containers.
  """
  argnums_tup = _ensure_index_tuple(argnums)
  assert all(0 <= argnum < len(args) for argnum in argnums_tup)
  # We do a two-step flattening to figure out how argnums maps to args_flat.
  # First, flatten arguments to a list containing sparse and dense objects.
  args_flat1, tree1 = tree_util.tree_flatten(args, is_leaf=is_sparse)
  *leaf_argnums1, end = split_list(range(tree1.num_leaves),
                                   [child.num_leaves for child in tree1.children()])
  assert not end
  argnums_flat1 = list(itertools.chain.from_iterable(
    nums for i, nums in enumerate(leaf_argnums1) if i in argnums_tup))

  # Next, fully flatten to a list of dense buffers.
  args_flat, tree2 = tree_util.tree_flatten(args_flat1)
  *leaf_argnums2, end = split_list(range(tree2.num_leaves),
                                   [child.num_leaves for child in tree2.children()])
  assert not end
  # For sparse args, we only mark the first buffer (the data) for differentiation.
  leaf_argnums2 = [nums[:1] if is_sparse(arg) else nums
                   for arg, nums in safe_zip(args_flat1, leaf_argnums2)]
  argnums_flat = tuple(itertools.chain.from_iterable(
    nums for i, nums in enumerate(leaf_argnums2) if i in argnums_flat1))

  def fun_flat(*args_flat, **kwargs):
    # Rebuild the original argument pytree before calling the user function.
    args = tree_util.tree_unflatten(tree1, tree_util.tree_unflatten(tree2, args_flat))
    return fun(*args, **kwargs)

  def reconstruct(i, grad_out):
    # Re-attach the non-differentiated buffers (e.g. sparse indices) to the
    # data gradient; vmap over any extra leading dims added by e.g. jacfwd.
    bufs, tree = tree_util.tree_flatten(args_flat1[i])
    f_recons = lambda g: tree_util.tree_unflatten(tree, [g, *bufs[1:]])
    for _ in range(grad_out.ndim - bufs[0].ndim):
      f_recons = jax.vmap(f_recons)
    return f_recons(grad_out)

  def postprocess_gradients(grads_out):
    leaf_grads = [None] * tree1.num_leaves
    for i, grad in safe_zip(argnums_flat1, grads_out):
      leaf_grads[i] = reconstruct(i, grad)
    # Drop empty (non-differentiated) entries; unwrap a single gradient.
    grad_tree = tree_util.tree_unflatten(tree1, leaf_grads)
    grad_tree = tuple(filter(lambda x: jax.tree.leaves(x), grad_tree))
    return grad_tree[0] if len(grad_tree) == 1 else grad_tree

  return fun_flat, argnums_flat, args_flat, postprocess_gradients
def value_and_grad(fun: Callable, argnums: int | Sequence[int] = 0,
                   has_aux=False, **kwargs) -> Callable[..., tuple[Any, Any]]:
  """Sparse-aware version of :func:`jax.value_and_grad`

  Arguments and return values are the same as :func:`jax.value_and_grad`, but when
  taking the gradient with respect to a :class:`jax.experimental.sparse` array, the
  gradient is computed in the subspace defined by the array's sparsity pattern.

  Examples:

    >>> from jax.experimental import sparse
    >>> X = sparse.BCOO.fromdense(jnp.arange(6.))
    >>> y = jnp.ones(6)
    >>> sparse.value_and_grad(lambda X, y: X @ y)(X, y)
    (Array(15., dtype=float32), BCOO(float32[6], nse=5))
  """
  # Constructed only so @wraps below can copy its docstring.
  raw_value_and_grad_fun = jax.value_and_grad(fun, argnums=argnums, has_aux=has_aux, **kwargs)
  argnums = core.concrete_or_error(_ensure_index, argnums)
  @wraps(fun, docstr=raw_value_and_grad_fun.__doc__, argnums=argnums)
  @api_boundary
  def value_and_grad_fun(*args, **kwargs):
    # Flatten at call time so each call sees its own sparsity patterns.
    fun_flat, argnums_flat, args_flat, postprocess_gradients = flatten_fun_for_sparse_ad(fun, argnums, args)
    val_out, grad_out = jax.value_and_grad(fun_flat, argnums=argnums_flat, has_aux=has_aux, **kwargs)(*args_flat)
    return val_out, postprocess_gradients(grad_out)
  return value_and_grad_fun
def grad(fun: Callable, argnums: int | Sequence[int] = 0,
         has_aux=False, **kwargs) -> Callable:
    """Sparse-aware version of :func:`jax.grad`

    Accepts the same arguments and returns the same values as :func:`jax.grad`,
    except that when differentiating with respect to a
    :class:`jax.experimental.sparse` array the gradient is restricted to the
    subspace defined by that array's sparsity pattern.

    Examples:
      >>> from jax.experimental import sparse
      >>> X = sparse.BCOO.fromdense(jnp.arange(6.))
      >>> y = jnp.ones(6)
      >>> sparse.grad(lambda X, y: X @ y)(X, y)
      BCOO(float32[6], nse=5)
    """
    # Built only for its docstring; created before argnums is normalized.
    raw_grad_fun = jax.grad(fun, argnums=argnums, **kwargs)
    argnums = core.concrete_or_error(_ensure_index, argnums)

    @wraps(fun, docstr=raw_grad_fun.__doc__, argnums=argnums)
    @api_boundary
    def grad_fun(*args, **kwargs):
        # Flatten sparse args, differentiate w.r.t. their data buffers,
        # then rebuild sparse gradients from the flat result.
        flat_fun, flat_argnums, flat_args, rebuild = flatten_fun_for_sparse_ad(fun, argnums, args)
        result = jax.grad(flat_fun, argnums=flat_argnums, has_aux=has_aux, **kwargs)(*flat_args)
        if has_aux:
            grads, aux = result
            return rebuild(grads), aux
        return rebuild(result)
    return grad_fun
def jacfwd(fun: Callable, argnums: int | Sequence[int] = 0,
           has_aux: bool = False, **kwargs) -> Callable:
    """Sparse-aware version of :func:`jax.jacfwd`

    Accepts the same arguments and returns the same values as
    :func:`jax.jacfwd`, except that when differentiating with respect to a
    :class:`jax.experimental.sparse` array the gradient is restricted to the
    subspace defined by that array's sparsity pattern.

    Currently this is only implemented for dense outputs.
    """
    # Built only for its docstring; created before argnums is normalized.
    raw_jacfwd_fun = jax.jacfwd(fun, argnums=argnums, **kwargs)
    argnums = core.concrete_or_error(_ensure_index, argnums)

    @wraps(fun, docstr=raw_jacfwd_fun.__doc__, argnums=argnums)
    @api_boundary
    def jacfwd_fun(*args, **kwargs):
        # Flatten sparse args, differentiate w.r.t. their data buffers,
        # then rebuild sparse Jacobians from the flat result.
        flat_fun, flat_argnums, flat_args, rebuild = flatten_fun_for_sparse_ad(fun, argnums, args)
        result = jax.jacfwd(flat_fun, argnums=flat_argnums, has_aux=has_aux, **kwargs)(*flat_args)
        if has_aux:
            jac, aux = result
            return rebuild(jac), aux
        return rebuild(result)
    return jacfwd_fun
def jacrev(fun: Callable, argnums: int | Sequence[int] = 0,
           has_aux: bool = False, **kwargs) -> Callable:
    """Sparse-aware version of :func:`jax.jacrev`

    Accepts the same arguments and returns the same values as
    :func:`jax.jacrev`, except that when differentiating with respect to a
    :class:`jax.experimental.sparse` array the gradient is restricted to the
    subspace defined by that array's sparsity pattern.

    Currently this is only implemented for dense outputs.
    """
    # Built only for its docstring; created before argnums is normalized.
    raw_jacrev_fun = jax.jacrev(fun, argnums=argnums, **kwargs)
    argnums = core.concrete_or_error(_ensure_index, argnums)

    @wraps(fun, docstr=raw_jacrev_fun.__doc__, argnums=argnums)
    @api_boundary
    def jacrev_fun(*args, **kwargs):
        # Flatten sparse args, differentiate w.r.t. their data buffers,
        # then rebuild sparse Jacobians from the flat result.
        flat_fun, flat_argnums, flat_args, rebuild = flatten_fun_for_sparse_ad(fun, argnums, args)
        result = jax.jacrev(flat_fun, argnums=flat_argnums, has_aux=has_aux, **kwargs)(*flat_args)
        if has_aux:
            jac, aux = result
            return rebuild(jac), aux
        return rebuild(result)
    return jacrev_fun
jacobian = jacrev
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@experimental@sparse@ad.py@.PATH_END.py
|
{
"filename": "luminosity_study.py",
"repo_name": "AFD-Illinois/iharm3d",
"repo_path": "iharm3d_extracted/iharm3d-master/script/analysis/luminosity_study.py",
"type": "Python"
}
|
################################################################################
#                                                                              #
#                          LUMINOSITY COMPARISON                               #
#                                                                              #
################################################################################

# Compare jet/outflow luminosity diagnostics (L_BZ and L_tot) under several
# different jet-boundary cuts for one GRMHD run named on the command line.
# Writes <run_name>_L_study.png in the working directory.

import os, sys
import pickle

import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend: script runs in batch jobs
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

import hdf5_to_dict as io
import plot as bplt
from analysis_fns import *
from luminosity_th_study import overlay_rth_contours

# Plot in physical (x,z) coordinates rather than array-index space
USEARRSPACE=False

run_name = sys.argv[1]

# SANE runs get a smaller window/measurement radius than MAD runs
if "SANE" in run_name:
    SIZE = 50
    AT_R = 40
else:
    SIZE = 400
    AT_R = 100

window=[0,SIZE/2,0,SIZE]
FIGX = 15
FIGY = 15

# Single representative dump for the poloidal-plane panels
dumpfile = os.path.join("/scratch/03002/bprather/pharm_dumps/M87SimulationLibrary/GRMHD",run_name,"dumps/dump_00001500.h5")
hdr,geom,dump = io.load_all(dumpfile)

# Pickled time-averaged reductions produced by the analysis pipeline
plotfile = os.path.join("/work/03002/bprather/stampede2/movies",run_name,"eht_out.p")
avg = pickle.load(open(plotfile, "rb"))

fig = plt.figure(figsize=(FIGX, FIGY))
gs = gridspec.GridSpec(2, 2, width_ratios=[1,2])

# Upper left: electromagnetic energy flux density in the poloidal plane
ax = plt.subplot(gs[0,0])
bplt.plot_xz(ax, geom, np.log10(d_fns['FE_EM'](dump)), arrayspace=USEARRSPACE, average=True, window=window)
ax.set_title(r"$\log_{10}( -{{T_{EM}}^r}_t )$")
bplt.overlay_contours(ax, geom, geom['r'], [AT_R], color='k')
overlay_rth_contours(ax, geom, avg, legend=True)

# Lower left: total energy flux minus rest-mass flux (jet-power integrand)
ax = plt.subplot(gs[1,0])
bplt.plot_xz(ax, geom, np.log10(d_fns['FE'](dump)), arrayspace=USEARRSPACE, average=True, window=window)
ax.set_title(r"$\log_{10}( -{T^r}_t - \rho u^r )$")
bplt.overlay_contours(ax, geom, geom['r'], [AT_R], color='k')
overlay_rth_contours(ax, geom, avg)

# I can rely on this for now
# NOTE(review): assumes a dump cadence of 5 t_g when converting the averaging
# window times to indices -- confirm against the run's output interval.
start = int(avg['avg_start'])//5
end = int(avg['avg_end'])//5

# Average over quiescence
mdav = np.mean(np.abs(avg['mdot'][start:end]))

# Upper right: radial profile of L_BZ under each jet cut, per unit Mdot
ax = plt.subplot(gs[0,1])
ax.plot(avg['r'], avg['LBZ_bg1_r']/mdav, label=r"$L_{BZ}$ ($\beta\gamma > 1.0$ cut)", color='k')
ax.plot(avg['r'], avg['LBZ_sigma1_r']/mdav, label=r"$L_{BZ}$ ($\sigma$ > 1 cut)", color='xkcd:green')
ax.plot(avg['r'], avg['LBZ_allp_r']/mdav, label=r"$L_{BZ}$ (FE > 0 cut)", color='xkcd:pink')
ax.plot(avg['r'], avg['LBZ_Be_nob0_r']/mdav, label=r"$L_{BZ}$ ($Be > 0.02$ cut)", color='xkcd:red')
ax.plot(avg['r'], avg['LBZ_mu2_r']/mdav, label=r"$L_{BZ}$ ($\mu > 2$ cut)", color='xkcd:blue')

ax.set_title(r"$L_{BZ} / \dot{M} = \int -{{T_{EM}}^r}_t \sqrt{-g} dx^{\theta} dx^{\phi} / \dot{M}$")
ax.set_xlim([0,SIZE])
ax.set_xlabel("$r$ (M)")
ax.axvline(AT_R, color='k')

#maxes = [np.max(ab_av(avg['LBZ_'+tag+'_r'])[hdr['n1']//4:]) for tag in ['sigma1', 'be_nob1', 'be_nob0']]
#mins = [np.min(ab_av(avg['LBZ_'+tag+'_r'])[hdr['n1']//4:]) for tag in ['sigma1', 'be_nob1', 'be_nob0']]
#yhi = max(maxes); ylow = max(min(mins),1e-4*yhi)
#print(yhi, ylow)
#ax.set_ylim([ylow ,yhi])

# Log scale only for SANE runs (their profiles span more decades)
if "SANE" in run_name:
    ax.set_yscale('log')

ax.legend(loc='upper right')

# Lower right: same profiles for the total jet luminosity L_tot
ax = plt.subplot(gs[1,1])
ax.plot(avg['r'], avg['Lj_bg1_r']/mdav, label=r"$L_{j}$ ($\beta\gamma > 1.0$ cut)", color='k')
ax.plot(avg['r'], avg['Lj_sigma1_r']/mdav, label=r"$L_{j}$ ($\sigma$ > 1 cut)", color='xkcd:green')
ax.plot(avg['r'], avg['Lj_allp_r']/mdav, label=r"$L_{j}$ (FE > 0 cut)", color='xkcd:pink')
ax.plot(avg['r'], avg['Lj_Be_nob0_r']/mdav, label=r"$L_{j}$ ($Be > 0.02$ cut)", color='xkcd:red')
ax.plot(avg['r'], avg['Lj_mu2_r']/mdav, label=r"$L_{j}$ ($\mu > 2$ cut)", color='xkcd:blue')

ax.set_title(r"$L_{tot} / \dot{M} = \int (-{T^r}_t - \rho u^r) \sqrt{-g} dx^{\theta} dx^{\phi} / \dot{M}$")
ax.set_xlim([0,SIZE])
ax.set_xlabel("$r$ (M)")
ax.axvline(AT_R, color='k')

#maxes = [np.max(ab_av(avg['Ltot_'+tag+'_r'])[hdr['n1']//4:]) for tag in ['sigma1', 'be_nob1', 'be_nob0']]
#mins = [np.min(ab_av(avg['Ltot_'+tag+'_r'])[hdr['n1']//4:]) for tag in ['sigma1', 'be_nob1', 'be_nob0']]
#yhi = max(maxes); ylow = max(min(mins),1e-4*yhi)
#print(yhi, ylow)
#ax.set_ylim([ylow,yhi])

if "SANE" in run_name:
    ax.set_yscale('log')

ax.legend(loc='lower right')

plt.tight_layout()
plt.savefig(run_name.replace("/","_")+"_L_study.png", dpi=100)
plt.close(fig)
|
AFD-IllinoisREPO_NAMEiharm3dPATH_START.@iharm3d_extracted@iharm3d-master@script@analysis@luminosity_study.py@.PATH_END.py
|
{
"filename": "_delta.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/indicator/_delta.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DeltaValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``delta`` compound property of ``indicator`` traces."""

    def __init__(self, plotly_name="delta", parent_name="indicator", **kwargs):
        super(DeltaValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # graph_objects data class this validator constructs
            data_class_str=kwargs.pop("data_class_str", "Delta"),
            # Help text for each sub-property of indicator.delta
            data_docs=kwargs.pop(
                "data_docs",
                """
            decreasing
                :class:`plotly.graph_objects.indicator.delta.De
                creasing` instance or dict with compatible
                properties
            font
                Set the font used to display the delta
            increasing
                :class:`plotly.graph_objects.indicator.delta.In
                creasing` instance or dict with compatible
                properties
            position
                Sets the position of delta with respect to the
                number.
            reference
                Sets the reference value to compute the delta.
                By default, it is set to the current value.
            relative
                Show relative change
            valueformat
                Sets the value formatting rule using d3
                formatting mini-language which is similar to
                those of Python. See
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@indicator@_delta.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "neuraloperator/neuraloperator",
"repo_path": "neuraloperator_extracted/neuraloperator-main/neuralop/models/tests/__init__.py",
"type": "Python"
}
|
neuraloperatorREPO_NAMEneuraloperatorPATH_START.@neuraloperator_extracted@neuraloperator-main@neuralop@models@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "_textcase.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/icicle/legendgrouptitle/font/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``textcase`` property of ``icicle.legendgrouptitle.font``."""

    def __init__(
        self,
        plotly_name="textcase",
        parent_name="icicle.legendgrouptitle.font",
        **kwargs,
    ):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "style")
        values = kwargs.pop("values", ["normal", "word caps", "upper", "lower"])
        super(TextcaseValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@icicle@legendgrouptitle@font@_textcase.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "dwkim78/upsilon",
"repo_path": "upsilon_extracted/upsilon-master/upsilon/extract_features/__init__.py",
"type": "Python"
}
|
__author__ = 'kim'
|
dwkim78REPO_NAMEupsilonPATH_START.@upsilon_extracted@upsilon-master@upsilon@extract_features@__init__.py@.PATH_END.py
|
{
"filename": "test_memory.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/util/memory/ut_cython/test_memory.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from util.memory.blob_ut import TestBlob
# Test discovery does not work in cython modules.
# Reexporting test classes here to satisfy pylint and pytest.
# Explicit public API: pytest collects TestBlob from this pure-Python shim.
__all__ = [
    'TestBlob',
]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@util@memory@ut_cython@test_memory.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/surface/lightposition/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Eagerly import the validators on Python < 3.7 (no module-level __getattr__,
# PEP 562) or while type checking; otherwise install lazy relative imports so
# the submodules load on first attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._z import ZValidator
    from ._y import YValidator
    from ._x import XValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._z.ZValidator", "._y.YValidator", "._x.XValidator"]
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@surface@lightposition@__init__.py@.PATH_END.py
|
{
"filename": "aixlink.py",
"repo_name": "duvall3/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/Tool/aixlink.py",
"type": "Python"
}
|
"""SCons.Tool.aixlink
Tool-specific initialization for the IBM Visual Age linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixlink.py 4043 2009/02/23 09:06:45 scons"
import os
import os.path
import SCons.Util
import aixcc
import link
cplusplus = __import__('c++', globals(), locals(), [])
def smart_linkflags(source, target, env, for_signature):
    """Return the -qtempinc link flag for C++ builds, or '' otherwise."""
    # Only C++ links need the template-instantiation directory.
    if not cplusplus.iscplusplus(source):
        return ''
    build_dir = env.subst('$BUILDDIR', target=target, source=source)
    if not build_dir:
        return ''
    return '-qtempinc=' + os.path.join(build_dir, 'tempinc')
def generate(env):
    """
    Add Builders and construction variables for Visual Age linker to
    an Environment.
    """
    # Start from the generic link tool, then override for xlC.
    link.generate(env)

    settings = {
        'SMARTLINKFLAGS': smart_linkflags,
        'LINKFLAGS': SCons.Util.CLVar('$SMARTLINKFLAGS'),
        'SHLINKFLAGS': SCons.Util.CLVar('$LINKFLAGS -qmkshrobj -qsuppress=1501-218'),
        'SHLIBSUFFIX': '.a',
    }
    for key, value in settings.items():
        env[key] = value
def exists(env):
    """Return the path to the xlc compiler driver if present, else None."""
    path, _cc, _shcc, version = aixcc.get_xlc(env)
    if not (path and _cc):
        return None
    xlc = os.path.join(path, _cc)
    return xlc if os.path.exists(xlc) else None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
duvall3REPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@Tool@aixlink.py@.PATH_END.py
|
{
"filename": "_token.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sunburst/stream/_token.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``token`` property of ``sunburst.stream``."""

    def __init__(self, plotly_name="token", parent_name="sunburst.stream", **kwargs):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        no_blank = kwargs.pop("no_blank", True)  # reject empty tokens
        strict = kwargs.pop("strict", True)      # only accept str values
        super(TokenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            no_blank=no_blank,
            strict=strict,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sunburst@stream@_token.py@.PATH_END.py
|
{
"filename": "dailylogger.py",
"repo_name": "sdss/idlspec2d",
"repo_path": "idlspec2d_extracted/idlspec2d-master/python/boss_drp/utils/dailylogger.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import logging
import collections
from os import popen, getenv
import os.path as ptt
import smtplib
from email.message import EmailMessage
import numpy as np
class Formatter(logging.Formatter):
    """Level-aware formatter: INFO is bare, DEBUG prefixes the function name,
    every other level prefixes the level name."""

    # Transient per-level format overrides applied inside format().
    _LEVEL_FORMATS = {
        logging.INFO: "%(message)s",
        logging.DEBUG: '%(funcName)s: %(message)s',
    }
    _DEFAULT_FORMAT = "%(levelname)s: %(message)s"

    def __init__(self):
        super().__init__(fmt="%(levelno)d: %(msg)s", datefmt=None, style='%')

    def format(self, record):
        # Swap in the level-specific format, render, then restore the
        # format the user originally configured.
        saved_fmt = self._style._fmt
        self._style._fmt = self._LEVEL_FORMATS.get(record.levelno, self._DEFAULT_FORMAT)
        result = logging.Formatter.format(self, record)
        self._style._fmt = saved_fmt
        return result
class emailLogHandler(logging.Handler):
    """Logging handler that appends formatted records to an in-memory queue."""

    def __init__(self, log_queue):
        super().__init__()
        # Queue shared with emailLogger; emitted records accumulate here.
        self.log_queue = log_queue

    def emit(self, record):
        # Format with whatever formatter is attached (or the default) and stash.
        self.log_queue.append(self.format(record))
def send_email(subject, email_file, attachment, logger, content=None,
               from_domain="chpc.utah.edu", allemail=False):
    """Email `content` (default: the subject line) to the addresses listed in
    `email_file`, optionally attaching one or more text files.

    Parameters
    ----------
    subject : str
        Subject line of the message.
    email_file : str or None
        Path to a file with one or more whitespace-separated addresses per
        line. If unreadable, the message is built with no recipients.
    attachment : str, sequence of str, or None
        Path(s) of text files to attach; missing paths are skipped.
    logger : logging.Logger
        Used to report a missing/unreadable email_file.
    content : str, optional
        Message body; defaults to `subject`.
    from_domain : str, optional
        Domain used to build the From address for the current user.
    allemail : bool, optional
        If False, only the first listed address is notified.

    Returns
    -------
    None
    """
    try:
        with open(email_file) as fp:
            emails = fp.read().splitlines()
    except (OSError, TypeError):
        # OSError: missing/unreadable file; TypeError: email_file is None.
        # (The previous bare `except:` also masked the log call itself
        # crashing on non-str email_file via string concatenation.)
        emails = []
        logger.info('%s does not exist' % email_file)
    # Addresses may share a line; flatten on any whitespace.
    emails = ' '.join(emails).split()
    if not allemail and emails:
        # Guard against IndexError on an empty address list.
        emails = [emails[0]]
    msg = EmailMessage()
    if content is None:
        content = subject
    msg.set_content(content)
    msg['Subject'] = subject
    msg['From'] = f"BOSS Pipeline <{getenv('USER')}@{from_domain}>"
    msg['BCC'] = ', '.join(emails)
    if attachment is not None:
        # Accept a single path or a sequence of paths.
        attachment = np.atleast_1d(attachment)
        msg.preamble = 'You will not see this in a MIME-aware mail reader.\n'
        for fa in attachment:
            if ptt.exists(fa):
                with open(fa, 'rb') as fp:
                    logdata = fp.read()
                msg.add_attachment(logdata, maintype='text', subtype='plain',
                                   filename=ptt.basename(fa))
    # Context manager issues QUIT/close even if send_message raises
    # (previously the connection leaked on failure).
    with smtplib.SMTP('localhost') as s:
        s.send_message(msg)
    return None
class emailLogger(object):
    """Collects formatted log records in memory and emails them on demand.

    Attach `log_handler` to a logger; emitted records accumulate in an
    internal deque (bounded by `maxlen` when given) and `send` mails them.
    """

    def __init__(self, maxlen=None):
        # deque bounds memory when maxlen is given; the handler feeds it.
        self._log_queue = collections.deque(maxlen=maxlen)
        self._log_handler = emailLogHandler(self._log_queue)

    def contents(self):
        """Return all captured log lines joined into one string."""
        return '\n'.join(self._log_queue)

    @property
    def log_handler(self):
        """Handler to attach to a logger so its records are captured."""
        return self._log_handler

    def send(self, subject, email_file, log, allemail=False):
        """Email the captured log, retrying with reduced content on failure.

        Fallback order: full log; log with slurm client chatter removed;
        finally a fixed error marker. (The previous implementation iterated
        the joined string character-by-character and then overwrote the
        `contents` method with a list, so every fallback path crashed with
        TypeError. The redundant pre-read of email_file is also dropped;
        send_email performs and reports that check itself.)
        """
        content = self.contents()
        try:
            send_email(subject, email_file, None, log, content=content,
                       from_domain="chpc.utah.edu", allemail=allemail)
            return
        except Exception:
            # Drop noisy slurm client lines and retry with a smaller body.
            filtered = [line for line in content.splitlines()
                        if 'slurm.session.Client:' not in line]
        try:
            send_email(subject, email_file, None, log,
                       content='\n'.join(filtered),
                       from_domain="chpc.utah.edu", allemail=allemail)
        except Exception:
            send_email(subject, email_file, None, log,
                       content='ERROR Building Email Log',
                       from_domain="chpc.utah.edu", allemail=allemail)
        return
|
sdssREPO_NAMEidlspec2dPATH_START.@idlspec2d_extracted@idlspec2d-master@python@boss_drp@utils@dailylogger.py@.PATH_END.py
|
{
"filename": "dir_lock.py",
"repo_name": "QEF/q-e",
"repo_path": "q-e_extracted/q-e-master/test-suite/testcode/lib/testcode2/dir_lock.py",
"type": "Python"
}
|
'''
testcode2.dir_lock
------------------
Threading lock initialisation and helper.
:copyright: (c) 2012 James Spencer.
:license: modified BSD; see LICENSE for more details.
'''
import os
import threading
import testcode2.compatibility as compat
class DirLock:
    '''Helper class for working with threading locks.'''

    def __init__(self):
        self.lock = threading.Lock()

    def with_lock(self, func):
        '''Decorate function to be executed whilst holding the lock.

        :param function func: arbitary function.
        '''
        @compat.functools.wraps(func)
        def decorated_func(*args, **kwargs):
            '''Function decorated by Lock.with_lock.'''
            # `with` acquires on entry and releases on exit, including on error.
            with self.lock:
                return func(*args, **kwargs)
        return decorated_func

    def in_dir(self, ddir):
        '''Decorate function so it is executed in the given directory ddir.

        The thread executing the function holds the lock whilst entering ddir
        and executing the function, making the chdir thread-safe; not suited
        to computationally-demanding functions.

        :param string ddir: directory in which the decorated function is executed.
        '''
        # Decorator-with-arguments pattern: return a wrapper that produces
        # the decorated function.
        def wrapper(func):
            '''Wrap func to hold lock whilst being executed in ddir.

            :param string func: arbitrary function.
            '''
            @compat.functools.wraps(func)
            @self.with_lock
            def decorated_func(*args, **kwargs):
                '''Function decorated by Lock.in_dir.'''
                original_dir = os.getcwd()
                os.chdir(ddir)
                try:
                    # Always return to the original directory, whether func
                    # returns normally or raises.
                    return func(*args, **kwargs)
                finally:
                    os.chdir(original_dir)
            return decorated_func
        return wrapper
|
QEFREPO_NAMEq-ePATH_START.@q-e_extracted@q-e-master@test-suite@testcode@lib@testcode2@dir_lock.py@.PATH_END.py
|
{
"filename": "_orientation.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnel/_orientation.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``orientation`` property of ``funnel`` traces."""

    def __init__(self, plotly_name="orientation", parent_name="funnel", **kwargs):
        # "v" = vertical funnel, "h" = horizontal funnel.
        edit_type = kwargs.pop("edit_type", "calc+clearAxisTypes")
        values = kwargs.pop("values", ["v", "h"])
        super(OrientationValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnel@_orientation.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/pie/insidetextfont/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``family`` (font family) property of ``pie.insidetextfont``."""

    def __init__(
        self, plotly_name="family", parent_name="pie.insidetextfont", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        array_ok = kwargs.pop("array_ok", True)  # per-sector families allowed
        edit_type = kwargs.pop("edit_type", "plot")
        no_blank = kwargs.pop("no_blank", True)
        role = kwargs.pop("role", "style")
        strict = kwargs.pop("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            no_blank=no_blank,
            role=role,
            strict=strict,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@pie@insidetextfont@_family.py@.PATH_END.py
|
{
"filename": "test_estimators.py",
"repo_name": "tslearn-team/tslearn",
"repo_path": "tslearn_extracted/tslearn-main/tslearn/tests/test_estimators.py",
"type": "Python"
}
|
"""
The :mod:`tslearn.testing_utils` module includes various utilities that can
be used for testing.
"""
import tslearn
import pkgutil
import inspect
from functools import partial
from operator import itemgetter
import sklearn
from sklearn.base import (BaseEstimator, ClassifierMixin, ClusterMixin,
RegressorMixin, TransformerMixin)
try:
# Most recent
from sklearn.utils._testing import SkipTest
except ImportError:
# Deprecated from sklearn v0.24 onwards
from sklearn.utils.testing import SkipTest
from sklearn.exceptions import SkipTestWarning
from sklearn.utils.estimator_checks import (
check_no_attributes_set_in_init,
check_parameters_default_constructible,
_maybe_skip
)
from tslearn.tests.sklearn_patches import (
check_clustering,
check_non_transf_est_n_iter,
check_fit_idempotent,
check_classifiers_classes,
check_classifiers_train,
check_estimators_pickle,
check_supervised_y_2d,
check_regressor_data_not_an_array,
check_classifier_data_not_an_array,
check_regressors_int_patched,
check_classifiers_cont_target,
check_pipeline_consistency,
yield_all_checks,
_create_large_ts_dataset)
from tslearn.shapelets import LearningShapelets
import warnings
import pytest
# Patching some check functions to work on ts data instead of tabular data.
# NOTE: these assignments mutate sklearn.utils.estimator_checks in place, so
# every later use of the sklearn check suite in this process (including
# check_estimator below) runs the time-series-aware replacements from
# tslearn.tests.sklearn_patches.
checks = sklearn.utils.estimator_checks
checks._yield_all_checks = yield_all_checks
checks.check_clustering = check_clustering
checks.check_non_transformer_estimators_n_iter = check_non_transf_est_n_iter
checks.check_fit_idempotent = check_fit_idempotent
checks.check_classifiers_classes = check_classifiers_classes
checks.check_classifiers_train = check_classifiers_train
checks.check_estimators_pickle = check_estimators_pickle
checks.check_supervised_y_2d = check_supervised_y_2d
checks.check_regressor_data_not_an_array = check_regressor_data_not_an_array
checks.check_classifier_data_not_an_array = check_classifier_data_not_an_array
checks.check_regressors_int = check_regressors_int_patched
checks.check_classifiers_regression_target = check_classifiers_cont_target
checks.check_pipeline_consistency = check_pipeline_consistency
checks._regression_dataset = _create_large_ts_dataset
def _get_all_classes():
    """Walk every module under the tslearn package and collect all
    (name, class) members found in importable modules."""
    collected = []
    base_path = tslearn.__path__
    for _, mod_name, _ in pkgutil.walk_packages(path=base_path,
                                                prefix='tslearn.'):
        try:
            module = __import__(mod_name, fromlist="dummy")
        except ImportError:
            if mod_name.endswith('shapelets'):
                # keras is likely not installed
                warnings.warn('Skipped common tests for shapelets '
                              'as it could not be imported. keras '
                              '(and tensorflow) are probably not '
                              'installed!')
                continue
            if mod_name.endswith('pytorch_backend'):
                # pytorch is likely not installed
                continue
            raise Exception('Could not import module %s' % mod_name)
        collected.extend(inspect.getmembers(module, inspect.isclass))
    return collected
def is_abstract(c):
    """Return True when class `c` still has unimplemented abstract methods."""
    abstracts = getattr(c, '__abstractmethods__', None)
    if abstracts is None:
        # Not built by ABCMeta (or attribute absent): concrete by definition.
        return False
    return len(abstracts) > 0
def is_sklearn(x):
    """Return True when object `x` is defined inside the sklearn package."""
    module = inspect.getmodule(x)
    return module.__name__.startswith('sklearn')
def get_estimators(type_filter='all'):
    """Return a list of classes that inherit from `sklearn.BaseEstimator`.

    This code is based on `sklearn.utils.testing.all_estimators`.

    Parameters
    ----------
    type_filter : str (default: 'all')
        A value in ['all', 'classifier', 'transformer', 'cluster'] which
        defines which type of estimators to retrieve

    Returns
    -------
    list
        Collection of estimators of the type specified in `type_filter`
    """
    if type_filter not in ('all', 'classifier', 'transformer', 'cluster'):
        # TODO: make this exception more specific
        raise Exception("type_filter should be element of "
                        "['all', 'classifier', 'transformer', 'cluster']")

    # Keep tslearn-defined, concrete subclasses of BaseEstimator only.
    candidates = [c for c in set(_get_all_classes())
                  if issubclass(c[1], BaseEstimator)
                  and not is_abstract(c[1])
                  and not is_sklearn(c[1])]

    # Mixins that an estimator must carry to pass the requested filter.
    mixins = {
        'all': [ClassifierMixin, RegressorMixin,
                TransformerMixin, ClusterMixin],
        'classifier': [ClassifierMixin],
        'transformer': [TransformerMixin],
        'cluster': [ClusterMixin]
    }[type_filter]

    matching = [c for c in candidates
                if any(issubclass(c[1], mixin) for mixin in mixins)]

    # Remove duplicates and return a deterministic, name-sorted list.
    return sorted(set(matching), key=itemgetter(0))
def check_estimator(Estimator):
    """Check if estimator adheres to scikit-learn conventions.

    This estimator will run an extensive test-suite for input validation,
    shapes, etc.
    Additional tests for classifiers, regressors, clustering or transformers
    will be run if the Estimator class inherits from the corresponding mixin
    from sklearn.base.

    This test can be applied to classes or instances.
    Classes currently have some additional tests that related to construction,
    while passing instances allows the testing of multiple options.

    Parameters
    ----------
    estimator : estimator object or class
        Estimator to check. Estimator is a class object or instance.
    """
    if isinstance(Estimator, type):
        # got a class: also run the construction-time checks
        name = Estimator.__name__
        estimator = Estimator()
        check_parameters_default_constructible(name, estimator)
        check_no_attributes_set_in_init(name, estimator)
    else:
        # got an instance
        estimator = Estimator
        name = type(estimator).__name__
    # Shrink expensive hyper-parameters so the check suite runs quickly;
    # LearningShapelets needs more iterations than the rest to converge
    # enough for its checks to pass.
    if hasattr(estimator, 'max_iter'):
        if isinstance(estimator, LearningShapelets):
            estimator.set_params(max_iter=100)
        else:
            estimator.set_params(max_iter=10)
    if hasattr(estimator, 'total_lengths'):
        estimator.set_params(total_lengths=1)
    if hasattr(estimator, 'probability'):
        estimator.set_params(probability=True)

    def checks_generator():
        # `checks` is the (patched) sklearn estimator_checks module; honor
        # per-estimator skip markers via _maybe_skip.
        for check in checks._yield_all_checks(name, estimator):
            check = _maybe_skip(estimator, check)
            yield estimator, partial(check, name)

    for estimator, check in checks_generator():
        try:
            check(estimator)
        except SkipTest as exception:
            # the only SkipTest thrown currently results from not
            # being able to import pandas.
            warnings.warn(str(exception), SkipTestWarning)
@pytest.mark.parametrize('name, Estimator', get_estimators('all'))
def test_all_estimators(name, Estimator):
    """Test all the estimators in tslearn."""
    # Register NaN-tolerant estimators with the (patched) checks module when
    # that module exposes an ALLOW_NAN registry.
    if hasattr(checks, 'ALLOW_NAN') and Estimator().get_tags()["allow_nan"]:
        checks.ALLOW_NAN.append(name)
    if name in ["ShapeletModel"]:
        # Deprecated models
        return
    check_estimator(Estimator)
|
tslearn-teamREPO_NAMEtslearnPATH_START.@tslearn_extracted@tslearn-main@tslearn@tests@test_estimators.py@.PATH_END.py
|
{
"filename": "Polytropic_EOSs.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/TOV/Polytropic_EOSs.py",
"type": "Python"
}
|
## Polytropic EOSs Python Module
## Author(s): Leo Werneck and Zach Etienne
## In this NRPy+ module we set up useful "lowlevel" functions that compute
## useful polytropic quantities.
# Full documentation for this module may be found in the NRPy+ tutorial Jupyter notebook:
# Tutorial-TOV-Piecewise_Polytrope_EOSs.ipynb
# This ensures the availability of the argument end="" in the print() function.
from __future__ import print_function
# Step 0: Import needed Python/NRPy+ modules
import numpy as np # NumPy: A numerical methods module for Python
import sys # This module is used for system related function calls
from collections import namedtuple # This module is used to create named tuples
# Function : impose_continuity_on_P_cold()
# Author(s) : Leo Werneck
# Description : This function populates the array K_poly_tab
# by demanding that P_cold be everywhere continuous
# Dependencies : none
#
# Inputs : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - uninitialized, see output variable below
# P_poly_tab - uninitialized, see function
# compute_P_poly_tab() below
# K_poly_tab0 - value of K_poly_tab[0], for the first EOS
#
# Outputs : eos.K_poly_tab - values of K to be used within each EOS, determined
# by imposing that P_cold be everywhere continuous
def impose_continuity_on_P_cold(eos, K_poly_tab0):
    """Populate eos.K_poly_tab so the cold pressure P_cold is continuous.

    For a piecewise polytrope P = K_j * rho^Gamma_j on each density interval,
    continuity of P_cold across the interval boundaries rho_j requires

        K_j = K_{j-1} * rho_{j-1}^( Gamma_{j-1} - Gamma_j ).

    Parameters
    ----------
    eos : named tuple with fields neos, rho_poly_tab, Gamma_poly_tab,
          K_poly_tab (mutated in place), P_poly_tab
    K_poly_tab0 : value assigned to eos.K_poly_tab[0] (seed of the recurrence)
    """
    eos.K_poly_tab[0] = K_poly_tab0
    # A single polytrope has no interior boundaries: only K_0 is needed.
    if eos.neos == 1:
        return
    # March outward, fixing each K_j from the previous piece.
    for j in range(1, eos.neos):
        exponent = eos.Gamma_poly_tab[j - 1] - eos.Gamma_poly_tab[j]
        eos.K_poly_tab[j] = eos.K_poly_tab[j - 1] * eos.rho_poly_tab[j - 1] ** exponent
    return
# Function : compute_P_poly_tab()
# Author(s) : Leo Werneck
# Description : This function populates the array eos.P_poly_tab,
# used to distinguish which EOS we are using in the
# case of a piecewise polytropic EOS
# Dependencies : none
#
# Inputs : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho used to distinguish one EOS from
# the other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - uninitialized, see output variable below
#
# Outputs : eos.P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
def compute_P_poly_tab(eos):
    """Populate eos.P_poly_tab with the cold pressures at the piece boundaries.

    P_poly_tab[j] = K_j * rho_j^(Gamma_j) is the pressure evaluated at the
    j-th boundary density; these values allow selecting the correct piece of
    the EOS from a pressure instead of a density.  A single polytrope has no
    boundaries, so there is nothing to do in that case.
    """
    if eos.neos == 1:
        return
    # One boundary pressure per piece boundary (neos-1 of them).
    for jj, rho_bnd in enumerate(eos.rho_poly_tab[:eos.neos-1]):
        eos.P_poly_tab[jj] = eos.K_poly_tab[jj] * rho_bnd**eos.Gamma_poly_tab[jj]
    return
# Function : impose_continuity_on_eps_cold()
# Author(s) : Leo Werneck
# Description : This function populates the array eps_integ_const_tab
# by demanding that eps_cold be everywhere continuous
# Dependencies : none
#
# Inputs : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# eps_integ_const_tab - uninitialized, see output variable below
#
# Outputs : eos.eps_integ_const_tab - value of C used to compute eps_cold within each EOS,
# determined by imposing that eps_cold be everywhere
# continuous
def impose_continuity_on_eps_cold(eos):
    """Fill in eos.eps_integ_const_tab by demanding eps_cold be continuous.

    Within piece j the cold specific internal energy is

        eps_cold = C_j + K_j * rho^(Gamma_j - 1) / (Gamma_j - 1)

    Requiring eps_cold(rho -> 0) = 0 sets C_0 = 0, and continuity at each
    boundary density rho_(j-1) then determines the remaining constants:

        C_j = C_(j-1)
            + K_(j-1) * rho_(j-1)^(Gamma_(j-1) - 1) / (Gamma_(j-1) - 1)
            - K_j     * rho_(j-1)^(Gamma_j     - 1) / (Gamma_j     - 1)

    For a single polytrope C_0 = 0 is all that is needed, so we return early.
    """
    if eos.neos == 1:
        return
    eos.eps_integ_const_tab[0] = 0.0
    for jj in range(1, eos.neos):
        rho_bnd = eos.rho_poly_tab[jj-1]
        # Contribution of piece j-1 evaluated at the shared boundary...
        term_prev = eos.K_poly_tab[jj-1]*rho_bnd**(eos.Gamma_poly_tab[jj-1]-1.0)/(eos.Gamma_poly_tab[jj-1]-1)
        # ...and of piece j at the same boundary.
        term_curr = eos.K_poly_tab[jj]*rho_bnd**(eos.Gamma_poly_tab[jj]-1.0)/(eos.Gamma_poly_tab[jj]-1)
        eos.eps_integ_const_tab[jj] = eos.eps_integ_const_tab[jj-1] + term_prev - term_curr
    return
# Function : set_up_EOS_parameters__complete_set_of_input_variables()
# Author(s) : Leo Werneck
# Description  : This function determines all polytropic related
# parameters from user input
# Dependencies : impose_continuity_on_P_cold()
# compute_P_poly_tab()
#
# Inputs : neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab0 - value of K_poly_tab[0], for the first EOS
#
# Outputs : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho used to distinguish one EOS from
# the other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
def set_up_EOS_parameters__complete_set_of_input_variables(neos,rho_poly_tab,Gamma_poly_tab,K_poly_tab0):
    """Build the EOS named tuple from a complete set of user inputs.

    Inputs: neos (number of polytropic pieces), rho_poly_tab (the neos-1
    boundary densities; unused for a single polytrope), Gamma_poly_tab
    (neos adiabatic indices), and K_poly_tab0 (polytropic constant of the
    first piece).  The remaining K's, the eps_cold integration constants,
    and the boundary pressures are derived by the continuity helpers.
    Exits with an error message if the table lengths are inconsistent
    with neos.
    """
    # Sanity-check the table lengths against neos before doing any work.
    # (rho_poly_tab is never touched when neos == 1.)
    if neos > 1 and len(rho_poly_tab) != neos-1:
        print("Error: neos="+str(neos)+". Expected "+str(neos-1)+" values of rho_poly_tab, but "+str(len(rho_poly_tab))+" values were given.")
        sys.exit(1)
    if len(Gamma_poly_tab) != neos:
        print("Error: neos="+str(neos)+". Expected "+str(neos)+" values of Gamma_poly_tab, but "+str(len(Gamma_poly_tab))+" values were given.")
        sys.exit(1)
    # Allocate storage; the continuity helpers fill these lists in place.
    K_poly_tab = [0]*neos
    P_poly_tab = [0]*(neos-1)
    eps_integ_const_tab = [0]*neos
    # Assemble the EOS "struct" (named tuple).
    eos_struct = namedtuple("eos_struct","neos rho_poly_tab Gamma_poly_tab K_poly_tab P_poly_tab eps_integ_const_tab")
    eos = eos_struct(neos,rho_poly_tab,Gamma_poly_tab,K_poly_tab,P_poly_tab,eps_integ_const_tab)
    # Step 1: K_j from pressure continuity.
    impose_continuity_on_P_cold(eos,K_poly_tab0)
    # Step 2: eps_cold integration constants from energy continuity.
    impose_continuity_on_eps_cold(eos)
    # Step 3: boundary pressures P_j.
    compute_P_poly_tab(eos)
    return eos
# Function : set_up_EOS_parameters__Read_et_al_input_variables()
# Author(s) : Leo Werneck
# Description  : This function determines all polytropic related
# parameters from user input
# Dependencies : impose_continuity_on_P_cold()
# compute_P_poly_tab()
#
# Inputs : neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab0 - value of K_poly_tab[0], for the first EOS
#
# Outputs : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho used to distinguish one EOS from
# the other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
def set_up_EOS_parameters__Read_et_al_input_variables(EOSname,units="rescaledensity"):
    """Build the 7-piece EOS named tuple for one of the Read et al. EOSs.

    EOSname selects an entry of the NRPy+ dictionary built from Table III of
    Read et al. (2009), https://arxiv.org/pdf/0812.2163.pdf; the low-density
    crust pieces are fixed and the high-density pieces are set from the
    tabulated (log_of_p4, Gamma4, Gamma5, Gamma6).  The "units" keyword
    selects the unit system of the returned rho_poly_tab/P_poly_tab/
    K_poly_tab: "rescaledensity" (default, nuclear density rescaled to
    unity), "geometrized" (G = c = Msun = 1), or "cgs".
    Exits via sys.exit(1) on an unknown units string; an unknown EOSname
    raises a KeyError from the dictionary lookup.
    """
    # Check if the input units are implemented below
    available_units = ["rescaledensity","geometrized","cgs"]
    if units not in available_units:
        print("ERROR: unknown units ",units)
        print("Available units are: ",end="")
        for unit in available_units:
            print(unit,end=" ")
        print("")
        sys.exit(1)
    # Set up the number of polytropic EOSs, which is
    # fixed at seven for this type of input
    neos = 7
    # Set up input from table II of Read et al. (2008),
    # and from the legend in FIG. 3.
    # Source: https://arxiv.org/pdf/0812.2163.pdf
    # .--------------.---------.-------------.-----.
    # | rho_j | Gamma_j | K_j | P_j |
    # .--------------.---------.-------------.-----.
    # | 2.440340e+07 | 1.58425 | 6.80110e-09 | P_0 |
    # | 3.78358e+11 | 1.28733 | K_1 | P_1 |
    # | 2.62780e+12 | 0.62223 | K_2 | P_2 |
    # | rho_3 | 1.35692 | K_3 | P_3 |
    # | 10^(14.7) | Gamma_4 | K_4 | P_4 |
    # | 10^(15.0) | Gamma_5 | K_5 | P_5 |
    # | - | Gamma_6 | K_6 | - |
    # .--------------.---------.-------------.-----.
    #
    # Load up the NRPy+ dictionary containing information about the Read et al.
    # EOS tables (table III in Read et al. - https://arxiv.org/pdf/0812.2163.pdf)
    import TOV.Piecewise_Polytrope__dict as PPdict
    log_of_p4 = PPdict.EOS_Read_et_al_dict[EOSname].log_of_p4
    Gamma4 = PPdict.EOS_Read_et_al_dict[EOSname].Gamma4
    Gamma5 = PPdict.EOS_Read_et_al_dict[EOSname].Gamma5
    Gamma6 = PPdict.EOS_Read_et_al_dict[EOSname].Gamma6
    # Set up the speed of light and change the units of the input pressure
    c = 2.997924580000000e+10 # Speed of light
    G = 6.674299999999999e-08 # Gravitational constant
    M = 1.988409870698051e+33 # Mass of the sun
    # Dividing P by c^2 puts pressure and density in the same units (g/cm^3),
    # which the rescaling logic below relies on.
    log_of_p4 -= 2.0*np.log10(c)
    # Set up tabulated polytropic values following the table above
    # and the user input. All quantities which are still unknown are
    # set to absurd values (-1e30) to make sure they are overwritten
    rho_poly_tab = [2.440340e+07, 3.78358e+11, 2.62780e+12, -1e30 , 10**(14.7) , 10**(15.0)]
    P_poly_tab = [-1e30 , -1e30 , -1e30 , -1e30 , 10**(log_of_p4), -1e30 ]
    Gamma_poly_tab = [1.58425 , 1.28733 , 0.62223 , 1.35692, Gamma4 , Gamma5, Gamma6]
    K_poly_tab = [6.80110e-09 , -1e30 , -1e30 , -1e30 , -1e30 , -1e30 , -1e30]
    # Compute {K_1,K_2,K_3}, using
    # .-----------------------------------------------------.
    # | K_j = K_(j-1) * rho_(j-1)^( Gamma_(j-1) - Gamma_j ) |
    # .-----------------------------------------------------.
    for j in range(1,4):
        K_poly_tab[j] = K_poly_tab[j-1] * rho_poly_tab[j-1]**(Gamma_poly_tab[j-1] - Gamma_poly_tab[j])
    # Compute {P_0,P_1,P_2}, using
    # .-------------------------------.
    # | P_j = K_j * rho_j^( Gamma_j ) |
    # .-------------------------------.
    for j in range(3):
        P_poly_tab[j] = K_poly_tab[j] * rho_poly_tab[j]**(Gamma_poly_tab[j])
    # Set up auxiliary variables for the evaluation of rho_3
    P4 = P_poly_tab[4]
    K3 = K_poly_tab[3]
    rho4_p_Gam4 = rho_poly_tab[4]**(Gamma_poly_tab[4])
    G3m4 = Gamma_poly_tab[3] - Gamma_poly_tab[4]
    # Compute rho_3 (the crust/core matching density) using
    # .----------------------------------------------------------------------.
    # | rho_3 = ( P_4 /( K_3 * rho_4^(Gamma_4) ) )^(1.0/(Gamma_3 - Gamma_4)) |
    # .----------------------------------------------------------------------.
    rho_poly_tab[3] = ( P4/(K3 * rho4_p_Gam4) )**(1.0/G3m4)
    # Compute {P_3,P_4,P_5} and {K_4,K_5,K_6}
    for j in range(3,neos-1):
        P_poly_tab[j] = K_poly_tab[j] * rho_poly_tab[j]**(Gamma_poly_tab[j])
        K_poly_tab[j+1] = K_poly_tab[j] * rho_poly_tab[j]**(Gamma_poly_tab[j] - Gamma_poly_tab[j+1])
    if units == "rescaledensity":
        # We impose a "ratio preserving rescaling" of rhob:
        #
        # rhob_rescaled[j] / rhob[j] = rhob_rescaled[j-1] / rhob[j-1]
        #
        # which implies the relation
        # .-------------------------------------------------------------.
        # | rhob_rescaled[j-1] = (rhob[j-1]/rhob[j]) * rhob_rescaled[j] |
        # .-------------------------------------------------------------.
        # after setting rhob_nuclear_rescaled = 1
        rhob_rescaled = [1.0 for i in range(neos-1)]
        for j in range(neos-2,0,-1):
            rhob_rescaled[j-1] = (rho_poly_tab[j-1]/rho_poly_tab[j]) * rhob_rescaled[j]
        # Now because the values of P and rho given by Read et al. are already
        # in the same units, namely (g/cm^3), the ratio P/rho should be invariant
        # under this rescaling procedure. Therefore
        # .---------------------------------------------------------------------.
        # | P_rescaled[j] = (rhob_rescaled[j]/rhob_readetal[j]) * P_readetal[j] |
        # .---------------------------------------------------------------------.
        P_rescaled = [0.0 for i in range(neos-1)]
        for j in range(neos-1):
            P_rescaled[j] = (rhob_rescaled[j]/rho_poly_tab[j]) * P_poly_tab[j]
        rho_poly_tab = rhob_rescaled
        P_poly_tab = P_rescaled
    elif units == "geometrized" :
        # Now convert to units in which Msun = 1, G = 1, c = 1
        csq = c**2
        units_of_length = G * M / csq
        units_of_time = units_of_length/c
        units_of_mass = M
        units_of_density = units_of_mass / units_of_length**3
        units_of_pressure = units_of_mass / units_of_length / units_of_time**2
        for i in range(neos-1):
            rho_poly_tab[i] /= units_of_density
            # P was divided by c^2 above; multiply it back before converting.
            P_poly_tab[i] *= csq
            P_poly_tab[i] /= units_of_pressure
    elif units == "cgs" :
        # Restore P to cgs units (undo the division by c^2 above)
        csq = c*c
        for i in range(neos-1):
            P_poly_tab[i] *= csq
    # Demanding that the pressure be everywhere continuous then imposes
    # .-------------------------------------------------------------------------------------------.
    # | K_dimensionless[j-1] = K_dimensionless[j]/rhob_dimensionless[j-1]^(Gamma[j-1] - Gamma[j]) |
    # .-------------------------------------------------------------------------------------------.
    K_poly_tab[0] = P_poly_tab[0]/rho_poly_tab[0]**(Gamma_poly_tab[0])
    for j in range(1,neos):
        K_poly_tab[j] = K_poly_tab[j-1]*rho_poly_tab[j-1]**(Gamma_poly_tab[j-1]-Gamma_poly_tab[j])
    # Allocate memory for the integration constants of eps_cold
    eps_integ_const_tab = [0 for i in range(neos)]
    # Create the EOS "struct" (named tuple)
    eos_struct = namedtuple("eos_struct","neos rho_poly_tab Gamma_poly_tab K_poly_tab P_poly_tab eps_integ_const_tab")
    eos = eos_struct(neos,rho_poly_tab,Gamma_poly_tab,K_poly_tab,P_poly_tab,eps_integ_const_tab)
    # Populate the integration constants of eps_cold
    impose_continuity_on_eps_cold(eos)
    return eos
# Function : Polytrope_EOS__compute_P_cold_from_rhob()
# Author(s) : Leo Werneck
# Description : This function computes P_cold for a polytropic EOS
# Dependencies : polytropic_index_from_rhob()
#
# Inputs : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
# rho_baryon - the value of rho for which we want to
# compute P_cold
#
# Outputs : P_cold - for a single or piecewise polytropic EOS
def Polytrope_EOS__compute_P_cold_from_rhob(eos, rho_baryon):
    """Return the cold pressure P_cold = K_j * rho_b^(Gamma_j), where the
    polytropic piece j is selected from the input baryon density."""
    # Select the piece containing rho_baryon, then evaluate its polytrope.
    jj = polytropic_index_from_rhob(eos, rho_baryon)
    return eos.K_poly_tab[jj] * rho_baryon**eos.Gamma_poly_tab[jj]
# Function : Polytrope_EOS__compute_rhob_from_P_cold()
# Author(s) : Leo Werneck
# Description : This function computes rho_b for a polytropic EOS
# Dependencies : polytropic_index_from_P()
#
# Inputs : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
# P - the value of P for which we want to
# compute rho_b
#
# Outputs : rho_baryon - for a single or piecewise polytropic EOS
def Polytrope_EOS__compute_rhob_from_P_cold(eos,P):
    """Invert the cold EOS: return rho_b = (P/K_j)^(1/Gamma_j), with the
    polytropic piece j selected from the input pressure."""
    # Select the piece containing P, then invert its polytrope.
    jj = polytropic_index_from_P(eos,P)
    return (P/eos.K_poly_tab[jj])**(1.0/eos.Gamma_poly_tab[jj])
# Function : Polytrope_EOS__compute_eps_cold_from_rhob()
# Author(s) : Leo Werneck
# Description : This function computes eps_cold for a polytropic EOS
# Dependencies : polytropic_index_from_rhob()
#
# Inputs : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
# rho_baryon - the value of rho for which we want to
# compute P_cold
#
# Outputs : eps_cold - for a single or piecewise polytropic EOS
def Polytrope_EOS__compute_eps_cold_from_rhob(eos, rho_baryon):
    """Return the cold specific internal energy

        eps_cold = C_j + P_cold / ( rho_b * (Gamma_j - 1) )

    where C_j are the integration constants that make eps_cold continuous.
    Zero density maps to zero energy (which also avoids dividing by rho_b).
    """
    if rho_baryon == 0.0:
        return 0.0
    # Pick the piece from the density, evaluate its cold pressure there,
    # then assemble eps_cold from the boxed formula above.
    jj = polytropic_index_from_rhob(eos, rho_baryon)
    P_cold = Polytrope_EOS__compute_P_cold_from_rhob(eos, rho_baryon)
    return eos.eps_integ_const_tab[jj] + P_cold/(rho_baryon*(eos.Gamma_poly_tab[jj] - 1.0))
# Function : Polytrope_EOS__compute_rhob_and_eps_cold_from_P_cold()
# Author(s) : Leo Werneck
# Description : This function computes rho_b and eps_cold for a polytropic EOS
# Dependencies : polytropic_index_from_P()
# Polytrope_EOS__compute_rhob_from_P_cold()
#
# Inputs : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
# P - the value of P for which we want to
# compute rho_b
#
# Outputs : rho_baryon - for a single or piecewise polytropic EOS
def Polytrope_EOS__compute_rhob_and_eps_cold_from_P_cold(eos,P):
    """Return the pair (rho_b, eps_cold) corresponding to the input cold
    pressure P: rho_b follows from inverting P = K_j * rho^(Gamma_j), and
    eps_cold is then evaluated at that density."""
    # Select the piece from the pressure and invert it for the density.
    jj = polytropic_index_from_P(eos,P)
    rhob = (P/eos.K_poly_tab[jj])**(1.0/eos.Gamma_poly_tab[jj])
    # Evaluate the cold specific internal energy at the recovered density.
    return rhob, Polytrope_EOS__compute_eps_cold_from_rhob(eos, rhob)
# Function : polytropic_index_from_rhob()
# Author(s) : Leo Werneck and Zach Etienne
# Description : This function computes P_cold for a polytropic EOS
# Dependencies : none
#
# Input(s) : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
# rho_in - value of rho for which we compute the
# polytropic index
#
# Output(s) : polytropic index computed from rho_in
def polytropic_index_from_rhob(eos, rho_in):
    """Return the index j of the polytropic piece containing rho_in, i.e.
    the number of boundary densities rho_poly_tab[j] that rho_in strictly
    exceeds (always 0 for a single polytrope)."""
    if eos.neos == 1:
        return 0
    # Count how many of the neos-1 boundaries lie below rho_in.
    return sum(rho_in > rho_bnd for rho_bnd in eos.rho_poly_tab[:eos.neos-1])
# Function : polytropic_index_from_P()
# Author(s) : Leo Werneck and Zach Etienne
# Description : This function computes P_cold for a polytropic EOS
# Dependencies : none
#
# Input(s) : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
# P_in - value of P for which we compute the
# polytropic index
#
# Output(s) : polytropic index computed from P_in
def polytropic_index_from_P(eos, P_in):
    """Return the index j of the polytropic piece containing pressure P_in,
    i.e. the number of boundary pressures P_poly_tab[j] that P_in strictly
    exceeds (always 0 for a single polytrope)."""
    if eos.neos == 1:
        return 0
    # Count how many of the neos-1 boundary pressures lie below P_in.
    return sum(P_in > P_bnd for P_bnd in eos.P_poly_tab[:eos.neos-1])
# Function : generate_IllinoisGRMHD_EOS_parameter_file()
# Author(s) : Leo Werneck and Zach Etienne
# Description : This function computes P_cold for a polytropic EOS
# Dependencies : none
#
# Input(s) : eos - named tuple containing the following:
# neos - number of EOSs to be used (single polytrope = 1)
# rho_poly_tab - values of rho distinguish one EOS from the
# other (not required for a single polytrope)
# Gamma_poly_tab - values of Gamma to be used within each EOS
# K_poly_tab - value of K to be used within each EOS
# P_poly_tab - values of P used to distinguish one EOS from
# the other (not required for a single polytrope)
# Output(s) : parameter file to be used by IllinoisGRMHD
def generate_IllinoisGRMHD_EOS_parameter_file(EOSname,outfilename,
                                              Gamma_thermal=None,
                                              EOS_struct=None,
                                              tau_atmosphere=4.876083025795607e-12,
                                              rho_atmosphere=1.292852735094440e-10,
                                              K_single_polytrope=1.0,
                                              Gamma_single_polytrope=2.0):
    """Write the IllinoisGRMHD/NRPyPlusTOVID/EOS_Omni EOS section of a
    parameter file to `outfilename` (the file is first truncated).

    EOSname selects the mode:
      - "single"    : single polytrope; K_single_polytrope and
                      Gamma_single_polytrope are used, and Gamma_th is set
                      equal to Gamma_single_polytrope.
      - "piecewise" : generic piecewise polytrope; EOS_struct (named tuple
                      from set_up_EOS_parameters__*) and Gamma_thermal are
                      required.
      - otherwise   : name of a Read et al. EOS (key of
                      TOV.Piecewise_Polytrope__dict); the EOS table is
                      built internally and Gamma_thermal is required.

    tau_atmosphere and rho_atmosphere set the atmosphere parameters in the
    generated file.  Exits via sys.exit(1) on missing/invalid input.
    """
    with open(outfilename,"w") as file:
        file.write("""
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
#
#.-------------------------------------------------------------------------.
#| IllinoisGRMHD Equation of State (EOS) parameter file Generated by NRPy+ |
#|-------------------------------------------------------------------------|
#|         This section of the parameter file has been generated by        |
#|                 the TOV/Polytropic_EOSs.py NRPy+ module                 |
#|-------------------------------------------------------------------------|
#|    Recommended reading: Tutorial-TOV-Piecewise_Polytrope_EOSs.ipynb     |
#|-------------------------------------------------------------------------|
#| NRPy+ repositoryon github: https://github.com/zachetienne/nrpytutorial/ |
#|-------------------------------------------------------------------------|
#| *Warning*: it is highly recommended not to change this section manually |
#.-------------------------------------------------------------------------.
""")
    if EOSname == "single":
        # Single-polytrope mode: everything follows from (K,Gamma) and the
        # atmosphere values; Gamma_th is forced to Gamma_poly_tab.
        with open(outfilename,"a") as file:
            file.write("""#
#.-------------------------------.
#|  EOS Type: Single Polytrope   |
#.-------------------------------.
#| Required inputs:              |
#| - K_single_polytrope          |
#| - Gamma_single_polytrope      |
#| - tau_atmosphere              |
#| - rho_atmosphere              |
#|                               |
#| IllinoisGRMHD parameters set: |
#| - neos                        |
#| - K_ppoly_tab0                |
#| - rho_ppoly_tab_in[0]         |
#| - Gamma_ppoly_tab_in[0]       |
#| - Gamma_th                    |
#| - tau_atm                     |
#| - rho_b_atm                   |
#|                               |
#| NRPyPlusTOVID parameters set: |
#| - rho_atmosphere              |
#| - Gamma_atmosphere            |
#| - K_atmosphere                |
#|-------------------------------|
#| For single polytropes, we     |
#| always assume:                |
#| Gamma_th = Gamma_poly_tab     |
#.-------------------------------.
#
# Set up initial data file name
NRPyPlusTOVID::TOV_filename = "outputTOVpolytrope.txt"
# Set the number of EOSs to 1 (single polytrope)
IllinoisGRMHD::neos = 1
# Set atmospheric value of tau
IllinoisGRMHD::tau_atm = %.15e
# Set atmospheric value of rho
IllinoisGRMHD::rho_b_atm = %.15e
NRPyPlusTOVID::rho_atmosphere = %.15e
# Set K_ppoly_tab0 and K_atmosphere
IllinoisGRMHD::K_ppoly_tab0 = %.15e
NRPyPlusTOVID::K_atmosphere = %.15e
# Set Gamma_ppoly_tab_in[0] and Gamma_atmosphere
IllinoisGRMHD::Gamma_ppoly_tab_in[0] = %.15e
NRPyPlusTOVID::Gamma_atmosphere = %.15e
# Set Gamma_thermal
# (must be the same as Gamma_ppoly_tab for a single polytrope)
IllinoisGRMHD::Gamma_th = %.15e
# Set rho_ppoly_tab_in[0] to zero
# (for a single polytrope this value is not used)
IllinoisGRMHD::rho_ppoly_tab_in[0] = 0.0
#.----------------------.
#| EOS_Omni parameters: |
#| - n_pieces           |
#| - hybrid_k0          |
#| - hybrid_gamma[0]    |
#| - hybrid_gamma_th    |
#.----------------------.
# Set up the number of polytropic EOSs.
EOS_Omni::n_pieces = 1
# Set hybrid_k0 to K_ppoly_tab0
EOS_Omni::hybrid_k0 = %.15e
# Set hybrid_gamma to Gamma_ppoly_tab_in
EOS_Omni::hybrid_gamma[0] = %.15e
# Set hybrid_gamma_th to Gamma_th
EOS_Omni::hybrid_gamma_th = %.15e
#.--------------------------------.
#| End of NRPy+ generated section |
#.--------------------------------.
#
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""%(tau_atmosphere, # sets IllinoisGRMHD::tau_atm
     rho_atmosphere, # sets IllinoisGRMHD::rho_b_atm
     rho_atmosphere, # sets NRPyPlusTOVID::rho_atmosphere
     K_single_polytrope, # sets IllinoisGRMHD::K_ppoly_tab0
     K_single_polytrope, # sets NRPyPlusTOVID::K_atmosphere
     Gamma_single_polytrope, # sets IllinoisGRMHD::Gamma_ppoly_tab_in[0]
     Gamma_single_polytrope, # sets NRPyPlusTOVID::Gamma_atmosphere
     Gamma_single_polytrope, # sets IllinoisGRMHD::Gamma_th
     K_single_polytrope, # sets EOS_Omni::hybrid_k0
     Gamma_single_polytrope, # sets EOS_Omni::hybrid_gamma[0]
     Gamma_single_polytrope)) # sets EOS_Omni::hybrid_gamma_th
    elif EOSname == "piecewise":
        # Generic piecewise-polytrope mode: the caller supplies the EOS
        # named tuple and the thermal Gamma explicitly.
        if EOS_struct is None: # Use "is None" instead of "==None", as the former is more correct.
            print("Error: Please set the EOS named tuple. Usage:")
            print("generate_IllinoisGRMHD_EOS_parameter_file(\"piecewise\",outfilename,Gamma_thermal=Gamma_th,EOS_struct=eos_named_tuple)")
            sys.exit(1)
        if Gamma_thermal is None: # Use "is None" instead of "==None", as the former is more correct.
            print("Error: Please set Gamma_thermal. Usage:")
            print("generate_IllinoisGRMHD_EOS_parameter_file(\"piecewise\",outfilename,Gamma_thermal=Gamma_th,EOS_struct=eos_named_tuple)")
            sys.exit(1)
        # Atmosphere values come from the EOS piece containing rho_atmosphere.
        atm_index = polytropic_index_from_rhob(EOS_struct,rho_atmosphere)
        Gamma_atm = EOS_struct.Gamma_poly_tab[atm_index]
        Kpoly_atm = EOS_struct.K_poly_tab[atm_index]
        IDfilename = "outputTOVpolytrope-"+EOSname+".txt"
        with open(outfilename,"a") as file:
            file.write("""#
#.---------------------------------------.
#| EOS Type: Generic Piecewise Polytrope |
#.---------------------------------------.
#| Required parameters:                  |
#| - EOS_struct                          |
#| - Gamma_thermal                       |
#| - tau_atmosphere                      |
#| - rho_atmosphere                      |
#|                                       |
#| IllinoisGRMHD parameters set:         |
#| - neos                                |
#| - K_ppoly_tab0                        |
#| - rho_ppoly_tab_in[j]   0<=j<=neos-2  |
#| - Gamma_ppoly_tab_in[j] 0<=j<=neos-1  |
#| - Gamma_th                            |
#| - tau_atm                             |
#| - rho_b_atm                           |
#.---------------------------------------.
#| NRPyPlusTOVID parameters set:         |
#| - rho_atmosphere                      |
#| - Gamma_atmosphere                    |
#| - K_atmosphere                        |
#.---------------------------------------.
#| EOS_Omni parameters set:              |
#| - n_pieces                            |
#| - hybrid_k0                           |
#| - hybrid_rho[j]   0<=j<=neos-2        |
#| - hybrid_gamma[j] 0<=j<=neos-1        |
#| - hybrid_gamma_th                     |
#.---------------------------------------.
#
# Set up initial data file name
NRPyPlusTOVID::TOV_filename = \"%s\"
# Set up the number of polytropic EOSs.
IllinoisGRMHD::neos = %d
# Set atmospheric value of tau
IllinoisGRMHD::tau_atm = %.15e
# Set K_ppoly_tab0 and K_atmosphere
IllinoisGRMHD::K_ppoly_tab0 = %.15e
NRPyPlusTOVID::K_atmosphere = %.15e
# Set atmospheric value of rho
IllinoisGRMHD::rho_b_atm = %.15e
NRPyPlusTOVID::rho_atmosphere = %.15e
# Set rho_ppoly_tab_in""" %(IDfilename,EOS_struct.neos,tau_atmosphere,EOS_struct.K_poly_tab[0],Kpoly_atm,rho_atmosphere,rho_atmosphere))
            for j in range(EOS_struct.neos-1):
                file.write("""
IllinoisGRMHD::rho_ppoly_tab_in[%d] = %.15e""" %(j,EOS_struct.rho_poly_tab[j]))
            file.write("""
# Set Gamma_atmosphere and Gamma_ppoly_tab_in
NRPyPlusTOVID::Gamma_atmosphere = %.15e""" %(Gamma_atm))
            for j in range(EOS_struct.neos):
                file.write("""
IllinoisGRMHD::Gamma_ppoly_tab_in[%d] = %.15e""" %(j,EOS_struct.Gamma_poly_tab[j]))
            file.write("""
# Set Gamma_th
IllinoisGRMHD::Gamma_th = %.15e
#.---------------------------------.
#| EOS_Omni parameters:            |
#| - n_pieces                      |
#| - hybrid_k0                     |
#| - hybrid_rho[j]   0<=j<=neos-2  |
#| - hybrid_gamma[j] 0<=j<=neos-1  |
#| - hybrid_gamma_th               |
#.---------------------------------.
# Set up the number of polytropic EOSs.
EOS_Omni::n_pieces = %d
# Set hybrid_k0 to K_ppoly_tab0
EOS_Omni::hybrid_k0 = %.15e
# Set hybrid_rho to rho_ppoly_tab_in""" %(Gamma_thermal,EOS_struct.neos,EOS_struct.K_poly_tab[0]))
            for j in range(EOS_struct.neos-1):
                file.write("""
EOS_Omni::hybrid_rho[%d] = %.15e""" %(j,EOS_struct.rho_poly_tab[j]))
            file.write("""
# Set hybrid_gamma to Gamma_ppoly_tab_in""")
            for j in range(EOS_struct.neos):
                file.write("""
EOS_Omni::hybrid_gamma[%d] = %.15e""" %(j,EOS_struct.Gamma_poly_tab[j]))
            file.write("""
# Set hybrid_gamma_th to Gamma_th
EOS_Omni::hybrid_gamma_th = %.15e
#.--------------------------------.
#| End of NRPy+ generated section |
#.--------------------------------.
#
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^""" %(Gamma_thermal))
    else:
        # Read et al. mode: EOSname must be a key of the NRPy+ dictionary of
        # piecewise-polytrope fits; the EOS table is built internally.
        import TOV.Piecewise_Polytrope__dict
        if EOSname not in TOV.Piecewise_Polytrope__dict.EOS_Read_et_al_dict:
            print("ERROR: Unknown EOS name "+EOSname)
            sys.exit(1)
        if Gamma_thermal is None: # Use "is None" instead of "==None", as the former is more correct.
            print("Error: Please set Gamma_thermal. Usage:")
            print("generate_IllinoisGRMHD_EOS_parameter_file(EOSname,outfilename,Gamma_thermal=None)")
            sys.exit(1)
        eos = set_up_EOS_parameters__Read_et_al_input_variables(EOSname)
        atm_index = polytropic_index_from_rhob(eos,rho_atmosphere)
        # BUGFIX: this branch previously dereferenced EOS_struct, which
        # defaults to None here (it is only supplied in "piecewise" mode)
        # and raised AttributeError. Use the locally constructed `eos`.
        Gamma_atm = eos.Gamma_poly_tab[atm_index]
        Kpoly_atm = eos.K_poly_tab[atm_index]
        IDfilename = "outputTOVpolytrope-"+EOSname+".txt"
        # This is done for cosmetic purposes, so that parameter files
        # of different EOS names all look the same. (ljust is a no-op for
        # names already at least this long.)
        largest_name_in_EOS_table = 6
        EOSname = EOSname.ljust(largest_name_in_EOS_table)
        with open(outfilename,"a") as file:
            file.write("""#
#.---------------------------------------.
#|     EOS Type: Piecewise Polytrope     |
#.---------------------------------------.
#| EOS name: """+EOSname+""" |
#.---------------------------------------.
#| Reference: Table II and III in        |
#|  Read et al. PRD 79,124032 (2009)     |
#| https://arxiv.org/pdf/0812.2163.pdf   |
#.---------------------------------------.
#| Note that while we use the values in  |
#| Read et al. (2009), we write them in  |
#| geometrized units where G = 1 = c. We |
#| also normalize the nuclear density to |
#| unity.                                |
#| You can read more about this in the   |
#| following NRPy+ tutorial module:      |
#| Tutorial-TOV-Piecewise_Polytrope_EOSs |
#.---------------------------------------.
#| Required inputs:                      |
#| - EOS name                            |
#| - Gamma_thermal                       |
#.---------------------------------------.
#| IllinoisGRMHD parameters:             |
#| - neos                                |
#| - K_ppoly_tab0                        |
#| - rho_ppoly_tab_in[j]   0<=j<=neos-2  |
#| - Gamma_ppoly_tab_in[j] 0<=j<=neos-1  |
#| - Gamma_th                            |
#| - tau_atm                             |
#| - rho_b_atm                           |
#.---------------------------------------.
# Set up the number of polytropic EOSs.
IllinoisGRMHD::neos = %d
# Set atmospheric value of tau
IllinoisGRMHD::tau_atm = %.15e
# Set K_ppoly_tab0
IllinoisGRMHD::K_ppoly_tab0 = %.15e
# Set atmospheric value of rho
IllinoisGRMHD::rho_b_atm = %.15e
# Set rho_ppoly_tab_in""" %(eos.neos,tau_atmosphere,eos.K_poly_tab[0],rho_atmosphere))
            for j in range(eos.neos-1):
                file.write("""
IllinoisGRMHD::rho_ppoly_tab_in[%d] = %.15e""" %(j,eos.rho_poly_tab[j]))
            file.write("""
# Set Gamma_ppoly_tab_in""")
            for j in range(eos.neos):
                file.write("""
IllinoisGRMHD::Gamma_ppoly_tab_in[%d] = %.15e""" %(j,eos.Gamma_poly_tab[j]))
            file.write("""
# Set Gamma_th
IllinoisGRMHD::Gamma_th = %.15e
#.---------------------------.
#| NRPyPlusTOVID parameters: |
#| - TOV_filename            |
#| - rho_atmosphere          |
#| - Gamma_atmosphere        |
#| - K_atmosphere            |
#.---------------------------.
# Set up initial data file name
NRPyPlusTOVID::TOV_filename = \"%s\"
# Set atmospheric value of rho
NRPyPlusTOVID::rho_atmosphere = %.15e
# Set Gamma_atmosphere
NRPyPlusTOVID::Gamma_atmosphere = %.15e
# Set K_atmosphere
NRPyPlusTOVID::K_atmosphere = %.15e
#.---------------------------------.
#| EOS_Omni parameters:            |
#| - n_pieces                      |
#| - hybrid_k0                     |
#| - hybrid_rho[j]   0<=j<=neos-2  |
#| - hybrid_gamma[j] 0<=j<=neos-1  |
#| - hybrid_gamma_th               |
#.---------------------------------.
# Set up the number of polytropic EOSs.
EOS_Omni::n_pieces = %d
# Set hybrid_k0 to K_ppoly_tab0
EOS_Omni::hybrid_k0 = %.15e
# Set hybrid_rho to rho_ppoly_tab_in""" %(Gamma_thermal,IDfilename,rho_atmosphere,Gamma_atm,Kpoly_atm,eos.neos,eos.K_poly_tab[0]))
            for j in range(eos.neos-1):
                file.write("""
EOS_Omni::hybrid_rho[%d] = %.15e""" %(j,eos.rho_poly_tab[j]))
            file.write("""
# Set hybrid_gamma to Gamma_ppoly_tab_in""")
            for j in range(eos.neos):
                file.write("""
EOS_Omni::hybrid_gamma[%d] = %.15e""" %(j,eos.Gamma_poly_tab[j]))
            file.write("""
# Set hybrid_gamma_th to Gamma_th
EOS_Omni::hybrid_gamma_th = %.15e
#.--------------------------------.
#| End of NRPy+ generated section |
#.--------------------------------.
#
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^""" %(Gamma_thermal))
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@TOV@Polytropic_EOSs.py@.PATH_END.py
|
{
"filename": "build.py",
"repo_name": "AFD-Illinois/ebhlight",
"repo_path": "ebhlight_extracted/ebhlight-master/prob/bhtherm/build.py",
"type": "Python"
}
|
# Problem-setup script for the 'bhtherm' problem: registers compile-time and
# runtime parameters with the ebhlight build helper, then configures/compiles.
import sys; sys.path.append('../../script');
# avoid .pyc files in the script dir; 'sys' is deleted to keep the namespace clean
sys.dont_write_bytecode = True; import bhlight as bhl; del sys
PROB = 'bhtherm'
### COMPILE TIME PARAMETERS ###
# SPATIAL RESOLUTION AND MPI DECOMPOSITION
bhl.config.set_cparm('N1TOT', 128)
bhl.config.set_cparm('N2TOT', 16)
bhl.config.set_cparm('N3TOT', 8)
bhl.config.set_cparm('N1CPU', 1)
bhl.config.set_cparm('N2CPU', 1)
bhl.config.set_cparm('N3CPU', 1)
# OPENMP PARALLELIZATION
bhl.config.set_cparm('OPENMP', True)
# COORDINATES
bhl.config.set_cparm('METRIC', 'MKS')
# ELECTRONS
# NOTE(review): BETA_HEAT/COULOMB are enabled while ELECTRONS is False --
# presumably ignored by the build when electrons are off; confirm.
bhl.config.set_cparm('ELECTRONS', False)
bhl.config.set_cparm('SUPPRESS_HIGHB_HEAT', False)
bhl.config.set_cparm('BETA_HEAT', True)
bhl.config.set_cparm('COULOMB', True)
# FLUID
bhl.config.set_cparm('RECONSTRUCTION', 'WENO')
bhl.config.set_cparm('X1L_GAS_BOUND', 'BC_PROB')
bhl.config.set_cparm('X1R_GAS_BOUND', 'BC_PROB')
bhl.config.set_cparm('X2L_GAS_BOUND', 'BC_POLAR')
bhl.config.set_cparm('X2R_GAS_BOUND', 'BC_POLAR')
bhl.config.set_cparm('X3L_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X3R_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X1L_INFLOW', False)
bhl.config.set_cparm('X1R_INFLOW', False)
bhl.config.set_cparm('X2L_INFLOW', False)
bhl.config.set_cparm('X2R_INFLOW', False)
bhl.config.set_cparm('X3L_INFLOW', False)
bhl.config.set_cparm('X3R_INFLOW', False)
# RADIATION
bhl.config.set_cparm('RADIATION', True)
bhl.config.set_cparm('EMISSION', True)
bhl.config.set_cparm('ABSORPTION', True)
bhl.config.set_cparm('SCATTERING', False)
bhl.config.set_cparm('NU_BINS_EMISS', 100)
bhl.config.set_cparm('NU_BINS_SPEC', 200)
bhl.config.set_cparm('GRAYABSORPTION', True)
bhl.config.set_cparm('BREMSSTRAHLUNG', False)
bhl.config.set_cparm('SYNCHROTRON', False)
bhl.config.set_cparm('X1L_RAD_BOUND', 'BC_REFLECT')
bhl.config.set_cparm('X1R_RAD_BOUND', 'BC_REFLECT')
bhl.config.set_cparm('X2L_RAD_BOUND', 'BC_REFLECT')
bhl.config.set_cparm('X2R_RAD_BOUND', 'BC_REFLECT')
bhl.config.set_cparm('X3L_RAD_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X3R_RAD_BOUND', 'BC_PERIODIC')
### RUNTIME PARAMETERS ###
bhl.config.set_rparm('tf', 'double', default = 1.5e2)
bhl.config.set_rparm('dt', 'double', default = 1.e-6)
bhl.config.set_rparm('Rout', 'double', default = 20.)
bhl.config.set_rparm('Rout_rad', 'double', default = 20.)
bhl.config.set_rparm('gam', 'double', default = 5./3.)
bhl.config.set_rparm('DTd', 'double', default = 5.)
bhl.config.set_rparm('DTl', 'double', default = 5.e-1)
bhl.config.set_rparm('DTr', 'double', default = 1000000.)
bhl.config.set_rparm('DNr', 'integer', default = 1000000)
# NOTE(review): the four tune_* calls below pass the default POSITIONALLY,
# unlike every other set_rparm call which uses the 'default=' keyword --
# verify the third positional argument of set_rparm is indeed the default.
bhl.config.set_rparm('a', 'double', default = 0.)
bhl.config.set_rparm('mbh', 'double', default = 1.)
bhl.config.set_rparm('M_unit', 'double', default = 1.989e27)
bhl.config.set_rparm('tune_emiss', 'double', 1.e-5)
bhl.config.set_rparm('tune_scatt', 'double', 0.1)
bhl.config.set_rparm('t0_tune_emiss', 'double', 0)
bhl.config.set_rparm('t0_tune_scatt', 'double', 0)
# NOTE(review): type name 'int' here vs 'integer' for DNr above -- confirm
# both spellings are accepted by set_rparm.
bhl.config.set_rparm('MAD', 'int', default = 1)
bhl.config.set_rparm('BHflux', 'double', default = 0.)
bhl.config.set_rparm('beta', 'double', default = 100.)
bhl.config.set_rparm('numin_emiss', 'double', default=1.e16)
bhl.config.set_rparm('numax_emiss', 'double', default=1.e26)
bhl.config.set_rparm('numin_spec', 'double', default=1.e10)
bhl.config.set_rparm('numax_spec', 'double', default=1.e25)
bhl.config.set_rparm('tp_over_te', 'double', default=1)
bhl.config.set_rparm('nph_per_proc', 'double', default=1.e6)
bhl.config.set_rparm('cour', 'double', default=0.4)
bhl.config.set_rparm('hslope', 'double', default=1)
bhl.config.set_rparm('kappa', 'double', default=1.234041e-21)
### CONFIGURE AND COMPILE ###
bhl.build(PROB)
|
AFD-IllinoisREPO_NAMEebhlightPATH_START.@ebhlight_extracted@ebhlight-master@prob@bhtherm@build.py@.PATH_END.py
|
{
"filename": "_meta.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmapgl/_meta.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``meta`` property of ``heatmapgl`` traces."""

    def __init__(self, plotly_name="meta", parent_name="heatmapgl", **kwargs):
        # Resolve the defaults up front; an explicit kwarg always wins
        # because ``pop`` removes it before the pass-through below.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "info")
        super(MetaValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmapgl@_meta.py@.PATH_END.py
|
{
"filename": "kobe.py",
"repo_name": "exomishra/kobe",
"repo_path": "kobe_extracted/kobe-master/code/kobe.py",
"type": "Python"
}
|
"""
Created on Fri Nov 29 14:38:18 2019
@author: lokeshmishra
"""
# Standard Imports
import os
import sys
#get_ipython().run_line_magic('matplotlib', 'inline')
#get_ipython().run_line_magic('matplotlib', 'inline')
import pylab as p
import pandas as pd
import numpy as np
import scipy as sp
import pprint
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
sns.set()
from scipy import constants as sp_constants
from astropy import constants as astropy_constants
from astropy import units as astropy_units
# from mymodules import dataread, analysis, plottingroutine
from kobe_choices import *
from kobe_columns import *
# SI units
# Hand-typed SI values (kg, m).
mass_sun = 1.9884*10**30
mass_earth = 5.9722*10**24
radius_sun = 6.957*10**8
radius_earth = 6.3781366*10**6
# NOTE(review): the four assignments below immediately OVERWRITE the plain
# floats above with astropy Constant objects, so the float definitions are
# effectively dead; downstream code must use ``.value`` for bare floats
# (e.g. ``mass_sun.value`` as done in process_input_bern).
mass_sun = astropy_constants.M_sun
mass_earth = astropy_constants.M_earth
radius_sun = astropy_constants.R_sun
radius_earth = astropy_constants.R_earth
def rotation_matrix(axis, angle, invert=False):
    """
    Return a 3D rotation matrix about one of the cartesian axes.

    Parameters
    ----------
    axis : string
        rotation axis: 'x', 'y' or 'z'
    angle : float
        angle of rotation in radians
    invert : bool (default - False)
        if True, return the inverse rotation (i.e. use angle --> -angle)

    Returns
    -------
    numpy.ndarray
        3x3 rotation matrix; entries with magnitude below 6.5e-17
        (floating-point noise from cos/sin at multiples of pi/2) are
        zeroed so exact-angle rotations give clean matrices.

    Raises
    ------
    ValueError
        if ``axis`` is not one of 'x', 'y', 'z'.  (The original fell
        through and crashed with UnboundLocalError instead.)

    Examples
    --------
    a) Rotate a vector along x, by 90 degrees, about z:
       i/o: rotation_matrix('z', np.pi/2, invert=False) @ np.array((1, 0, 0))
       o/p: array([0, 1, 0])
    b) Rotating arrays of vectors with numpy's einsum:
       np.einsum('bc,ac', rotation_matrix('x', np.pi/2), v)
    """
    if invert:
        angle = -angle
    # Hoist the trig so each matrix literal stays readable.
    c, s = np.cos(angle), np.sin(angle)
    if axis == 'x':
        mat = np.array([[1, 0, 0], [0, c, -s], [0, s, c]])
    elif axis == 'y':
        mat = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
    elif axis == 'z':
        mat = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
    else:
        raise ValueError("axis must be one of 'x', 'y', 'z'; got %r" % (axis,))
    # Zero out numerical noise (same threshold as the original).
    small_number = 6.5e-17
    mat[np.abs(mat) < small_number] = 0.0
    return mat
def calculate_shadows(df_input, system_id, npoints=1e7,
                      stellar_radius=None,seed=None, print_probability=True, grazing_transits=False):
    """
    This function calculates the transit shadow bands for all planets in a chosen system.

    Parameters
    ----------
    df_input : pandas dataframe
        dataframe containing planets with their orbital elements
    npoints : int/float
        number of grid points on sphere (default is 1e7)
    system_id : int
        system number for which shadows have to be calculated
    stellar_radius : float, optional
        Stellar radius in solar units.  If None, the mean stellar radius of
        the system's rows is used.
    seed : int
        seed the random number generator for grid points on sphere
    print_probability : bool, default True
        if true, prints individual transit probabilities for all planets
    grazing_transits : bool, default False
        if true, include grazing transits. if False only complete transits are calculated

    Returns
    -------
    df_shadow : pandas dataframe
        dataframe - column 1 is theta, the azimuthal angle range [0,2 pi]
                    column 2 is phi, the polar angle range [0,pi]
                    next column is for planet 1, and so on.
        Entry is 1 if (theta, phi) in shadow, else 0.
    df_angle_data : pandas dataframe
        (f,alpha) poisition of observer (wrt to each planet i.e. after rotations)
        useful in calculating impact parameter

    Notes
    -----
    Relies on module-level column names (col_system, col_sma, ...) star-imported
    from kobe_columns, and on the module-level ``dataset`` string from
    kobe_choices for the final summary print.

    Timing for npoints per planet:
        1e7 points - 1.2s
        1e8 points - 7s
        1e9 points - memory allocation error
    Results unreliable for less than 1e7 points.

    Individual transit probability of planet i is given by:
    df_shadow_output['planet i'].nonzero()[0].shape[0]/df_shadow_output.shape[0]
    """
    from astropy import units as astropy_units
    from astropy import constants as astropy_constants
    npoints = int(npoints)
    # Get system info: number of planets, stellar radius, etc..
    nplanets = df_input.loc[df_input[col_system]==system_id][col_planetmultiplicity].iat[0]
    if stellar_radius == None:
        r_star = df_input.loc[df_input[col_system]==system_id][col_r_star].mean() * astropy_constants.R_sun.value
    else:
        r_star = stellar_radius * astropy_constants.R_sun.value
    # Per-planet orbital elements for the chosen system (SI units for lengths).
    series_sma = df_input.loc[df_input[col_system]==system_id][col_sma] * astropy_constants.au.value
    series_r_planet = df_input.loc[df_input[col_system]==system_id][col_r_planet] * astropy_constants.R_earth.value
    series_ecc = df_input.loc[df_input[col_system]==system_id][col_ecc] #* astropy_units.one
    series_inc = df_input.loc[df_input[col_system]==system_id][col_inc] #* astropy_units.rad
    series_long_node = df_input.loc[df_input[col_system]==system_id][col_long_node] #* astropy_units.rad
    series_long_peri = df_input.loc[df_input[col_system]==system_id][col_long_peri] #* astropy_units.rad
    # argument of periastron = longitude of periastron - longitude of node
    series_arg_peri = series_long_peri - series_long_node
    # series_transitprob = df_input.loc[df_input[col_system]==system_id][col_transitprob]
    # Step 1 - create grid on sphere (uniform random points via the
    # standard (u1, u2) -> (theta, arccos(1-2*u2)) area-preserving map).
    r = 1 # unit sphere
    if seed == None:
        seed = 42 # meaning of life
    np.random.seed(seed)
    u1 = np.random.rand(npoints) # a random number generator
    np.random.seed(seed+1)
    u2 = np.random.rand(npoints) # a random number generator
    theta = 2*np.pi*u1 #* astropy_units.rad
    phi = np.arccos(1-2*u2) #* astropy_units.rad
    X = r*np.sin(phi)*np.cos(theta)
    Y = r*np.sin(phi)*np.sin(theta)
    Z = r*np.cos(phi)
    # Create an array of vectors, V
    V = np.column_stack((X,Y,Z))
    V_norm = np.linalg.norm(V, axis=1)
    ### SANITY TEST 1 ####
    # sanity check: norm of all vectors should be 1 (1 is True in bool)
    # check that the difference between norm of each vector and 1 is less than 1e-15.
    # If not replace it with 0 (0 is false).
    check_norm = np.where(abs(V_norm-1)<1e-15, V_norm, 0)
    if check_norm.all() == False:
        print('Sanity test 1 - FAILED. Not all vectors are unit normed. ')
    # Initiate output dataframe: first column is (theta) and second column is phi
    df_shadow_output = pd.DataFrame()
    df_shadow_output['theta'] = theta
    df_shadow_output['phi'] = phi
    df_angle_data = pd.DataFrame()
    df_angle_data['theta'] = theta
    df_angle_data['phi'] = phi
    # Step 2 - start loop over planets
    for index_planet in range(nplanets):
        # Step 2a - align orbit of current planet
        # inverse rotate along z by long_node
        # inverse rotate along x by inclination
        # finally inverse rotate along z by arg_peri
        v_prime = np.copy(V)
        v_prime = np.einsum('bc,ac',rotation_matrix('z',series_long_node.iat[index_planet],invert=True),v_prime)
        v_prime = np.einsum('bc,ac',rotation_matrix('x',series_inc.iat[index_planet],invert=True),v_prime)
        v_prime = np.einsum('bc,ac',rotation_matrix('z',series_arg_peri.iat[index_planet],invert=True),v_prime)
        v_prime_norm = np.linalg.norm(v_prime, axis=1)
        ### SANITY TEST 2 ####
        # sanity check: norm of all vectors should be 1 (1 is True in bool)
        # check that the difference between norm of each vector and 1 is less than 1e-15.
        # If not replace it with 0 (0 is false).
        check_norm = np.where(abs(v_prime_norm-1)<1e-15, v_prime_norm, 0)
        if check_norm.all() == False:
            print('Sanity Test 2 - FAILED. Not all vectors are unit normed. ')
        check_norm = np.where((abs(v_prime_norm)-1)<1e-2, v_prime_norm,0)
        if check_norm.all() == False:
            print('Sanity Test 3 - FAILED. Not all vectors are unit normed. ')
        ### Get cartesian of primed (rotated) vectors
        x_prime, y_prime, z_prime = np.hsplit(v_prime,3)
        x_prime = x_prime.reshape(-1,)
        y_prime = y_prime.reshape(-1,)
        z_prime = z_prime.reshape(-1,)
        r_xy_prime = np.sqrt(1 - z_prime*z_prime)
        #r_xy is norm of x & y
        ### Get inclination angle for each rotated observer
        # i.e. angle between Z and v_prime
        # since both vectors have unit norm, and since one vector points only along z, the dot product simplifies
        # inclination_rad = np.arccos(z_prime)
        # no output because this is same as alpha of observer, defined below
        ### Get polar angles from rotated vectors (f- true anomaly is azimuthal, alpha is polar)
        alpha = np.arccos(z_prime) #* astropy_units.rad # in radians
        f = np.arccos(x_prime/r_xy_prime) #* astropy_units.rad # in radians
        # Step 2b - Calculate pairs of (f, alpha) inside shadow
        # NOTE(review): the two branches look swapped -- including grazing
        # transits should WIDEN the band (r_star + r_planet), yet the True
        # branch uses (r_star - r_planet).  Confirm against the KOBE paper.
        if grazing_transits == True:
            height_shadow = ((r_star - series_r_planet.iat[index_planet])*(1 + series_ecc.iat[index_planet] * np.cos(f)))/((series_sma.iat[index_planet])*(1 - series_ecc.iat[index_planet]**2))
        elif grazing_transits == False:
            height_shadow = ((r_star + series_r_planet.iat[index_planet])*(1 + series_ecc.iat[index_planet] * np.cos(f)))/((series_sma.iat[index_planet])*(1 - series_ecc.iat[index_planet]**2))
        alpha_rad = np.arcsin(height_shadow)
        # for each f (true anomaly), we calculate the shadow band by calculating the min and max of shadow
        # shadow_min = np.pi/2 * astropy_units.rad - alpha_rad
        # shadow_max = np.pi/2 * astropy_units.rad + alpha_rad
        shadow_min = np.pi/2 - alpha_rad
        shadow_max = np.pi/2 + alpha_rad
        # For a simple probability calculation
        shadow = np.where((alpha >= shadow_min) & (alpha <= shadow_max), alpha,0)
        shadow_points = shadow.nonzero()[0].shape[0]
        # print('Probability planet %d = %.3f %%'%(index_planet+1,100*shadow_points/npoints))
        # Step 2c - mark (theta,phi) pair of current planet inside shadow
        # zeroes everywhere, except at shadow where it is 1
        # For storing location of shadow band
        shadow_index = np.where((alpha >= shadow_min) & (alpha <= shadow_max))[0]
        boolean_result = np.zeros((npoints))
        boolean_result[shadow_index] = 1
        ## OUTPUT!
        df_shadow_output['planet %d'%(index_planet+1)] = boolean_result
        df_angle_data['(f,alpha) planet %d'%(index_planet+1)] = list(np.column_stack((f,alpha)))
        # sanity check - number of f,alpha pairs = theta phi pairs
        if shadow_points != boolean_result.nonzero()[0].shape[0]:
            print('DANGER: Number of shadow points is not same in output! HELP!')
            print(shadow_points)
            print(boolean_result.nonzero()[0].shape[0])
        # Step 2d - repeat for next planet
    # Step 3 - calculate number of transiting planets for each point on the grid
    df_shadow_output['number_transiting_planet'] = df_shadow_output[list(df_shadow_output.columns)[2:]].sum(axis=1)
    if print_probability == True:
        for index_planet in range(nplanets):
            probability = df_shadow_output.iloc[:,2+index_planet].to_numpy().nonzero()[0].shape[0]/df_shadow_output.shape[0]
            print('Transit probability of planet %d = %.4f %%'%(index_planet+1,100*probability))
        print('Multi Transit Probability for %d planets = %.4f %%'%(nplanets,100*df_shadow_output.loc[df_shadow_output['number_transiting_planet']==nplanets].shape[0]/df_shadow_output.shape[0]))
        if nplanets >= 6:
            print('Multi Transit Probability for %d planets = %.4f %%'%(nplanets-5,100*df_shadow_output.loc[df_shadow_output['number_transiting_planet']==nplanets-5].shape[0]/df_shadow_output.shape[0]))
        if nplanets >= 11:
            print('Multi Transit Probability for %d planets = %.4f %%'%(nplanets-10,100*df_shadow_output.loc[df_shadow_output['number_transiting_planet']==nplanets-10].shape[0]/df_shadow_output.shape[0]))
        if nplanets >= 16:
            print('Multi Transit Probability for %d planets = %.4f %%'%(nplanets-15,100*df_shadow_output.loc[df_shadow_output['number_transiting_planet']==nplanets-15].shape[0]/df_shadow_output.shape[0]))
        if nplanets >= 21:
            print('Multi Transit Probability for %d planets = %.4f %%'%(nplanets-20,100*df_shadow_output.loc[df_shadow_output['number_transiting_planet']==nplanets-20].shape[0]/df_shadow_output.shape[0]))
        # print % of observers that see atleast one transit.
        print('Shadow calculation: %d planets in system %d in %s population with %.1e grid points.\n%.2f %% observers see atleast one transiting planet.'%(nplanets, system_id,dataset, npoints,100*df_shadow_output.loc[df_shadow_output['number_transiting_planet']!=0].shape[0]/npoints))
        print('----------------------------------')
    return df_shadow_output, df_angle_data
def read_keplercompleteness(robovetter_file):
    """
    Read the IPAC file containing Kepler Simulated Robovetter Results.

    Parameters
    ----------
    robovetter_file : string
        absolute path to robovetter file
        set/see details in kobe_choices

    Returns
    -------
    df_robovetter : pandas dataframe
        one row per injected TCE.  A 'vetting outcome' column is added:
        1.0 where the disposition is 'PC' (planetary candidate), 0.0
        otherwise ('FP', false positive).  Every column except 'tceid'
        and 'disp' is cast to float.
    """
    # Use a context manager so the file handle is always closed (the original
    # left it open for the lifetime of the process).
    with open(robovetter_file, 'r') as f:
        # Read and ignore the 75 header lines of the IPAC table.
        for _ in range(75):
            f.readline()
        # One whitespace-split record per remaining non-blank line (the
        # blank-line guard avoids an IndexError on a trailing newline).
        rows = [line.strip().split() for line in f if line.strip()]
    # (output column name, whitespace-token index), in the original order.
    # Token 24 (insolation on the planet) is deliberately skipped, as before.
    columns = [
        ('tceid', 0), ('kicid', 1), ('disp', 2), ('dispscore', 3),
        ('Flag - Not Transit Like', 4), ('Flag - StellarEclipse', 5),
        ('Flag - CentroidOffset', 6), ('Flag - EphemerisMatch', 7),
        ('period [days]', 8), ('epoch [barycentric kepler julian date]', 9),
        ('expect_mes', 10), ('detect_mes', 11), ('ntran', 12),
        ('depth_ppm', 13), ('t_duration [hours]', 14),
        ('rp [R_earth]', 15),    # earth radii
        ('rs [R_sun]', 16),      # solar radii
        ('t_star [Kelvin]', 17), ('logg [cm/s2]', 18), ('sma [AU]', 19),
        ('rp_over_rs', 20), ('a_over_rs', 21), ('impact', 22),
        ('snr_dv', 23), ('fit_prov', 25),
    ]
    df_robovetter = pd.DataFrame()
    for name, idx in columns:
        df_robovetter[name] = [row[idx] for row in rows]
    # Robovetter disposition to boolean:
    # 'PC' (planetary candidate) => 1.0, 'FP' (false positive) => 0.0
    df_robovetter['vetting outcome'] = np.where(
        df_robovetter['disp'] == 'PC', 1.0, 0.0)
    # Cast the numeric columns to float (same list as the original;
    # NOTE(review): 'fit_prov' is cast too -- presumably a numeric flag).
    float_cols = ['kicid', 'dispscore', 'Flag - Not Transit Like',
                  'Flag - StellarEclipse', 'Flag - CentroidOffset',
                  'Flag - EphemerisMatch', 'period [days]',
                  'epoch [barycentric kepler julian date]', 'expect_mes', 'detect_mes',
                  'ntran', 'depth_ppm', 't_duration [hours]', 'rp [R_earth]',
                  'rs [R_sun]', 't_star [Kelvin]', 'logg [cm/s2]', 'sma [AU]',
                  'rp_over_rs', 'a_over_rs', 'impact', 'snr_dv', 'fit_prov',
                  'vetting outcome']
    for name in float_cols:
        df_robovetter[name] = df_robovetter[name].astype(float)
    return df_robovetter
def calculate_completeness(df_input, x_col, y_col, x_range, y_range, injection_threshold,spacing,dispscore_cutoff):
    """
    This function calculates completeness via counting number of injected TCE's vetted as PC.

    Parameters
    ----------
    df_input : pandas dataframe
        dataframe with Kepler Simulated results
        output of function 'read_keplercompleteness()'
    x_col : string
        provide one axis on which completeness is projected
        usually : 'period [days]'
    y_col : string
        usually : 'rp [R_earth]'
    x_range : list
        format : [min, max, step size]
    y_range : list
        format : [min, max, step size]
    injection_threshold : int
        the minimum number of injected TCE's per bin (inclusive) for which completeness is calculated.
        if number of injected TCS in a bin falls below this threshold,
        then completeness for this bin is fixed to -0.01.
    spacing : string
        options are: 'linear' or 'log'
    dispscore_cutoff : float (between 0 and 1)
        used to incorporate Kepler 'reliability' (see sec: 7.3.4 in Thompson et. al. 2018, or Mulders et. al. 2018 or Bryson et. al. 2019)
        use : 0 to not invoke do anything
        use : 0.9 to follow Mulder et. al.

    Note
    ----
    At each bin lower point in included, and upper point is excluded.

    NOTE(review): for 'linear' spacing the third element of x_range/y_range
    is a STEP SIZE, but for 'log' spacing it is the NUMBER of bins -- the
    docstring above only says "step size"; confirm callers pass the right
    thing for 'log'.
    """
    # spacing -- build the bin-edge arrays
    if spacing == 'linear':
        x_array = np.arange(x_range[0],x_range[1],x_range[2])
        y_array = np.arange(y_range[0],y_range[1],y_range[2])
    elif spacing =='log':
        xrange = [x_range[0],x_range[1]]
        yrange = [y_range[0],y_range[1]]
        xbins = x_range[2]
        ybins = y_range[2]
        # logarithmic spacing such that the (bins-1)-th edge lands on max
        xdeltalog = np.log10(xrange[1]/xrange[0])/(xbins-1)
        ydeltalog = np.log10(yrange[1]/yrange[0])/(ybins-1)
        x_array, y_array = np.zeros(xbins),np.zeros(ybins)
        for index in range(xbins):
            x_array[index] = 10**(np.log10(xrange[0]) + index*xdeltalog)
        for index in range(ybins):
            y_array[index] = 10**(np.log10(yrange[0]) + index*ydeltalog)
    # per-bin counters: all injected TCEs, those vetted as PC, and the ratio
    total_injections = np.zeros((x_array.shape[0],y_array.shape[0]))
    pc_injections = np.zeros((x_array.shape[0],y_array.shape[0]))
    completeness = np.zeros((x_array.shape[0],y_array.shape[0]))
    thresholdbins = 0
    emptybins = 0
    totalbins = 0
    for xind in range(x_array.shape[0]):
        for yind in range(y_array.shape[0]):
            # bin membership: lower edge inclusive, upper edge exclusive
            if spacing == 'linear':
                condition = (df_input[x_col]>=x_array[xind]) & (df_input[x_col]< x_array[xind]+x_range[2]) & (df_input[y_col]>=y_array[yind]) & (df_input[y_col]< y_array[yind]+y_range[2])
            elif spacing == 'log':
                condition = (df_input[x_col]>=10**np.log10(x_array[xind])) & (df_input[x_col]< 10**(np.log10(x_array[xind])+xdeltalog)) & (df_input[y_col]>=10**(np.log10(y_array[yind]))) & (df_input[y_col]< 10**(np.log10(y_array[yind])+ydeltalog))
            total_injections[xind][yind] = int(df_input.loc[condition].shape[0])
            # PCs that also pass the disposition-score (reliability) cut
            pc_injections[xind][yind] = int(df_input.loc[condition & (df_input['vetting outcome']==1) & (df_input['dispscore'] >= dispscore_cutoff)].shape[0])
            ## threshold check: under-populated bins are flagged with -0.01
            if total_injections[xind][yind] == 0:
                completeness[xind][yind] = -0.01
                emptybins += 1
            elif total_injections[xind][yind] < injection_threshold:
                completeness[xind][yind] = -0.01
                thresholdbins += 1
            else:
                completeness[xind][yind] = pc_injections[xind][yind]/total_injections[xind][yind]
            totalbins += 1
    # NOTE(review): print_details is a module-level flag star-imported from
    # kobe_choices; it is not a parameter of this function.
    if print_details == True:
        print('Bins along x and y: ', total_injections.shape)
        print('Number of bins with less than %d injections = %d'%(injection_threshold,thresholdbins))
        print('Number of empty bins = %d'%(emptybins))
        print('Number of total bins = %d'%(totalbins))
    # NOTE(review): the metadata text below says completeness of -1 flags
    # under-filled bins, but the code above stores -0.01 -- text and code
    # disagree; confirm which is intended before relying on the flag value.
    metadata = 'Completeness Calculation Information: \n Column along x is %s with min %.2f, max %.2f and step size %d. \n Column along y is %s with min %.2f, max %.2f and step size %.2f. \n Order of output: this text, total injections, pc injections, completeness(over threshold), \n x_range(mix, max, step), y_range(min, max, step). \n Note: Completeness ranges from 0-1. \n Completeness of -1 flags when total injected TCEs are either 0 or less than threshold injections of %d. \n Spacing is %s. \n Disposition Score cutoff is at %.1f'%(x_col,x_range[0],x_range[1],x_range[2],y_col,y_range[0],y_range[1],y_range[2],injection_threshold, spacing, dispscore_cutoff)
    output = np.array([metadata,total_injections, pc_injections, completeness, x_range, y_range,dispscore_cutoff], dtype=object)
    return output
def read_cdpp(cdpp_file, t_cdpp):
    """
    Read the Kepler stellar noise table and return a CDPP distribution.

    Parameters
    ----------
    cdpp_file : string
        absolute path to stellar noise data from Kepler (CSV)
        set path in kobe_choices
    t_cdpp : int
        approximate duration of transit in hours
        options are: 3, 6, 9, 12

    Returns
    -------
    cdpp : numpy array
        CDPP values, one per FGK / solar-type star surviving the cuts

    Raises
    ------
    ValueError
        if t_cdpp is not one of 3, 6, 9, 12.  (The original printed an
        error message and then crashed with UnboundLocalError.)
    """
    # Map transit duration to its CDPP column; validate up front so a bad
    # t_cdpp fails before any file I/O.
    cdpp_column = {3: 'rrmscdpp03p0', 6: 'rrmscdpp06p0',
                   9: 'rrmscdpp09p0', 12: 'rrmscdpp12p0'}
    if t_cdpp not in cdpp_column:
        raise ValueError('t_cdpp must be one of 3, 6, 9 or 12; got %r' % (t_cdpp,))
    # part 1 - read noise
    df_keplercomplete = pd.read_csv(cdpp_file, sep=',', header=0, low_memory=False)
    # Drop rows with missing CDPP.
    # NOTE(review): the NaN cut is always made on 'rrmscdpp06p0' regardless of
    # t_cdpp (matching the original behaviour) -- confirm this is intended.
    na_indices = np.where(df_keplercomplete['rrmscdpp06p0'].isna().to_numpy())[0]
    df_keplercomplete = df_keplercomplete.drop(axis=0, index=na_indices, inplace=False)
    # part 2 - cuts to select noise for FGK - Solar Type Stars
    # (temperatures from Pecaut and Mamajek 2013)
    condition_radius = (df_keplercomplete['radius'] <= 5)
    condition_mass = (df_keplercomplete['mass'] >= 0.7) & (df_keplercomplete['mass'] <= 1.3)
    condition_temp = (df_keplercomplete['teff'] >= 3880) & (df_keplercomplete['teff'] <= 7200)
    df_keplercomplete = df_keplercomplete.loc[condition_radius & condition_mass & condition_temp]
    # part 3 - return the requested-duration CDPP distribution
    return df_keplercomplete[cdpp_column[t_cdpp]].to_numpy()
def process_input_bern(df_input, mass_threshold, calculate_period, mstar):
    """
    Process the input a bit. This function has been developed for Bern Model.
    You may need to do your input processing.

    Parameters
    ----------
    df_input : pandas dataframe
        an input of data
    mass_threshold : float/int
        minimum mass of a planet which is kept (planets with mass < mass_threshold are dropped)
    calculate_period : boolean
        whether to calculate period from sma, mass of star
    mstar : float or string
        constant mass of star in msun (float) when mstar_column is False,
        or the stellar-mass column name (string) when mstar_column is True

    Output
    ------
    df_output : pandas dataframe
        an output of data

    Notes (for processing bern model output)
    ----
    - place mass cut
    - remove ejected/accreeted/collided planets
    - deal with radius definition
    - fix negative sma
    - calculate periods
    - convert Rj into Rearth
    - convert inclination to degrees
    - sort planets by sma
    - correct system numbers, planet numbers and multiplicity
    - correct index of final output

    NOTE(review): relies on module-level globals star-imported from
    kobe_choices / kobe_columns (col_*, time_age, mstar_column, mass_sun);
    none of them are parameters.
    """
    # masscut
    df_input = df_input.loc[df_input[col_mass]>= mass_threshold]
    # remove planets that did not end well
    # in bern model this is checked via column 'emps_status'
    # status codes: 0 fine, negative accreted by planet, 2 ejected, 3 accreted by star
    # 5 couldn't start, 6 not started, 8 core evaporated, 9 tidally spiralled
    df_input = df_input.loc[df_input[col_status]==0]
    # convert Rj to Rearth
    # astropy_constants.R_jup/astropy_constants.R_earth = 11.208981
    df_input[col_core_radius] = df_input[col_core_radius]*11.208981
    df_input[col_total_radius] = df_input[col_total_radius]*11.208981
    df_input[col_transit_radius] = df_input[col_transit_radius]*11.208981
    df_input[col_p1bar_radius] = df_input[col_p1bar_radius]*11.208981
    # fixing radius
    # if time_age is < 2.3e7 then use max of core or p1bar radius (planete)
    # if time_age is > 2.3e7 then use max of core or transit radius (completo)
    # NOTE(review): despite the original comment, the chosen radius is stored
    # in a new 'radius' column (not col_total_radius), and when
    # float(time_age) == 2.3e7 exactly, no 'radius' column is created at all.
    if time_age != None:
        if float(time_age) < 2.3e7:
            df_input['radius'] = df_input[[col_core_radius,col_p1bar_radius]].max(axis=1)
        elif float(time_age) > 2.3e7:
            df_input['radius'] = df_input[[col_core_radius,col_transit_radius]].max(axis=1)
    # fix planets with negative sma (planets with sma less than 1e3 are put at 1e2)
    # note: 4e3 au is solar radii
    if df_input.loc[df_input[col_sma]<0].shape[0]>0:
        print('Error: %d Planets found with SMA < 0 au. SMA replaced to 0.01 au.'%(df_input.loc[df_input[col_sma]<0][col_planet].unique().shape[0]))
        # NOTE(review): DataFrame.where with a Series condition replaces EVERY
        # column of the offending rows with 0.01, not just the sma column.
        # Likely intended: df_input.loc[df_input[col_sma] < 0, col_sma] = 1e-2
        df_input = df_input.where(df_input[col_sma]>=0,other=10**-2)
    # if mstar is not a column then set constant value:
    # else copy column
    if mstar_column == False:
        print('mstar is not a column; reading value mstar = %.2f msun.'%(mstar))
        mstar = mstar * mass_sun.value
        df_input[col_m_star] = mstar
    elif mstar_column == True:
        # NOTE(review): unlike process_input_completo, the column value is NOT
        # multiplied by mass_sun.value here -- confirm units are consistent.
        df_input[col_m_star] = df_input[mstar]
        print('mstar column found.')
    # calculate period (assuming circular orbit), Kepler's third law in SI,
    # with the final result converted from seconds to days
    if calculate_period == True:
        kepler_orbit_constant = 4*(np.pi**2)*(sp_constants.gravitational_constant**-1)*(df_input[col_m_star]**-1)
        df_input[col_period] = ((((df_input[col_sma]*sp_constants.astronomical_unit)**3)*kepler_orbit_constant)**0.5)*(sp_constants.day**-1)
    # convert inclination from radians to degrees
    # df_input[col_inc] = df_input[col_inc]*(180/np.pi)
    # sort by sma
    df_input = sort_by_column(df_input, which_column=col_sma, col_system=col_system_pop,col_planet=col_planet_pop)
    # correct system, planet numbers and add multiplicity
    df_input = correct_system_planet_numbers(df_input, col_system=col_system_pop, col_planet=col_planet_pop,
                                             new_system_column=col_system, new_planet_column=col_planet,
                                             col_planetmultiplicity=col_planetmultiplicity, correct_index=True)
    return df_input
def process_input_completo(df_input, mass_threshold, calculate_period, mstar):
    """
    Process the input a bit. This function has been developed for Bern Model.
    You may need to do your input processing.

    Parameters
    ----------
    df_input : pandas dataframe
        an input of data
    mass_threshold : float/int
        minimum mass of a planet which is kept (planets with mass < mass_threshold are dropped)
    calculate_period : boolean
        whether to calculate period from sma, mass of star
    mstar : float or string
        constant mass of star in msun (float) when mstar_column is False,
        or the stellar-mass column name (string) when mstar_column is True

    Output
    ------
    df_output : pandas dataframe
        an output of data

    Notes (for processing bern model output)
    ----
    - floor emps status to make integer
    - remove ejected/accreeted/collided planets
    - deal with radius definition
    - fix negative sma
    - calculate periods
    - read orbital elements from ref_red
    - convert inclination to degrees
    - sort planets by sma
    - correct system numbers, planet numbers and multiplicity
    - correct index of final output

    NOTE(review): relies on module-level globals star-imported from
    kobe_choices / kobe_columns (col_*, time_age, mstar_column, mass_sun,
    auxiliary_file, print_details); none of them are parameters.
    """
    # masscut
    df_input = df_input.loc[df_input[col_mass]>= mass_threshold]
    # remove planets that did not end well
    # in bern model this is checked via column 'emps_status'
    # status codes: 0 fine, negative accreted by planet, 2 ejected, 3 accreted by star
    # 5 couldn't start, 6 not started, 8 core evaporated, 9 tidally spiralled
    df_input[col_status] = np.floor(df_input[col_status])
    df_input = df_input.loc[df_input[col_status]==0]
    # reset to a clean 0..N-1 index (the positional per-row copy below relies on it)
    df_input.index = np.arange(df_input.shape[0])
    # fixing radius
    # if time_age is < 2.3e7 then use max of core or p1bar radius (planete)
    # if time_age is > 2.3e7 then use max of core or transit radius (completo)
    # NOTE(review): despite the original comment, the chosen radius is stored
    # in a new 'radius' column (not col_total_radius), and when
    # float(time_age) == 2.3e7 exactly, no 'radius' column is created at all.
    if time_age != None:
        if float(time_age) < 2.3e7:
            df_input['radius'] = df_input[[col_core_radius,col_p1bar_radius]].max(axis=1)
        elif float(time_age) > 2.3e7:
            df_input['radius'] = df_input[[col_core_radius,col_transit_radius]].max(axis=1)
    # fix planets with negative sma (planets with sma less than 1e3 are put at 1e2)
    # note: 4e3 au is solar radii
    if df_input.loc[df_input[col_sma]<0].shape[0]>0:
        print('Error: %d Planets found with SMA < 0 au. SMA replaced to 0.01 au.'%(df_input.loc[df_input[col_sma]<0][col_planet].unique().shape[0]))
        # NOTE(review): DataFrame.where with a Series condition replaces EVERY
        # column of the offending rows with 0.01, not just the sma column.
        # Likely intended: df_input.loc[df_input[col_sma] < 0, col_sma] = 1e-2
        df_input = df_input.where(df_input[col_sma]>=0,other=10**-2)
    # if mstar is not a column then set constant value:
    # else copy column
    if mstar_column == False:
        print('mstar is not a column; reading value mstar = %.2f msun.'%(mstar))
        mstar = mstar * mass_sun.value
        df_input[col_m_star] = mstar
    elif mstar_column == True:
        df_input[col_m_star] = df_input[mstar] * mass_sun.value
        print('mstar column found.')
    # calculate period (assuming circular orbit), Kepler's third law in SI,
    # with the final result converted from seconds to days
    if calculate_period == True:
        kepler_orbit_constant = 4*(np.pi**2)*(sp_constants.gravitational_constant**-1)*(df_input[col_m_star]**-1)
        df_input[col_period] = ((((df_input[col_sma]*sp_constants.astronomical_unit)**3)*kepler_orbit_constant)**0.5)*(sp_constants.day**-1)
    # convert Rj to Rearth
    # astropy_constants.R_jup/astropy_constants.R_earth = 11.208981
    # df_input[col_core_radius] = df_input[col_core_radius]*11.208981
    # df_input[col_total_radius] = df_input[col_total_radius]*11.208981
    # df_input[col_transit_radius] = df_input[col_transit_radius]*11.208981
    # df_input[col_p1bar_radius] = df_input[col_p1bar_radius]*11.208981
    # read orbital elements from auxiliary file:
    df_refred = pd.read_csv(auxiliary_file, delim_whitespace=True, header=None, low_memory=False)
    # 1-based ref_red column numbers converted to 0-based positions
    col_refred_system = 128-1
    col_refred_planet = 129-1
    # read from corresponding ref_red
    col_refred_ecc = 63-1
    col_refred_inc = 64-1
    col_refred_long_node = 110-1
    col_refred_long_peri = 111-1
    # loop over input dataframe
    # NOTE(review): this per-row boolean scan of df_refred is
    # O(N_planets * N_refred); a merge on (system, planet) would be much
    # faster if this ever becomes a bottleneck.
    if print_details == True:
        print('Copying data from auxiliary file.')
    for index_row in range(df_input.shape[0]):
        planet_id = df_input.iloc[index_row,col_planet_pop]
        system_id = df_input.iloc[index_row,col_system_pop]
        condition = (df_refred[col_refred_system]==system_id) & (df_refred[col_refred_planet]==planet_id)
        df_input.loc[index_row, col_ecc] = df_refred.loc[condition][col_refred_ecc].to_numpy()[0]
        df_input.loc[index_row, col_inc] = df_refred.loc[condition][col_refred_inc].to_numpy()[0]
        df_input.loc[index_row, col_long_node] = df_refred.loc[condition][col_refred_long_node].to_numpy()[0]
        df_input.loc[index_row, col_long_peri] = df_refred.loc[condition][col_refred_long_peri].to_numpy()[0]
        # print(index_row, planet_id, system_id,df_refred.loc[condition][col_refred_ecc].to_numpy()[0])
    if print_details == True:
        print('Finished copying data.')
    # convert inclination from radians to degrees
    # df_input[col_inc] = df_input[col_inc]*(180/np.pi)
    # sort by sma
    df_input = sort_by_column(df_input, which_column=col_sma, col_system=col_system_pop,col_planet=col_planet_pop)
    # correct system, planet numbers and add multiplicity
    df_input = correct_system_planet_numbers(df_input, col_system=col_system_pop, col_planet=col_planet_pop,
                                             new_system_column=col_system, new_planet_column=col_planet,
                                             col_planetmultiplicity=col_planetmultiplicity, correct_index=True)
    return df_input
# Count number of planets per system
def planetspersystem(df_input, column_dictionary=None,system_column=None,planet_column=None):
    """
    Count planets per system for a dataframe of planetary systems.

    Parameters
    ----------
    df_input : pandas dataframe
        one row per planet, carrying a system-id column and a planet-id column
    column_dictionary : dict, optional
        used to look up the column names via dataread.find_key when
        system_column / planet_column are not given explicitly
    system_column, planet_column : column labels, optional
        explicit column labels; take precedence over column_dictionary

    Output
    ------
    pps_system : list
        List 1 - system_wise -- one entry per system: its planet count
    pps_planet : list
        List 2 - planet_wise -- one entry per planet: the multiplicity of the
        system it belongs to (len(pps_planet) == df_input.shape[0])

    NOTE
    ----
    Rows are grouped by *consecutive* runs of equal system ids, matching the
    sorted layout produced upstream. The previous implementation indexed one
    row past the end of the dataframe (IndexError on the last row) and its
    early `break` dropped the final system from the count; both are fixed.
    """
    if system_column is None:
        system_column = dataread.find_key(column_dictionary,'System')
    if planet_column is None:
        planet_column = dataread.find_key(column_dictionary,'Planet')
    pps_system = []
    previous_system = None
    for system_id in df_input[system_column]:
        if pps_system and system_id == previous_system:
            # same consecutive system: extend the current run
            pps_system[-1] += 1
        else:
            # a new system starts a fresh run of length 1
            pps_system.append(1)
            previous_system = system_id
    # Now we write the number of planets per system for each planet
    pps_planet = [count for count in pps_system for _ in range(count)]
    return pps_system,pps_planet
# Function to sort dataframe by a column
def sort_by_column(df_input,which_column, col_system, col_planet, ascending_order=True):
    """
    This function is used to sort planets within a system according to the sorting column.
    Parameters
    ----------
    df_input : dataframe
        one row per planet; system ids in col_system are assumed to run
        contiguously from 1 up to the id found in the last row
    which_column : column info
        which column to sort on
    col_system : column_info
        system column to read
    col_planet : column info
        planet column to read (kept for interface symmetry; planet ids are
        renumbered afterwards by correct_system_planet_numbers)
    ascending_order : (boolean)
        True means low to high
        False means high to low
    Output
    ------
    df_sorted : dataframe sorted by column, with a fresh 0..n-1 index
    """
    # Correction for index list: make positional and label indexing agree
    df_input.index = np.arange(df_input.shape[0])
    # Total number of systems (ids are contiguous, so the last row holds it).
    # BUGFIX: previously read the module global col_system_pop here instead of
    # the col_system argument, silently ignoring the parameter.
    iterations = int(df_input.iloc[-1][col_system])
    system_array = df_input[col_system]
    # Collect the per-system sorted slices, then concatenate once at the end
    # (DataFrame.append was removed in pandas 2.0).
    sorted_chunks = []
    # Loop over each system
    for current_system in range(1, iterations + 1):
        current_indices = np.where(system_array==current_system)
        entries = df_input.iloc[current_indices[0]][which_column]
        # Sort entries within this system:
        entries = entries.sort_values(ascending=ascending_order)
        sorted_indices = entries.index
        # Place the sorted rows into the output list
        sorted_chunks.append(df_input.iloc[sorted_indices][:])
    if sorted_chunks:
        df_sorted = pd.concat(sorted_chunks, ignore_index=True)
    else:
        # empty input: keep the old behaviour of returning an empty frame
        df_sorted = pd.DataFrame()
    return df_sorted
# Function to correct the system and planet number columns.
def correct_system_planet_numbers(df_input, column_dictionary=None,col_system=None, col_planet=None,new_system_column=None, new_planet_column = None, col_planetmultiplicity=None,correct_index=True):
    """
    Renumber system and planet ids so that both run contiguously.

    System ids become 1..n_systems and planet ids restart at 1 inside each
    system. Optionally writes the per-planet multiplicity column and resets
    the dataframe index. Column labels are resolved through
    dataread.find_key when not supplied explicitly.
    """
    if col_system is None:
        col_system = dataread.find_key(column_dictionary,'System')
    if col_planet is None:
        col_planet = dataread.find_key(column_dictionary,'Planet')
    pps_system, pps_planet = planetspersystem(df_input,column_dictionary,col_system, col_planet)
    # Contiguous system ids: system k (1-based) is repeated once per planet.
    system_ids = []
    for sys_number, planet_count in enumerate(pps_system, start=1):
        system_ids.extend([sys_number] * planet_count)
    target_system_column = col_system if new_system_column is None else new_system_column
    df_input[target_system_column] = system_ids
    # Planet ids restart at 1 inside every system.
    planet_ids = []
    for planet_count in pps_system:
        planet_ids.extend(range(1, planet_count + 1))
    target_planet_column = col_planet if new_planet_column is None else new_planet_column
    df_input[target_planet_column] = planet_ids
    # Optionally store each planet's system multiplicity.
    if col_planetmultiplicity is not None:
        df_input[col_planetmultiplicity] = pps_planet
    # Optionally reset the index to 0..n-1.
    if correct_index:
        df_input.index = np.arange(df_input.shape[0])
    return df_input
def kobe_shadows(df_input,cdpp):
    """
    The KOBE Shadows module: calculate transit shadow bands for all planets
    NOTE
    ----
    1. the transit signal (rplanet/rstar)**2 is calculated in driver for all planets
    because this remains same for all planets irrespective of observers
    2. CDPP for each KOBE shadow system is sampled at end of this function
    3. relies on module-level configuration: simulate_nstars, print_details,
    print_fine_details, grazing_transits, break_after_system, col_system
    Parameters
    ----------
    df_input : pandas dataframe
        input dataframe processed suitably
    cdpp : array-like
        pool of Kepler CDPP values [ppm]; one value is drawn at random per
        system view and stored (converted to fractional units) per planet
    Output
    ------
    df_kobe_output : pandas dataframe
        dataframe of planets that transit (potentially detectable)
        columns: all from input, some are added
    sanity_checklist : list
        list containing values for sanity checks in driver
    """
    # number of systems in current population
    nsystem = df_input[col_system].unique().shape[0]
    # number of views to keep for each system (nsystem*nviews = simulate_nstars)
    nviews = int(simulate_nstars/nsystem)
    # number of views with non-zero transiting planets (Kept for later sanity check on the length of final dataframe)
    # creating two of these for two different purposes
    nsystem_nonzero_views1 = 0 # via number of observers who see atleast a planet
    nsystem_nonzero_views2 = 0 # via loop appending all such potentially observable planets
    # total number of transiting planets (for later sanity check)
    nplanet_transiting = 0
    # Start a KOBE output dataframe
    df_kobe_output = pd.DataFrame()
    for index_system in range(nsystem):
        # Loop 2
        # This loops over all the systems present in the current ref_red file.
        if print_details == True:
            print('Calculating Transit Shadow Band for system - %d'%(index_system+1))
        # step 5 call kepler inclination or call kepler shadows
        # df_shadow = kobe.calculate_shadows(df_input=df_input, dict_column=dict_input,system_id=index_system,npoints=1e7,print_probability=print_details,grazing_transits=grazing_transits)
        # keep only nviews
        # np.random.seed(42)
        # df_shadow = df_shadow.loc[np.random.choice(df_shadow.index, size=nviews, replace=False)]
        # Here, we do a small shortcut to cut down computation time.
        # Instead of calculating shadows at 1e7 point and then randomly selecting few views.
        # We calculate shadows for only required views.
        df_shadow, df_angle_data = calculate_shadows(df_input=df_input, system_id=index_system+1,npoints=nviews,print_probability=print_fine_details,grazing_transits=grazing_transits)
        # reject rows where number of transiting planets is zero!
        df_shadow = df_shadow.loc[df_shadow['number_transiting_planet']!=0]
        # keep the observer-angle rows aligned with the surviving shadow rows
        df_angle_data = df_angle_data.loc[df_shadow.index]
        # check that the two frames have the same shape:
        if df_shadow.shape[0]!= df_angle_data.shape[0]:
            print('BIG PROBLEM BOSS: The number of views in angle dataframe is not the same with the shadow dataframe. WTF?')
        # re-number the indices of this dataframe (used in the loop later on)
        df_shadow.index = np.arange(df_shadow.shape[0])
        df_angle_data.index = np.arange(df_angle_data.shape[0])
        # add number of remaining views to number of views with non-zero transiting planets (for later sanity check)
        nsystem_nonzero_views1 = nsystem_nonzero_views1 + df_shadow.shape[0]
        # add number of transiting planets
        nplanet_transiting = nplanet_transiting + int(df_shadow['number_transiting_planet'].sum())
        ####################################
        ####################################
        ####################################
        # Step 4: KOBE -The output file
        ####################################
        ####################################
        ####################################
        # Loop over number of system-views to be added (each row of df_shadow)
        iterations_system_views = df_shadow.shape[0]
        for index_system_views in range(iterations_system_views):
            # Loop 3
            # This loops over all the views which have a transiting planet.
            # our aim in this loop is to select the rows corresponding to the planets in shadow and
            # add it to df_kobe_output.
            nsystem_nonzero_views2 += 1
            # we start by selecting the system number of the current system (add 1 because of python)
            condition1 = (df_input[col_system] == index_system+1)
            # some numpy magic to get planet indices from the current row
            # (columns holding 1 mark planets in this observer's shadow band)
            planet_indices = df_shadow.iloc[[index_system_views],:].where(df_shadow==1).values[0]
            planet_indices = np.delete(planet_indices,-1) # get rid of the last column
            planet_indices = np.delete(planet_indices, [0,1]) # get rid of theta, phi
            planet_indices = list(np.where(planet_indices==1)[0])
            # finally append!
            # NOTE(review): DataFrame.append was removed in pandas 2.0 --
            # this requires pandas < 2 (pd.concat is the modern equivalent).
            df_kobe_output = df_kobe_output.append(df_input.loc[condition1].iloc[planet_indices,:],ignore_index=True,sort=False)
            # reorder the indices of this dataframe
            # df_kobe_output.index = np.arange(df_kobe_output.shape[0])
            #
            # read the inclination seen by kobe for the appended planets
            # numpy magic to extract separate arrays out of (f,alpha) array
            f_alpha_pairs = np.concatenate(df_angle_data.iloc[[index_system_views],2:].to_numpy()[0][planet_indices]).reshape(-1,2)
            f_array = np.take(f_alpha_pairs, indices=0,axis=1)
            alpha_array = np.take(f_alpha_pairs, indices=1,axis=1)
            # note, observer's inclination is same as observer's location given by alpha (along polar)
            df_kobe_output.loc[df_kobe_output.tail(len(planet_indices)).index,'kobe_inclination'] = alpha_array
            df_kobe_output.loc[df_kobe_output.tail(len(planet_indices)).index,'kobe_observer_azimuth'] = f_array
            # give kobe system numbers and planet numbers
            df_kobe_output.loc[df_kobe_output.tail(len(planet_indices)).index,'kobe_system_id'] = nsystem_nonzero_views2*np.ones(len(planet_indices))
            df_kobe_output.loc[df_kobe_output.tail(len(planet_indices)).index,'kobe_planet_id'] = np.arange(len(planet_indices))+1
            ####################################
            ####################################
            # KOBE TRANSIT Calculations
            ####################################
            ####################################
            # step 7 draw CDPP, calculate SNR (for each remaining planet)
            # one CDPP value [ppm] per system view, converted to a fraction
            df_kobe_output.loc[df_kobe_output.tail(len(planet_indices)).index,'kobe_cdpp [ppm]'] = np.random.choice(cdpp)*10**-6
        # get out of this loop to continue SNR calculations
        if nsystem_nonzero_views1 != nsystem_nonzero_views2:
            print('Number of systems in output may be wrong. Check system views for system %d'%(index_system+1))
        if index_system == break_after_system:
            break
    return df_kobe_output, [nsystem_nonzero_views1,nsystem_nonzero_views2,nplanet_transiting]
def kobe_transits(df_kobe_output):
    """
    The KOBE Transits module: calculate transit related parameters for all planets

    Adds, per planet: the impact parameter w.r.t. the KOBE observer, the
    geometric transit threshold (rs+rp)/rs, the transit duration, the
    duration-scaled effective CDPP, the SES/MES detection statistics, the
    number of transits within the observing window, and the TCE flag.

    Parameters
    ----------
    df_kobe_output : pandas dataframe
        output of kobe_shadows (transiting planets with observer geometry)

    Output
    ------
    df_kobe_output : pandas dataframe
        the same dataframe with the transit columns added in place

    NOTE
    ----
    Reads module-level configuration (col_sma, col_ecc, col_r_star,
    col_r_planet, col_period, t_cdpp, t_obs, minimum_transit, snr_threshold)
    and the constants sp_constants, radius_sun, radius_earth.
    """
    # Calculate impact parameter for each planet with respect to the observer
    # we use observer's azimuth as true anomaly --> implies that planet is crossing observer's azimuth
    # we use observer's polar angle as inclination --> identical by definition
    distance_to_star = (df_kobe_output[col_sma]*sp_constants.astronomical_unit*(1-(df_kobe_output[col_ecc]**2)))/(1 + (df_kobe_output[col_ecc]*np.cos(df_kobe_output['kobe_observer_azimuth'])))
    df_kobe_output['impact_parameter'] = (distance_to_star * np.cos(df_kobe_output['kobe_inclination']))/(df_kobe_output[col_r_star]*radius_sun.value)
    # geometrically, transits occur if impact parameter (sky projected distance of planet from star in stellar radius unit) is less than the sum of planet and stellar radii in stellar radius unit
    # we calculate the this geometric quantity
    df_kobe_output['rp+rs/rs'] = 1 + ((df_kobe_output[col_r_planet]/df_kobe_output[col_r_star])*(radius_earth/radius_sun))
    ## Any observer inside the transit shadow caused by a planet, is bound to see this planet's transit.
    # The geometric condition for transit is that the impact parameter should be smaller than 'rp+rs/rs'. It should be true for all of the planets in kobe_output
    # We check this by counting how many fail this condition.
    # first count planets that don't satisfy the strong condition
    # I'm checking this on absolute, to ensure that all planets are checked!
    # BUGFIX: violation of the strong condition (b < threshold) is b >= threshold;
    # previously this used '>' which duplicated the weak check below.
    condition_geotransit = abs(df_kobe_output['impact_parameter']) >= df_kobe_output['rp+rs/rs']
    failed_planets = df_kobe_output.loc[condition_geotransit].shape[0]
    if failed_planets != 0:
        print('!!!!!!!!!!!!!')
        print('FATAL ERROR : GEOMETRIC TRANSIT CONDITION NOT SATISFIED STRONGLY FOR %d PLANETS !!!!'%(failed_planets))
        print('Strong Condition == b < (rs+rp)/rs')
        print('!!!!!!!!!!!!!')
    # consider a weaker condition as well: violated only when |b| strictly exceeds the threshold
    condition_geotransit = abs(df_kobe_output['impact_parameter']) > df_kobe_output['rp+rs/rs']
    failed_planets = df_kobe_output.loc[condition_geotransit].shape[0]
    if failed_planets != 0:
        print('!!!!!!!!!!!!!')
        print('FATAL ERROR : GEOMETRIC TRANSIT CONDITION NOT SATISFIED WEAKLY FOR %d PLANETS !!!!'%(failed_planets))
        print('Weak Condition == b =< (rs+rp)/rs')
        print('!!!!!!!!!!!!!')
    # characteristic transit duration : eq 19, Transits and Occultations by J. Winn in Exoplanets
    # (period converted to hours via the factor 24)
    df_kobe_output['transit_duration [hours]'] = ((df_kobe_output[col_r_star]*radius_sun.value)*(df_kobe_output[col_period]*24))/((np.pi)*(df_kobe_output[col_sma]*sp_constants.astronomical_unit))
    # effective cdpp for transit duration. Following, eq 4, Christiansen et. al. 2012.
    df_kobe_output['kobe_cdpp_eff [ppm]'] = df_kobe_output['kobe_cdpp [ppm]']*((t_cdpp/df_kobe_output['transit_duration [hours]'])**0.5)
    # snr for 1 transit, or SES - single event statistic
    df_kobe_output['kobe_ses'] = df_kobe_output['transit_signal']/df_kobe_output['kobe_cdpp_eff [ppm]']
    # calculate number of transits that will be observed by kobe
    df_kobe_output['number_transits'] = t_obs/df_kobe_output[col_period]
    # Finally, snr(multi) or MES - multiple event statistic
    df_kobe_output['kobe_mes'] = df_kobe_output['kobe_ses']*((df_kobe_output['number_transits'])**0.5)
    ####################################
    ####################################
    # KOBE - tce
    ####################################
    ####################################
    # those planets which satisfy the following two conditions:
    # number transits >= minimum_transits (currently at 3)
    # kobe_mes >= snr_threshold
    # are flagged as kobe_tce (i.e. kobe_threshold crossing events): 1 means yes tce, and 0 means no tce.
    df_kobe_output['kobe_tce'] = 0
    condition_tce = (df_kobe_output['number_transits']>=minimum_transit) & (df_kobe_output['kobe_mes']>=snr_threshold)
    df_kobe_output.loc[df_kobe_output.loc[condition_tce].index, 'kobe_tce'] = 1
    return df_kobe_output
def kobe_vetter(df_kobe_output,kepler_completeness):
    """
    The KOBE Vetter module: applies Kepler completeness.
    Parameters
    ----------
    df_kobe_output : pandas dataframe
        output of kobe_transits (TCE flags already assigned)
    kepler_completeness : binned information
        an output of calculate_completeness; a 2D grid of completeness
        fractions over (period, radius) bins, with -0.01 marking bins
        outside the characterized parameter space
    Note
    ----
    1. flag_completeness is
    'PC' for planet candidates
    'FP' for false positives
    '-1' for planets which are not tces
    2. relies on module-level configuration: completeness_period_range,
    completeness_radius_range, col_period, col_r_planet
    3. FP selection inside each bin is random (np.random.choice), so output
    is not reproducible unless the caller seeds numpy's RNG
    """
    ####################################
    ####################################
    # KOBE - Kepler Completeness & Reliability
    ####################################
    ####################################
    # Two flags are created.
    # 'flag_completeness' : 'FP' means planet is vetted as False Positive. 'PC' means planet is vetted as Planetary Candidate
    ## 0 - create x and y arrays to go through each bin
    # bin edges: (start, stop, step) triples for period [x] and radius [y]
    x_array = np.arange(completeness_period_range[0],completeness_period_range[1],completeness_period_range[2])
    y_array = np.arange(completeness_radius_range[0],completeness_radius_range[1],completeness_radius_range[2])
    ## 1 - sanity checks
    # drop_c: planned number of FPs; drop1_c: actually flagged (should match)
    drop_c=0
    drop1_c=0
    parameter_space_exceeded_c= []
    ## Create Columns
    ## first create columns and mark each as 'PC'
    df_kobe_output['flag_completeness'] = 'PC'
    ## then all planets that are not tce are marked with -1
    # NOTE(review): the column thus mixes strings ('PC'/'FP') and int -1
    condition_nan = df_kobe_output['kobe_tce']==0
    df_kobe_output.loc[df_kobe_output.loc[condition_nan].index, 'flag_completeness'] = -1
    for xind in range(x_array.shape[0]):
        for yind in range(y_array.shape[0]):
            ## 2 now we are inside a bin
            ## we will create a flag
            completeness_c = kepler_completeness[xind][yind]
            # -0.01 marks a bin outside the characterized parameter space:
            # treat as zero completeness and record the occurrence
            if completeness_c == -0.01:
                completeness_c = 0
                parameter_space_exceeded_c.append(1)
            # fraction of TCEs in this bin to vet as false positives
            drop_ratio_c = 1 - completeness_c
            ## all planets that fall into current bin and are tce are selected
            condition = (df_kobe_output[col_period]>=x_array[xind]) & (df_kobe_output[col_period]< x_array[xind]+completeness_period_range[2]) & (df_kobe_output[col_r_planet]>=y_array[yind]) & (df_kobe_output[col_r_planet]< y_array[yind]+completeness_radius_range[2]) & (df_kobe_output['kobe_tce']==1)
            planet_index = df_kobe_output.loc[condition].index.to_numpy()
            ## estimate number of planets to flag
            drop_planets_size_c = int(np.floor(drop_ratio_c*planet_index.shape[0]))
            drop_c = drop_c + drop_planets_size_c
            ## now we randomly select planets within this bin which are flagged as FP
            if planet_index.shape[0] != 0:
                drop_planets_index_c = list(np.random.choice(planet_index,size=drop_planets_size_c,replace=False))
                df_kobe_output.loc[drop_planets_index_c,'flag_completeness'] = 'FP'
                drop1_c = drop1_c + len(drop_planets_index_c)
    ## completeness finished
    # step 9 kepler/kobe multiplicity
    # initalize columns
    df_kobe_output['kobe_c_multiplicity'] = 0
    # now calculate: multiplicity counted over surviving planet candidates only
    pps_system, pps_planet = planetspersystem(df_input=df_kobe_output.loc[df_kobe_output['flag_completeness']=='PC'], system_column='kobe_system_id',planet_column='kobe_planet_id')
    df_kobe_output.loc[df_kobe_output.loc[df_kobe_output['flag_completeness']=='PC'].index,'kobe_c_multiplicity'] = pps_planet
    return df_kobe_output
def print_header():
    """
    Print the KOBE ASCII-art banner with code, author and contact details.

    Called once at start-up; purely cosmetic, no return value.
    """
    header1 ="""
    -----------------------------------------------------------------------------------------
    -----------------------------------------------------------------------------------------
    KKKKKKKKK    KKKKKKK     OOOOOOOOO     BBBBBBBBBBBBBBBBB   EEEEEEEEEEEEEEEEEEEEEE
    K:::::::K    K:::::K   OO:::::::::OO   B::::::::::::::::B  E::::::::::::::::::::E
    K:::::::K    K:::::K OO:::::::::::::OO B::::::BBBBBB:::::B E::::::::::::::::::::E
    K:::::::K   K::::::KO:::::::OOO:::::::OBB:::::B     B:::::BEE::::::EEEEEEEEE::::E
    KK::::::K  K:::::KKKO::::::O   O::::::O  B::::B     B:::::B  E:::::E       EEEEEE
      K:::::K K:::::K   O:::::O     O:::::O  B::::B     B:::::B  E:::::E             
      K::::::K:::::K    O:::::O     O:::::O  B::::BBBBBB:::::B   E::::::EEEEEEEEEE   
      K:::::::::::K     O:::::O     O:::::O  B:::::::::::::BB    E:::::::::::::::E   
      K:::::::::::K     O:::::O     O:::::O  B::::BBBBBB:::::B   E:::::::::::::::E   
      K::::::K:::::K    O:::::O     O:::::O  B::::B     B:::::B  E::::::EEEEEEEEEE   
      K:::::K K:::::K   O:::::O     O:::::O  B::::B     B:::::B  E:::::E             
    KK::::::K  K:::::KKKO::::::O   O::::::O  B::::B     B:::::B  E:::::E       EEEEEE
    K:::::::K   K::::::KO:::::::OOO:::::::OBB:::::BBBBBB::::::BEE::::::EEEEEEEE:::::E
    K:::::::K    K:::::K OO:::::::::::::OO B:::::::::::::::::B E::::::::::::::::::::E
    K:::::::K    K:::::K   OO:::::::::OO   B::::::::::::::::B  E::::::::::::::::::::E
    KKKKKKKKK    KKKKKKK     OOOOOOOOO     BBBBBBBBBBBBBBBBB   EEEEEEEEEEEEEEEEEEEEEE
    K E P L E R   O B S E R V E S   B E R N   E X O P L A N E T S
    -----------------------------------------------------------------------------------------
    Code: Kepler Observes Bern Exoplanets
    Starting date: 25.10.2019
    Last edit date: 09.12.2020
    Author: Lokesh Mishra (University of Bern & Geneva Observatory)
    Contact: www.lokeshmishra.com
    -----------------------------------------------------------------------------------------
    -----------------------------------------------------------------------------------------"""
    print(header1)
|
exomishraREPO_NAMEkobePATH_START.@kobe_extracted@kobe-master@code@kobe.py@.PATH_END.py
|
{
"filename": "thermalization_mpi.py",
"repo_name": "AFD-Illinois/ebhlight",
"repo_path": "ebhlight_extracted/ebhlight-master/test/thermalization_mpi.py",
"type": "Python"
}
|
#!/usr/bin/env python3
from __future__ import print_function, division
import os
import sys; sys.dont_write_bytecode = True
sys.path.insert(0, '../script/')
sys.path.insert(0, '../script/analysis/')
from subprocess import call
import subprocess as sp
import glob
import numpy as np
import hdf5_to_dict as io
import units
cgs = units.get_cgs()
import util
# --- Test configuration ---
TMP_DIR = 'TMP'
util.safe_remove(TMP_DIR)
PROBLEM = 'thermalization_mpi'
AUTO = '-auto' in sys.argv  # emit pickled data for automated comparison
FAST = '-fast' in sys.argv  # shortened run for quick smoke tests
TF = 5.12 if FAST else 512  # final time of the run
# devnull
try:
  from subprocess import DEVNULL # py3k
except ImportError:
  import os
  DEVNULL = open(os.devnull, 'wb')
os.chdir('../prob/' + 'thermalization')
# COMPILE CODE
call(['python', 'build_mpi.py', '-dir', TMP_DIR])
os.chdir('../../test')
# move the freshly built problem directory next to this test script
call(['mv', '../prob/' + 'thermalization' + '/' + TMP_DIR, './'])
# RUN EXECUTABLE
NPROC = 8
import psutil
# physical cores only; divide them among the MPI ranks
NCORE = psutil.cpu_count(logical=False)
NTHREADS = (max(NCORE/NPROC, 1))
os.environ['OMP_NUM_THREADS'] = '%d' % NTHREADS
os.chdir(TMP_DIR)
if FAST:
  util.change_rparm('tf',str(TF),'param_template.dat')
# stdin=DEVNULL is required for MPI tests run with test_auto.py
call(['mpirun', '-np', '%d' % NPROC, './bhlight', '-p', 'param_template.dat'],
  stdin=DEVNULL)
os.chdir('../')
# READ SIMULATION OUTPUT
dfiles = np.sort(glob.glob(os.path.join(TMP_DIR,'')+'/dumps/dump*.h5'))
Nd = len(dfiles)
hdr = io.load_hdr(dfiles[0])
geom = io.load_geom(hdr)
# per-dump time series: time, electron temperature, radiation temperature
t_code = np.zeros(Nd)
Te_code = np.zeros(Nd)
Tr_code = np.zeros(Nd)
#Tr2_code = np.zeros(Nd)
Etot = np.zeros(Nd)
for n in range(Nd):
  dump = io.load_dump(dfiles[n], geom)
  t_code[n] = dump['t']*hdr['T_unit']
  # volume-averaged internal energy and density (code units)
  uu = dump['UU'].mean()
  rho = dump['RHO'].mean()
  # ideal-gas electron temperature; factor 2 for electron+ion number density
  Te_code[n] = (hdr['gam']-1.)*uu*hdr['U_unit']/(2.*rho*hdr['Ne_unit']*cgs['KBOL'])
  #Te_code[n] = dump['Thetae'][0][0][0]*cgs['ME']*cgs['CL']**2/cgs['KBOL']
  #Te_code[n] = (hdr['gam']-1.)*dump['UU'][0,0,0]*hdr['U_unit']/(2.*dump['RHO']*hdr['Ne_unit']*cgs['KBOL'])
  # radiation temperature from the mean radiation energy density R^0_0
  R00 = -dump['Rmunu'][:,:,:,0,0].mean()
  Tr_code[n] = (R00*hdr['U_unit']/cgs['AR'])**(1./4.)
  #Tr2_code[n] = (-dump['Rmunu'][0,0,0,0,0]*hdr['U_unit']/cgs['AR'])**(1./4.)
  ne = rho*hdr['Ne_unit']
  #ne = dump['RHO'][0,0,0]*hdr['Ne_unit']
  # total (gas + radiation) energy density, for conservation checking
  Etot[n] = 2.*ne*cgs['KBOL']*Te_code[n]/(hdr['gam']-1.) + cgs['AR']*Tr_code[n]**4.
  print(ne)
print('gam = %e' % hdr['gam'])
diag = io.load_diag(os.path.join(TMP_DIR, 'dumps/'), hdr=hdr)
print(diag['egas'] + diag['erad'])
# SEMIANALYTIC SOLUTION
from scipy.integrate import odeint
tf = 1.e3
t = np.logspace(-4, np.log10(tf), 1000)
# logarithmic frequency grid for the radiation field
nubins = 100
numin = 1.e13
numax = 1.e18
dlnu = (np.log(numax) - np.log(numin))/nubins
nu = np.zeros(nubins)
for n in range(nubins):
  nu[n] = np.exp(np.log(numin) + (0.5 + n)*dlnu)
# physical parameters and cgs constants used by the ODE below
gamma_e = hdr['gam']
ne = hdr['Ne_unit']
T0 = 1.e6
kb = cgs['KBOL']
me = cgs['ME']
cl = cgs['CL']
ar = cgs['AR']
h = cgs['HPL']
qe = cgs['QE']
def jnu(nu, ne, thetae):
  """Simple thermal bremsstrahlung emissivity estimate.

  NOTE(review): dead code -- this definition is immediately shadowed by the
  more detailed jnu defined directly below; kept for reference only.
  """
  Te = thetae*me*cl*cl/kb
  x = h*nu/(kb*Te)
  return 5.4e-39*ne**2.*Te**(-1./2.)*np.exp(-x)
def jnu(nu, ne, thetae):
  """Thermal free-free (bremsstrahlung) emissivity j_nu.

  Uses a constant Gaunt factor gff = 1.2 and a leading-order relativistic
  correction; for x = h nu / (k Te) << 1 the exponential is replaced by its
  4-term Taylor expansion to avoid round-off. Reads the script-level cgs
  constants me, cl, kb, h, qe.
  """
  Te = thetae*me*cl*cl/kb
  x = h*nu/(kb*Te)
  # leading-order relativistic correction factor
  rel = (1. + 4.4e-10*Te)
  gff = 1.2
  if x < 1.e-3:
    # Taylor expansion of exp(-x) through x^4
    efac = (24 - 24*x + 12*x*x - 4.*x*x*x + x*x*x*x)/24.
  else:
    efac = np.exp(-x)
  jv = 1./(4.*np.pi)*pow(2.,5.)*np.pi*pow(qe,6)/(3.*me*pow(cl,3))
  jv *= pow(2.*np.pi/(3.*kb*me),1./2.)
  jv *= pow(Te,-1./2.)*ne*ne
  jv *= efac*rel*gff
  return jv
def Bnu_inv(nu, thetae):
  """Frequency-scaled Planck function B_nu / nu^3 for dimensionless
  electron temperature thetae (reads script-level constants h, me, cl)."""
  dimensionless_freq = h*nu/(me*cl*cl*thetae)
  prefactor = 2.*h/(cl*cl)
  return prefactor/(np.expm1(dimensionless_freq))
def dydt(y, t0):
  """Right-hand side for odeint: coupled gas/radiation thermalization.

  y[0] is the gas internal energy density; y[1:] is the radiation intensity
  per frequency bin. Emission heats the radiation field toward the Planck
  function and cools the gas by the same total rate (energy-conserving).
  Reads script-level globals: gamma_e, ne, kb, cgs, nubins, nu, dlnu, cl.
  """
  source = np.zeros(len(y))
  # gas temperature from internal energy (ideal gas, electrons + ions)
  Te = y[0]*(gamma_e - 1.)/(2.*ne*kb)
  thetae = kb*Te/(cgs['ME']*cgs['CL']**2)
  for n in range(nubins):
    # Planck function for this bin; emission relaxes y toward it
    Bnu_v = nu[n]**3.*Bnu_inv(nu[n], thetae)
    jnu_v = jnu(nu[n], ne, thetae)
    source[n+1] = cl*jnu_v*(4.*np.pi/cl - y[n+1]/Bnu_v)
    #source[n+1] = cl*jnu_v*(4.*np.pi/cl)
  # gas loses exactly what the radiation field gains (frequency integral)
  source[0] = -sum(source[1:]*nu*dlnu)
  return source
# initial state: all energy in the gas at temperature T0, no radiation
y0 = np.zeros(nubins + 1)
y0[0] = 2.*ne*kb*T0/(gamma_e - 1.)
ans = odeint(dydt, y0, t)
# gas internal energy -> temperature along the solution
ue_v = ans[:,0]
Te_v = ue_v*(gamma_e-1.)/(2.*ne*kb)
# radiation energy density: frequency integral of the intensity bins
ur_v = np.zeros(len(t))
for n in range(len(t)):
  ur_v[n] = sum(ans[n,1:]*nu*dlnu)
Tr_v = (ur_v/ar)**(1./4.)
if AUTO:
  # automated mode: pickle code/solution pairs for test_auto.py comparison
  data = {}
  data['SOL'] = [t, Te_v]
  data['CODE'] = [t_code, Te_code]
  data['THRESHOLD'] = 0.05
  import pickle
  pickle.dump(data, open('data.p', 'wb'))
  # CLEAN UP
  util.safe_remove(TMP_DIR)
  sys.exit()
# MAKE FIGURE
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
code_col = 'r'; code_ls = ''; code_mrk = '.'
sol_col = 'k'; sol_ls = '-'; sol_mrk = ''
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
# code output as points (red: gas, blue: radiation)
ax.plot(t_code, Te_code, color='r', linestyle=code_ls, marker=code_mrk,
  markersize=10)
ax.plot(t_code, Tr_code, color='b', linestyle=code_ls, marker=code_mrk,
  markersize=10)
# semianalytic solution as dashed black curves
ax.plot(t, Te_v, color='k', linestyle='--')
ax.plot(t, Tr_v, color='k', linestyle='--')
plt.yscale('log')
plt.ylim([0,1.e6])
plt.xlabel('t (s)'); plt.ylabel('Te (K)')
plt.xscale('log')
plt.xlim([1.e-3, 1.e0])
plt.savefig(PROBLEM + '.png', bbox_inches='tight')
# CLEAN UP
util.safe_remove(TMP_DIR)
|
AFD-IllinoisREPO_NAMEebhlightPATH_START.@ebhlight_extracted@ebhlight-master@test@thermalization_mpi.py@.PATH_END.py
|
{
"filename": "performance_counters.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/utilities/performance_counters.py",
"type": "Python"
}
|
import atexit
import time
from bisect import insort
from collections import defaultdict
from datetime import datetime as dt
from functools import wraps
from yt.config import ytcfg
from yt.funcs import mylog
class PerformanceCounters:
    """Borg-style performance counter registry.

    All instances share ``_shared_state``, so ``yt_counters`` behaves like a
    process-wide singleton. Calling the instance with a name starts a timer;
    calling it again with the same name stops it and stores the elapsed
    seconds. Timing is active only when the ``yt.time_functions`` config
    option is set; otherwise every operation is a no-op.
    """

    _shared_state = {}  # type: ignore

    def __new__(cls, *args, **kwargs):
        # Borg pattern: every instance shares the same __dict__.
        self = object.__new__(cls, *args, **kwargs)
        self.__dict__ = cls._shared_state
        return self

    def __init__(self):
        self.counters = defaultdict(lambda: 0.0)
        self.counting = defaultdict(lambda: False)
        self.starttime = defaultdict(lambda: 0)
        self.endtime = defaultdict(lambda: 0)
        self._on = ytcfg.get("yt", "time_functions")
        # registers print_stats with atexit when timing is enabled
        self.exit()

    def __call__(self, name):
        """Toggle the timer for *name*: first call starts it, second stops it."""
        if not self._on:
            return
        if self.counting[name]:
            # stop: counters[name] currently holds the start timestamp,
            # replace it with the elapsed time
            self.counters[name] = time.time() - self.counters[name]
            self.counting[name] = False
            self.endtime[name] = dt.now()
        else:
            # start: stash the start timestamp in counters[name]
            self.counters[name] = time.time()
            self.counting[name] = True
            self.starttime[name] = dt.now()

    def call_func(self, func):
        """Decorator that times each call of *func* under its ``__name__``.

        BUGFIX: the wrapper now propagates the wrapped function's return
        value; previously it always returned None, silently breaking any
        decorated function with a meaningful result.
        """
        if not self._on:
            return func

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            self(func.__name__)
            result = func(*args, **kwargs)
            self(func.__name__)
            return result

        return func_wrapper

    def print_stats(self):
        """Log all counters, indented by how the timed spans nest in time."""
        mylog.info("Current counter status:\n")
        times = []
        # build a chronological event list of span starts (1) and ends (0)
        for i in self.counters:
            insort(times, [self.starttime[i], i, 1])  # 1 for 'on'
            if not self.counting[i]:
                insort(times, [self.endtime[i], i, 0])  # 0 for 'off'
        shifts = {}
        order = []
        endtimes = {}
        shift = 0
        multi = 5  # spaces of indentation per nesting level
        for i in times:
            # a starting entry
            if i[2] == 1:
                shifts[i[1]] = shift
                order.append(i[1])
                shift += 1
            if i[2] == 0:
                shift -= 1
                endtimes[i[1]] = self.counters[i[1]]
        line = ""
        for i in order:
            if self.counting[i]:
                line = "%s%s%i : %s : still running\n" % (
                    line,
                    " " * shifts[i] * multi,
                    shifts[i],
                    i,
                )
            else:
                line = "%s%s%i : %s : %0.3e\n" % (
                    line,
                    " " * shifts[i] * multi,
                    shifts[i],
                    i,
                    self.counters[i],
                )
        mylog.info("\n%s", line)

    def exit(self):
        # NOTE(review): called from __init__, so every instantiation with
        # timing enabled registers print_stats again; harmless for the
        # intended single module-level instance.
        if self._on:
            atexit.register(self.print_stats)
# Shared module-level counter instance and the decorator alias used across yt.
yt_counters = PerformanceCounters()
time_function = yt_counters.call_func
class ProfilingController:
    """Registry of ``cProfile`` profilers keyed by a user-chosen name."""

    def __init__(self):
        self.profilers = {}

    def profile_function(self, function_name):
        """Return a decorator that profiles every call of the wrapped function.

        A dedicated ``cProfile.Profile`` is created and stored under
        ``self.profilers[function_name]`` so it can later be dumped with
        :meth:`write_out`.

        BUGFIX: the wrapper now returns the wrapped function's result
        (previously it always returned None) and disables the profiler in a
        ``finally`` block so an exception cannot leave it enabled.
        """

        def wrapper(func):
            try:
                import cProfile
            except ImportError:
                # profiling unavailable: hand back the function unmodified
                return func
            my_prof = cProfile.Profile()
            self.profilers[function_name] = my_prof

            @wraps(func)
            def run_in_profiler(*args, **kwargs):
                my_prof.enable()
                try:
                    return func(*args, **kwargs)
                finally:
                    my_prof.disable()

            return run_in_profiler

        return wrapper

    def write_out(self, filename_prefix):
        """Dump each registered profiler to ``<prefix>_<name>.cprof``.

        In parallel runs the prefix is extended with the global rank and
        size so every process writes a distinct file.
        """
        if ytcfg.get("yt", "internals", "parallel"):
            pfn = "%s_%03i_%03i" % (
                filename_prefix,
                ytcfg.get("yt", "internals", "global_parallel_rank"),
                ytcfg.get("yt", "internals", "global_parallel_size"),
            )
        else:
            pfn = f"{filename_prefix}"
        for n, p in sorted(self.profilers.items()):
            fn = f"{pfn}_{n}.cprof"
            mylog.info("Dumping %s into %s", n, fn)
            p.dump_stats(fn)
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@utilities@performance_counters.py@.PATH_END.py
|
{
"filename": "generalContours_demo2.py",
"repo_name": "itseez/opencv",
"repo_path": "opencv_extracted/opencv-master/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py",
"type": "Python"
}
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
def thresh_callback(val):
    """Trackbar callback: edge-detect the module-level grayscale image
    ``src_gray``, find contours, and draw each with its rotated bounding
    rectangle and (where possible) fitted ellipse.

    ``val`` is the current Canny low threshold; the high threshold is 2*val.
    The ``## [tag]`` markers are consumed by OpenCV's tutorial extraction
    tooling and must be kept as-is.
    """
    threshold = val
    ## [Canny]
    # Detect edges using Canny
    canny_output = cv.Canny(src_gray, threshold, threshold * 2)
    ## [Canny]
    ## [findContours]
    # Find contours
    contours, _ = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    ## [findContours]
    # Find the rotated rectangles and ellipses for each contour
    minRect = [None]*len(contours)
    minEllipse = [None]*len(contours)
    for i, c in enumerate(contours):
        minRect[i] = cv.minAreaRect(c)
        # fitEllipse needs at least 5 points; contours with <= 5 keep None
        if c.shape[0] > 5:
            minEllipse[i] = cv.fitEllipse(c)
    # Draw contours + rotated rects + ellipses
    ## [zeroMat]
    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
    ## [zeroMat]
    ## [forContour]
    for i, c in enumerate(contours):
        # random BGR color per contour
        color = (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256))
        # contour
        cv.drawContours(drawing, contours, i, color)
        # ellipse (only where one was fitted above)
        if c.shape[0] > 5:
            cv.ellipse(drawing, minEllipse[i], color, 2)
        # rotated rectangle
        box = cv.boxPoints(minRect[i])
        box = np.intp(box) #np.intp: Integer used for indexing (same as C ssize_t; normally either int32 or int64)
        cv.drawContours(drawing, [box], 0, color)
    ## [forContour]
    ## [showDrawings]
    # Show in a window
    cv.imshow('Contours', drawing)
    ## [showDrawings]
## [showDrawings]
## [setup]
# Load source image
parser = argparse.ArgumentParser(description='Code for Creating Bounding rotated boxes and ellipses for contours tutorial.')
parser.add_argument('--input', help='Path to input image.', default='stuff.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
# Convert image to gray and blur it (blur reduces spurious Canny edges)
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
src_gray = cv.blur(src_gray, (3,3))
## [setup]
## [createWindow]
# Create Window
source_window = 'Source'
cv.namedWindow(source_window)
cv.imshow(source_window, src)
## [createWindow]
## [trackbar]
# Trackbar drives the Canny threshold; render once with the initial value
max_thresh = 255
thresh = 100 # initial threshold
cv.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback)
thresh_callback(thresh)
## [trackbar]
cv.waitKey()
|
itseezREPO_NAMEopencvPATH_START.@opencv_extracted@opencv-master@samples@python@tutorial_code@ShapeDescriptors@bounding_rotated_ellipses@generalContours_demo2.py@.PATH_END.py
|
{
"filename": "utils_spin.py",
"repo_name": "carronj/plancklens",
"repo_path": "plancklens_extracted/plancklens-master/plancklens/utils_spin.py",
"type": "Python"
}
|
r"""Module with spin-weight related utilities.
Conventions are $_{\pm |s|} X_{lm} = - (\pm)^{|s|} (G_{lm} \pm i C_{lm})$.
For CMB maps,
$ _{0}X_{lm} = T_{lm} $
$ _{\pm}X_{lm} = -1/2 (E_{lm} \pm i B_{lm}) $
hence
$ G^{0}_{lm} = -T_{lm} $
$ G^{2}_{lm} = E_{lm} $
$ C^{2}_{lm} = B_{lm} $.
"""
import healpy as hp
import numpy as np
def alm2map_spin(gclm, nside, spin, lmax, mmax=None):
    """Synthesize spin-weight maps from (gradient, curl) harmonic modes.

    For spin > 0 this delegates to healpy's alm2map_spin and returns its two
    real maps; for spin == 0 it returns (T map, 0.), using the sign
    convention G^0_lm = -T_lm documented at the module level.
    """
    assert spin >= 0, spin
    assert len(gclm) == 2, len(gclm)
    if spin == 0:
        return hp.alm2map(-gclm[0], nside, lmax=lmax, mmax=mmax), 0.
    return hp.alm2map_spin(gclm, nside, spin, lmax, mmax=mmax)
def map2alm_spin(maps, spin, lmax=None, mmax=None):
    """Spin-weight harmonic analysis; inverse of alm2map_spin.

    For spin > 0 this delegates to healpy's map2alm_spin; for spin == 0 it
    returns (-T_lm, 0.) matching the module sign convention, with no
    iterative refinement (iter=0).
    """
    assert spin >= 0, spin
    if spin == 0:
        return -hp.map2alm(maps[0], lmax=lmax, mmax=mmax, iter=0), 0.
    return hp.map2alm_spin(maps, spin, lmax=lmax, mmax=mmax)
# Prefer the lenspyx wigner implementation; fall back to the plancklens
# fortran extension; degrade gracefully (HASWIGNER = False) if neither is
# importable. HASWIGNER_LPYX records which backend is active, since their
# quadrature-node APIs differ (see wignerc below).
try:
    from lenspyx.wigners import wigners
    HASWIGNER = True
    HASWIGNER_LPYX = True
except ImportError:
    try:
        from plancklens.wigners import wigners # fortran 90 shared object
        HASWIGNER = True
        HASWIGNER_LPYX = False
    except ImportError:
        HASWIGNER = False
        HASWIGNER_LPYX = False
        print("could not load wigners.so fortran shared object")
        print('try f2py -c -m wigners wigners.f90 from the command line in wigners directory ?')
# Cache of Gauss-Legendre nodes and weights, keyed by backend and node count.
GL_cache = {}
def wignerc(cl1, cl2, sp1, s1, sp2, s2, lmax_out=None):
    """Legendre coeff. of $ (\\xi_{sp1,s1} * \\xi_{sp2,s2})(\\cos \\theta)$ from their harmonic series.

    Uses Gauss-Legendre quadrature to solve this exactly.
    """
    assert HASWIGNER
    lmax1 = len(cl1) - 1
    lmax2 = len(cl2) - 1
    lmax_out = lmax1 + lmax2 if lmax_out is None else lmax_out
    # Total band-limit of the integrand; the GL rule below must be exact up
    # to this polynomial degree.
    lmaxtot = lmax1 + lmax2 + lmax_out
    # Output spins add under pointwise multiplication of the kernels.
    spo = sp1 + sp2
    so = s1 + s2
    if np.any(cl1) and np.any(cl2):
        # Number of GL nodes needed for exactness at band-limit lmaxtot.
        N = (lmaxtot + 2 - lmaxtot % 2) // 2
        fn = 'tht wg %s' % N if HASWIGNER_LPYX else 'xg wg %s' % N
        if not fn in GL_cache.keys():
            if HASWIGNER_LPYX: # lenspyx use tht in place of xg = cos tht
                GL_cache[fn] = wigners.get_thgwg(N)
            else:
                GL_cache[fn] = wigners.get_xgwg(-1., 1., N)
        xg, wg = GL_cache[fn]
        if HASWIGNER:
            # Synthesize both correlation functions at the GL nodes. The
            # compiled routines are real-only, so complex spectra are
            # transformed part by part.
            if np.iscomplexobj(cl1):
                xi1 = wigners.wignerpos(np.real(cl1), xg, sp1, s1) + 1j * wigners.wignerpos(np.imag(cl1), xg, sp1, s1)
            else:
                xi1 = wigners.wignerpos(cl1, xg, sp1, s1)
            if np.iscomplexobj(cl2):
                xi2 = wigners.wignerpos(np.real(cl2), xg, sp2, s2) + 1j * wigners.wignerpos(np.imag(cl2), xg, sp2, s2)
            else:
                xi2 = wigners.wignerpos(cl2, xg, sp2, s2)
            # Quadrature-weighted pointwise product, projected back onto
            # Wigner functions of the summed spins.
            xi1xi2w = xi1 * xi2 * wg
            if np.iscomplexobj(xi1xi2w):
                ret = wigners.wignercoeff(np.real(xi1xi2w), xg, spo, so, lmax_out)
                ret = ret + 1j * wigners.wignercoeff(np.imag(xi1xi2w), xg, spo, so, lmax_out)
                return ret
            else:
                return wigners.wignercoeff(xi1xi2w, xg, spo, so, lmax_out)
        else:
            assert 0
    else:
        # Either input vanishes identically, hence so does the product.
        return np.zeros(lmax_out + 1, dtype=float)
def get_spin_raise(s, lmax):
    r"""Response coefficient of spin-s spherical harmonic to spin raising operator.

    :math:`\sqrt{ (l - s) (l + s + 1) }` for abs(s) <= l <= lmax; entries
    below l = abs(s) are zero.
    """
    ret = np.zeros(lmax + 1, dtype=float)
    ells = np.arange(abs(s), lmax + 1)
    ret[abs(s):] = np.sqrt((ells - s) * (ells + s + 1))
    return ret
def get_spin_lower(s, lmax):
    r"""Response coefficient of spin-s spherical harmonic to spin lowering operator.

    :math:`-\sqrt{ (l + s) (l - s + 1) }` for abs(s) <= l <= lmax; entries
    below l = abs(s) are zero.
    """
    ret = np.zeros(lmax + 1, dtype=float)
    ells = np.arange(abs(s), lmax + 1)
    ret[abs(s):] = -np.sqrt((ells + s) * (ells - s + 1))
    return ret
def _dict_transpose(cls):
ret = {}
for k in cls.keys():
if len(k) == 1:
ret[k + k] = np.copy(cls[k])
else:
assert len(k) == 2
ret[k[1] + k[0]] = np.copy(cls[k])
return ret
def spin_cls(s1, s2, cls):
    r"""Spin-weighted power spectrum :math:`_{s1}X_{lm} _{s2}X^{*}_{lm}`

    The output is real unless necessary (complex only when TB/EB-type
    spectra are present and relevant).
    """
    if s1 < 0:
        # Reduce to non-negative s1 via conjugation symmetry, transposing
        # the cross-spectra dict to match the swapped legs.
        return (-1) ** (s1 + s2) * np.conjugate(spin_cls(-s1, -s2, _dict_transpose(cls)))
    assert s1 in [0, -2, 2] and s2 in [0, -2, 2], (s1, s2, 'not implemented')
    if s1 == 0:
        if s2 == 0:
            return cls['tt']
        assert 'te' in cls.keys() or 'et' in cls.keys()
        te = cls.get('te', cls.get('et'))
        tb = cls.get('tb', None)
        if tb is None:
            return -te
        # The TB part flips sign with the sign of s2.
        return -te + 1j * np.sign(s2) * tb
    if s1 == 2:
        if s2 == 0:
            assert 'te' in cls.keys() or 'et' in cls.keys()
            et = cls.get('et', cls.get('te'))
            tb = cls.get('bt', cls.get('tb', None))
            if tb is None:
                return -et
            return -et - 1j * tb
        if s2 == 2:
            return cls['ee'] + cls['bb']
        if s2 == -2:
            ee_minus_bb = cls['ee'] - cls['bb']
            eb = cls.get('be', cls.get('eb', None))
            if eb is None:
                return ee_minus_bb
            return ee_minus_bb + 2j * eb
        assert 0
    else:
        assert 0
def get_spin_matrix(sout, sin, cls):
    r"""Spin-space matrix R^{-1} cls[T, E, B] R where R is the mapping from _{0, \pm 2}X to T, E, B.

    cls is dictionary with keys 'tt', 'te', 'ee', 'bb'.
    If a key is not present the corresponding spectrum is assumed to be zero.
    ('t' 'e' and 'b' keys also works in place of 'tt' 'ee', 'bb'.)
    Output is complex only when necessary (that is, TB and/or EB present and relevant).
    """
    assert sin in [0, 2, -2] and sout in [0, 2, -2], (sin, sout)
    # --- Column sin = 0 (temperature input leg) ---
    if sin == 0:
        if sout == 0:
            return cls.get('tt', cls.get('t', 0.))
        # sout = +/-2: TE coupling; the TB part flips sign with sout.
        tb = cls.get('tb', None)
        return (-cls.get('te', 0.) - 1j * np.sign(sout) * tb) if tb is not None else -cls.get('te', 0.)
    # --- Column sin = +2 ---
    if sin == 2:
        if sout == 0:
            te = cls.get('te', 0.)
            tb = cls.get('tb', None)
            return -0.5 * (te - 1j * tb) if tb is not None else -0.5 * te
        if sout == 2:
            # Diagonal polarization element: (EE + BB) / 2.
            return 0.5 * (cls.get('ee', cls.get('e', 0.)) + cls.get('bb', cls.get('b', 0.)))
        if sout == -2:
            # Off-diagonal element: (EE - BB) / 2, minus an EB term if present.
            ret = 0.5 * (cls.get('ee', cls.get('e', 0.)) - cls.get('bb', cls.get('b', 0.)))
            eb = cls.get('eb', None)
            return ret - 1j * eb if eb is not None else ret
    # --- Column sin = -2 (conjugate structure of sin = +2) ---
    if sin == -2:
        if sout == 0:
            te = cls.get('te', 0.)
            tb = cls.get('tb', None)
            return -0.5 * (te + 1j * tb) if tb is not None else -0.5 * te
        if sout == 2:
            ret = 0.5 * (cls.get('ee', cls.get('e', 0.)) - cls.get('bb', cls.get('b', 0.)))
            eb = cls.get('eb', None)
            return ret + 1j * eb if eb is not None else ret
        if sout == -2:
            return 0.5 * (cls.get('ee', cls.get('e', 0.)) + cls.get('bb', cls.get('b', 0.)))
    assert 0, (sin, sout)
|
carronjREPO_NAMEplancklensPATH_START.@plancklens_extracted@plancklens-master@plancklens@utils_spin.py@.PATH_END.py
|
{
"filename": "baseline.py",
"repo_name": "ratt-ru/QuartiCal",
"repo_path": "QuartiCal_extracted/QuartiCal-main/quartical/gains/baseline.py",
"type": "Python"
}
|
import xarray
import numpy as np
import dask.array as da
from daskms.experimental.zarr import xds_to_zarr
from numba import njit
from numba.extending import overload
from quartical.utils.numba import coerce_literal, JIT_OPTIONS
import quartical.gains.general.factories as factories
from quartical.gains.general.convenience import get_dims, get_row
def write_baseline_datasets(bl_corr_xds_list, output_opts):
    """Persist baseline-correction datasets to zarr, if any were produced.

    Returns the write nodes produced by xds_to_zarr, or None when the input
    list is empty/falsy and there is nothing to write.
    """
    if not bl_corr_xds_list:
        return None
    output_path = f"{output_opts.gain_directory}::BLCORR"
    return xds_to_zarr(bl_corr_xds_list, output_path)
def compute_baseline_corrections(
    data_xds_list,
    solved_gain_xds_lod,
    mapping_xds_list
):
    """Compute per-baseline, per-channel corrections from solved gains.

    For each data xds, the solved gain terms are applied to the model inside
    a blockwise call and a weighted least-squares amplitude is accumulated
    per baseline/channel. Returns one xarray Dataset of corrections per
    input dataset.
    """
    bl_corr_xdsl = []
    itr = zip(data_xds_list, mapping_xds_list, solved_gain_xds_lod)
    for (data_xds, mapping_xds, gain_dict) in itr:
        data_col = data_xds.DATA.data
        model_col = data_xds.MODEL_DATA.data
        flag_col = data_xds.FLAG.data
        weight_col = data_xds._WEIGHT.data # The weights exiting the solver.
        ant1_col = data_xds.ANTENNA1.data
        ant2_col = data_xds.ANTENNA2.data
        corr_mode = data_xds.sizes["corr"]
        # Per-gain-term mappings from data rows/channels/directions onto
        # the corresponding solution intervals.
        time_maps = tuple(
            [mapping_xds.get(f"{k}_time_map").data for k in gain_dict.keys()]
        )
        freq_maps = tuple(
            [mapping_xds.get(f"{k}_freq_map").data for k in gain_dict.keys()]
        )
        dir_maps = tuple(
            [mapping_xds.get(f"{k}_dir_map").data for k in gain_dict.keys()]
        )
        is_bda = hasattr(data_xds, "ROW_MAP") # We are dealing with BDA.
        row_map = data_xds.ROW_MAP.data if is_bda else None
        row_weights = data_xds.ROW_WEIGHTS.data if is_bda else None
        # Interleave each term's (gains, time map, freq map, dir map) with
        # the axis schemas that da.blockwise expects for extra arguments.
        req_itr = zip(gain_dict.values(), time_maps, freq_maps, dir_maps)
        req_args = []
        for g, tb, fm, dm in req_itr:
            req_args.extend([g.gains.data, g.gains.dims])
            req_args.extend([tb, ("gain_time",)])
            req_args.extend([fm, ("gain_freq",)])
            req_args.extend([dm, ("direction",)])
        n_ant = data_xds.sizes["ant"]
        # Baseline count including autocorrelations.
        n_bla = int((n_ant*(n_ant - 1))/2 + n_ant)
        bl_corr = da.blockwise(
            dask_compute_baseline_corrections,
            ("rowlike", "baseline", "chan", "corr"),
            data_col, ("rowlike", "chan", "corr"),
            model_col, ("rowlike", "chan", "dir", "corr"),
            weight_col, ("rowlike", "chan", "corr"),
            flag_col, ("rowlike", "chan"),
            ant1_col, ("rowlike",),
            ant2_col, ("rowlike",),
            *((row_map, ("rowlike",)) if is_bda else (None, None)),
            *((row_weights, ("rowlike",)) if is_bda else (None, None)),
            corr_mode, None,
            *req_args,
            dtype=data_col.dtype,
            align_arrays=False,
            concatenate=True,
            new_axes={"baseline": n_bla},
            # Each row chunk collapses to a single entry along "rowlike"
            # (one correction per chunk of rows).
            adjust_chunks={
                "rowlike": ((1,)*len(data_xds.chunks['row'])),
                "chan": data_col.chunks[1]
            }
        )
        # Antenna indices for each baseline id (upper triangle incl. autos).
        a1_inds = [x for x in range(n_ant) for _ in range(x, n_ant)]
        a2_inds = [y for x in range(n_ant) for y in range(x, n_ant)]
        bl_corr_xds = xarray.Dataset(
            {"bl_correction": (("time", "bl_id", "chan", "corr"), bl_corr)},
            coords={
                "time": (("time",), np.arange(len(data_xds.chunks['row']))),
                "bl_id": (("bl_id",), np.arange(n_bla)),
                "chan": (("chan",), data_xds.chan.values),
                "corr": (("corr",), data_xds.corr.values),
                "antenna1": (("bl_id", a1_inds)),
                "antenna2": (("bl_id", a2_inds))
            }
        )
        bl_corr_xdsl.append(bl_corr_xds)
    return bl_corr_xdsl
def dask_compute_baseline_corrections(
    data,
    model,
    weight,
    flags,
    a1,
    a2,
    row_map,
    row_weights,
    corr_mode,
    *args
):
    """Dask-level wrapper: regroup the flat blockwise argument list.

    The trailing *args interleave (gains, time map, freq map, dir map) for
    each gain term; slice them back into parallel tuples before handing off
    to the jitted kernel.
    """
    gain_tuple = tuple(args[0::4])
    time_map_tuple = tuple(args[1::4])
    freq_map_tuple = tuple(args[2::4])
    dir_map_tuple = tuple(args[3::4])
    return _compute_baseline_corrections(
        data,
        model,
        weight,
        flags,
        gain_tuple,
        a1,
        a2,
        time_map_tuple,
        freq_map_tuple,
        dir_map_tuple,
        row_map,
        row_weights,
        corr_mode,
    )
@njit(**JIT_OPTIONS)
def _compute_baseline_corrections(
    data,
    model,
    weight,
    flags,
    gains,
    a1,
    a2,
    time_maps,
    freq_maps,
    dir_maps,
    row_map,
    row_weights,
    corr_mode
):
    # Thin jitted entry point; the real work happens in the numba overload
    # registered on _compute_baseline_corrections_impl below.
    return _compute_baseline_corrections_impl(
        data,
        model,
        weight,
        flags,
        gains,
        a1,
        a2,
        time_maps,
        freq_maps,
        dir_maps,
        row_map,
        row_weights,
        corr_mode
    )
def _compute_baseline_corrections_impl(
data,
model,
weight,
flags,
gains,
a1,
a2,
time_maps,
freq_maps,
dir_maps,
row_map,
row_weights,
corr_mode
):
return NotImplementedError
@overload(_compute_baseline_corrections_impl, jit_options=JIT_OPTIONS)
def nb_compute_baseline_corrections_impl(
    data,
    model,
    weight,
    flags,
    gains,
    a1,
    a2,
    time_maps,
    freq_maps,
    dir_maps,
    row_map,
    row_weights,
    corr_mode
):
    # corr_mode must be a compile-time literal so the factories below can
    # specialise on the number of correlations.
    coerce_literal(nb_compute_baseline_corrections_impl, ["corr_mode"])
    # Correlation-mode-specialised visibility algebra helpers.
    imul_rweight = factories.imul_rweight_factory(corr_mode, row_weights)
    v1_imul_v2 = factories.v1_imul_v2_factory(corr_mode)
    v1_imul_v2ct = factories.v1_imul_v2ct_factory(corr_mode)
    iadd = factories.iadd_factory(corr_mode)
    iunpack = factories.iunpack_factory(corr_mode)
    valloc = factories.valloc_factory(corr_mode)
    def impl(
        data,
        model,
        weight,
        flags,
        gains,
        a1,
        a2,
        time_maps,
        freq_maps,
        dir_maps,
        row_map,
        row_weights,
        corr_mode
    ):
        n_rows, n_chan, n_dir, n_corr = get_dims(model, row_map)
        n_ant = int(max(np.max(a1), np.max(a2))) + 1
        n_bla = int((n_ant*(n_ant - 1))/2 + n_ant)  # bls plus autos
        # Normal-equation accumulators per (baseline, channel, correlation).
        jhj = np.zeros((n_bla, n_chan, n_corr), dtype=np.complex64)
        jhr = np.zeros((n_bla, n_chan, n_corr), dtype=np.complex64)
        # Map each row's (a1, a2) to a unique baseline index (upper
        # triangle including the autocorrelation diagonal).
        bl_ids = (n_bla - ((n_ant - a1 + 1)*(n_ant - a1))//2 + a2 - a1)
        n_gains = len(gains)
        dir_loop = np.arange(n_dir)
        for row_ind in range(n_rows):
            row = get_row(row_ind, row_map)
            a1_m, a2_m = a1[row], a2[row]
            bl_m = bl_ids[row]
            v = valloc(np.complex128)  # Hold GMGH.
            for f in range(n_chan):
                if flags[row, f]:
                    continue
                m = model[row, f]
                w = weight[row, f]
                r = data[row, f]
                for d in dir_loop:
                    # Apply every gain term to the model: v = G m G^H,
                    # innermost (last) term applied first.
                    iunpack(v, m[d])
                    for g in range(n_gains - 1, -1, -1):
                        t_m = time_maps[g][row_ind]
                        f_m = freq_maps[g][f]
                        d_m = dir_maps[g][d]  # Broadcast dir.
                        gain = gains[g][t_m, f_m]
                        gain_p = gain[a1_m, d_m]
                        gain_q = gain[a2_m, d_m]
                        v1_imul_v2(gain_p, v, v)
                        v1_imul_v2ct(v, gain_q, v)
                    imul_rweight(v, v, row_weights, row_ind)
                    # Accumulate the weighted normal equations for a scalar
                    # per-baseline term: jhj += v* w v, jhr += v* w r.
                    iadd(jhj[bl_m, f], v.conjugate() * w * v)
                    iadd(jhr[bl_m, f], v.conjugate() * w * r)
        # Elementwise solution where jhj is non-zero; unsampled entries
        # default to a correction of 1 (no-op).
        bl_corrections = np.ones_like(jhr).ravel()
        sel = np.where(jhj.ravel() != 0)
        bl_corrections[sel] = jhr.ravel()[sel]/jhj.ravel()[sel]
        return bl_corrections.reshape((1, n_bla, n_chan, n_corr))
    return impl
def apply_baseline_corrections(data_xds_list, bl_xds_list):
    """Apply per-baseline corrections to the corrected data column.

    Returns a new list of datasets whose _CORRECTED_DATA has been divided
    by the corresponding baseline corrections; the inputs are not mutated.
    """
    corrected_xdsl = []
    for data_xds, corr_xds in zip(data_xds_list, bl_xds_list):
        corrected = data_xds._CORRECTED_DATA.data
        corrections = corr_xds.bl_correction.data
        result = da.blockwise(
            dask_apply_baseline_corrections, ("rowlike", "chan", "corr"),
            corrected, ("rowlike", "chan", "corr"),
            corrections, ("rowlike", "baseline", "chan", "corr"),
            data_xds.ANTENNA1.data, ("rowlike",),
            data_xds.ANTENNA2.data, ("rowlike",),
            dtype=corrected.dtype,
            align_arrays=False,
            concatenate=True,
            adjust_chunks={
                "rowlike": corrected.chunks[0],
                "chan": corrected.chunks[1]
            }
        )
        corrected_xdsl.append(
            data_xds.assign(
                {"_CORRECTED_DATA": (data_xds._CORRECTED_DATA.dims, result)}
            )
        )
    return corrected_xdsl
def dask_apply_baseline_corrections(
    data,
    bl_corrections,
    a1,
    a2,
):
    """Dask-level shim delegating straight to the jitted kernel."""
    return _apply_baseline_corrections(data, bl_corrections, a1, a2)
@njit(**JIT_OPTIONS)
def _apply_baseline_corrections(data, bl_corrections, a1, a2):
    # Work on a copy so the input visibilities are left untouched.
    data = data.copy()
    n_rows, n_chan, n_corr = data.shape
    n_ant = int(max(np.max(a1), np.max(a2))) + 1
    n_bla = int((n_ant*(n_ant - 1))/2 + n_ant)  # bls plus autos
    # Map each row's (a1, a2) pair to a unique baseline index (upper
    # triangle including the autocorrelation diagonal).
    bl_ids = (n_bla - ((n_ant - a1 + 1)*(n_ant - a1))//2 + a2 - a1)
    for row in range(n_rows):
        bl_m = bl_ids[row]
        for f in range(n_chan):
            v = data[row, f]
            for c in range(n_corr):
                # Zero corrections (unsampled baselines) leave the datum
                # unscaled rather than dividing by zero.
                if bl_corrections[0, bl_m, f, c]:
                    blg = 1/bl_corrections[0, bl_m, f, c]
                else:
                    blg = 1
                data[row, f, c] = blg * v[c]
    return data
|
ratt-ruREPO_NAMEQuartiCalPATH_START.@QuartiCal_extracted@QuartiCal-main@quartical@gains@baseline.py@.PATH_END.py
|
{
"filename": "CHANGELOG.md",
"repo_name": "glass-dev/glass",
"repo_path": "glass_extracted/glass-main/CHANGELOG.md",
"type": "Markdown"
}
|
<!-- markdownlint-disable MD024 -->
# Changelog
All functional changes to the project are documented in this file.
## [2024.2] (15 Nov 2024)
- gh-188: add docstrings to all functions and tidy docs (#381)
- gh-336: support Python 3.13 (#337)
- gh-358: add static types support (#368)
- gh-131: rename `gaussian_gls` to `discretized_cls` (#345)
- gh-328: efficient resampling in `ellipticity_ryden04` (#341)
- gh-137: deprecate `redshifts_from_nz` in favor of `redshifts` (#333)
- gh-328: fix shape mismatch bug in ellipticity_ryden04 (#332)
- gh-315: add broadcasting rule in ellipticity_ryden04 + tests (#317)
- gh-198: enforce `python>3.8` & `numpy>1.21` (#326)
- gh-260: remove glass.core.constants (#261)
- gh-107: add all public functions/classes under glass namespace (#221)
- gh-168: move examples into repository (#169)
- gh-156: add FITS catalogue writer tool (#158)
## [2024.1] (16 Jul 2024)
### Added
- A new function `combine()` that evaluates the linear combination of radial
window functions with given weights.
- A new function `effective_cls()` which combines power spectra using a list of
weights, which models what happens in the simulation.
- A new function `position_weights()` that returns weights for `effective_cls()`
to model the result of `positions_from_delta()`.
- A new function `multi_plane_weights()` that returns weights for
`effective_cls()` to model the result of `MultiPlaneConvergence`.
- The `glass.core.algorithm` module.
- The new `partition(method="nnls")` function computes a partition with
non-negative contributions for each shell.
- Function `redshifts()` to sample redshifts following a radial window function.
### Changed
- The default method for `partition()` is now `"nnls"`.
- Both `partition(method="nnls")` and `partition(method="lstsq")` now have an
additional integral constraint so that the sum of the partition recovers the
integral of the input function.
- The output of `partition()` now has the shells axis as its first.
### Fixed
- Now uses the updated intersphinx URL for the GLASS examples.
- A bug in `effective_cls()` that caused arrays to be one entry too long if
`lmax` was not given explicitly.
- A bug in `partition()` with the default method.
- `partition()` now works correctly with functions having extra axes.
## [2023.7] (1 Aug 2023)
### Added
- Function `getcl()` to return angular power spectra by index from a list using
GLASS ordering.
- New `linear_windows()` and `cubic_windows()` window functions for shells.
### Changed
- The `gaussian_phz()` function now accepts bounds using `lower=` and `upper=`
keyword parameters.
- The `partition()` function now returns an array of weights to approximate the
given function by the windows.
## [2023.6] (30 Jun 2023)
### Added
- `deflect()` applies deflections to positions
- `from_convergence()` returns other lensing fields given the convergence
- A new `glass.ext` namespace, reserved for extensions
### Changed
- The `glass` module is no longer a namespace package
- The point sampling functions `positions_from_delta()` and
`uniform_positions()` now return an iterator
- `ellipticity_gaussian()` and `ellipticity_intnorm()` accept array inputs
- Use pyproject.toml for packaging
### Deprecated
- `shear_from_convergence()` is deprecated in favour of `from_convergence()`
### Removed
- The `glass.all` meta-module is no longer necessary
### Fixed
- Incorrect extrapolation in `glass.core.array.trapz_product()`, causing a bug
in `glass.points.effective_bias()`
## [2023.5] (31 May 2023)
### Added
- Allow dimensional input to the sampling functions in `glass.points` (#80)
- The `redshifts_from_nz()` function supports `count` arrays (#83)
### Changed
- Position sampling returns counts alongside points (#80)
- `redshifts_from_nz()` no longer returns `gal_pop` (#83)
- Move core functionality that is used by other, user-facing modules into the
`glass.core` module (#88)
### Removed
- Remove profiling functions (#89)
## [2023.2] - 1 Mar 2023
### Added
- The `glass.lensing.MultiPlaneConvergence.add_window` method to add a
convergence plane given by a window function.
- The `glass.shells` module for shell definitions.
- User functions to save and load Cls
- This changelog added to keep track of changes between versions
### Changed
- Instead of an array of shell boundaries and `MatterWeights`, the shells are
entirely defined by a `RadialWindow` window function.
- The `glass.lensing.multi_plane_matrix` function now takes a sequence of window
functions.
- The arguments of `glass.lensing.MultiPlaneConvergence.add_plane` are called
`zsrc` and `wlens` instead of the more ambiguous `z` and `w`. The properties
`MultiPlaneConvergence.z` and `MultiPlaneConvergence.w` that return these
values are similarly changed.
- The `glass.points.effective_bias` now takes a single window function as input
and computes its effective bias parameter.
- Some type hints added to library functions
### Removed
- The `glass.lensing.multi_plane_weights` function, replaced by the
`glass.lensing.MultiPlaneConvergence.add_window` method.
- The `glass.galaxies.constant_densities` and `density_from_dndz` functions,
since densities can now easily be partitioned by window functions for shells.
- The `zmin, zmax` parameters of `glass.galaxies.redshifts_from_nz`, for the
same reason.
- The `glass.math.restrict_interval` function, as shells are now defined by
window functions instead of sharp intervals.
- The `glass.matter` module, in favour of the more appropriately-named
`glass.shells` module.
## [2023.1] - 31 Jan 2023
### Added
- Initial wide release for GLASS paper
[2024.2]: https://github.com/glass-dev/glass/compare/v2024.1...v2024.2
[2024.1]: https://github.com/glass-dev/glass/compare/v2023.7...v2024.1
[2023.7]: https://github.com/glass-dev/glass/compare/v2023.6...v2023.7
[2023.6]: https://github.com/glass-dev/glass/compare/v2023.5...v2023.6
[2023.5]: https://github.com/glass-dev/glass/compare/v2023.2...v2023.5
[2023.2]: https://github.com/glass-dev/glass/compare/v2023.1...v2023.2
[2023.1]: https://github.com/glass-dev/glass/releases/tag/v2023.1
|
glass-devREPO_NAMEglassPATH_START.@glass_extracted@glass-main@CHANGELOG.md@.PATH_END.py
|
{
"filename": "_autocolorscale.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/treemap/marker/_autocolorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean `treemap.marker.autocolorscale` attribute."""

    def __init__(
        self, plotly_name="autocolorscale", parent_name="treemap.marker", **kwargs
    ):
        # Defaults mirror the plotly schema: changing this property triggers
        # a full recalculation ("calc") and carries no implied edits.
        super(AutocolorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            implied_edits=kwargs.pop("implied_edits", {}),
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@treemap@marker@_autocolorscale.py@.PATH_END.py
|
{
"filename": "bayesian_hierarchical_linear_regression.ipynb",
"repo_name": "pyro-ppl/numpyro",
"repo_path": "numpyro_extracted/numpyro-master/notebooks/source/bayesian_hierarchical_linear_regression.ipynb",
"type": "Jupyter Notebook"
}
|
# Bayesian Hierarchical Linear Regression
Author: [Carlos Souza](mailto:souza@gatech.edu)
Updated by: [Chris Stoafer](mailto:cstoafer@gmail.com)
Probabilistic Machine Learning models can not only make predictions about future data, but also **model uncertainty**. In areas such as **personalized medicine**, there might be a large amount of data, but there is still a relatively **small amount of data for each patient**. To customize predictions for each person it becomes necessary to **build a model for each person** — with its inherent **uncertainties** — and to couple these models together in a **hierarchy** so that information can be borrowed from other **similar people** [1].
The purpose of this tutorial is to demonstrate how to **implement a Bayesian Hierarchical Linear Regression model using NumPyro**. To motivate the tutorial, I will use [OSIC Pulmonary Fibrosis Progression](https://www.kaggle.com/c/osic-pulmonary-fibrosis-progression) competition, hosted at Kaggle.
## 1. Understanding the task
Pulmonary fibrosis is a disorder with no known cause and no known cure, created by scarring of the lungs. In this competition, we were asked to predict a patient’s severity of decline in lung function. Lung function is assessed based on output from a spirometer, which measures the forced vital capacity (FVC), i.e. the volume of air exhaled.
In medical applications, it is useful to **evaluate a model's confidence in its decisions**. Accordingly, the metric used to rank the teams was designed to reflect **both the accuracy and certainty of each prediction**. It's a modified version of the Laplace Log Likelihood (more details on that later).
Let's explore the data and see what's that all about:
```python
!pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro arviz
```
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
```
```python
train = pd.read_csv(
"https://gist.githubusercontent.com/ucals/"
"2cf9d101992cb1b78c2cdd6e3bac6a4b/raw/"
"43034c39052dcf97d4b894d2ec1bc3f90f3623d9/"
"osic_pulmonary_fibrosis.csv"
)
train.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Patient</th>
<th>Weeks</th>
<th>FVC</th>
<th>Percent</th>
<th>Age</th>
<th>Sex</th>
<th>SmokingStatus</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>ID00007637202177411956430</td>
<td>-4</td>
<td>2315</td>
<td>58.253649</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
<tr>
<th>1</th>
<td>ID00007637202177411956430</td>
<td>5</td>
<td>2214</td>
<td>55.712129</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
<tr>
<th>2</th>
<td>ID00007637202177411956430</td>
<td>7</td>
<td>2061</td>
<td>51.862104</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
<tr>
<th>3</th>
<td>ID00007637202177411956430</td>
<td>9</td>
<td>2144</td>
<td>53.950679</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
<tr>
<th>4</th>
<td>ID00007637202177411956430</td>
<td>11</td>
<td>2069</td>
<td>52.063412</td>
<td>79</td>
<td>Male</td>
<td>Ex-smoker</td>
</tr>
</tbody>
</table>
</div>
In the dataset, we were provided with a baseline chest CT scan and associated clinical information for a set of patients. A patient has an image acquired at time Week = 0 and has numerous follow up visits over the course of approximately 1-2 years, at which time their FVC is measured. For this tutorial, I will use only the Patient ID, the weeks and the FVC measurements, discarding all the rest. Using only these columns enabled our team to achieve a competitive score, which shows the power of Bayesian hierarchical linear regression models especially when gauging uncertainty is an important part of the problem.
Since this is real medical data, the relative timing of FVC measurements varies widely, as shown in the 3 sample patients below:
```python
def chart_patient(patient_id, ax):
    # Plot one patient's FVC measurements against weeks with a linear
    # regression line (ci=None suppresses the confidence band) to show the
    # decline trend. Reads the notebook-global `train` DataFrame.
    data = train[train["Patient"] == patient_id]
    x = data["Weeks"]
    y = data["FVC"]
    ax.set_title(patient_id)
    sns.regplot(x=x, y=y, ax=ax, ci=None, line_kws={"color": "red"})


# Show three sample patients side by side.
f, axes = plt.subplots(1, 3, figsize=(15, 5))
chart_patient("ID00007637202177411956430", axes[0])
chart_patient("ID00009637202177434476278", axes[1])
chart_patient("ID00010637202177584971671", axes[2])
```

On average, each of the 176 provided patients made 9 visits, when FVC was measured. The visits happened in specific weeks in the [-12, 133] interval. The decline in lung capacity is very clear. We see, though, they are very different from patient to patient.
We were asked to predict every patient's FVC measurement for every possible week in the [-12, 133] interval, and the confidence for each prediction. In other words: we were asked to fill a matrix like the one below, and provide a confidence score for each prediction:
<img src="https://i.ibb.co/0Z9kW8H/matrix-completion.jpg" alt="drawing" width="600"/>
The task was a perfect fit for Bayesian inference. However, the vast majority of solutions shared by the Kaggle community used discriminative machine learning models, disregarding the fact that most discriminative methods are very poor at providing realistic uncertainty estimates. Because they are typically trained in a manner that optimizes the parameters to minimize some loss criterion (e.g. the predictive error), they do not, in general, encode any uncertainty in either their parameters or the subsequent predictions. Though many methods can produce uncertainty estimates either as a by-product or from a post-processing step, these are typically heuristic-based, rather than stemming naturally from a statistically principled estimate of the target uncertainty distribution [2].
## 2. Modelling: Bayesian Hierarchical Linear Regression with Partial Pooling
The simplest possible linear regression, not hierarchical, would assume all FVC decline curves have the same $\alpha$ and $\beta$. That's the **pooled model**. In the other extreme, we could assume a model where each patient has a personalized FVC decline curve, and **these curves are completely unrelated**. That's the **unpooled model**, where each patient has completely separate regressions.
Here, I'll use the middle ground: **Partial pooling**. Specifically, I'll assume that while $\alpha$'s and $\beta$'s are different for each patient as in the unpooled case, **the coefficients all share similarity**. We can model this by assuming that each individual coefficient comes from a common group distribution. The image below represents this model graphically:
<img src="https://i.ibb.co/H7NgBfR/Artboard-2-2x-100.jpg" alt="drawing" width="600"/>
Mathematically, the model is described by the following equations:
\begin{align}
\mu_{\alpha} &\sim \text{Normal}(0, 500) \\
\sigma_{\alpha} &\sim \text{Half-Normal}(100) \\
\mu_{\beta} &\sim \text{Normal}(0, 3) \\
\sigma_{\beta} &\sim \text{Half-Normal}(3) \\
\alpha_i &\sim \text{Normal}(\mu_{\alpha}, \sigma_{\alpha}) \\
\beta_i &\sim \text{Normal}(\mu_{\beta}, \sigma_{\beta}) \\
\sigma &\sim \text{Half-Normal}(100) \\
FVC_{ij} &\sim \text{Normal}(\alpha_i + t \beta_i, \sigma)
\end{align}
where *t* is the time in weeks. Those are very uninformative priors, but that's ok: our model will converge!
Implementing this model in NumPyro is pretty straightforward:
```python
from jax import random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS, Predictive
assert numpyro.__version__.startswith("0.16.1")
```
```python
def model(patient_code, Weeks, FVC_obs=None):
    # Population-level hyper-priors for the per-patient intercepts (α) and
    # slopes (β) of the linear FVC-decline model.
    μ_α = numpyro.sample("μ_α", dist.Normal(0.0, 500.0))
    σ_α = numpyro.sample("σ_α", dist.HalfNormal(100.0))
    μ_β = numpyro.sample("μ_β", dist.Normal(0.0, 3.0))
    σ_β = numpyro.sample("σ_β", dist.HalfNormal(3.0))
    n_patients = len(np.unique(patient_code))
    # Per-patient coefficients, partially pooled through the shared
    # hyper-priors above.
    with numpyro.plate("plate_i", n_patients):
        α = numpyro.sample("α", dist.Normal(μ_α, σ_α))
        β = numpyro.sample("β", dist.Normal(μ_β, σ_β))
    # Common observation noise and the per-visit Gaussian likelihood
    # around the patient-specific linear trend.
    σ = numpyro.sample("σ", dist.HalfNormal(100.0))
    FVC_est = α[patient_code] + β[patient_code] * Weeks
    with numpyro.plate("data", len(patient_code)):
        numpyro.sample("obs", dist.Normal(FVC_est, σ), obs=FVC_obs)
```
That's all for modelling!
## 3. Fitting the model
A great achievement of Probabilistic Programming Languages such as NumPyro is to decouple model specification and inference. After specifying my generative model, with priors, condition statements and data likelihood, I can leave the hard work to NumPyro's inference engine.
Calling it requires just a few lines. Before we do it, let's add a numerical Patient ID for each patient code. That can be easily done with scikit-learn's LabelEncoder:
```python
from sklearn.preprocessing import LabelEncoder
patient_encoder = LabelEncoder()
train["patient_code"] = patient_encoder.fit_transform(train["Patient"].values)
FVC_obs = train["FVC"].values
Weeks = train["Weeks"].values
patient_code = train["patient_code"].values
```
Now, calling NumPyro's inference engine:
```python
nuts_kernel = NUTS(model)
mcmc = MCMC(nuts_kernel, num_samples=2000, num_warmup=2000)
rng_key = random.PRNGKey(0)
mcmc.run(rng_key, patient_code, Weeks, FVC_obs=FVC_obs)
posterior_samples = mcmc.get_samples()
```
sample: 100%|██████████████████████████████████| 4000/4000 [00:51<00:00, 77.93it/s, 255 steps of size 1.48e-02. acc. prob=0.92]
## 4. Checking the model
### 4.1. Inspecting the learned parameters
First, let's inspect the parameters learned. To do that, I will use [ArviZ](https://arviz-devs.github.io/arviz/), which perfectly integrates with NumPyro:
```python
import arviz as az
data = az.from_numpyro(mcmc)
az.plot_trace(data, compact=True, figsize=(15, 25));
```

Looks like our model learned personalized alphas and betas for each patient!
### 4.2. Visualizing FVC decline curves for some patients
Now, let's visually inspect FVC decline curves predicted by our model. We will completely fill in the FVC table, predicting all missing values. The first step is to create a table to fill:
```python
def create_prediction_template(unique_patient_df, weeks_series):
    """Return the cartesian product of patients and prediction weeks.

    Builds one row per (patient, week) pair so that every week in
    ``weeks_series`` can be predicted for every patient.

    Args:
        unique_patient_df: DataFrame with one row per patient
            (e.g. columns ``Patient`` and ``patient_code``).
        weeks_series: sequence/Series of week numbers to predict.

    Returns:
        DataFrame with the patient columns plus a ``Weeks`` column.
    """
    # A cross merge yields the cartesian product directly and, unlike the
    # previous dummy "_temp"-key trick, does not mutate the caller's
    # DataFrame (the old version left a ``_temp`` column behind in
    # ``unique_patient_df``).
    weeks = pd.DataFrame({"Weeks": pd.Series(weeks_series).reset_index(drop=True)})
    return unique_patient_df.merge(weeks, how="cross")
```
```python
patients = train[["Patient", "patient_code"]].drop_duplicates()
start_week_number = -12
end_week_number = 134
predict_weeks = pd.Series(np.arange(start_week_number, end_week_number))
pred_template = create_prediction_template(patients, predict_weeks)
```
Predicting the missing values in the FVC table and confidence (sigma) for each value becomes really easy:
```python
patient_code = pred_template["patient_code"].values
Weeks = pred_template["Weeks"].values
predictive = Predictive(model, posterior_samples, return_sites=["σ", "obs"])
samples_predictive = predictive(random.PRNGKey(0), patient_code, Weeks, None)
```
Note that for [`Predictive`](http://num.pyro.ai/en/latest/utilities.html#numpyro.infer.util.Predictive) to work as expected, the response variable of the model (in this case, `FVC_obs`) must be set to `None`.
Let's now put the predictions together with the true values, to visualize them:
```python
df = pred_template.copy()
df["FVC_pred"] = samples_predictive["obs"].T.mean(axis=1)
df["sigma"] = samples_predictive["obs"].T.std(axis=1)
df["FVC_inf"] = df["FVC_pred"] - df["sigma"]
df["FVC_sup"] = df["FVC_pred"] + df["sigma"]
df = pd.merge(
df, train[["Patient", "Weeks", "FVC"]], how="left", on=["Patient", "Weeks"]
)
df = df.rename(columns={"FVC": "FVC_true"})
df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Patient</th>
<th>patient_code</th>
<th>Weeks</th>
<th>FVC_pred</th>
<th>sigma</th>
<th>FVC_inf</th>
<th>FVC_sup</th>
<th>FVC_true</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-12</td>
<td>2226.545166</td>
<td>160.158493</td>
<td>2066.386719</td>
<td>2386.703613</td>
<td>NaN</td>
</tr>
<tr>
<th>1</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-11</td>
<td>2216.172852</td>
<td>160.390778</td>
<td>2055.781982</td>
<td>2376.563721</td>
<td>NaN</td>
</tr>
<tr>
<th>2</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-10</td>
<td>2219.136963</td>
<td>155.339615</td>
<td>2063.797363</td>
<td>2374.476562</td>
<td>NaN</td>
</tr>
<tr>
<th>3</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-9</td>
<td>2214.727051</td>
<td>153.333313</td>
<td>2061.393799</td>
<td>2368.060303</td>
<td>NaN</td>
</tr>
<tr>
<th>4</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-8</td>
<td>2208.758545</td>
<td>157.368637</td>
<td>2051.389893</td>
<td>2366.127197</td>
<td>NaN</td>
</tr>
</tbody>
</table>
</div>
Finally, let's see our predictions for 3 patients:
```python
def chart_patient_with_predictions(patient_id, ax):
    """Plot one patient's observed FVC, the model's predicted mean with a
    one-sigma band, and a deterministic linear fit for reference.

    Reads the module-level ``df`` (predictions merged with true values)
    and draws onto the supplied matplotlib axes ``ax``.
    """
    patient_df = df[df["Patient"] == patient_id]
    weeks = patient_df["Weeks"]
    ax.set_title(patient_id)
    # Observed measurements as points, posterior-mean prediction as a line.
    ax.plot(weeks, patient_df["FVC_true"], "o")
    ax.plot(weeks, patient_df["FVC_pred"])
    # Red line: plain least-squares regression for comparison.
    ax = sns.regplot(
        x=weeks,
        y=patient_df["FVC_true"],
        ax=ax,
        ci=None,
        line_kws={"color": "red"},
    )
    # Shade one sigma above/below the predicted mean.
    ax.fill_between(
        weeks,
        patient_df["FVC_inf"],
        patient_df["FVC_sup"],
        alpha=0.5,
        color="#ffcd3c",
    )
    ax.set_ylabel("FVC")
f, axes = plt.subplots(1, 3, figsize=(15, 5))
chart_patient_with_predictions("ID00007637202177411956430", axes[0])
chart_patient_with_predictions("ID00009637202177434476278", axes[1])
chart_patient_with_predictions("ID00011637202177653955184", axes[2])
```

The results are exactly what we expected to see! Highlight observations:
- The model adequately learned Bayesian Linear Regressions! The orange line (learned predicted FVC mean) is closely in line with the red line (deterministic linear regression). But most importantly: it learned to predict uncertainty, shown in the light orange region (one sigma above and below the mean FVC line)
- The model predicts a higher uncertainty where the data points are more disperse (1st and 3rd patients). Conversely, where the points are closely grouped together (2nd patient), the model predicts a higher confidence (narrower light orange region)
- Finally, in all patients, we can see that the uncertainty grows as we look further into the future: the light orange region widens as the # of weeks grows!
### 4.3. Computing the modified Laplace Log Likelihood and RMSE
As mentioned earlier, the competition was evaluated on a modified version of the Laplace Log Likelihood. In medical applications, it is useful to evaluate a model's confidence in its decisions. Accordingly, the metric is designed to reflect both the accuracy and certainty of each prediction.
For each true FVC measurement, we predicted both an FVC and a confidence measure (standard deviation $\sigma$). The metric was computed as:
\begin{align}
\sigma_{clipped} &= max(\sigma, 70) \\
\delta &= min(|FVC_{true} - FVC_{pred}|, 1000) \\
metric &= -\dfrac{\sqrt{2}\delta}{\sigma_{clipped}} - \ln(\sqrt{2} \sigma_{clipped})
\end{align}
The error was thresholded at 1000 ml to avoid large errors adversely penalizing results, while the confidence values were clipped at 70 ml to reflect the approximate measurement uncertainty in FVC. The final score was calculated by averaging the metric across all (Patient, Week) pairs. Note that metric values will be negative and higher is better.
Next, we calculate the metric and RMSE:
```python
y = df.dropna()
rmse = ((y["FVC_pred"] - y["FVC_true"]) ** 2).mean() ** (1 / 2)
print(f"RMSE: {rmse:.1f} ml")
sigma_c = y["sigma"].values
sigma_c[sigma_c < 70] = 70
delta = (y["FVC_pred"] - y["FVC_true"]).abs()
delta[delta > 1000] = 1000
lll = -np.sqrt(2) * delta / sigma_c - np.log(np.sqrt(2) * sigma_c)
print(f"Laplace Log Likelihood: {lll.mean():.4f}")
```
RMSE: 122.3 ml
Laplace Log Likelihood: -6.1406
What do these numbers mean? It means if you adopted this approach, you would **outperform most of the public solutions** in the competition. Curiously, the vast majority of public solutions adopt a standard deterministic Neural Network, modelling uncertainty through a quantile loss. **Most of the people still adopt a frequentist approach**.
**Uncertainty** for single predictions becomes more and more important in machine learning and is often a requirement. **Especially when the consequences of a wrong prediction are high**, we need to know what the probability distribution of an individual prediction is. For perspective, Kaggle just launched a new competition sponsored by Lyft, to build motion prediction models for self-driving vehicles. "We ask that you predict a few trajectories for every agent **and provide a confidence score for each of them**."
## 5. Add layer to model hierarchy: Smoking Status
We can extend the model by including the column `SmokingStatus` as a pooling level, where model parameters will be partially pooled by the groups "Never smoked", "Ex-smoker", and "Currently smokes". To do this, we need to:
1. Encode the `SmokingStatus` column
2. Map patient encoding to smoking status encodings
3. Refine and retrain the model with the additional hierarchy
```python
train["SmokingStatus"].value_counts()
```
Ex-smoker 1038
Never smoked 429
Currently smokes 82
Name: SmokingStatus, dtype: int64
```python
patient_code = train["patient_code"].values
Weeks = train["Weeks"].values
```
```python
smoking_status_encoder = LabelEncoder()
train["smoking_status_code"] = smoking_status_encoder.fit_transform(
train["SmokingStatus"]
)
smoking_status_code = train["smoking_status_code"].values
```
```python
map_patient_to_smoking_status = (
train[["patient_code", "smoking_status_code"]]
.drop_duplicates()
.set_index("patient_code", verify_integrity=True)
.sort_index()["smoking_status_code"]
.values
)
```
```python
def model_smoking_hierarchy(
    patient_code, Weeks, map_patient_to_smoking_status, FVC_obs=None
):
    """Three-level hierarchical linear model of FVC over time.

    Patient-level intercepts (α) and slopes (β) are partially pooled
    within smoking-status groups, which are themselves pooled under
    global hyperpriors.

    Args:
        patient_code: int array mapping each observation to a patient index.
        Weeks: array of week numbers, one per observation.
        map_patient_to_smoking_status: int array mapping each patient index
            to its smoking-status group index.
        FVC_obs: observed FVC values; pass ``None`` to sample from the
            (posterior) predictive distribution.
    """
    # Global hyperpriors over the group-level intercept/slope means.
    μ_α_global = numpyro.sample("μ_α_global", dist.Normal(0.0, 500.0))
    σ_α_global = numpyro.sample("σ_α_global", dist.HalfNormal(100.0))
    μ_β_global = numpyro.sample("μ_β_global", dist.Normal(0.0, 3.0))
    σ_β_global = numpyro.sample("σ_β_global", dist.HalfNormal(3.0))

    n_patients = len(np.unique(patient_code))
    n_smoking_statuses = len(np.unique(map_patient_to_smoking_status))

    # Group level: one intercept/slope mean per smoking status, drawn from
    # the global hyperpriors.
    with numpyro.plate("plate_smoking_status", n_smoking_statuses):
        μ_α_smoking_status = numpyro.sample(
            "μ_α_smoking_status", dist.Normal(μ_α_global, σ_α_global)
        )
        μ_β_smoking_status = numpyro.sample(
            "μ_β_smoking_status", dist.Normal(μ_β_global, σ_β_global)
        )

    # Patient level: each patient's α/β is centered on its smoking-status
    # group mean.  NOTE(review): the global scales σ_α_global / σ_β_global
    # are reused here rather than separate within-group scales — confirm
    # this pooling choice is intended.
    with numpyro.plate("plate_i", n_patients):
        α = numpyro.sample(
            "α",
            dist.Normal(μ_α_smoking_status[map_patient_to_smoking_status], σ_α_global),
        )
        β = numpyro.sample(
            "β",
            dist.Normal(μ_β_smoking_status[map_patient_to_smoking_status], σ_β_global),
        )

    # Observation noise and per-observation linear predictor.
    σ = numpyro.sample("σ", dist.HalfNormal(100.0))
    FVC_est = α[patient_code] + β[patient_code] * Weeks
    with numpyro.plate("data", len(patient_code)):
        numpyro.sample("obs", dist.Normal(FVC_est, σ), obs=FVC_obs)
```
### Reparameterize the model
Hierarchical models often need to be reparameterized to enable MCMC to explore the full parameter space. NumPyro's `LocScaleReparam` is used to do this below. For more details, see [bad_posterior_geometry.ipynb](https://num.pyro.ai/en/stable/tutorials/bad_posterior_geometry.html) and [funnel.py](https://num.pyro.ai/en/stable/examples/funnel.html). Thomas Wiecki also has a [great post](https://twiecki.io/blog/2017/02/08/bayesian-hierchical-non-centered/) about developing non-centered models.
```python
from numpyro.handlers import reparam
from numpyro.infer.reparam import LocScaleReparam
reparam_config = {
"μ_α_smoking_status": LocScaleReparam(0),
"μ_β_smoking_status": LocScaleReparam(0),
"α": LocScaleReparam(0),
"β": LocScaleReparam(0),
}
reparam_model_smoking_hierarchy = reparam(
model_smoking_hierarchy, config=reparam_config
)
```
```python
nuts_kernel = NUTS(reparam_model_smoking_hierarchy, target_accept_prob=0.97)
mcmc = MCMC(nuts_kernel, num_samples=3000, num_warmup=5000)
rng_key = random.PRNGKey(0)
mcmc.run(rng_key, patient_code, Weeks, map_patient_to_smoking_status, FVC_obs=FVC_obs)
posterior_samples = mcmc.get_samples()
```
sample: 100%|█████████████████████████████████| 8000/8000 [03:55<00:00, 33.99it/s, 1023 steps of size 5.68e-03. acc. prob=0.97]
### 5.1. Inspect the learned parameters
```python
data = az.from_numpyro(mcmc)
az.plot_trace(data, compact=True, figsize=(15, 45));
```

#### Smoking Status distributions
Adding a legend for the smoking status distributions to help interpret the model results for that level.
| Smoking Status | Code |
|-------------------|------|
| Currently smokes | 0 |
| Ex-smoker | 1 |
| Never smoked | 2 |
```python
# Check the label code for each SmokingStatus
smoking_status_encoder.inverse_transform([0, 1, 2])
```
array(['Currently smokes', 'Ex-smoker', 'Never smoked'], dtype=object)
```python
axes = az.plot_trace(
data,
var_names=["μ_α_smoking_status", "μ_β_smoking_status"],
legend=True,
compact=True,
figsize=(15, 15),
)
# The legend handles were not working for the first plot
axes[0, 0].legend();
```
WARNING:matplotlib.legend:No artists with labels found to put in legend. Note that artists whose label start with an underscore are ignored when legend() is called with no argument.

### Interpret smoking status model parameters
The model parameters for each smoking status show interesting results, especially for trend, `μ_β_smoking_status`. In the trace plots above and summary table below the trend for current smokers, `μ_β_smoking_status[0]`, has a positive mean, whereas the trend for patients that are ex-smokers and those that have never smoked are negative, `μ_β_smoking_status[1]` and `μ_β_smoking_status[2]`.
```python
trace = az.from_numpyro(mcmc)
az.summary(
trace,
var_names=["μ_α_global", "μ_β_global", "μ_α_smoking_status", "μ_β_smoking_status"],
)
```
Shape validation failed: input_shape: (1, 3000), minimum_shape: (chains=2, draws=4)
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>mean</th>
<th>sd</th>
<th>hdi_3%</th>
<th>hdi_97%</th>
<th>mcse_mean</th>
<th>mcse_sd</th>
<th>ess_bulk</th>
<th>ess_tail</th>
<th>r_hat</th>
</tr>
</thead>
<tbody>
<tr>
<th>μ_α_global</th>
<td>1660.172</td>
<td>309.657</td>
<td>1118.038</td>
<td>2274.933</td>
<td>6.589</td>
<td>4.660</td>
<td>2203.0</td>
<td>2086.0</td>
<td>NaN</td>
</tr>
<tr>
<th>μ_β_global</th>
<td>-1.252</td>
<td>2.062</td>
<td>-5.014</td>
<td>2.678</td>
<td>0.037</td>
<td>0.035</td>
<td>3040.0</td>
<td>2041.0</td>
<td>NaN</td>
</tr>
<tr>
<th>μ_α_smoking_status[0]</th>
<td>2970.486</td>
<td>227.761</td>
<td>2572.943</td>
<td>3429.343</td>
<td>7.674</td>
<td>5.452</td>
<td>878.0</td>
<td>1416.0</td>
<td>NaN</td>
</tr>
<tr>
<th>μ_α_smoking_status[1]</th>
<td>2907.950</td>
<td>68.011</td>
<td>2782.993</td>
<td>3035.172</td>
<td>5.209</td>
<td>3.698</td>
<td>171.0</td>
<td>281.0</td>
<td>NaN</td>
</tr>
<tr>
<th>μ_α_smoking_status[2]</th>
<td>2475.281</td>
<td>102.948</td>
<td>2286.072</td>
<td>2671.298</td>
<td>6.181</td>
<td>4.381</td>
<td>278.0</td>
<td>566.0</td>
<td>NaN</td>
</tr>
<tr>
<th>μ_β_smoking_status[0]</th>
<td>2.061</td>
<td>1.713</td>
<td>-1.278</td>
<td>5.072</td>
<td>0.032</td>
<td>0.024</td>
<td>2797.0</td>
<td>2268.0</td>
<td>NaN</td>
</tr>
<tr>
<th>μ_β_smoking_status[1]</th>
<td>-4.625</td>
<td>0.498</td>
<td>-5.566</td>
<td>-3.721</td>
<td>0.010</td>
<td>0.007</td>
<td>2309.0</td>
<td>2346.0</td>
<td>NaN</td>
</tr>
<tr>
<th>μ_β_smoking_status[2]</th>
<td>-4.513</td>
<td>0.789</td>
<td>-6.011</td>
<td>-3.056</td>
<td>0.016</td>
<td>0.011</td>
<td>2466.0</td>
<td>2494.0</td>
<td>NaN</td>
</tr>
</tbody>
</table>
</div>
Let's look at these curves for individual patients to help interpret these model results.
### 5.2. Visualizing FVC decline curves for some patients
```python
patient_code = pred_template["patient_code"].values
Weeks = pred_template["Weeks"].values
predictive = Predictive(
reparam_model_smoking_hierarchy, posterior_samples, return_sites=["σ", "obs"]
)
samples_predictive = predictive(
random.PRNGKey(0), patient_code, Weeks, map_patient_to_smoking_status, None
)
```
```python
df = pred_template.copy()
df["FVC_pred"] = samples_predictive["obs"].T.mean(axis=1)
df["sigma"] = samples_predictive["obs"].T.std(axis=1)
df["FVC_inf"] = df["FVC_pred"] - df["sigma"]
df["FVC_sup"] = df["FVC_pred"] + df["sigma"]
df = pd.merge(
df, train[["Patient", "Weeks", "FVC"]], how="left", on=["Patient", "Weeks"]
)
df = df.rename(columns={"FVC": "FVC_true"})
df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Patient</th>
<th>patient_code</th>
<th>Weeks</th>
<th>FVC_pred</th>
<th>sigma</th>
<th>FVC_inf</th>
<th>FVC_sup</th>
<th>FVC_true</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-12</td>
<td>2229.098877</td>
<td>157.880753</td>
<td>2071.218018</td>
<td>2386.979736</td>
<td>NaN</td>
</tr>
<tr>
<th>1</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-11</td>
<td>2225.022461</td>
<td>157.358429</td>
<td>2067.664062</td>
<td>2382.380859</td>
<td>NaN</td>
</tr>
<tr>
<th>2</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-10</td>
<td>2224.487549</td>
<td>155.416016</td>
<td>2069.071533</td>
<td>2379.903564</td>
<td>NaN</td>
</tr>
<tr>
<th>3</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-9</td>
<td>2212.780518</td>
<td>154.162155</td>
<td>2058.618408</td>
<td>2366.942627</td>
<td>NaN</td>
</tr>
<tr>
<th>4</th>
<td>ID00007637202177411956430</td>
<td>0</td>
<td>-8</td>
<td>2219.202393</td>
<td>154.729507</td>
<td>2064.472900</td>
<td>2373.931885</td>
<td>NaN</td>
</tr>
</tbody>
</table>
</div>
```python
f, axes = plt.subplots(1, 3, figsize=(15, 5))
chart_patient_with_predictions("ID00048637202185016727717", axes[0]) # Never smoked
chart_patient_with_predictions("ID00342637202287526592911", axes[1]) # Ex-smoker
chart_patient_with_predictions("ID00331637202286306023714", axes[2]) # Currently smokes
```

### Review patients that currently smoke
By plotting each patient with the smoking status "Currently smokes", we see some patients with a clear positive trend and others without a clear trend or negative trend. The trend lines are less overfit than the unpooled trend lines and show relatively large uncertainty in the slope and intercept. Depending on the model use case, we could proceed in different ways:
- If we just wanted to get an understanding of different attributes as they relate to patient's FVC over time, we could stop here with an understanding that current smokers might have an increase in FVC over time when being monitored for Pulmonary Fibrosis. We might hypothesize causes for this observation to design a new experiment to test that hypothesis.
- If we wanted to develop a model that generates predictions used to treat patients, then we will want to make sure we are not overfitting so that we can trust the model with new patients. We might adjust model parameters to shrink the "Currently smokes" group model parameters to be closer to global parameters or even combine the group with "Ex-smokers". We could look into collecting more data for current smokers to help ensure the model is not overfitting.
```python
f, axes = plt.subplots(3, 3, figsize=(15, 15))
for i, patient in enumerate(
train[train["SmokingStatus"] == "Currently smokes"]["Patient"].unique()
):
chart_patient_with_predictions(patient, axes.flatten()[i])
```

### 5.3 Modified Laplace Log Likelihood and RMSE for model with Smoking Status Level
We calculate the metrics for the updated model and compare to the original model.
```python
y = df.dropna()
rmse = ((y["FVC_pred"] - y["FVC_true"]) ** 2).mean() ** (1 / 2)
print(f"RMSE: {rmse:.1f} ml")
sigma_c = y["sigma"].values
sigma_c[sigma_c < 70] = 70
delta = (y["FVC_pred"] - y["FVC_true"]).abs()
delta[delta > 1000] = 1000
lll = -np.sqrt(2) * delta / sigma_c - np.log(np.sqrt(2) * sigma_c)
print(f"Laplace Log Likelihood: {lll.mean():.4f}")
```
RMSE: 122.6 ml
Laplace Log Likelihood: -6.1420
Both the Laplace Log Likelihood and RMSE show slightly worse performance for the smoking status model. We've learned that adding this hierarchy level as-is did not improve model performance, but we did find some interesting results from the smoking status level that might be worth investigating. In addition, we might try to adjust priors or try a different level (e.g. gender) to improve model performance.
## Wrap-up
Finally, I hope the great work done by Pyro/NumPyro developers help democratize Bayesian methods, empowering an ever growing community of researchers and practitioners to create models that can not only generate predictions, but also assess uncertainty in their predictions.
## References
1. Ghahramani, Z. Probabilistic machine learning and artificial intelligence. Nature 521, 452–459 (2015). https://doi.org/10.1038/nature14541
2. Rainforth, Thomas William Gamlen. Automating Inference, Learning, and Design Using Probabilistic Programming. University of Oxford, 2017.
|
pyro-pplREPO_NAMEnumpyroPATH_START.@numpyro_extracted@numpyro-master@notebooks@source@bayesian_hierarchical_linear_regression.ipynb@.PATH_END.py
|
{
"filename": "test_ned_remote.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/ipac/ned/tests/test_ned_remote.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.table import Table
from astroquery.ipac import ned
@pytest.mark.remote_data
class TestNed:
    """Remote-data smoke tests for the astroquery NED interface.

    Every test talks to the live NED service.  The ``*_async`` variants
    only check that a response object comes back; the synchronous
    variants additionally check that the parsed result is an
    `~astropy.table.Table`.
    """

    def test_get_references(self):
        """Reference-table queries, including year-range and extended search."""
        response = ned.core.Ned.get_table_async(
            "m1", table='references', from_year=2010)
        assert response is not None
        result = ned.core.Ned.get_table(
            "m1", table='references', to_year=2012, extended_search=True)
        assert isinstance(result, Table)

    def test_get_positions_async(self):
        """Async positions-table query returns a response."""
        response = ned.core.Ned.get_table_async("m1", table='positions')
        assert response is not None

    def test_get_positions(self):
        """Positions-table query parses into a Table."""
        result = ned.core.Ned.get_table("m1", table='positions')
        assert isinstance(result, Table)

    def test_get_redshifts_async(self):
        """Async redshifts-table query returns a response."""
        response = ned.core.Ned.get_table_async("3c 273", table='redshifts')
        assert response is not None

    def test_get_redshifts(self):
        """Redshifts-table query parses into a Table."""
        result = ned.core.Ned.get_table("3c 273", table='redshifts')
        assert isinstance(result, Table)

    def test_get_photometry_async(self):
        """Async photometry-table query returns a response."""
        response = ned.core.Ned.get_table_async("3C 273", table='photometry')
        assert response is not None

    def test_photometry(self):
        """Photometry-table query parses into a Table."""
        result = ned.core.Ned.get_table("3c 273", table='photometry')
        assert isinstance(result, Table)

    def test_get_image_list(self):
        """Image list for an object is non-empty."""
        response = ned.core.Ned.get_image_list('m1')
        assert len(response) > 0

    def test_get_images_async(self):
        """Async image fetch returns readable objects."""
        readable_objs = ned.core.Ned.get_images_async('m1')
        assert readable_objs is not None

    def test_get_images(self):
        """Synchronous image fetch returns FITS images."""
        fits_images = ned.core.Ned.get_images('m1')
        assert fits_images is not None

    def test_query_refcode_async(self):
        """Async query by bibliographic refcode returns a response."""
        response = ned.core.Ned.query_refcode_async('1997A&A...323...31K')
        assert response is not None

    def test_query_refcode(self):
        """Query by bibliographic refcode parses into a Table."""
        result = ned.core.Ned.query_refcode('1997A&A...323...31K')
        assert isinstance(result, Table)

    def test_query_region_iau_async(self):
        """Async region query by IAU-format name returns a response."""
        response = ned.core.Ned.query_region_iau_async('1234-423')
        assert response is not None

    def test_query_region_iau(self):
        """Region query by IAU-format name parses into a Table."""
        result = ned.core.Ned.query_region_iau('1234-423')
        assert isinstance(result, Table)

    def test_query_region_async(self):
        """Async region query by sexagesimal coordinates returns a response."""
        response = ned.core.Ned.query_region_async("05h35m17.3s +22d00m52.2s")
        assert response is not None

    def test_query_region(self):
        """Region query by object name parses into a Table."""
        result = ned.core.Ned.query_region("m1")
        assert isinstance(result, Table)

    def test_query_object_async(self):
        """Async single-object query returns a response."""
        response = ned.core.Ned.query_object_async('m1')
        assert response is not None

    def test_query_object(self):
        """Single-object query parses into a Table."""
        result = ned.core.Ned.query_object('m1')
        assert isinstance(result, Table)

    def test_get_object_notes_async(self):
        """Async object-notes-table query returns a response."""
        response = ned.core.Ned.get_table_async('m1', table='object_notes')
        assert response is not None

    def test_get_object_notes(self):
        """Object-notes-table query parses into a Table."""
        result = ned.core.Ned.get_table('3c 273', table='object_notes')
        assert isinstance(result, Table)

    def test_file_format(self):
        """Spectra listings honor the requested file format.

        The expected counts (3 ascii, 1 fits) reflect the NED holdings
        for NGC6060 at the time this test was written.
        """
        result_ascii = ned.core.Ned.get_image_list('NGC6060', item='spectra',
                                                   file_format='NED-ascii')
        result_fits = ned.core.Ned.get_image_list('NGC6060', item='spectra',
                                                  file_format='fits')
        assert len(result_ascii) == 3
        assert len(result_fits) == 1
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@ipac@ned@tests@test_ned_remote.py@.PATH_END.py
|
{
"filename": "example_filament_profile.py",
"repo_name": "e-koch/FilFinder",
"repo_path": "FilFinder_extracted/FilFinder-master/examples/example_filament_profile.py",
"type": "Python"
}
|
# Licensed under an MIT open source license - see LICENSE
from fil_finder import fil_finder_2D
from fil_finder.width_profiles import filament_profile
from fil_finder.utilities import eight_con
from astropy.io import fits
import astropy.units as u
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as p
'''
Example of the new radial profiling code for FilFinder.
This functionality is still in testing!
'''
# Load the example image; the FITS file must be in the working directory.
hdu = fits.open("filaments_updatedhdr.fits")[0]
img, hdr = hdu.data, hdu.header
# Add some noise (seeded so the example is reproducible).
np.random.seed(500)
noiseimg = img + np.random.normal(0, 0.05, size=img.shape)
# We need the finalized skeletons, so run the first few portions of the
# normal algorithm.
test = fil_finder_2D(noiseimg, header=hdr, beamwidth=10.0*u.arcsec,
                     flatten_thresh=95, distance=260*u.pc,
                     glob_thresh=20)
test.create_mask(verbose=False, border_masking=False, size_thresh=430)
test.medskel(verbose=False)
test.analyze_skeletons(verbose=False)
# Now choose one of the longest-path skeletons from the labeled array.
labels, num = nd.label(test.skeleton_longpath, eight_con())
# Number 3 isn't too long and is in a relatively uncrowded region.
# We enable verbose, which will plot the profile normal to each pixel in the
# skeleton.
# The noise parameter allows an array, of the same shape as the image, to be
# passed and used as weights in the fitting.
# NOTE: If you don't want a million plots showing up, uncomment the next line:
# p.ion()
# NOTE(review): the distance here (250 pc) differs from the 260 pc passed to
# fil_finder_2D above -- confirm which value is intended.
dists, profiles, extents, fit_table = \
    filament_profile(labels == 3, noiseimg, hdr, max_dist=0.14*u.pc,
                     distance=250.*u.pc, bright_unit="", noise=None,
                     verbose=True)
# The fits aren't perfect (fitting is limited to a fairly naive gaussian right
# now), but the evolution in the profile shape is quite evident!
|
e-kochREPO_NAMEFilFinderPATH_START.@FilFinder_extracted@FilFinder-master@examples@example_filament_profile.py@.PATH_END.py
|
{
"filename": "demo_VEH_MAN_10t.py",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/src/demos/python/vehicle/demo_VEH_MAN_10t.py",
"type": "Python"
}
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# MAN 10t vehicle model demo.
#
# The vehicle reference frame has Z up, X towards the front of the vehicle, and
# Y pointing to the left.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.irrlicht as irr
import pychrono.vehicle as veh
import math
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
veh.SetDataPath(chrono.GetChronoDataPath() + 'vehicle/')
# Initial vehicle location and orientation (Z is up; chassis starts 0.7 m above the ground)
initLoc = chrono.ChVector3d(0, 0, 0.7)
initRot = chrono.ChQuaterniond(1, 0, 0, 0)  # identity quaternion: no initial rotation
# Visualization type for vehicle parts (PRIMITIVES, MESH, or NONE)
chassis_vis_type = veh.VisualizationType_MESH
suspension_vis_type = veh.VisualizationType_PRIMITIVES
steering_vis_type = veh.VisualizationType_PRIMITIVES
wheel_vis_type = veh.VisualizationType_MESH
tire_vis_type = veh.VisualizationType_MESH
# Collision type for chassis (PRIMITIVES, MESH, or NONE)
chassis_collision_type = veh.CollisionType_NONE
# Type of tire model (RIGID, TMEASY)
tire_model = veh.TireModelType_TMEASY
# Rigid terrain
terrainHeight = 0      # terrain height (FLAT terrain only)
terrainLength = 200.0  # size in X direction
terrainWidth = 200.0   # size in Y direction
# Point on chassis tracked by the chase camera (in the vehicle reference frame)
trackPoint = chrono.ChVector3d(-3.0, 0.0, 1.75)
# Contact method (SMC = penalty-based smooth contact)
contact_method = chrono.ChContactMethod_SMC
contact_vis = False
# Simulation step sizes
step_size = 1e-3
tire_step_size = step_size
# Simulation end time
t_end = 1000
# Time interval between two render frames
render_step_size = 1.0 / 50  # FPS = 50
# =============================================================================
#print ( "Copyright (c) 2017 projectchrono.org\nChrono version: ", chrono.CHRONO_VERSION , "\n\n")
# --------------
# Create systems
# --------------
# Create the MAN 10t vehicle, set parameters, and initialize.
# NOTE: all Set* calls below must precede Initialize(); the visualization
# type setters must follow it (they act on the created assets).
truck = veh.MAN_10t()
truck.SetContactMethod(contact_method)
truck.SetChassisCollisionType(chassis_collision_type)
truck.SetChassisFixed(False)
truck.SetInitPosition(chrono.ChCoordsysd(initLoc, initRot))
truck.SetTireType(tire_model)
truck.SetTireStepSize(tire_step_size)
truck.SetDriveline8WD(True)
truck.Initialize()
truck.SetChassisVisualizationType(chassis_vis_type)
truck.SetSuspensionVisualizationType(suspension_vis_type)
truck.SetSteeringVisualizationType(steering_vis_type)
truck.SetWheelVisualizationType(wheel_vis_type)
truck.SetTireVisualizationType(tire_vis_type)
truck.GetSystem().SetCollisionSystemType(chrono.ChCollisionSystem.Type_BULLET)
# Create the rigid terrain, with a contact material matching the chosen contact method
terrain = veh.RigidTerrain(truck.GetSystem())
if (contact_method == chrono.ChContactMethod_NSC):
    patch_mat = chrono.ChContactMaterialNSC()
    patch_mat.SetFriction(0.9)
    patch_mat.SetRestitution(0.01)
elif (contact_method == chrono.ChContactMethod_SMC):
    patch_mat = chrono.ChContactMaterialSMC()
    patch_mat.SetFriction(0.9)
    patch_mat.SetRestitution(0.01)
    patch_mat.SetYoungModulus(2e7)
patch = terrain.AddPatch(patch_mat,
                         chrono.CSYSNORM,
                         terrainLength, terrainWidth)
patch.SetTexture(veh.GetDataFile("terrain/textures/tile4.jpg"), 200, 200)
patch.SetColor(chrono.ChColor(0.8, 0.8, 0.5))
terrain.Initialize()
# Create the vehicle Irrlicht interface (run-time visualization)
vis = veh.ChWheeledVehicleVisualSystemIrrlicht()
vis.SetWindowTitle('MAN 10t')
vis.SetWindowSize(1280, 1024)
vis.SetChaseCamera(trackPoint, 10.0, 0.5)
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddLightDirectional()
vis.AddSkyBox()
vis.AttachVehicle(truck.GetVehicle())
# Create the interactive (keyboard) driver system
driver = veh.ChInteractiveDriverIRR(vis)
# Set the time response for steering and throttle keyboard inputs.
steering_time = 1.0  # time to go from 0 to +1 (or from 0 to -1)
throttle_time = 1.0  # time to go from 0 to +1
braking_time = 0.3   # time to go from 0 to +1
driver.SetSteeringDelta(render_step_size / steering_time)
driver.SetThrottleDelta(render_step_size / throttle_time)
driver.SetBrakingDelta(render_step_size / braking_time)
driver.Initialize()
# ---------------
# Simulation loop
# ---------------
# output vehicle mass
print( "VEHICLE MASS: ", truck.GetVehicle().GetMass())
# Number of simulation steps between miscellaneous events
# (render one frame every `render_steps` dynamics steps)
render_steps = math.ceil(render_step_size / step_size)
# Initialize simulation frame counter s
step_number = 0
render_frame = 0
truck.GetVehicle().EnableRealtime(True)
while vis.Run() :
    time = truck.GetSystem().GetChTime()
    # End simulation
    if (time >= t_end):
        break
    # Render scene and output POV-Ray data
    if (step_number % render_steps == 0) :
        vis.BeginScene()
        vis.Render()
        vis.EndScene()
        render_frame += 1
    # Get driver inputs
    driver_inputs = driver.GetInputs()
    # Update modules (process inputs from other modules)
    driver.Synchronize(time)
    terrain.Synchronize(time)
    truck.Synchronize(time, driver_inputs, terrain)
    vis.Synchronize(time, driver_inputs)
    # Advance simulation for one timestep for all modules
    driver.Advance(step_size)
    terrain.Advance(step_size)
    truck.Advance(step_size)
    vis.Advance(step_size)
    # Increment frame number
    step_number += 1
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@src@demos@python@vehicle@demo_VEH_MAN_10t.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "astropenguin/ndradex",
"repo_path": "ndradex_extracted/ndradex-main/README.md",
"type": "Markdown"
}
|
# ndRADEX
[](https://pypi.org/project/ndradex/)
[](https://pypi.org/project/ndradex/)
[](https://pepy.tech/project/ndradex)
[](https://doi.org/10.5281/zenodo.3384031)
[](https://github.com/astropenguin/ndradex/actions)
Multidimensional grid RADEX calculator
## Overview
ndRADEX is a Python package which can run [RADEX], non-LTE molecular radiative transfer code, with multiple grid parameters.
The output will be multidimensional arrays provided by [xarray], which would be useful for parameter search of physical conditions in comparison with observed values.
### Features
- **Grid calculation:** ndRADEX has a simple `run()` function, where all parameters of RADEX can be griddable (i.e., they can be list-like with length of more than one).
- **Builtin RADEX:** ndRADEX provides builtin RADEX binaries in the package, which are automatically downloaded and built on the first import. This also enables us to do RADEX calculations in the cloud such as [Google Colaboratory](https://colab.research.google.com).
- **Multiprocessing:** ndRADEX runs RADEX with multiple processes by default. A speedup of at least a factor of two is expected compared to single-process execution.
- **Handy I/O:** The output of ndRADEX is a [xarray]'s Dataset, a standard multidimensional data structure as well as [pandas]. You can handle it in the same manner as NumPy and pandas (i.e., element-wise operation, save/load data, plotting, etc).
### Requirements
- Python 3.8-3.11 (tested by the author)
- gfortran (necessary to build RADEX)
### Installation
You can install ndRADEX with pip:
```shell
$ pip install ndradex
```
## Usages
Within Python, import the package like:
```python
>>> import ndradex
```
### Single RADEX calculation
The main function of ndRADEX is `ndradex.run()`.
For example, to get RADEX results of CO(1-0) with kinetic temperature of 100.0 K, CO column density of 1e15 cm^-2, and H2 density of 1e3 cm^-3:
```python
>>> ds = ndradex.run("co.dat", "1-0", 100.0, 1e15, 1e3)
```
where `"co.dat"` is a name of [LAMDA] datafile and `"1-0"` is a name of transition.
The available values are listed in [List of available LAMDA datafiles and transitions](https://github.com/astropenguin/ndradex/wiki/List-of-available-LAMDA-datafiles-and-transitions).
Note that you do not need to download any datafiles:
ndRADEX automatically manages this.
In this case, other parameters such as the line width and background temperature take the default values defined in the function.
The geometry of escape probability is uniform (`"uni"`) by default.
You can change these values with custom config (see customizations below).
The output is a [xarray]'s Dataset with no dimension:
```python
>>> print(ds)
<xarray.Dataset>
Dimensions: ()
Coordinates:
QN_ul <U3 '1-0'
T_kin int64 100
N_mol float64 1e+15
n_H2 float64 1e+03
T_bg float64 2.73
dv float64 1.0
geom <U3 'uni'
description <U9 'LAMDA(CO)'
Data variables:
E_u float64 5.5
freq float64 115.3
wavel float64 2.601e+03
T_ex float64 132.5
tau float64 0.009966
T_r float64 1.278
pop_u float64 0.4934
pop_l float64 0.1715
I float64 1.36
F float64 2.684e-08
```
You can access each result value like:
```python
>>> flux = ds["F"].values
```
### Grid RADEX calculation
As a natural extension, you can run grid RADEX calculation like:
```python
>>> ds = ndradex.run("co.dat", ["1-0", "2-1"], T_kin=[100.0, 200.0, 300.0],
N_mol=1e15, n_H2=[1e3, 1e4, 1e5, 1e6, 1e7])
```
There are 13 parameters which can be griddable:
`QN_ul` (transition name), `T_kin` (kinetic temperature), `N_mol` (column density), `n_H2` (H2 density), `n_pH2` (para-H2 density), `n_oH2` (ortho-H2 density), `n_e` (electron density), `n_H` (atomic hydrogen density), `n_He` (Helium density), `n_Hp` (ionized hydrogen density), `T_bg` (background temperature), `dv` (line width), and `geom` (photon escape geometry).
The output of this example is a [xarray]'s Dataset with three dimensions of (`QN_ul`, `T_kin`, `n_H2`):
```python
>>> print(ds)
<xarray.Dataset>
Dimensions: (QN_ul: 2, T_kin: 3, n_H2: 5)
Coordinates:
* QN_ul (QN_ul) <U3 '1-0' '2-1'
* T_kin (T_kin) int64 100 200 300
N_mol float64 1e+15
* n_H2 (n_H2) float64 1e+03 1e+04 1e+05 1e+06 1e+07
T_bg float64 2.73
dv float64 1.0
geom <U3 'uni'
description <U9 'LAMDA(CO)'
Data variables:
E_u (QN_ul, T_kin, n_H2) float64 5.5 5.5 5.5 5.5 ... 16.6 16.6 16.6
freq (QN_ul, T_kin, n_H2) float64 115.3 115.3 115.3 ... 230.5 230.5
wavel (QN_ul, T_kin, n_H2) float64 2.601e+03 2.601e+03 ... 1.3e+03
T_ex (QN_ul, T_kin, n_H2) float64 132.5 -86.52 127.6 ... 316.6 301.6
tau (QN_ul, T_kin, n_H2) float64 0.009966 -0.005898 ... 0.0009394
T_r (QN_ul, T_kin, n_H2) float64 1.278 0.5333 ... 0.3121 0.2778
pop_u (QN_ul, T_kin, n_H2) float64 0.4934 0.201 ... 0.04972 0.04426
pop_l (QN_ul, T_kin, n_H2) float64 0.1715 0.06286 ... 0.03089 0.02755
I (QN_ul, T_kin, n_H2) float64 1.36 0.5677 ... 0.3322 0.2957
F (QN_ul, T_kin, n_H2) float64 2.684e-08 1.12e-08 ... 4.666e-08
```
For more information, run `help(ndradex.run)` to see the docstrings.
### Save and load results
You can save and load the dataset like:
```python
# save results to a netCDF file
>>> ndradex.save_dataset(ds, "results.nc")
# load results from a netCDF file
>>> ds = ndradex.load_dataset("results.nc")
```
## Customization
For the first time you import ndRADEX, the custom configuration file is created as `~/.config/ndradex/config.toml`.
By editing this, you can customize the following two settings of ndRADEX.
Note that you can change the path of configuration directory by setting an environment variable, `NDRADEX_DIR`.
### Changing default values
As mentioned above, you can change the default values of the `run()` function like:
```toml
# config.toml
[defaults]
T_bg = 10.0 # change default background temp to 10.0 K
geom = "lvg" # change default geometry to LVG
timeout = 60.0
n_procs = 8
```
You can also change the number of multiprocesses (`n_procs`) and timeout (`timeout`) here.
### Setting datafile aliases
Sometimes datafile names are not intuitive (for example, name of CS datafile is `cs@lique.dat`).
For convenience, you can define aliases of datafile names like:
```toml
# config.toml
[lamda.aliases]
CS = "cs@lique.dat"
CO = "~/your/local/co.dat"
H13CN = "https://home.strw.leidenuniv.nl/~moldata/datafiles/h13cn@xpol.dat"
```
As shown in the second and third examples, you can also specify a local file path or a URL on the right hand.
After the customization, you can use these aliases in the `run()` function:
```python
>>> ds = ndradex.run("CS", "1-0", ...) # equiv to cs@lique.dat
```
[xarray]: http://xarray.pydata.org/en/stable/
[RADEX]: https://home.strw.leidenuniv.nl/~moldata/radex.html
[LAMDA]: https://home.strw.leidenuniv.nl/~moldata/
[pandas]: https://pandas.pydata.org/
|
astropenguinREPO_NAMEndradexPATH_START.@ndradex_extracted@ndradex-main@README.md@.PATH_END.py
|
{
"filename": "test_apf_shift.py",
"repo_name": "petigura/specmatch-syn",
"repo_path": "specmatch-syn_extracted/specmatch-syn-master/smsyn/tests/test_apf_shift.py",
"type": "Python"
}
|
"""Test smsyn.inst.hires.shift.shift code by using the moon
spectrum and a synthesized coelho model
"""
import os
from argparse import ArgumentParser
from astropy.io import fits
import numpy as np
import smsyn.inst.hires.shift
import smsyn.inst.apf.loadspec
import smsyn.library
import smsyn.io.spectrum
from smsyn import DATA_DIR
if __name__=="__main__":
# file input setup
name = 'MOON'
obs = 'rabh.225'
inpfile = os.path.join(DATA_DIR,'{}_{}.fits'.format(name,obs))
outfile = '{}_{}.sm.fits'.format(name,obs)
# parse path to library file from command line
psr = ArgumentParser()
psr.add_argument('libfile',type=str,help="path to library hdf file")
args = psr.parse_args()
# create Library object instance
lib = smsyn.library.read_hdf(args.libfile, wavlim=[4000,7000])
dirname = os.path.dirname(os.path.abspath(smsyn.__file__))
hduL = fits.open(inpfile)
# which orders are we interested in
orders = range(30,51)
# load 2d wavelength solution
wav = fits.getdata(os.path.join(DATA_DIR, 'apf_wave_bj2.fits')) # guess at wavelength scale
wav = wav[orders,:]
# read 2d fits spectrum
flux,uflux = smsyn.inst.apf.loadspec.read_fits(inpfile, flatten=True, geterr=True, specorders=orders)
# synthesize section of model spectrum
ref_wav = np.logspace(np.log10(wav[0,0]),np.log10(wav[-1,-1]),64000)
ref_flux = lib.synth(ref_wav, 5700, 4.4, 0.0, 2, 2)
# shift to rest wavelength and flatten
flux_shift, uflux_shift = smsyn.inst.hires.shift.shift(
wav, flux, uflux, ref_wav, ref_flux
)
# create Spectrum instance and save result to new fits file
spec = smsyn.io.spectrum.Spectrum(
ref_wav, flux_shift, uflux_shift, header=dict(name=name,obs=obs)
)
spec.to_fits(outfile)
|
petiguraREPO_NAMEspecmatch-synPATH_START.@specmatch-syn_extracted@specmatch-syn-master@smsyn@tests@test_apf_shift.py@.PATH_END.py
|
{
"filename": "from_json_test.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/tests/from_json_test.py",
"type": "Python"
}
|
from common import *
import tempfile
def test_from_json(ds_local):
    """Round-trip ``ds_local`` through pandas' JSON writer and
    ``vaex.from_json``, checking that column names and values survive.
    """
    df = ds_local
    pandas_df = df.to_pandas_df(virtual=True, array_type='numpy')
    # tempfile.mktemp() is deprecated and race-prone, and the original code
    # never removed the file it created.  A TemporaryDirectory guarantees
    # cleanup even when an assertion fails.
    with tempfile.TemporaryDirectory() as tmpdir:
        tmp = tmpdir + '/data.json'
        with open(tmp, 'w') as f:
            f.write(pandas_df.to_json())
        tmp_df = vaex.from_json(tmp)
        assert set(tmp_df.get_column_names()) == set(df.get_column_names())
        assert len(tmp_df) == len(df)
        assert tmp_df.x.tolist() == df.x.tolist()
        assert tmp_df.bool.tolist() == df.bool.tolist()
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@tests@from_json_test.py@.PATH_END.py
|
{
"filename": "apply.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/core/apply.py",
"type": "Python"
}
|
from __future__ import annotations
import abc
from collections import defaultdict
from collections.abc import Callable
import functools
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Literal,
cast,
)
import numpy as np
from pandas._libs.internals import BlockValuesRefs
from pandas._typing import (
AggFuncType,
AggFuncTypeBase,
AggFuncTypeDict,
AggObjType,
Axis,
AxisInt,
NDFrameT,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import SpecificationError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_numeric_dtype,
is_sequence,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCNDFrame,
ABCSeries,
)
from pandas.core._numba.executor import generate_apply_looper
import pandas.core.common as com
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.util.numba_ import (
get_jit_arguments,
prepare_function_arguments,
)
if TYPE_CHECKING:
from collections.abc import (
Generator,
Hashable,
Iterable,
MutableMapping,
Sequence,
)
from pandas import (
DataFrame,
Index,
Series,
)
from pandas.core.groupby import GroupBy
from pandas.core.resample import Resampler
from pandas.core.window.rolling import BaseWindow
ResType = dict[int, Any]
def frame_apply(
    obj: DataFrame,
    func: AggFuncType,
    axis: Axis = 0,
    raw: bool = False,
    result_type: str | None = None,
    by_row: Literal[False, "compat"] = "compat",
    engine: str = "python",
    engine_kwargs: dict[str, bool] | None = None,
    args=None,
    kwargs=None,
) -> FrameApply:
    """
    Construct and return a row or column based frame apply object.

    ``axis=0`` dispatches to ``FrameRowApply`` and ``axis=1`` to
    ``FrameColumnApply``.  Named aggregation (keyword-based ``func``)
    is only supported for ``axis=0``.
    """
    # ``kwargs`` defaults to None but must be a mapping for the
    # ``**`` expansion below; without this guard the default raised
    # ``TypeError: argument after ** must be a mapping``.
    _, func, columns, _ = reconstruct_func(func, **(kwargs or {}))
    axis = obj._get_axis_number(axis)
    klass: type[FrameApply]
    if axis == 0:
        klass = FrameRowApply
    elif axis == 1:
        if columns:
            raise NotImplementedError(
                f"Named aggregation is not supported when {axis=}."
            )
        klass = FrameColumnApply
    return klass(
        obj,
        func,
        raw=raw,
        result_type=result_type,
        by_row=by_row,
        engine=engine,
        engine_kwargs=engine_kwargs,
        args=args,
        kwargs=kwargs,
    )
class Apply(metaclass=abc.ABCMeta):
    """
    Abstract base class implementing the shared machinery behind
    apply/agg/transform for Series, DataFrame, GroupBy, Resampler and
    Window objects.  Subclasses supply ``apply`` and the two
    ``agg_or_apply_*`` hooks.
    """

    # set by concrete subclasses (0 for row-wise, 1 for column-wise)
    axis: AxisInt
    def __init__(
        self,
        obj: AggObjType,
        func: AggFuncType,
        raw: bool,
        result_type: str | None,
        *,
        by_row: Literal[False, "compat", "_compat"] = "compat",
        engine: str = "python",
        engine_kwargs: dict[str, bool] | None = None,
        args,
        kwargs,
    ) -> None:
        """
        Store the target object, the user function and the apply options,
        validating ``by_row`` and ``result_type``.
        """
        self.obj = obj
        self.raw = raw
        assert by_row is False or by_row in ["compat", "_compat"]
        self.by_row = by_row
        # normalize missing args/kwargs to empty containers
        self.args = args or ()
        self.kwargs = kwargs or {}
        self.engine = engine
        self.engine_kwargs = {} if engine_kwargs is None else engine_kwargs
        if result_type not in [None, "reduce", "broadcast", "expand"]:
            raise ValueError(
                "invalid value for result_type, must be one "
                "of {None, 'reduce', 'broadcast', 'expand'}"
            )
        self.result_type = result_type
        self.func = func
    @abc.abstractmethod
    def apply(self) -> DataFrame | Series:
        pass
    @abc.abstractmethod
    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        pass
    @abc.abstractmethod
    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        pass
    def agg(self) -> DataFrame | Series | None:
        """
        Provide an implementation for the aggregators.
        Returns
        -------
        Result of aggregation, or None if agg cannot be performed by
        this method.
        """
        func = self.func
        # dispatch on the shape of ``func``: string, dict-like, list-like
        if isinstance(func, str):
            return self.apply_str()
        if is_dict_like(func):
            return self.agg_dict_like()
        elif is_list_like(func):
            # we require a list, but not a 'str'
            return self.agg_list_like()
        # caller can react
        return None
    def transform(self) -> DataFrame | Series:
        """
        Transform a DataFrame or Series.
        Returns
        -------
        DataFrame or Series
            Result of applying ``func`` along the given axis of the
            Series or DataFrame.
        Raises
        ------
        ValueError
            If the transform function fails or does not transform.
        """
        obj = self.obj
        func = self.func
        axis = self.axis
        args = self.args
        kwargs = self.kwargs
        is_series = obj.ndim == 1
        # axis=1 is handled by transposing, transforming along axis 0,
        # and transposing back
        if obj._get_axis_number(axis) == 1:
            assert not is_series
            return obj.T.transform(func, 0, *args, **kwargs).T
        if is_list_like(func) and not is_dict_like(func):
            func = cast(list[AggFuncTypeBase], func)
            # Convert func equivalent dict
            if is_series:
                func = {com.get_callable_name(v) or v: v for v in func}
            else:
                func = {col: func for col in obj}
        if is_dict_like(func):
            func = cast(AggFuncTypeDict, func)
            return self.transform_dict_like(func)
        # func is either str or callable
        func = cast(AggFuncTypeBase, func)
        try:
            result = self.transform_str_or_callable(func)
        except TypeError:
            raise
        except Exception as err:
            raise ValueError("Transform function failed") from err
        # Functions that transform may return empty Series/DataFrame
        # when the dtype is not appropriate
        if (
            isinstance(result, (ABCSeries, ABCDataFrame))
            and result.empty
            and not obj.empty
        ):
            raise ValueError("Transform function failed")
        # a true transform must preserve the original index
        if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
            obj.index
        ):
            raise ValueError("Function did not transform")
        return result
    def transform_dict_like(self, func) -> DataFrame:
        """
        Compute transform in the case of a dict-like func
        """
        from pandas.core.reshape.concat import concat
        obj = self.obj
        args = self.args
        kwargs = self.kwargs
        # transform is currently only for Series/DataFrame
        assert isinstance(obj, ABCNDFrame)
        if len(func) == 0:
            raise ValueError("No transform functions were provided")
        func = self.normalize_dictlike_arg("transform", obj, func)
        # transform each selected column independently, then concat
        results: dict[Hashable, DataFrame | Series] = {}
        for name, how in func.items():
            colg = obj._gotitem(name, ndim=1)
            results[name] = colg.transform(how, 0, *args, **kwargs)
        return concat(results, axis=1)
    def transform_str_or_callable(self, func) -> DataFrame | Series:
        """
        Compute transform in the case of a string or callable func
        """
        obj = self.obj
        args = self.args
        kwargs = self.kwargs
        if isinstance(func, str):
            return self._apply_str(obj, func, *args, **kwargs)
        # Two possible ways to use a UDF - apply or call directly
        try:
            return obj.apply(func, args=args, **kwargs)
        except Exception:
            return func(obj, *args, **kwargs)
    def agg_list_like(self) -> DataFrame | Series:
        """
        Compute aggregation in the case of a list-like argument.
        Returns
        -------
        Result of aggregation.
        """
        return self.agg_or_apply_list_like(op_name="agg")
    def compute_list_like(
        self,
        op_name: Literal["agg", "apply"],
        selected_obj: Series | DataFrame,
        kwargs: dict[str, Any],
    ) -> tuple[list[Hashable] | Index, list[Any]]:
        """
        Compute agg/apply results for like-like input.
        Parameters
        ----------
        op_name : {"agg", "apply"}
            Operation being performed.
        selected_obj : Series or DataFrame
            Data to perform operation on.
        kwargs : dict
            Keyword arguments to pass to the functions.
        Returns
        -------
        keys : list[Hashable] or Index
            Index labels for result.
        results : list
            Data for result. When aggregating with a Series, this can contain any
            Python objects.
        """
        func = cast(list[AggFuncTypeBase], self.func)
        obj = self.obj
        results = []
        keys = []
        # degenerate case
        if selected_obj.ndim == 1:
            # Series: apply each function in the list to the whole series
            for a in func:
                colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
                args = (
                    [self.axis, *self.args]
                    if include_axis(op_name, colg)
                    else self.args
                )
                new_res = getattr(colg, op_name)(a, *args, **kwargs)
                results.append(new_res)
                # make sure we find a good name
                name = com.get_callable_name(a) or a
                keys.append(name)
        else:
            # DataFrame: apply the whole list of functions to each column
            indices = []
            for index, col in enumerate(selected_obj):
                colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
                args = (
                    [self.axis, *self.args]
                    if include_axis(op_name, colg)
                    else self.args
                )
                new_res = getattr(colg, op_name)(func, *args, **kwargs)
                results.append(new_res)
                indices.append(index)
            # error: Incompatible types in assignment (expression has type "Any |
            # Index", variable has type "list[Any | Callable[..., Any] | str]")
            keys = selected_obj.columns.take(indices) # type: ignore[assignment]
        return keys, results
    def wrap_results_list_like(
        self, keys: Iterable[Hashable], results: list[Series | DataFrame]
    ):
        """
        Assemble list-like agg/apply results into a single object,
        concatenating NDFrames or building a Series from scalars.
        """
        from pandas.core.reshape.concat import concat
        obj = self.obj
        try:
            return concat(results, keys=keys, axis=1, sort=False)
        except TypeError as err:
            # we are concatting non-NDFrame objects,
            # e.g. a list of scalars
            from pandas import Series
            result = Series(results, index=keys, name=obj.name)
            if is_nested_object(result):
                raise ValueError(
                    "cannot combine transform and aggregation operations"
                ) from err
            return result
    def agg_dict_like(self) -> DataFrame | Series:
        """
        Compute aggregation in the case of a dict-like argument.
        Returns
        -------
        Result of aggregation.
        """
        return self.agg_or_apply_dict_like(op_name="agg")
    def compute_dict_like(
        self,
        op_name: Literal["agg", "apply"],
        selected_obj: Series | DataFrame,
        selection: Hashable | Sequence[Hashable],
        kwargs: dict[str, Any],
    ) -> tuple[list[Hashable], list[Any]]:
        """
        Compute agg/apply results for dict-like input.
        Parameters
        ----------
        op_name : {"agg", "apply"}
            Operation being performed.
        selected_obj : Series or DataFrame
            Data to perform operation on.
        selection : hashable or sequence of hashables
            Used by GroupBy, Window, and Resample if selection is applied to the object.
        kwargs : dict
            Keyword arguments to pass to the functions.
        Returns
        -------
        keys : list[hashable]
            Index labels for result.
        results : list
            Data for result. When aggregating with a Series, this can contain any
            Python object.
        """
        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )
        obj = self.obj
        is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
        func = cast(AggFuncTypeDict, self.func)
        func = self.normalize_dictlike_arg(op_name, selected_obj, func)
        is_non_unique_col = (
            selected_obj.ndim == 2
            and selected_obj.columns.nunique() < len(selected_obj.columns)
        )
        if selected_obj.ndim == 1:
            # key only used for output
            colg = obj._gotitem(selection, ndim=1)
            results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()]
            keys = list(func.keys())
        elif not is_groupby and is_non_unique_col:
            # key used for column selection and output
            # GH#51099
            results = []
            keys = []
            for key, how in func.items():
                indices = selected_obj.columns.get_indexer_for([key])
                labels = selected_obj.columns.take(indices)
                label_to_indices = defaultdict(list)
                for index, label in zip(indices, labels):
                    label_to_indices[label].append(index)
                key_data = [
                    getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs)
                    for label, indices in label_to_indices.items()
                    for indice in indices
                ]
                keys += [key] * len(key_data)
                results += key_data
        elif is_groupby:
            # key used for column selection and output
            df = selected_obj
            results, keys = [], []
            for key, how in func.items():
                cols = df[key]
                if cols.ndim == 1:
                    series = obj._gotitem(key, ndim=1, subset=cols)
                    results.append(getattr(series, op_name)(how, **kwargs))
                    keys.append(key)
                else:
                    for _, col in cols.items():
                        series = obj._gotitem(key, ndim=1, subset=col)
                        results.append(getattr(series, op_name)(how, **kwargs))
                        keys.append(key)
        else:
            results = [
                getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs)
                for key, how in func.items()
            ]
            keys = list(func.keys())
        return keys, results
    def wrap_results_dict_like(
        self,
        selected_obj: Series | DataFrame,
        result_index: list[Hashable],
        result_data: list,
    ):
        """
        Assemble dict-like agg/apply results: concat when every result is
        an NDFrame, build a Series when every result is a scalar, and
        raise on a mix of the two.
        """
        from pandas import Index
        from pandas.core.reshape.concat import concat
        obj = self.obj
        # Avoid making two isinstance calls in all and any below
        is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data]
        if all(is_ndframe):
            results = [result for result in result_data if not result.empty]
            keys_to_use: Iterable[Hashable]
            keys_to_use = [k for k, v in zip(result_index, result_data) if not v.empty]
            # Have to check, if at least one DataFrame is not empty.
            if keys_to_use == []:
                keys_to_use = result_index
                results = result_data
            if selected_obj.ndim == 2:
                # keys are columns, so we can preserve names
                ktu = Index(keys_to_use)
                ktu._set_names(selected_obj.columns.names)
                keys_to_use = ktu
            axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1
            result = concat(
                results,
                axis=axis,
                keys=keys_to_use,
            )
        elif any(is_ndframe):
            # There is a mix of NDFrames and scalars
            raise ValueError(
                "cannot perform both aggregation "
                "and transformation operations "
                "simultaneously"
            )
        else:
            from pandas import Series
            # we have a list of scalars
            # GH 36212 use name only if obj is a series
            if obj.ndim == 1:
                obj = cast("Series", obj)
                name = obj.name
            else:
                name = None
            result = Series(result_data, index=result_index, name=name)
        return result
    def apply_str(self) -> DataFrame | Series:
        """
        Compute apply in case of a string.
        Returns
        -------
        result: Series or DataFrame
        """
        # Caller is responsible for checking isinstance(self.f, str)
        func = cast(str, self.func)
        obj = self.obj
        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )
        # Support for `frame.transform('method')`
        # Some methods (shift, etc.) require the axis argument, others
        # don't, so inspect and insert if necessary.
        method = getattr(obj, func, None)
        if callable(method):
            sig = inspect.getfullargspec(method)
            arg_names = (*sig.args, *sig.kwonlyargs)
            if self.axis != 0 and (
                "axis" not in arg_names or func in ("corrwith", "skew")
            ):
                raise ValueError(f"Operation {func} does not support axis=1")
            if "axis" in arg_names and not isinstance(
                obj, (SeriesGroupBy, DataFrameGroupBy)
            ):
                self.kwargs["axis"] = self.axis
        return self._apply_str(obj, func, *self.args, **self.kwargs)
    def apply_list_or_dict_like(self) -> DataFrame | Series:
        """
        Compute apply in case of a list-like or dict-like.
        Returns
        -------
        result: Series, DataFrame, or None
            Result when self.func is a list-like or dict-like, None otherwise.
        """
        if self.engine == "numba":
            raise NotImplementedError(
                "The 'numba' engine doesn't support list-like/"
                "dict likes of callables yet."
            )
        # axis=1 is handled by operating on the transpose
        if self.axis == 1 and isinstance(self.obj, ABCDataFrame):
            return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T
        func = self.func
        kwargs = self.kwargs
        if is_dict_like(func):
            result = self.agg_or_apply_dict_like(op_name="apply")
        else:
            result = self.agg_or_apply_list_like(op_name="apply")
        result = reconstruct_and_relabel_result(result, func, **kwargs)
        return result
    def normalize_dictlike_arg(
        self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict
    ) -> AggFuncTypeDict:
        """
        Handler for dict-like argument.
        Ensures that necessary columns exist if obj is a DataFrame, and
        that a nested renamer is not passed. Also normalizes to all lists
        when values consists of a mix of list and non-lists.
        """
        assert how in ("apply", "agg", "transform")
        # Can't use func.values(); wouldn't work for a Series
        if (
            how == "agg"
            and isinstance(obj, ABCSeries)
            and any(is_list_like(v) for _, v in func.items())
        ) or (any(is_dict_like(v) for _, v in func.items())):
            # GH 15931 - deprecation of renaming keys
            raise SpecificationError("nested renamer is not supported")
        if obj.ndim != 1:
            # Check for missing columns on a frame
            from pandas import Index
            cols = Index(list(func.keys())).difference(obj.columns, sort=True)
            if len(cols) > 0:
                # GH 58474
                raise KeyError(f"Label(s) {list(cols)} do not exist")
        aggregator_types = (list, tuple, dict)
        # if we have a dict of any non-scalars
        # eg. {'A' : ['mean']}, normalize all to
        # be list-likes
        # Cannot use func.values() because arg may be a Series
        if any(isinstance(x, aggregator_types) for _, x in func.items()):
            new_func: AggFuncTypeDict = {}
            for k, v in func.items():
                if not isinstance(v, aggregator_types):
                    new_func[k] = [v]
                else:
                    new_func[k] = v
            func = new_func
        return func
    def _apply_str(self, obj, func: str, *args, **kwargs):
        """
        if arg is a string, then try to operate on it:
        - try to find a function (or attribute) on obj
        - try to find a numpy function
        - raise
        """
        assert isinstance(func, str)
        if hasattr(obj, func):
            f = getattr(obj, func)
            if callable(f):
                return f(*args, **kwargs)
            # people may aggregate on a non-callable attribute
            # but don't let them think they can pass args to it
            assert len(args) == 0
            assert not any(kwarg == "axis" for kwarg in kwargs)
            return f
        elif hasattr(np, func) and hasattr(obj, "__array__"):
            # in particular exclude Window
            f = getattr(np, func)
            return f(obj, *args, **kwargs)
        else:
            msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object"
            raise AttributeError(msg)
class NDFrameApply(Apply):
    """
    Methods shared by FrameApply and SeriesApply but
    not GroupByApply or ResamplerWindowApply
    """

    obj: DataFrame | Series
    @property
    def index(self) -> Index:
        # row index of the underlying Series/DataFrame
        return self.obj.index
    @property
    def agg_axis(self) -> Index:
        # the axis labels that aggregation results are indexed by
        return self.obj._get_agg_axis(self.axis)
    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        """
        Run a list-like ``func`` via compute_list_like and wrap the result.
        """
        obj = self.obj
        kwargs = self.kwargs
        if op_name == "apply":
            # propagate the appropriate by_row mode for apply (agg never
            # forwards by_row)
            if isinstance(self, FrameApply):
                by_row = self.by_row
            elif isinstance(self, SeriesApply):
                by_row = "_compat" if self.by_row else False
            else:
                by_row = False
            kwargs = {**kwargs, "by_row": by_row}
        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")
        keys, results = self.compute_list_like(op_name, obj, kwargs)
        result = self.wrap_results_list_like(keys, results)
        return result
    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        """
        Run a dict-like ``func`` via compute_dict_like and wrap the result.
        """
        assert op_name in ["agg", "apply"]
        obj = self.obj
        kwargs = {}
        if op_name == "apply":
            by_row = "_compat" if self.by_row else False
            kwargs.update({"by_row": by_row})
        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")
        # no column selection at the NDFrame level (GroupBy et al. override)
        selection = None
        result_index, result_data = self.compute_dict_like(
            op_name, obj, selection, kwargs
        )
        result = self.wrap_results_dict_like(obj, result_index, result_data)
        return result
class FrameApply(NDFrameApply):
    """
    Base class for DataFrame apply/agg; FrameRowApply and FrameColumnApply
    specialize the axis-dependent pieces.
    """

    obj: DataFrame
    def __init__(
        self,
        obj: AggObjType,
        func: AggFuncType,
        raw: bool,
        result_type: str | None,
        *,
        by_row: Literal[False, "compat"] = False,
        engine: str = "python",
        engine_kwargs: dict[str, bool] | None = None,
        args,
        kwargs,
    ) -> None:
        # DataFrame apply only supports by_row in {False, "compat"};
        # "_compat" is Series-only
        if by_row is not False and by_row != "compat":
            raise ValueError(f"by_row={by_row} not allowed")
        super().__init__(
            obj,
            func,
            raw,
            result_type,
            by_row=by_row,
            engine=engine,
            engine_kwargs=engine_kwargs,
            args=args,
            kwargs=kwargs,
        )
    # ---------------------------------------------------------------
    # Abstract Methods
    @property
    @abc.abstractmethod
    def result_index(self) -> Index:
        """Index for the rows of the final result."""
        pass
    @property
    @abc.abstractmethod
    def result_columns(self) -> Index:
        """Index for the columns of the final result."""
        pass
    @property
    @abc.abstractmethod
    def series_generator(self) -> Generator[Series]:
        """Yield one Series per application of ``func``."""
        pass
    @staticmethod
    @functools.cache
    @abc.abstractmethod
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        """Build (and cache) the numba-compiled looper for ``func``."""
        pass
    @abc.abstractmethod
    def apply_with_numba(self):
        """Run the apply using the numba engine."""
        pass
def validate_values_for_numba(self) -> None:
# Validate column dtyps all OK
for colname, dtype in self.obj.dtypes.items():
if not is_numeric_dtype(dtype):
raise ValueError(
f"Column {colname} must have a numeric dtype. "
f"Found '{dtype}' instead"
)
if is_extension_array_dtype(dtype):
raise ValueError(
f"Column {colname} is backed by an extension array, "
f"which is not supported by the numba engine."
)
@abc.abstractmethod
def wrap_results_for_axis(
self, results: ResType, res_index: Index
) -> DataFrame | Series:
pass
# ---------------------------------------------------------------
@property
def res_columns(self) -> Index:
return self.result_columns
@property
def columns(self) -> Index:
return self.obj.columns
@cache_readonly
def values(self):
return self.obj.values
def apply(self) -> DataFrame | Series:
"""compute the results"""
# dispatch to handle list-like or dict-like
if is_list_like(self.func):
if self.engine == "numba":
raise NotImplementedError(
"the 'numba' engine doesn't support lists of callables yet"
)
return self.apply_list_or_dict_like()
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.func, str):
if self.engine == "numba":
raise NotImplementedError(
"the 'numba' engine doesn't support using "
"a string as the callable function"
)
return self.apply_str()
# ufunc
elif isinstance(self.func, np.ufunc):
if self.engine == "numba":
raise NotImplementedError(
"the 'numba' engine doesn't support "
"using a numpy ufunc as the callable function"
)
with np.errstate(all="ignore"):
results = self.obj._mgr.apply("apply", func=self.func)
# _constructor will retain self.index and self.columns
return self.obj._constructor_from_mgr(results, axes=results.axes)
# broadcasting
if self.result_type == "broadcast":
if self.engine == "numba":
raise NotImplementedError(
"the 'numba' engine doesn't support result_type='broadcast'"
)
return self.apply_broadcast(self.obj)
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw:
return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs)
return self.apply_standard()
def agg(self):
obj = self.obj
axis = self.axis
# TODO: Avoid having to change state
self.obj = self.obj if self.axis == 0 else self.obj.T
self.axis = 0
result = None
try:
result = super().agg()
finally:
self.obj = obj
self.axis = axis
if axis == 1:
result = result.T if result is not None else result
if result is None:
result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs)
return result
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
assert callable(self.func)
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.copy()
# we may need to infer
should_reduce = self.result_type == "reduce"
from pandas import Series
if not should_reduce:
try:
if self.axis == 0:
r = self.func(
Series([], dtype=np.float64), *self.args, **self.kwargs
)
else:
r = self.func(
Series(index=self.columns, dtype=np.float64),
*self.args,
**self.kwargs,
)
except Exception:
pass
else:
should_reduce = not isinstance(r, Series)
if should_reduce:
if len(self.agg_axis):
r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs)
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self, engine="python", engine_kwargs=None):
"""apply to the values as a numpy array"""
def wrap_function(func):
"""
Wrap user supplied function to work around numpy issue.
see https://github.com/numpy/numpy/issues/8352
"""
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, str):
result = np.array(result, dtype=object)
return result
return wrapper
if engine == "numba":
args, kwargs = prepare_function_arguments(
self.func, # type: ignore[arg-type]
self.args,
self.kwargs,
num_required_args=1,
)
# error: Argument 1 to "__call__" of "_lru_cache_wrapper" has
# incompatible type "Callable[..., Any] | str | list[Callable
# [..., Any] | str] | dict[Hashable,Callable[..., Any] | str |
# list[Callable[..., Any] | str]]"; expected "Hashable"
nb_looper = generate_apply_looper(
self.func, # type: ignore[arg-type]
**get_jit_arguments(engine_kwargs),
)
result = nb_looper(self.values, self.axis, *args)
# If we made the result 2-D, squeeze it back to 1-D
result = np.squeeze(result)
else:
result = np.apply_along_axis(
wrap_function(self.func),
self.axis,
self.values,
*self.args,
**self.kwargs,
)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target: DataFrame) -> DataFrame:
assert callable(self.func)
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.func(target[col], *self.args, **self.kwargs)
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
if ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=target.index, columns=target.columns
)
return result
def apply_standard(self):
if self.engine == "python":
results, res_index = self.apply_series_generator()
else:
results, res_index = self.apply_series_numba()
# wrap results
return self.wrap_results(results, res_index)
def apply_series_generator(self) -> tuple[ResType, Index]:
assert callable(self.func)
series_gen = self.series_generator
res_index = self.result_index
results = {}
for i, v in enumerate(series_gen):
results[i] = self.func(v, *self.args, **self.kwargs)
if isinstance(results[i], ABCSeries):
# If we have a view on v, we need to make a copy because
# series_generator will swap out the underlying data
results[i] = results[i].copy(deep=False)
return results, res_index
def apply_series_numba(self):
if self.engine_kwargs.get("parallel", False):
raise NotImplementedError(
"Parallel apply is not supported when raw=False and engine='numba'"
)
if not self.obj.index.is_unique or not self.columns.is_unique:
raise NotImplementedError(
"The index/columns must be unique when raw=False and engine='numba'"
)
self.validate_values_for_numba()
results = self.apply_with_numba()
return results, self.result_index
def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series:
from pandas import Series
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis(results, res_index)
# dict of scalars
# the default dtype of an empty Series is `object`, but this
# code can be hit by df.mean() where the result should have dtype
# float64 even if it's an empty Series.
constructor_sliced = self.obj._constructor_sliced
if len(results) == 0 and constructor_sliced is Series:
result = constructor_sliced(results, dtype=np.float64)
else:
result = constructor_sliced(results)
result.index = res_index
return result
def apply_str(self) -> DataFrame | Series:
# Caller is responsible for checking isinstance(self.func, str)
# TODO: GH#39993 - Avoid special-casing by replacing with lambda
if self.func == "size":
# Special-cased because DataFrame.size returns a single scalar
obj = self.obj
value = obj.shape[self.axis]
return obj._constructor_sliced(value, index=self.agg_axis)
return super().apply_str()
class FrameRowApply(FrameApply):
    """DataFrame.apply(axis=0): the user function sees each column as a Series."""

    axis: AxisInt = 0

    @property
    def series_generator(self) -> Generator[Series]:
        # One Series per column, in column order.
        return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))

    @staticmethod
    @functools.cache
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        """Build a jitted looper applying ``func`` column-by-column."""
        numba = import_optional_dependency("numba")
        from pandas import Series

        # Import helper from extensions to cast string object -> np strings
        # Note: This also has the side effect of loading our numba extensions
        from pandas.core._numba.extensions import maybe_cast_str

        jitted_udf = numba.extending.register_jitable(func)

        # Currently the parallel argument doesn't get passed through here
        # (it's disabled) since the dicts in numba aren't thread-safe.
        @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
        def numba_func(values, col_names, df_index, *args):
            results = {}
            for j in range(values.shape[1]):
                # Create the series
                ser = Series(
                    values[:, j], index=df_index, name=maybe_cast_str(col_names[j])
                )
                results[j] = jitted_udf(ser, *args)
            return results

        return numba_func

    def apply_with_numba(self) -> dict[int, Any]:
        """Run the column-wise apply through the cached numba looper."""
        func = cast(Callable, self.func)
        args, kwargs = prepare_function_arguments(
            func, self.args, self.kwargs, num_required_args=1
        )
        nb_func = self.generate_numba_apply_func(
            func, **get_jit_arguments(self.engine_kwargs)
        )
        from pandas.core._numba.extensions import set_numba_data

        index = self.obj.index
        columns = self.obj.columns

        # Convert from numba dict to regular dict
        # Our isinstance checks in the df constructor don't pass for numbas typed dict
        with set_numba_data(index) as index, set_numba_data(columns) as columns:
            res = dict(nb_func(self.values, columns, index, *args))
        return res

    @property
    def result_index(self) -> Index:
        # Result is labeled by the columns the func was applied to.
        return self.columns

    @property
    def result_columns(self) -> Index:
        return self.index

    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        """return the results for the rows"""

        if self.result_type == "reduce":
            # e.g. test_apply_dict GH#8735
            res = self.obj._constructor_sliced(results)
            res.index = res_index
            return res

        elif self.result_type is None and all(
            isinstance(x, dict) for x in results.values()
        ):
            # Our operation was a to_dict op e.g.
            #  test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544
            res = self.obj._constructor_sliced(results)
            res.index = res_index
            return res

        try:
            result = self.obj._constructor(data=results)
        except ValueError as err:
            if "All arrays must be of the same length" in str(err):
                # e.g. result = [[2, 3], [1.5], ['foo', 'bar']]
                #  see test_agg_listlike_result GH#29587
                res = self.obj._constructor_sliced(results)
                res.index = res_index
                return res
            else:
                raise

        if not isinstance(results[0], ABCSeries):
            if len(result.index) == len(self.res_columns):
                result.index = self.res_columns

        if len(result.columns) == len(res_index):
            result.columns = res_index

        return result
class FrameColumnApply(FrameApply):
    """DataFrame.apply(axis=1): the user function sees each row as a Series."""

    axis: AxisInt = 1

    def apply_broadcast(self, target: DataFrame) -> DataFrame:
        # Delegate to the row implementation on the transpose.
        result = super().apply_broadcast(target.T)
        return result.T

    @property
    def series_generator(self) -> Generator[Series]:
        values = self.values
        values = ensure_wrapped_if_datetimelike(values)
        assert len(values) > 0

        # We create one Series object, and will swap out the data inside
        # of it.  Kids: don't do this at home.
        ser = self.obj._ixs(0, axis=0)
        mgr = ser._mgr

        is_view = mgr.blocks[0].refs.has_reference()

        if isinstance(ser.dtype, ExtensionDtype):
            # values will be incorrect for this block
            # TODO(EA2D): special case would be unnecessary with 2D EAs
            obj = self.obj
            for i in range(len(obj)):
                yield obj._ixs(i, axis=0)

        else:
            for arr, name in zip(values, self.index):
                # GH#35462 re-pin mgr in case setitem changed it
                ser._mgr = mgr
                mgr.set_values(arr)
                object.__setattr__(ser, "_name", name)
                if not is_view:
                    # In apply_series_generator we store a shallow copy of the
                    # result, which potentially increases the ref count of this
                    # reused `ser` object (depending on the result of the
                    # applied function) -> if that happened and `ser` is
                    # already a copy, then we reset the refs here to avoid
                    # triggering an unnecessary CoW inside the applied
                    # function (https://github.com/pandas-dev/pandas/pull/56212)
                    mgr.blocks[0].refs = BlockValuesRefs(mgr.blocks[0])
                yield ser

    @staticmethod
    @functools.cache
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        """Build a jitted looper applying ``func`` row-by-row."""
        numba = import_optional_dependency("numba")
        from pandas import Series
        from pandas.core._numba.extensions import maybe_cast_str

        jitted_udf = numba.extending.register_jitable(func)

        @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
        def numba_func(values, col_names_index, index, *args):
            results = {}
            # Currently the parallel argument doesn't get passed through here
            # (it's disabled) since the dicts in numba aren't thread-safe.
            for i in range(values.shape[0]):
                # Create the series
                # TODO: values corrupted without the copy
                ser = Series(
                    values[i].copy(),
                    index=col_names_index,
                    name=maybe_cast_str(index[i]),
                )
                results[i] = jitted_udf(ser, *args)

            return results

        return numba_func

    def apply_with_numba(self) -> dict[int, Any]:
        """Run the row-wise apply through the cached numba looper."""
        func = cast(Callable, self.func)
        args, kwargs = prepare_function_arguments(
            func, self.args, self.kwargs, num_required_args=1
        )
        nb_func = self.generate_numba_apply_func(
            func, **get_jit_arguments(self.engine_kwargs)
        )

        from pandas.core._numba.extensions import set_numba_data

        # Convert from numba dict to regular dict
        # Our isinstance checks in the df constructor don't pass for numbas typed dict
        with (
            set_numba_data(self.obj.index) as index,
            set_numba_data(self.columns) as columns,
        ):
            res = dict(nb_func(self.values, columns, index, *args))

        return res

    @property
    def result_index(self) -> Index:
        # Result is labeled by the rows the func was applied to.
        return self.index

    @property
    def result_columns(self) -> Index:
        return self.columns

    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        """return the results for the columns"""
        result: DataFrame | Series

        # we have requested to expand
        if self.result_type == "expand":
            result = self.infer_to_same_shape(results, res_index)

        # we have a non-series and don't want inference
        elif not isinstance(results[0], ABCSeries):
            result = self.obj._constructor_sliced(results)
            result.index = res_index

        # we may want to infer results
        else:
            result = self.infer_to_same_shape(results, res_index)

        return result

    def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
        """infer the results to the same shape as the input object"""
        result = self.obj._constructor(data=results)
        result = result.T

        # set the index
        result.index = res_index

        # infer dtypes
        result = result.infer_objects()

        return result
class SeriesApply(NDFrameApply):
    """Apply/aggregate machinery for Series objects."""

    obj: Series
    axis: AxisInt = 0
    by_row: Literal[False, "compat", "_compat"]  # only relevant for apply()

    def __init__(
        self,
        obj: Series,
        func: AggFuncType,
        *,
        by_row: Literal[False, "compat", "_compat"] = "compat",
        args,
        kwargs,
    ) -> None:
        # raw/result_type have no meaning for Series; fixed here.
        super().__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            by_row=by_row,
            args=args,
            kwargs=kwargs,
        )

    def apply(self) -> DataFrame | Series:
        """Compute Series.apply, dispatching on the type of ``func``."""
        obj = self.obj

        if len(obj) == 0:
            return self.apply_empty_result()

        # dispatch to handle list-like or dict-like
        if is_list_like(self.func):
            return self.apply_list_or_dict_like()

        if isinstance(self.func, str):
            # if we are a string, try to dispatch
            return self.apply_str()

        if self.by_row == "_compat":
            return self.apply_compat()

        # self.func is Callable
        return self.apply_standard()

    def agg(self):
        """Aggregate; falls back to calling ``func(obj)`` directly."""
        result = super().agg()
        if result is None:
            obj = self.obj
            func = self.func
            # string, list-like, and dict-like are entirely handled in super
            assert callable(func)
            result = func(obj, *self.args, **self.kwargs)
        return result

    def apply_empty_result(self) -> Series:
        # Empty in -> empty out, preserving dtype and index.
        obj = self.obj
        return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(
            obj, method="apply"
        )

    def apply_compat(self):
        """compat apply method for funcs in listlikes and dictlikes.

        Used for each callable when giving listlikes and dictlikes of callables to
        apply. Needed for compatibility with Pandas < v2.1.

        .. versionadded:: 2.1.0
        """
        obj = self.obj
        func = self.func

        if callable(func):
            f = com.get_cython_func(func)
            if f and not self.args and not self.kwargs:
                # Cythonized reduction: apply to the whole Series at once.
                return obj.apply(func, by_row=False)

        try:
            result = obj.apply(func, by_row="compat")
        except (ValueError, AttributeError, TypeError):
            # Element-wise application failed; retry on the whole Series.
            result = obj.apply(func, by_row=False)
        return result

    def apply_standard(self) -> DataFrame | Series:
        """Apply a callable either to the whole Series or element-wise."""
        # caller is responsible for ensuring that f is Callable
        func = cast(Callable, self.func)
        obj = self.obj

        if isinstance(func, np.ufunc):
            with np.errstate(all="ignore"):
                return func(obj, *self.args, **self.kwargs)
        elif not self.by_row:
            return func(obj, *self.args, **self.kwargs)

        if self.args or self.kwargs:
            # _map_values does not support args/kwargs
            def curried(x):
                return func(x, *self.args, **self.kwargs)

        else:
            curried = func

        mapped = obj._map_values(mapper=curried)

        if len(mapped) and isinstance(mapped[0], ABCSeries):
            # GH#43986 Need to do list(mapped) in order to get treated as nested
            #  See also GH#25959 regarding EA support
            return obj._constructor_expanddim(list(mapped), index=obj.index)
        else:
            return obj._constructor(mapped, index=obj.index).__finalize__(
                obj, method="apply"
            )
class GroupByApply(Apply):
    """Apply/aggregate machinery for GroupBy (and, via subclass, Resampler/Window)."""

    obj: GroupBy | Resampler | BaseWindow

    def __init__(
        self,
        obj: GroupBy[NDFrameT],
        func: AggFuncType,
        *,
        args,
        kwargs,
    ) -> None:
        kwargs = kwargs.copy()
        # Resolve the (deprecated) axis kwarg against the *underlying* object.
        self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0))
        super().__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            args=args,
            kwargs=kwargs,
        )

    def apply(self):
        # GroupBy implements apply itself; this class only serves agg paths.
        raise NotImplementedError

    def transform(self):
        raise NotImplementedError

    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        """Shared dispatch for list-like ``func`` on grouped objects."""
        obj = self.obj
        kwargs = self.kwargs
        if op_name == "apply":
            kwargs = {**kwargs, "by_row": False}

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        if obj._selected_obj.ndim == 1:
            # For SeriesGroupBy this matches _obj_with_exclusions
            selected_obj = obj._selected_obj
        else:
            selected_obj = obj._obj_with_exclusions

        # Only set as_index=True on groupby objects, not Window or Resample
        # that inherit from this class.
        with com.temp_setattr(
            obj, "as_index", True, condition=hasattr(obj, "as_index")
        ):
            keys, results = self.compute_list_like(op_name, selected_obj, kwargs)
        result = self.wrap_results_list_like(keys, results)
        return result

    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        """Shared dispatch for dict-like ``func`` on grouped objects."""
        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )

        assert op_name in ["agg", "apply"]

        obj = self.obj
        kwargs = {}
        if op_name == "apply":
            by_row = "_compat" if self.by_row else False
            kwargs.update({"by_row": by_row})

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        selected_obj = obj._selected_obj
        selection = obj._selection

        is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))

        # Numba Groupby engine/engine-kwargs passthrough
        if is_groupby:
            engine = self.kwargs.get("engine", None)
            engine_kwargs = self.kwargs.get("engine_kwargs", None)
            kwargs.update({"engine": engine, "engine_kwargs": engine_kwargs})

        with com.temp_setattr(
            obj, "as_index", True, condition=hasattr(obj, "as_index")
        ):
            result_index, result_data = self.compute_dict_like(
                op_name, selected_obj, selection, kwargs
            )
        result = self.wrap_results_dict_like(selected_obj, result_index, result_data)
        return result
class ResamplerWindowApply(GroupByApply):
    """Apply/aggregate machinery for Resampler and rolling/expanding windows."""

    axis: AxisInt = 0
    obj: Resampler | BaseWindow

    def __init__(
        self,
        obj: Resampler | BaseWindow,
        func: AggFuncType,
        *,
        args,
        kwargs,
    ) -> None:
        # NOTE: super(GroupByApply, ...) deliberately skips
        # GroupByApply.__init__, which would try to resolve an ``axis``
        # kwarg that Resampler/Window objects do not accept.
        super(GroupByApply, self).__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            args=args,
            kwargs=kwargs,
        )

    def apply(self):
        # Resampler/Window expose their own apply; only agg paths are used here.
        raise NotImplementedError

    def transform(self):
        raise NotImplementedError
def reconstruct_func(
    func: AggFuncType | None, **kwargs
) -> tuple[bool, AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]:
    """
    Normalize ``func`` for aggregation, detecting named-aggregation relabeling.

    When named aggregation is used, ``func`` is None and ``kwargs`` maps each
    output name to a ``(column, aggfunc)`` tuple; those are unpacked into a
    column -> [aggfuncs] mapping plus the output names and column order.
    Otherwise ``func`` (string / callable / list / dict of them) is validated
    and passed through unchanged.

    Parameters
    ----------
    func: agg function (e.g. 'min' or Callable) or list of agg functions
        (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
    **kwargs: dict, kwargs used in is_multi_agg_with_relabel and
        normalize_keyword_aggregation function for relabelling

    Returns
    -------
    relabelling: bool, if there is relabelling or not
    func: normalized and mangled func
    columns: tuple of column names
    order: array of columns indices

    Examples
    --------
    >>> reconstruct_func(None, **{"foo": ("col", "min")})
    (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))

    >>> reconstruct_func("min")
    (False, 'min', None, None)
    """
    relabeling = func is None and is_multi_agg_with_relabel(**kwargs)

    if relabeling:
        # Named aggregation: unpack kwargs into (func-mapping, names, order).
        func, columns, order = normalize_keyword_aggregation(kwargs)
        assert func is not None
        return relabeling, func, columns, order

    if isinstance(func, list) and len(set(func)) < len(func):
        # GH 28426 will raise error if duplicated function names are used and
        # there is no reassigned name
        raise SpecificationError(
            "Function names must be unique if there is no new column names "
            "assigned"
        )
    if func is None:
        # nicer error message
        raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")

    return relabeling, func, None, None
def is_multi_agg_with_relabel(**kwargs) -> bool:
    """
    Check whether kwargs passed to .agg look like multi-agg with relabeling.

    That is: at least one keyword, and every value is a 2-tuple of
    ``(column, aggfunc)``.

    Parameters
    ----------
    **kwargs : dict

    Returns
    -------
    bool

    Examples
    --------
    >>> is_multi_agg_with_relabel(a="max")
    False
    >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
    True
    >>> is_multi_agg_with_relabel()
    False
    """
    if not kwargs:
        return False
    return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values())
def normalize_keyword_aggregation(
    kwargs: dict,
) -> tuple[
    MutableMapping[Hashable, list[AggFuncTypeBase]],
    tuple[str, ...],
    npt.NDArray[np.intp],
]:
    """
    Normalize user-provided "named aggregation" kwargs.

    Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
    to the old Dict[str, List[scalar]]].

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    aggspec : dict
        The transformed kwargs.
    columns : tuple[str, ...]
        The user-provided keys.
    col_idx_order : List[int]
        List of columns indices.

    Examples
    --------
    >>> normalize_keyword_aggregation({"output": ("input", "sum")})
    (defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
    """
    from pandas.core.indexes.base import Index

    def _named(column, aggfunc):
        # Pair a target column with the (callable) name of its aggfunc.
        return (column, com.get_callable_name(aggfunc) or aggfunc)

    columns = tuple(kwargs)
    aggspec = defaultdict(list)
    requested = []
    for column, aggfunc in kwargs.values():
        aggspec[column].append(aggfunc)
        requested.append(_named(column, aggfunc))

    # Flatten aggspec back out in its own (column-grouped) order; this is the
    # order the aggregation result will actually be produced in.
    produced = [
        _named(column, aggfunc)
        for column, aggfuncs in aggspec.items()
        for aggfunc in aggfuncs
    ]

    # GH 25719: mangle duplicated names on both sides so each requested
    # output maps to exactly one slot in the produced result, then look up
    # the requested order positionally.
    col_idx_order = Index(_make_unique_kwarg_list(produced)).get_indexer(
        _make_unique_kwarg_list(requested)
    )
    return aggspec, columns, col_idx_order
def _make_unique_kwarg_list(
seq: Sequence[tuple[Any, Any]],
) -> Sequence[tuple[Any, Any]]:
"""
Uniquify aggfunc name of the pairs in the order list
Examples:
--------
>>> kwarg_list = [("a", "<lambda>"), ("a", "<lambda>"), ("b", "<lambda>")]
>>> _make_unique_kwarg_list(kwarg_list)
[('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
"""
return [
(pair[0], f"{pair[1]}_{seq[:i].count(pair)}") if seq.count(pair) > 1 else pair
for i, pair in enumerate(seq)
]
def relabel_result(
    result: DataFrame | Series,
    func: dict[str, list[Callable | str]],
    columns: Iterable[Hashable],
    order: Iterable[int],
) -> dict[Hashable, Series]:
    """
    Internal function to reorder result if relabelling is True for
    dataframe.agg, and return the reordered result in dict.

    Parameters:
    ----------
    result: Result from aggregation
    func: Dict of (column name, funcs)
    columns: New columns name for relabelling
    order: New order for relabelling

    Examples
    --------
    >>> from pandas.core.apply import relabel_result
    >>> result = pd.DataFrame(
    ...     {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
    ...     index=["max", "mean", "min"],
    ... )
    >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
    >>> columns = ("foo", "aab", "bar", "dat")
    >>> order = [0, 1, 2, 3]
    >>> result_in_dict = relabel_result(result, funcs, columns, order)
    >>> pd.DataFrame(result_in_dict, index=columns)
           A    C    B
    foo  2.0  NaN  NaN
    aab  NaN  6.0  NaN
    bar  NaN  NaN  4.0
    dat  NaN  NaN  2.5
    """
    from pandas.core.indexes.base import Index

    # Output names sorted into the user's requested order.
    reordered_indexes = [
        pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
    ]
    reordered_result_in_dict: dict[Hashable, Series] = {}
    idx = 0

    # Only reorder when multiple columns were aggregated (see comment below).
    reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
    for col, fun in func.items():
        s = result[col].dropna()

        # In the `_aggregate`, the callable names are obtained and used in `result`, and
        # these names are ordered alphabetically. e.g.
        #           C2   C1
        # <lambda>   1  NaN
        # amax     NaN  4.0
        # max      NaN  4.0
        # sum     18.0  6.0
        # Therefore, the order of functions for each column could be shuffled
        # accordingly so need to get the callable name if it is not parsed names, and
        # reorder the aggregated result for each column.
        # e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
        # [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
        # reorder so that aggregated values map to their functions regarding the order.

        # However there is only one column being used for aggregation, not need to
        # reorder since the index is not sorted, and keep as is in `funcs`, e.g.
        #         A
        # min   1.0
        # mean  1.5
        # mean  1.5
        if reorder_mask:
            fun = [
                com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
            ]
            col_idx_order = Index(s.index).get_indexer(fun)
            valid_idx = col_idx_order != -1
            if valid_idx.any():
                s = s.iloc[col_idx_order[valid_idx]]

        # assign the new user-provided "named aggregation" as index names, and reindex
        # it based on the whole user-provided names.
        if not s.empty:
            s.index = reordered_indexes[idx : idx + len(fun)]
        reordered_result_in_dict[col] = s.reindex(columns)
        idx = idx + len(fun)
    return reordered_result_in_dict
def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series:
    """Apply named-aggregation relabeling to an aggregation result.

    If ``kwargs`` encodes named aggregation (``func`` is None), the result
    rows are relabeled/reordered to the user-provided output names;
    otherwise ``result`` is returned unchanged.
    """
    from pandas import DataFrame

    relabeling, func, columns, order = reconstruct_func(func, **kwargs)

    if not relabeling:
        return result

    # This is to keep the order to columns occurrence unchanged, and also
    # keep the order of new columns occurrence unchanged.
    # reconstruct_func guarantees columns/order are populated when
    # relabeling is True.
    assert columns is not None
    assert order is not None

    return DataFrame(relabel_result(result, func, columns, order), index=columns)
# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:
def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
"""
Possibly mangle a list of aggfuncs.
Parameters
----------
aggfuncs : Sequence
Returns
-------
mangled: list-like
A new AggSpec sequence, where lambdas have been converted
to have unique names.
Notes
-----
If just one aggfunc is passed, the name will not be mangled.
"""
if len(aggfuncs) <= 1:
# don't mangle for .agg([lambda x: .])
return aggfuncs
i = 0
mangled_aggfuncs = []
for aggfunc in aggfuncs:
if com.get_callable_name(aggfunc) == "<lambda>":
aggfunc = partial(aggfunc)
aggfunc.__name__ = f"<lambda_{i}>"
i += 1
mangled_aggfuncs.append(aggfunc)
return mangled_aggfuncs
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
    """
    Make new lambdas with unique names.

    Parameters
    ----------
    agg_spec : Any
        An argument to GroupBy.agg.
        Non-dict-like `agg_spec` are pass through as is.
        For dict-like `agg_spec` a new spec is returned
        with name-mangled lambdas.

    Returns
    -------
    mangled : Any
        Same type as the input.

    Examples
    --------
    >>> maybe_mangle_lambdas("sum")
    'sum'

    >>> maybe_mangle_lambdas([lambda: 1, lambda: 2])  # doctest: +SKIP
    [<function __main__.<lambda_0>,
     <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
    """
    dict_like = is_dict_like(agg_spec)
    if not dict_like and not is_list_like(agg_spec):
        # Scalar spec (e.g. "sum" or a single callable): nothing to mangle.
        return agg_spec

    if not dict_like:
        return _managle_lambda_list(agg_spec)

    # Preserve the mapping subclass (e.g. OrderedDict) of the input.
    mangled = type(agg_spec)()
    for key, aggfuncs in agg_spec.items():
        if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
            mangled[key] = _managle_lambda_list(aggfuncs)
        else:
            mangled[key] = aggfuncs
    return mangled
def validate_func_kwargs(
    kwargs: dict,
) -> tuple[list[str], list[str | Callable[..., Any]]]:
    """
    Validates types of user-provided "named aggregation" kwargs.

    `TypeError` is raised if aggfunc is not `str` or callable.

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    columns : List[str]
        List of user-provided keys.
    func : List[Union[str, callable[...,Any]]]
        List of user-provided aggfuncs

    Examples
    --------
    >>> validate_func_kwargs({"one": "min", "two": "max"})
    (['one', 'two'], ['min', 'max'])
    """
    if not kwargs:
        # Nothing provided at all: neither positional func nor named kwargs.
        raise TypeError("Must provide 'func' or named aggregation **kwargs.")

    columns = list(kwargs)
    funcs: list = []
    for value in kwargs.values():
        if not callable(value) and not isinstance(value, str):
            raise TypeError(
                f"func is expected but received {type(value).__name__} in **kwargs."
            )
        funcs.append(value)
    return columns, funcs
def include_axis(op_name: Literal["agg", "apply"], colg: Series | DataFrame) -> bool:
    """Whether the ``axis`` keyword should be forwarded for this object/op.

    DataFrames always take it; Series only for ``agg``.
    """
    if isinstance(colg, ABCDataFrame):
        return True
    return isinstance(colg, ABCSeries) and op_name == "agg"
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@core@apply.py@.PATH_END.py
|
{
"filename": "maf_run_results.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/rubin_sim/maf/web/maf_run_results.py",
"type": "Python"
}
|
__all__ = ("MafRunResults",)
import os
import re
from collections import OrderedDict
import numpy as np
import rubin_sim.maf.db as db
import rubin_sim.maf.metric_bundles as metricBundles
class MafRunResults:
"""Read and serve the MAF resultsDb_sqlite.db database for the
show_maf jinja2 templates.
Deals with a single MAF run (one output directory, one results_db) only.
Parameters
----------
out_dir : `str`
The location of the results database for this run.
run_name : `str`, optional
The name of the opsim run.
If None, simply stays blank on show_maf display pages.
results_db : `str`, optional
The path to the sqlite database in `out_dir`.
If None, uses the default of `resultsDb_sqlite.db`.
"""
    def __init__(self, out_dir, run_name=None, results_db=None):
        """Read the results database and build display orderings.

        See the class docstring for parameter descriptions.
        """
        self.out_dir = os.path.relpath(out_dir, ".")
        self.run_name = run_name
        # Read in the results database.
        if results_db is None:
            results_db = os.path.join(self.out_dir, "resultsDb_sqlite.db")
        database = db.ResultsDb(database=results_db)
        # Get the metric and display info (1-1 match)
        self.metrics = database.get_metric_display_info()
        self.metrics = self.sort_metrics(self.metrics)
        # Get the plot and stats info (many-1 metric match)
        # Some summary stats are excluded from the web display entirely.
        skip_stats = ["Completeness@Time", "Completeness H", "FractionPop "]
        self.stats = database.get_summary_stats(summary_name_notlike=skip_stats)
        self.plots = database.get_plot_files()
        # Pull up the names of the groups and subgroups.
        groups = sorted(np.unique(self.metrics["display_group"]))
        self.groups = OrderedDict()
        for g in groups:
            group_metrics = self.metrics[np.where(self.metrics["display_group"] == g)]
            self.groups[g] = sorted(np.unique(group_metrics["display_subgroup"]))
        # Preferred display ordering for summary statistic names.
        self.summary_stat_order = [
            "Id",
            "Identity",
            "Median",
            "Mean",
            "Rms",
            "RobustRms",
            "N(-3Sigma)",
            "N(+3Sigma)",
            "Count",
            "25th%ile",
            "75th%ile",
            "Min",
            "Max",
        ]
        # Add in the table fraction sorting to summary stat ordering.
        table_fractions = [
            x for x in list(np.unique(self.stats["summary_metric"])) if x.startswith("TableFraction")
        ]
        if len(table_fractions) > 0:
            # The three special fractions are pinned to fixed positions;
            # remove them before sorting the remainder alphabetically.
            for x in (
                "TableFraction 0 == P",
                "TableFraction 1 == P",
                "TableFraction 1 < P",
            ):
                if x in table_fractions:
                    table_fractions.remove(x)
            table_fractions = sorted(table_fractions)
            self.summary_stat_order.append("TableFraction 0 == P")
            for table_frac in table_fractions:
                self.summary_stat_order.append(table_frac)
            self.summary_stat_order.append("TableFraction 1 == P")
            self.summary_stat_order.append("TableFraction 1 < P")
        # Preferred display ordering for plot types.
        self.plot_order = ["SkyMap", "Histogram", "PowerSpectrum", "Combo"]
# Methods to deal with metricIds
def convert_select_to_metrics(self, group_list, metric_id_list):
"""
Convert the lists of values returned by 'select metrics' template page
into an appropriate dataframe of metrics (in sorted order).
Parameters
----------
group_list : `list` [`str`]
The groups of metrics to show on the show_maf pages.
metric_id_list : `list` [`int`]
The integer ids of the metrics in the sqlite results database.
Returns
-------
metrics : `np.ndarray`, (N,)
An array of the metric information for the metrics .
"""
metric_ids = set()
for group_subgroup in group_list:
group = group_subgroup.split("_")[0]
subgroup = group_subgroup.split("_")[-1].replace("+", " ")
m_ids = self.metric_ids_in_subgroup(group, subgroup)
for m_id in m_ids:
metric_ids.add(m_id)
for m_id in metric_id_list:
m_id = int(m_id)
metric_ids.add(m_id)
metric_ids = list(metric_ids)
metrics = self.metric_ids_to_metrics(metric_ids)
metrics = self.sort_metrics(metrics)
return metrics
    def get_json(self, metric):
        """
        Return the JSON string containing the data for a particular metric.

        Returns None if `metric` holds more than one record, or if the
        metric has no datafile recorded ("NULL"), or if the bundle
        produces no JSON output.
        """
        if len(metric) > 1:
            return None
        metric = metric[0]
        filename = metric["metric_datafile"]
        if filename.upper() == "NULL":
            return None
        datafile = os.path.join(self.out_dir, filename)
        # Read data back into a bundle.
        m_b = metricBundles.create_empty_metric_bundle()
        m_b.read(datafile)
        io = m_b.output_json()
        if io is None:
            return None
        return io.get_value()
def get_npz(self, metric):
"""
Return the npz data.
"""
if len(metric) > 1:
return None
metric = metric[0]
filename = metric["metric_datafile"]
if filename.upper() == "NULL":
return None
else:
datafile = os.path.join(self.out_dir, filename)
return datafile
def get_results_db(self):
"""
Return the summary results sqlite filename, as long as the
results data is named `resultsDb_sqlite.db`.
"""
return os.path.join(self.out_dir, "resultsDb_sqlite.db")
def metric_ids_in_subgroup(self, group, subgroup):
"""
Return the metric_ids within a given group/subgroup.
"""
metrics = self.metrics_in_subgroup(group, subgroup)
metric_ids = list(metrics["metric_id"])
return metric_ids
def metric_ids_to_metrics(self, metric_ids, metrics=None):
"""
Return an ordered numpy array of metrics matching metric_ids.
"""
if metrics is None:
metrics = self.metrics
metrics = metrics[np.in1d(metrics["metric_id"], metric_ids)]
return metrics
def metrics_to_metric_ids(self, metrics):
"""
Return a list of the metric Ids corresponding to a subset of metrics.
"""
return list(metrics["metric_id"])
# Methods to deal with metrics in numpy recarray.
def sort_metrics(
self,
metrics,
order=(
"display_group",
"display_subgroup",
"base_metric_name",
"slicer_name",
"display_order",
"metric_info_label",
),
):
"""
Sort the metrics by order specified by 'order'.
Default is to sort by group, subgroup, metric name, slicer,
display order, then info_label.
Returns sorted numpy array.
"""
if len(metrics) > 0:
metrics = np.sort(metrics, order=order)
return metrics
def metrics_in_group(self, group, metrics=None, sort=True):
"""
Given a group, return the metrics belonging to this group,
in display order.
"""
if metrics is None:
metrics = self.metrics
metrics = metrics[np.where(metrics["display_group"] == group)]
if sort:
metrics = self.sort_metrics(metrics)
return metrics
def metrics_in_subgroup(self, group, subgroup, metrics=None):
"""
Given a group and subgroup, return a dataframe of the metrics
belonging to these group/subgroups, in display order.
If 'metrics' is provided, then only consider this subset of metrics.
"""
metrics = self.metrics_in_group(group, metrics, sort=False)
if len(metrics) > 0:
metrics = metrics[np.where(metrics["display_subgroup"] == subgroup)]
metrics = self.sort_metrics(metrics)
return metrics
def metrics_to_subgroups(self, metrics):
"""
Given an array of metrics, return an ordered dict of their
group/subgroups.
"""
group_list = sorted(np.unique(metrics["display_group"]))
groups = OrderedDict()
for group in group_list:
groupmetrics = self.metrics_in_group(group, metrics, sort=False)
groups[group] = sorted(np.unique(groupmetrics["display_subgroup"]))
return groups
    def metrics_with_plot_type(self, plot_type="SkyMap", metrics=None):
        """
        Return an array of metrics with plot=plot_type
        (optionally also within a metric subset).
        """
        # Allow some variation in plot_type names for backward compatibility,
        # even if plot_type is a list.
        if not isinstance(plot_type, list):
            plot_type = [plot_type]
        plot_types = []
        for p_t in plot_type:
            plot_types.append(p_t)
            # NOTE(review): matches on "lot" but strips 4 characters -- this
            # assumes the suffix is always "Plot"; confirm callers never pass
            # a name ending in a bare "lot".
            if p_t.endswith("lot"):
                plot_types.append(p_t[:-4])
            else:
                # Also accept the lower-cased name with "Plot" appended.
                plot_types.append(p_t.lower() + "Plot")
        if metrics is None:
            metrics = self.metrics
        # Identify the plots with the right plot_type, get their IDs.
        plot_match = self.plots[np.in1d(self.plots["plot_type"], plot_types)]
        # Convert those potentially matching metricIds to metrics,
        # using the subset info.
        metrics = self.metric_ids_to_metrics(plot_match["metric_id"], metrics)
        return metrics
def unique_metric_names(self, metrics=None, baseonly=True):
"""
Return a list of the unique metric names,
preserving the order of 'metrics'.
"""
if metrics is None:
metrics = self.metrics
if baseonly:
sort_name = "base_metric_name"
else:
sort_name = "metric_name"
metric_names = list(np.unique(metrics[sort_name]))
return metric_names
def metrics_with_summary_stat(self, summary_stat_name="Identity", metrics=None):
"""
Return metrics with summary stat matching 'summary_stat_name'
(optionally, within a metric subset).
"""
if metrics is None:
metrics = self.metrics
# Identify the potentially matching stats.
stats = self.stats[np.in1d(self.stats["summary_metric"], summary_stat_name)]
# Identify the subset of relevant metrics.
metrics = self.metric_ids_to_metrics(stats["metric_id"], metrics)
# Re-sort metrics because at this point, probably want displayOrder
# + info_label before metric name.
metrics = self.sort_metrics(
metrics,
order=[
"display_group",
"display_subgroup",
"slicer_name",
"display_order",
"metric_info_label",
"base_metric_name",
],
)
return metrics
def metrics_with_stats(self, metrics=None):
"""
Return metrics that have any summary stat.
"""
if metrics is None:
metrics = self.metrics
# Identify metricIds which are also in stats.
metrics = metrics[np.in1d(metrics["metric_id"], self.stats["metric_id"])]
metrics = self.sort_metrics(
metrics,
order=[
"display_group",
"display_subgroup",
"slicer_name",
"display_order",
"metric_info_label",
"base_metric_name",
],
)
return metrics
def unique_slicer_names(self, metrics=None):
"""
For an array of metrics, return the unique slicer names.
"""
if metrics is None:
metrics = self.metrics
return list(np.unique(metrics["slicer_name"]))
def metrics_with_slicer(self, slicer, metrics=None):
"""
For an array of metrics, return the subset which match a
particular 'slicername' value.
"""
if metrics is None:
metrics = self.metrics
metrics = metrics[np.where(metrics["slicer_name"] == slicer)]
return metrics
def unique_metric_name_and_info_label(self, metrics=None):
"""
For an array of metrics, return the unique metric names
+ info_label combo in same order.
"""
if metrics is None:
metrics = self.metrics
metric_info_label = []
for metric_name, info_label in zip(metrics["metric_name"], metrics["metric_info_label"]):
metricinfo = " ".join([metric_name, info_label])
if metricinfo not in metric_info_label:
metric_info_label.append(metricinfo)
return metric_info_label
def unique_metric_info_label(self, metrics=None):
"""
For an array of metrics, return a list of the unique info_label.
"""
if metrics is None:
metrics = self.metrics
return list(np.unique(metrics["metric_info_label"]))
def metrics_with_info_label(self, info_label, metrics=None):
"""
For an array of metrics, return the subset which match a
particular 'info_label' value.
"""
if metrics is None:
metrics = self.metrics
metrics = metrics[np.where(metrics["metric_info_label"] == info_label)]
return metrics
def metrics_with_metric_name(self, metric_name, metrics=None, baseonly=True):
"""
Return all metrics which match metric_name
(default, only the 'base' metric name).
"""
if metrics is None:
metrics = self.metrics
if baseonly:
metrics = metrics[np.where(metrics["base_metric_name"] == metric_name)]
else:
metrics = metrics[np.where(metrics["metric_name"] == metric_name)]
return metrics
def metric_info(self, metric=None, with_data_link=False, with_slicer_name=True):
"""
Return a dict with the metric info we want to show on the webpages.
Currently : MetricName / Slicer/ InfoLabel / datafile (for download)
Used to build a lot of tables in showMaf.
"""
metric_info = OrderedDict()
if metric is None:
metric_info["Metric Name"] = ""
if with_slicer_name:
metric_info["Slicer"] = ""
metric_info["Info Label"] = ""
if with_data_link:
metric_info["Data"] = []
metric_info["Data"].append([None, None])
return metric_info
# Otherwise, do this for real (not a blank).
metric_info["Metric Name"] = metric["metric_name"]
if with_slicer_name:
metric_info["Slicer"] = metric["slicer_name"]
metric_info["InfoL abel"] = metric["metric_info_label"]
if with_data_link:
metric_info["Data"] = []
metric_info["Data"].append(metric["metric_datafile"])
metric_info["Data"].append(os.path.join(self.out_dir, metric["metric_datafile"]))
return metric_info
def caption_for_metric(self, metric):
"""
Return the caption for a given metric.
"""
caption = metric["display_caption"]
if caption == "NULL":
return ""
else:
return caption
# Methods for plots.
def plots_for_metric(self, metric):
"""
Return a numpy array of the plots which match a given metric.
"""
return self.plots[np.where(self.plots["metric_id"] == metric["metric_id"])]
def plot_dict(self, plots=None):
"""
Given an array of plots (for a single metric usually).
Returns an ordered dict with 'plot_type' for interfacing with
jinja2 templates.
plot_dict ==
{'SkyMap': {'plot_file': [], 'thumb_file', []}, 'Histogram': {}..}
If no plot of a particular type, the plot_file and thumb_file
are empty lists.
Calling with plots=None returns a blank plot_dict.
"""
plot_dict = OrderedDict()
# Go through plots in 'plotOrder'.
if plots is None:
for p in self.plot_order:
plot_dict[p] = {}
plot_dict[p]["plot_file"] = ""
plot_dict[p]["thumb_file"] = ""
else:
plot_types = list(np.unique(plots["plot_type"]))
for p in self.plot_order:
if p in plot_types:
plot_dict[p] = {}
plotmatch = plots[np.where(plots["plot_type"] == p)]
plot_dict[p]["plot_file"] = []
plot_dict[p]["thumb_file"] = []
for pm in plotmatch:
plot_dict[p]["plot_file"].append(self.get_plot_file(pm))
plot_dict[p]["thumb_file"].append(self.get_thumb_file(pm))
plot_types.remove(p)
# Round up remaining plots.
for p in plot_types:
plot_dict[p] = {}
plotmatch = plots[np.where(plots["plot_type"] == p)]
plot_dict[p]["plot_file"] = []
plot_dict[p]["thumb_file"] = []
for pm in plotmatch:
plot_dict[p]["plot_file"].append(self.get_plot_file(pm))
plot_dict[p]["thumb_file"].append(self.get_thumb_file(pm))
return plot_dict
def get_thumb_file(self, plot):
"""
Return the thumbnail file name for a given plot.
"""
thumb_file = os.path.join(self.out_dir, plot["thumb_file"])
return thumb_file
def get_plot_file(self, plot):
"""
Return the filename for a given plot.
"""
plot_file = os.path.join(self.out_dir, plot["plot_file"])
return plot_file
def order_plots(self, sky_plots):
"""
sky_plots = numpy array of skymap plots.
Returns an ordered list of plotDicts.
The goal is to lay out the skymaps in a 3x2 grid on the MultiColor
page, in ugrizy order.
If a plot for a filter is missing, add a gap. (i.e. if there is no
u band plot, keep a blank spot).
If there are other plots, with multiple filters or no filter
info, they are added to the end.
If sky_plots includes multiple plots in the same filter,
just goes back to displayOrder.
"""
ordered_sky_plots = []
if len(sky_plots) == 0:
return ordered_sky_plots
order_list = ["u", "g", "r", "i", "z", "y"]
blank_plot_dict = self.plot_dict(None)
# Look for filter names in the plot filenames.
too_many_plots = False
for f in order_list:
pattern = "_" + f + "_"
matches = np.array([bool(re.search(pattern, x)) for x in sky_plots["plot_file"]])
match_sky_plot = sky_plots[matches]
if len(match_sky_plot) == 1:
ordered_sky_plots.append(self.plot_dict(match_sky_plot))
elif len(match_sky_plot) == 0:
ordered_sky_plots.append(blank_plot_dict)
else:
# If we found more than one plot in the same filter,
# we just go back to displayOrder.
too_many_plots = True
break
if too_many_plots is False:
# Add on any additional non-filter plots (e.g. joint completeness)
# that do NOT match original _*_ pattern.
pattern = "_[ugrizy]_"
nonmatches = np.array([bool(re.search(pattern, x)) for x in sky_plots["plot_file"]])
nonmatch_sky_plots = sky_plots[nonmatches == False]
if len(nonmatch_sky_plots) > 0:
for sky_plot in nonmatch_sky_plots:
ordered_sky_plots.append(self.plot_dict(np.array([sky_plot])))
elif too_many_plots:
metrics = self.metrics[np.in1d(self.metrics["metric_id"], sky_plots["metric_id"])]
metrics = self.sort_metrics(metrics, order=["display_order"])
ordered_sky_plots = []
for m in metrics:
sky_plot = sky_plots[np.where(sky_plots["metric_id"] == m["metric_id"])]
ordered_sky_plots.append(self.plot_dict(sky_plot))
# Pad out to make sure there are rows of 3
while len(ordered_sky_plots) % 3 != 0:
ordered_sky_plots.append(blank_plot_dict)
return ordered_sky_plots
def get_sky_maps(self, metrics=None, plot_type="SkyMap"):
"""
Return a numpy array of the plots with plot_type=plot_type,
optionally for subset of metrics.
"""
if metrics is None:
metrics = self.metrics
# Match the plots to the metrics required.
plot_metric_match = self.plots[np.in1d(self.plots["metric_id"], metrics["metric_id"])]
# Match the plot type (which could be a list)
plot_match = plot_metric_match[np.in1d(plot_metric_match["plot_type"], plot_type)]
return plot_match
# Set of methods to deal with summary stats.
def stats_for_metric(self, metric, stat_name=None):
"""
Return a numpy array of summary statistics which match a
given metric(s).
Optionally specify a particular stat_name that you want to match.
"""
stats = self.stats[np.where(self.stats["metric_id"] == metric["metric_id"])]
if stat_name is not None:
stats = stats[np.where(stats["summary_metric"] == stat_name)]
return stats
def stat_dict(self, stats):
"""
Returns an ordered dictionary with statName:statValue
for an array of stats.
Note that if you pass 'stats' from multiple metrics with the same
summary names, they will be overwritten in the resulting dictionary!
So just use stats from one metric, with unique summary_metric names.
"""
# Result = dict with key
# == summary stat name, value = summary stat value.
sdict = OrderedDict()
statnames = self.order_stat_names(stats)
for n in statnames:
match = stats[np.where(stats["summary_metric"] == n)]
# We're only going to look at the first value;
# and this should be a float.
sdict[n] = match["summary_value"][0]
return sdict
def order_stat_names(self, stats):
"""
Given an array of stats, return a list containing all the unique
'summary_metric' names in a default ordering
(identity-count-mean-median-rms..).
"""
names = list(np.unique(stats["summary_metric"]))
# Add some default sorting:
namelist = []
for nord in self.summary_stat_order:
if nord in names:
namelist.append(nord)
names.remove(nord)
for remaining in names:
namelist.append(remaining)
return namelist
def all_stat_names(self, metrics):
"""
Given an array of metrics, return a list containing all the
unique 'summary_metric' names in a default ordering.
"""
names = np.unique(
self.stats["summary_metric"][np.in1d(self.stats["metric_id"], metrics["metric_id"])]
)
names = list(names)
# Add some default sorting.
namelist = []
for nord in self.summary_stat_order:
if nord in names:
namelist.append(nord)
names.remove(nord)
for remaining in names:
namelist.append(remaining)
return namelist
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@rubin_sim@maf@web@maf_run_results.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/eso/tests/__init__.py",
"type": "Python"
}
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@eso@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/histogram/hoverlabel/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram.hoverlabel"
_path_str = "histogram.hoverlabel.font"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
    # color
    # -----
    @property
    def color(self):
        """
        Font color for the hover label text.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba, hsl/hsla or hsv/hsva string
          - A named CSS color
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val
    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for the
        `color` data.

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface applied by the web browser.

        Multiple families may be given, comma-separated, in order of
        preference; a font can only be applied if it is available on
        the rendering system. Chart Studio Cloud image generation
        supports a limited set: "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
        "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
        "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val
    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for the
        `family` data.

        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val
    # size
    # ----
    @property
    def size(self):
        """
        Font size for the hover label text.

        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val
    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for the
        `size` data.

        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text description of each property; consumed when building
        # the generated constructor docstring.
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """
    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Font object.

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.histogram.hoverlabel.Font`
        color
            Font color (or array of colors).
        colorsrc
            Sets the source reference on Chart Studio Cloud for color.
        family
            HTML font family (or array of families).
        familysrc
            Sets the source reference on Chart Studio Cloud for family.
        size
            Font size, >= 1 (or array of sizes).
        sizesrc
            Sets the source reference on Chart Studio Cloud for size.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Internal construction path: the parent object passes itself in.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.histogram.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.hoverlabel.Font`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@histogram@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "_array.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter/error_y/_array.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ArrayValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `array` data property of `scatter.error_y`."""

    def __init__(self, plotly_name="array", parent_name="scatter.error_y", **kwargs):
        # Callers may override these defaults through kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(ArrayValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter@error_y@_array.py@.PATH_END.py
|
{
"filename": "common_cosmo.py",
"repo_name": "CobayaSampler/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/tests/common_cosmo.py",
"type": "Python"
}
|
"""
Body of the best-fit test for cosmological likelihoods
"""
from copy import deepcopy
from cobaya.typing import empty_dict
from cobaya.model import get_model
from cobaya.input import update_info
from cobaya.cosmo_input import create_input, planck_base_model
from cobaya.tools import recursive_update
from .common import process_packages_path
from .conftest import install_test_wrapper
# Tolerance for the tests of the derived parameters, in units of the sigma of Planck 2015
tolerance_derived = 0.055
def body_of_test(packages_path, best_fit, info_likelihood, info_theory, ref_chi2,
                 best_fit_derived=None, extra_model=empty_dict, skip_not_installed=False):
    """Evaluate likelihoods and derived parameters at a cosmology best fit
    and assert they agree with reference values.

    Parameters
    ----------
    packages_path : str
        Path to the cobaya external-packages installation.
    best_fit : dict
        Best-fit values of the sampled parameters.
    info_likelihood : dict
        Likelihood blocks merged into the generated input info.
    info_theory : dict
        Theory block (a single theory code) merged into the input info.
    ref_chi2 : dict
        Reference chi2 per likelihood, plus a "tolerance" entry.
    best_fit_derived : dict, optional
        Derived-parameter name -> (best-fit value, sigma); a None value
        skips the test for that parameter.
    extra_model : dict, optional
        Extra model specifications merged over the Planck base model.
    skip_not_installed : bool, optional
        Skip (rather than fail) when a required component is not installed.

    Raises
    ------
    AssertionError
        If any likelihood chi2 or derived parameter is off tolerance.
    """
    # Create base info
    theo = list(info_theory)[0]
    # In Class, theta_s is exact, but different from the approximate one cosmomc_theta
    # used by Planck, so we take H0 instead
    planck_base_model_prime = deepcopy(planck_base_model)
    planck_base_model_prime.update(extra_model or {})
    if "H0" in best_fit:
        planck_base_model_prime["hubble"] = "H"
        best_fit_derived = deepcopy(best_fit_derived) or {}
        best_fit_derived.pop("H0", None)
    info = create_input(planck_names=True, theory=theo, **planck_base_model_prime)
    # Add specifics for the test: theory, likelihoods and derived parameters
    info = recursive_update(info, {"theory": info_theory})
    info["theory"][theo]["use_renames"] = True
    info = recursive_update(info, {"likelihood": info_likelihood})
    info["params"].update(dict.fromkeys(best_fit_derived or []))
    # We need UPDATED info, to get the likelihoods' nuisance parameters
    info = update_info(info)
    # Notice that update_info adds an aux internal-only "params" property to the likes
    for lik in info["likelihood"]:
        info["likelihood"][lik].pop("params", None)
    info["packages_path"] = process_packages_path(packages_path)
    # Ask for debug output and force stopping at any error
    info["debug"] = True
    info["stop_at_error"] = True
    # Create the model and compute likelihood and derived parameters at best fit
    model = install_test_wrapper(skip_not_installed, get_model, info)
    best_fit_values = {p: best_fit[p] for p in model.parameterization.sampled_params()}
    likes, derived = model.loglikes(best_fit_values)
    likes = dict(zip(list(model.likelihood), likes))
    derived = dict(zip(list(model.parameterization.derived_params()), derived))
    # Check value of likelihoods
    for like in info["likelihood"]:
        chi2 = -2 * likes[like]
        msg = ("Testing likelihood '%s': | %.2f (now) - %.2f (ref) | = %.2f >=? %.2f" % (
            like, chi2, ref_chi2[like], abs(chi2 - ref_chi2[like]),
            ref_chi2["tolerance"]))
        assert abs(chi2 - ref_chi2[like]) < ref_chi2["tolerance"], msg
        print(msg)
    # Check value of derived parameters
    not_tested = []
    not_passed = []
    for p in best_fit_derived or {}:
        # Fixed: test membership *before* indexing -- the previous order
        # evaluated best_fit_derived[p][0] first, which made the
        # membership check dead code (it could never guard the lookup).
        if p not in best_fit_derived or best_fit_derived[p][0] is None:
            not_tested += [p]
            continue
        rel = (abs(derived[p] - best_fit_derived[p][0]) /
               best_fit_derived[p][1])
        if rel > tolerance_derived * (
                2 if p in (
                    "YHe", "Y_p", "DH", "sigma8", "s8omegamp5", "thetastar") else 1):
            not_passed += [(p, rel, derived[p], best_fit_derived[p][0])]
    if not_tested:
        print("Derived parameters not tested because not implemented: %r" % not_tested)
    assert not not_passed, "Some derived parameters were off. Fractions of " \
                           "test tolerance: %r" % not_passed
|
CobayaSamplerREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@tests@common_cosmo.py@.PATH_END.py
|
{
"filename": "stratsi_plot.py",
"repo_name": "minkailin/stratsi",
"repo_path": "stratsi_extracted/stratsi-master/caseA/stratsi_plot.py",
"type": "Python"
}
|
import sys
import numpy as np
from mpi4py import MPI
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
import h5py
import argparse
from scipy.integrate import simps
#import matplotlib.ticker as mtick
#custom_preamble = {
# "text.usetex": True,
# "text.latex.preamble": [
# r"\usepackage{amsmath}", # for the align enivironment
# ],
# }
from stratsi_params import alpha, delta, stokes, metal, epsilon, vdz, dvdz, rhog, dln_rhog, viscosity_pert
from stratsi_1fluid import epsilon_eqm, dvx_eqm, dvy_eqm, vz_eqm, dvz_eqm, eta_hat, P_eqm#, z_maxvshear, maxvshear
'''
process command line arguements
'''
parser = argparse.ArgumentParser()
parser.add_argument("--mode", nargs='*', help="select mode number")
parser.add_argument("--kx", nargs='*', help="select kx")
parser.add_argument("--sig", nargs='*', help="select eigenvalue")
args = parser.parse_args()

# --mode selects a kx index directly; --kx (used further below) overrides it
# by picking the nearest wavenumber in the scan.
# Fix: np.int/np.float were deprecated in NumPy 1.20 and removed in 1.24;
# they were plain aliases of the builtins, so int/float are drop-in.
if(args.mode):
    plot_mode = int(args.mode[0])
else:
    plot_mode = 0

if(args.kx):
    plot_kx = float(args.kx[0])
else:
    plot_kx = 400.0

# --sig takes two numbers (growth rate, frequency) combined into the complex
# eigenvalue s - i*omega used to select a specific mode.
if(args.sig):
    eigenv = float(args.sig[0]) - float(args.sig[1])*1j
else:
    eigenv = 1.0

#print("plotting mode number {0:3d}".format(plot_mode))
'''
read in one-fluid data
'''
# Load the one-fluid eigenmode scan: wavenumber grid, vertical grid, and for
# each kx the eigenvalues (freq) and eigenfunctions W, Q, Ux, Uy, Uz.
with h5py.File('stratsi_1fluid_modes.h5','r') as infile:

    ks_1f = infile['scales']['kx_space'][:]
    z_1f = infile['scales']['z'][:]
    zmax_1f = infile['scales']['zmax'][()]

    freqs_1f= []
    eig_W1f = []
    eig_Q1f = []
    eig_Ux= []
    eig_Uy= []
    eig_Uz= []

    # one entry per kx in the scan
    for k_i in infile['tasks']:
        freqs_1f.append(infile['tasks'][k_i]['freq'][:])
        eig_W1f.append(infile['tasks'][k_i]['eig_W'][:])
        eig_Q1f.append(infile['tasks'][k_i]['eig_Q'][:])
        eig_Ux.append(infile['tasks'][k_i]['eig_Ux'][:])
        eig_Uy.append(infile['tasks'][k_i]['eig_Uy'][:])
        eig_Uz.append(infile['tasks'][k_i]['eig_Uz'][:])

# Select the wavenumber to analyse: by index (--mode) or nearest kx (--kx).
# NOTE(review): if neither flag is given, n is never assigned and the next
# line raises NameError -- confirm the script is always run with one of them.
if(args.mode):
    n = plot_mode
if(args.kx):
    n = np.argmin(np.abs(ks_1f-plot_kx))

kx_1f = ks_1f[n]
sigma_1f = freqs_1f[n]
growth_1f = sigma_1f.real
freq_1f =-sigma_1f.imag

# Pick the eigenvalue closest to --sig, otherwise the fastest-growing mode.
if(args.sig):
    g1 = np.argmin(np.abs(sigma_1f-eigenv))
else:
    g1 = np.argmax(growth_1f)

sgrow_1f = growth_1f[g1]
ofreq_1f = freq_1f[g1]

#print("one-fluid model: kx, growth, freq = {0:1.2e} {1:13.6e} {2:13.6e}".format(kx_1f, sgrow_1f, ofreq_1f))
print("one-fluid model: kx, growth, freq = {0:1.6e} {1:13.6e} {2:13.6e}".format(kx_1f, sgrow_1f, ofreq_1f))

W1f = np.array(eig_W1f[n][g1])
Q1f = np.array(eig_Q1f[n][g1])
Ux = np.array(eig_Ux[n][g1])
Uy = np.array(eig_Uy[n][g1])
Uz = np.array(eig_Uz[n][g1])

#normalize eigenfunctions such that at delta rhod/rhod = W+Q is unity (and real) at its maximum
g2 = np.argmax(np.abs(W1f + Q1f))
norm_1f = W1f[g2] + Q1f[g2]

W1f /= norm_1f
Q1f /= norm_1f
Ux /= norm_1f
Uy /= norm_1f
Uz /= norm_1f

del_rhod1f = W1f + Q1f
'''
analysis of energy profiles of the most unstable mode based on 1 fluid results
'''
# Equilibrium vertical profiles evaluated on the one-fluid grid
# (callables imported from stratsi_1fluid).
eps1f = epsilon_eqm(z_1f)
dvx1f = dvx_eqm(z_1f)
dvy1f = dvy_eqm(z_1f)
vz1f = vz_eqm(z_1f)
dvz1f = dvz_eqm(z_1f)
rho1f = P_eqm(z_1f) #divided by cs^2, but cs=1

del_rho1f = W1f + eps1f*Q1f/(1.0 + eps1f)

# Vertical derivatives of the selected eigenfunctions.
dW1f= np.gradient(W1f, z_1f)
dUx = np.gradient(Ux, z_1f)
dUy = np.gradient(Uy, z_1f)
dUz = np.gradient(Uz, z_1f)

# Pseudo-energy decomposition: total plus the individual source terms
# (A: vertical shear, split into A1/A2/A3; B: settling; C: pressure;
# D: dust-gas drift; E: buoyancy) -- labels as used in the plots below.
energy1f_tot = (np.abs(Ux)**2 + 4*np.abs(Uy)**2 + np.abs(Uz)**2)
energy1f_A1 = -(dvx1f*np.real(Uz*np.conj(Ux)))
energy1f_A2 = -(4.0*dvy1f*np.real(Uz*np.conj(Uy)))
energy1f_A3 = -(dvz1f*np.abs(Uz)**2)
energy1f_A = energy1f_A1 + energy1f_A2 + energy1f_A3
energy1f_B = -vz1f*np.real(dUx*np.conj(Ux) + 4.0*dUy*np.conj(Uy) + dUz*np.conj(Uz))
energy1f_C = (kx_1f*np.imag(W1f*np.conj(Ux)) - np.real(dW1f*np.conj(Uz)))/(1.0 + eps1f)
energy1f_D = -2.0*eta_hat*np.real(Q1f*np.conj(Ux))*eps1f/(1.0 + eps1f)/(1.0 + eps1f)
energy1f_E = -z_1f*eps1f*np.real(Q1f*np.conj(Uz))/(1.0 + eps1f)

# Normalize each term by the growth rate of the selected mode.
energy1f_A2 /= sgrow_1f
energy1f_A /= sgrow_1f
energy1f_B /= sgrow_1f
energy1f_C /= sgrow_1f
energy1f_D /= sgrow_1f
energy1f_E /= sgrow_1f
'''
compare integrated energetics for the most unstable mode (at each kx) based on 1 fluid result
'''
energy1f_tot_int =[]
energy1f_A_int =[]
energy1f_A2_int =[]
energy1f_B_int =[]
energy1f_C_int =[]
energy1f_D_int =[]
energy1f_E_int =[]

# Repeat the energy decomposition for the fastest-growing mode at every kx,
# integrating each term over z weighted by the equilibrium density rho1f.
for i, kx1f in enumerate(ks_1f):
    g3 = np.argmax(freqs_1f[i].real)
    s1f = np.amax(freqs_1f[i].real)

    w1f = np.array(eig_W1f[i][g3])
    q1f = np.array(eig_Q1f[i][g3])
    ux = np.array(eig_Ux[i][g3])
    uy = np.array(eig_Uy[i][g3])
    uz = np.array(eig_Uz[i][g3])

    # same normalization convention as above: w+q unity at its maximum
    g4 = np.argmax(np.abs(w1f + q1f))
    norm_1f = w1f[g4] + q1f[g4]

    w1f /= norm_1f
    q1f /= norm_1f
    ux /= norm_1f
    uy /= norm_1f
    uz /= norm_1f

    dw1f= np.gradient(w1f, z_1f)
    dux = np.gradient(ux, z_1f)
    duy = np.gradient(uy, z_1f)
    duz = np.gradient(uz, z_1f)

    e1f_tot = simps(rho1f*(np.abs(ux)**2 + 4*np.abs(uy)**2 + np.abs(uz)**2), z_1f)
    e1f_A1 = simps(-(dvx1f*np.real(uz*np.conj(ux)))*rho1f, z_1f)
    e1f_A2 = simps(-(4.0*dvy1f*np.real(uz*np.conj(uy)))*rho1f, z_1f)
    e1f_A3 = simps(-(dvz1f*np.abs(uz)**2)*rho1f, z_1f)
    e1f_A = e1f_A1 + e1f_A2 + e1f_A3
    e1f_B = simps(-vz1f*np.real(dux*np.conj(ux) + 4.0*duy*np.conj(uy) + duz*np.conj(uz))*rho1f, z_1f)
    e1f_C = simps((kx1f*np.imag(w1f*np.conj(ux)) - np.real(dw1f*np.conj(uz)))/(1.0 + eps1f)*rho1f, z_1f)
    e1f_D = simps(-2.0*eta_hat*np.real(q1f*np.conj(ux))*eps1f/(1.0 + eps1f)/(1.0 + eps1f)*rho1f, z_1f)
    e1f_E = simps(-z_1f*eps1f*np.real(q1f*np.conj(uz))/(1.0 + eps1f)*rho1f, z_1f)

    # growth-rate normalization (e1f_tot is left un-normalized)
    e1f_A /= s1f
    e1f_A2/= s1f
    e1f_B /= s1f
    e1f_C /= s1f
    e1f_D /= s1f
    e1f_E /= s1f

    energy1f_tot_int.append(e1f_tot)
    energy1f_A_int.append(e1f_A)
    energy1f_A2_int.append(e1f_A2)
    energy1f_B_int.append(e1f_B)
    energy1f_C_int.append(e1f_C)
    energy1f_D_int.append(e1f_D)
    energy1f_E_int.append(e1f_E)
'''
read in two-fluid data
'''
# Load the two-fluid eigenmode scan: separate gas (Ug*) and dust (Ud*)
# velocity eigenfunctions in addition to W and Q.
with h5py.File('stratsi_modes.h5','r') as infile:

    ks = infile['scales']['kx_space'][:]
    z = infile['scales']['z'][:]
    zmax = infile['scales']['zmax'][()]

    freqs = []
    eig_W = []
    eig_Q = []
    eig_Ugx= []
    eig_Ugy= []
    eig_Ugz= []
    eig_Udx= []
    eig_Udy= []
    eig_Udz= []

    # one entry per kx in the scan
    for k_i in infile['tasks']:
        freqs.append(infile['tasks'][k_i]['freq'][:])
        eig_W.append(infile['tasks'][k_i]['eig_W'][:])
        eig_Q.append(infile['tasks'][k_i]['eig_Q'][:])
        eig_Ugx.append(infile['tasks'][k_i]['eig_Ugx'][:])
        eig_Ugy.append(infile['tasks'][k_i]['eig_Ugy'][:])
        eig_Ugz.append(infile['tasks'][k_i]['eig_Ugz'][:])
        eig_Udx.append(infile['tasks'][k_i]['eig_Udx'][:])
        eig_Udy.append(infile['tasks'][k_i]['eig_Udy'][:])
        eig_Udz.append(infile['tasks'][k_i]['eig_Udz'][:])

# Wavenumber and eigenvalue selection, mirroring the one-fluid logic above.
if(args.mode):
    m = plot_mode
if(args.kx):
    m = np.argmin(np.abs(ks-plot_kx))

kx = ks[m]
sigma = freqs[m]
growth = sigma.real
freq =-sigma.imag

if(args.sig):
    g1 = np.argmin(np.abs(sigma-eigenv))
else:
    g1 = np.argmax(growth)

sgrow = growth[g1]
ofreq = freq[g1]

#print("two-fluid model: kx, growth, freq = {0:1.2e} {1:13.6e} {2:13.6e}".format(kx, sgrow, ofreq))
print("two-fluid model: kx, growth, freq = {0:1.6e} {1:13.6e} {2:13.6e}".format(kx, sgrow, ofreq))

W = np.array(eig_W[m][g1])
Q = np.array(eig_Q[m][g1])
Ugx = np.array(eig_Ugx[m][g1])
Ugy = np.array(eig_Ugy[m][g1])
Ugz = np.array(eig_Ugz[m][g1])
Udx = np.array(eig_Udx[m][g1])
Udy = np.array(eig_Udy[m][g1])
Udz = np.array(eig_Udz[m][g1])

# normalize so that W+Q is unity (and real) at its maximum
g2 = np.argmax(np.abs(W+Q))
norm = W[g2] + Q[g2]

W /= norm
Q /= norm
Ugx /= norm
Ugy /= norm
Ugz /= norm
Udx /= norm
Udy /= norm
Udz /= norm

del_rhod = W + Q
'''
energy analysis
'''
#read in background vertical profiles of vgx, vgy, vdx, vdy
horiz_eqm = h5py.File('./eqm_horiz.h5', 'r')
vgx = horiz_eqm['vgx'][:]
vgy = horiz_eqm['vgy'][:]
vdx = horiz_eqm['vdx'][:]
vdy = horiz_eqm['vdy'][:]
horiz_eqm.close()

# First and second vertical derivatives of the equilibrium horizontal
# velocities (gas g, dust d).
dvgx = np.gradient(vgx, z)
dvgy = np.gradient(vgy, z)
dvdx = np.gradient(vdx, z)
dvdy = np.gradient(vdy, z)

d2vgx = np.gradient(dvgx, z)
d2vgy = np.gradient(dvgy, z)
d2vdx = np.gradient(dvdx, z)
d2vdy = np.gradient(dvdy, z)

# epsilon, vdz, dvdz, rhog, dln_rhog are callables imported from stratsi_params.
eps2f = epsilon(z)

# Derivatives of the selected eigenfunctions.
dW = np.gradient(W, z)
dUgx = np.gradient(Ugx, z)
dUgy = np.gradient(Ugy, z)
dUgz = np.gradient(Ugz, z)
d2Ugx = np.gradient(dUgx, z)
d2Ugy = np.gradient(dUgy, z)
d2Ugz = np.gradient(dUgz, z)
dUdx = np.gradient(Udx, z)
dUdy = np.gradient(Udy, z)
dUdz = np.gradient(Udz, z)

# Two-fluid pseudo-energy decomposition (labels U1..U6 in the plot below).
energy2f_tot = eps2f*(np.abs(Udx)**2 + 4.0*np.abs(Udy)**2 + np.abs(Udz)**2)
energy2f_tot+= (np.abs(Ugx)**2 + 4.0*np.abs(Ugy)**2 + np.abs(Ugz)**2)
#energy2f_tot/= (1.0 + eps2f)

energy2f_A =-eps2f*np.real(Udz*np.conj(dvdx*Udx + 4.0*dvdy*Udy + dvdz(z)*Udz))
energy2f_A +=-np.real(Ugz*np.conj(dvgx*Ugx + 4.0*dvgy*Ugy))
energy2f_A2 = -eps2f*np.real(Udz*np.conj(4.0*dvdy*Udy)) - np.real(Ugz*np.conj(4.0*dvgy*Ugy))
energy2f_B =-eps2f*vdz(z)*np.real(dUdx*np.conj(Udx) + 4.0*dUdy*np.conj(Udy) + dUdz*np.conj(Udz))
energy2f_C = kx*np.imag(W*np.conj(Ugx)) - np.real(dW*np.conj(Ugz))
energy2f_D = (vgx - vdx)*np.real(Q*np.conj(Ugx)) + 4.0*(vgy - vdy)*np.real(Q*np.conj(Ugy))
energy2f_D += np.abs(Ugx - Udx)**2 + 4.0*np.abs(Ugy - Udy)**2 + np.abs(Ugz - Udz)**2
energy2f_D *= -eps2f/stokes
energy2f_E = (eps2f/stokes)*vdz(z)*np.real(Q*np.conj(Ugz))#buoyancy in 2fluid

# Viscous contribution only when perturbed viscosity is included in the model.
if viscosity_pert == True:
    dFx = d2Ugx - (4.0/3.0)*kx*kx*Ugx + (1.0/3.0)*1j*kx*dUgz + dln_rhog(z)*(dUgx + 1j*kx*Ugz)
    dFx-= W*(d2vgx + dln_rhog(z)*dvgx)
    dFx*= alpha
    dFy = d2Ugy - kx*kx*Ugy + dln_rhog(z)*dUgy
    dFy-= W*(d2vgy + dln_rhog(z)*dvgy)
    dFy*= alpha
    dFz = (4.0/3.0)*d2Ugz - kx*kx*Ugz + (1.0/3.0)*1j*kx*dUgx + dln_rhog(z)*((4.0/3.0)*dUgz - (2.0/3.0)*1j*kx*Ugx)
    dFz*= alpha
    energy2f_F = np.real(dFx*np.conj(Ugx) + 4.0*dFy*np.conj(Ugy) + dFz*np.conj(Ugz))
else:
    energy2f_F = np.zeros(z.size)

# Normalize each term by the growth rate of the selected mode.
energy2f_A /= sgrow#*(1.0 + eps2f)
energy2f_A2 /= sgrow#*(1.0 + eps2f)
energy2f_B /= sgrow#*(1.0 + eps2f)
energy2f_C /= sgrow#*(1.0 + eps2f)
energy2f_D /= sgrow#*(1.0 + eps2f)
energy2f_E /= sgrow#*(1.0 + eps2f)
energy2f_F /= sgrow#*(1.0 + eps2f)
'''
compare integrated energetics for the most unstable mode (at each kx) based on 2 fluid result
'''
energy2f_tot_int =[]
energy2f_A_int =[]
energy2f_A2_int =[]
energy2f_B_int =[]
energy2f_C_int =[]
energy2f_D_int =[]
energy2f_E_int =[]
energy2f_F_int =[]

# Repeat the two-fluid decomposition for the fastest-growing mode at every kx,
# integrating each term over z weighted by the equilibrium gas density rhog(z).
# NOTE(review): scipy.integrate.simps was removed in SciPy 1.14 -- the import
# at the top of the file should eventually migrate to scipy.integrate.simpson.
for i, kx2f in enumerate(ks):
    g3 = np.argmax(freqs[i].real)
    s2f = np.amax(freqs[i].real)

    w = np.array(eig_W[i][g3])
    q = np.array(eig_Q[i][g3])
    ugx = np.array(eig_Ugx[i][g3])
    ugy = np.array(eig_Ugy[i][g3])
    ugz = np.array(eig_Ugz[i][g3])
    udx = np.array(eig_Udx[i][g3])
    udy = np.array(eig_Udy[i][g3])
    udz = np.array(eig_Udz[i][g3])

    # normalize so that w+q is unity (and real) at its maximum
    g4 = np.argmax(np.abs(w + q))
    norm = w[g4] + q[g4]

    w /= norm
    q /= norm
    ugx /= norm
    ugy /= norm
    ugz /= norm
    udx /= norm
    udy /= norm
    udz /= norm

    dw = np.gradient(w, z)
    dugx = np.gradient(ugx, z)
    dugy = np.gradient(ugy, z)
    dugz = np.gradient(ugz, z)
    d2ugx= np.gradient(dugx, z)
    d2ugy= np.gradient(dugy, z)
    d2ugz= np.gradient(dugz, z)
    dudx = np.gradient(udx, z)
    dudy = np.gradient(udy, z)
    dudz = np.gradient(udz, z)

    e2f_tot = simps((eps2f*(np.abs(udx)**2 + 4.0*np.abs(udy)**2 + np.abs(udz)**2) \
                     + (np.abs(ugx)**2 + 4.0*np.abs(ugy)**2 + np.abs(ugz)**2))*rhog(z), z)
    e2f_A = simps((-eps2f*np.real(udz*np.conj(dvdx*udx + 4.0*dvdy*udy + dvdz(z)*udz)) \
                   -np.real(ugz*np.conj(dvgx*ugx + 4.0*dvgy*ugy)))*rhog(z), z)
    e2f_A2 = simps((-eps2f*np.real(udz*np.conj(4.0*dvdy*udy)) - np.real(ugz*np.conj(4.0*dvgy*ugy)))*rhog(z), z)
    e2f_B = simps((-eps2f*vdz(z)*np.real(dudx*np.conj(udx) + 4.0*dudy*np.conj(udy) + dudz*np.conj(udz)))*rhog(z),z)
    e2f_C = simps((kx2f*np.imag(w*np.conj(ugx)) - np.real(dw*np.conj(ugz)))*rhog(z),z)
    e2f_D = simps(-(eps2f/stokes)*((vgx - vdx)*np.real(q*np.conj(ugx)) + 4.0*(vgy - vdy)*np.real(q*np.conj(ugy)) \
                                   + np.abs(ugx - udx)**2 + 4.0*np.abs(ugy - udy)**2 + np.abs(ugz - udz)**2)*rhog(z), z)
    e2f_E = simps((eps2f/stokes)*vdz(z)*np.real(q*np.conj(ugz))*rhog(z),z)
    if viscosity_pert == True:
        # Bug fix: these viscous-force expressions previously mixed in `kx`
        # (the single wavenumber selected for plotting) instead of the loop
        # wavenumber `kx2f` in three places; cf. the dFx/dFz expressions of
        # the single-mode analysis above, where `kx` is the correct variable.
        dfx = d2ugx - (4.0/3.0)*kx2f*kx2f*ugx + (1.0/3.0)*1j*kx2f*dugz + dln_rhog(z)*(dugx + 1j*kx2f*ugz)
        dfx-= w*(d2vgx + dln_rhog(z)*dvgx)
        dfx*= alpha
        dfy = d2ugy - kx2f*kx2f*ugy + dln_rhog(z)*dugy
        dfy-= w*(d2vgy + dln_rhog(z)*dvgy)
        dfy*= alpha
        dfz = (4.0/3.0)*d2ugz - kx2f*kx2f*ugz + (1.0/3.0)*1j*kx2f*dugx + dln_rhog(z)*((4.0/3.0)*dugz - (2.0/3.0)*1j*kx2f*ugx)
        dfz*= alpha
        e2f_F=simps((np.real(dfx*np.conj(ugx) + 4.0*dfy*np.conj(ugy) + dfz*np.conj(ugz)))*rhog(z), z)
    else:
        e2f_F = 0.0

    # growth-rate normalization (e2f_tot is left un-normalized)
    e2f_A /= s2f
    e2f_A2 /= s2f
    e2f_B /= s2f
    e2f_C /= s2f
    e2f_D /= s2f
    e2f_E /= s2f
    e2f_F /= s2f

    energy2f_tot_int.append(e2f_tot)
    energy2f_A_int.append(e2f_A)
    energy2f_A2_int.append(e2f_A2)
    energy2f_B_int.append(e2f_B)
    energy2f_C_int.append(e2f_C)
    energy2f_D_int.append(e2f_D)
    energy2f_E_int.append(e2f_E)
    energy2f_F_int.append(e2f_F)
'''
plotting parameters
'''
fontsize= 24
nlev = 128
nclev = 6
cmap = plt.cm.inferno

ymax = 1
xmin = 0.0
# Common x-range for the vertical profile plots: the larger of the two model
# domain heights. Bug fix: the original `np.amax(np.array(zmax, zmax_1f))`
# passed zmax_1f as the *dtype* argument of np.array, which raises TypeError.
xmax = max(zmax, zmax_1f)
'''
plot eigenvalues
'''
plt.rc('font',size=fontsize,weight='bold')
fig, axs = plt.subplots(2, sharex=True, sharey=False, gridspec_kw={'hspace': 0.1}, figsize=(8,6))
#plt.subplots_adjust(left=0.18, right=0.95, top=0.95, bottom=0.15)
plt.subplots_adjust(left=0.16, right=0.95, top=0.95, bottom=0.15)
plt.xscale('log')

# Top panel: growth rate of every computed mode vs kx,
# one-fluid (black circles) against two-fluid (red crosses).
for i, k in enumerate(ks_1f):
    for n, sig in enumerate(freqs_1f[i]):
        # only label the very first point so the legend has one entry per model
        if (i == 0) & (n == 0):
            lab = r'one fluid'
        else:
            lab = ''
        axs[0].plot(k, sig.real, marker='o', linestyle='none', markersize=8, label=lab,color='black')

for i, k in enumerate(ks):
    for n, sig in enumerate(freqs[i]):
        if (i == 0) & (n == 0):
            lab = r'two fluid'
        else:
            lab = ''
        axs[0].plot(k, sig.real, marker='X', linestyle='none', markersize=8, label=lab,color='red')

axs[0].set_ylabel(r'$s/\Omega$')
lines1, labels1 = axs[0].get_legend_handles_labels()
legend=axs[0].legend(lines1, labels1, loc='upper left', frameon=False, ncol=1, handletextpad=-0.5,fontsize=fontsize/2)
title=r"Z={0:1.2f}, St={1:4.0e}, $\delta$={2:4.0e}".format(metal, stokes, delta)
axs[0].set_title(title,weight='bold')

# Bottom panel: corresponding oscillation frequencies.
for i, k in enumerate(ks_1f):
    for n, sig in enumerate(freqs_1f[i]):
        if (i == 0) & (n == 0):
            lab = r'one fluid'
        else:
            lab = ''
        axs[1].plot(k, -sig.imag, marker='o', linestyle='none', markersize=8, label=lab,color='black')

for i, k in enumerate(ks):
    for n, sig in enumerate(freqs[i]):
        if (i == 0) & (n == 0):
            lab = r'two fluid'
        else:
            lab = ''
        axs[1].plot(k, -sig.imag, marker='X', linestyle='none', markersize=8, label=lab,color='red')

#axs[1].plot(ks, -freqs.imag, marker='X',markersize=10,linestyle='none', label=r'two fluid')
axs[1].set_ylabel(r'$\omega/\Omega$')
axs[1].set_xlabel(r'$k_xH_g$')
lines1, labels1 = axs[1].get_legend_handles_labels()
legend=axs[1].legend(lines1, labels1, loc='upper left', frameon=False, ncol=1, handletextpad=-0.5, fontsize=fontsize/2)

plt.xlim(np.amin(ks),np.amax(ks))
fname = 'stratsi_plot_growth'
plt.savefig(fname,dpi=150)
'''
plot max growth rates as func of kx
'''
fig, axs = plt.subplots(2, sharex=True, sharey=False, gridspec_kw={'hspace': 0.1}, figsize=(8,6))
plt.subplots_adjust(left=0.2, right=0.95, top=0.9, bottom=0.15)
plt.xscale('log')

# Top panel: fastest growth rate found at each kx, per model.
for i, k in enumerate(ks_1f):
    g1 = np.argmax(freqs_1f[i].real)
    if i == 0:
        lab = r'one-fluid'
    else:
        lab = ''
    axs[0].plot(k, freqs_1f[i][g1].real , marker='o', linestyle='none', markersize=8, label=lab,color='black')
#axs[0].axhline(y=maxvshear, linestyle='dashed', linewidth=1, label=r'$max\left|dv_y/dz\right|/\Omega$')

for i, k in enumerate(ks):
    g1 = np.argmax(freqs[i].real)
    if i == 0:
        lab = r'two-fluid'
    else:
        lab = ''
    axs[0].plot(k, freqs[i][g1].real , marker='X', linestyle='none', markersize=8, label=lab,color='red')

axs[0].set_ylabel(r'$s_\mathrm{max}/\Omega$')
lines1, labels1 = axs[0].get_legend_handles_labels()
legend=axs[0].legend(lines1, labels1, loc='upper left', frameon=False, ncol=1, handletextpad=-0.5,fontsize=fontsize/2)
title=r"Z={0:1.2f}, St={1:4.0e}, $\delta$={2:4.0e}".format(metal, stokes, delta)
axs[0].set_title(title,weight='bold')

# Bottom panel: oscillation frequency of that fastest-growing mode.
for i, k in enumerate(ks_1f):
    g1 = np.argmax(freqs_1f[i].real)
    if i == 0:
        lab = r'one fluid'
    else:
        lab = ''
    axs[1].plot(k, -freqs_1f[i][g1].imag, marker='o', linestyle='none', markersize=8, label=lab,color='black')

for i, k in enumerate(ks):
    g1 = np.argmax(freqs[i].real)
    if i == 0:
        lab = r'two fluid'
    else:
        lab = ''
    axs[1].plot(k, -freqs[i][g1].imag, marker='X', linestyle='none', markersize=8, label=lab,color='red')

axs[1].set_ylabel(r'$\omega/\Omega$')
axs[1].set_xlabel(r'$k_xH_g$')
#lines1, labels1 = axs[1].get_legend_handles_labels()
#legend=axs[1].legend(lines1, labels1, loc='upper left', frameon=False, ncol=1, handletextpad=-0.5, fontsize=fontsize/2)

#plt.xlim(np.amin(ks),np.amax(ks))
fname = 'stratsi_plot_growth_max'
plt.savefig(fname,dpi=150)
'''
plot eigenvalues as scatter diagram for a single kx
'''
# Complex-plane scatter of all eigenvalues at the selected wavenumber.
fig = plt.figure(figsize=(8,4.5))
ax = fig.add_subplot()
plt.subplots_adjust(left=0.2, right=0.95, top=0.9, bottom=0.2)

plt.scatter(-sigma_1f.imag, sigma_1f.real, marker='o', label=r'one fluid',color='black',s=64)
plt.scatter(-sigma.imag, sigma.real, marker='X', label=r'two fluid',color='red',s=64)

plt.rc('font',size=fontsize,weight='bold')
lines1, labels1 = ax.get_legend_handles_labels()
legend=ax.legend(lines1, labels1, loc='upper right', frameon=False, ncol=1, handletextpad=-0.5, fontsize=fontsize/2)

plt.xticks(fontsize=fontsize,weight='bold')
plt.xlabel(r'$\omega/\Omega$',fontsize=fontsize)
plt.yticks(fontsize=fontsize,weight='bold')
plt.ylabel(r'$s/\Omega$', fontsize=fontsize)
title=r"Z={0:1.2f}, St={1:4.0e}, $\delta$={2:4.0e}".format(metal, stokes, delta)
plt.title(title,weight='bold')

fname = 'stratsi_plot_eigen'
plt.savefig(fname,dpi=150)
'''
plot eigenfunctions
'''
plt.rc('font',size=fontsize/1.5,weight='bold')
fig, axs = plt.subplots(5, sharex=True, sharey=False, gridspec_kw={'hspace': 0.1}, figsize=(8,7.5))
plt.subplots_adjust(left=0.18, right=0.95, top=0.95, bottom=0.125)

# Panel 0: dust density perturbation, one-fluid vs two-fluid (real and imag).
axs[0].plot(z_1f, del_rhod1f.real, linewidth=2, label=r'one-fluid, real', color='black')
axs[0].plot(z_1f, del_rhod1f.imag, linewidth=2, label=r'one-fluid, imag', color='m')
axs[0].plot(z, del_rhod.real, linewidth=2, label=r'two-fluid, real', color='red', linestyle='dashed')
axs[0].plot(z, del_rhod.imag, linewidth=2, label=r'two-fluid, imag', color='c', linestyle='dashed')
axs[0].set_ylabel(r'$\delta\rho_d/\rho_d$')
lines1, labels1 = axs[0].get_legend_handles_labels()
axs[0].legend(lines1, labels1, loc=(0.6,-0.07), frameon=False, ncol=1, labelspacing=0.3, handletextpad=0.1)
title=r"Z={0:1.2f}, St={1:4.0e}, $\delta$={2:4.0e}".format(metal, stokes, delta)
axs[0].set_title(title,weight='bold')

# Panel 1: gas density perturbation.
axs[1].plot(z_1f, W1f.real, linewidth=2, label=r'one-fluid, real', color='black')
axs[1].plot(z_1f, W1f.imag, linewidth=2, label=r'one-fluid, imag', color='m')
axs[1].plot(z, W.real, linewidth=2, label=r'two-fluid, real', color='red', linestyle='dashed')
axs[1].plot(z, W.imag, linewidth=2, label=r'two-fluid, imag', color='c', linestyle='dashed')
axs[1].set_ylabel(r'$\delta\rho_g/\rho_{g}$')
axs[1].ticklabel_format(axis='y', style='sci',scilimits=(-2,2))
axs[1].yaxis.set_major_formatter(FormatStrFormatter('%3.0e'))
#lines1, labels1 = axs[1].get_legend_handles_labels()
#axs[1].legend(lines1, labels1, loc='right', frameon=False, ncol=1)

# Panels 2-4: velocity amplitudes; two-fluid shown separately for dust and gas.
axs[2].plot(z_1f, np.abs(Ux), linewidth=2, label=r'one-fluid', color='black')
axs[2].plot(z, np.abs(Udx), linewidth=2, label=r'dust', color='red', linestyle='dashed')
axs[2].plot(z, np.abs(Ugx), linewidth=2, label=r'gas', color='lime', linestyle='dotted')
#axs[2].plot(z_1f, Ux_norm.imag, linewidth=2, color='black')
#axs[2].plot(z, Ugx_norm.imag, linewidth=2, color='red', linestyle='dashed')
#axs[2].plot(z, Udx_norm.imag, linewidth=2, color='blue', linestyle='dotted')
axs[2].set_ylabel(r'$|\delta v_{x}|$')
lines1, labels1 = axs[2].get_legend_handles_labels()
axs[2].legend(lines1, labels1, loc='right', frameon=False, ncol=1, labelspacing=0.3, handletextpad=0.1)

axs[3].plot(z_1f, np.abs(Uy), linewidth=2, label=r'one-fluid', color='black')
axs[3].plot(z, np.abs(Udy), linewidth=2, label=r'dust', color='red', linestyle='dashed')
axs[3].plot(z, np.abs(Ugy), linewidth=2, label=r'gas', color='lime', linestyle='dotted')
axs[3].set_ylabel(r'$|\delta v_{y}|$')
#lines1, labels1 = axs[3].get_legend_handles_labels()
#axs[3].legend(lines1, labels1, loc='right', frameon=False, ncol=1)

# Annotate the selected wavenumber and growth rate inside panel 3.
ymax = np.amax(np.abs(Uy))
#arrbeg = r'\begin{align*}'
#arrend = r'\end{align*}'
#plt.rcParams.update(custom_preamble)
axs[3].annotate(r"$k_xH_g$={0:3.0f}".format(kx)+"\n"+r"s={0:4.2f}$\Omega$".format(sgrow), xy=(0.75*xmax, 0.5*ymax))

axs[4].plot(z_1f, np.abs(Uz), linewidth=2, label=r'one-fluid', color='black')
axs[4].plot(z, np.abs(Udz), linewidth=2, label=r'dust', color='red', linestyle='dashed')
axs[4].plot(z, np.abs(Ugz), linewidth=2, label=r'gas', color='lime', linestyle='dotted')
axs[4].set_ylabel(r'$|\delta v_{z}|$')
#lines1, labels1 = axs[4].get_legend_handles_labels()
#axs[4].legend(lines1, labels1, loc='right', frameon=False, ncol=1)
axs[4].set_xlabel(r'$z/H_g$',fontweight='bold')

plt.xlim(xmin,xmax)
fname = 'stratsi_plot_eigenfunc'
plt.savefig(fname,dpi=150)
'''
2D visualization of eigenfunction (using two-fluid solution)
'''
nx = 128
nz = nx

# One radial wavelength on either side of x = 0; vertical grid spans the domain.
xaxis = (2.0*np.pi/kx)*np.linspace(-1.0, 1.0, nx)
zaxis = np.linspace(np.amin(z), np.amax(z), nz)
X, Z = np.meshgrid(xaxis,zaxis)

# Interpolate the (complex) two-fluid eigenfunctions onto the uniform z grid.
# Renamed the velocity locals from vdx/vdz so they no longer clobber the
# equilibrium vdz() callable imported from stratsi_params.
rhod = np.interp(zaxis, z, del_rhod)
udx_prof = np.interp(zaxis, z, Udx)
udz_prof = np.interp(zaxis, z, Udz)

rhod_2D = np.repeat(rhod[...,np.newaxis], nx, axis=1)
vdx_2D = np.repeat(udx_prof[...,np.newaxis], nx, axis=1)
vdz_2D = np.repeat(udz_prof[...,np.newaxis], nx, axis=1)

# Real part of f(z)*exp(i*kx*x) for the density and the dust velocity field.
data = np.cos(kx*X)*rhod_2D.real - np.sin(kx*X)*rhod_2D.imag
U = np.cos(kx*X)*vdx_2D.real - np.sin(kx*X)*vdx_2D.imag
V = np.cos(kx*X)*vdz_2D.real - np.sin(kx*X)*vdz_2D.imag

plt.figure(figsize=(7,7))
plt.ylim(np.amin(zaxis), np.amax(zaxis))
plt.xlim(np.amin(xaxis), np.amax(xaxis))

minv = np.amin(data)
maxv = np.amax(data)
levels = np.linspace(minv,maxv,nlev)
clevels = np.linspace(minv,maxv,nclev)

plt.rc('font',size=fontsize,weight='bold')
cp = plt.contourf(xaxis, zaxis, data, levels, cmap=cmap)

# Fix: np.int was removed in NumPy 1.24; the builtin int is equivalent here.
# (These decimation factors are only used by the commented-out quiver call.)
xfac = int(nx/64)
zfac = int(nz/128)
#plt.quiver(xaxis[0:nx:xfac], zaxis[0:nz:zfac], U[0:nz:zfac,0:nx:xfac],
#           V[0:nz:zfac,0:nx:xfac], color='deepskyblue',
#           width=0.005, scale=0.2
#           )
speed = np.sqrt(U**2 + V**2)
lw = 0.7#2*speed/speed.max()
plt.streamplot(xaxis, zaxis, U, V,
               color='deepskyblue', density=3,
               linewidth=lw
               )

#plt.gca().set_aspect("equal")
#plt.tight_layout()
plt.subplots_adjust(left=0.2, right=0.9, top=0.9, bottom=0.125)
plt.colorbar(cp,ticks=clevels,format='%.2f')

title=r"$k_xH_g$={0:3.0f}".format(kx)+r", s={0:4.2f}$\Omega$".format(sgrow)
plt.title(title,weight='bold')
plt.xticks(fontsize=fontsize,weight='bold')
#plt.gca().xaxis.set_major_formatter(mtick.FormatStrFormatter('%1.0e'))
plt.xlabel(r'$x/H_g$',fontsize=fontsize)
plt.yticks(fontsize=fontsize,weight='bold')
plt.ylabel(r'$z/H_g$',fontsize=fontsize)
fname = 'stratsi_plot_eigenf2D'
plt.savefig(fname,dpi=150)
'''
plot kinetic energy decomposition based on 1-fluid result
'''
fig = plt.figure(figsize=(8,4.5))
ax = fig.add_subplot()
plt.subplots_adjust(left=0.2, right=0.95, top=0.9, bottom=0.2)
plt.xlim(np.amin(z_1f),np.amax(z_1f))

# Individual pseudo-energy terms, their sum, and the total (the dashed sum
# should track the circled total if the decomposition is consistent).
plt.plot(z_1f, energy1f_A, linewidth=2,label='$E_1, dv/dz$')
plt.plot(z_1f, energy1f_A2, linewidth=2,label=r'$E_{1y}$, $dv_y/dz$',color='black',marker='x',linestyle='None',markevery=8)
plt.plot(z_1f, energy1f_B, linewidth=2,label='$E_2$, vert. settling')
plt.plot(z_1f, energy1f_C, linewidth=2,label='$E_3$, pressure')
plt.plot(z_1f, energy1f_D, linewidth=2,label='$E_4$, dust-gas drift')
plt.plot(z_1f, energy1f_E, linewidth=2,label='$E_5$, buoyancy')
plt.plot(z_1f, energy1f_A + energy1f_B + energy1f_C + energy1f_D + energy1f_E, linewidth=2,label=r'$\sum E_i$',linestyle='dashed')
plt.plot(z_1f, energy1f_tot, linewidth=2,label=r'$E_{tot}$',color='black',marker='o',linestyle='None',markevery=8)
#ax.axvline(x=z_maxvshear, linestyle='dashed', linewidth=1, label=r'$max\left|dv_y/dz\right|/\Omega$')

plt.rc('font',size=fontsize,weight='bold')
lines1, labels1 = ax.get_legend_handles_labels()
legend=ax.legend(lines1, labels1, loc='upper right', frameon=False, ncol=1, fontsize=fontsize/2)

plt.xticks(fontsize=fontsize,weight='bold')
plt.xlabel(r'$z/H_g$',fontsize=fontsize)
title=r"$k_xH_g$={0:3.0f}".format(kx_1f)+r", s={0:4.2f}$\Omega$".format(sgrow_1f)
plt.title(title,weight='bold')
plt.yticks(fontsize=fontsize,weight='bold')
plt.ylabel(r'$pseudo$-$energy$', fontsize=fontsize)

fname = 'stratsi_plot_energy1f'
plt.savefig(fname,dpi=150)
'''
plot kinetic energy decomposition based on 2-fluid result
'''
fig = plt.figure(figsize=(8,4.5))
ax = fig.add_subplot()
plt.subplots_adjust(left=0.2, right=0.95, top=0.9, bottom=0.2)
plt.xlim(xmin,xmax)

# Two-fluid pseudo-energy terms; the viscous term U6 only exists when
# perturbed viscosity is enabled in the model.
plt.plot(z, energy2f_A, linewidth=2,label='$U_1$, vert. shear')
plt.plot(z, energy2f_A2, linewidth=2,label='$U_{1y}$, (vert. shear)$_y$', color='black',marker='x',linestyle='None',markevery=8)
plt.plot(z, energy2f_B, linewidth=2,label='$U_2$, dust settling')
plt.plot(z, energy2f_C, linewidth=2,label='$U_3$, gas pressure')
plt.plot(z, energy2f_D, linewidth=2,label='$U_4$, dust-gas drift')
plt.plot(z, energy2f_E, linewidth=2,label='$U_5$, buoyancy')
if viscosity_pert == True:
    plt.plot(z, energy2f_F, linewidth=2,label='$U_6$, viscosity')
plt.plot(z, energy2f_A + energy2f_B + energy2f_C + energy2f_D + energy2f_E + energy2f_F, linewidth=2,label=r'$\sum U_i$',linestyle='dashed')
plt.plot(z, energy2f_tot, linewidth=2,label=r'$U_{tot}$',color='black',marker='o',linestyle='None',markevery=8)

plt.rc('font',size=fontsize,weight='bold')
lines1, labels1 = ax.get_legend_handles_labels()
legend=ax.legend(lines1, labels1, loc='upper right', frameon=False, ncol=1, fontsize=fontsize/2, labelspacing=0.4)

plt.xticks(fontsize=fontsize,weight='bold')
plt.xlabel(r'$z/H_g$',fontsize=fontsize)
plt.yticks(fontsize=fontsize,weight='bold')
plt.ylabel(r'$pseudo$-$energy$', fontsize=fontsize)
title=r"$k_xH_g$={0:3.0f}".format(kx)+r", s={0:4.2f}$\Omega$".format(sgrow)
plt.title(title,weight='bold')

fname = 'stratsi_plot_energy2f'
plt.savefig(fname,dpi=150)
'''
plot energy decomposition as a function of kx (1 fluid)
'''
fig = plt.figure(figsize=(8,4.5))
ax = fig.add_subplot()
plt.subplots_adjust(left=0.185, right=0.95, top=0.9, bottom=0.2)
plt.xscale('log')
#plt.yscale('log')
plt.xlim(np.amin(ks_1f),np.amax(ks_1f))

# Cube root compresses the dynamic range while preserving the sign of each
# integrated term (the axis label shows the ^{1/3}).
energy1f_tot_int = np.cbrt(energy1f_tot_int)
energy1f_A_int = np.cbrt(energy1f_A_int)
energy1f_A2_int = np.cbrt(energy1f_A2_int)
energy1f_B_int = np.cbrt(energy1f_B_int)
energy1f_C_int = np.cbrt(energy1f_C_int)
energy1f_D_int = np.cbrt(energy1f_D_int)
energy1f_E_int = np.cbrt(energy1f_E_int)

#plt.plot(ks_1f, energy1f_A_int, linewidth=2,label='$dv/dz$')
plt.plot(ks_1f, energy1f_A2_int, linewidth=2,label='$dv_y/dz$')
plt.plot(ks_1f, energy1f_B_int, linewidth=2,label='vert. settling')
plt.plot(ks_1f, energy1f_C_int, linewidth=2,label='pressure')
plt.plot(ks_1f, energy1f_D_int, linewidth=2,label='dust-gas drift')
plt.plot(ks_1f, energy1f_E_int, linewidth=2,label='buoyancy')
plt.plot(ks_1f, energy1f_tot_int, linewidth=2,label=r'total',color='black',marker='o',linestyle='None',markevery=2)
# horizontal zero line for reference
plt.plot([1e2,1e4], [0,0], linewidth=1,linestyle='dashed',color='black')

lines1, labels1 = ax.get_legend_handles_labels()
legend=ax.legend(lines1, labels1, loc='upper left', frameon=False, ncol=1, fontsize=fontsize/2)

plt.rc('font',size=fontsize,weight='bold')
plt.xticks(fontsize=fontsize,weight='bold')
plt.xlabel(r'$k_xH_g$',fontsize=fontsize)
plt.yticks(fontsize=fontsize,weight='bold')
plt.ylabel(r'$\left(\int \rho E_i dz\right)^{1/3}$', fontsize=fontsize)
title=r"Z={0:1.2f}, St={1:4.0e}, $\delta$={2:4.0e}".format(metal, stokes, delta)
plt.title(title,weight='bold')

fname = 'stratsi_plot_energy1f_int'
plt.savefig(fname,dpi=150)
'''
plot energy decomposition as a function of kx (2 fluid)
'''
fig = plt.figure(figsize=(8,4.5))
ax = fig.add_subplot()
plt.subplots_adjust(left=0.185, right=0.95, top=0.9, bottom=0.2)
plt.xscale('log')
plt.xlim(np.amin(ks),np.amax(ks))

# Cube root compresses the dynamic range while preserving the sign of each
# integrated term (the axis label shows the ^{1/3}).
energy2f_tot_int = np.cbrt(energy2f_tot_int)
energy2f_A_int = np.cbrt(energy2f_A_int)
energy2f_A2_int = np.cbrt(energy2f_A2_int)
energy2f_B_int = np.cbrt(energy2f_B_int)
energy2f_C_int = np.cbrt(energy2f_C_int)
energy2f_D_int = np.cbrt(energy2f_D_int)
energy2f_E_int = np.cbrt(energy2f_E_int)
energy2f_F_int = np.cbrt(energy2f_F_int)

#plt.plot(ks_1f, energy1f_A_int, linewidth=2,label='$dv/dz$')
plt.plot(ks, energy2f_A2_int, linewidth=2,label='(vert. shear)$_y$')
plt.plot(ks, energy2f_B_int, linewidth=2,label='dust settling')
plt.plot(ks, energy2f_C_int, linewidth=2,label='gas pressure')
plt.plot(ks, energy2f_D_int, linewidth=2,label='dust-gas drift')
plt.plot(ks, energy2f_E_int, linewidth=2,label='buoyancy')
if viscosity_pert == True:
    plt.plot(ks, energy2f_F_int, linewidth=2,label='viscosity')
plt.plot(ks, energy2f_tot_int, linewidth=2,label=r'total',color='black',marker='o',linestyle='None',markevery=2)
# horizontal zero line for reference
plt.plot([1e2,1e4], [0,0], linewidth=1,linestyle='dashed',color='black')

lines1, labels1 = ax.get_legend_handles_labels()
legend=ax.legend(lines1, labels1, loc='upper left', frameon=False, ncol=1, fontsize=fontsize/2)

plt.rc('font',size=fontsize,weight='bold')
plt.xticks(fontsize=fontsize,weight='bold')
plt.xlabel(r'$k_xH_g$',fontsize=fontsize)
plt.yticks(fontsize=fontsize,weight='bold')
plt.ylabel(r'$\left(\int \rho_g U_i dz\right)^{1/3}$', fontsize=fontsize)
title=r"Z={0:1.2f}, St={1:4.0e}, $\delta$={2:4.0e}".format(metal, stokes, delta)
plt.title(title,weight='bold')

fname = 'stratsi_plot_energy2f_int'
plt.savefig(fname,dpi=150)
|
minkailinREPO_NAMEstratsiPATH_START.@stratsi_extracted@stratsi-master@caseA@stratsi_plot.py@.PATH_END.py
|
{
"filename": "_showscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contour/_showscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``contour.showscale`` attribute."""

    def __init__(self, plotly_name="showscale", parent_name="contour", **kwargs):
        # Callers may override the edit type; "calc" is the default.
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contour@_showscale.py@.PATH_END.py
|
{
"filename": "eclipse_depths_example.py",
"repo_name": "ideasrule/platon",
"repo_path": "platon_extracted/platon-master/examples/eclipse_depths_example.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
from platon.constants import h, c, k_B, R_jup, M_jup, R_sun
from platon.eclipse_depth_calculator import EclipseDepthCalculator
from platon.TP_profile import Profile

# Build a parametric temperature-pressure profile; an isothermal profile can
# be used instead by uncommenting the line below.
p = Profile()
p.set_parametric(1200, 500, 0.5, 0.6, 1e6, 1900)
#p.set_isothermal(1500)

calc = EclipseDepthCalculator(method="xsec") #"ktables" for correlated k

#Uncomment below to get binned eclipse depths
#edges = np.linspace(1.1e-6, 1.7e-6, 30)
#bins = np.array([edges[0:-1], edges[1:]]).T
#calc.change_wavelength_bins(bins)

# Eclipse depths for a Jupiter-size/mass planet around a Sun-size star with
# Teff = 5700 K; full_output=True also returns the diagnostic info_dict.
wavelengths, depths, info_dict = calc.compute_depths(p, R_sun, M_jup, R_jup, 5700, full_output=True)

# Temperature-pressure profile plot.
plt.semilogy(p.get_temperatures(), p.get_pressures())
plt.xlabel("Temperature (K)")
plt.ylabel("Pressure (Pa)")

# Eclipse spectrum in microns/ppm. Raw string avoids the invalid "\m" escape
# in "$\mu m$", which raises a SyntaxWarning on Python >= 3.12.
plt.figure()
plt.loglog(1e6*wavelengths, 1e6*depths)
plt.xlabel(r"Wavelength ($\mu m$)")
plt.ylabel("Eclipse depth (ppm)")
plt.show()
|
ideasruleREPO_NAMEplatonPATH_START.@platon_extracted@platon-master@examples@eclipse_depths_example.py@.PATH_END.py
|
{
"filename": "README_zh-CN.md",
"repo_name": "alibaba/TinyNeuralNetwork",
"repo_path": "TinyNeuralNetwork_extracted/TinyNeuralNetwork-main/examples/pruner/oneshot/README_zh-CN.md",
"type": "Markdown"
}
|
# Oneshot剪枝样例
[English](README.md)
## 简介
OneShot剪枝是L1、L2、FPGM等结构化通道剪枝的统称(可以通过config文件中的metrics进行配置切换),是一种快速且通用性强的剪枝方案。
## Oneshot剪枝配置
参数如下:
<dl>
<dt><tt>sparsity</tt> : float</dt>
<dd>稀疏率,所有算子都会减掉该比例的通道 </dd>
<dt><tt>metrics</tt> : str</dt>
<dd>剪枝评估算法,有效值包括 (l1_norm, l2_norm, fpgm, random) </dd>
</dl>
当想调节特定算子的剪枝率时,建议通过 `pruner.generate_config(args.config)` 来生成一份包含所有可剪枝节点的配置(详情见demo)。
## Oneshot剪枝代码说明
代码的流程如下
1. 解析配置文件
2. 用配置文件生成OneshotPruner对象(解析配置、计算图、算子依赖)
3. 生成带每层剪枝率的配置文件,并修改特定算子的剪枝率(可选)
4. 调用 `pruner.prune()` 方法完成剪枝(修改算子的参数及权重维度)
5. 对剪枝后的模型做finetune
## 如何接入我的算法和模型?
从数据集角度而言,可以看到代码中自带了针对cifar10数据集的处理函数,我们也提供了cifar100以及imagenet的处理函数(位于tinynn.util命名空间下),如果没有你的数据集,可以参照着实现一个类似的。
从流程角度而言,除此之外的训练函数和验证函数,仍然可以使用之前的实现。
从模型角度而言,按照样例代码的流程,只需将第二步中构造OneshotPruner对象的模型进行替换即可。
## 常见问题
由于PyTorch具有极高的编码自由度,我们无法确保所有的Case都能自动化覆盖,当你遇到问题时,
可以查看[《常见问题解答》](../../../docs/FAQ_zh-CN.md) , 或者加入答疑群

|
alibabaREPO_NAMETinyNeuralNetworkPATH_START.@TinyNeuralNetwork_extracted@TinyNeuralNetwork-main@examples@pruner@oneshot@README_zh-CN.md@.PATH_END.py
|
{
"filename": "testLTSClientpy.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/loggingts/ws/test/testLTSClientpy.py",
"type": "Python"
}
|
#!/usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
#------------------------------------------------------------------------------
# Exercise the generated type-safe log classes (SimpleLog / ComplexLog).
from ACSLogTypeExampleLTS import SimpleLog,ComplexLog
# Import the acspy.PySimpleClient class
from Acspy.Clients.SimpleClient import PySimpleClient

# Connect to ACS and announce the start of the test.
simpleClient = PySimpleClient()
simpleClient.getLogger().logInfo("Starting test client.")

# SimpleLog: once with the default context, once naming an array/antenna.
SimpleLog().log()
SimpleLog("Array01","Antenna01").log()

# ComplexLog with every typed member populated, default context.
defaultContextLog = ComplexLog()
defaultContextLog.setsomeDoubleMember(3.14159)
defaultContextLog.setsomeStringMember("test string")
defaultContextLog.setsomeLongMember(42)
defaultContextLog.setsomeBooleanMember(True)
defaultContextLog.log()

# ComplexLog again, this time tagged with an explicit array and antenna.
taggedLog = ComplexLog()
taggedLog.setArray("Array01")
taggedLog.setAntenna("Antenna01")
taggedLog.setsomeDoubleMember(3.14159)
taggedLog.setsomeStringMember("test string")
taggedLog.setsomeLongMember(42)
taggedLog.setsomeBooleanMember(True)
taggedLog.log()

# Tear down the client connection and log completion.
simpleClient.disconnect()
simpleClient.getLogger().logInfo("Exiting test client.")
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@loggingts@ws@test@testLTSClientpy.py@.PATH_END.py
|
{
"filename": "test_frequency.py",
"repo_name": "lockepatton/sonipy",
"repo_path": "sonipy_extracted/sonipy-master/tests/test_frequency.py",
"type": "Python"
}
|
import numpy as np
from sonipy.scales.frequency import *
import warnings
import pytest
from unittest import TestCase
# Frequency of middle C; every expected frequency below is derived from it.
C4 = 261.6 # Hz

# EXPECTED VALUES
# Two-octave span: f_max / f_min == 4 (i.e. 2400 cents).
frequency_min = C4
frequency_max = 4 * C4

# Case 1: Vmin Vmax 0 and 1 (defaults are None)
# cents_per_value with default vmin and vmax
cents_per_value = 2400.0
vmin = 0
vmax = 1

# Case 2: Vmin Vmax Defaults
# cents_per_value with alternate input vmin and vmax
cents_per_value2 = 12.0
vmin2 = -100
vmax2 = 100

# FREQUENCY RANGE WARNINGS
# Frequencies outside the typical audible range, used to trigger the
# out-of-range warnings below.
frequency_min_outofrange = 20 # Hz
frequency_max_outofrange = 45000 # Hz
class TestFrequencyScale(TestCase):
    """Tests for sonipy's frequency-scale helpers and the FrequencyScale class.

    Expected values assume a two-octave span (C4 to 4*C4, i.e. 2400 cents);
    see the module-level constants above.
    """

    def test_cent_per_value(self):
        """cent_per_value returns the expected cents span for both value ranges."""
        cents_per_value_defaulttest = cent_per_value(
            frequency_min, frequency_max, vmin, vmax)
        self.assertEqual(cents_per_value_defaulttest, cents_per_value)

        cents_per_value_nondefaulttest = cent_per_value(
            frequency_min, frequency_max, vmin2, vmax2)
        self.assertEqual(cents_per_value_nondefaulttest, cents_per_value2)

    def test_get_f_min(self):
        """get_f_min recovers frequency_min from f_max + cents-per-value."""
        frequency_min_defaulttest = get_f_min(
            frequency_max, cents_per_value, vmin, vmax)
        self.assertEqual(frequency_min_defaulttest, frequency_min)

        frequency_min_nondefaulttest = get_f_min(
            frequency_max, cents_per_value2, vmin2, vmax2)
        self.assertEqual(frequency_min_nondefaulttest, frequency_min)

    def test_get_f_max(self):
        """get_f_max recovers frequency_max from f_min + cents-per-value."""
        frequency_max_defaulttest = get_f_max(
            frequency_min, cents_per_value, vmin, vmax)
        self.assertEqual(frequency_max_defaulttest, frequency_max)

        frequency_max_nondefaulttest = get_f_max(
            frequency_min, cents_per_value2, vmin2, vmax2)
        self.assertEqual(frequency_max_nondefaulttest, frequency_max)

    def test_TooManyInputsFrequencyScale(self):
        """FrequencyScale rejects all three scale inputs given together."""
        # fails when you give frequency scale 3 inputs
        def tooManyInputsFunc():
            Scale = FrequencyScale(frequency_min=frequency_min,
                                   frequency_max=frequency_max,
                                   cents_per_value=cents_per_value)

        # fails when you give frequency scale 3 inputs
        self.assertRaises(Exception, tooManyInputsFunc)

    def test_TooFewInputsInFrequencyScale(self):
        """FrequencyScale rejects a single scale input (two are required)."""
        # fails when you give frequency scale 1 input
        def only_frequency_min():
            Scale = FrequencyScale(frequency_min=frequency_min)
        self.assertRaises(Exception, only_frequency_min)

        def only_frequency_max():
            Scale = FrequencyScale(frequency_max=frequency_max)
        self.assertRaises(Exception, only_frequency_max)

        def only_cents_per_value():
            Scale = FrequencyScale(cents_per_value=cents_per_value)
        self.assertRaises(Exception, only_cents_per_value)

    def test_FrequencyRangeWarnings(self):
        """Frequencies outside the audible range each raise one UserWarning."""
        # frequency min out of range
        with pytest.warns(UserWarning) as record:
            Scale = FrequencyScale(frequency_min=frequency_min_outofrange,
                                   frequency_max=frequency_max,
                                   value_min = vmin,
                                   value_max = vmax)
        self.assertTrue(len(record) == 1)
        self.assertTrue("minimum" in str(record[0].message))

        # frequency max out of range
        with pytest.warns(UserWarning) as record:
            Scale = FrequencyScale(frequency_min=frequency_min,
                                   frequency_max=frequency_max_outofrange,
                                   value_min = vmin,
                                   value_max = vmax)
        self.assertTrue(len(record) == 1)
        self.assertTrue("maximum" in str(record[0].message))

        # both frequency min and max out of range
        # NOTE(review): the "maximum" warning is expected first here —
        # this pins the implementation's warning order.
        with pytest.warns(UserWarning) as record:
            Scale = FrequencyScale(frequency_min=frequency_min_outofrange,
                                   frequency_max=frequency_max_outofrange,
                                   value_min = vmin,
                                   value_max = vmax)
        self.assertTrue(len(record) == 2)
        self.assertTrue("maximum" in str(record[0].message))
        self.assertTrue("minimum" in str(record[1].message))

    def test_value_min_greater_than_value_max_warning(self):
        """Swapped value bounds (min > max) raise a single UserWarning."""
        # warns when you put value_min > value_max
        with pytest.warns(UserWarning) as record:
            Scale = FrequencyScale(frequency_min=frequency_min,
                                   frequency_max=frequency_max,
                                   value_min = vmax,
                                   value_max = vmin)
        self.assertTrue(len(record) == 1)
        self.assertTrue("greater than" in str(record[0].message))

    def test_missingVminVmax(self):
        """FrequencyScale rejects two scale inputs without a value range."""
        def missingVminVax():
            Scale = FrequencyScale(frequency_max=frequency_max,
                                   cents_per_value=cents_per_value)

        # fails when missing vmin and vmax
        self.assertRaises(Exception, missingVminVax)

    def test_alternateVminVmax(self):
        """Any two of (f_min, f_max, cents_per_value) determine the third."""
        # missing frequency_min
        Scale = FrequencyScale(frequency_max=frequency_max, cents_per_value=cents_per_value2,
                               value_min=vmin2, value_max=vmax2)
        self.assertEqual(Scale.y_frequency_min, frequency_min)

        # missing frequency_max
        Scale = FrequencyScale(frequency_min=frequency_min, cents_per_value=cents_per_value2,
                               value_min=vmin2, value_max=vmax2)
        self.assertEqual(Scale.y_frequency_max, frequency_max)

        # missing cents_per_value
        Scale = FrequencyScale(frequency_min=frequency_min, frequency_max=frequency_max,
                               value_min=vmin2, value_max=vmax2)
        self.assertEqual(Scale.y_cents_per_value, cents_per_value2)
if __name__ == '__main__':
    # Bug fix: only `TestCase` is imported at the top of the file, so the
    # bare `unittest.main()` raised NameError when the file was run directly.
    # Import the module here before invoking the test runner.
    import unittest
    unittest.main()
|
lockepattonREPO_NAMEsonipyPATH_START.@sonipy_extracted@sonipy-master@tests@test_frequency.py@.PATH_END.py
|
{
"filename": "test_resampling.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/stats/tests/test_resampling.py",
"type": "Python"
}
|
import numpy as np
import pytest
from scipy.stats import bootstrap, monte_carlo_test, permutation_test
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from scipy import stats
from scipy import special
from scipy.stats import _resampling as _resampling
from scipy._lib._util import rng_integers
from scipy.optimize import root
def test_bootstrap_iv():
    """Input validation: each malformed argument to `bootstrap` raises
    ValueError with a specific message."""
    # (message pattern, positional args, keyword args) — every case must
    # raise ValueError whose text matches the pattern.
    cases = [
        ("`data` must be a sequence of samples.",
         (1, np.mean), {}),
        ("`data` must contain at least one sample.",
         (tuple(), np.mean), {}),
        ("each sample in `data` must contain two or more observations...",
         (([1, 2, 3], [1]), np.mean), {}),
        ("When `paired is True`, all samples must have the same length ",
         (([1, 2, 3], [1, 2, 3, 4]), np.mean), {'paired': True}),
        ("`vectorized` must be `True`, `False`, or `None`.",
         (1, np.mean), {'vectorized': 'ekki'}),
        ("`axis` must be an integer.",
         (([1, 2, 3],), np.mean), {'axis': 1.5}),
        ("could not convert string to float",
         (([1, 2, 3],), np.mean), {'confidence_level': 'ni'}),
        ("`n_resamples` must be a non-negative integer.",
         (([1, 2, 3],), np.mean), {'n_resamples': -1000}),
        ("`n_resamples` must be a non-negative integer.",
         (([1, 2, 3],), np.mean), {'n_resamples': 1000.5}),
        ("`batch` must be a positive integer or None.",
         (([1, 2, 3],), np.mean), {'batch': -1000}),
        ("`batch` must be a positive integer or None.",
         (([1, 2, 3],), np.mean), {'batch': 1000.5}),
        ("`method` must be in",
         (([1, 2, 3],), np.mean), {'method': 'ekki'}),
        ("`bootstrap_result` must have attribute `bootstrap_distribution'",
         (([1, 2, 3],), np.mean), {'bootstrap_result': 10}),
        ("Either `bootstrap_result.bootstrap_distribution.size`",
         (([1, 2, 3],), np.mean), {'n_resamples': 0}),
        ("'herring' cannot be used to seed a",
         (([1, 2, 3],), np.mean), {'random_state': 'herring'}),
    ]
    for message, args, kwargs in cases:
        with pytest.raises(ValueError, match=message):
            bootstrap(*args, **kwargs)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_bootstrap_batch(method, axis):
    """The `batch` size must not change the result, only memory use."""
    # for one-sample statistics, batch size shouldn't affect the result
    np.random.seed(0)
    x = np.random.rand(10, 11, 12)
    res1 = bootstrap((x,), np.mean, batch=None, method=method,
                     random_state=0, axis=axis, n_resamples=100)
    res2 = bootstrap((x,), np.mean, batch=10, method=method,
                     random_state=0, axis=axis, n_resamples=100)
    assert_equal(res2.confidence_interval.low, res1.confidence_interval.low)
    assert_equal(res2.confidence_interval.high, res1.confidence_interval.high)
    assert_equal(res2.standard_error, res1.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_paired(method):
    """`paired=True` on (x, y) must match bootstrapping shared indices.

    Bug fix: the parametrized `method` was never forwarded to `bootstrap`,
    so all three parametrizations silently exercised the default method.
    """
    np.random.seed(0)
    n = 100
    x = np.random.rand(n)
    y = np.random.rand(n)

    def my_statistic(x, y, axis=-1):
        # mean squared difference of the paired observations
        return ((x-y)**2).mean(axis=axis)

    def my_paired_statistic(i, axis=-1):
        # same statistic expressed over a resampled index array
        a = x[i]
        b = y[i]
        res = my_statistic(a, b)
        return res

    i = np.arange(len(x))

    res1 = bootstrap((i,), my_paired_statistic, method=method, random_state=0)
    res2 = bootstrap((x, y), my_statistic, paired=True, method=method,
                     random_state=0)

    assert_allclose(res1.confidence_interval, res2.confidence_interval)
    assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("paired", [True, False])
def test_bootstrap_vectorized(method, axis, paired):
    """Tiling a 1-d problem along `axis` reproduces the 1-d CI/SE per slice."""
    # test that paired is vectorized as expected: when samples are tiled,
    # CI and standard_error of each axis-slice is the same as those of the
    # original 1d sample
    np.random.seed(0)

    def my_statistic(x, y, z, axis=-1):
        # an arbitrary vectorized three-sample statistic
        return x.mean(axis=axis) + y.mean(axis=axis) + z.mean(axis=axis)

    shape = 10, 11, 12
    n_samples = shape[axis]

    x = np.random.rand(n_samples)
    y = np.random.rand(n_samples)
    z = np.random.rand(n_samples)

    res1 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
                     random_state=0, axis=0, n_resamples=100)
    # distribution shape = result shape + (n_resamples,)
    assert (res1.bootstrap_distribution.shape
            == res1.standard_error.shape + (100,))

    reshape = [1, 1, 1]
    reshape[axis] = n_samples
    x = np.broadcast_to(x.reshape(reshape), shape)
    y = np.broadcast_to(y.reshape(reshape), shape)
    z = np.broadcast_to(z.reshape(reshape), shape)

    res2 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
                     random_state=0, axis=axis, n_resamples=100)

    assert_allclose(res2.confidence_interval.low,
                    res1.confidence_interval.low)
    assert_allclose(res2.confidence_interval.high,
                    res1.confidence_interval.high)
    assert_allclose(res2.standard_error, res1.standard_error)

    result_shape = list(shape)
    result_shape.pop(axis)

    assert_equal(res2.confidence_interval.low.shape, result_shape)
    assert_equal(res2.confidence_interval.high.shape, result_shape)
    assert_equal(res2.standard_error.shape, result_shape)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_against_theory(method):
    """Bootstrap CI of a normal-sample mean matches the t-distribution CI."""
    # based on https://www.statology.org/confidence-intervals-python/
    rng = np.random.default_rng(2442101192988600726)
    data = stats.norm.rvs(loc=5, scale=2, size=5000, random_state=rng)
    alpha = 0.95
    dist = stats.t(df=len(data)-1, loc=np.mean(data), scale=stats.sem(data))
    expected_interval = dist.interval(confidence=alpha)
    expected_se = dist.std()

    config = dict(data=(data,), statistic=np.mean, n_resamples=5000,
                  method=method, random_state=rng)
    res = bootstrap(**config, confidence_level=alpha)
    assert_allclose(res.confidence_interval, expected_interval, rtol=5e-4)
    assert_allclose(res.standard_error, expected_se, atol=3e-4)

    # one-sided intervals reuse the existing bootstrap distribution
    # via n_resamples=0 + bootstrap_result
    config.update(dict(n_resamples=0, bootstrap_result=res))
    res = bootstrap(**config, confidence_level=alpha, alternative='less')
    assert_allclose(res.confidence_interval.high, dist.ppf(alpha), rtol=5e-4)

    config.update(dict(n_resamples=0, bootstrap_result=res))
    res = bootstrap(**config, confidence_level=alpha, alternative='greater')
    assert_allclose(res.confidence_interval.low, dist.ppf(1-alpha), rtol=5e-4)


# Reference CIs computed with R's "boot" library (script in the comments of
# test_bootstrap_against_R below).
tests_R = {"basic": (23.77, 79.12),
           "percentile": (28.86, 84.21),
           "BCa": (32.31, 91.43)}


@pytest.mark.parametrize("method, expected", tests_R.items())
def test_bootstrap_against_R(method, expected):
    """Two-sided CIs agree with R's boot/boot.ci output."""
    # Compare against R's "boot" library
    # library(boot)
    # stat <- function (x, a) {
    #     mean(x[a])
    # }
    # x <- c(10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
    #        23, 34, 50, 81, 89, 121, 134, 213)
    # # Use a large value so we get a few significant digits for the CI.
    # n = 1000000
    # bootresult = boot(x, stat, n)
    # result <- boot.ci(bootresult)
    # print(result)
    x = np.array([10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
                  23, 34, 50, 81, 89, 121, 134, 213])
    res = bootstrap((x,), np.mean, n_resamples=1000000, method=method,
                    random_state=0)
    assert_allclose(res.confidence_interval, expected, rtol=0.005)


# Expected CI-coverage counts (out of 2000 replications) for the 1-sample
# self-consistency test below; regenerated only if behavior changes.
tests_against_itself_1samp = {"basic": 1780,
                              "percentile": 1784,
                              "BCa": 1784}
def test_multisample_BCa_against_R():
    """SciPy's multi-sample BCa CI is closest to R wBoot's BCa reference."""
    # Because bootstrap is stochastic, it's tricky to test against reference
    # behavior. Here, we show that SciPy's BCa CI matches R wboot's BCa CI
    # much more closely than the other SciPy CIs do.

    # arbitrary skewed data
    x = [0.75859206, 0.5910282, -0.4419409, -0.36654601,
         0.34955357, -1.38835871, 0.76735821]
    y = [1.41186073, 0.49775975, 0.08275588, 0.24086388,
         0.03567057, 0.52024419, 0.31966611, 1.32067634]

    # a multi-sample statistic for which the BCa CI tends to be different
    # from the other CIs
    def statistic(x, y, axis):
        s1 = stats.skew(x, axis=axis)
        s2 = stats.skew(y, axis=axis)
        return s1 - s2

    # compute confidence intervals using each method
    rng = np.random.default_rng(468865032284792692)
    res_basic = stats.bootstrap((x, y), statistic, method='basic',
                                batch=100, random_state=rng)
    res_percent = stats.bootstrap((x, y), statistic, method='percentile',
                                  batch=100, random_state=rng)
    # NOTE(review): lowercase 'bca' here vs 'BCa' above — `method` appears
    # to be treated case-insensitively by `bootstrap`; confirm.
    res_bca = stats.bootstrap((x, y), statistic, method='bca',
                              batch=100, random_state=rng)

    # compute midpoints so we can compare just one number for each
    mid_basic = np.mean(res_basic.confidence_interval)
    mid_percent = np.mean(res_percent.confidence_interval)
    mid_bca = np.mean(res_bca.confidence_interval)

    # reference for BCA CI computed using R wboot package:
    # library(wBoot)
    # library(moments)

    # x = c(0.75859206, 0.5910282, -0.4419409, -0.36654601,
    #       0.34955357, -1.38835871, 0.76735821)
    # y = c(1.41186073, 0.49775975, 0.08275588, 0.24086388,
    #       0.03567057, 0.52024419, 0.31966611, 1.32067634)

    # twoskew <- function(x1, y1) {skewness(x1) - skewness(y1)}
    # boot.two.bca(x, y, skewness, conf.level = 0.95,
    #              R = 9999, stacked = FALSE)
    mid_wboot = -1.5519

    # compute percent difference relative to wboot BCA method
    diff_basic = (mid_basic - mid_wboot)/abs(mid_wboot)
    diff_percent = (mid_percent - mid_wboot)/abs(mid_wboot)
    diff_bca = (mid_bca - mid_wboot)/abs(mid_wboot)

    # SciPy's BCa CI midpoint is much closer than that of the other methods
    assert diff_basic < -0.15
    assert diff_percent > 0.15
    assert abs(diff_bca) < 0.03


def test_BCa_acceleration_against_reference():
    """The deterministic BCa acceleration parameter matches the reference."""
    # Compare the (deterministic) acceleration parameter for a multi-sample
    # problem against a reference value. The example is from [1], but Efron's
    # value seems inaccurate. Straightforward code for computing the
    # reference acceleration (0.011008228344026734) is available at:
    # https://github.com/scipy/scipy/pull/16455#issuecomment-1193400981
    y = np.array([10, 27, 31, 40, 46, 50, 52, 104, 146])
    z = np.array([16, 23, 38, 94, 99, 141, 197])

    def statistic(z, y, axis=0):
        return np.mean(z, axis=axis) - np.mean(y, axis=axis)

    data = [z, y]
    res = stats.bootstrap(data, statistic)

    axis = -1
    alpha = 0.95
    theta_hat_b = res.bootstrap_distribution
    batch = 100
    # only the acceleration a_hat is checked; the percentiles are stochastic
    _, _, a_hat = _resampling._bca_interval(data, statistic, axis, alpha,
                                            theta_hat_b, batch)
    assert_allclose(a_hat, 0.011008228344026734)
@pytest.mark.parametrize("method, expected",
                         tests_against_itself_1samp.items())
def test_bootstrap_against_itself_1samp(method, expected):
    """Self-consistency: 1-sample CI coverage counts match recorded values."""
    # The expected values in this test were generated using bootstrap
    # to check for unintended changes in behavior. The test also makes sure
    # that bootstrap works with multi-sample statistics and that the
    # `axis` argument works as expected / function is vectorized.
    np.random.seed(0)

    n = 100  # size of sample
    n_resamples = 999  # number of bootstrap resamples used to form each CI
    confidence_level = 0.9

    # The true mean is 5
    dist = stats.norm(loc=5, scale=1)
    stat_true = dist.mean()

    # Do the same thing 2000 times. (The code is fully vectorized.)
    n_replications = 2000
    data = dist.rvs(size=(n_replications, n))
    res = bootstrap((data,),
                    statistic=np.mean,
                    confidence_level=confidence_level,
                    n_resamples=n_resamples,
                    batch=50,
                    method=method,
                    axis=-1)
    ci = res.confidence_interval

    # ci contains vectors of lower and upper confidence interval bounds
    ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
    assert ci_contains_true == expected

    # ci_contains_true is not inconsistent with confidence_level
    pvalue = stats.binomtest(ci_contains_true, n_replications,
                             confidence_level).pvalue
    assert pvalue > 0.1


# Expected CI-coverage counts (out of 1000 replications) for the 2-sample
# self-consistency test below.
tests_against_itself_2samp = {"basic": 892,
                              "percentile": 890}


@pytest.mark.parametrize("method, expected",
                         tests_against_itself_2samp.items())
def test_bootstrap_against_itself_2samp(method, expected):
    """Self-consistency: 2-sample CI coverage counts match recorded values."""
    # The expected values in this test were generated using bootstrap
    # to check for unintended changes in behavior. The test also makes sure
    # that bootstrap works with multi-sample statistics and that the
    # `axis` argument works as expected / function is vectorized.
    np.random.seed(0)

    n1 = 100  # size of sample 1
    n2 = 120  # size of sample 2
    n_resamples = 999  # number of bootstrap resamples used to form each CI
    confidence_level = 0.9

    # The statistic we're interested in is the difference in means
    def my_stat(data1, data2, axis=-1):
        mean1 = np.mean(data1, axis=axis)
        mean2 = np.mean(data2, axis=axis)
        return mean1 - mean2

    # The true difference in the means is -0.1
    dist1 = stats.norm(loc=0, scale=1)
    dist2 = stats.norm(loc=0.1, scale=1)
    stat_true = dist1.mean() - dist2.mean()

    # Do the same thing 1000 times. (The code is fully vectorized.)
    n_replications = 1000
    data1 = dist1.rvs(size=(n_replications, n1))
    data2 = dist2.rvs(size=(n_replications, n2))
    res = bootstrap((data1, data2),
                    statistic=my_stat,
                    confidence_level=confidence_level,
                    n_resamples=n_resamples,
                    batch=50,
                    method=method,
                    axis=-1)
    ci = res.confidence_interval

    # ci contains vectors of lower and upper confidence interval bounds
    ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
    assert ci_contains_true == expected

    # ci_contains_true is not inconsistent with confidence_level
    pvalue = stats.binomtest(ci_contains_true, n_replications,
                             confidence_level).pvalue
    assert pvalue > 0.1
@pytest.mark.parametrize("method", ["basic", "percentile"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_3samp(method, axis):
    """vectorized=True and vectorized=False agree for a 3-sample statistic."""
    def statistic(*data, axis=0):
        # an arbitrary, vectorized statistic
        return sum(sample.mean(axis) for sample in data)

    def statistic_1d(*data):
        # the same statistic, not vectorized
        for sample in data:
            assert sample.ndim == 1
        return statistic(*data, axis=0)

    np.random.seed(0)
    x = np.random.rand(4, 5)
    y = np.random.rand(4, 5)
    z = np.random.rand(4, 5)
    res1 = bootstrap((x, y, z), statistic, vectorized=True,
                     axis=axis, n_resamples=100, method=method, random_state=0)
    res2 = bootstrap((x, y, z), statistic_1d, vectorized=False,
                     axis=axis, n_resamples=100, method=method, random_state=0)
    assert_allclose(res1.confidence_interval, res2.confidence_interval)
    assert_allclose(res1.standard_error, res2.standard_error)


@pytest.mark.xfail_on_32bit("Failure is not concerning; see gh-14107")
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_1samp(method, axis):
    """vectorized=True and vectorized=False agree for a 1-sample statistic."""
    def statistic(x, axis=0):
        # an arbitrary, vectorized statistic
        return x.mean(axis=axis)

    def statistic_1d(x):
        # the same statistic, not vectorized
        assert x.ndim == 1
        return statistic(x, axis=0)

    np.random.seed(0)
    x = np.random.rand(4, 5)
    # different `batch` values must not change the result either
    res1 = bootstrap((x,), statistic, vectorized=True, axis=axis,
                     n_resamples=100, batch=None, method=method,
                     random_state=0)
    res2 = bootstrap((x,), statistic_1d, vectorized=False, axis=axis,
                     n_resamples=100, batch=10, method=method,
                     random_state=0)
    assert_allclose(res1.confidence_interval, res2.confidence_interval)
    assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_degenerate(method):
    """Constant data: CI collapses to a point (NaN + warning for BCa)."""
    data = 35 * [10000.]
    if method == "BCa":
        with np.errstate(invalid='ignore'):
            msg = "The BCa confidence interval cannot be calculated"
            with pytest.warns(stats.DegenerateDataWarning, match=msg):
                res = bootstrap([data, ], np.mean, method=method)
                assert_equal(res.confidence_interval, (np.nan, np.nan))
    else:
        res = bootstrap([data, ], np.mean, method=method)
        assert_equal(res.confidence_interval, (10000., 10000.))
    assert_equal(res.standard_error, 0)


@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_gh15678(method):
    """gh-15678: statistic returning a Python float works with BCa."""
    # Check that gh-15678 is fixed: when statistic function returned a Python
    # float, method="BCa" failed when trying to add a dimension to the float
    rng = np.random.default_rng(354645618886684)
    dist = stats.norm(loc=2, scale=4)
    data = dist.rvs(size=100, random_state=rng)
    data = (data,)
    res = bootstrap(data, stats.skew, method=method, n_resamples=100,
                    random_state=np.random.default_rng(9563))
    # this always worked because np.apply_along_axis returns NumPy data type
    ref = bootstrap(data, stats.skew, method=method, n_resamples=100,
                    random_state=np.random.default_rng(9563), vectorized=False)
    assert_allclose(res.confidence_interval, ref.confidence_interval)
    assert_allclose(res.standard_error, ref.standard_error)
    assert isinstance(res.standard_error, np.float64)


def test_bootstrap_min():
    """gh-15883: BCa of np.min is finite, and min/max are sign-symmetric."""
    # Check that gh-15883 is fixed: percentileofscore should
    # behave according to the 'mean' behavior and not trigger nan for BCa
    rng = np.random.default_rng(1891289180021102)
    dist = stats.norm(loc=2, scale=4)
    data = dist.rvs(size=100, random_state=rng)
    true_min = np.min(data)
    data = (data,)
    res = bootstrap(data, np.min, method="BCa", n_resamples=100,
                    random_state=np.random.default_rng(3942))
    assert true_min == res.confidence_interval.low
    # negated data + np.max mirrors the np.min interval exactly
    res2 = bootstrap(-np.array(data), np.max, method="BCa", n_resamples=100,
                     random_state=np.random.default_rng(3942))
    assert_allclose(-res.confidence_interval.low,
                    res2.confidence_interval.high)
    assert_allclose(-res.confidence_interval.high,
                    res2.confidence_interval.low)
@pytest.mark.parametrize("additional_resamples", [0, 1000])
def test_re_bootstrap(additional_resamples):
    """`bootstrap_result` lets additional resamples extend a previous run."""
    # Test behavior of parameter `bootstrap_result`
    rng = np.random.default_rng(8958153316228384)
    x = rng.random(size=100)

    n1 = 1000
    n2 = additional_resamples
    n3 = n1 + additional_resamples

    # Continuing a run (even with a different method/confidence level) must
    # match a single run with the combined number of resamples.
    rng = np.random.default_rng(296689032789913033)
    res = stats.bootstrap((x,), np.mean, n_resamples=n1, random_state=rng,
                          confidence_level=0.95, method='percentile')
    res = stats.bootstrap((x,), np.mean, n_resamples=n2, random_state=rng,
                          confidence_level=0.90, method='BCa',
                          bootstrap_result=res)

    rng = np.random.default_rng(296689032789913033)
    ref = stats.bootstrap((x,), np.mean, n_resamples=n3, random_state=rng,
                          confidence_level=0.90, method='BCa')

    assert_allclose(res.standard_error, ref.standard_error, rtol=1e-14)
    assert_allclose(res.confidence_interval, ref.confidence_interval,
                    rtol=1e-14)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_alternative(method):
    """One-sided CIs share the corresponding bound with the two-sided CI.

    Bug fix: the parametrized `method` was never forwarded to `bootstrap`,
    so every parametrization silently exercised only the default method.
    The shared-bound property holds for all methods: a 95% one-sided bound
    equals the corresponding bound of a 90% two-sided interval.
    """
    rng = np.random.default_rng(5894822712842015040)
    dist = stats.norm(loc=2, scale=4)
    data = (dist.rvs(size=(100), random_state=rng),)

    config = dict(data=data, statistic=np.std, random_state=rng, axis=-1,
                  method=method)
    t = stats.bootstrap(**config, confidence_level=0.9)

    # reuse the same bootstrap distribution (n_resamples=0) for one-sided CIs
    config.update(dict(n_resamples=0, bootstrap_result=t))
    l = stats.bootstrap(**config, confidence_level=0.95, alternative='less')
    g = stats.bootstrap(**config, confidence_level=0.95, alternative='greater')

    assert_equal(l.confidence_interval.high, t.confidence_interval.high)
    assert_equal(g.confidence_interval.low, t.confidence_interval.low)
    assert np.isneginf(l.confidence_interval.low)
    assert np.isposinf(g.confidence_interval.high)

    with pytest.raises(ValueError, match='`alternative` must be one of'):
        stats.bootstrap(**config, alternative='ekki-ekki')
def test_jackknife_resample():
    """Each jackknife resample leaves one observation out along the last
    axis, and batching does not change the resamples produced."""
    dims = (3, 4, 5, 6)
    np.random.seed(0)
    sample = np.random.rand(*dims)

    resamples = next(_resampling._jackknife_resample(sample))
    for left_out in range(dims[-1]):
        # resample `left_out` is indexed along the second-to-last axis;
        # the last axis is the one the statistic will consume
        assert np.array_equal(resamples[..., left_out, :],
                              np.delete(sample, left_out, axis=-1))

    # concatenating batched output reproduces the unbatched resamples
    batched = np.concatenate(
        list(_resampling._jackknife_resample(sample, batch=2)), axis=-2)
    assert np.array_equal(batched, resamples)
@pytest.mark.parametrize("rng_name", ["RandomState", "default_rng"])
def test_bootstrap_resample(rng_name):
    """_bootstrap_resample draws i.i.d. index sets along the last axis."""
    rng = getattr(np.random, rng_name, None)
    if rng is None:
        pytest.skip(f"{rng_name} not available.")
    rng1 = rng(0)
    rng2 = rng(0)

    n_resamples = 10
    shape = 3, 4, 5, 6

    np.random.seed(0)
    x = np.random.rand(*shape)

    y = _resampling._bootstrap_resample(x, n_resamples, random_state=rng1)

    for i in range(n_resamples):
        # each resample is indexed along second to last axis
        # (last axis is the one the statistic will be taken over / consumed)
        slc = y[..., i, :]

        # rng2 mirrors rng1, so the expected indices can be regenerated
        js = rng_integers(rng2, 0, shape[-1], shape[-1])
        expected = x[..., js]

        assert np.array_equal(slc, expected)


@pytest.mark.parametrize("score", [0, 0.5, 1])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_percentile_of_score(score, axis):
    """_percentile_of_score matches stats.percentileofscore (as a fraction)."""
    # NOTE(review): the parametrized `axis` is unused — both computations
    # hard-code axis=-1; consider forwarding the parameter.
    shape = 10, 20, 30
    np.random.seed(0)
    x = np.random.rand(*shape)
    p = _resampling._percentile_of_score(x, score, axis=-1)

    def vectorized_pos(a, score, axis):
        return np.apply_along_axis(stats.percentileofscore, axis, a, score)

    p2 = vectorized_pos(x, score, axis=-1)/100

    assert_allclose(p, p2, 1e-15)
def test_percentile_along_axis():
    """_percentile_along_axis pairs ONE percentile with each axis slice —
    unlike np.percentile, which evaluates every q for every slice."""
    shape = 10, 20
    np.random.seed(0)
    x = np.random.rand(*shape)
    q = np.random.rand(*shape[:-1]) * 100
    actual = _resampling._percentile_along_axis(x, q)
    # each row's result equals np.percentile of that row at its own q
    for row, q_i, res_i in zip(x, q, actual):
        assert_allclose(res_i, np.percentile(row, q_i, axis=-1), 1e-15)
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_vectorize_statistic(axis):
    """_vectorize_statistic makes a 1-d statistic broadcast along `axis`."""
    # test that _vectorize_statistic vectorizes a statistic along `axis`

    def statistic(*data, axis):
        # an arbitrary, vectorized statistic
        return sum(sample.mean(axis) for sample in data)

    def statistic_1d(*data):
        # the same statistic, not vectorized
        for sample in data:
            assert sample.ndim == 1
        return statistic(*data, axis=0)

    # vectorize the non-vectorized statistic
    statistic2 = _resampling._vectorize_statistic(statistic_1d)

    np.random.seed(0)
    x = np.random.rand(4, 5, 6)
    y = np.random.rand(4, 1, 6)
    z = np.random.rand(1, 5, 6)

    res1 = statistic(x, y, z, axis=axis)
    res2 = statistic2(x, y, z, axis=axis)
    assert_allclose(res1, res2)


@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_vector_valued_statistic(method):
    """Vector-valued statistics yield per-component CIs of the right shape."""
    # Generate 95% confidence interval around MLE of normal distribution
    # parameters. Repeat 100 times, each time on sample of size 100.
    # Check that confidence interval contains true parameters ~95 times.

    # Confidence intervals are estimated and stochastic; a test failure
    # does not necessarily indicate that something is wrong. More important
    # than values of `counts` below is that the shapes of the outputs are
    # correct.

    rng = np.random.default_rng(2196847219)
    params = 1, 0.5
    sample = stats.norm.rvs(*params, size=(100, 100), random_state=rng)

    def statistic(data, axis):
        # (mean, sample std) — a length-2 vector statistic
        return np.asarray([np.mean(data, axis),
                           np.std(data, axis, ddof=1)])

    res = bootstrap((sample,), statistic, method=method, axis=-1,
                    n_resamples=9999, batch=200)

    counts = np.sum((res.confidence_interval.low.T < params)
                    & (res.confidence_interval.high.T > params),
                    axis=0)
    assert np.all(counts >= 90)
    assert np.all(counts <= 100)
    assert res.confidence_interval.low.shape == (2, 100)
    assert res.confidence_interval.high.shape == (2, 100)
    assert res.standard_error.shape == (2, 100)
    assert res.bootstrap_distribution.shape == (2, 100, 9999)
@pytest.mark.slow
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_vector_valued_statistic_gh17715():
    """gh-17715: multi-sample BCa must use shape[-1], not len()."""
    # gh-17715 reported a mistake introduced in the extension of BCa to
    # multi-sample statistics; a `len` should have been `.shape[-1]`. Check
    # that this is resolved.
    rng = np.random.default_rng(141921000979291141)

    def concordance(x, y, axis):
        # concordance correlation along `axis`
        xm = x.mean(axis)
        ym = y.mean(axis)
        cov = ((x - xm[..., None]) * (y - ym[..., None])).mean(axis)
        return (2 * cov) / (x.var(axis) + y.var(axis) + (xm - ym) ** 2)

    def statistic(tp, tn, fp, fn, axis):
        actual = tp + fp
        expected = tp + fn
        return np.nan_to_num(concordance(actual, expected, axis))

    def statistic_extradim(*args, axis):
        # same statistic, with a leading singleton dimension added
        return statistic(*args, axis)[np.newaxis, ...]

    data = [[4, 0, 0, 2],  # (tp, tn, fp, fn)
            [2, 1, 2, 1],
            [0, 6, 0, 0],
            [0, 6, 3, 0],
            [0, 8, 1, 0]]
    data = np.array(data).T

    res = bootstrap(data, statistic_extradim, random_state=rng, paired=True)
    ref = bootstrap(data, statistic, random_state=rng, paired=True)
    assert_allclose(res.confidence_interval.low[0],
                    ref.confidence_interval.low, atol=1e-15)
    assert_allclose(res.confidence_interval.high[0],
                    ref.confidence_interval.high, atol=1e-15)
# --- Test Monte Carlo Hypothesis Test --- #
class TestMonteCarloHypothesisTest:
    # tolerance for comparing Monte Carlo p-values against exact references
    atol = 2.5e-2  # for comparing p-value

    def rvs(self, rvs_in, rs):
        """Bind random state `rs` into sampler `rvs_in`, keeping its
        call signature otherwise unchanged."""
        return lambda *args, **kwds: rvs_in(*args, random_state=rs, **kwds)
    def test_input_validation(self):
        """Malformed arguments to `monte_carlo_test` raise specific errors."""
        # test that the appropriate error messages are raised for invalid input

        def stat(x):
            return stats.skewnorm(x).statistic

        message = "Array shapes are incompatible for broadcasting."
        data = (np.zeros((2, 5)), np.zeros((3, 5)))
        rvs = (stats.norm.rvs, stats.norm.rvs)
        with pytest.raises(ValueError, match=message):
            monte_carlo_test(data, rvs, lambda x, y: 1, axis=-1)

        message = "`axis` must be an integer."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, axis=1.5)

        message = "`vectorized` must be `True`, `False`, or `None`."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, vectorized=1.5)

        message = "`rvs` must be callable or sequence of callables."
        with pytest.raises(TypeError, match=message):
            monte_carlo_test([1, 2, 3], None, stat)
        with pytest.raises(TypeError, match=message):
            monte_carlo_test([[1, 2], [3, 4]], [lambda x: x, None], stat)

        message = "If `rvs` is a sequence..."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([[1, 2, 3]], [lambda x: x, lambda x: x], stat)

        message = "`statistic` must be callable."
        with pytest.raises(TypeError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, None)

        message = "`n_resamples` must be a positive integer."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
                             n_resamples=-1000)

        message = "`n_resamples` must be a positive integer."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
                             n_resamples=1000.5)

        message = "`batch` must be a positive integer or None."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=-1000)

        message = "`batch` must be a positive integer or None."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=1000.5)

        message = "`alternative` must be in..."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
                             alternative='ekki')
def test_batch(self):
    """`batch` bounds how many resamples each call to `statistic` receives."""
    # make sure that the `batch` parameter is respected by checking the
    # maximum batch size provided in calls to `statistic`
    rng = np.random.default_rng(23492340193)
    x = rng.random(10)

    def statistic(x, axis):
        # Record the largest batch (leading dimension) seen and the number
        # of calls; `statistic` attributes act as cheap mutable counters.
        batch_size = 1 if x.ndim == 1 else len(x)
        statistic.batch_size = max(batch_size, statistic.batch_size)
        statistic.counter += 1
        return stats.skewtest(x, axis=axis).statistic
    statistic.counter = 0
    statistic.batch_size = 0

    kwds = {'sample': x, 'statistic': statistic,
            'n_resamples': 1000, 'vectorized': True}

    # batch=1: one call per resample plus one for the observed statistic.
    kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
    res1 = monte_carlo_test(batch=1, **kwds)
    assert_equal(statistic.counter, 1001)
    assert_equal(statistic.batch_size, 1)

    # batch=50: 1000/50 = 20 batched calls plus the observed-statistic call.
    kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
    statistic.counter = 0
    res2 = monte_carlo_test(batch=50, **kwds)
    assert_equal(statistic.counter, 21)
    assert_equal(statistic.batch_size, 50)

    # default batch: all 1000 resamples in a single call.
    kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
    statistic.counter = 0
    res3 = monte_carlo_test(**kwds)
    assert_equal(statistic.counter, 2)
    assert_equal(statistic.batch_size, 1000)

    # Identical rvs seeds mean identical p-values regardless of batching.
    assert_equal(res1.pvalue, res3.pvalue)
    assert_equal(res2.pvalue, res3.pvalue)
@pytest.mark.parametrize('axis', range(-3, 3))
def test_axis(self, axis):
    """Nd-array samples are handled correctly for every valid `axis`."""
    # test that Nd-array samples are handled correctly for valid values
    # of the `axis` parameter
    rng = np.random.default_rng(2389234)
    norm_rvs = self.rvs(stats.norm.rvs, rng)

    # Put the large (sample) dimension on the axis under test.
    size = [2, 3, 4]
    size[axis] = 100
    x = norm_rvs(size=size)
    expected = stats.skewtest(x, axis=axis)

    def statistic(x, axis):
        return stats.skewtest(x, axis=axis).statistic

    res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
                           n_resamples=20000, axis=axis)

    assert_allclose(res.statistic, expected.statistic)
    assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('alternative', ("less", "greater"))
@pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5))  # skewness
def test_against_ks_1samp(self, alternative, a):
    """monte_carlo_test reproduces the p-value of `ks_1samp`."""
    # test that monte_carlo_test can reproduce pvalue of ks_1samp
    rng = np.random.default_rng(65723433)

    x = stats.skewnorm.rvs(a=a, size=30, random_state=rng)
    expected = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative)

    def statistic1d(x):
        return stats.ks_1samp(x, stats.norm.cdf, mode='asymp',
                              alternative=alternative).statistic

    norm_rvs = self.rvs(stats.norm.rvs, rng)
    res = monte_carlo_test(x, norm_rvs, statistic1d,
                           n_resamples=1000, vectorized=False,
                           alternative=alternative)

    assert_allclose(res.statistic, expected.statistic)
    if alternative == 'greater':
        assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
    elif alternative == 'less':
        # NOTE: for 'less' the two conventions are complementary, so the
        # complement of the Monte Carlo p-value is compared.
        assert_allclose(1-res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('hypotest', (stats.skewtest, stats.kurtosistest))
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
@pytest.mark.parametrize('a', np.linspace(-2, 2, 5))  # skewness
def test_against_normality_tests(self, hypotest, alternative, a):
    """monte_carlo_test reproduces the p-values of skewtest/kurtosistest."""
    # test that monte_carlo_test can reproduce pvalue of normality tests
    rng = np.random.default_rng(85723405)

    x = stats.skewnorm.rvs(a=a, size=150, random_state=rng)
    expected = hypotest(x, alternative=alternative)

    def statistic(x, axis):
        return hypotest(x, axis=axis).statistic

    norm_rvs = self.rvs(stats.norm.rvs, rng)
    res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
                           alternative=alternative)

    assert_allclose(res.statistic, expected.statistic)
    assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('a', np.arange(-2, 3))  # skewness parameter
def test_against_normaltest(self, a):
    """monte_carlo_test reproduces the p-value of `normaltest`."""
    # test that monte_carlo_test can reproduce pvalue of normaltest
    rng = np.random.default_rng(12340513)

    x = stats.skewnorm.rvs(a=a, size=150, random_state=rng)
    expected = stats.normaltest(x)

    def statistic(x, axis):
        return stats.normaltest(x, axis=axis).statistic

    # `normaltest` is one-sided: large statistic means non-normal.
    norm_rvs = self.rvs(stats.norm.rvs, rng)
    res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
                           alternative='greater')

    assert_allclose(res.statistic, expected.statistic)
    assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5))  # skewness
def test_against_cramervonmises(self, a):
    """monte_carlo_test reproduces the p-value of `cramervonmises`."""
    # test that monte_carlo_test can reproduce pvalue of cramervonmises
    rng = np.random.default_rng(234874135)

    x = stats.skewnorm.rvs(a=a, size=30, random_state=rng)
    expected = stats.cramervonmises(x, stats.norm.cdf)

    def statistic1d(x):
        return stats.cramervonmises(x, stats.norm.cdf).statistic

    norm_rvs = self.rvs(stats.norm.rvs, rng)
    res = monte_carlo_test(x, norm_rvs, statistic1d,
                          n_resamples=1000, vectorized=False,
                          alternative='greater')

    assert_allclose(res.statistic, expected.statistic)
    assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('dist_name', ('norm', 'logistic'))
@pytest.mark.parametrize('i', range(5))
def test_against_anderson(self, dist_name, i):
    """monte_carlo_test reproduces the significance levels of `anderson`."""
    # test that monte_carlo_test can reproduce results of `anderson`. Note:
    # `anderson` does not provide a p-value; it provides a list of
    # significance levels and the associated critical value of the test
    # statistic. `i` used to index this list.

    # find the skewness for which the sample statistic matches one of the
    # critical values provided by `stats.anderson`
    def fun(a):
        rng = np.random.default_rng(394295467)
        x = stats.tukeylambda.rvs(a, size=100, random_state=rng)
        expected = stats.anderson(x, dist_name)
        return expected.statistic - expected.critical_values[i]
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)
        sol = root(fun, x0=0)
    assert sol.success

    # get the significance level (p-value) associated with that critical
    # value
    a = sol.x[0]
    rng = np.random.default_rng(394295467)
    x = stats.tukeylambda.rvs(a, size=100, random_state=rng)
    expected = stats.anderson(x, dist_name)
    expected_stat = expected.statistic
    expected_p = expected.significance_level[i]/100

    # perform equivalent Monte Carlo test and compare results
    def statistic1d(x):
        return stats.anderson(x, dist_name).statistic

    dist_rvs = self.rvs(getattr(stats, dist_name).rvs, rng)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)
        res = monte_carlo_test(x, dist_rvs,
                               statistic1d, n_resamples=1000,
                               vectorized=False, alternative='greater')

    assert_allclose(res.statistic, expected_stat)
    assert_allclose(res.pvalue, expected_p, atol=2*self.atol)
def test_p_never_zero(self):
# Use biased estimate of p-value to ensure that p-value is never zero
# per monte_carlo_test reference [1]
rng = np.random.default_rng(2190176673029737545)
x = np.zeros(100)
res = monte_carlo_test(x, rng.random, np.mean,
vectorized=True, alternative='less')
assert res.pvalue == 0.0001
def test_against_ttest_ind(self):
# test that `monte_carlo_test` can reproduce results of `ttest_ind`.
rng = np.random.default_rng(219017667302737545)
data = rng.random(size=(2, 5)), rng.random(size=7) # broadcastable
rvs = rng.normal, rng.normal
def statistic(x, y, axis):
return stats.ttest_ind(x, y, axis).statistic
res = stats.monte_carlo_test(data, rvs, statistic, axis=-1)
ref = stats.ttest_ind(data[0], [data[1]], axis=-1)
assert_allclose(res.statistic, ref.statistic)
assert_allclose(res.pvalue, ref.pvalue, rtol=2e-2)
def test_against_f_oneway(self):
    """monte_carlo_test reproduces the p-value of `f_oneway`."""
    # test that `monte_carlo_test` can reproduce results of `f_oneway`.
    rng = np.random.default_rng(219017667302737545)
    # Four groups of slightly different sizes.
    data = (rng.random(size=(2, 100)), rng.random(size=(2, 101)),
            rng.random(size=(2, 102)), rng.random(size=(2, 103)))
    rvs = rng.normal, rng.normal, rng.normal, rng.normal

    def statistic(*args, axis):
        return stats.f_oneway(*args, axis=axis).statistic

    # The F statistic is one-sided: large values indicate group differences.
    res = stats.monte_carlo_test(data, rvs, statistic, axis=-1,
                                 alternative='greater')
    ref = stats.f_oneway(*data, axis=-1)

    assert_allclose(res.statistic, ref.statistic)
    assert_allclose(res.pvalue, ref.pvalue, atol=1e-2)
class TestPermutationTest:
    """Tests for `scipy.stats.permutation_test`."""

    # Tolerance for comparisons that should agree to near machine precision.
    rtol = 1e-14

    def setup_method(self):
        # Fresh, fixed-seed generator for each test, for reproducibility.
        self.rng = np.random.default_rng(7170559330470561044)
# -- Input validation -- #
def test_permutation_test_iv(self):
    """Invalid inputs to `permutation_test` raise the documented errors."""

    def stat(x, y, axis):
        # NOTE(review): this passes `(x, y)` as a single first argument to
        # `ttest_ind`; it is never actually invoked because input
        # validation fails first — confirm whether
        # `stats.ttest_ind(x, y, axis)` was intended.
        return stats.ttest_ind((x, y), axis).statistic

    message = "each sample in `data` must contain two or more ..."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1]), stat)

    message = "`data` must be a tuple containing at least two samples"
    with pytest.raises(ValueError, match=message):
        permutation_test((1,), stat)
    with pytest.raises(TypeError, match=message):
        permutation_test(1, stat)

    message = "`axis` must be an integer."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, axis=1.5)

    message = "`permutation_type` must be in..."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat,
                         permutation_type="ekki")

    message = "`vectorized` must be `True`, `False`, or `None`."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, vectorized=1.5)

    message = "`n_resamples` must be a positive integer."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=-1000)

    message = "`n_resamples` must be a positive integer."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=1000.5)

    message = "`batch` must be a positive integer or None."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=-1000)

    message = "`batch` must be a positive integer or None."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=1000.5)

    message = "`alternative` must be in..."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, alternative='ekki')

    message = "'herring' cannot be used to seed a"
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat,
                         random_state='herring')
# -- Test Parameters -- #
@pytest.mark.parametrize('random_state', [np.random.RandomState,
                                          np.random.default_rng])
@pytest.mark.parametrize('permutation_type',
                         ['pairings', 'samples', 'independent'])
def test_batch(self, permutation_type, random_state):
    """`batch` bounds how many permutations each `statistic` call receives."""
    # make sure that the `batch` parameter is respected by checking the
    # maximum batch size provided in calls to `statistic`
    x = self.rng.random(10)
    y = self.rng.random(10)

    def statistic(x, y, axis):
        # Track call count and the largest batch (leading dimension) seen.
        batch_size = 1 if x.ndim == 1 else len(x)
        statistic.batch_size = max(batch_size, statistic.batch_size)
        statistic.counter += 1
        return np.mean(x, axis=axis) - np.mean(y, axis=axis)
    statistic.counter = 0
    statistic.batch_size = 0

    kwds = {'n_resamples': 1000, 'permutation_type': permutation_type,
            'vectorized': True}
    # batch=1: one call per resample plus the observed-statistic call.
    res1 = stats.permutation_test((x, y), statistic, batch=1,
                                  random_state=random_state(0), **kwds)
    assert_equal(statistic.counter, 1001)
    assert_equal(statistic.batch_size, 1)

    # batch=50: 20 batched calls plus the observed-statistic call.
    statistic.counter = 0
    res2 = stats.permutation_test((x, y), statistic, batch=50,
                                  random_state=random_state(0), **kwds)
    assert_equal(statistic.counter, 21)
    assert_equal(statistic.batch_size, 50)

    # batch=1000: all resamples in one call.
    statistic.counter = 0
    res3 = stats.permutation_test((x, y), statistic, batch=1000,
                                  random_state=random_state(0), **kwds)
    assert_equal(statistic.counter, 2)
    assert_equal(statistic.batch_size, 1000)

    # Same seed, so batching must not change the p-value.
    assert_equal(res1.pvalue, res3.pvalue)
    assert_equal(res2.pvalue, res3.pvalue)
@pytest.mark.parametrize('random_state', [np.random.RandomState,
                                          np.random.default_rng])
@pytest.mark.parametrize('permutation_type, exact_size',
                         [('pairings', special.factorial(3)**2),
                          ('samples', 2**3),
                          ('independent', special.binom(6, 3))])
def test_permutations(self, permutation_type, exact_size, random_state):
    """`n_resamples` controls the size of the null distribution."""
    # make sure that the `permutations` parameter is respected by checking
    # the size of the null distribution
    x = self.rng.random(3)
    y = self.rng.random(3)

    def statistic(x, y, axis):
        return np.mean(x, axis=axis) - np.mean(y, axis=axis)

    kwds = {'permutation_type': permutation_type,
            'vectorized': True}
    res = stats.permutation_test((x, y), statistic, n_resamples=3,
                                 random_state=random_state(0), **kwds)
    assert_equal(res.null_distribution.size, 3)

    # With the default `n_resamples`, the exhaustive (exact) null
    # distribution is produced — its size is the number of distinct
    # permutations for this permutation type.
    res = stats.permutation_test((x, y), statistic, **kwds)
    assert_equal(res.null_distribution.size, exact_size)
# -- Randomized Permutation Tests -- #

# To get reasonable accuracy, these next three tests are somewhat slow.
# Originally, I had them passing for all combinations of permutation type,
# alternative, and RNG, but that takes too long for CI. Instead, split
# into three tests, each testing a particular combination of the three
# parameters.

def test_randomized_test_against_exact_both(self):
    """Randomized and exact tests agree for permutation_type='independent'."""
    # check that the randomized and exact tests agree to reasonable
    # precision for permutation_type='both

    alternative, rng = 'less', 0

    # Keep the randomized resample count below the exact permutation count
    # so the randomized code path is actually exercised.
    nx, ny, permutations = 8, 9, 24000
    assert special.binom(nx + ny, nx) > permutations

    x = stats.norm.rvs(size=nx)
    y = stats.norm.rvs(size=ny)
    data = x, y

    def statistic(x, y, axis):
        return np.mean(x, axis=axis) - np.mean(y, axis=axis)

    kwds = {'vectorized': True, 'permutation_type': 'independent',
            'batch': 100, 'alternative': alternative, 'random_state': rng}
    res = permutation_test(data, statistic, n_resamples=permutations,
                           **kwds)
    # n_resamples=np.inf requests the exhaustive (exact) test.
    res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)

    assert res.statistic == res2.statistic
    assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
@pytest.mark.slow()
def test_randomized_test_against_exact_samples(self):
    """Randomized and exact tests agree for permutation_type='samples'."""
    # check that the randomized and exact tests agree to reasonable
    # precision for permutation_type='samples'

    alternative, rng = 'greater', None

    # Keep the randomized resample count below the 2**nx sign-flip count.
    nx, ny, permutations = 15, 15, 32000
    assert 2**nx > permutations

    x = stats.norm.rvs(size=nx)
    y = stats.norm.rvs(size=ny)
    data = x, y

    def statistic(x, y, axis):
        return np.mean(x - y, axis=axis)

    kwds = {'vectorized': True, 'permutation_type': 'samples',
            'batch': 100, 'alternative': alternative, 'random_state': rng}
    res = permutation_test(data, statistic, n_resamples=permutations,
                           **kwds)
    # n_resamples=np.inf requests the exhaustive (exact) test.
    res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)

    assert res.statistic == res2.statistic
    assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
def test_randomized_test_against_exact_pairings(self):
    """Randomized and exact tests agree for permutation_type='pairings'."""
    # check that the randomized and exact tests agree to reasonable
    # precision for permutation_type='pairings'

    alternative, rng = 'two-sided', self.rng

    # Keep the randomized resample count below the nx! pairing count.
    nx, ny, permutations = 8, 8, 40000
    assert special.factorial(nx) > permutations

    x = stats.norm.rvs(size=nx)
    y = stats.norm.rvs(size=ny)
    data = [x]

    def statistic1d(x):
        # `y` is captured from the enclosing scope; only `x` is permuted.
        return stats.pearsonr(x, y)[0]

    statistic = _resampling._vectorize_statistic(statistic1d)

    # NOTE(review): `permutation_type` is 'samples' here although this test
    # targets 'pairings' — confirm the two are interchangeable for a single
    # sample, or whether 'pairings' was intended.
    kwds = {'vectorized': True, 'permutation_type': 'samples',
            'batch': 100, 'alternative': alternative, 'random_state': rng}
    res = permutation_test(data, statistic, n_resamples=permutations,
                           **kwds)
    res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)

    assert res.statistic == res2.statistic
    assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
@pytest.mark.parametrize('alternative', ('less', 'greater'))
# Different conventions for two-sided p-value here VS ttest_ind.
# Eventually, we can add multiple options for the two-sided alternative
# here in permutation_test.
@pytest.mark.parametrize('permutations', (30, 1e9))
@pytest.mark.parametrize('axis', (0, 1, 2))
def test_against_permutation_ttest(self, alternative, permutations, axis):
    """permutation_test matches `ttest_ind(..., permutations=...)`."""
    # check that this function and ttest_ind with permutations give
    # essentially identical results.
    x = np.arange(3*4*5).reshape(3, 4, 5)
    y = np.moveaxis(np.arange(4)[:, None, None], 0, axis)

    rng1 = np.random.default_rng(4337234444626115331)
    res1 = stats.ttest_ind(x, y, permutations=permutations, axis=axis,
                           random_state=rng1, alternative=alternative)

    def statistic(x, y, axis):
        return stats.ttest_ind(x, y, axis=axis).statistic

    # Same seed so both routines draw identical permutations.
    rng2 = np.random.default_rng(4337234444626115331)
    res2 = permutation_test((x, y), statistic, vectorized=True,
                            n_resamples=permutations,
                            alternative=alternative, axis=axis,
                            random_state=rng2)

    assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol)
    assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol)
# -- Independent (Unpaired) Sample Tests -- #
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_ks_2samp(self, alternative):
    """permutation_test reproduces the exact p-value of `ks_2samp`."""
    x = self.rng.normal(size=4, scale=1)
    y = self.rng.normal(size=5, loc=3, scale=3)

    expected = stats.ks_2samp(x, y, alternative=alternative, mode='exact')

    def statistic1d(x, y):
        return stats.ks_2samp(x, y, mode='asymp',
                              alternative=alternative).statistic

    # ks_2samp is always a one-tailed 'greater' test
    # it's the statistic that changes (D+ vs D- vs max(D+, D-))
    res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                           alternative='greater', random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_ansari(self, alternative):
    """permutation_test reproduces the p-value of `ansari`."""
    x = self.rng.normal(size=4, scale=1)
    y = self.rng.normal(size=5, scale=3)

    # ansari has a different convention for 'alternative'
    alternative_correspondence = {"less": "greater",
                                  "greater": "less",
                                  "two-sided": "two-sided"}
    alternative_scipy = alternative_correspondence[alternative]
    expected = stats.ansari(x, y, alternative=alternative_scipy)

    def statistic1d(x, y):
        return stats.ansari(x, y).statistic

    res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                           alternative=alternative, random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_mannwhitneyu(self, alternative):
    """permutation_test reproduces the p-value of `mannwhitneyu` (vectorized)."""
    x = stats.uniform.rvs(size=(3, 5, 2), loc=0, random_state=self.rng)
    y = stats.uniform.rvs(size=(3, 5, 2), loc=0.05, random_state=self.rng)

    expected = stats.mannwhitneyu(x, y, axis=1, alternative=alternative)

    def statistic(x, y, axis):
        return stats.mannwhitneyu(x, y, axis=axis).statistic

    res = permutation_test((x, y), statistic, vectorized=True,
                           n_resamples=np.inf, alternative=alternative,
                           axis=1, random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
def test_against_cvm(self):
    """permutation_test reproduces the exact `cramervonmises_2samp` p-value."""
    x = stats.norm.rvs(size=4, scale=1, random_state=self.rng)
    y = stats.norm.rvs(size=5, loc=3, scale=3, random_state=self.rng)

    expected = stats.cramervonmises_2samp(x, y, method='exact')

    def statistic1d(x, y):
        return stats.cramervonmises_2samp(x, y,
                                          method='asymptotic').statistic

    # cramervonmises_2samp has only one alternative, greater
    res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                           alternative='greater', random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.xslow()
@pytest.mark.parametrize('axis', (-1, 2))
def test_vectorized_nsamp_ptype_both(self, axis):
    """3-sample 'independent' test with broadcastable nd samples."""
    # Test that permutation_test with permutation_type='independent' works
    # properly for a 3-sample statistic with nd array samples of different
    # (but compatible) shapes and ndims. Show that exact permutation test
    # and random permutation tests approximate SciPy's asymptotic pvalues
    # and that exact and random permutation test results are even closer
    # to one another (than they are to the asymptotic results).

    # Three samples, different (but compatible) shapes with different ndims
    rng = np.random.default_rng(6709265303529651545)
    x = rng.random(size=(3))
    y = rng.random(size=(1, 3, 2))
    z = rng.random(size=(2, 1, 4))
    data = (x, y, z)

    # Define the statistic (and pvalue for comparison)
    def statistic1d(*data):
        return stats.kruskal(*data).statistic

    def pvalue1d(*data):
        return stats.kruskal(*data).pvalue

    statistic = _resampling._vectorize_statistic(statistic1d)
    pvalue = _resampling._vectorize_statistic(pvalue1d)

    # Calculate the expected results
    x2 = np.broadcast_to(x, (2, 3, 3))  # broadcast manually because
    y2 = np.broadcast_to(y, (2, 3, 2))  # _vectorize_statistic doesn't
    z2 = np.broadcast_to(z, (2, 3, 4))
    expected_statistic = statistic(x2, y2, z2, axis=axis)
    expected_pvalue = pvalue(x2, y2, z2, axis=axis)

    # Calculate exact and randomized permutation results
    kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater',
            'permutation_type': 'independent', 'random_state': self.rng}
    res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds)
    res2 = permutation_test(data, statistic1d, n_resamples=1000, **kwds)

    # Check results
    assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
    assert_allclose(res.statistic, res2.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected_pvalue, atol=6e-2)
    assert_allclose(res.pvalue, res2.pvalue, atol=3e-2)
# -- Paired-Sample Tests -- #
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_wilcoxon(self, alternative):
    """One- and two-sample paired permutation tests both reproduce `wilcoxon`."""
    x = stats.uniform.rvs(size=(3, 6, 2), loc=0, random_state=self.rng)
    y = stats.uniform.rvs(size=(3, 6, 2), loc=0.05, random_state=self.rng)

    # We'll check both 1- and 2-sample versions of the same test;
    # we expect identical results to wilcoxon in all cases.
    def statistic_1samp_1d(z):
        # 'less' ensures we get the same of two statistics every time
        return stats.wilcoxon(z, alternative='less').statistic

    def statistic_2samp_1d(x, y):
        return stats.wilcoxon(x, y, alternative='less').statistic

    def test_1d(x, y):
        return stats.wilcoxon(x, y, alternative=alternative)

    test = _resampling._vectorize_statistic(test_1d)

    expected = test(x, y, axis=1)
    expected_stat = expected[0]
    expected_p = expected[1]

    kwds = {'vectorized': False, 'axis': 1, 'alternative': alternative,
            'permutation_type': 'samples', 'random_state': self.rng,
            'n_resamples': np.inf}
    res1 = permutation_test((x-y,), statistic_1samp_1d, **kwds)
    res2 = permutation_test((x, y), statistic_2samp_1d, **kwds)

    # `wilcoxon` returns a different statistic with 'two-sided'
    assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol)
    if alternative != 'two-sided':
        assert_allclose(res2.statistic, expected_stat, rtol=self.rtol)

    assert_allclose(res2.pvalue, expected_p, rtol=self.rtol)
    assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_binomtest(self, alternative):
    """Sign-flipping permutation test reproduces `binomtest`."""
    x = self.rng.integers(0, 2, size=10)
    x[x == 0] = -1
    # More naturally, the test would flip elements between 0 and one.
    # However, permutation_test will flip the _signs_ of the elements.
    # So we have to work with +1/-1 instead of 1/0.

    def statistic(x, axis=0):
        # Count of "successes" (positive elements).
        return np.sum(x > 0, axis=axis)

    k, n, p = statistic(x), 10, 0.5
    expected = stats.binomtest(k, n, p, alternative=alternative)

    res = stats.permutation_test((x,), statistic, vectorized=True,
                                 permutation_type='samples',
                                 n_resamples=np.inf, random_state=self.rng,
                                 alternative=alternative)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
# -- Exact Association Tests -- #
def test_against_kendalltau(self):
    """Pairings permutation test reproduces the exact `kendalltau` p-value."""
    x = self.rng.normal(size=6)
    y = x + self.rng.normal(size=6)

    expected = stats.kendalltau(x, y, method='exact')

    def statistic1d(x):
        # `y` is captured from the enclosing scope; only `x` is permuted.
        return stats.kendalltau(x, y, method='asymptotic').statistic

    # kendalltau currently has only one alternative, two-sided
    res = permutation_test((x,), statistic1d, permutation_type='pairings',
                           n_resamples=np.inf, random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided'))
def test_against_fisher_exact(self, alternative):
    """Pairings permutation test reproduces the `fisher_exact` p-value."""

    def statistic(x,):
        # Number of joint "successes"; `y` is captured from enclosing scope.
        return np.sum((x == 1) & (y == 1))

    # x and y are binary random variables with some dependence
    rng = np.random.default_rng(6235696159000529929)
    x = (rng.random(7) > 0.6).astype(float)
    y = (rng.random(7) + 0.25*x > 0.6).astype(float)
    tab = stats.contingency.crosstab(x, y)[1]

    res = permutation_test((x,), statistic, permutation_type='pairings',
                           n_resamples=np.inf, alternative=alternative,
                           random_state=rng)
    res2 = stats.fisher_exact(tab, alternative=alternative)

    assert_allclose(res.pvalue, res2[1])
@pytest.mark.xslow()
@pytest.mark.parametrize('axis', (-2, 1))
def test_vectorized_nsamp_ptype_samples(self, axis):
    """3-sample 'pairings' test with broadcastable nd samples."""
    # Test that permutation_test with permutation_type='samples' works
    # properly for a 3-sample statistic with nd array samples of different
    # (but compatible) shapes and ndims. Show that exact permutation test
    # reproduces SciPy's exact pvalue and that random permutation test
    # approximates it.
    x = self.rng.random(size=(2, 4, 3))
    y = self.rng.random(size=(1, 4, 3))
    z = self.rng.random(size=(2, 4, 1))
    x = stats.rankdata(x, axis=axis)
    y = stats.rankdata(y, axis=axis)
    z = stats.rankdata(z, axis=axis)
    y = y[0]  # to check broadcast with different ndim
    data = (x, y, z)

    def statistic1d(*data):
        return stats.page_trend_test(data, ranked=True,
                                     method='asymptotic').statistic

    def pvalue1d(*data):
        return stats.page_trend_test(data, ranked=True,
                                     method='exact').pvalue

    statistic = _resampling._vectorize_statistic(statistic1d)
    pvalue = _resampling._vectorize_statistic(pvalue1d)

    expected_statistic = statistic(*np.broadcast_arrays(*data), axis=axis)
    expected_pvalue = pvalue(*np.broadcast_arrays(*data), axis=axis)

    # Let's forgive this use of an integer seed, please.
    kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater',
            'permutation_type': 'pairings', 'random_state': 0}
    res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds)
    res2 = permutation_test(data, statistic1d, n_resamples=5000, **kwds)

    assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
    assert_allclose(res.statistic, res2.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected_pvalue, rtol=self.rtol)
    assert_allclose(res.pvalue, res2.pvalue, atol=3e-2)
# -- Test Against External References -- #
# Reference values for `test_with_ties`, computed externally with
# SAS PROC NPAR1WAY (see the docstring of `test_with_ties`).
tie_case_1 = {'x': [1, 2, 3, 4], 'y': [1.5, 2, 2.5],
              'expected_less': 0.2000000000,
              'expected_2sided': 0.4,  # 2*expected_less
              'expected_Pr_gte_S_mean': 0.3428571429,  # see note below
              'expected_statistic': 7.5,
              'expected_avg': 9.142857, 'expected_std': 1.40698}
tie_case_2 = {'x': [111, 107, 100, 99, 102, 106, 109, 108],
              'y': [107, 108, 106, 98, 105, 103, 110, 105, 104],
              'expected_less': 0.1555738379,
              'expected_2sided': 0.3111476758,
              'expected_Pr_gte_S_mean': 0.2969971205,  # see note below
              'expected_statistic': 32.5,
              'expected_avg': 38.117647, 'expected_std': 5.172124}
@pytest.mark.xslow()  # only the second case is slow, really
@pytest.mark.parametrize('case', (tie_case_1, tie_case_2))
def test_with_ties(self, case):
    """
    Exact Ansari-Bradley permutation test with tied data, checked against
    external reference values.

    Results above from SAS PROC NPAR1WAY, e.g.

    DATA myData;
    INPUT X Y;
    CARDS;
    1 1
    1 2
    1 3
    1 4
    2 1.5
    2 2
    2 2.5
    ods graphics on;
    proc npar1way AB data=myData;
        class X;
        EXACT;
    run;
    ods graphics off;

    Note: SAS provides Pr >= |S-Mean|, which is different from our
    definition of a two-sided p-value.
    """
    x = case['x']
    y = case['y']
    expected_statistic = case['expected_statistic']
    expected_less = case['expected_less']
    expected_2sided = case['expected_2sided']
    expected_Pr_gte_S_mean = case['expected_Pr_gte_S_mean']
    expected_avg = case['expected_avg']
    expected_std = case['expected_std']

    def statistic1d(x, y):
        return stats.ansari(x, y).statistic

    # `ansari` warns about ties; the permutation test sidesteps the issue.
    with np.testing.suppress_warnings() as sup:
        sup.filter(UserWarning, "Ties preclude use of exact statistic")
        res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                               alternative='less')
        res2 = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                                alternative='two-sided')

    assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected_less, atol=1e-10)
    assert_allclose(res2.pvalue, expected_2sided, atol=1e-10)
    assert_allclose(res2.null_distribution.mean(), expected_avg, rtol=1e-6)
    assert_allclose(res2.null_distribution.std(), expected_std, rtol=1e-6)

    # SAS provides Pr >= |S-Mean|; might as well check against that, too
    S = res.statistic
    mean = res.null_distribution.mean()
    n = len(res.null_distribution)
    Pr_gte_S_mean = np.sum(np.abs(res.null_distribution-mean)
                           >= np.abs(S-mean))/n
    assert_allclose(expected_Pr_gte_S_mean, Pr_gte_S_mean)
@pytest.mark.parametrize('alternative, expected_pvalue',
                         (('less', 0.9708333333333),
                          ('greater', 0.05138888888889),
                          ('two-sided', 0.1027777777778)))
def test_against_spearmanr_in_R(self, alternative, expected_pvalue):
    """
    Exact Spearman permutation test checked against R's `cor.test`.

    Results above from R cor.test, e.g.

    options(digits=16)
    x <- c(1.76405235, 0.40015721, 0.97873798,
           2.2408932, 1.86755799, -0.97727788)
    y <- c(2.71414076, 0.2488, 0.87551913,
           2.6514917, 2.01160156, 0.47699563)
    cor.test(x, y, method = "spearm", alternative = "t")
    """
    # data comes from
    # np.random.seed(0)
    # x = stats.norm.rvs(size=6)
    # y = x + stats.norm.rvs(size=6)
    x = [1.76405235, 0.40015721, 0.97873798,
         2.2408932, 1.86755799, -0.97727788]
    y = [2.71414076, 0.2488, 0.87551913,
         2.6514917, 2.01160156, 0.47699563]
    expected_statistic = 0.7714285714285715

    def statistic1d(x):
        # `y` is captured from the enclosing scope; only `x` is permuted.
        return stats.spearmanr(x, y).statistic

    res = permutation_test((x,), statistic1d, permutation_type='pairings',
                           n_resamples=np.inf, alternative=alternative)

    assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected_pvalue, atol=1e-13)
@pytest.mark.parametrize("batch", (-1, 0))
def test_batch_generator_iv(self, batch):
    """Non-positive `batch` makes `_batch_generator` raise ValueError."""
    with pytest.raises(ValueError, match="`batch` must be positive."):
        list(_resampling._batch_generator([1, 2, 3], batch))
# (iterable, batch size, expected batches) cases for `test_batch_generator`.
batch_generator_cases = [(range(0), 3, []),
                         (range(6), 3, [[0, 1, 2], [3, 4, 5]]),
                         (range(8), 3, [[0, 1, 2], [3, 4, 5], [6, 7]])]
@pytest.mark.parametrize("iterable, batch, expected",
                         batch_generator_cases)
def test_batch_generator(self, iterable, batch, expected):
    """`_batch_generator` yields full batches; the last may be partial."""
    got = list(_resampling._batch_generator(iterable, batch))
    assert got == expected
def test_finite_precision_statistic(self):
    """Numerically near-equal statistics count as ties in the p-value."""
    # Some statistics return numerically distinct values when the values
    # should be equal in theory. Test that `permutation_test` accounts
    # for this in some way.
    x = [1, 2, 4, 3]
    y = [2, 4, 6, 8]

    def statistic(x, y):
        return stats.pearsonr(x, y)[0]

    res = stats.permutation_test((x, y), statistic, vectorized=False,
                                 permutation_type='pairings')
    r, pvalue, null = res.statistic, res.pvalue, res.null_distribution

    # Two-sided p-value with a small tolerance so ties are counted.
    correct_p = 2 * np.sum(null >= r - 1e-14) / len(null)
    assert pvalue == correct_p == 1/3
    # Compare against other exact correlation tests using R corr.test
    # options(digits=16)
    # x = c(1, 2, 4, 3)
    # y = c(2, 4, 6, 8)
    # cor.test(x, y, alternative = "t", method = "spearman")  # 0.333333333
    # cor.test(x, y, alternative = "t", method = "kendall")  # 0.333333333
def test_all_partitions_concatenated():
    """`_all_partitions_concatenated` yields every distinct partition once.

    Partitions sum(sizes) observations into groups of the given sizes and
    checks both the total count and uniqueness against the multinomial
    coefficient.
    """
    sizes = np.array([3, 2, 4], dtype=int)
    boundaries = np.cumsum(sizes)

    seen = set()
    count = 0
    for concatenated in _resampling._all_partitions_concatenated(sizes):
        count += 1
        groups = np.split(concatenated, boundaries[:-1])
        # Frozensets make the partition hashable and order-insensitive.
        seen.add(tuple(frozenset(g) for g in groups))

    # Multinomial coefficient as a product of successive binomial choices.
    expected = np.prod([special.binom(sum(sizes[i:]), sum(sizes[i+1:]))
                        for i in range(len(sizes)-1)])

    assert_equal(count, expected)
    assert_equal(len(seen), expected)
@pytest.mark.parametrize('fun_name',
                         ['bootstrap', 'permutation_test', 'monte_carlo_test'])
def test_parameter_vectorized(fun_name):
    """`vectorized` is inferred and respected by all resampling functions."""
    # Check that parameter `vectorized` is working as desired for all
    # resampling functions. Results don't matter; just don't fail asserts.
    rng = np.random.default_rng(75245098234592)
    sample = rng.random(size=10)

    def rvs(size):  # needed by `monte_carlo_test`
        return stats.norm.rvs(size=size, random_state=rng)

    fun_options = {'bootstrap': {'data': (sample,), 'random_state': rng,
                                 'method': 'percentile'},
                   'permutation_test': {'data': (sample,), 'random_state': rng,
                                        'permutation_type': 'samples'},
                   'monte_carlo_test': {'sample': sample, 'rvs': rvs}}
    common_options = {'n_resamples': 100}

    fun = getattr(stats, fun_name)
    options = fun_options[fun_name]
    options.update(common_options)

    def statistic(x, axis):
        # Vectorized statistics receive stacked resamples (ndim > 1) or
        # the original sample itself.
        assert x.ndim > 1 or np.array_equal(x, sample)
        return np.mean(x, axis=axis)
    fun(statistic=statistic, vectorized=None, **options)
    fun(statistic=statistic, vectorized=True, **options)

    def statistic(x):
        # Non-vectorized statistics are always called with 1-d samples.
        assert x.ndim == 1
        return np.mean(x)
    fun(statistic=statistic, vectorized=None, **options)
    fun(statistic=statistic, vectorized=False, **options)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@stats@tests@test_resampling.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/waterfall/insidetextfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the `waterfall.insidetextfont.color` property."""

    def __init__(
        self, plotly_name="color", parent_name="waterfall.insidetextfont", **kwargs
    ):
        # Supply defaults only when the caller has not overridden them.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@waterfall@insidetextfont@_color.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "ramstojh/terra",
"repo_path": "terra_extracted/terra-master/terra/__init__.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Package initializer: re-export everything public from the `terra`
# submodule so callers can use `import terra` directly.
from .terra import *
|
ramstojhREPO_NAMEterraPATH_START.@terra_extracted@terra-master@terra@__init__.py@.PATH_END.py
|
{
"filename": "paper.md",
"repo_name": "JLBLine/WODEN",
"repo_path": "WODEN_extracted/WODEN-master/joss_paper/paper.md",
"type": "Markdown"
}
|
---
title: '`WODEN`: A CUDA-enabled package to simulate low-frequency radio interferometric data'
tags:
- Python
- C
- CUDA
- radio astronomy
- interferometers
authors:
- name: Jack L. B. Line
orcid: 0000-0002-9130-5920
affiliation: "1, 2" # (Multiple affiliations must be quoted)
affiliations:
- name: International Centre for Radio Astronomy Research, Curtin Institute of Radio Astronomy, Perth, WA 6102, Australia
index: 1
- name: ARC Centre of Excellence for All Sky Astrophysics in 3 Dimensions (ASTRO 3D)
index: 2
date: XXX
bibliography: paper.bib
---
# Summary
`WODEN` is designed to simulate the response of a class of telescope known as an interferometer, producing output "visibilities" for a given astrophysical sky model. Simulated observations allow us to test other software packages that are designed to calibrate and analyse real interferometric data, including verifying expected behaviour with known inputs, and testing new sky modelling techniques. The `WODEN` sky model can be specified in Dirac-delta like functions on the sky (known in the field as "point sources"), elliptical Gaussian models, or built out of "shapelet" basis functions, allowing complicated morphologies to be created. Users are able to input a bespoke layout for the interferometer, vary a number of observational parameters including time of day, length of observation and frequency coverage, and select from a number of predefined primary beams which encode the response of the receiving elements of an interferometer. This allows simulations of a number of telescopes to be undertaken. `WODEN` works with input Stokes $I,Q,U,V$ polarisations as a sky model, simulating telescopes with dual linear polarisations, and outputting linear Stokes polarisations.
The core functionality of `WODEN` is written in CUDA as interferometric simulations are computationally intensive but embarrassingly parallel. The performance of CUDA allows for large-scale simulations to be run including emission from all directions in the sky. This is paramount for interferometers with a wide field of view such as the Murchison Widefield Array [MWA, @Tingay2013]. A Python wrapper is used to take advantage of community packages such as [astropy](https://www.astropy.org/) [@astropy2013; @astropy2018] and [pyerfa](https://pypi.org/project/pyerfa/) [@pyerfa] and to present a user-friendly interface to `WODEN`. Those simulating MWA observations can use the MWA `metafits` file to quickly feed in observational parameters to `WODEN` to match real data.
`WODEN` can be run to two levels of precision: a `woden_float` precision (which uses a mix of 32 and 64 bit floating precision), and a `woden_double` (which uses nearly entirely 64 bit precision). In the section titled "Estimation of accuracy and computational speed" below, `WODEN` is shown to produce visibilities to within 0.2% of the expected values when running in `woden_float` mode, and 0.000002% in `woden_double` mode, for baselines of length $\le 10$km.
# Underlying methodology
An interferometer creates visibilities $V$ by cross-correlating signals detected between pairs of antennas or dishes (baselines), described by coordinates $u,v,w$. Each visibility is sensitive to the entire sky, directions of which we describe by the direction cosines $l,m,n$. Ignoring the antenna response, the full integral over the sky can be discretised as
\begin{equation} \label{eq:RIME}
V_s(u_i,v_i,w_i) = \\ \sum_j \mathcal{S}_s(l_j,m_j) \exp[-2\pi i(u_il_j + v_im_j + w_i(n_j-1))],
\end{equation}
where $u_i,v_i,w_i$ are the visibility coordinates of the $i^{\mathrm{th}}$ baseline, $l_j$, $m_j$, $n_j$ is the sky position of the $j^{\mathrm{th}}$ component in the sky model, and $\mathcal{S}(l_j,m_j)$ is the flux density of that component in a given Stokes polarisation $s$. `WODEN` simulates dual-linear polarisation antennas, with each
antenna/station having its own primary beam shape. I can define the response of a dual polarisation antenna to direction $l,m$ as
$$
\mathbf{J}(l,m) =
\begin{bmatrix}
g_{\mathrm{ns}}(l,m) & D_{\mathrm{ns}}(l,m) \\
D_{\mathrm{ew}}(l,m) & g_{\mathrm{ew}}(l,m)
\end{bmatrix},
$$
where $g$ are gain terms, $D$ are leakage terms, and $\mathrm{ns}$ refers to north-south and $\mathrm{ew}$ east-west aligned antennas. When calculating the cross-correlation responses from antennas 1 and 2 towards direction $l,m$ to produce linear polarisation visibilities, these gains and leakages interact with the four Stokes polarisations $I,Q,U,V$ as
\begin{equation}\label{eq:RIME_full}
\begin{bmatrix}
V_{12\,XX}(l,m) \\
V_{12\,XY}(l,m) \\
V_{12\,YX}(l,m) \\
V_{12\,YY}(l,m)
\end{bmatrix} =
\mathbf{J}_1(l,m) \otimes \mathbf{J}_2^*(l,m)
\begin{bmatrix}
1 & 1 & 0 & 0 \\
0 & 0 & 1 & i \\
0 & 0 & 1 & -i \\
1 & -1 & 0 & 0
\end{bmatrix}
\begin{bmatrix}
V_{12\,I}(l,m) \\
V_{12\,Q}(l,m) \\
V_{12\,U}(l,m) \\
V_{12\,V}(l,m)
\end{bmatrix}
\end{equation}
where $*$ denotes a complex conjugate, and $\otimes$ an outer product (the result
of this outer product is written explicitly in the `WODEN` documentation [here](https://woden.readthedocs.io/en/joss_review/operating_principles/visibility_calcs.html)). For each baseline, frequency, and time step, `WODEN` calculates all four linear Stokes polarisations ($V_{XX}, V_{XY}, V_{YX}, V_{YY}$) as defined above for all $l_j,m_j$ in the sky model, and then sums over $j$, to produce four full-sky linear Stokes polarisation visibilities per baseline/frequency/time.
For a telescope like the MWA, the primary beam $\mathbf{J}(l,m)$ is a complicated pattern on the sky, which is sensitive to emission from directly overhead to all the way down to the horizon. To truly capture the effects of astrophysical foregrounds we therefore have to simulate the entire sky. The MWA Fully Embedded Element [FEE, @Sokolowski2017] model is currently the most accurate representation of the MWA primary beam, and is incorporated into `WODEN`.
As the sky model of `WODEN` is a list of Right Ascension and Declinations with associated flux densities, the user has full control over the projection of the sky into visibilities. To simulate discrete foregrounds, one can simply input any sky catalogue specified in RA/Dec. For diffuse sky models, one could for example input a list of point source/elliptical Gaussians following the HEALPix projection [@HEALPix2005], or employ a TAN or SIN FITS [@FITS2002] projection. ``WODEN`` will simply calculate the measurement equation for all directions in the sky model.
# Statement of need
Under this discrete sky formalism, upwards of $j\ge25\times10^6$ components can be required to achieve the angular resolution required. Furthermore, $u,v,w$ are time and frequency dependent, so to sample in frequency of order 500 times and 100 samples in time, there are of order $10^{12}$ visibility calculations to make. This makes CUDA acceleration paramount.
Alternative approaches to interferometric simulations exist, such as [pyuvsim](https://github.com/RadioAstronomySoftwareGroup/pyuvsim) [@Lanman2019], which sacrifices speed for excellent precision, and [RIMEz](https://github.com/upenneor/rimez), which decomposes the sky into spherical harmonics rather than discrete points. `WODEN` was designed with the Australian MWA Epoch of Reionisation (EoR) processing pipeline in mind, which uses a calibration and foreground removal software called the `RTS` [@Mitchell2008] in search of signals from the very first stars [see @Yoshiura2021 for a recent use of this pipeline]. The `RTS` creates a sky model using the same formalism above, however the code is not optimised enough to handle the volume of sources to simulate the entire sky. To test the `RTS` method of sky generation, we therefore needed a fast and discretised method. Another excellent CUDA accelerated simulation package, [OSKAR](https://github.com/OxfordSKA/OSKAR) [@OSKAR], addresses these two points. However, the `RTS` also generates parts of the sky model via shapelets [see @Line2020 for an overview], which `OSKAR` cannot. Furthermore, in real data, the precession/nutation of the Earth's rotational axis causes sources to move from the sky coordinates as specified in the RA, DEC J2000 coordinate system. The `RTS` is designed to undo this precession/nutation, and so a simulation fed into the `RTS` should *contain* precession. `WODEN` adds in this precession using the same method as the `RTS` to be consistent. This unique combination of CUDA, shapelet foregrounds, the MWA FEE primary beam, along with source precession, created the need for `WODEN`. These effects should not preclude other calibration packages from using `WODEN` outputs however, meaning `WODEN` is not limited to feeding data into the `RTS` alone.
# Estimation of accuracy and computational speed
The goal of this section is to test the accuracy of the functionality of `WODEN`, including reading of inputs, the array coordinate calculations, the precession/nutation correction, $l,m,n$ and $u,v,w$ calculations, flux density frequency extrapolation via spectral index, calculation of Equation \ref{eq:RIME_full}, and writing out of the data to `uvfits` files.
To test the absolute accuracy of `WODEN`, we first need a set of input parameters that have an analytically predictable outcome. If we ignore the beam response and polarisation, set the flux density of a source to one, and consider a single baseline and sky direction, the measurement equation (Equation \ref{eq:RIME}) becomes[^1]
[^1]: Note there is no negative at the front inside the exponential for $V(u,v,w)$. After numerous comparisons to other simulation packages, and imaging to check the input source positions match, I find dropping the negative gives the correct outputs.
\begin{equation} \label{eq:RIME_simple}
V(u,v,w) = \exp[2\pi i(ul + vm + w(n-1))].
\end{equation}
We can use Euler's formula to split $V$ into real and imaginary components. If
I label the phase for a particular source and baseline as
$$
\phi = 2\pi \left( ul + vm + w(n-1)\right)
$$
then the real and imaginary parts of the visibility $V_{re}$, $V_{im}$ are
$$
V_{re} = \cos(\phi), \quad V_{im} = \sin(\phi).
$$
If we can therefore set $\phi$ to a number of values which produce known sine and cosine outputs, by selecting specific combinations of $u,v,w$ and $l,m,n$, we can simulate visibilities with predictable outputs. First of all, consider the simplified case $\phi_{\mathrm{simple}}$ when $u,v,w = 1,1,1$. In that case,
$$
\frac{\phi_{\mathrm{simple}}}{2\pi} = l + m + (n-1).
$$
If we further set $l = m$, we end up with
$$
\begin{aligned}
\frac{\phi_{\mathrm{simple}}}{2\pi} = 2l + (n-1), \\
l = \sqrt{\left( \frac{1 - n^2}{2} \right)}
\end{aligned}
$$
It can be shown (via [Wolfram Alpha](https://www.wolframalpha.com/widgets/view.jsp?id=c07cc70f1e81887dfd0971d3fe17cfcd)) that a solution for $n$ is
$$
n = \frac{\sqrt{2}\sqrt{-\phi_{\mathrm{simple}}^2 - 4\pi\phi_{\mathrm{simple}} + 8\pi^2} + \phi_{\mathrm{simple}} + 2\pi}{6\pi}
$$
which we can then use to calculate values for $l,m$ through
$$
l = m = \sqrt{\frac{1 - n^2}{2}}.
$$
Practically then, if we input the following combinations of $l,m,n$ into Equation \ref{eq:RIME_simple} our output visibilities should exactly match the $\cos(\phi)$, $\sin(\phi)$ values.
\begin{table}[h]
\begin{center}
\begin{tabular}{ c c c c c }
\hline
$\phi_{\mathrm{simple}}$ & $l,m$ & $n$ & $\cos(\phi)$ & $\sin(\phi)$ \\
\hline
\hline
$0$ & 0.0 & 1.0 & $1.0$ & $0$ \\
$\pi/6$ & 0.0425737516338956 & 0.9981858300655398 & $\sqrt{3}/2$ & $0.5$ \\
$\pi/4$ & 0.0645903244635131 & 0.9958193510729726 & $\sqrt{2}/2$ & $\sqrt{2}/2$ \\
$\pi/3$ & 0.0871449863555500 & 0.9923766939555675 & $0.5$ & $\sqrt{3}/2$ \\
$\pi/2$ & 0.1340695840364469 & 0.9818608319271057 & $0.0$ & $1.0$ \\
$2\pi/3$ & 0.1838657911209207 & 0.9656017510914922 & $-0.5$ & $\sqrt{3}/2$ \\
$3\pi/4$ & 0.2100755148372292 & 0.9548489703255412 & $-\sqrt{2}/2$ & $\sqrt{2}/2$ \\
$5\pi/6$ & 0.2373397982598921 & 0.9419870701468823 & $-\sqrt{3}/2$ & $0.5$ \\
$\pi$ & 0.2958758547680685 & 0.9082482904638630 & $-1.0$ & $0.0$ \\
$7\pi/6$ & 0.3622725654470420 & 0.8587882024392495 & $-\sqrt{3}/2$ & $-0.5$ \\
$5\pi/4$ & 0.4003681253515569 & 0.8242637492968862 & $-\sqrt{2}/2$ & $-\sqrt{2}/2$ \\
\hline
\end{tabular}
\caption{$l,m,n$ combinations used in accuracy test}
\label{tab:lmn_combos}
\end{center}
\end{table}
To test for a range of baseline lengths, we can make a simplification where we set all baseline coordinates to be equal, i.e. $u = v = w = b$ where $b$ is some length in units of wavelength. In this form, the phase including the baseline length $\phi_{b}$ is
$$
\phi_{b} = 2\pi b\left( l + m + n - 1 \right) = b\phi_{\mathrm{simple}}.
$$
As sine/cosine are periodic functions, the following is true:
$$
\phi_{\mathrm{simple}} = \phi_{\mathrm{simple}} + 2\pi \mathrm{n}
$$
where $\mathrm{n}$ is some integer. This means for a given $\phi_{\mathrm{simple}}$, we can find an appropriate $b$ that should still result in the expected sine and cosine outputs by setting
\begin{gather*}
b\phi_{\mathrm{simple}} = \phi_{\mathrm{simple}} + 2\pi \mathrm{n}, \\
b = \frac{\phi_{\mathrm{simple}} + 2\pi \mathrm{n}}{\phi_{\mathrm{simple}}}
\end{gather*}
for a range of $\mathrm{n}$ values. The values of $\mathrm{n}$ and the resultant size of b that I use in testing are shown in Table \ref{tab:b_values}.
\begin{table}[h]
\begin{center}
\begin{tabular}{c c c c c c }
\hline
$\phi_{\mathrm{simple}}$ & $b(\mathrm{n=1})$ & $b(\mathrm{n=10})$ & $b(\mathrm{n=100})$ & $b(\mathrm{n=1000})$ & $b(\mathrm{n=10000})$ \\
\hline
\hline
$0$ & 6.3 & 62.8 & 628.3 & 6283.2 & 62831.9 \\
$\pi/6$ & 13.0 & 121.0 & 1201.0 & 12001.0 & 120001.0 \\
$\pi/4$ & 9.0 & 81.0 & 801.0 & 8001.0 & 80001.0 \\
$\pi/3$ & 7.0 & 61.0 & 601.0 & 6001.0 & 60001.0 \\
$\pi/2$ & 5.0 & 41.0 & 401.0 & 4001.0 & 40001.0 \\
$2\pi/3$ & 4.0 & 31.0 & 301.0 & 3001.0 & 30001.0 \\
$3\pi/4$ & 3.7 & 27.7 & 267.7 & 2667.7 & 26667.7 \\
$5\pi/6$ & 3.4 & 25.0 & 241.0 & 2401.0 & 24001.0 \\
$\pi$ & 3.0 & 21.0 & 201.0 & 2001.0 & 20001.0 \\
$7\pi/6$ & 2.7 & 18.1 & 172.4 & 1715.3 & 17143.9 \\
$5\pi/4$ & 2.6 & 17.0 & 161.0 & 1601.0 & 16001.0 \\
\hline
\end{tabular}
\caption{Range of baseline lengths used in conjunction with the $l,m,n$ coordinates in Table~\ref{tab:lmn_combos}.}
\label{tab:b_values}
\end{center}
\end{table}
`WODEN` reads in an input array layout specified in local east, north, height $E,N,H$ coordinates. It then converts those into local $X,Y,Z$ coordinates via the equations
\begin{gather}
X = -\sin(\phi_{\mathrm{lat}})N + \cos(\phi_{\mathrm{lat}})H \label{eq:xyz_calc1} \\
Y = E \label{eq:xyz_calc2} \\
Z = \cos(\phi_{\mathrm{lat}})N + \sin(\phi_{\mathrm{lat}})H \label{eq:xyz_calc3}
\end{gather}
where $\phi_{\mathrm{lat}}$ is the latitude of the array. $X,Y,Z$ are used to calculate the $u,v,w$ coodinates (c.f. Chapter 4 in @TMSthird). If we place our interferometer at a $\phi_{\mathrm{lat}} = 0.0^\circ$ and set the local sidereal time (LST) to zero, the calculation of $u,v,w$ becomes
\begin{equation}\label{eq:uvw_simple}
u = E; \, v = N; \, w = H;
\end{equation}
allowing us to set $E, N, H = b$ for our values on $b$ in Table \ref{tab:b_values}. Furthermore, we can convert our $l,m$ values from Table \ref{tab:lmn_combos} into RA,Dec ($\alpha, \delta$) via:
\begin{gather}
\delta = \arcsin(l) \label{eq:dec_simple} \\
\alpha = \arcsin \left( \frac{l}{\cos(\arcsin(l))} \right) \label{eq:ra_simple}
\end{gather}
Following the `RTS`, `WODEN` first of all calculates *X,Y,Z* using the array latitude at the time of the observation. It then uses the `PAL` [@PAL2013] [palPrenut](https://github.com/Starlink/pal/blob/master/palPrenut.c) function to generate a rotation matrix to rotate the local *X,Y,Z* coordinates back to the J2000 epoch, as well as the LST and latitude of the array. This accounts for the precession/nutation of the Earth with respect to the J2000 RA/Dec coordinates that the sky model is specified in. To manifest the outcomes of Equations \ref{eq:uvw_simple}, \ref{eq:dec_simple}, and \ref{eq:ra_simple}, we have to apply the opposite rotation about $\phi_{\mathrm{lat}}$ as defined by Equations \ref{eq:xyz_calc1}, \ref{eq:xyz_calc2}, and \ref{eq:xyz_calc3}, as well as the rotations applied via [palPrenut](https://github.com/Starlink/pal/blob/master/palPrenut.c) to account for precession/nutation, to our input $E,N,H$ coordinates.
Figure \ref{fig:WODEN_accuracy} shows the result of running multiple simulations, each with: an array layout with a single baseline; a single time step and frequency channel; a single point source sky model; a primary beam model with gains of one and zero leakage. All possible combinations of $l,m,n$ and $b$ as listed in Tables \ref{tab:lmn_combos} and \ref{tab:b_values} are run. Each simulation is run with the parameters specified in Table \ref{tab:acc_sim_settings}.
\renewcommand{\arraystretch}{1.4}
\begin{table}[h]
\begin{center}
\begin{tabular}{p{0.25\linewidth} p{0.15\linewidth} p{0.5\linewidth}}
\hline
Parameter & Value & Manifestation in simulation \\
\hline
\hline
Date (UTC) & 2020-01-01 12:00:00.0 & \texttt{WODEN} must correct for precession and nutation \\
Latitude (deg) & 0.1095074 & After precess/nut correction, latitude is 0.0$^\circ$ \\
Longitude (deg) & 79.6423588 & After precess/nut correction, LST is 0.0$^\circ$ \\
Frequency (MHz) & 299.792458 & Means $\lambda = 1$, so wavelength scaled $u,v,w = E,N,H$ \\
Reference frequency for sky model (MHz) & 150 & \texttt{WODEN} has to extrapolate the flux density \\
Spectral Index & -0.8 & Needed to extrapolate flux density \\
Reference Stokes I flux density (Jy) & 1.7401375 & Should be extrapolated to a flux of 1.0 at the simulation frequency \\
\hline
\hline
\end{tabular}
\caption{Common settings for the simulations run to produce the results in Figure \ref{fig:WODEN_accuracy}}
\label{tab:acc_sim_settings}
\end{center}
\end{table}

All array layouts, sky models, and simulations are run by `WODEN/test_installation/absolute_accuracy/run_the_absolute_accuracy_test.sh`, which can be run as part of a test suite bundled with `WODEN`. This script reads the values out of the output `uvfits` files, and produces the plot in Figure \ref{fig:WODEN_accuracy}.
[Version 1.0](https://github.com/JLBLine/WODEN/releases/tag/v1.0.0) of `WODEN` was fully 32 bit, which produced the green triangles in Figure \ref{fig:WODEN_accuracy}, with longer baselines consistently a few percent off expectations. A two-times processing slowdown from moving to a combined 32 and 64 bit `woden_float` mode (orange squares) improves the accuracy to $\le 0.2$% on the longer baselines. The entirely 64 bit `woden_double` precision mode is consistent in precision across baseline length, sitting at < 2e-6% accuracy. The `woden_float` and `woden_double` executables are available in [Version 1.1](https://github.com/JLBLine/WODEN/releases/tag/v1.1.0), and can be switched between via a command line option in `run_woden.py`. It should be noted that these offset errors are deterministic, meaning comparisons between different simulations out of [Version 1.0](https://github.com/JLBLine/WODEN/releases/tag/v1.0.0) `WODEN` are consistent; these errors matter most when comparing to real data.
As 32 and 64 bit precision calculations are performed in physically different parts of an NVIDIA GPU, with cards typically having less double precision hardware than single, the `woden_double` version is slower than the `woden_float`. Each card will show a different slow-down between the two modes. As a test, I ran a simulation using a catalogue of over 300,000 sources. The number of sources above the horizon and the simulation settings used are listed in Table \ref{tab:benchmark_sim}, along with the speed difference between the `woden_float` and `woden_double` executables for two different NVIDIA GPU cards.
\renewcommand{\arraystretch}{1}
\begin{table}[h]
\begin{center}
\begin{tabular}{l l}
\hline
Parameters & Value \\
\hline
\hline
Time steps & 14 \\
Frequency channels & 80 \\
Point sources components & 207673 \\
Gaussian components & 1182 \\
Shapelet components (basis functions) & 62 (10400) \\
Primary beam model & MWA FEE \\
GTX 1080 Ti \texttt{woden\_{}float} simulation time & 10min 39sec \\
GTX 1080 Ti \texttt{woden\_{}double} simulation time & 55min 46sec \\
V100 \texttt{woden\_{}float} simulation time & 4min 35sec \\
V100 \texttt{woden\_{}double} simulation time & 5min 55sec \\
\end{tabular}
\caption{Benchmark simulation to compare \texttt{woden\_{}float} and \texttt{woden\_{}double} speeds. Each shapelet component can have several basis function calculations, each more expensive than a point source component calculation. The MWA FEE is the most computationally expensive beam model included with \texttt{WODEN}.}
\label{tab:benchmark_sim}
\end{center}
\end{table}
Given this > 5 times slow down on a desktop card, having the option to toggle between `woden_float` and `woden_double` allows quick experimentation using `woden_float` and longer science-quality runs with `woden_double`. Luckily, for cards like the V100, the slowdown is around 1.3. Note that these simulations can easily be broken up and run across multiple GPUs if available, reducing the real time taken to complete the simulations.
# Example application
In @Line2020, we compared two methods to model Fornax A: a combination of point and elliptical Gaussians, compared to shapelets (see Figure \ref{fig:ForA}). We were able to quickly compare the computational efficiency of the methods using a desktop, and comment on their respective strengths and weaknesses in regard to foreground removal for EoR purposes. Furthermore, as we could control the simulations, we could compare the methods in the absence of other processing systematics that are present in the real data from the MWA, which dominated the comparison when using the `RTS` alone.
![Two methods to simulate Fornax A visibilities are compared here [both imaged using `WSClean` @Offringa2014; @Offringa2017], with point and elliptical Gaussians on the left, and shapelets on the right.\label{fig:ForA}](FornaxA_model_comparison.png)
# Documentation
The documentation for `WODEN` can be found on Read the Docs at [woden.readthedocs.io](https://woden.readthedocs.io/en/latest/), including a detailed installation guide, ways to test a local installation, details of the calculations `WODEN` makes under the hood, and worked examples, which are also included in the `GitHub` repo.
# Acknowledgements
I acknowledge direct contributions from Tony Farlie (who taught me how pointer arithmetic works in `C`) and contributions from Bart Pindor and Daniel Mitchell (through their work in the `RTS` and through advising me on `CUDA`). I would like to thank Chris Jordan who acted as a sounding board as I learned `C` and `CUDA`. Finally, I would like to thank both Matthew Kolopanis and Paul La Plante for reviewing the code and giving useful suggestions on how to improve the code.
This research was supported by the Australian Research Council Centre of Excellence for All Sky Astrophysics in 3 Dimensions (ASTRO 3D), through project number CE170100013. The International Centre for Radio Astronomy Research (ICRAR) is a Joint Venture of Curtin University and The University of Western Australia, funded by the Western Australian State government. This work was supported by resources provided by the Pawsey Supercomputing Centre with funding from the Australian Government and the Government of Western Australia.
# References
|
JLBLineREPO_NAMEWODENPATH_START.@WODEN_extracted@WODEN-master@joss_paper@paper.md@.PATH_END.py
|
{
"filename": "_ysrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/cone/_ysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="ysrc", parent_name="cone", **kwargs):
super(YsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@cone@_ysrc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/pymc/step_methods/__init__.py",
"type": "Python"
}
|
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Step methods."""
from pymc.step_methods.compound import BlockedStep, CompoundStep
from pymc.step_methods.hmc import NUTS, HamiltonianMC
from pymc.step_methods.metropolis import (
BinaryGibbsMetropolis,
BinaryMetropolis,
CategoricalGibbsMetropolis,
CauchyProposal,
DEMetropolis,
DEMetropolisZ,
LaplaceProposal,
Metropolis,
MultivariateNormalProposal,
NormalProposal,
PoissonProposal,
UniformProposal,
)
from pymc.step_methods.slicer import Slice
# Other step methods can be added by appending to this list
# NOTE(review): presumably this registry is consulted when PyMC automatically
# assigns a step method to each free variable — confirm against pymc.sampling.
STEP_METHODS: list[type[BlockedStep]] = [
    NUTS,
    HamiltonianMC,
    Metropolis,
    BinaryMetropolis,
    BinaryGibbsMetropolis,
    Slice,
    CategoricalGibbsMetropolis,
]
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@pymc@step_methods@__init__.py@.PATH_END.py
|
{
"filename": "_outlinewidth.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/heatmap/colorbar/_outlinewidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OutlinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `heatmap.colorbar.outlinewidth` property."""

    def __init__(
        self, plotly_name="outlinewidth", parent_name="heatmap.colorbar", **kwargs
    ):
        # Supply defaults only when the caller has not overridden them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        super(OutlinewidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@heatmap@colorbar@_outlinewidth.py@.PATH_END.py
|
{
"filename": "eos.py",
"repo_name": "PrincetonUniversity/athena",
"repo_path": "athena_extracted/athena-master/tst/regression/scripts/utils/EquationOfState/eos.py",
"type": "Python"
}
|
import numpy as np
from scipy.optimize import brentq
from scipy.interpolate import RectBivariateSpline as RBS
import sys
from . import brent_opt
class EOS(object):
    """Abstract base class defining the equation-of-state interface.

    Concrete equations of state override the hooks below; any hook left
    unimplemented raises NotImplementedError, which `valid` uses to decide
    whether an instance is usable.
    """

    def __init__(self):
        """Set up flags common to every EOS."""
        self.ideal = False  # True only for ideal-gas subclasses
        self.indep = None   # independent variable besides density, if any

    def valid(self):
        """Return True if the required hooks are implemented, else False."""
        try:
            # Probe both required hooks with dummy arguments; an abstract
            # implementation raises NotImplementedError.
            self.asq_of_rho_p(1, 1)
            self.ei_of_rho_p(1, 1)
        except NotImplementedError:
            return False
        return True

    def asq_of_rho_p(self, rho, p):
        """Adiabatic sound speed^2 as a function of density (rho) and pressure (p)"""
        raise NotImplementedError

    def ei_of_rho_p(self, rho, p):
        """Internal energy density as a function of density (rho) and pressure (p)"""
        raise NotImplementedError

    def es_of_rho_p(self, rho, p):
        """Specific internal energy as a function of density (rho) and pressure (p)"""
        # Specific (per-mass) energy is just energy density over density.
        return self.ei_of_rho_p(rho, p) / rho

    def p_of_rho_es(self, rho, es):
        """Pressure as a function of density (rho) and specific internal energy (es)"""
        raise NotImplementedError
class SimpleHydrogen(EOS):
"""Simple hydrogen equation of state"""
def __init__(self):
super(SimpleHydrogen, self).__init__()
self.indep = 'T' # Temperature is the independent variable other than density
self.T_of_rho_ei = np.vectorize(self._T_of_rho_ei)
self.T_of_rho_p = np.vectorize(self._T_of_rho_p)
self.T_of_rho_h = np.vectorize(self._T_of_rho_h)
def _phi(self, T):
return np.exp(1. / T - 1.5 * np.log(T))
def _x(self, rho, T):
"""Ionization fraction"""
with np.errstate(over='ignore'):
return 2. / (1 + np.sqrt(1 + 4.
* np.exp(1. / T - 1.5 * np.log(T) + np.log(rho))))
def _x_T(self, rho, T):
"""Temperature derivative of ionization fraction"""
x = self._x(rho, T)
return x**3 / (2. + x) * np.exp(1. / T - 3.5 * np.log(T)) * (1. + 1.5 * T) * rho
def p_of_rho_T(self, rho, T):
"""Pressure as a function of density (rho) and temperature (T)"""
return rho * T * (1. + self._x(rho, T))
def ei_of_rho_T(self, rho, T):
"""Internal energy density as a function of density (rho) and temperature (T)"""
return self._x(rho, T) * rho + 1.5 * self.p_of_rho_T(rho, T)
def _b(self, rho, T):
lt = np.log(T)
c1 = np.exp(-1.25 * lt - .5 / T)
c2 = np.exp(1.5 * lt - 1. / T)
return 8. * rho * c1 / (np.sqrt(c2) + np.sqrt(c2 + 4. * rho))**3
def gamma1(self, rho, T):
"""Gamma_1 as a function of density (rho) and temperature (T)"""
x = self._x(rho, T)
b = self._b(rho, T)
return (b * (4. + 20. * T + 15. * T**2) + 10. * (2. + x - x**2)) /\
(b * (2. + 3. * T)**2 + 6.*(2. + x - x**2))
def asq_of_rho_T(self, rho, T):
"""Adiabatic sound speed^2 as a function of density (rho) and temperature (T)"""
return T * (1. + self._x(rho, T)) * self.gamma1(rho, T)
def asq_of_rho_p(self, rho, p):
"""Adiabatic sound speed^2 as a function of density (rho) and pressure (p)"""
return p * self.gamma1(rho, self.T_of_rho_p(rho, p)) / rho
def asq_of_rho_h(self, rho, h):
"""Adiabatic sound speed^2 function of density (rho) and specific enthalpy (h)"""
return self.asq_of_rho_T(rho, self.T_of_rho_h(rho, h))
def _T_of_rho_h(self, rho, h):
"""Temperature as a function of density (rho) and specific enthalpy (h)"""
t1 = .4 * h * (1. + sys.float_info.epsilon)
def f(y):
return (self.p_of_rho_T(rho, y) + self.ei_of_rho_T(rho, y)) / (h * rho) - 1.
T, r = brentq(f, .1 * t1, t1, **brent_opt)
if not r.converged:
raise RuntimeError('Unable to converge on temperature.')
return T
def _T_of_rho_p(self, rho, p):
    """Invert p(rho, T) for the temperature via bracketed root finding.

    Tries a tight bracket around the ideal-gas estimate T ~ p / rho first,
    widening it when brentq reports the root is not bracketed (ValueError).
    Raises RuntimeError when the solver reports non-convergence.
    """
    guess = p / rho * (1. + sys.float_info.epsilon)

    def residual(temp):
        return self.p_of_rho_T(rho, temp) / p - 1.

    try:
        root, res = brentq(residual, .1 * guess, guess, **brent_opt)
    except ValueError:
        # Root not bracketed by the tight interval; retry with a wider one.
        root, res = brentq(residual, .05 * guess, 2 * guess, **brent_opt)
    if not res.converged:
        raise RuntimeError('Unable to converge on temperature.')
    return root
def _T_of_rho_ei(self, rho, ei):
    """Invert ei(rho, T) for the temperature via bracketed root finding.

    Brackets the root around the ideal-gas estimate T ~ ei / rho; raises
    RuntimeError when the solver reports non-convergence.
    """
    guess = ei / rho * (1. + sys.float_info.epsilon)

    def residual(temp):
        return self.ei_of_rho_T(rho, temp) / ei - 1.

    root, res = brentq(residual, .05 * guess, 2 * guess, **brent_opt)
    if not res.converged:
        raise RuntimeError('Unable to converge on temperature.')
    return root
def ei_of_rho_p(self, rho, p):
    """Internal energy density from density ``rho`` and pressure ``p``."""
    # Invert p -> T, then evaluate the (rho, T) form.
    temperature = self.T_of_rho_p(rho, p)
    return self.ei_of_rho_T(rho, temperature)
def p_of_rho_es(self, rho, es):
    """Pressure from density ``rho`` and specific internal energy ``es``."""
    # Specific energy -> energy density, invert for T, then evaluate p.
    temperature = self.T_of_rho_ei(rho, rho * es)
    return self.p_of_rho_T(rho, temperature)
class Ideal(EOS):
    """Ideal-gas (gamma-law) equation of state.

    Parameters
    ----------
    gamma : float
        Adiabatic index; must be strictly greater than 1.
    R : float, optional
        Ideal gas constant (default 1).
    """

    def __init__(self, gamma, R=1):
        """Validate and store the adiabatic index and gas constant."""
        if gamma <= 1:
            raise ValueError('The value for gamma must be larger than 1.')
        super(Ideal, self).__init__()
        self.ideal = True
        self._g = gamma
        self._gm1 = gamma - 1.
        self.R = R

    def gamma(self):
        """Return the adiabatic index."""
        return self._g

    def p_of_rho_T(self, rho, T):
        """Pressure from the ideal-gas law, p = rho * R * T."""
        return rho * T * self.R

    def p_of_rho_ei(self, rho, ei):
        """Pressure from internal energy density, p = (gamma - 1) * ei."""
        return ei * self._gm1

    def p_of_rho_es(self, rho, es):
        """Pressure from specific internal energy, p = (gamma - 1) * rho * es."""
        return rho * es * self._gm1

    def ei_of_rho_p(self, rho, p):
        """Internal energy density, ei = p / (gamma - 1)."""
        return p / self._gm1

    def T_of_rho_p(self, rho, p):
        """Temperature from the ideal-gas law, T = p / (rho * R)."""
        return p / (rho * self.R)

    def T_of_rho_ei(self, rho, ei):
        """Temperature from internal energy density."""
        return ei * self._gm1 / (rho * self.R)

    def asq_of_rho_p(self, rho, p):
        """Adiabatic sound speed squared, a^2 = gamma * p / rho."""
        return self._g * p / rho
class TestIdeal(Ideal):
    """Ideal EOS variant used to check that the Riemann solver's general
    (non-ideal) code path reproduces the gamma-law answer."""

    def __init__(self, gamma, R=1):
        super(TestIdeal, self).__init__(gamma, R=R)
        # Report as non-ideal so the general branch is exercised, with
        # pressure as the independent energy-like variable.
        self.ideal = False
        self.indep = 'p'
class AthenaTable(EOS):
    """Tabulated equation of state evaluated by bilinear interpolation in log space.

    Holds one 2D grid per variable ('p', 'e', 'asq_p', plus any names in
    ``add_var``), indexed by log10(density) and a log10 energy-like
    coordinate scaled by ``density**dens_pow``.
    """

    def __init__(self, data, lrho, le, ratios=None, indep=None, dens_pow=-1, fn=None,
                 add_var=None):
        """Build per-variable interpolators over the (lrho, le) grid.

        Parameters
        ----------
        data : ndarray
            Stack of tables, first axis enumerating the variables.
        lrho, le : array_like
            log10 density and log10 energy-like grid coordinates.
        ratios : array_like, optional
            Per-variable scale factors shifting the energy axis (default 1).
        indep : str, optional
            Name of the independent energy-like variable (default 'ei').
        dens_pow : int, optional
            Density power normalising the energy coordinate (default -1,
            i.e. tables are indexed by log10(e / rho)).
        fn : str, optional
            Originating file name, stored for reference only.
        add_var : list of str, optional
            Names of additional tabulated variables beyond the standard three.
        """
        # NOTE(review): `super(EOS, self)` resolves *past* EOS in the MRO and
        # therefore skips EOS.__init__; the sibling classes call
        # `super(<OwnClass>, self)`.  Left unchanged here because EOS.__init__
        # side effects are not visible -- confirm intent.
        super(EOS, self).__init__()
        self.fn = fn
        if ratios is None:
            ratios = np.ones(data.shape[0])
        lr = np.log(ratios)
        # NOTE(review): natural log, while the grid coordinates are log10 --
        # verify against the table generator.
        self._lr = lr
        if indep is None:
            indep = 'ei'
        self.indep = indep
        self.data = data
        self.lrho = lrho
        self.le = le
        self.dens_pow = dens_pow
        var = ['p', 'e', 'asq_p']
        if add_var is not None:
            var.extend(add_var)
        # Bilinear (kx = ky = 1) spline per variable over log10 of the table
        # values; `.ev` provides pointwise evaluation.
        d = {var[i]: RBS(lrho, le + lr[i], np.log10(data[i].T), kx=1, ky=1).ev
             for i in range(len(var))}
        self._interp_dict = d

    def _interp(self, rho, e, var):
        """Interpolate table ``var`` at density ``rho`` and energy-like value ``e``."""
        ld = np.log10(rho)
        # Energy coordinate is log10(e * rho**dens_pow): log10(e / rho) for
        # the default dens_pow = -1.
        return 10**self._interp_dict[var](ld, np.log10(e) + self.dens_pow * ld)

    def asq_of_rho_p(self, rho, p):
        """Adiabatic sound speed^2 as a function of density (rho) and pressure (p)."""
        return self._interp(rho, p, 'asq_p') * p / rho

    def ei_of_rho_p(self, rho, p):
        """Internal energy density as a function of density (rho) and pressure (p)."""
        return self._interp(rho, p, 'e') * p

    def es_of_rho_p(self, rho, p):
        """Specific internal energy as a function of density (rho) and pressure (p)."""
        return self._interp(rho, p, 'e') * p / rho

    def p_of_rho_ei(self, rho, ei):
        """Pressure as a function of density (rho) and internal energy density (ei)."""
        return self._interp(rho, ei, 'p') * ei

    def p_of_rho_es(self, rho, es):
        """Pressure as a function of density (rho) and specific internal energy (es)."""
        # BUG FIX: es is energy per unit mass, so the energy *density* is
        # rho * es.  The previous code passed es / rho, inverting the
        # conversion; cf. Ideal.p_of_rho_es and the hydrogen p_of_rho_es.
        return self.p_of_rho_ei(rho, rho * es)
def parse_eos(eos):
    """Coerce *eos* into an equation-of-state instance.

    Accepts an existing EOS-like object (anything exposing
    ``asq_of_rho_p``), the string 'H'/'h' for simple hydrogen, or any value
    parseable as a float, which is treated as an ideal-gas gamma.

    Raises
    ------
    ValueError
        If *eos* cannot be interpreted as any of the above.
    """
    if hasattr(eos, 'asq_of_rho_p'):
        # Duck-typed: already an EOS instance.
        return eos
    if eos in ('H', 'h'):
        return SimpleHydrogen()
    try:
        # Ideal() itself raises ValueError for gamma <= 1, which is likewise
        # reported as an unparseable EOS.
        return Ideal(float(eos))
    except ValueError:
        raise ValueError('Cannot parse EOS "{0:}".'.format(eos))
|
PrincetonUniversityREPO_NAMEathenaPATH_START.@athena_extracted@athena-master@tst@regression@scripts@utils@EquationOfState@eos.py@.PATH_END.py
|
{
"filename": "_texttemplate.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2d/_texttemplate.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TexttemplateValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``texttemplate`` property of ``histogram2d`` traces."""

    def __init__(self, plotly_name="texttemplate", parent_name="histogram2d", **kwargs):
        # Default edit type is "plot"; callers may override it via kwargs.
        edit_type = kwargs.pop("edit_type", "plot")
        super(TexttemplateValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2d@_texttemplate.py@.PATH_END.py
|
{
"filename": "clean.py",
"repo_name": "aasensio/hazel",
"repo_path": "hazel_extracted/hazel-master/runPy/clean.py",
"type": "Python"
}
|
"""Remove build artifacts left over from compiling the pyhazel extension.

Portable replacement for the original `os.system('rm ...')` calls: uses
shutil/pathlib, and -- like the originals, whose exit status was never
checked -- silently tolerates artifacts that do not exist.
"""
import shutil
from contextlib import suppress
from pathlib import Path

# The whole compiled-extension build tree.
shutil.rmtree('build', ignore_errors=True)

# Generated Cython source and the compiled shared library.
for artifact in ('pyhazel.c', 'pyhazel.so'):
    with suppress(FileNotFoundError):
        Path(artifact).unlink()

# os.system('python setup.py build_ext --inplace')
|
aasensioREPO_NAMEhazelPATH_START.@hazel_extracted@hazel-master@runPy@clean.py@.PATH_END.py
|
{
"filename": "merian.py",
"repo_name": "legacysurvey/imagine",
"repo_path": "imagine_extracted/imagine-main/merian.py",
"type": "Python"
}
|
from glob import glob
from astrometry.util.fits import fits_table
from astrometry.util.util import Tan
import fitsio

# Build a FITS table of per-brick WCS/footprint metadata for Merian coadds.
# Find 'brick' files for N540, assume the same ones exist for other bands.


class duck(object):
    # Minimal attribute container used to collect per-file WCS values.
    pass


fns = glob('data/merian/deepCoadd_calexp/*/*/N540/deepCoadd_calexp_*_N540_*.fits')
T = fits_table()
wcscols = ['crval1','crval2','crpix1','crpix2','cd1_1','cd1_2','cd2_1','cd2_2']
# One python-list column per output field; converted to numpy arrays at the
# end via to_np_arrays().
for col in ['filename', 'ra','dec', 'ra1','ra2','dec1','dec2', 'naxis1','naxis2',
            'brickname'] + wcscols:
    T.set(col, [])
for fn in fns:
    # ext=1: compressed image HDU, hence the ZNAXIS* keywords below.
    hdr = fitsio.read_header(fn, ext=1)
    fn = fn.replace('data/merian/','')  # store paths relative to the data root
    T.filename.append(fn)
    t = duck()
    for col in wcscols:
        v = hdr[col.upper()]
        T.get(col).append(v)
        setattr(t, col, v)
    v = hdr['ZNAXIS1']
    T.naxis1.append(v)
    t.width = v
    v = hdr['ZNAXIS2']
    T.naxis2.append(v)
    t.height = v
    t.filename = fn
    #wcs = Tan(fn, 1)
    # Build the TAN WCS from the header values just read (avoids re-opening
    # the file).
    wcs = Tan(*[float(x) for x in
                [t.crval1, t.crval2, t.crpix1, t.crpix2, t.cd1_1, t.cd1_2, t.cd2_1, t.cd2_2,
                 t.width, t.height]])
    r,d = wcs.radec_center()
    T.ra.append(r)
    T.dec.append(d)
    # RA extent sampled along the horizontal mid-line, Dec extent along the
    # vertical mid-line (1-indexed FITS pixel convention).
    midy = (t.height+1)/2.
    midx = (t.width+1)/2.
    rr,dd = wcs.pixelxy2radec([1, t.width], [midy,midy])
    T.ra1.append(min(rr))
    T.ra2.append(max(rr))
    rr,dd = wcs.pixelxy2radec([midx,midx], [1, t.height])
    T.dec1.append(min(dd))
    T.dec2.append(max(dd))
    # Brick name from two path components; assumes the layout
    # <...>/<tract>/<patch>/N540/<file> -- TODO confirm.
    parts = t.filename.strip().split('/')
    T.brickname.append('%s_%s' % (parts[-4], parts[-3]))
T.to_np_arrays()
T.writeto('merian-bricks.fits')
|
legacysurveyREPO_NAMEimaginePATH_START.@imagine_extracted@imagine-main@merian.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "lsst/cp_verify",
"repo_path": "cp_verify_extracted/cp_verify-main/pipelines/README.md",
"type": "Markdown"
}
|
# Pipeline Definitions
This directory contains pipeline definition YAML files which are used to verify calibration products with the LSST Science Pipelines.
The pipelines defined here come in three flavors: camera-specific (within named directories), camera-agnostic (top-level, if any), and building-block ingredients (within the [\_ingredients](_ingredients) directory).
Pipelines within the ingredients directory are meant to be imported by other pipelines, and are not intended to be used directly by end-users.
The `pipetask build` command can be used to expand a pipeline YAML and resolve any imports for the purposes of visualizing it.
For example, to visualize the verification of a bias for the [LATISS camera pipeline](https://github.com/lsst/cp_verify/blob/main/pipelines/Latiss/VerifyBias.yaml) pipeline, run:
```bash
pipetask build \
-p $CP_VERIFY_DIR/pipelines/Latiss/VerifyBias.yaml \
--show pipeline
```
and, to display the resolved configuration of the corresponding calibration-generation pipeline from `cp_pipe`:
```bash
pipetask build \
-p $CP_PIPE_DIR/pipelines/Latiss/cpBias.yaml \
--show config
```
All pipelines are checked for basic validity and importability in `test_pipelines.py`.
If adding a new camera to this directory, please update the associated list of cameras in `test_pipelines.py` and add tests for the pipelines that are defined for that camera.
The contents of this directory are checked against expectations, and you will get test failures otherwise.
Your future self will thank you for adding validity tests for your new pipelines!
|
lsstREPO_NAMEcp_verifyPATH_START.@cp_verify_extracted@cp_verify-main@pipelines@README.md@.PATH_END.py
|
{
"filename": "calculate_posteriors.py",
"repo_name": "jfcrenshaw/pzflow-paper",
"repo_path": "pzflow-paper_extracted/pzflow-paper-main/src/scripts/photo-z/calculate_posteriors.py",
"type": "Python"
}
|
"""Use the pz flow ensemble to estimate redshift posteriors for test set galaxies."""
import numpy as np
from load_pzflow_catalog import load_pzflow_catalog
from pzflow import FlowEnsemble
from showyourwork.paths import user as Paths
# instantiate the paths
paths = Paths()
# load the flow ensemble
flowEns = FlowEnsemble(file=paths.data / "pz_ensemble" / "pz_ensemble.pzflow.pkl")
# load the test set
test_set = load_pzflow_catalog(subset="test")
# setup the redshift grid
grid = np.linspace(0, 3.5, 351)
# calculate posteriors
pdfs = flowEns.posterior(
test_set[:100_000],
"redshift",
grid,
err_samples=10,
batch_size=100,
)
# save the posteriors
np.savez(paths.data / "redshift_posteriors.npz", grid=grid, pdfs=pdfs)
|
jfcrenshawREPO_NAMEpzflow-paperPATH_START.@pzflow-paper_extracted@pzflow-paper-main@src@scripts@photo-z@calculate_posteriors.py@.PATH_END.py
|
{
"filename": "_uid.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattersmith/_uid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``uid`` property of ``scattersmith`` traces."""

    def __init__(self, plotly_name="uid", parent_name="scattersmith", **kwargs):
        # Default edit type is "plot"; callers may override it via kwargs.
        edit_type = kwargs.pop("edit_type", "plot")
        super(UidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattersmith@_uid.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattersmith/unselected/textfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``scattersmith.unselected.textfont.color``."""

    def __init__(
        self,
        plotly_name="color",
        parent_name="scattersmith.unselected.textfont",
        **kwargs,
    ):
        # Default edit type is "style"; callers may override it via kwargs.
        edit_type = kwargs.pop("edit_type", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattersmith@unselected@textfont@_color.py@.PATH_END.py
|
{
"filename": "testblob.py",
"repo_name": "dstndstn/tractor",
"repo_path": "tractor_extracted/tractor-main/projects/inference/testblob.py",
"type": "Python"
}
|
# Cut SDSS postage stamps around (ra, dec) in all five bands and write the
# matching source catalog, with calibration info recorded in the headers.
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')  # headless backend; must precede any pylab import
import pylab as plt
import numpy as np
import fitsio
from astrometry.util.fits import *
from astrometry.util.util import *
from astrometry.util.plotutils import *
from astrometry.sdss import *
from tractor import *
from tractor.sdss import *

if __name__ == '__main__':
    sdss = DR9()
    run,camcol,field = 1463,4,55
    ra,dec = 270.0, 0.003
    radius = 0.003  # degrees
    bands = 'ugriz'
    stamp_pattern = 'stamp-%s.fits'
    catfn = 'cat.fits'
    plots = False  # when True, writes tim-*.png / mod-*.png diagnostics

    # Retrieve SDSS catalog sources in the field
    srcband = 'r'
    # sqrt(2) pads the search radius to cover the stamp's corners.
    srcs = get_tractor_sources_dr9(run, camcol, field, bandname=srcband,
                                   sdss=sdss,
                                   radecrad=(ra, dec, radius*np.sqrt(2.)),
                                   nanomaggies=True)
    print('Got sources:')
    for src in srcs:
        print(' ', src)
    # photoField supplies per-band gain and dark variance.
    fn = sdss.retrieve('photoField', run, camcol, field)
    print('Retrieved', fn)
    F = fits_table(fn)
    F.cut((F.run == run) * (F.camcol == camcol) * (F.field == field))
    print(len(F), 'fields')
    assert(len(F) == 1)
    F = F[0]

    # Retrieve SDSS images
    tims = []
    tinfs = []
    pixscale = 0.396/3600.  # SDSS pixel scale, degrees/pixel
    for band in bands:
        pixradius = radius / pixscale
        tim,tinfo = get_tractor_image_dr9(run, camcol, field, band, sdss=sdss,
                                          roiradecsize=(ra, dec, pixradius),
                                          nanomaggies=True)
        print('Got tim:', tim)
        frame = sdss.readFrame(run, camcol, field, band)
        x,y = tim.getWcs().positionToPixel(RaDecPos(ra, dec))
        x,y = int(x), int(y)
        # Grab calibration information also
        tim.sdss_calib = np.median(frame.getCalibVec())
        tim.sdss_sky = frame.getSkyAt(x,y)
        iband = band_index(band)
        tim.sdss_gain = F.gain[iband]
        tim.sdss_darkvar = F.dark_variance[iband]
        tims.append(tim)
        tinfs.append(tinfo)

        if band == 'r':
            # Cut sources to img bbox
            keep = []
            h,w = tim.shape
            for i,src in enumerate(srcs):
                x,y = tim.getWcs().positionToPixel(src.getPosition())
                if x < 0 or y < 0 or x >= w or y >= h:
                    continue
                keep.append(i)
            srcs = Catalog(*[srcs[i] for i in keep])
            print('Cut sources:')
            for src in srcs:
                print(' ', src)

    # Write out the sources
    T = fits_table()
    T.ra = [src.getPosition().ra for src in srcs]
    T.dec = [src.getPosition().dec for src in srcs]
    for band in bands:
        T.set('psfflux_%s' % band,
              [src.getBrightness().getBand(band) for src in srcs])
    T.writeto(catfn)

    # Write out the images
    for band,tim,tinfo in zip(bands, tims, tinfs):
        roi = tinfo['roi']
        x0,x1,y0,y1 = roi

        if plots:
            plt.clf()
            img = tim.getImage()
            # Display stretch from the 25th/99th percentiles.
            mn,mx = [np.percentile(img,p) for p in [25,99]]
            dimshow(img, vmin=mn, vmax=mx)
            xx,yy = [],[]
            for src in srcs:
                x,y = tim.getWcs().positionToPixel(src.getPosition())
                xx.append(x)
                yy.append(y)
            ax = plt.axis()
            plt.plot(xx, yy, 'r+')
            plt.axis(ax)
            plt.savefig('tim-%s.png' % band)

        # Simple TAN WCS centered on (ra, dec); +1 converts to FITS
        # 1-indexed pixel convention.
        cd = tim.getWcs().cdAtPixel((x0+x1)/2., (y0+y1)/2.)
        print('CD at center:', cd)
        crpix1,crpix2 = tim.getWcs().positionToPixel(RaDecPos(ra, dec))
        crpix1 += 1
        crpix2 += 1
        # NOTE(review): w,h are left over from the r-band bbox cut above;
        # this assumes all bands share the same stamp size -- confirm.
        wcs = Tan(ra, dec, crpix1, crpix2, cd[0,0],cd[0,1],cd[1,0],cd[1,1],w,h)
        twcs = ConstantFitsWcs(wcs)

        if plots:
            xx,yy = [],[]
            for src in srcs:
                x,y = twcs.positionToPixel(src.getPosition())
                xx.append(x)
                yy.append(y)
            ax = plt.axis()
            plt.plot(xx, yy, 'go', mec='g', mfc='none')
            plt.axis(ax)
            plt.savefig('tim-%s.png' % band)

            # Model-image rendering stays inside the plots branch: it reuses
            # the mn/mx stretch computed above.
            tractor = Tractor([tim], srcs)
            mod = tractor.getModelImage(0)
            plt.clf()
            dimshow(mod, vmin=mn, vmax=mx)
            plt.savefig('mod-%s.png' % band)

        # Record WCS, ROI offsets, SDSS identifiers and calibration values
        # in the stamp's primary header.
        hdr = fitsio.FITSHDR()
        wcs.add_to_header(hdr)
        hdr.add_record(dict(name='X0', value=x0,
                            comment='X pixel offset in full SDSS image'))
        hdr.add_record(dict(name='Y0', value=y0,
                            comment='Y pixel offset in full SDSS image'))
        hdr.add_record(dict(name='RUN', value=run, comment='SDSS run'))
        hdr.add_record(dict(name='CAMCOL', value=camcol, comment='SDSS camcol'))
        hdr.add_record(dict(name='FIELD', value=field, comment='SDSS field'))
        hdr.add_record(dict(name='BAND', value=band, comment='SDSS band'))
        # Copy from input "frame" header
        orighdr = tinfo['hdr']
        for key in ['NMGY']:
            hdr.add_record(dict(name=key, value=orighdr[key],
                                comment=orighdr.get_comment(key)))
        hdr.add_record(dict(name='CALIB', value=tim.sdss_calib,
                            comment='Mean "calibvec" value for this image'))
        hdr.add_record(dict(name='SKY', value=tim.sdss_sky,
                            comment='SDSS sky estimate at image center'))
        hdr.add_record(dict(name='GAIN', value=tim.sdss_gain,
                            comment='SDSS gain'))
        hdr.add_record(dict(name='DARKVAR', value=tim.sdss_darkvar,
                            comment='SDSS dark variance'))
        tim.getPsf().toFitsHeader(hdr, 'PSF_')

        # HDU 0: image (with header); HDU 1: inverse variance.
        fn = stamp_pattern % band
        fitsio.write(fn, tim.getImage(), clobber=True,
                     header=hdr)
        fitsio.write(fn, tim.getInvvar())
|
dstndstnREPO_NAMEtractorPATH_START.@tractor_extracted@tractor-main@projects@inference@testblob.py@.PATH_END.py
|
{
"filename": "_ohlc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/_ohlc.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Ohlc(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "ohlc"
_valid_props = {
"close",
"closesrc",
"customdata",
"customdatasrc",
"decreasing",
"high",
"highsrc",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"increasing",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"line",
"low",
"lowsrc",
"meta",
"metasrc",
"name",
"opacity",
"open",
"opensrc",
"selectedpoints",
"showlegend",
"stream",
"text",
"textsrc",
"tickwidth",
"type",
"uid",
"uirevision",
"visible",
"x",
"xaxis",
"xcalendar",
"xhoverformat",
"xperiod",
"xperiod0",
"xperiodalignment",
"xsrc",
"yaxis",
"yhoverformat",
"zorder",
}
# close
# -----
@property
def close(self):
"""
Sets the close values.
The 'close' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["close"]
@close.setter
def close(self, val):
self["close"] = val
# closesrc
# --------
@property
def closesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `close`.
The 'closesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["closesrc"]
@closesrc.setter
def closesrc(self, val):
self["closesrc"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# decreasing
# ----------
@property
def decreasing(self):
"""
The 'decreasing' property is an instance of Decreasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Decreasing`
- A dict of string/value properties that will be passed
to the Decreasing constructor
Supported dict properties:
line
:class:`plotly.graph_objects.ohlc.decreasing.Li
ne` instance or dict with compatible properties
Returns
-------
plotly.graph_objs.ohlc.Decreasing
"""
return self["decreasing"]
@decreasing.setter
def decreasing(self, val):
self["decreasing"] = val
# high
# ----
@property
def high(self):
"""
Sets the high values.
The 'high' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["high"]
@high.setter
def high(self, val):
self["high"] = val
# highsrc
# -------
@property
def highsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `high`.
The 'highsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["highsrc"]
@highsrc.setter
def highsrc(self, val):
self["highsrc"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for `align`.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for `bgcolor`.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for `bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for `namelength`.
split
Show hover information (open, close, high, low)
in separate labels.
Returns
-------
plotly.graph_objs.ohlc.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# increasing
# ----------
@property
def increasing(self):
"""
The 'increasing' property is an instance of Increasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Increasing`
- A dict of string/value properties that will be passed
to the Increasing constructor
Supported dict properties:
line
:class:`plotly.graph_objects.ohlc.increasing.Li
ne` instance or dict with compatible properties
Returns
-------
plotly.graph_objs.ohlc.Increasing
"""
return self["increasing"]
@increasing.setter
def increasing(self, val):
self["increasing"] = val
# legend
# ------
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.ohlc.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# legendwidth
# -----------
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
Note that this style setting can also be set
per direction via `increasing.line.dash` and
`decreasing.line.dash`.
width
[object Object] Note that this style setting
can also be set per direction via
`increasing.line.width` and
`decreasing.line.width`.
Returns
-------
plotly.graph_objs.ohlc.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# low
# ---
@property
def low(self):
"""
Sets the low values.
The 'low' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["low"]
@low.setter
def low(self, val):
self["low"] = val
# lowsrc
# ------
@property
def lowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `low`.
The 'lowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["lowsrc"]
@lowsrc.setter
def lowsrc(self, val):
self["lowsrc"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# open
# ----
@property
def open(self):
"""
Sets the open values.
The 'open' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["open"]
@open.setter
def open(self, val):
self["open"] = val
# opensrc
# -------
@property
def opensrc(self):
"""
Sets the source reference on Chart Studio Cloud for `open`.
The 'opensrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opensrc"]
@opensrc.setter
def opensrc(self, val):
self["opensrc"] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
    # stream
    # ------
    @property
    def stream(self):
        """
        The 'stream' property is an instance of Stream
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.ohlc.Stream`
          - A dict of string/value properties that will be passed
            to the Stream constructor

            Supported dict properties:

                maxpoints
                    Sets the maximum number of points to keep on
                    the plots from an incoming stream. If
                    `maxpoints` is set to 50, only the newest 50
                    points will be displayed on the plot.
                token
                    The stream id number links a data trace on a
                    plot with a stream. See https://chart-
                    studio.plotly.com/settings for more details.

        Returns
        -------
        plotly.graph_objs.ohlc.Stream
        """
        return self["stream"]

    @stream.setter
    def stream(self, val):
        self["stream"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets hover text elements associated with each sample point. If
        a single string, the same string appears over all the data
        points. If an array of string, the items are mapped in order to
        this trace's sample points.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # textsrc
    # -------
    @property
    def textsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `text`.

        The 'textsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["textsrc"]

    @textsrc.setter
    def textsrc(self, val):
        self["textsrc"] = val

    # tickwidth
    # ---------
    @property
    def tickwidth(self):
        """
        Sets the width of the open/close tick marks relative to the "x"
        minimal interval.

        The 'tickwidth' property is a number and may be specified as:
          - An int or float in the interval [0, 0.5]

        Returns
        -------
        int|float
        """
        return self["tickwidth"]

    @tickwidth.setter
    def tickwidth(self, val):
        self["tickwidth"] = val

    # uid
    # ---
    @property
    def uid(self):
        """
        Assign an id to this trace, Use this to provide object
        constancy between traces during animations and transitions.

        The 'uid' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["uid"]

    @uid.setter
    def uid(self, val):
        self["uid"] = val
    # uirevision
    # ----------
    @property
    def uirevision(self):
        """
        Controls persistence of some user-driven changes to the trace:
        `constraintrange` in `parcoords` traces, as well as some
        `editable: true` modifications such as `name` and
        `colorbar.title`. Defaults to `layout.uirevision`. Note that
        other user-driven trace attribute changes are controlled by
        `layout` attributes: `trace.visible` is controlled by
        `layout.legend.uirevision`, `selectedpoints` is controlled by
        `layout.selectionrevision`, and `colorbar.(x|y)` (accessible
        with `config: {editable: true}`) is controlled by
        `layout.editrevision`. Trace changes are tracked by `uid`,
        which only falls back on trace index if no `uid` is provided.
        So if your app can add/remove traces before the end of the
        `data` array, such that the same trace has a different index,
        you can still preserve user-driven changes if you give each
        trace a `uid` that stays with it as it moves.

        The 'uirevision' property accepts values of any type.

        Returns
        -------
        Any
        """
        return self["uirevision"]

    @uirevision.setter
    def uirevision(self, val):
        self["uirevision"] = val

    # visible
    # -------
    @property
    def visible(self):
        """
        Determines whether or not this trace is visible. If
        "legendonly", the trace is not drawn, but can appear as a
        legend item (provided that the legend itself is visible).

        The 'visible' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                [True, False, 'legendonly']

        Returns
        -------
        Any
        """
        return self["visible"]

    @visible.setter
    def visible(self, val):
        self["visible"] = val

    # x
    # -
    @property
    def x(self):
        """
        Sets the x coordinates. If absent, linear coordinate will be
        generated.

        The 'x' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series.

        Returns
        -------
        numpy.ndarray
        """
        return self["x"]

    @x.setter
    def x(self, val):
        self["x"] = val

    # xaxis
    # -----
    @property
    def xaxis(self):
        """
        Sets a reference between this trace's x coordinates and a 2D
        cartesian x axis. If "x" (the default value), the x coordinates
        refer to `layout.xaxis`. If "x2", the x coordinates refer to
        `layout.xaxis2`, and so on.

        The 'xaxis' property is an identifier of a particular
        subplot, of type 'x', that may be specified as the string 'x'
        optionally followed by an integer >= 1
        (e.g. 'x', 'x1', 'x2', 'x3', etc.)

        Returns
        -------
        str
        """
        return self["xaxis"]

    @xaxis.setter
    def xaxis(self, val):
        self["xaxis"] = val

    # xcalendar
    # ---------
    @property
    def xcalendar(self):
        """
        Sets the calendar system to use with `x` date data.

        The 'xcalendar' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['chinese', 'coptic', 'discworld', 'ethiopian',
                'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
                'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
                'thai', 'ummalqura']

        Returns
        -------
        Any
        """
        return self["xcalendar"]

    @xcalendar.setter
    def xcalendar(self, val):
        self["xcalendar"] = val
    # xhoverformat
    # ------------
    @property
    def xhoverformat(self):
        """
        Sets the hover text formatting rulefor `x` using d3 formatting
        mini-languages which are very similar to those in Python. For
        numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
        dates see: https://github.com/d3/d3-time-
        format/tree/v2.2.3#locale_format. We add two items to d3's date
        formatter: "%h" for half of the year as a decimal number as
        well as "%{n}f" for fractional seconds with n digits. For
        example, *2016-10-13 09:15:23.456* with tickformat
        "%H~%M~%S.%2f" would display *09~15~23.46*By default the values
        are formatted using `xaxis.hoverformat`.

        The 'xhoverformat' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["xhoverformat"]

    @xhoverformat.setter
    def xhoverformat(self, val):
        self["xhoverformat"] = val

    # xperiod
    # -------
    @property
    def xperiod(self):
        """
        Only relevant when the axis `type` is "date". Sets the period
        positioning in milliseconds or "M<n>" on the x axis. Special
        values in the form of "M<n>" could be used to declare the
        number of months. In this case `n` must be a positive integer.

        The 'xperiod' property accepts values of any type.

        Returns
        -------
        Any
        """
        return self["xperiod"]

    @xperiod.setter
    def xperiod(self, val):
        self["xperiod"] = val

    # xperiod0
    # --------
    @property
    def xperiod0(self):
        """
        Only relevant when the axis `type` is "date". Sets the base for
        period positioning in milliseconds or date string on the x0
        axis. When `x0period` is round number of weeks, the `x0period0`
        by default would be on a Sunday i.e. 2000-01-02, otherwise it
        would be at 2000-01-01.

        The 'xperiod0' property accepts values of any type.

        Returns
        -------
        Any
        """
        return self["xperiod0"]

    @xperiod0.setter
    def xperiod0(self, val):
        self["xperiod0"] = val

    # xperiodalignment
    # ----------------
    @property
    def xperiodalignment(self):
        """
        Only relevant when the axis `type` is "date". Sets the
        alignment of data points on the x axis.

        The 'xperiodalignment' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['start', 'middle', 'end']

        Returns
        -------
        Any
        """
        return self["xperiodalignment"]

    @xperiodalignment.setter
    def xperiodalignment(self, val):
        self["xperiodalignment"] = val

    # xsrc
    # ----
    @property
    def xsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `x`.

        The 'xsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["xsrc"]

    @xsrc.setter
    def xsrc(self, val):
        self["xsrc"] = val
    # yaxis
    # -----
    @property
    def yaxis(self):
        """
        Sets a reference between this trace's y coordinates and a 2D
        cartesian y axis. If "y" (the default value), the y coordinates
        refer to `layout.yaxis`. If "y2", the y coordinates refer to
        `layout.yaxis2`, and so on.

        The 'yaxis' property is an identifier of a particular
        subplot, of type 'y', that may be specified as the string 'y'
        optionally followed by an integer >= 1
        (e.g. 'y', 'y1', 'y2', 'y3', etc.)

        Returns
        -------
        str
        """
        return self["yaxis"]

    @yaxis.setter
    def yaxis(self, val):
        self["yaxis"] = val

    # yhoverformat
    # ------------
    @property
    def yhoverformat(self):
        """
        Sets the hover text formatting rulefor `y` using d3 formatting
        mini-languages which are very similar to those in Python. For
        numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
        dates see: https://github.com/d3/d3-time-
        format/tree/v2.2.3#locale_format. We add two items to d3's date
        formatter: "%h" for half of the year as a decimal number as
        well as "%{n}f" for fractional seconds with n digits. For
        example, *2016-10-13 09:15:23.456* with tickformat
        "%H~%M~%S.%2f" would display *09~15~23.46*By default the values
        are formatted using `yaxis.hoverformat`.

        The 'yhoverformat' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["yhoverformat"]

    @yhoverformat.setter
    def yhoverformat(self, val):
        self["yhoverformat"] = val

    # zorder
    # ------
    @property
    def zorder(self):
        """
        Sets the layer on which this trace is displayed, relative to
        other SVG traces on the same subplot. SVG traces with higher
        `zorder` appear in front of those with lower `zorder`.

        The 'zorder' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)

        Returns
        -------
        int
        """
        return self["zorder"]

    @zorder.setter
    def zorder(self, val):
        self["zorder"] = val

    # type
    # ----
    @property
    def type(self):
        # Read-only trace-type literal; always "ohlc" (set in __init__).
        return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
close
Sets the close values.
closesrc
Sets the source reference on Chart Studio Cloud for
`close`.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
decreasing
:class:`plotly.graph_objects.ohlc.Decreasing` instance
or dict with compatible properties
high
Sets the high values.
highsrc
Sets the source reference on Chart Studio Cloud for
`high`.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.ohlc.Hoverlabel` instance
or dict with compatible properties
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
increasing
:class:`plotly.graph_objects.ohlc.Increasing` instance
or dict with compatible properties
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.ohlc.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.ohlc.Line` instance or
dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud for
`low`.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud for
`open`.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.ohlc.Stream` instance or
dict with compatible properties
text
Sets hover text elements associated with each sample
point. If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
tickwidth
Sets the width of the open/close tick marks relative to
the "x" minimal interval.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the x coordinates. If absent, linear coordinate
will be generated.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
"""
def __init__(
self,
arg=None,
close=None,
closesrc=None,
customdata=None,
customdatasrc=None,
decreasing=None,
high=None,
highsrc=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
increasing=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
line=None,
low=None,
lowsrc=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
open=None,
opensrc=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textsrc=None,
tickwidth=None,
uid=None,
uirevision=None,
visible=None,
x=None,
xaxis=None,
xcalendar=None,
xhoverformat=None,
xperiod=None,
xperiod0=None,
xperiodalignment=None,
xsrc=None,
yaxis=None,
yhoverformat=None,
zorder=None,
**kwargs,
):
"""
Construct a new Ohlc object
The ohlc (short for Open-High-Low-Close) is a style of
financial chart describing open, high, low and close for a
given `x` coordinate (most likely time). The tip of the lines
represent the `low` and `high` values and the horizontal
segments represent the `open` and `close` values. Sample points
where the close value is higher (lower) then the open value are
called increasing (decreasing). By default, increasing items
are drawn in green whereas decreasing are drawn in red.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Ohlc`
close
Sets the close values.
closesrc
Sets the source reference on Chart Studio Cloud for
`close`.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
decreasing
:class:`plotly.graph_objects.ohlc.Decreasing` instance
or dict with compatible properties
high
Sets the high values.
highsrc
Sets the source reference on Chart Studio Cloud for
`high`.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.ohlc.Hoverlabel` instance
or dict with compatible properties
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
increasing
:class:`plotly.graph_objects.ohlc.Increasing` instance
or dict with compatible properties
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.ohlc.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.ohlc.Line` instance or
dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud for
`low`.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud for
`open`.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.ohlc.Stream` instance or
dict with compatible properties
text
Sets hover text elements associated with each sample
point. If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
tickwidth
Sets the width of the open/close tick marks relative to
the "x" minimal interval.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the x coordinates. If absent, linear coordinate
will be generated.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
Returns
-------
Ohlc
"""
super(Ohlc, self).__init__("ohlc")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Ohlc
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Ohlc`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("close", None)
_v = close if close is not None else _v
if _v is not None:
self["close"] = _v
_v = arg.pop("closesrc", None)
_v = closesrc if closesrc is not None else _v
if _v is not None:
self["closesrc"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("decreasing", None)
_v = decreasing if decreasing is not None else _v
if _v is not None:
self["decreasing"] = _v
_v = arg.pop("high", None)
_v = high if high is not None else _v
if _v is not None:
self["high"] = _v
_v = arg.pop("highsrc", None)
_v = highsrc if highsrc is not None else _v
if _v is not None:
self["highsrc"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("increasing", None)
_v = increasing if increasing is not None else _v
if _v is not None:
self["increasing"] = _v
_v = arg.pop("legend", None)
_v = legend if legend is not None else _v
if _v is not None:
self["legend"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("legendwidth", None)
_v = legendwidth if legendwidth is not None else _v
if _v is not None:
self["legendwidth"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("low", None)
_v = low if low is not None else _v
if _v is not None:
self["low"] = _v
_v = arg.pop("lowsrc", None)
_v = lowsrc if lowsrc is not None else _v
if _v is not None:
self["lowsrc"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("open", None)
_v = open if open is not None else _v
if _v is not None:
self["open"] = _v
_v = arg.pop("opensrc", None)
_v = opensrc if opensrc is not None else _v
if _v is not None:
self["opensrc"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("xcalendar", None)
_v = xcalendar if xcalendar is not None else _v
if _v is not None:
self["xcalendar"] = _v
_v = arg.pop("xhoverformat", None)
_v = xhoverformat if xhoverformat is not None else _v
if _v is not None:
self["xhoverformat"] = _v
_v = arg.pop("xperiod", None)
_v = xperiod if xperiod is not None else _v
if _v is not None:
self["xperiod"] = _v
_v = arg.pop("xperiod0", None)
_v = xperiod0 if xperiod0 is not None else _v
if _v is not None:
self["xperiod0"] = _v
_v = arg.pop("xperiodalignment", None)
_v = xperiodalignment if xperiodalignment is not None else _v
if _v is not None:
self["xperiodalignment"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
_v = arg.pop("yhoverformat", None)
_v = yhoverformat if yhoverformat is not None else _v
if _v is not None:
self["yhoverformat"] = _v
_v = arg.pop("zorder", None)
_v = zorder if zorder is not None else _v
if _v is not None:
self["zorder"] = _v
# Read-only literals
# ------------------
self._props["type"] = "ohlc"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@_ohlc.py@.PATH_END.py
|
{
"filename": "set_alpha.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/color/set_alpha.py",
"type": "Python"
}
|
"""
=================================
Ways to set a color's alpha value
=================================
Compare setting alpha by the *alpha* keyword argument and by one of the Matplotlib color
formats. Often, the *alpha* keyword is the only tool needed to add transparency to a
color. In some cases, the *(matplotlib_color, alpha)* color format provides an easy way
to fine-tune the appearance of a Figure.
"""
import matplotlib.pyplot as plt
import numpy as np
# Fixing random state for reproducibility.
np.random.seed(19680801)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
x_values = [n for n in range(20)]
y_values = np.random.randn(20)
facecolors = ['green' if y > 0 else 'red' for y in y_values]
edgecolors = facecolors
ax1.bar(x_values, y_values, color=facecolors, edgecolor=edgecolors, alpha=0.5)
ax1.set_title("Explicit 'alpha' keyword value\nshared by all bars and edges")
# Normalize y values to get distinct face alpha values.
abs_y = [abs(y) for y in y_values]
face_alphas = [n / max(abs_y) for n in abs_y]
edge_alphas = [1 - alpha for alpha in face_alphas]
colors_with_alphas = list(zip(facecolors, face_alphas))
edgecolors_with_alphas = list(zip(edgecolors, edge_alphas))
ax2.bar(x_values, y_values, color=colors_with_alphas,
edgecolor=edgecolors_with_alphas)
ax2.set_title('Normalized alphas for\neach bar and each edge')
plt.show()
# %%
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.bar`
# - `matplotlib.pyplot.subplots`
#
# .. tags::
#
# styling: color
# plot-type: bar
# level: beginner
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@color@set_alpha.py@.PATH_END.py
|
{
"filename": "do_OutFoV_scan.py",
"repo_name": "Swift-BAT/NITRATES",
"repo_path": "NITRATES_extracted/NITRATES-main/nitrates/archive/do_OutFoV_scan.py",
"type": "Python"
}
|
import numpy as np
import os
from astropy.table import Table
from astropy.io import fits
from numba import jit, njit, prange
from scipy import interpolate
from math import erf
import healpy as hp
import pandas as pd
import argparse
import logging, traceback
# import ..config
from ..response.StructFunc import get_full_struct_manager
from ..models.flux_models import Plaw_Flux, Cutoff_Plaw_Flux, Band_Flux
from ..config import (
rt_dir,
fp_dir,
solid_angle_dpi_fname,
drm_dir,
bright_source_table_fname,
)
from ..lib.logllh_ebins_funcs import log_pois_prob, get_eflux, get_gammaln
from ..lib.event2dpi_funcs import det2dpis, mask_detxy
from ..models.models import Model
from ..llh_analysis.minimizers import (
NLLH_DualAnnealingMin,
NLLH_ScipyMinimize,
NLLH_ScipyMinimize_Wjacob,
)
from ..lib.coord_conv_funcs import (
convert_radec2imxy,
convert_imxy2radec,
convert_radec2batxyz,
convert_radec2thetaphi,
)
from ..response.ray_trace_funcs import RayTraces, FootPrints
from ..archive.do_bkg_estimation_wPSs_mp import get_srcs_infov
def cli():
    """Parse command-line arguments for the out-of-FoV scan.

    Returns
    -------
    argparse.Namespace
        Parsed options: input files (event data, detmask, attitude), job
        bookkeeping, healpix resolution, and signal/background time windows.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--evfname", type=str, help="Event data file", default=None)
    parser.add_argument("--dmask", type=str, help="Detmask fname", default=None)
    parser.add_argument("--attfname", type=str, help="attitude fname", default=None)
    parser.add_argument(
        "--job_id", type=int, help="ID to tell it what seeds to do", default=-1
    )
    parser.add_argument(
        "--Njobs", type=int, help="Total number of jobs submitted", default=64
    )
    parser.add_argument("--work_dir", type=str, help="work directory", default=None)
    parser.add_argument(
        "--log_fname", type=str, help="log file name", default="out_fov_scan"
    )
    parser.add_argument("--Nside", type=int, help="Healpix Nside", default=2**4)
    parser.add_argument("--trig_time", type=float, help="Trigger time", default=None)
    parser.add_argument(
        "--Ntdbls", type=int, help="Number of times to double duration size", default=3
    )
    # BUG FIX: help text previously read "Trigger time" (copy-paste error).
    parser.add_argument(
        "--min_dur", type=float, help="Smallest duration to scan", default=0.256
    )
    parser.add_argument(
        "--min_dt",
        type=float,
        help="Min time offset from trigger time to start at",
        default=1.25,
    )
    # BUG FIX: help text previously duplicated the --min_dt description.
    parser.add_argument(
        "--max_dt",
        type=float,
        help="Max time offset from trigger time to start at",
        default=3.75,
    )
    parser.add_argument(
        "--bkg_dt0",
        type=float,
        help="Time offset from trigger time to start bkg at",
        default=6.0,
    )
    parser.add_argument(
        "--bkg_dur", type=float, help="Duration to use for bkg", default=4.0
    )
    args = parser.parse_args()
    return args
def detxy2batxy(detx, dety):
    """Convert DET pixel indices to BAT physical coordinates (cm), centered on the array."""
    pix = 0.42  # detector pixel pitch in cm
    x_off = (285 * pix) / 2
    y_off = (172 * pix) / 2
    return pix * detx - x_off, pix * dety - y_off
def batxy2detxy(batx, baty):
    """Inverse of ``detxy2batxy``: BAT physical coordinates (cm) to DET pixel indices."""
    pix = 0.42  # detector pixel pitch in cm
    detx = (batx + (285 * pix) / 2) / pix
    dety = (baty + (172 * pix) / 2) / pix
    return detx, dety
def bldmask2batxys(bl_dmask):
    """Return BAT x/y coordinates (cm) of every detector enabled in the boolean DPI mask."""
    ys, xs = np.where(bl_dmask)  # rows are DETY, columns are DETX
    return detxy2batxy(xs, ys)
@njit(cache=True)
def shift_pha_bins(spec, pha_bins0, pha_bins1, new_pha_bins0, new_pha_bins1):
    """Rebin a spectrum from edges (pha_bins0, pha_bins1) onto new PHA edges.

    Old bins fully inside a new bin contribute in whole; an old bin that
    straddles a new-bin edge contributes the fraction of its width falling
    inside (i.e. counts are assumed uniform within a bin).
    """
    new_spec = np.zeros_like(new_pha_bins0)
    for i in range(len(new_spec)):
        e0 = new_pha_bins0[i]
        e1 = new_pha_bins1[i]
        # Old bins fully contained in [e0, e1].
        bl = (pha_bins0 >= e0) & (pha_bins1 <= e1)
        new_spec[i] += np.sum(spec[bl])
        # Old bin straddling the lower edge e0: add the fraction above e0.
        bl = (pha_bins0 < e0) & (pha_bins1 > e0)
        if np.sum(bl) > 0:
            ind = np.where(bl)[0][0]
            dE = pha_bins1[ind] - pha_bins0[ind]
            frac_in_bin = (pha_bins1[ind] - e0) / dE
            new_spec[i] += frac_in_bin * spec[ind]
        # Old bin straddling the upper edge e1: add the fraction below e1.
        bl = (pha_bins0 < e1) & (pha_bins1 > e1)
        if np.sum(bl) > 0:
            ind = np.where(bl)[0][0]
            dE = pha_bins1[ind] - pha_bins0[ind]
            frac_in_bin = (e1 - pha_bins0[ind]) / dE
            new_spec[i] += frac_in_bin * spec[ind]
    return new_spec
@njit(cache=True)
def shift_flor_dpi_pha_bins(
    flor_dpi, pha_bins0, pha_bins1, new_pha_bins0, new_pha_bins1
):
    """Rebin the PHA axis of a fluorescence response cube.

    Applies ``shift_pha_bins`` to the spectrum of every
    (grid-point, photon-energy) pair; returns a new cube with the
    requested PHA binning.
    """
    n_pha_new = new_pha_bins0.size
    n_det = flor_dpi.shape[0]
    n_pho = flor_dpi.shape[1]
    rebinned = np.zeros((n_det, n_pho, n_pha_new))
    for det in range(n_det):
        for pho in range(n_pho):
            rebinned[det, pho] += shift_pha_bins(
                flor_dpi[det, pho], pha_bins0, pha_bins1, new_pha_bins0, new_pha_bins1
            )
    return rebinned
def shift_resp_tab_pha_bins(
    resp_tab, pha_bins0, pha_bins1, new_pha_bins0, new_pha_bins1
):
    """Return a copy of a response table rebinned onto new PHA edges.

    The photon-energy grid columns (ENERG_LO/ENERG_HI) are copied over
    unchanged; every other column is rebinned row by row with
    ``shift_pha_bins``.
    """
    out_tab = Table()
    out_tab["ENERG_LO"] = np.copy(resp_tab["ENERG_LO"])
    out_tab["ENERG_HI"] = np.copy(resp_tab["ENERG_HI"])
    n_pho = len(resp_tab["ENERG_LO"])
    data_cols = [c for c in resp_tab.colnames if "ENERG" not in c]
    for col in data_cols:
        rebinned = np.zeros((n_pho, len(new_pha_bins0)))
        for row in range(n_pho):
            rebinned[row] += shift_pha_bins(
                resp_tab[col][row].astype(np.float64),
                pha_bins0.astype(np.float64),
                pha_bins1.astype(np.float64),
                new_pha_bins0.astype(np.float64),
                new_pha_bins1.astype(np.float64),
            )
        out_tab[col] = rebinned
    return out_tab
def get_dist2(x0, y0, z0, x1, y1, z1):
    """Squared Euclidean distance between points (x0, y0, z0) and (x1, y1, z1)."""
    dx = x1 - x0
    dy = y1 - y0
    dz = z1 - z0
    return dx ** 2 + dy ** 2 + dz ** 2
def get_dist(x0, y0, z0, x1, y1, z1):
    """Euclidean distance between points (x0, y0, z0) and (x1, y1, z1)."""
    d2 = get_dist2(x0, y0, z0, x1, y1, z1)
    return np.sqrt(d2)
def get_dist_wts(x0, y0, z0, x1, y1, z1):
    """Inverse-square-distance weights, normalized so they sum to one."""
    inv_d2 = 1.0 / get_dist2(x0, y0, z0, x1, y1, z1)
    return inv_d2 / np.sum(inv_d2)
def get_sa_divA(x0, y0, z0, x1, y1, z1):
    """Solid angle per unit area of a z-facing element: |dz| / r**3."""
    r3 = get_dist2(x0, y0, z0, x1, y1, z1) ** 1.5
    return np.abs(z1 - z0) / r3
def get_sa_wts(x0, y0, z0, x1, y1, z1):
    """Solid-angle weights (|dz| / r**3), normalized so they sum to one."""
    sa = get_sa_divA(x0, y0, z0, x1, y1, z1)
    return sa / np.sum(sa)
class Comp_Resp_Obj(object):
    """Transmission at each detector interpolated from a coarse grid of
    structure sample points.

    Each detector's transmission spectrum is a solid-angle-weighted
    (|dz| / r**3) average of the transmissions computed at nearby sample
    points managed by *struct4comp*.
    """
    def __init__(self, batxs, batys, batzs, struct4comp):
        # Detector positions in BAT coordinates (cm).
        self.ndets = len(batxs)
        self.batxs = batxs
        self.batys = batys
        self.batzs = batzs
        # Number of photon energies handled by the structure manager.
        self.Ne = struct4comp.Ne
        self.struct_obj = struct4comp
        # Coarse grid of sample points used for the interpolation.
        self.ncomp_pnts = len(self.struct_obj.batxs)
        self.comp_batxs = self.struct_obj.batxs
        self.comp_batys = self.struct_obj.batys
        self.comp_batzs = self.struct_obj.batzs
        self.calc_inds_wts4comp_dets()
    def calc_inds_wts4comp_dets(self, dmax=16):
        """Precompute, per detector, the sample points within *dmax* cm and
        their normalized solid-angle weights."""
        self.wts_list = []
        self.inds_list = []
        for i in range(self.ndets):
            dists = get_dist(
                self.comp_batxs,
                self.comp_batys,
                self.comp_batzs,
                self.batxs[i],
                self.batys[i],
                self.batzs[i],
            )
            bl = dists <= dmax
            wts = get_sa_wts(
                self.comp_batxs[bl],
                self.comp_batys[bl],
                self.comp_batzs[bl],
                self.batxs[i],
                self.batys[i],
                self.batzs[i],
            )
            inds = np.where(bl)[0]
            self.wts_list.append(wts)
            self.inds_list.append(inds)
    def set_theta_phi(self, theta, phi):
        """Set the source direction (radians) and recompute transmissions."""
        self.struct_obj.set_theta_phi(theta, phi)
        self.struct_obj.calc_tot_rhomu_dist()
        self.calc_trans()
    def calc_trans(self):
        """Average the sample-point transmissions onto each detector."""
        self.trans = np.zeros((self.ndets, self.Ne))
        self.comp_trans = np.zeros((self.ncomp_pnts, self.Ne))
        self.comp_trans[: self.ncomp_pnts] += self.struct_obj.get_trans()
        # Debug shape prints (left in place).
        print(
            np.shape(self.trans[0]),
            np.shape(self.wts_list[0]),
            np.shape(self.comp_trans[self.inds_list[0], :]),
        )
        print(
            np.shape(
                np.sum(
                    self.comp_trans[self.inds_list[0], :]
                    * self.wts_list[0][:, np.newaxis],
                    axis=0,
                )
            )
        )
        for i in range(self.ndets):
            self.trans[i] += np.sum(
                self.comp_trans[self.inds_list[i], :] * self.wts_list[i][:, np.newaxis],
                axis=0,
            )
    def get_trans(self):
        """Return the (ndets, Ne) transmission array from the last calculation."""
        return self.trans
def get_dual_struct_obj(Ephotons):
    """Build a structure manager sampling the dual electronics boxes.

    Lays out the (x, y) centers of the dual units under the detector plane
    (two mirrored banks of 8 blocks, each 2 x 4 units), then generates 8
    corner sample points per unit (at +/- half-width in x, y, z) and attaches
    them to a full structure manager so transmission can be evaluated at the
    photon energies *Ephotons*.
    """
    dual_xs = []
    dual_ys = []
    # Unit centers on the +y bank: 8 blocks, each 2 columns x 4 rows.
    for bi in range(8):
        x_b = -52.92 + bi * 15.12
        y_b = 23.555
        for i in range(2):
            x = x_b - 3.78 + i * 7.56
            for j in range(4):
                y = y_b - 18.935 + j * 9.24
                dual_xs.append(x)
                dual_ys.append(y)
    # Unit centers on the -y bank (offsets mirrored in sign).
    for bi in range(8):
        x_b = -52.92 + bi * 15.12
        y_b = -23.555
        for i in range(2):
            x = x_b - (-3.78 + i * 7.56)
            for j in range(4):
                y = y_b - (-18.935 + j * 9.24)
                dual_xs.append(x)
                dual_ys.append(y)
    dual_xs = np.array(dual_xs)
    dual_ys = np.array(dual_ys)
    print(len(dual_xs), len(dual_ys))
    # Vertical placement of the dual electronics volume, in BAT z (cm).
    BATZ_offset = 35.799
    dual_elec_x_halfwidth = 3.55
    dual_elec_y_halfwidth = 4.41
    dual_elec_z0 = -3.725 - 32.612 + BATZ_offset + 1.15 - 1.06 - 1.865
    dual_elec_z1 = -3.725 - 32.612 + BATZ_offset + 1.15 - 1.06 + 1.865
    dual_elec_zmid = -3.725 - 32.612 + BATZ_offset + 1.15 - 1.06
    dual_elec_z_halfwidth = 1.865
    # for each dual lets do 8 pnts (+/- x_hw/2, +/- y_hw/2, +/- z_hw/2)
    batxs4duals = []
    batys4duals = []
    batzs4duals = []
    Nduals = len(dual_xs)
    for ii in range(Nduals):
        dualx = dual_xs[ii]
        dualy = dual_ys[ii]
        for i in range(2):
            x = dualx - dual_elec_x_halfwidth / 2.0 + i * dual_elec_x_halfwidth
            for j in range(2):
                y = dualy - dual_elec_y_halfwidth / 2.0 + j * dual_elec_y_halfwidth
                for k in range(2):
                    z = (
                        dual_elec_zmid
                        - dual_elec_z_halfwidth / 2.0
                        + k * dual_elec_z_halfwidth
                    )
                    batxs4duals.append(x)
                    batys4duals.append(y)
                    batzs4duals.append(z)
    batxs4duals = np.array(batxs4duals)
    batys4duals = np.array(batys4duals)
    batzs4duals = np.array(batzs4duals)
    print(len(batxs4duals))
    # Attach the sample points to a structure manager for these energies.
    dual_struct_obj = get_full_struct_manager(Es=Ephotons)
    dual_struct_obj.set_batxyzs(batxs4duals, batys4duals, batzs4duals)
    return dual_struct_obj
# Sandwich (detector module) layout on the (173, 286) DPI: 16 columns of
# 16 DETX pixels (step 18 leaves a 2-pixel gap) and 16 rows of 8 DETY
# pixels (step 11 leaves a 3-pixel gap).
detxs_by_sand0 = np.arange(0, 286 - 15, 18)  # first DETX of each sandwich column
detxs_by_sand1 = detxs_by_sand0 + 15  # last DETX of each sandwich column
print(len(detxs_by_sand0))
detys_by_sand0 = np.arange(0, 173 - 7, 11)  # first DETY of each sandwich row
detys_by_sand1 = detys_by_sand0 + 7  # last DETY of each sandwich row
print(len(detys_by_sand0))
# Interior (non-edge) DETX values within each sandwich column / DETY values
# within each sandwich row.
detxs_in_cols_not_edges = [
    np.arange(detxs_by_sand0[i] + 1, detxs_by_sand1[i], 1, dtype=np.int64)
    for i in range(16)
]
detys_in_rows_not_edges = [
    np.arange(detys_by_sand0[i] + 1, detys_by_sand1[i], 1, dtype=np.int64)
    for i in range(16)
]
print(detxs_in_cols_not_edges)
# Full DPI pixel-coordinate grids (DETY rows x DETX columns).
dpi_shape = (173, 286)
detxax = np.arange(286, dtype=np.int64)
detyax = np.arange(173, dtype=np.int64)
detx_dpi, dety_dpi = np.meshgrid(detxax, detyax)
print(np.shape(detx_dpi), np.shape(dety_dpi))
print(np.max(detx_dpi), np.max(dety_dpi))
def get_detxys_from_colrows(col0, col1, row0, row1, orientation="NonEdges"):
    """Return DPI (row, col) indices of the detectors in a sandwich-region patch.

    Parameters
    ----------
    col0, col1 : int
        Sandwich-column slice bounds (0-16).
    row0, row1 : int
        Sandwich-row slice bounds (0-16).
    orientation : str
        Which detector subset of the patch to select: "NonEdges" (interior
        pixels), or "left"/"top"/"bot"/"right" edge combinations of the
        sandwich columns/rows.

    Returns
    -------
    tuple of ndarray
        (dety, detx) index arrays, as returned by ``np.where``.

    Raises
    ------
    ValueError
        If *orientation* is not one of the recognized strings.
    """
    if orientation == "NonEdges":
        good_detxs = np.array(detxs_in_cols_not_edges[col0:col1])
        good_detys = np.array(detys_in_rows_not_edges[row0:row1])
    elif orientation == "left":
        good_detxs = np.array(detxs_by_sand0[col0:col1])
        good_detys = np.array(detys_in_rows_not_edges[row0:row1])
        good_detys = np.append(good_detys, np.array(detys_by_sand1[row0:row1]))
    elif orientation == "top":
        good_detxs = np.array(detxs_in_cols_not_edges[col0:col1])
        good_detys = np.array(detys_by_sand1[row0:row1])
    elif orientation == "bot":
        good_detxs = np.array(detxs_in_cols_not_edges[col0:col1])
        good_detxs = np.append(good_detxs, np.array(detxs_by_sand0[col0:col1]))
        good_detys = np.array(detys_by_sand0[row0:row1])
    elif orientation == "right":
        good_detxs = np.array(detxs_by_sand1[col0:col1])
        good_detys = np.array(detys_in_rows_not_edges[row0:row1])
        good_detys = np.append(good_detys, np.array(detys_by_sand1[row0:row1]))
        good_detys = np.append(good_detys, np.array(detys_by_sand0[row0:row1]))
    else:
        # BUG FIX: previously this just printed "bad orientation" and then
        # crashed with a NameError on good_detxs; fail fast and clearly.
        raise ValueError("bad orientation: %s" % orientation)
    # Select DPI pixels whose DETX and DETY both belong to the patch.
    blx = np.isin(detx_dpi, good_detxs)
    bly = np.isin(dety_dpi, good_detys)
    bl = blx & bly
    inds = np.where(bl)
    return inds
def rot_col_row_orientation(col0, col1, row0, row1, orientation, phi_rot):
    """Rotate a sandwich-patch specification by the azimuth *phi_rot* (radians).

    The response tables are tabulated for a single azimuth octant; this maps
    a (column-slice, row-slice, edge-orientation) patch to the equivalent
    patch after rotating the incoming direction, in 45-degree octant steps.
    Returns (new_col0, new_col1, new_row0, new_row1, new_orientation).
    NOTE(review): orientations not listed in a branch are passed through
    unchanged -- presumably intentional; verify against the table generator.
    """
    if phi_rot < 0:
        phi_rot = phi_rot + 2 * np.pi
    if (phi_rot >= np.pi / 4) and (phi_rot < np.pi / 2):
        # bot is strong
        # right is weak
        new_row0 = 16 - col1
        new_row1 = 16 - col0
        new_col0 = 16 - row1
        new_col1 = 16 - row0
        if orientation == "right":
            new_orientation = "bot"
        elif orientation == "bot":
            new_orientation = "right"
        else:
            new_orientation = orientation
    elif (phi_rot >= np.pi / 2) and (phi_rot < 3 * np.pi / 4):
        # bot is strong
        # left is weak
        new_row0 = 16 - col1
        new_row1 = 16 - col0
        new_col0 = row0
        new_col1 = row1
        if orientation == "right":
            new_orientation = "bot"
        elif orientation == "bot":
            new_orientation = "left"
        elif orientation == "left":
            new_orientation = "right"
        else:
            new_orientation = orientation
    elif (phi_rot >= 3 * np.pi / 4) and (phi_rot < np.pi):
        # left is strong
        # bot is weak
        new_row0 = row0
        new_row1 = row1
        new_col0 = 16 - col1
        new_col1 = 16 - col0
        if orientation == "right":
            new_orientation = "left"
        elif orientation == "left":
            new_orientation = "right"
        else:
            new_orientation = orientation
    elif (phi_rot >= np.pi) and (phi_rot < 5 * np.pi / 4):
        # left is strong
        # top is weak
        new_row0 = 16 - row1
        new_row1 = 16 - row0
        new_col0 = 16 - col1
        new_col1 = 16 - col0
        if orientation == "right":
            new_orientation = "left"
        elif orientation == "bot":
            new_orientation = "top"
        elif orientation == "left":
            new_orientation = "right"
        elif orientation == "top":
            new_orientation = "bot"
        else:
            new_orientation = orientation
    elif (phi_rot >= 5 * np.pi / 4) and (phi_rot < 6 * np.pi / 4):
        # top is strong
        # left is weak
        new_row0 = col0
        new_row1 = col1
        new_col0 = row0
        new_col1 = row1
        if orientation == "right":
            new_orientation = "top"
        elif orientation == "bot":
            new_orientation = "left"
        elif orientation == "left":
            new_orientation = "right"
        elif orientation == "top":
            new_orientation = "bot"
        else:
            new_orientation = orientation
    elif (phi_rot >= 6 * np.pi / 4) and (phi_rot < 7 * np.pi / 4):
        # top is strong
        # right is weak
        new_row0 = col0
        new_row1 = col1
        new_col0 = 16 - row1
        new_col1 = 16 - row0
        if orientation == "right":
            new_orientation = "top"
        elif orientation == "bot":
            new_orientation = "right"
        elif orientation == "top":
            new_orientation = "bot"
        else:
            new_orientation = orientation
    elif (phi_rot >= 7 * np.pi / 4) and (phi_rot < 8 * np.pi / 4):
        # right is strong
        # top is weak
        new_row0 = 16 - row1
        new_row1 = 16 - row0
        new_col0 = col0
        new_col1 = col1
        if orientation == "bot":
            new_orientation = "top"
        elif orientation == "top":
            new_orientation = "bot"
        else:
            new_orientation = orientation
    else:
        # phi_rot in the tabulated octant [0, pi/4): identity mapping.
        new_orientation = orientation
        new_row0 = row0
        new_row1 = row1
        new_col0 = col0
        new_col1 = col1
    return new_col0, new_col1, new_row0, new_row1, new_orientation
def resp_tab2resp_dpis(resp_tab, phi_rot=0.0):
    """Expand a tabulated response into full-DPI line and Compton arrays.

    Each non-ENERG column of *resp_tab* encodes a sandwich patch in its
    name: the orientation is the first underscore-separated token and the
    column/row slice bounds sit at fixed positions from the end ("comp"
    columns carry one extra trailing token, shifting those positions by one).
    Every patch is rotated by *phi_rot* (radians) and its response written
    into the matching DPI pixels.  Returns (lines_resp_dpi, comp_resp_dpi),
    each of shape (173, 286, NphotonEs, Nphabins).
    """
    # Split column names into line-response vs Compton ("comp") columns.
    line_cnames = [
        cname
        for cname in resp_tab.colnames
        if (not "ENERG" in cname) and (not "comp" in cname)
    ]
    comp_cnames = [
        cname
        for cname in resp_tab.colnames
        if (not "ENERG" in cname) and ("comp" in cname)
    ]
    NphotonEs, Nphabins = resp_tab[line_cnames[0]].shape
    lines_resp_dpi = np.zeros((173, 286, NphotonEs, Nphabins))
    for cname in line_cnames:
        cname_list = cname.split("_")
        # Patch bounds from the name: ..._<col0>_<col1>_..._<row0>_<row1>
        col0 = int(cname_list[-5])
        col1 = int(cname_list[-4])
        row0 = int(cname_list[-2])
        row1 = int(cname_list[-1])
        orientation = cname_list[0]
        (
            new_col0,
            new_col1,
            new_row0,
            new_row1,
            new_orientation,
        ) = rot_col_row_orientation(col0, col1, row0, row1, orientation, phi_rot)
        det_inds = get_detxys_from_colrows(
            new_col0, new_col1, new_row0, new_row1, orientation=new_orientation
        )
        lines_resp_dpi[det_inds[0], det_inds[1], :, :] = resp_tab[cname].data.copy()
    comp_resp_dpi = np.zeros((173, 286, NphotonEs, Nphabins))
    for cname in comp_cnames:
        cname_list = cname.split("_")
        # Same bounds, shifted one token left by the trailing "comp" marker.
        col0 = int(cname_list[-6])
        col1 = int(cname_list[-5])
        row0 = int(cname_list[-3])
        row1 = int(cname_list[-2])
        orientation = cname_list[0]
        (
            new_col0,
            new_col1,
            new_row0,
            new_row1,
            new_orientation,
        ) = rot_col_row_orientation(col0, col1, row0, row1, orientation, phi_rot)
        det_inds = get_detxys_from_colrows(
            new_col0, new_col1, new_row0, new_row1, orientation=new_orientation
        )
        comp_resp_dpi[det_inds[0], det_inds[1], :, :] = resp_tab[cname].data.copy()
    return lines_resp_dpi, comp_resp_dpi
def get_resp_arr(drm_dir):
    """Index the DRM files in *drm_dir* as a structured array.

    File names are expected to be underscore-separated with theta at token 2
    and phi at token 4 (e.g. ``drm_theta_<th>_phi_<ph>_...``).  Returns a
    structured array with fields "theta", "phi", and "fname".
    """
    names = [fn for fn in os.listdir(drm_dir) if "drm_" in fn]
    fnames = np.array(names)
    parts = [fn.split("_") for fn in names]
    thetas = np.array([float(p[2]) for p in parts])
    phis = np.array([float(p[4]) for p in parts])
    rec_dtype = [("theta", np.float64), ("phi", np.float64), ("fname", fnames.dtype)]
    drm_arr = np.empty(len(names), dtype=rec_dtype)
    drm_arr["theta"] = thetas
    drm_arr["phi"] = phis
    drm_arr["fname"] = fnames
    return drm_arr
class ResponseDPI(object):
    """Line + Compton response DPIs read from one tabulated (theta, phi) file."""
    def __init__(self, resp_fname, pha_emins, pha_emaxs, phi0, bl_dmask):
        print("initing ResponseDPI, with fname")
        print(resp_fname)
        self.orig_resp_tab = Table.read(resp_fname)
        self.pha_tab = Table.read(resp_fname, hdu="EBOUNDS")
        # Native PHA binning of the file.
        self.orig_pha_emins = self.pha_tab["E_MIN"]
        self.orig_pha_emaxs = self.pha_tab["E_MAX"]
        # Incident photon-energy grid and its bin centers.
        self.photonEmins = self.orig_resp_tab["ENERG_LO"]
        self.photonEmaxs = self.orig_resp_tab["ENERG_HI"]
        self.photonEs = (self.photonEmins + self.photonEmaxs) / 2.0
        self.NphotonEs = len(self.photonEs)
        self.phi0 = phi0  # should be in radians
        self.ndets = np.sum(bl_dmask)
        self.bl_dmask = bl_dmask
        self.set_pha_bins(pha_emins, pha_emaxs)
        self.mk_resp_dpis()
    def set_pha_bins(self, pha_emins, pha_emaxs):
        """Rebin the native table onto the requested PHA edges."""
        self.pha_emins = pha_emins
        self.pha_emaxs = pha_emaxs
        self.Nphabins = len(self.pha_emins)
        self.resp_tab = shift_resp_tab_pha_bins(
            self.orig_resp_tab,
            self.orig_pha_emins,
            self.orig_pha_emaxs,
            self.pha_emins,
            self.pha_emaxs,
        )
    def set_phi0(self, phi0):
        """Update the rotation angle (radians); rebuild only if it moved > 0.01."""
        if np.abs(phi0 - self.phi0) > 1e-2:
            self.phi0 = phi0
            self.mk_resp_dpis()
    def mk_resp_dpis(self):
        """Rotate the tabulated patches by phi0 and keep only masked-in detectors."""
        lines_resp_dpis, comp_resp_dpis = resp_tab2resp_dpis(
            self.resp_tab, phi_rot=self.phi0
        )
        self.lines_resp_dpis = lines_resp_dpis[self.bl_dmask]
        self.comp_resp_dpis = comp_resp_dpis[self.bl_dmask]
    def get_lines_resp_dpis(self):
        """(ndets, NphotonEs, Nphabins) line-response array."""
        return self.lines_resp_dpis
    def get_comp_resp_dpis(self):
        """(ndets, NphotonEs, Nphabins) Compton-response array."""
        return self.comp_resp_dpis
def get_flor_intp_inds_wts(batxs, batys):
    """Bilinear-interpolation indices and weights onto the coarse fluorescence grid.

    The fluorescence response is tabulated on a coarse grid of detector
    positions (every 8 DET pixels).  For each requested BAT (x, y) position
    (cm) this returns the four flattened grid indices surrounding it and the
    corresponding bilinear weights, ordered
    [lower-left, lower-right, upper-left, upper-right].

    Returns
    -------
    (inds, wts) : two lists of four ndarrays each.
    """
    detxax = np.arange(-1, 286 + 2, 8, dtype=np.int64)
    detyax = np.arange(-2, 173 + 2, 8, dtype=np.int64)
    # Grid-node positions in BAT coords (inline of detxy2batxy, 0.42 cm pixels).
    batxax = 0.42 * detxax - (285 * 0.42) / 2
    batyax = 0.42 * detyax - (172 * 0.42) / 2
    # BUG FIX: the meshgrid and its BAT-coordinate conversion were computed
    # but never used (only the grid shape was); compute the shape directly.
    shp = (detyax.size, detxax.size)
    # Bracketing grid columns/rows for each requested position.
    x0inds = np.digitize(batxs, batxax) - 1
    x1inds = x0inds + 1
    y0inds = np.digitize(batys, batyax) - 1
    y1inds = y0inds + 1
    # Fractional distances give the 1-D interpolation weights in x and y.
    x0s = batxax[x0inds]
    x1s = batxax[x1inds]
    dxs = x1s - x0s
    x0wts = (x1s - batxs) / dxs
    x1wts = (batxs - x0s) / dxs
    y0s = batyax[y0inds]
    y1s = batyax[y1inds]
    dys = y1s - y0s
    y0wts = (y1s - batys) / dys
    y1wts = (batys - y0s) / dys
    # Flattened indices of the four surrounding grid nodes.
    inds00 = np.ravel_multi_index((y0inds, x0inds), shp)
    inds01 = np.ravel_multi_index((y0inds, x1inds), shp)
    inds10 = np.ravel_multi_index((y1inds, x0inds), shp)
    inds11 = np.ravel_multi_index((y1inds, x1inds), shp)
    inds = [inds00, inds01, inds10, inds11]
    wts = [y0wts * x0wts, y0wts * x1wts, y1wts * x0wts, y1wts * x1wts]
    return inds, wts
@njit(cache=True)
def flor_resp2dpis(flor_resp, flor_inds, flor_wts):
    """Bilinearly interpolate coarse-grid fluorescence responses onto detectors.

    For every detector, sums the responses of its four surrounding grid
    nodes (from ``get_flor_intp_inds_wts``), each scaled by its bilinear
    weight.
    """
    n_det = len(flor_inds[0])
    n_pho = flor_resp.shape[1]
    n_pha = flor_resp.shape[2]
    dpis = np.zeros((n_det, n_pho, n_pha))
    for corner in range(4):
        inds = flor_inds[corner]
        wts = flor_wts[corner]
        for det in range(n_det):
            dpis[det] += flor_resp[inds[det]] * (wts[det])
    return dpis
class FlorResponseDPI(object):
    """Fluorescence response interpolated over a healpix grid of directions.

    NOTE(review): this class is immediately redefined below; the later
    definition (which also applies per-line Aeff adjustment ratios) is the
    one in effect at runtime, so this version is dead code.
    """
    def __init__(
        self,
        resp_dname,
        pha_tab,
        pha_emins,
        pha_emaxs,
        bl_dmask,
        Nside=2**3,
        NphotonEs=187,
    ):
        # Directory holding one response file per healpix pixel.
        self.resp_dname = resp_dname
        self.pha_tab = pha_tab
        # Native PHA binning of the tabulated responses.
        self.orig_pha_emins = self.pha_tab["E_MIN"].astype(np.float64)
        self.orig_pha_emaxs = self.pha_tab["E_MAX"].astype(np.float64)
        # Requested output PHA binning.
        self.pha_emins = pha_emins
        self.pha_emaxs = pha_emaxs
        self.Nphabins = len(pha_emins)
        self.NphotonEs = NphotonEs
        self.ndets = np.sum(bl_dmask)
        self.bl_dmask = bl_dmask
        self.batxs, self.batys = bldmask2batxys(self.bl_dmask)
        # Bilinear interpolation from the coarse response grid to detectors.
        self.flor_inds, self.flor_wts = get_flor_intp_inds_wts(self.batxs, self.batys)
        # Number of grid points in each tabulated response file.
        self.orig_ndets = 851
        self.Nside = Nside
        self.resp_dict = {}  # hp inds will be the keys
    def set_theta_phi(self, theta, phi):
        """Set the source direction (degrees) and recompute the response DPI."""
        self.phi = phi
        self.theta = theta
        self.lat = 90.0 - self.theta
        # Healpix pixels and weights bracketing the requested direction.
        self.hp_inds2use, self.hp_wts = hp.get_interp_weights(
            self.Nside, self.phi, self.lat, lonlat=True
        )
        self.calc_resp_dpi()
    def open_new_file(self, hp_ind):
        """Load and PHA-rebin the response file for one healpix pixel."""
        fname = "hp_order_3_ind_%d_.npy" % (hp_ind)
        resp_arr = np.load(os.path.join(self.resp_dname, fname))
        self.resp_dict[hp_ind] = shift_flor_dpi_pha_bins(
            resp_arr,
            self.orig_pha_emins,
            self.orig_pha_emaxs,
            self.pha_emins,
            self.pha_emaxs,
        )
    def calc_resp_dpi(self):
        """Weighted sum over healpix pixels, then interpolate onto detectors."""
        resp_dpi0 = np.zeros((self.orig_ndets, self.NphotonEs, self.Nphabins))
        for hp_ind, wt in zip(self.hp_inds2use, self.hp_wts):
            if not hp_ind in list(self.resp_dict.keys()):
                self.open_new_file(hp_ind)
            resp_dpi0 += wt * self.resp_dict[hp_ind]
        self.resp_dpi = flor_resp2dpis(resp_dpi0, self.flor_inds, self.flor_wts)
    def get_resp_dpi(self):
        """(ndets, NphotonEs, Nphabins) fluorescence response from the last calc."""
        return self.resp_dpi
class FlorResponseDPI(object):
    """Fluorescence response interpolated over a healpix grid of directions.

    This definition shadows the identical class above, additionally scaling
    the Sn, Ta and Pb fluorescence PHA bins by effective-area adjustment
    ratios loaded from disk.
    """
    def __init__(
        self,
        resp_dname,
        pha_tab,
        pha_emins,
        pha_emaxs,
        bl_dmask,
        Nside=2**3,
        NphotonEs=187,
    ):
        # Directory holding one response file per healpix pixel.
        self.resp_dname = resp_dname
        self.pha_tab = pha_tab
        # Native PHA binning of the tabulated responses.
        self.orig_pha_emins = self.pha_tab["E_MIN"].astype(np.float64)
        self.orig_pha_emaxs = self.pha_tab["E_MAX"].astype(np.float64)
        # Requested output PHA binning.
        self.pha_emins = pha_emins
        self.pha_emaxs = pha_emaxs
        self.Nphabins = len(pha_emins)
        self.NphotonEs = NphotonEs
        self.ndets = np.sum(bl_dmask)
        self.bl_dmask = bl_dmask
        self.batxs, self.batys = bldmask2batxys(self.bl_dmask)
        # Bilinear interpolation from the coarse response grid to detectors.
        self.flor_inds, self.flor_wts = get_flor_intp_inds_wts(self.batxs, self.batys)
        # Number of grid points in each tabulated response file.
        self.orig_ndets = 851
        # NOTE(review): hard-coded, user-specific absolute path.
        fname = "/storage/work/jjd330/local/bat_data/OutFoVbursts/GRB131014A/flor_Aeff_adjust.npz"
        ratio_file = np.load(fname)
        # Effective-area adjustment ratios for the Sn/Ta/Pb fluorescence bins
        # (must broadcast over the leading response axes -- shape not shown here).
        self.sn_ratios = ratio_file["sn_ratios"]
        self.ta_ratios = ratio_file["ta_ratios"]
        self.pb_ratios = ratio_file["pb_ratios"]
        self.Nside = Nside
        self.resp_dict = {}  # hp inds will be the keys
    def set_theta_phi(self, theta, phi):
        """Set the source direction (degrees) and recompute the response DPI."""
        self.phi = phi
        self.theta = theta
        self.lat = 90.0 - self.theta
        # Healpix pixels and weights bracketing the requested direction.
        self.hp_inds2use, self.hp_wts = hp.get_interp_weights(
            self.Nside, self.phi, self.lat, lonlat=True
        )
        self.calc_resp_dpi()
    def open_new_file(self, hp_ind):
        """Load one healpix-pixel response file, apply ratios, and PHA-rebin."""
        fname = "hp_order_3_ind_%d_.npy" % (hp_ind)
        resp_arr = np.load(os.path.join(self.resp_dname, fname))
        # PHA bins 1-12 / 14-28 / 29-38 are scaled by the Sn / Ta / Pb ratios.
        # NOTE(review): bin 13 is skipped -- presumably intentional; verify.
        sn_inds = np.arange(1, 13, dtype=np.int64)
        ta_inds = np.arange(14, 29, dtype=np.int64)
        pb_inds = np.arange(29, 39, dtype=np.int64)
        for sn_ind in sn_inds:
            resp_arr[:, :, sn_ind] *= self.sn_ratios
        for ta_ind in ta_inds:
            resp_arr[:, :, ta_ind] *= self.ta_ratios
        for pb_ind in pb_inds:
            resp_arr[:, :, pb_ind] *= self.pb_ratios
        self.resp_dict[hp_ind] = shift_flor_dpi_pha_bins(
            resp_arr,
            self.orig_pha_emins,
            self.orig_pha_emaxs,
            self.pha_emins,
            self.pha_emaxs,
        )
    def calc_resp_dpi(self):
        """Weighted sum over healpix pixels, then interpolate onto detectors."""
        resp_dpi0 = np.zeros((self.orig_ndets, self.NphotonEs, self.Nphabins))
        for hp_ind, wt in zip(self.hp_inds2use, self.hp_wts):
            if not hp_ind in list(self.resp_dict.keys()):
                self.open_new_file(hp_ind)
            resp_dpi0 += wt * self.resp_dict[hp_ind]
        self.resp_dpi = flor_resp2dpis(resp_dpi0, self.flor_inds, self.flor_wts)
    def get_resp_dpi(self):
        """(ndets, NphotonEs, Nphabins) fluorescence response from the last calc."""
        return self.resp_dpi
class ResponseOutFoV(object):
    """Full out-of-FoV detector response at a given (theta, phi).

    Combines three interpolated components: the "lines" response attenuated
    by transmission through the spacecraft structure, the Compton response
    attenuated through the dual/electronics volumes (differs from the lines
    transmission only for theta > 90 deg), and the fluorescence response
    (``FlorResponseDPI``).
    """

    def __init__(self, resp_dname, pha_emins, pha_emaxs, bl_dmask):
        """
        resp_dname : directory with tabulated "drm_theta_..._phi_..." files
        pha_emins, pha_emaxs : output PHA bin edges
        bl_dmask : boolean detector mask (True = enabled)
        """
        self.resp_dname = resp_dname
        self.resp_arr = get_resp_arr(self.resp_dname)
        self.thetas = np.unique(self.resp_arr["theta"])
        # Photon-energy grid is read off the first tabulated response file.
        tab = Table.read(os.path.join(self.resp_dname, self.resp_arr["fname"][0]))
        pha_tab = Table.read(
            os.path.join(self.resp_dname, self.resp_arr["fname"][0]), hdu=2
        )
        self.PhotonEmins = tab["ENERG_LO"]
        self.PhotonEmaxs = tab["ENERG_HI"]
        self.PhotonEs = ((self.PhotonEmins + self.PhotonEmaxs) / 2.0).astype(np.float64)
        self.NphotonEs = len(self.PhotonEs)
        self.pha_emins = pha_emins
        self.pha_emaxs = pha_emaxs
        self.Nphabins = len(pha_emins)
        self.ndets = np.sum(bl_dmask)
        self.bl_dmask = bl_dmask
        self.batxs, self.batys = bldmask2batxys(self.bl_dmask)
        # All detectors sit at the same BAT z height (cm).
        self.batzs = 3.087 + np.zeros(self.ndets)
        self.resp_dpi_shape = (self.ndets, self.NphotonEs, self.Nphabins)
        self.resp_files = {}  # cache of ResponseDPI objects keyed by file name
        # Structure managers used to attenuate the line and Compton components.
        self.full_struct = get_full_struct_manager(Es=self.PhotonEs)
        self.full_struct.set_batxyzs(self.batxs, self.batys, self.batzs)
        dual_struct = get_dual_struct_obj(self.PhotonEs)
        self.comp_obj = Comp_Resp_Obj(self.batxs, self.batys, self.batzs, dual_struct)
        # NOTE(review): hard-coded cluster path for the fluorescence tables.
        self.flor_resp_obj = FlorResponseDPI(
            "/gpfs/scratch/jjd330/bat_data/flor_resps/",
            pha_tab,
            self.pha_emins,
            self.pha_emaxs,
            self.bl_dmask,
            NphotonEs=self.NphotonEs,
        )

    def set_theta_phi(self, theta, phi):
        """Point the response at (theta, phi), given in degrees."""
        self.theta = theta
        self.phi = phi
        # Tabulated grid points and interpolation weights around (theta, phi).
        self.thetas2use, self.phis2use, self.wts = self.get_intp_theta_phi_wts(
            self.theta, self.phi
        )
        self.inds4intp = []
        for i in range(len(self.wts)):
            ind = np.where(
                np.isclose(self.thetas2use[i], self.resp_arr["theta"])
                & np.isclose(self.phis2use[i], self.resp_arr["phi"])
            )[0][0]
            self.inds4intp.append(ind)
        self.full_struct.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
        self.lines_trans_dpis = self.full_struct.get_trans()
        if theta > 90.0:
            # Coming from below the detector plane: the Compton component is
            # attenuated by the dual/electronics volumes instead.
            self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
            self.comp_trans_dpis = self.comp_obj.get_trans()
        else:
            self.comp_trans_dpis = self.lines_trans_dpis
        self.flor_resp_obj.set_theta_phi(self.theta, self.phi)
        self.calc_resp_dpis()
        self.calc_tot_resp_dpis()

    def open_resp_file_obj(self, fname):
        """Load one tabulated response file into the cache."""
        resp_file_obj = ResponseDPI(
            os.path.join(self.resp_dname, fname),
            self.pha_emins,
            self.pha_emaxs,
            np.radians(self.phi),
            self.bl_dmask,
        )
        self.resp_files[fname] = resp_file_obj

    def calc_resp_dpis(self):
        """Interpolate the line and Compton responses over the grid files."""
        self.lines_resp_dpis = np.zeros(self.resp_dpi_shape)
        self.comp_resp_dpis = np.zeros(self.resp_dpi_shape)
        for i in range(len(self.wts)):
            k = self.resp_arr["fname"][self.inds4intp[i]]
            if not k in list(self.resp_files.keys()):
                self.open_resp_file_obj(k)
            self.lines_resp_dpis += (
                self.wts[i] * self.resp_files[k].get_lines_resp_dpis()
            )
            self.comp_resp_dpis += self.wts[i] * self.resp_files[k].get_comp_resp_dpis()

    def calc_tot_resp_dpis(self):
        """Apply the transmissions and sum lines + Compton + fluorescence."""
        lines_dpi = self.lines_resp_dpis * (self.lines_trans_dpis[:, :, np.newaxis])
        comp_dpi = self.comp_resp_dpis * (self.comp_trans_dpis[:, :, np.newaxis])
        self.non_flor_resp_dpi = lines_dpi + comp_dpi
        self.flor_resp_dpi = self.flor_resp_obj.get_resp_dpi()
        self.tot_resp_dpis = self.non_flor_resp_dpi + self.flor_resp_dpi

    def get_lines_resp_dpis(self):
        return self.lines_resp_dpis

    def get_comp_resp_dpis(self):
        return self.comp_resp_dpis

    def get_flor_resp_dpis(self):
        return self.flor_resp_obj.get_resp_dpi()

    def get_tot_resp_dpis(self):
        return self.tot_resp_dpis

    def get_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        """Fold per-photon-energy fluxes through the total response."""
        rate_dpis = np.zeros((self.ndets, self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:, j] += np.sum(
                photon_fluxes * self.tot_resp_dpis[:, :, j], axis=1
            )
        return rate_dpis

    def get_flor_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        """Fold per-photon-energy fluxes through the fluorescence response only."""
        rate_dpis = np.zeros((self.ndets, self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:, j] += np.sum(
                photon_fluxes * self.flor_resp_dpi[:, :, j], axis=1
            )
        return rate_dpis

    def get_non_flor_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        """Fold per-photon-energy fluxes through the lines + Compton response only."""
        rate_dpis = np.zeros((self.ndets, self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:, j] += np.sum(
                photon_fluxes * self.non_flor_resp_dpi[:, :, j], axis=1
            )
        return rate_dpis

    def get_intp_theta_phi_wts(self, theta, phi, eps=0.1):
        """Grid points and bilinear weights bracketing (theta, phi) in degrees.

        The phi tables cover one 0-45 deg octant; phi is folded into that
        octant (mirrored on odd octants) before interpolation.  Grid points
        closer than *eps* deg collapse to a single point with weight 1, and
        the poles (theta 0/180) use a single phi=0 point.
        """
        thetas = np.sort(np.unique(self.resp_arr["theta"]))
        phis = np.sort(np.unique(self.resp_arr["phi"]))
        th0 = np.digitize(theta, thetas) - 1
        if theta == 180.0:
            th0 -= 1
        theta0 = thetas[th0]
        theta1 = thetas[th0 + 1]
        print(theta0, theta1)
        if np.abs(theta0 - theta) < eps:
            ths = [theta0]
            th_wts = [1.0]
        elif np.abs(theta1 - theta) < eps:
            ths = [theta1]
            th_wts = [1.0]
        else:
            ths = [theta0, theta1]
            dth = theta1 - theta0
            th_wts = [(theta1 - theta) / dth, (theta - theta0) / dth]
        # BUG FIX: this folding relied on Python 2 integer division; under
        # Python 3 `int(phi) / 45` is float division, which reduced phi_ to
        # the fractional part of phi and broke the octant mirroring below.
        # Use floor division explicitly.
        phi_ = phi - (int(phi) // 45) * 45.0
        print(phi_)
        if (int(phi) // 45) % 2 == 1:
            phi_ = 45.0 - phi_
        print(phi_)
        ph0 = np.digitize(phi_, phis) - 1
        if phi_ == 45.0:
            ph0 -= 1
        phi0 = phis[ph0]
        phi1 = phis[ph0 + 1]
        if np.abs(phi0 - phi_) < eps:
            phs = [phi0]
            ph_wts = [1.0]
        elif np.abs(phi1 - phi_) < eps:
            phs = [phi1]
            ph_wts = [1.0]
        else:
            phs = [phi0, phi1]
            dph = phi1 - phi0
            ph_wts = [(phi1 - phi_) / dph, (phi_ - phi0) / dph]
        ths_ = []
        phs_ = []
        wts = []
        for i in range(len(ths)):
            if ths[i] == 0.0 or ths[i] == 180.0:
                # phi is degenerate at the poles; use a single phi=0 point.
                ths_.append(ths[i])
                phs_.append(0.0)
                wts.append(th_wts[i])
                continue
            for j in range(len(phs)):
                ths_.append(ths[i])
                phs_.append(phs[j])
                wts.append(th_wts[i] * ph_wts[j])
        return ths_, phs_, wts
@njit(cache=True, fastmath=True)
def pois_norm_conv_n0(mu, sig):
    """Closed-form P(N=0) term of the Poisson-normal convolution
    (numeric counterpart: ``pois_norm_num_conv``)."""
    sig2 = sig**2
    return np.exp(((sig2 - mu) ** 2 - mu**2) / (2.0 * sig2))
@njit(cache=True, fastmath=True)
def pois_norm_conv_n1(mu, sig):
    """Closed-form P(N=1) term of the Poisson-normal convolution."""
    sig2 = sig**2
    return ((mu - sig2)) * np.exp((sig2 / 2.0) - mu)
@njit(cache=True, fastmath=True)
def pois_norm_conv_n2(mu, sig):
    """Closed-form P(N=2) term of the Poisson-normal convolution."""
    sig2 = sig**2
    eterm = np.exp((sig2 / 2.0) - mu)
    return eterm * (-mu * sig2 + 0.5 * (mu**2 + sig2**2 + sig2))
@njit(cache=True, fastmath=True)
def pois_norm_conv_n3(mu, sig):
    """Closed-form P(N=3) term of the Poisson-normal convolution."""
    sig2 = sig**2
    eterm = np.exp((sig2 / 2.0) - mu)
    return eterm * 0.5 * (((mu - sig2) ** 3) / 3.0 + sig2 * (mu - sig2))
@njit(cache=True, fastmath=True)
def pois_norm_conv_n4(mu, sig):
    """Closed-form P(N=4) term of the Poisson-normal convolution."""
    sig2 = sig**2
    eterm = np.exp((sig2 / 2.0) - mu)
    mu_sig2 = mu - sig2
    return (eterm / 24.0) * (
        (mu_sig2) ** 4 + 6 * (sig2 * mu_sig2**2) + 3 * (sig2**2)
    )
@njit(cache=True, fastmath=True)
def pois_norm_conv_n5(mu, sig):
    """Closed-form P(N=5) term of the Poisson-normal convolution."""
    sig2 = sig**2
    eterm = np.exp((sig2 / 2.0) - mu)
    mu_sig2 = mu - sig2
    return (eterm / (5 * 24.0)) * (
        (mu_sig2) ** 5 + 5 * 2 * (sig2 * mu_sig2**3) + 5 * 3 * (sig2**2) * mu_sig2
    )
@njit(cache=True, fastmath=True)
def pois_norm_conv_n6(mu, sig):
    """Closed-form P(N=6) term of the Poisson-normal convolution."""
    sig2 = sig**2
    eterm = np.exp((sig2 / 2.0) - mu)
    mu_sig2 = mu - sig2
    return (eterm / (6 * 5 * 24.0)) * (
        (mu_sig2) ** 6
        + 5 * 3 * (sig2 * mu_sig2**4)
        + 5 * 3 * 3 * (sig2**2) * mu_sig2**2
        + 5 * 3 * (sig2**3)
    )
@njit(cache=True, fastmath=True)
def pois_norm_conv_n7(mu, sig):
    """Closed-form P(N=7) term of the Poisson-normal convolution."""
    sig2 = sig**2
    eterm = np.exp((sig2 / 2.0) - mu)
    mu_sig2 = mu - sig2
    return (eterm / (7 * 6 * 5 * 24.0)) * (
        (mu_sig2) ** 7
        + 7 * 3 * (sig2 * mu_sig2**5)
        + 7 * 5 * 3 * (sig2**2) * mu_sig2**3
        + 7 * 5 * 3 * (sig2**3) * mu_sig2
    )
@njit(cache=True, fastmath=True)
def num_factorial(N):
    """Return N! as a float (numba-friendly running product)."""
    out = 1.0
    i = 1
    while i <= N:
        out *= i
        i += 1
    return out
@njit(cache=True, fastmath=True)
def pois_norm_num_conv(mu, sig, N):
    """Numerically integrate P(N | rate ~ Normal(mu, sig)) over the rate.

    Riemann sum with 256 steps spanning +/- 4 sigma around mu, truncated at
    rate 0.  Used as the fallback for counts N > 7 where no closed-form
    term is coded.
    """
    res = 0.0
    Nmu = 256  # number of integration steps
    dmu = 8.0 * sig / Nmu  # step size covering +/- 4 sigma
    norm_A = (1.0 / (2.0 * np.pi * sig**2)) ** 0.5
    fact = num_factorial(N)
    mu0 = mu - dmu * Nmu / 2
    if mu0 < 0:
        mu0 = 0.0  # a Poisson rate cannot be negative
    for i in range(Nmu):
        mu_ = mu0 + i * dmu
        norm_prob = norm_A * np.exp(-((mu_ - mu) ** 2) / (2 * sig**2))
        pois_prob = ((mu_**N) / fact) * np.exp(-mu_)
        res += norm_prob * pois_prob * dmu
    return res
@njit(cache=True, fastmath=True)
def logl_pois_norm_conv(mu, sig, N, size):
    """Summed log-likelihood of counts *N* with a normal-distributed Poisson rate.

    mu, sig, N are flat arrays of length *size*.  Closed-form terms are used
    for N <= 7 and the numeric integration for larger counts; kept as an
    explicit elif chain for numba compatibility.
    """
    llh_ = 0.0
    for i in range(size):
        if N[i] == 0:
            llh = np.log(pois_norm_conv_n0(mu[i], sig[i]))
        elif N[i] == 1:
            llh = np.log(pois_norm_conv_n1(mu[i], sig[i]))
        elif N[i] == 2:
            llh = np.log(pois_norm_conv_n2(mu[i], sig[i]))
        elif N[i] == 3:
            llh = np.log(pois_norm_conv_n3(mu[i], sig[i]))
        elif N[i] == 4:
            llh = np.log(pois_norm_conv_n4(mu[i], sig[i]))
        elif N[i] == 5:
            llh = np.log(pois_norm_conv_n5(mu[i], sig[i]))
        elif N[i] == 6:
            llh = np.log(pois_norm_conv_n6(mu[i], sig[i]))
        elif N[i] == 7:
            llh = np.log(pois_norm_conv_n7(mu[i], sig[i]))
        else:
            llh = np.log(pois_norm_num_conv(mu[i], sig[i], N[i]))
        llh_ += llh
    return llh_
class LLH_webins(object):
    def __init__(
        self,
        event_data,
        ebins0,
        ebins1,
        bl_dmask,
        t0=None,
        t1=None,
        model=None,
        has_err=False,
    ):
        """
        event_data : event-list table with a "TIME" column
        ebins0, ebins1 : lower/upper energy-bin edges
        bl_dmask : boolean detector mask
        t0, t1 : optional window start/stop time(s); if both given the data
            DPIs are built immediately via set_time
        model : optional rate model to attach via set_model
        has_err : whether the model supplies rate uncertainties
        """
        self._all_data = event_data
        self.ebins0 = ebins0
        self.ebins1 = ebins1
        self.nebins = len(ebins0)
        self.bl_dmask = bl_dmask
        self.t0 = 0.0
        self.t1 = 0.0
        self.ebin = -1  # -1 means use all energy bins
        self.set_has_error(has_err)
        if t0 is not None and t1 is not None:
            self.set_time(t0, t1)
        if model is not None:
            self.set_model(model)
def set_time(self, t0, t1):
"""
Sets the start time and duration for the LLH
analysis.
Parameters:
t0: start time in MET seconds
dt: duration in seconds
"""
if np.isscalar(t0):
t0 = np.array([t0])
if np.isscalar(t1):
t1 = np.array([t1])
if np.all(self.t0 == t0) and np.all(self.t1 == t1):
return
self.t0 = t0
self.dt = 0.0
self.t1 = t1
t_bl = np.zeros(len(self._all_data), dtype=bool)
for i in range(len(self.t0)):
t_bl = np.logical_or(
(self._all_data["TIME"] >= self.t0[i])
& (self._all_data["TIME"] < self.t1[i]),
t_bl,
)
self.dt += self.t1[i] - self.t0[i]
self.data = self._all_data[t_bl]
self.data_dpis = np.array(
det2dpis(self.data, self.ebins0, self.ebins1, bl_dmask=self.bl_dmask)
)
self.data_dpis_flat = np.ravel(self.data_dpis)
self.gamma_vals = get_gammaln(self.data_dpis)
self.data_size = self.data_dpis.size
def set_model(self, model):
self.model = model
self.nparams = self.model.nparams
def set_ebin(self, j):
if "all" in str(j):
self.ebin = -1
else:
self.ebin = j
def set_has_error(self, has_error):
self.has_error = has_error
def get_llh(self, params):
if self.has_error:
# mod_cnts = self.model.get_rate_dpis(params)*self.dt
# mod_err = self.model.get_rate_dpis_err(params)*self.dt
mod_rate, mod_rate_err = self.model.get_rate_dpis_err(
params, ret_rate_dpis=True
)
if not np.all(mod_rate > 0):
return -np.inf
llh = logl_pois_norm_conv(
np.ravel(mod_rate * self.dt),
np.ravel(mod_rate_err * self.dt),
self.data_dpis_flat,
self.data_size,
)
else:
if self.ebin < 0:
mod_cnts = self.model.get_rate_dpis(params) * self.dt
if np.any(mod_cnts <= 0):
return -np.inf
llh = np.sum(
log_pois_prob(mod_cnts, self.data_dpis, gam_val=self.gamma_vals)
)
else:
mod_cnts = self.model.get_rate_dpi(params, self.ebin) * self.dt
if np.any(mod_cnts <= 0):
return -np.inf
llh = np.sum(
log_pois_prob(
mod_cnts,
self.data_dpis[self.ebin],
gam_val=self.gamma_vals[self.ebin],
)
)
return llh
def get_logprior(self, params):
lp = 0.0
if self.model.has_prior:
if self.ebin < 0:
j = None
else:
j = self.ebin
lp = self.model.get_log_prior(params, j=j)
return lp
def get_logprob(self, params):
logp = self.get_logprior(params)
llh = self.get_llh(params)
return logp + llh
def get_logprob_jacob(self, params):
if self.ebin < 0:
mod_cnts = self.model.get_rate_dpis(params) * self.dt
if np.any(np.isclose(mod_cnts, 0)):
mod_cnts = 1e-6 * np.ones_like(mod_cnts)
fact = 1.0 - (self.data_dpis / mod_cnts)
dNs_dparam = self.model.get_dr_dps(params)
jacob = [
np.sum(fact * dNs_dparam[i]) * self.dt for i in range(len(dNs_dparam))
]
else:
mod_cnts = self.model.get_rate_dpi(params, self.ebin) * self.dt
if np.any(np.isclose(mod_cnts, 0)):
mod_cnts = 1e-6 * np.ones_like(mod_cnts)
fact = 1.0 - (self.data_dpis[self.ebin] / mod_cnts)
dR_dparams = self.model.get_dr_dp(params, self.ebin)
if self.model.has_prior:
dNLP_dparams = self.model.get_dnlp_dp(params, self.ebin)
else:
dNLP_dparams = np.zeros(len(dR_dparams))
jacob = [
dNLP_dparams[i] + np.sum(fact * dR_dparams[i]) * self.dt
for i in range(len(dR_dparams))
]
return jacob
def get_logprob_hess(self, params):
if self.ebin < 0:
print("Not supported for multiple ebins yet")
return 0
else:
mod_cnts = self.model.get_rate_dpi(params, self.ebin) * self.dt
if np.any(np.isclose(mod_cnts, 0)):
mod_cnts = 1e-6 * np.ones_like(mod_cnts)
fact = (self.data_dpis[self.ebin]) / np.square(mod_cnts)
dR_dparams = self.model.get_dr_dp(params, self.ebin)
Ndim = len(dR_dparams)
dNLProb_hess = np.zeros((Ndim, Ndim))
for i in range(Ndim):
dNLProb_hess[i, i] = np.sum(np.square(dR_dparams[i] * self.dt) * fact)
for j in range(i + 1, Ndim):
dNLProb_hess[i, j] = np.sum(
(dR_dparams[i] * self.dt) * (dR_dparams[j] * self.dt) * fact
)
dNLProb_hess[j, i] += dNLProb_hess[i, j]
if self.model.has_prior:
dNLProb_hess += self.model.get_hess_nlogprior(params, self.ebin)
return dNLProb_hess
class Bkg_Model_wFlatA(Model):
    """Background model: per-energy-bin rate split into a diffuse component
    (proportional to each detector's solid angle) and a flat component.

    Parameters per energy bin j:
      bkg_rate_j : mean background rate per detector
      flat_j     : fraction of the rate that is flat across the detector
                   plane; the remaining (1 - flat_j) scales with solid angle
    """

    def __init__(
        self, bl_dmask, solid_ang_dpi, nebins, use_prior=False, use_deriv=False
    ):
        self.sa_dpi = solid_ang_dpi
        self.solid_angs = solid_ang_dpi[bl_dmask]
        self.solid_ang_mean = np.mean(self.solid_angs)
        self.rate_names = ["bkg_rate_" + str(i) for i in range(nebins)]
        self.flat_names = ["flat_" + str(i) for i in range(nebins)]
        # BUGFIX: build a fresh list; the original `param_names = self.rate_names`
        # followed by `param_names += self.flat_names` extended self.rate_names
        # in place, leaving the flat names inside rate_names as well
        param_names = self.rate_names + self.flat_names
        param_dict = {}
        for i, pname in enumerate(param_names):
            pdict = {}
            if "rate" in pname:
                pdict["bounds"] = (5e-5, 1e2)
                pdict["val"] = 0.05
            else:
                # flat fraction is bounded to [0, 1] by construction
                pdict["bounds"] = (0.0, 1.0)
                pdict["val"] = 0.25
            pdict["nuis"] = True
            pdict["fixed"] = False
            param_dict[pname] = pdict
        super(Bkg_Model_wFlatA, self).__init__(
            "Background", bl_dmask, param_names, param_dict, nebins, has_prior=use_prior
        )
        self._rate_ones = np.ones(self.ndets)
        self._rate_zeros = np.zeros(self.ndets)
        # per-ebin rate uncertainties; filled in by set_prior/set_bkg_row
        self.bkg_sigs = np.zeros(self.nebins)
        self.err_factor = 1.0
        if use_deriv:
            self.has_deriv = True

    def set_bkg_row(self, bkg_row, bkg_name="", fix_flats=True, err_factor=2.0):
        """Seed parameter values (and the rate prior) from a fitted background
        table row; optionally freeze the flat fractions at their fit values."""
        self.bkg_row = bkg_row
        bkg_rates = np.array(
            [bkg_row[bkg_name + "bkg_rate_" + str(j)] for j in range(self.nebins)]
        )
        bkg_rate_errs = np.array(
            [
                bkg_row["err_" + bkg_name + "bkg_rate_" + str(j)]
                for j in range(self.nebins)
            ]
        )
        bkg_flats = np.array(
            [bkg_row[bkg_name + "flat_" + str(j)] for j in range(self.nebins)]
        )
        self.flat_vals = bkg_flats
        for j, pname in enumerate(self.flat_names):
            self.param_dict[pname]["val"] = bkg_flats[j]
            self.param_dict[self.rate_names[j]]["val"] = bkg_rates[j]
            if fix_flats:
                self.param_dict[pname]["fixed"] = True
                self.param_dict[pname]["nuis"] = False
        self.set_prior(bkg_rates, bkg_rate_errs, err_factor=err_factor)

    def set_prior(self, exp_rates, bkg_sigs, err_factor=2.0):
        """Install independent Gaussian priors on the per-ebin rates,
        inflating the widths by err_factor."""
        self.exp_rates = exp_rates
        self.bkg_sigs = bkg_sigs
        self.err_factor = err_factor
        self.log_prior_funcs = []
        for j in range(self.nebins):
            self.log_prior_funcs.append(
                Norm_1D(
                    self.exp_rates[j], np.square(self.err_factor * self.bkg_sigs[j])
                )
            )

    def get_rate_dpis(self, params):
        """(nebins, ndets) expected rate image for the given params."""
        rate_dpis = np.zeros((self.nebins, self.ndets))
        for j in range(self.nebins):
            rate_dpis[j] += self.get_rate_dpi(params, j)
        return rate_dpis

    def get_rate_dpi(self, params, j):
        """Rate image for energy bin j: diffuse part scaled by relative
        solid angle plus the flat part."""
        rate = params[self.rate_names[j]]
        flat_A = params[self.flat_names[j]]
        diff_A = 1.0 - flat_A
        rate_dpi = rate * ((diff_A / self.solid_ang_mean) * self.solid_angs + flat_A)
        return rate_dpi

    def get_rate_dpis_err(self, params, ret_rate_dpis=False):
        """Rate images and their systematic errors for all energy bins."""
        rate_dpis_err = np.zeros((self.nebins, self.ndets))
        rate_dpis = np.zeros((self.nebins, self.ndets))
        for j in range(self.nebins):
            rate_dpi, rate_dpi_err = self.get_rate_dpi_err(params, j, ret_rate_dpi=True)
            rate_dpis[j] += rate_dpi
            rate_dpis_err[j] += rate_dpi_err
        if ret_rate_dpis:
            return rate_dpis, rate_dpis_err
        return rate_dpis_err

    def get_rate_dpi_err(self, params, j, ret_rate_dpi=False):
        """Per-detector error for ebin j: the (inflated) prior width added in
        quadrature with a 4% detector-efficiency systematic.

        The flat term means low-solid-angle detectors carry a larger
        fractional error, which is the intended conservative choice.
        """
        bkg_sig = self.bkg_sigs[j] * self.err_factor
        rate_dpi = self.get_rate_dpi(params, j)
        eff_err = 0.04  # fractional per-detector efficiency systematic
        rate_dpi_err = np.sqrt(bkg_sig**2 + (eff_err * rate_dpi) ** 2)
        return rate_dpi, rate_dpi_err

    def get_dr_dps(self, params):
        """Derivatives of the (nebins, ndets) rate image w.r.t. each free
        parameter; rate derivatives first, then flat-fraction derivatives."""
        dr_dbrs = []
        dr_dlrs = []
        for j in range(self.nebins):
            rate_fixed = self.param_dict[self.rate_names[j]]["fixed"]
            flat_fixed = self.param_dict[self.flat_names[j]]["fixed"]
            if rate_fixed and flat_fixed:
                continue
            # mask that routes the per-ebin derivative into row j
            e_zeros = np.zeros((self.nebins, self.ndets))
            e_zeros[j, :] = 1.0
            drdps = self.get_dr_dp(params, j)
            # BUGFIX: get_dr_dp only returns entries for the free params, so
            # index it positionally instead of assuming both [0] and [1] exist
            k = 0
            if not rate_fixed:
                dr_dbrs.append(drdps[k] * e_zeros)
                k += 1
            if not flat_fixed:
                dr_dlrs.append(drdps[k] * e_zeros)
        dr_dps = dr_dbrs
        dr_dps += dr_dlrs
        return dr_dps

    def get_dr_dp(self, params, j):
        """Per-detector derivatives for ebin j, in [d/drate, d/dflat] order,
        skipping any fixed parameter."""
        dr_dps = []
        rate = params[self.rate_names[j]]
        flat_A = params[self.flat_names[j]]
        diff_A = 1.0 - flat_A
        # dr/drate: the unit-rate shape
        if not self.param_dict[self.rate_names[j]]["fixed"]:
            dr_dps.append(diff_A * self.solid_angs / self.solid_ang_mean + flat_A)
        # dr/dflat: moving weight from the diffuse to the flat component
        if not self.param_dict[self.flat_names[j]]["fixed"]:
            dr_dps.append(rate * (1.0 - (self.solid_angs / self.solid_ang_mean)))
        return dr_dps

    def get_log_prior(self, params, j=None):
        """Gaussian log-prior on the rate parameters (flats have no prior).

        BUGFIX: the original iterated over every parameter (rates AND flats,
        double-counting each ebin's prior) and indexed log_prior_funcs with
        `j`, which raised a TypeError whenever j was None.  Iterate the rate
        parameters only and index with each parameter's own ebin.
        """
        lp = 0.0
        for j0, pname in enumerate(self.rate_names):
            if j is not None and j != j0:
                continue
            lp += self.log_prior_funcs[j0].logpdf(params[pname])
        return lp

    def get_dnlp_dp(self, params, j):
        """Gradient of the negative log-prior for ebin j's rate parameter."""
        pname = self.rate_names[j]
        if self.param_dict[pname]["fixed"]:
            return []
        dnlp_dps = -1 * self.log_prior_funcs[j].jacob_log_pdf(params[pname])
        return list(dnlp_dps)

    def get_hess_nlogprior(self, params, j):
        """Hessian of the negative log-prior for ebin j's rate parameter."""
        return -1 * self.log_prior_funcs[j].hess_log_pdf
class Point_Source_Model_Binned_Rates(Model):
    """Point-source model with a free rate per energy bin.

    The detector-plane rate in each energy bin is the interpolated forward
    ray trace at (imx, imy) scaled by that bin's rate parameter, so the rate
    parameter is counts/s per fully-illuminated detector.  The ray trace is
    cached and only re-interpolated when the position moves by more than
    _rt_im_update.
    """

    def __init__(
        self,
        imx,
        imy,
        dimxy,
        ebins,
        rt_obj,
        bl_dmask,
        name="Point_Source",
        err_fact=2.0,
        use_prior=False,
        rates=None,
        errs=None,
        use_deriv=False,
    ):
        # allowed (imx, imy) box: dimxy wide, centered on the seed position
        self.dimxy = dimxy
        self.imx = imx
        self.imy = imy
        self.imx0 = imx - dimxy / 2.0
        self.imx1 = imx + dimxy / 2.0
        self.imy0 = imy - dimxy / 2.0
        self.imy1 = imy + dimxy / 2.0
        self.ebins = ebins
        self.ebins0 = ebins[0]
        self.ebins1 = ebins[1]
        nebins = len(self.ebins0)
        param_names = ["imx", "imy"]
        self.rate_names = ["rate_" + str(i) for i in range(nebins)]
        param_names += self.rate_names
        param_dict = {}
        for pname in param_names:
            pdict = {}
            if pname == "imx":
                pdict["bounds"] = (self.imx0, self.imx1)
                pdict["val"] = self.imx
            elif pname == "imy":
                pdict["bounds"] = (self.imy0, self.imy1)
                pdict["val"] = self.imy
            else:
                if rates is None:
                    pdict["val"] = 1e-1
                else:
                    # BUGFIX: the ebin index must be an int; str(pname[-1])
                    # raised a TypeError when indexing the rates sequence
                    j = int(pname[-1])
                    pdict["val"] = rates[j]
                pdict["bounds"] = (5e-8, 1e2)
            pdict["nuis"] = False
            pdict["fixed"] = False
            param_dict[pname] = pdict
        super(Point_Source_Model_Binned_Rates, self).__init__(
            name, bl_dmask, param_names, param_dict, nebins, has_prior=use_prior
        )
        if use_deriv:
            self.has_deriv = True
        self.rt_obj = rt_obj
        self._rt_im_update = 1e-7  # min position move before re-interpolating
        # seed the cache with an offset position to force the first update
        self._rt_imx = imx - 1
        self._rt_imy = imy - 1
        self._rt = self.get_rt(imx, imy)
        self._rt_imx = imx
        self._rt_imy = imy

    def set_rate_prior(self, rates, errs):
        """Store Gaussian prior centers/widths for get_log_prior."""
        self._rates = rates
        self._errs = errs

    def get_rt_wderiv(self, imx, imy):
        """Cached ray trace plus its imx/imy derivatives at (imx, imy)."""
        if np.hypot(imx - self._rt_imx, imy - self._rt_imy) > self._rt_im_update:
            rt, drt_dx, drt_dy = self.rt_obj.get_intp_rt(imx, imy, get_deriv=True)
            self._rt = rt[self.bl_dmask]
            self._drt_dx = drt_dx[self.bl_dmask]
            self._drt_dy = drt_dy[self.bl_dmask]
            self._rt_imx = imx
            self._rt_imy = imy
            self._rt_sum = np.sum(self._rt)
        return self._rt, self._drt_dx, self._drt_dy

    def get_rt(self, imx, imy):
        """Cached ray trace (masked to good detectors) at (imx, imy)."""
        if np.hypot(imx - self._rt_imx, imy - self._rt_imy) > self._rt_im_update:
            rt = self.rt_obj.get_intp_rt(imx, imy)
            self._rt = rt[self.bl_dmask]
            self._rt_imx = imx
            self._rt_imy = imy
            self._rt_sum = np.sum(self._rt)
        return self._rt

    def get_rate_dpis(self, params):
        """(nebins, ndets) rate image: ray trace scaled by each bin's rate."""
        imx = params["imx"]
        imy = params["imy"]
        rt = self.get_rt(imx, imy)
        rate_dpis = np.array([rt * params[pname] for pname in self.rate_names])
        return rate_dpis

    def get_rate_dpis_err(self, params, ret_rate_dpis=False):
        """Rate images with a flat 4% fractional systematic error."""
        imx = params["imx"]
        imy = params["imy"]
        rt = self.get_rt(imx, imy)
        rate_dpis = np.array([rt * params[pname] for pname in self.rate_names])
        rate_dpis_err = 0.04 * rate_dpis
        if ret_rate_dpis:
            return rate_dpis, rate_dpis_err
        return rate_dpis_err

    def get_rate_dpi(self, params, j):
        """Rate image for energy bin j only."""
        imx = params["imx"]
        imy = params["imy"]
        rt = self.get_rt(imx, imy)
        rate_dpi = rt * params[self.rate_names[j]]
        return rate_dpi

    def get_log_prior(self, params):
        """Gaussian log-prior on the rate params.

        NOTE(review): requires set_rate_prior() to have been called first;
        assumes every key in `params` ends in its ebin digit -- confirm.
        """
        lp = 0.0
        for k, val in params.items():
            lp += stats.norm.logpdf(
                val, loc=self._rates[int(k[-1])], scale=self._errs[int(k[-1])]
            )
        return lp

    def get_dr_dps(self, params):
        """Derivatives of the rate DPIs w.r.t. the free parameters.

        Rate derivatives are emitted per energy bin (zeros in the other
        bins); when the position is free, dr/dimx and dr/dimy are prepended.
        """
        imx = params["imx"]
        imy = params["imy"]
        rt = self.get_rt(imx, imy)
        # dr/drate_i: the ray trace in ebin i, zeros in all other ebins
        dr_dps = []
        for i in range(self.nebins):
            one = np.zeros(self.nebins)
            one[i] = 1.0
            dr_dps.append([rt * one[ii] for ii in range(self.nebins)])
        if self.param_dict["imx"]["fixed"]:
            return dr_dps
        # BUGFIX: this branch referenced undefined names (rate_pdet_ebins,
        # drt_dimx, drt_dimy) and raised NameError; compute them properly
        rt, drt_dimx, drt_dimy = self.get_rt_wderiv(imx, imy)
        rate_pdet_ebins = np.array([params[pname] for pname in self.rate_names])
        dr_dimx = rate_pdet_ebins[:, np.newaxis] * drt_dimx
        dr_dimy = rate_pdet_ebins[:, np.newaxis] * drt_dimy
        dr_dps = [dr_dimx, dr_dimy] + dr_dps
        return dr_dps

    def get_dr_dp(self, params, j):
        """Derivative of ebin j's rate image w.r.t. its rate parameter."""
        dr_dps = []
        imx = params["imx"]
        imy = params["imy"]
        if self.param_dict[self.rate_names[j]]["fixed"]:
            return []
        rt = self.get_rt(imx, imy)
        dr_dps = [rt]
        return dr_dps
class CompoundModel(Model):
    """Sum of several component models presented as a single Model.

    Component parameter names are namespaced as "<model name>_<param>"
    (or bare when the component's name is empty), and rates, priors, and
    derivatives are aggregated across the components.  Time-dependent
    (Tdep) components broadcast non-Tdep components across time bins.
    """

    def __init__(self, model_list, name=None):
        self.model_list = model_list
        self.Nmodels = len(model_list)
        self.model_names = [model.name for model in model_list]
        if name is None:
            # default name: "Name1+Name2+..."
            name = ""
            for mname in self.model_names:
                name += mname + "+"
            name = name[:-1]
        param_names = []
        # model name -> {component pname: namespaced compound pname}
        self.param_name_map = {}
        param_dict = {}
        has_prior = False
        Tdep = False
        self.ntbins = 0
        for model in self.model_list:
            if model.has_prior:
                has_prior = True
            if model.Tdep:
                Tdep = True
                self.ntbins = max(self.ntbins, model.ntbins)
            mname = model.name
            pname_map = {}
            for pname in model.param_names:
                if mname == "":
                    _name = pname
                else:
                    _name = mname + "_" + pname
                param_names.append(_name)
                param_dict[_name] = model.param_dict[pname]
                pname_map[pname] = _name
            self.param_name_map[mname] = pname_map
        # the first component defines the detector mask and energy binning
        bl_dmask = self.model_list[0].bl_dmask
        super(CompoundModel, self).__init__(
            name,
            bl_dmask,
            param_names,
            param_dict,
            self.model_list[0].nebins,
            has_prior=has_prior,
            Tdep=Tdep,
        )
        # per-ebin memoization of the last computed rate DPI
        self._last_params_ebin = [{} for i in range(self.nebins)]
        self._last_rate_dpi = [np.ones(self.ndets) for i in range(self.nebins)]

    def get_model_params(self, params):
        """Split a compound param dict into one per-component param dict."""
        param_list = []
        for model in self.model_list:
            param = {}
            pname_map = self.param_name_map[model.name]
            for k in model.param_names:
                param[k] = params[pname_map[k]]
            param_list.append(param)
        return param_list

    def get_rate_dpis(self, params, **kwargs):
        """Summed rate DPIs over all components; shape is
        (ntbins, nebins, ndets) if any component is time-dependent."""
        if self.Tdep:
            ntbins = self.ntbins
            rate_dpis = np.zeros((ntbins, self.nebins, self.ndets))
        else:
            rate_dpis = np.zeros((self.nebins, self.ndets))
        for model in self.model_list:
            param = {}
            pname_map = self.param_name_map[model.name]
            for k in model.param_names:
                param[k] = params[pname_map[k]]
            if model.Tdep:
                rate_dpis += model.get_rate_dpis(param)
            else:
                if self.Tdep:
                    # broadcast a static component across all time bins
                    rate_dpi = model.get_rate_dpis(param)[np.newaxis, :, :]
                    rate_dpis += np.ones_like(rate_dpis) * rate_dpi
                else:
                    rate_dpis += model.get_rate_dpis(param)
        return rate_dpis

    def get_rate_dpis_err(self, params, ret_rate_dpis=False):
        """Summed rates with component errors combined in quadrature."""
        rate_dpis = np.zeros((self.nebins, self.ndets))
        err_dpis2 = np.zeros_like(rate_dpis)
        for model in self.model_list:
            param = {}
            pname_map = self.param_name_map[model.name]
            for k in model.param_names:
                param[k] = params[pname_map[k]]
            rate_dpi, err_dpi = model.get_rate_dpis_err(param, ret_rate_dpis=True)
            rate_dpis += rate_dpi
            err_dpis2 += err_dpi**2
        if ret_rate_dpis:
            return rate_dpis, np.sqrt(err_dpis2)
        return np.sqrt(err_dpis2)

    def get_rate_dpi(self, params, j, **kwargs):
        """Summed rate DPI for energy bin j, memoizing the last call."""
        if params == self._last_params_ebin[j]:
            return self._last_rate_dpi[j]
        if self.Tdep:
            ntbins = self.ntbins
            rate_dpi = np.zeros((ntbins, self.ndets))
        else:
            rate_dpi = np.zeros(self.ndets)
        for model in self.model_list:
            param = {}
            pname_map = self.param_name_map[model.name]
            for k in model.param_names:
                param[k] = params[pname_map[k]]
            if model.Tdep:
                rate_dpi += model.get_rate_dpi(param, j)
            else:
                if self.Tdep:
                    # broadcast a static component across time bins
                    rate_dpi_ = model.get_rate_dpi(param, j)[np.newaxis, :]
                    rate_dpi += np.ones_like(rate_dpi) * rate_dpi_
                else:
                    try:
                        rate_dpi += model.get_rate_dpi(param, j)
                    except Exception as E:
                        # fall back for components without a per-ebin method
                        print(E)
                        rate_dpi += model.get_rate_dpis(param)[j]
        # BUGFIX: snapshot the params dict; storing the caller's dict meant an
        # in-place mutation by the minimizer made the equality check above
        # compare the dict with itself and return a stale cached DPI
        self._last_params_ebin[j] = dict(params)
        self._last_rate_dpi[j] = rate_dpi
        return rate_dpi

    def get_log_prior(self, params, j=None):
        """Sum the component log-priors (passing j where supported)."""
        lp = 0.0
        if self.has_prior:
            param_list = self.get_model_params(params)
            for i, model in enumerate(self.model_list):
                if model.has_prior:
                    try:
                        lp += model.get_log_prior(param_list[i], j=j)
                    except:
                        # component prior without an ebin argument
                        lp += model.get_log_prior(param_list[i])
        return lp

    def get_dr_dps(self, params):
        """Concatenate derivative lists from components that provide them."""
        dr_dps = []
        param_list = self.get_model_params(params)
        for i, model in enumerate(self.model_list):
            if model.has_deriv:
                dr_dps += model.get_dr_dps(param_list[i])
        return dr_dps

    def get_dr_dp(self, params, j):
        """Concatenate per-ebin derivative lists from deriv-capable models."""
        dr_dps = []
        param_list = self.get_model_params(params)
        for i, model in enumerate(self.model_list):
            if model.has_deriv:
                dr_dps += model.get_dr_dp(param_list[i], j)
        return dr_dps

    def get_dnlp_dp(self, params, j):
        """Concatenate negative-log-prior gradients from prior-ful models."""
        dNLP_dp = []
        if self.has_prior:
            param_list = self.get_model_params(params)
            for i, model in enumerate(self.model_list):
                if model.has_prior:
                    dNLP_dp += model.get_dnlp_dp(param_list[i], j)
        return dNLP_dp

    def get_hess_nlogprior(self, params, j):
        """Block-diagonal Hessian of the negative log-prior, one block per
        prior-ful component."""
        Ndim = 0
        hess_list = []
        if self.has_prior:
            param_list = self.get_model_params(params)
            for i, model in enumerate(self.model_list):
                if model.has_prior:
                    hess = model.get_hess_nlogprior(param_list[i], j)
                    hess_list.append(hess)
                    Ndim += hess.shape[0]
        hess_nlogprior = np.zeros((Ndim, Ndim))
        i0 = 0
        for hess in hess_list:
            Nd = hess.shape[0]
            i1 = i0 + Nd
            hess_nlogprior[i0:i1, i0:i1] += hess
            i0 = i1
        return hess_nlogprior
class Source_Model_OutFoV(Model):
    """Signal model for a source outside the coded field of view.

    Rates come from out-of-FoV response tables (fluorescence and
    non-fluorescence components).  set_theta_phi() loads the response at the
    given spacecraft angles; set_flux_params() caches unit-amplitude rate
    DPIs for a spectral shape, so fitting only rescales by amplitude 'A'.
    """

    def __init__(
        self,
        flux_model,
        ebins,
        bl_dmask,
        name="Signal",
        use_deriv=False,
        use_prior=False,
    ):
        self.fmodel = flux_model
        self.ebins = ebins
        self.ebins0 = ebins[0]
        self.ebins1 = ebins[1]
        nebins = len(self.ebins0)
        # location of the out-of-FoV (fluorescence) response tables
        self.flor_resp_dname = "/storage/work/jjd330/local/bat_data/resp_tabs/"
        param_names = ["theta", "phi"]
        param_names += self.fmodel.param_names
        param_dict = {}
        for pname in param_names:
            pdict = {}
            if pname == "theta":
                pdict["bounds"] = (0.0, 180.0)
                pdict["val"] = 90.0
                pdict["nuis"] = False
            elif pname == "phi":
                pdict["bounds"] = (0.0, 360.0)
                pdict["val"] = 180.0
                pdict["nuis"] = False
            else:
                # spectral parameters come from the flux model
                pdict["bounds"] = self.fmodel.param_bounds[pname]
                if hasattr(self.fmodel, "param_guess"):
                    pdict["val"] = self.fmodel.param_guess[pname]
                else:
                    pdict["val"] = (pdict["bounds"][1] + pdict["bounds"][0]) / 2.0
                pdict["nuis"] = False
            pdict["fixed"] = False
            param_dict[pname] = pdict
        super(Source_Model_OutFoV, self).__init__(
            name, bl_dmask, param_names, param_dict, nebins, has_prior=use_prior
        )
        if use_deriv:
            self.has_deriv = True
        self.get_batxys()
        # fractional systematics on the two response components
        self.flor_err = 0.2
        self.non_flor_err = 0.05
        self.ones = np.ones(self.ndets)

    def get_batxys(self):
        """Cache the BAT x/y coordinates of the unmasked detectors."""
        yinds, xinds = np.where(self.bl_dmask)
        self.batxs, self.batys = detxy2batxy(xinds, yinds)

    def set_theta_phi(self, theta, phi):
        """Load and point the out-of-FoV response at angles (deg)."""
        self.resp_obj = ResponseOutFoV(
            self.flor_resp_dname, self.ebins0, self.ebins1, self.bl_dmask
        )
        self._theta = theta
        self._phi = phi
        self.resp_obj.set_theta_phi(theta, phi)

    def set_flux_params(self, flux_params):
        """Cache unit-amplitude rate DPIs and their systematic errors for
        the given spectral shape; fitting then only scales by 'A'.

        NOTE: mutates the caller's dict by forcing flux_params["A"] = 1.
        """
        self.flux_params = flux_params
        resp_ebins = np.append(
            self.resp_obj.PhotonEmins, [self.resp_obj.PhotonEmaxs[-1]]
        )
        self.flux_params["A"] = 1.0
        self.normed_photon_fluxes = self.fmodel.get_photon_fluxes(
            resp_ebins, self.flux_params
        )
        # swapaxes -> (ndets, nebins)
        self.normed_rate_dpis = np.swapaxes(
            self.resp_obj.get_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes),
            0,
            1,
        )
        # systematic error: flor and non-flor contributions in quadrature
        self.normed_err_rate_dpis = np.swapaxes(
            np.sqrt(
                (
                    self.flor_err
                    * self.resp_obj.get_flor_rate_dpis_from_photon_fluxes(
                        self.normed_photon_fluxes
                    )
                )
                ** 2
                + (
                    self.non_flor_err
                    * self.resp_obj.get_non_flor_rate_dpis_from_photon_fluxes(
                        self.normed_photon_fluxes
                    )
                )
                ** 2
            ),
            0,
            1,
        )

    def get_rate_dpis(self, params):
        """Rate DPIs: amplitude times the cached unit-amplitude template.

        theta/phi in params are ignored here; the response is fixed by the
        last set_theta_phi() call.
        """
        A = params["A"]
        return A * self.normed_rate_dpis

    def get_rate_dpis_err(self, params, ret_rate_dpis=False):
        """Scaled systematic errors (and optionally the rates)."""
        err_rate_dpis = params["A"] * self.normed_err_rate_dpis
        if ret_rate_dpis:
            rate_dpis = self.get_rate_dpis(params)
            return rate_dpis, err_rate_dpis
        return err_rate_dpis

    def get_rate_dpi(self, params, j):
        """Rate DPI in energy bin j.

        BUGFIX: used an undefined bare name `A` (NameError at first call);
        read the amplitude from params like the other rate methods.
        """
        return params["A"] * self.normed_rate_dpis[:, j]

    def get_log_prior(self, params, j=None):
        """NOTE(review): relies on self.frac_names / self.prior_func, which
        are never set in this class (appears copied from an in-FoV model);
        calling this will raise AttributeError -- confirm before enabling
        use_prior on this model."""
        lp = 0.0
        for pname in self.frac_names:
            if int(pname[-1]) == j or j is None:
                lp += self.prior_func(params, pname)
        return lp

    def get_dnlp_dp(self, params, j):
        """NOTE(review): same caveat as get_log_prior -- self.frac_names and
        self.deriv_prior_func are not defined in this class."""
        dnlp_dps = []
        for pname in self.frac_names:
            if int(pname[-1]) == j or j is None:
                dnlp_dps.append(self.deriv_prior_func(params, pname))
        return dnlp_dps

    def get_hess_nlogprior(self, params, j):
        """NOTE(review): same caveat -- self.deriv2_prior_func undefined."""
        return np.array([[self.deriv2_prior_func(params, self.frac_names[j])]])

    def get_dr_dgamma(self, params):
        """NOTE(review): references get_rt/flux2rate/_shadow attributes that
        this out-of-FoV class never defines (copied from the in-FoV model);
        will raise AttributeError if reached -- confirm it is unreachable."""
        rt = self.get_rt(params["imx"], params["imy"])
        drdgs = params["A"] * self.flux2rate.get_gamma_deriv(params["gamma"])
        drdgs_trans = params["A"] * self.flux2rate_pbtrans.get_gamma_deriv(
            params["gamma"]
        )
        dr_dgs = np.array(
            [
                rt * drdg
                + (self._shadow) * drdgs_trans[i]
                + self.max_rt * (self._unfp) * drdg * params[self.frac_names[i]]
                for i, drdg in enumerate(drdgs)
            ]
        )
        return dr_dgs

    def get_dr_dps(self, params):
        """Derivatives of the rates w.r.t. the free params (A and gamma)."""
        dr_dps = []
        for pname in self.param_names:
            if self.param_dict[pname]["fixed"]:
                continue
            if pname == "A":
                # rates are linear in A
                dr_dps.append(self.get_rate_dpis(params) / params["A"])
            elif pname == "gamma":
                dr_dps.append(self.get_dr_dgamma(params))
        return dr_dps
def min_at_Epeaks_gammas(sig_miner, sig_mod, Epeaks, gammas):
    """Profile-minimize the NLLH over amplitude at each (Epeak, gamma) pair.

    For every spectral-shape grid point, the shape is loaded into sig_mod
    and sig_miner re-minimizes over the remaining free parameter(s).

    Returns (nllhs, As): minimized -log-likelihoods and best-fit amplitudes.
    """
    flux_params = {"A": 1.0, "Epeak": 150.0, "gamma": -0.25}
    nllhs = []
    As = []
    for Epeak, gamma in zip(Epeaks, gammas):
        # update the shared shape dict in place, then push it to the model
        flux_params["gamma"] = gamma
        flux_params["Epeak"] = Epeak
        sig_mod.set_flux_params(flux_params)
        pars, nllh, res = sig_miner.minimize()
        nllhs.append(nllh[0])
        As.append(pars[0][0])
    return nllhs, As
def analysis_at_theta_phi(
    theta,
    phi,
    bkg_bf_params,
    bkg_mod,
    flux_mod,
    ev_data,
    ebins0,
    ebins1,
    tbins0,
    tbins1,
):
    """Grid TS scan at one sky position (theta, phi in spacecraft coords).

    Builds an out-of-FoV signal model at (theta, phi), combines it with the
    fixed best-fit background model, then for each time bin minimizes the
    NLLH over the signal amplitude on a coarse (Epeak, gamma) spectral grid.

    Returns a DataFrame with one row per (time bin, spectral point) holding
    the minimized nllh, best-fit amplitude A, and the sqrt test statistic.
    """
    bl_dmask = bkg_mod.bl_dmask
    sig_mod = Source_Model_OutFoV(flux_mod, [ebins0, ebins1], bl_dmask, use_deriv=True)
    sig_mod.set_theta_phi(theta, phi)
    print("theta, phi set")
    comp_mod = CompoundModel([bkg_mod, sig_mod])
    sig_miner = NLLH_ScipyMinimize_Wjacob("")
    sig_llh_obj = LLH_webins(ev_data, ebins0, ebins1, bl_dmask, has_err=True)
    sig_llh_obj.set_model(comp_mod)
    flux_params = {"A": 1.0, "gamma": 0.5, "Epeak": 1e2}
    pars_ = {}
    pars_["Signal_theta"] = theta
    pars_["Signal_phi"] = phi
    for pname, val in bkg_bf_params.items():
        # NOTE(review): assumes bkg_mod is the "Background+Cyg X-1" compound
        # model, so its params get the combined name prefix -- confirm caller
        pars_["Background+Cyg X-1_" + pname] = val
    for pname, val in flux_params.items():
        pars_["Signal_" + pname] = val
    sig_miner.set_llh(sig_llh_obj)
    # fix everything except the signal amplitude
    fixed_pnames = list(pars_.keys())
    fixed_vals = list(pars_.values())
    trans = [None for i in range(len(fixed_pnames))]
    sig_miner.set_trans(fixed_pnames, trans)
    sig_miner.set_fixed_params(fixed_pnames, values=fixed_vals)
    sig_miner.set_fixed_params(["Signal_A"], fixed=False)
    # coarse spectral grid; the second assignments intentionally override the
    # finer grids defined just above (interior points only, edges dropped)
    gamma_ax = np.linspace(-0.4, 1.6, 8 + 1)
    gamma_ax = np.linspace(-0.4, 1.6, 4 + 1)[1:-1]
    Epeak_ax = np.logspace(np.log10(45.0), 3, 10 + 1)
    Epeak_ax = np.logspace(np.log10(45.0), 3, 5 + 1)[1:-1]
    gammas, Epeaks = np.meshgrid(gamma_ax, Epeak_ax)
    gammas = gammas.ravel()
    Epeaks = Epeaks.ravel()
    res_dfs = []
    ntbins = len(tbins0)
    for i in range(ntbins):
        t0 = tbins0[i]
        t1 = tbins1[i]
        dt = t1 - t0
        sig_llh_obj.set_time(tbins0[i], tbins1[i])
        res_dict = {"theta": theta, "phi": phi, "time": t0, "dur": dt}
        res_dict["Epeak"] = Epeaks
        res_dict["gamma"] = gammas
        nllhs, As = min_at_Epeaks_gammas(sig_miner, sig_mod, Epeaks, gammas)
        # background-only NLLH: same model with a negligible signal amplitude
        pars_["Signal_A"] = 1e-10
        bkg_nllh = -sig_llh_obj.get_logprob(pars_)
        res_dict["nllh"] = np.array(nllhs)
        res_dict["A"] = np.array(As)
        # sqrt TS; NaN wherever the signal fit is worse than background-only
        res_dict["TS"] = np.sqrt(2 * (bkg_nllh - res_dict["nllh"]))
        res_dict["bkg_nllh"] = bkg_nllh
        res_dfs.append(pd.DataFrame(res_dict))
        print("done with %d of %d tbins" % (i + 1, ntbins))
    return pd.concat(res_dfs, ignore_index=True)
def main(args):
    """Out-of-FoV TS scan driver.

    Fits the background (plus a Cyg X-1 point source) in an off-source
    window, builds overlapping time bins, and for this job's share of
    HEALPix sky pixels runs analysis_at_theta_phi, writing one CSV of
    results per pixel into args.work_dir.
    """
    fname = os.path.join(args.work_dir, args.log_fname + "_" + str(args.job_id))
    logging.basicConfig(
        filename=fname + ".log",
        level=logging.DEBUG,
        format="%(asctime)s-" "%(levelname)s- %(message)s",
    )

    # NOTE(review): this response file is opened only for its PHA bin table,
    # and pha_emins/pha_emaxs are never used below -- likely legacy code
    resp_fname = (
        "/storage/work/jjd330/local/bat_data/resp_tabs/drm_theta_126.0_phi_30.0_.fits"
    )
    resp_file = fits.open(resp_fname)
    pha_emins, pha_emaxs = resp_file[2].data["E_MIN"].astype(np.float64), resp_file[
        2
    ].data["E_MAX"].astype(np.float64)

    # analysis energy bins (keV): linear below 64, log-spaced above
    ebins0 = np.array([15.0, 24.0, 35.0, 48.0, 64.0])
    ebins0 = np.append(ebins0, np.logspace(np.log10(84.0), np.log10(500.0), 5 + 1))[:-1]
    ebins0 = np.round(ebins0, decimals=1)[:-1]
    ebins1 = np.append(ebins0[1:], [350.0])
    nebins = len(ebins0)

    ev_data = fits.open(args.evfname)[1].data
    if args.trig_time is None:
        trigger_time = np.min(ev_data["TIME"])
    else:
        trigger_time = args.trig_time

    # detector-enable map closest to the analysis start time
    enb_tab = Table.read(args.dmask)
    enb_ind = np.argmin(np.abs(enb_tab["TIME"] - (trigger_time + args.min_dt)))
    dmask = enb_tab[enb_ind]["FLAG"]
    mask_vals = mask_detxy(dmask, ev_data)
    bl_dmask = dmask == 0.0

    # good events: clean flags, 10 keV <= E < 1 MeV, on enabled detectors
    bl_ev = (
        (ev_data["EVENT_FLAGS"] < 1)
        & (ev_data["ENERGY"] < 1e3)
        & (ev_data["ENERGY"] >= 10.0)
        & (mask_vals == 0.0)
    )
    ev_data0 = ev_data[bl_ev]

    # spacecraft attitude quaternion at the analysis start time
    attfile = Table.read(args.attfname)
    att_ind = np.argmin(np.abs(attfile["TIME"] - (trigger_time + args.min_dt)))
    att_q = attfile["QPARAM"][att_ind]

    # NOTE(review): solid_angle_dpi_fname and rt_dir are assumed to be
    # module-level globals -- confirm they are defined at import time
    solid_angle_dpi = np.load(solid_angle_dpi_fname)
    bkg_mod = Bkg_Model_wFlatA(bl_dmask, solid_angle_dpi, nebins, use_deriv=True)
    llh_obj = LLH_webins(ev_data0, ebins0, ebins1, bl_dmask, has_err=True)
    bkg_miner = NLLH_ScipyMinimize_Wjacob("")

    # off-source window used to fit the background model
    bkg_t0 = trigger_time + args.bkg_dt0  # typically ~6.0 s
    bkg_dt = args.bkg_dur  # typically ~4.0 s
    bkg_t1 = bkg_t0 + bkg_dt
    brt_src_tab = get_srcs_infov(attfile, bkg_t0 + bkg_dt / 2.0)
    # assumes the first (brightest) in-FoV source is Cyg X-1
    cygx1_row = brt_src_tab[0]
    rt_obj = RayTraces(rt_dir)
    cyg_mod = Point_Source_Model_Binned_Rates(
        cygx1_row["imx"],
        cygx1_row["imy"],
        0.1,
        [ebins0, ebins1],
        rt_obj,
        bl_dmask,
        use_deriv=True,
        name=cygx1_row["Name"],
    )
    bkg_mod = CompoundModel([bkg_mod, cyg_mod])
    llh_obj.set_time(bkg_t0, bkg_t1)
    llh_obj.set_model(bkg_mod)
    bkg_miner.set_llh(llh_obj)
    bkg_miner.set_fixed_params(["Cyg X-1_imx", "Cyg X-1_imy"])
    pars, bkg_nllh, res = bkg_miner.minimize()

    # collect best-fit values for the free params; fixed params are skipped
    # by the minimizer, so walk the names with a separate result counter
    i = 0
    bkg_bf_params = {}
    for cname in bkg_mod.param_names:
        if cname in bkg_miner.fixed_params:
            continue
        bkg_bf_params[cname] = pars[0][i]
        i += 1
    bkg_bf_params["Cyg X-1_imx"] = cygx1_row["imx"]
    bkg_bf_params["Cyg X-1_imy"] = cygx1_row["imy"]

    flux_mod = Cutoff_Plaw_Flux(E0=100.0)

    # overlapping (50%-stepped) time bins, doubling the duration Ntdbls times
    dur = args.min_dur
    tbins0 = np.arange(args.min_dt, args.max_dt, dur / 2.0) + trigger_time
    tbins1 = tbins0 + dur
    for i in range(args.Ntdbls):
        dur *= 2
        tbins0_ = np.arange(args.min_dt, args.max_dt, dur / 2.0) + trigger_time
        tbins1_ = tbins0_ + dur
        tbins0 = np.append(tbins0, tbins0_)
        tbins1 = np.append(tbins1, tbins1_)
    ntbins = len(tbins0)
    logging.info("ntbins: %d" % (ntbins))

    # split the HEALPix sky map evenly across jobs
    Nside = args.Nside
    Npix = hp.nside2npix(Nside)
    Njobs = args.Njobs
    job_id = args.job_id
    # BUGFIX: use floor division -- `Npix / Njobs` is true (float) division
    # in Python 3, which made range(hp_ind0, hp_ind1) below raise TypeError
    Npix2do = 1 + Npix // Njobs
    logging.info("Npix2do: %d" % (Npix2do))
    hp_ind0 = job_id * Npix2do
    hp_ind1 = min(hp_ind0 + Npix2do, Npix)
    logging.info("hp_ind0: %d" % (hp_ind0))
    logging.info("hp_ind1: %d" % (hp_ind1))

    for hp_ind in range(hp_ind0, hp_ind1):
        ra, dec = hp.pix2ang(Nside, hp_ind, nest=True, lonlat=True)
        theta, phi = convert_radec2thetaphi(ra, dec, att_q)
        logging.info("Starting hp_ind %d" % (hp_ind))
        res_df = analysis_at_theta_phi(
            theta,
            phi,
            bkg_bf_params,
            bkg_mod,
            flux_mod,
            ev_data0,
            ebins0,
            ebins1,
            tbins0,
            tbins1,
        )
        res_df["dt"] = res_df["time"] - trigger_time
        res_df["ra"] = ra
        res_df["dec"] = dec
        res_df["hp_ind"] = hp_ind
        save_fname = os.path.join(args.work_dir, "res_hpind_%d_.csv" % (hp_ind))
        res_df.to_csv(save_fname)
        logging.info("wrote results to, ")
        logging.info(save_fname)
if __name__ == "__main__":
    # parse the command-line arguments and run the scan
    main(cli())
|
Swift-BATREPO_NAMENITRATESPATH_START.@NITRATES_extracted@NITRATES-main@nitrates@archive@do_OutFoV_scan.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.