metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "test_context_manager.py",
"repo_name": "AndrewAnnex/SpiceyPy",
"repo_path": "SpiceyPy_extracted/SpiceyPy-main/src/spiceypy/tests/test_context_manager.py",
"type": "Python"
}
|
import spiceypy as spice
from spiceypy.tests.gettestkernels import (
get_standard_kernels,
write_test_meta_kernel,
cleanup_core_kernels,
CoreKernels,
)
import pytest
# Fetch the standard test kernels and write the test meta-kernel once at
# import time so every test in this module can assume they exist on disk.
get_standard_kernels()
write_test_meta_kernel()
# cleanup_core_kernels()
@pytest.mark.parametrize(
    "expected_length,kernel_list",
    [
        (1, CoreKernels.lsk),
        (len(CoreKernels.standardKernelList) + 1, CoreKernels.testMetaKernel),
        (2, [CoreKernels.lsk, CoreKernels.pck]),
        (
            len(CoreKernels.standardKernelList) + 2,
            [CoreKernels.lsk, CoreKernels.testMetaKernel],
        ),
    ],
)
def test_input_types(expected_length, kernel_list):
    """
    KernelPool must accept a single kernel, a single meta-kernel, a list of
    kernels, a list of meta-kernel files, and a list combining both.
    """
    # The pool starts empty, holds exactly the expected number of kernels
    # inside the context manager, and is empty again once it exits.
    assert spice.ktotal("all") == 0
    with spice.KernelPool(kernel_list):
        loaded = spice.ktotal("all")
        assert loaded == expected_length
    assert spice.ktotal("all") == 0
@pytest.mark.parametrize(
    "local_len,global_len,local_kernels,global_kernels",
    [
        (1, 1, [CoreKernels.lsk], [CoreKernels.pck]),
        (
            1,
            len(CoreKernels.standardKernelList) + 1,
            [CoreKernels.lsk],
            CoreKernels.testMetaKernel,
        ),
        (
            len(CoreKernels.standardKernelList) + 1,
            len(CoreKernels.standardKernelList) + 1,
            CoreKernels.testMetaKernel,
            CoreKernels.testMetaKernel,
        ),
        (
            len(CoreKernels.standardKernelList) + 1,
            1,
            CoreKernels.testMetaKernel,
            [CoreKernels.lsk],
        ),
    ],
)
def test_side_effect(local_len, global_len, local_kernels, global_kernels):
    """
    Kernels furnished outside a KernelPool must be visible before and after
    the context manager, while only the pool's own (local) kernels are
    visible inside it.
    """
    assert spice.ktotal("all") == 0
    try:
        spice.furnsh(global_kernels)
        assert spice.ktotal("all") == global_len
        with spice.KernelPool(local_kernels):
            # Inside the pool only the local kernels are loaded.
            assert spice.ktotal("all") == local_len
        # Leaving the pool restores the surrounding (global) kernel set.
        assert spice.ktotal("all") == global_len
    finally:
        # Always clear so subsequent tests start from an empty pool.
        spice.kclear()
    assert spice.ktotal("all") == 0
def test_invalid_input():
    """
    Kernels loaded before a failure must still be unloaded if an exception
    is raised while the pool is loading its kernel list.
    """
    assert spice.ktotal("all") == 0
    bad_kernels = CoreKernels.standardKernelList + ["foo.file"]
    # "foo.file" does not exist, so entering the pool must fail ...
    with pytest.raises(spice.utils.exceptions.SpiceNOSUCHFILE):
        with spice.KernelPool(bad_kernels):
            pass
    # ... and every kernel furnished before the failure is rolled back.
    assert spice.ktotal("all") == 0
@pytest.mark.parametrize(
    "expected_len,kernel_list",
    [
        (1, CoreKernels.lsk),
        (len(CoreKernels.standardKernelList) + 1, CoreKernels.testMetaKernel),
        (2, [CoreKernels.lsk, CoreKernels.pck]),
        (
            len(CoreKernels.standardKernelList) + 2,
            [CoreKernels.lsk, CoreKernels.testMetaKernel],
        ),
    ],
)
def test_unload_if_error(expected_len, kernel_list):
    """
    Kernels must be unloaded when an error is raised inside the body of the
    context manager.
    """
    assert spice.ktotal("all") == 0
    message = "Error in user code"
    with pytest.raises(spice.utils.exceptions.SpiceyPyRuntimeError, match=message):
        with spice.KernelPool(kernel_list):
            # Kernels are fully loaded before the simulated user error.
            assert spice.ktotal("all") == expected_len
            raise spice.utils.exceptions.SpiceyPyRuntimeError(message)
    assert spice.ktotal("all") == 0
def test_actually_works():
    """
    A kernel-dependent SPICE routine must succeed inside the context manager.
    """
    assert spice.ktotal("all") == 0
    with spice.KernelPool([CoreKernels.lsk]):
        # str2et requires the leapseconds kernel supplied by the pool.
        et = spice.str2et("06-28-2000")
        assert et == 15422464.184185712
    assert spice.ktotal("all") == 0
|
AndrewAnnexREPO_NAMESpiceyPyPATH_START.@SpiceyPy_extracted@SpiceyPy-main@src@spiceypy@tests@test_context_manager.py@.PATH_END.py
|
{
"filename": "vds_simple.py",
"repo_name": "h5py/h5py",
"repo_path": "h5py_extracted/h5py-master/examples/vds_simple.py",
"type": "Python"
}
|
"""A simple example of building a virtual dataset.
This makes four 'source' HDF5 files, each with a 1D dataset of 100 numbers.
Then it makes a single 4x100 virtual dataset in a separate file, exposing
the four sources as one dataset.
"""
import h5py
import numpy as np
# create some sample data
data = np.arange(0, 100).reshape(1, 100) + np.arange(1, 5).reshape(4, 1)
# Create source files (0.h5 to 3.h5)
for n in range(4):
with h5py.File(f"{n}.h5", "w") as f:
d = f.create_dataset("data", (100,), "i4", data[n])
# Assemble virtual dataset
layout = h5py.VirtualLayout(shape=(4, 100), dtype="i4")
for n in range(4):
filename = "{}.h5".format(n)
vsource = h5py.VirtualSource(filename, "data", shape=(100,))
layout[n] = vsource
# Add virtual dataset to output file
with h5py.File("VDS.h5", "w", libver="latest") as f:
f.create_virtual_dataset("vdata", layout, fillvalue=-5)
f.create_dataset("data", data=data, dtype="i4")
# read data back
# virtual dataset is transparent for reader!
with h5py.File("VDS.h5", "r") as f:
print("Virtual dataset:")
print(f["vdata"][:, :10])
print("Normal dataset:")
print(f["data"][:, :10])
|
h5pyREPO_NAMEh5pyPATH_START.@h5py_extracted@h5py-master@examples@vds_simple.py@.PATH_END.py
|
{
"filename": "_text.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/ternary/caxis/title/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``layout.ternary.caxis.title.text`` string property."""

    def __init__(
        self, plotly_name="text", parent_name="layout.ternary.caxis.title", **kwargs
    ):
        # Defaults mirror the Plotly schema; explicit kwargs take precedence.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "info")
        super(TextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@ternary@caxis@title@_text.py@.PATH_END.py
|
{
"filename": "index.md",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/docs/source/learn/core_notebooks/index.md",
"type": "Markdown"
}
|
(core_notebooks)=
# Notebooks on core features
:::{toctree}
:maxdepth: 1
pymc_overview
GLM_linear
model_comparison
posterior_predictive
dimensionality
pymc_pytensor
Gaussian_Processes
:::
:::{note}
The notebooks above are re-executed for each version of the library
(selectable from the navigation bar). In addition, a much larger gallery
of example notebooks is available under the {doc}`"Examples" tab <nb:gallery>`.
Those example notebooks are executed less frequently and independently of this cycle.
Each one includes a watermark showing which library versions were used to run it.
:::
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@docs@source@learn@core_notebooks@index.md@.PATH_END.py
|
{
"filename": "_button.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/layout/updatemenu/_button.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Button(_BaseLayoutHierarchyType):
    """One button of a ``layout.updatemenu``.

    Each button describes a Plotly.js API call (``method`` plus ``args``)
    that is executed when the button is clicked.
    """

    # class properties
    # --------------------
    _parent_path_str = "layout.updatemenu"
    _path_str = "layout.updatemenu.button"
    _valid_props = {
        "args",
        "args2",
        "execute",
        "label",
        "method",
        "name",
        "templateitemname",
        "visible",
    }

    # args
    # ----
    @property
    def args(self):
        """
        Sets the arguments values to be passed to the Plotly method set
        in `method` on click.

        The 'args' property is an info array that may be specified as a
        list or tuple of up to 3 elements, each accepting values of any
        type.

        Returns
        -------
        list
        """
        return self["args"]

    @args.setter
    def args(self, val):
        self["args"] = val

    # args2
    # -----
    @property
    def args2(self):
        """
        Sets a 2nd set of `args`, these arguments values are passed to
        the Plotly method set in `method` when clicking this button
        while in the active state. Use this to create toggle buttons.

        The 'args2' property is an info array that may be specified as
        a list or tuple of up to 3 elements, each accepting values of
        any type.

        Returns
        -------
        list
        """
        return self["args2"]

    @args2.setter
    def args2(self, val):
        self["args2"] = val

    # execute
    # -------
    @property
    def execute(self):
        """
        When true, the API method is executed. When false, all other
        behaviors are the same and command execution is skipped. This
        may be useful when hooking into, for example, the
        `plotly_buttonclicked` method and executing the API command
        manually without losing the benefit of the updatemenu
        automatically binding to the state of the plot through the
        specification of `method` and `args`.

        The 'execute' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["execute"]

    @execute.setter
    def execute(self, val):
        self["execute"] = val

    # label
    # -----
    @property
    def label(self):
        """
        Sets the text label to appear on the button.

        The 'label' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["label"]

    @label.setter
    def label(self, val):
        self["label"] = val

    # method
    # ------
    @property
    def method(self):
        """
        Sets the Plotly method to be called on click. If the `skip`
        method is used, the API updatemenu will function as normal but
        will perform no API calls and will not bind automatically to
        state updates. This may be used to create a component interface
        and attach to updatemenu events manually via JavaScript.

        The 'method' property is an enumeration that may be specified as
        one of the following values:
            ['restyle', 'relayout', 'animate', 'update', 'skip']

        Returns
        -------
        Any
        """
        return self["method"]

    @method.setter
    def method(self, val):
        self["method"] = val

    # name
    # ----
    @property
    def name(self):
        """
        When used in a template, named items are created in the output
        figure in addition to any items the figure already has in this
        array. You can modify these items in the output figure by
        making your own item with `templateitemname` matching this
        `name` alongside your modifications (including `visible: false`
        or `enabled: false` to hide it). Has no effect outside of a
        template.

        The 'name' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["name"]

    @name.setter
    def name(self, val):
        self["name"] = val

    # templateitemname
    # ----------------
    @property
    def templateitemname(self):
        """
        Used to refer to a named item in this array in the template.
        Named items from the template will be created even without a
        matching item in the input figure, but you can modify one by
        making an item with `templateitemname` matching its `name`,
        alongside your modifications (including `visible: false` or
        `enabled: false` to hide it). If there is no template or no
        matching item, this item will be hidden unless you explicitly
        show it with `visible: true`.

        The 'templateitemname' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["templateitemname"]

    @templateitemname.setter
    def templateitemname(self, val):
        self["templateitemname"] = val

    # visible
    # -------
    @property
    def visible(self):
        """
        Determines whether or not this button is visible.

        The 'visible' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["visible"]

    @visible.setter
    def visible(self, val):
        self["visible"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        args
            Sets the arguments values to be passed to the Plotly
            method set in `method` on click.
        args2
            Sets a 2nd set of `args`, these arguments values are
            passed to the Plotly method set in `method` when
            clicking this button while in the active state. Use
            this to create toggle buttons.
        execute
            When true, the API method is executed. When false, all
            other behaviors are the same and command execution is
            skipped. This may be useful when hooking into, for
            example, the `plotly_buttonclicked` method and
            executing the API command manually without losing the
            benefit of the updatemenu automatically binding to the
            state of the plot through the specification of `method`
            and `args`.
        label
            Sets the text label to appear on the button.
        method
            Sets the Plotly method to be called on click. If the
            `skip` method is used, the API updatemenu will function
            as normal but will perform no API calls and will not
            bind automatically to state updates. This may be used
            to create a component interface and attach to
            updatemenu events manually via JavaScript.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        visible
            Determines whether or not this button is visible.
        """

    def __init__(
        self,
        arg=None,
        args=None,
        args2=None,
        execute=None,
        label=None,
        method=None,
        name=None,
        templateitemname=None,
        visible=None,
        **kwargs
    ):
        """
        Construct a new Button object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.updatemenu.Button`
        args
            Sets the arguments values to be passed to the Plotly
            method set in `method` on click.
        args2
            Sets a 2nd set of `args`, passed to the Plotly method
            set in `method` when clicking this button while in the
            active state. Use this to create toggle buttons.
        execute
            When true, the API method is executed. When false, all
            other behaviors are the same and command execution is
            skipped.
        label
            Sets the text label to appear on the button.
        method
            Sets the Plotly method to be called on click. If the
            `skip` method is used, no API calls are performed and
            the button does not bind automatically to state
            updates.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template.
        visible
            Determines whether or not this button is visible.

        Returns
        -------
        Button
        """
        super(Button, self).__init__("buttons")

        # Internal fast path: re-parenting an existing object skips
        # validation entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Copy so popping properties below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.updatemenu.Button
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.updatemenu.Button`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # For each property: an explicit keyword argument wins over the
        # corresponding entry in ``arg``; unset (None) values are skipped.
        for _prop, _explicit in (
            ("args", args),
            ("args2", args2),
            ("execute", execute),
            ("label", label),
            ("method", method),
            ("name", name),
            ("templateitemname", templateitemname),
            ("visible", visible),
        ):
            _v = arg.pop(_prop, None)
            _v = _explicit if _explicit is not None else _v
            if _v is not None:
                self[_prop] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@layout@updatemenu@_button.py@.PATH_END.py
|
{
"filename": "api.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/frontends/eagle/api.py",
"type": "Python"
}
|
from . import tests
from .data_structures import EagleDataset, EagleNetworkDataset
from .fields import EagleNetworkFieldInfo
from .io import IOHandlerEagleNetwork
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@frontends@eagle@api.py@.PATH_END.py
|
{
"filename": "_estimator_html_repr.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py3/sklearn/utils/_estimator_html_repr.py",
"type": "Python"
}
|
import html
import itertools
from contextlib import closing
from inspect import isclass
from io import StringIO
from pathlib import Path
from string import Template
from .. import __version__, config_context
from .fixes import parse_version
class _IDCounter:
"""Generate sequential ids with a prefix."""
def __init__(self, prefix):
self.prefix = prefix
self.count = 0
def get_id(self):
self.count += 1
return f"{self.prefix}-{self.count}"
def _get_css_style():
    """Load the CSS used to style the estimator HTML diagram.

    The stylesheet ships alongside this module as a package resource.
    """
    from importlib.resources import files

    css_path = files("sklearn") / "utils" / "_estimator_html_repr.css"
    return css_path.read_text(encoding="utf-8")
# Process-wide id generators: ids stay unique across every HTML repr
# rendered in this interpreter session.
_CONTAINER_ID_COUNTER = _IDCounter("sk-container-id")
_ESTIMATOR_ID_COUNTER = _IDCounter("sk-estimator-id")
# The stylesheet is read once at import time and reused for every repr.
_CSS_STYLE = _get_css_style()
class _VisualBlock:
"""HTML Representation of Estimator
Parameters
----------
kind : {'serial', 'parallel', 'single'}
kind of HTML block
estimators : list of estimators or `_VisualBlock`s or a single estimator
If kind != 'single', then `estimators` is a list of
estimators.
If kind == 'single', then `estimators` is a single estimator.
names : list of str, default=None
If kind != 'single', then `names` corresponds to estimators.
If kind == 'single', then `names` is a single string corresponding to
the single estimator.
name_details : list of str, str, or None, default=None
If kind != 'single', then `name_details` corresponds to `names`.
If kind == 'single', then `name_details` is a single string
corresponding to the single estimator.
dash_wrapped : bool, default=True
If true, wrapped HTML element will be wrapped with a dashed border.
Only active when kind != 'single'.
"""
def __init__(
self, kind, estimators, *, names=None, name_details=None, dash_wrapped=True
):
self.kind = kind
self.estimators = estimators
self.dash_wrapped = dash_wrapped
if self.kind in ("parallel", "serial"):
if names is None:
names = (None,) * len(estimators)
if name_details is None:
name_details = (None,) * len(estimators)
self.names = names
self.name_details = name_details
def _sk_visual_block_(self):
return self
def _write_label_html(
    out,
    name,
    name_details,
    outer_class="sk-label-container",
    inner_class="sk-label",
    checked=False,
    doc_link="",
    is_fitted_css_class="",
    is_fitted_icon="",
):
    """Write labeled html with or without a dropdown with named details.

    Parameters
    ----------
    out : file-like object
        The file to write the HTML representation to.
    name : str
        The label for the estimator. It corresponds either to the estimator class name
        for a simple estimator or in the case of a `Pipeline` and `ColumnTransformer`,
        it corresponds to the name of the step.
    name_details : str
        The details to show as content in the dropdown part of the toggleable label. It
        can contain information such as non-default parameters or column information for
        `ColumnTransformer`.
    outer_class : {"sk-label-container", "sk-item"}, default="sk-label-container"
        The CSS class for the outer container.
    inner_class : {"sk-label", "sk-estimator"}, default="sk-label"
        The CSS class for the inner container.
    checked : bool, default=False
        Whether the dropdown is folded or not. With a single estimator, we intend to
        unfold the content.
    doc_link : str, default=""
        The link to the documentation for the estimator. If an empty string, no link is
        added to the diagram. This can be generated for an estimator if it uses the
        `_HTMLDocumentationLinkMixin`.
    is_fitted_css_class : {"", "fitted"}
        The CSS class to indicate whether or not the estimator is fitted. The
        empty string means that the estimator is not fitted and "fitted" means that the
        estimator is fitted.
    is_fitted_icon : str, default=""
        The HTML representation to show the fitted information in the diagram. An empty
        string means that no information is shown.
    """
    # we need to add some padding to the left of the label to be sure it is centered
    padding_label = " " if is_fitted_icon else ""  # add padding for the "i" char
    out.write(
        f'<div class="{outer_class}"><div'
        f' class="{inner_class} {is_fitted_css_class} sk-toggleable">'
    )
    # Escape user-controlled text before embedding it in the HTML output.
    name = html.escape(name)
    if name_details is not None:
        name_details = html.escape(str(name_details))
        label_class = (
            f"sk-toggleable__label {is_fitted_css_class} sk-toggleable__label-arrow"
        )
        checked_str = "checked" if checked else ""
        # Each toggleable label needs a unique id to pair <input> and <label>.
        est_id = _ESTIMATOR_ID_COUNTER.get_id()
        if doc_link:
            doc_label = "<span>Online documentation</span>"
            if name is not None:
                doc_label = f"<span>Documentation for {name}</span>"
            doc_link = (
                f'<a class="sk-estimator-doc-link {is_fitted_css_class}"'
                f' rel="noreferrer" target="_blank" href="{doc_link}">?{doc_label}</a>'
            )
            padding_label += " "  # add additional padding for the "?" char
        # The checkbox/label pair implements the CSS-only dropdown toggle.
        fmt_str = (
            '<input class="sk-toggleable__control sk-hidden--visually"'
            f' id="{est_id}" '
            f'type="checkbox" {checked_str}><label for="{est_id}" '
            f'class="{label_class} {is_fitted_css_class}">{padding_label}{name}'
            f"{doc_link}{is_fitted_icon}</label><div "
            f'class="sk-toggleable__content {is_fitted_css_class}">'
            f"<pre>{name_details}</pre></div> "
        )
        out.write(fmt_str)
    else:
        # No details: render a plain, non-toggleable label.
        out.write(f"<label>{name}</label>")
    out.write("</div></div>")  # outer_class inner_class
def _get_visual_block(estimator):
    """Decide how an estimator should be displayed in the HTML diagram."""

    def _single(names, name_details):
        # Shorthand for the common single-estimator block.
        return _VisualBlock(
            "single", estimator, names=names, name_details=name_details
        )

    # An estimator may provide its own display hook; a failing hook falls
    # back to the plain single-block view.
    if hasattr(estimator, "_sk_visual_block_"):
        try:
            return estimator._sk_visual_block_()
        except Exception:
            return _single(estimator.__class__.__name__, str(estimator))

    if isinstance(estimator, str):
        return _single(estimator, estimator)
    if estimator is None:
        return _single("None", "None")

    # check if estimator looks like a meta estimator (wraps estimators)
    if hasattr(estimator, "get_params") and not isclass(estimator):
        wrapped = [
            (key, sub)
            for key, sub in estimator.get_params(deep=False).items()
            if hasattr(sub, "get_params") and hasattr(sub, "fit") and not isclass(sub)
        ]
        if wrapped:
            return _VisualBlock(
                "parallel",
                [sub for _, sub in wrapped],
                names=[f"{key}: {sub.__class__.__name__}" for key, sub in wrapped],
                name_details=[str(sub) for _, sub in wrapped],
            )

    return _single(estimator.__class__.__name__, str(estimator))
def _write_estimator_html(
    out,
    estimator,
    estimator_label,
    estimator_label_details,
    is_fitted_css_class,
    is_fitted_icon="",
    first_call=False,
):
    """Write estimator to html in serial, parallel, or by itself (single).

    For multiple estimators, this function is called recursively.

    Parameters
    ----------
    out : file-like object
        The file to write the HTML representation to.
    estimator : estimator object
        The estimator to visualize.
    estimator_label : str
        The label for the estimator. It corresponds either to the estimator class name
        for simple estimator or in the case of `Pipeline` and `ColumnTransformer`, it
        corresponds to the name of the step.
    estimator_label_details : str
        The details to show as content in the dropdown part of the toggleable label.
        It can contain information as non-default parameters or column information for
        `ColumnTransformer`.
    is_fitted_css_class : {"", "fitted"}
        The CSS class to indicate whether or not the estimator is fitted or not. The
        empty string means that the estimator is not fitted and "fitted" means that the
        estimator is fitted.
    is_fitted_icon : str, default=""
        The HTML representation to show the fitted information in the diagram. An empty
        string means that no information is shown. If the estimator to be shown is not
        the first estimator (i.e. `first_call=False`), `is_fitted_icon` is always an
        empty string.
    first_call : bool, default=False
        Whether this is the first time this function is called.
    """
    if first_call:
        est_block = _get_visual_block(estimator)
    else:
        # Only the top-level estimator shows the fitted icon; nested ones
        # are rendered with print_changed_only so dropdowns show only
        # non-default parameters.
        is_fitted_icon = ""
        with config_context(print_changed_only=True):
            est_block = _get_visual_block(estimator)
    # `estimator` can also be an instance of `_VisualBlock`
    if hasattr(estimator, "_get_doc_link"):
        doc_link = estimator._get_doc_link()
    else:
        doc_link = ""
    if est_block.kind in ("serial", "parallel"):
        dashed_wrapped = first_call or est_block.dash_wrapped
        dash_cls = " sk-dashed-wrapped" if dashed_wrapped else ""
        out.write(f'<div class="sk-item{dash_cls}">')
        if estimator_label:
            _write_label_html(
                out,
                estimator_label,
                estimator_label_details,
                doc_link=doc_link,
                is_fitted_css_class=is_fitted_css_class,
                is_fitted_icon=is_fitted_icon,
            )
        kind = est_block.kind
        out.write(f'<div class="sk-{kind}">')
        est_infos = zip(est_block.estimators, est_block.names, est_block.name_details)
        # Recurse into each child block.
        for est, name, name_details in est_infos:
            if kind == "serial":
                _write_estimator_html(
                    out,
                    est,
                    name,
                    name_details,
                    is_fitted_css_class=is_fitted_css_class,
                )
            else:  # parallel
                out.write('<div class="sk-parallel-item">')
                # wrap element in a serial visualblock
                serial_block = _VisualBlock("serial", [est], dash_wrapped=False)
                _write_estimator_html(
                    out,
                    serial_block,
                    name,
                    name_details,
                    is_fitted_css_class=is_fitted_css_class,
                )
                out.write("</div>")  # sk-parallel-item
        out.write("</div></div>")
    elif est_block.kind == "single":
        _write_label_html(
            out,
            est_block.names,
            est_block.name_details,
            outer_class="sk-item",
            inner_class="sk-estimator",
            checked=first_call,
            doc_link=doc_link,
            is_fitted_css_class=is_fitted_css_class,
            is_fitted_icon=is_fitted_icon,
        )
def estimator_html_repr(estimator):
    """Build a HTML representation of an estimator.

    Read more in the :ref:`User Guide <visualizing_composite_estimators>`.

    Parameters
    ----------
    estimator : estimator object
        The estimator to visualize.

    Returns
    -------
    html: str
        HTML representation of estimator.

    Examples
    --------
    >>> from sklearn.utils._estimator_html_repr import estimator_html_repr
    >>> from sklearn.linear_model import LogisticRegression
    >>> estimator_html_repr(LogisticRegression())
    '<style>...</div>'
    """
    # Imported lazily to avoid a circular import at module load time.
    from sklearn.exceptions import NotFittedError
    from sklearn.utils.validation import check_is_fitted

    if not hasattr(estimator, "fit"):
        status_label = "<span>Not fitted</span>"
        is_fitted_css_class = ""
    else:
        try:
            check_is_fitted(estimator)
            status_label = "<span>Fitted</span>"
            is_fitted_css_class = "fitted"
        except NotFittedError:
            status_label = "<span>Not fitted</span>"
            is_fitted_css_class = ""
    is_fitted_icon = (
        f'<span class="sk-estimator-doc-link {is_fitted_css_class}">'
        f"i{status_label}</span>"
    )
    with closing(StringIO()) as out:
        container_id = _CONTAINER_ID_COUNTER.get_id()
        # The CSS is scoped to this container id via template substitution.
        style_template = Template(_CSS_STYLE)
        style_with_id = style_template.substitute(id=container_id)
        estimator_str = str(estimator)
        # The fallback message is shown by default and loading the CSS sets
        # div.sk-text-repr-fallback to display: none to hide the fallback message.
        #
        # If the notebook is trusted, the CSS is loaded which hides the fallback
        # message. If the notebook is not trusted, then the CSS is not loaded and the
        # fallback message is shown by default.
        #
        # The reverse logic applies to HTML repr div.sk-container.
        # div.sk-container is hidden by default and the loading the CSS displays it.
        fallback_msg = (
            "In a Jupyter environment, please rerun this cell to show the HTML"
            " representation or trust the notebook. <br />On GitHub, the"
            " HTML representation is unable to render, please try loading this page"
            " with nbviewer.org."
        )
        html_template = (
            f"<style>{style_with_id}</style>"
            f'<div id="{container_id}" class="sk-top-container">'
            '<div class="sk-text-repr-fallback">'
            f"<pre>{html.escape(estimator_str)}</pre><b>{fallback_msg}</b>"
            "</div>"
            '<div class="sk-container" hidden>'
        )
        out.write(html_template)
        _write_estimator_html(
            out,
            estimator,
            estimator.__class__.__name__,
            estimator_str,
            first_call=True,
            is_fitted_css_class=is_fitted_css_class,
            is_fitted_icon=is_fitted_icon,
        )
        out.write("</div></div>")
        html_output = out.getvalue()
        return html_output
class _HTMLDocumentationLinkMixin:
    """Mixin class allowing to generate a link to the API documentation.

    This mixin relies on three attributes:
    - `_doc_link_module`: it corresponds to the root module (e.g. `sklearn`). Using this
      mixin, the default value is `sklearn`.
    - `_doc_link_template`: it corresponds to the template used to generate the
      link to the API documentation. Using this mixin, the default value is
      `"https://scikit-learn.org/{version_url}/modules/generated/
      {estimator_module}.{estimator_name}.html"`.
    - `_doc_link_url_param_generator`: it corresponds to a function that generates the
      parameters to be used in the template when the estimator module and name are not
      sufficient.

    The method :meth:`_get_doc_link` generates the link to the API documentation for a
    given estimator.

    This useful provides all the necessary states for
    :func:`sklearn.utils.estimator_html_repr` to generate a link to the API
    documentation for the estimator HTML diagram.

    Examples
    --------
    If the default values for `_doc_link_module`, `_doc_link_template` are not suitable,
    then you can override them:
    >>> from sklearn.base import BaseEstimator
    >>> estimator = BaseEstimator()
    >>> estimator._doc_link_template = "https://website.com/{single_param}.html"
    >>> def url_param_generator(estimator):
    ...     return {"single_param": estimator.__class__.__name__}
    >>> estimator._doc_link_url_param_generator = url_param_generator
    >>> estimator._get_doc_link()
    'https://website.com/BaseEstimator.html'
    """

    # Root module an estimator must belong to for a link to be generated.
    _doc_link_module = "sklearn"
    # Optional callable producing the template parameters; None means the
    # default module/name parameters are used.
    _doc_link_url_param_generator = None

    @property
    def _doc_link_template(self):
        sklearn_version = parse_version(__version__)
        # Dev builds link to the "dev" docs; releases link to major.minor.
        if sklearn_version.dev is None:
            version_url = f"{sklearn_version.major}.{sklearn_version.minor}"
        else:
            version_url = "dev"
        # The attribute name is a literal string, so Python's double-underscore
        # name mangling does not apply; this pairs with the setter below.
        return getattr(
            self,
            "__doc_link_template",
            (
                f"https://scikit-learn.org/{version_url}/modules/generated/"
                "{estimator_module}.{estimator_name}.html"
            ),
        )

    @_doc_link_template.setter
    def _doc_link_template(self, value):
        # Stored under the literal name "__doc_link_template" (no mangling,
        # see the getter above).
        setattr(self, "__doc_link_template", value)

    def _get_doc_link(self):
        """Generates a link to the API documentation for a given estimator.

        This method generates the link to the estimator's documentation page
        by using the template defined by the attribute `_doc_link_template`.

        Returns
        -------
        url : str
            The URL to the API documentation for this estimator. If the estimator does
            not belong to module `_doc_link_module`, the empty string (i.e. `""`) is
            returned.
        """
        if self.__class__.__module__.split(".")[0] != self._doc_link_module:
            return ""

        if self._doc_link_url_param_generator is None:
            estimator_name = self.__class__.__name__
            # Construct the estimator's module name, up to the first private submodule.
            # This works because in scikit-learn all public estimators are exposed at
            # that level, even if they actually live in a private sub-module.
            estimator_module = ".".join(
                itertools.takewhile(
                    lambda part: not part.startswith("_"),
                    self.__class__.__module__.split("."),
                )
            )
            return self._doc_link_template.format(
                estimator_module=estimator_module, estimator_name=estimator_name
            )
        return self._doc_link_template.format(
            **self._doc_link_url_param_generator(self)
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py3@sklearn@utils@_estimator_html_repr.py@.PATH_END.py
|
{
"filename": "plot_dlogp.py",
"repo_name": "philbull/RadioFisher",
"repo_path": "RadioFisher_extracted/RadioFisher-master/plotting/plot_dlogp.py",
"type": "Python"
}
|
#!/usr/bin/python
"""
Process EOS Fisher matrices and plot P(k).
"""
# NOTE: print statements converted to print() calls so the script runs under
# Python 3 as well as Python 2 (the old `print x` form is a Py3 syntax error).
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm
import os
cosmo = rf.experiments.cosmo
names = ['EuclidRef_paper', 'exptL_paper', 'aexptM_paper', 'yCHIME_paper'] #'exptS_paper']
colours = ['#CC0000', '#1619A1', '#5B9C0A', '#990A9C', 'c', 'm'] # DETF/F/M/S
labels = ['DETF IV', 'Facility', 'Stage II', 'Stage I']
linestyle = [[2, 4, 6, 4], [], [8, 4], [3, 4], [], [], [], []]
names = ['yCHIME_paper', 'yCHIME_nocut_paper', 'yCHIME_avglow_paper', 'EuclidRef_paper']
labels = names
"""
#names = ['EuclidRef', 'cexptLx', 'cexptLy', 'iexptOpt']
#labels = ['Euclid', 'Fac. quadrature', 'Fac. min.', 'MEGA']
#names = ['yCHIME', 'yCHIME_nocut']
#labels = ['CHIME', 'CHIME nocut']
names = ['testSKA1SURfull1', 'ftestSKA1SURfull1_fixedfov']
labels = ['SKA1-SUR old', 'SKA1-SUR fixed FOV']
names = ['fSKA1SURfull1', 'fSKA1SURfull2', 'SKA1SURfull2', 'SKA1MIDfull1', 'SKA1MIDfull2', 'BOSS']
labels = ['SKA1-SUR Full B1', 'SKA1-SUR Full B2 Fixed FOV', 'SKA1-SUR Full B2', 'SKA1-MID Full B1', 'SKA1-MID Full B2', 'BOSS']
names = ['FAST', 'FAST4yr', 'fSKA1SURfull1', 'EuclidRef', 'yCHIME']
labels = ['FAST 10k hrs', 'FAST 4yr', 'SKA1-SUR Full B1', 'Euclid', 'CHIME']
linestyle = [[], [], [], [], [], [], []]
names = ['SKA1MID350XXX_25000', 'fSKA1SUR350XXX_25000']
labels = ['SKA1MID350', 'fSKA1SUR350']
"""
# Get f_bao(k) function
cosmo = rf.load_power_spectrum(cosmo, "cache_pk.dat", force_load=True)
fbao = cosmo['fbao']
# Fiducial value and plotting
P.subplot(111)
for k in range(len(names)):
    root = "output/" + names[k]
    # Load cosmo fns.
    dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
    zc, Hc, dAc, Dc, fc = dat
    z, H, dA, D, f = np.genfromtxt(root+"-cosmofns-smooth.dat").T
    kc = np.genfromtxt(root+"-fisher-kc.dat").T
    # Load Fisher matrices as fn. of z
    Nbins = zc.size
    F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]
    # EOS FISHER MATRIX
    # Actually, (aperp, apar) are (D_A, H)
    pnames = rf.load_param_names(root+"-fisher-full-0.dat")
    zfns = []; excl = []
    F, lbls = rf.combined_fisher_matrix( F_list, expand=zfns, names=pnames,
                                         exclude=excl )
    # Just do the simplest thing for P(k) and get 1/sqrt(F)
    cov = [np.sqrt(1. / np.diag(F)[lbls.index(lbl)]) for lbl in lbls if "pk" in lbl]
    cov = np.array(cov)
    pk = cosmo['pk_nobao'](kc) * (1. + fbao(kc))
    # Replace nan/inf values
    cov[np.where(np.isnan(cov))] = 1e10
    cov[np.where(np.isinf(cov))] = 1e10
    pw0 = rf.indexes_for_sampled_fns(11, zc.size, zfns)
    pwa = rf.indexes_for_sampled_fns(12, zc.size, zfns)
    #for jj in range(kc.size):
    #    print "%5.5e %5.5e" % (kc[jj], cov[jj])
    print("-"*50)
    print(names[k])
    print(lbls[pw0], 1. / np.sqrt(F[pw0,pw0]))
    print(lbls[pwa], 1. / np.sqrt(F[pwa,pwa]))
    """
    # Output dP/P
    print "-"*50
    print names[k]
    for jj in range(zc.size):
        if zc[jj] == 1.:
            cov = [np.sqrt(1. / np.diag(F_list[jj])[lbls.index(lbl)]) for lbl in lbls if "pk" in lbl]
            for _k in range(kc.size):
                print "%5.5e %5.5e" % (kc[_k], cov[_k])
    """
    """
    if k == 0:
        # Plot shaded region
        P.fill_between(kc, np.ones(kc.size)*1e-10, cov, facecolor='#e1e1e1', edgecolor='none')
    else:
        # Plot errorbars
        P.plot(kc, cov, color=colours[k], label=labels[k], lw=2.2, ls=linestyle[k])
    """
    line = P.plot(kc, cov, color=colours[k], label=labels[k], lw=2.4, marker='None')
    #line = P.plot(kc/0.7, cov, color=colours[k], label=labels[k], lw=1.8, marker='.')#lw=2.4)
    # Set custom linestyle
    print(linestyle[k])
    line[0].set_dashes(linestyle[k])
#P.axhline(1., ls='dashed', color='k', lw=1.5)
#P.axvline(1e-2, ls='dashed', color='k', lw=1.5)
P.xscale('log')
P.yscale('log')
P.xlim((1.3e-3, 1.2e0))
P.ylim((9e-4, 1e1))
P.legend(loc='lower left', prop={'size':'large'}, frameon=False)
P.tick_params(axis='both', which='major', labelsize=20, size=8., width=1.5, pad=8.)
P.tick_params(axis='both', which='minor', labelsize=20, size=5., width=1.5)
P.xlabel(r"$k \,[\mathrm{Mpc}^{-1}]$", fontdict={'fontsize':'xx-large'})
P.ylabel(r"$\Delta P / P$", fontdict={'fontsize':'xx-large'})
P.tight_layout()
# Set size
P.gcf().set_size_inches(8.,6.)
#P.savefig("fig04-dlogp.pdf", transparent=True) # 100
P.show()
|
philbullREPO_NAMERadioFisherPATH_START.@RadioFisher_extracted@RadioFisher-master@plotting@plot_dlogp.py@.PATH_END.py
|
{
"filename": "plot.py",
"repo_name": "revoltek/losoto",
"repo_path": "losoto_extracted/losoto-master/losoto/operations/plot.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from losoto.lib_operations import *
from losoto._logging import logger as logging
from losoto.lib_unwrap import unwrap, unwrap_2d
logging.debug('Loading PLOT module.')
def _run_parser(soltab, parser, step):
    """Read the options of a 'plot' parset step and dispatch to run().

    All values are pulled from the parset first, the option names are spell
    checked, and the collected options are forwarded to run() by keyword.
    """
    options = {
        'axesInPlot': parser.getarraystr(step, 'axesInPlot'),  # mandatory: no default
        'axisInTable': parser.getstr(step, 'axisInTable', ''),
        'axisInCol': parser.getstr(step, 'axisInCol', ''),
        'axisDiff': parser.getstr(step, 'axisDiff', ''),
        'NColFig': parser.getint(step, 'NColFig', 0),
        'figSize': parser.getarrayint(step, 'figSize', [0, 0]),
        'markerSize': parser.getint(step, 'markerSize', 2),
        'minmax': parser.getarrayfloat(step, 'minmax', [0., 0.]),
        'log': parser.getstr(step, 'log', ''),
        'plotFlag': parser.getbool(step, 'plotFlag', False),
        'doUnwrap': parser.getbool(step, 'doUnwrap', False),
        'refAnt': parser.getstr(step, 'refAnt', ''),
        'refDir': parser.getstr(step, 'refDir', ''),
        'soltabsToAdd': parser.getarraystr(step, 'soltabsToAdd', []),
        'makeAntPlot': parser.getbool(step, 'makeAntPlot', False),
        'makeMovie': parser.getbool(step, 'makeMovie', False),
        'prefix': parser.getstr(step, 'prefix', ''),
        # ncpu is a global option, not a per-step one
        'ncpu': parser.getint('_global', 'ncpu', 0),
    }
    parser.checkSpelling(step, soltab, ['axesInPlot', 'axisInTable', 'axisInCol', 'axisDiff', 'NColFig', 'figSize', 'markerSize', 'minmax', 'log',
                                        'plotFlag', 'doUnwrap', 'refAnt', 'refDir', 'soltabsToAdd', 'makeAntPlot', 'makeMovie', 'prefix'])
    return run(soltab, **options)
def _plot(Nplots, NColFig, figSize, markerSize, cmesh, axesInPlot, axisInTable, xvals, yvals, xlabelunit, ylabelunit, datatype, filename, titles, log, dataCube, minZ, maxZ, plotFlag, makeMovie, antCoords, outQueue):
    """Render one figure (a grid of subplots) and save it as <filename>.png.

    Intended to run as a multiprocManager worker; `outQueue` is part of the
    worker calling convention and is not used here. `dataCube` is a masked
    array indexed as [table, colour, (y,) x]; masked entries are flagged data.
    Depending on `cmesh`/`antCoords`, each subplot is an imshow map, an
    antenna-position scatter, or a 2D scatter of values vs. `xvals`.
    """
    import sys
    from itertools import cycle, chain
    if not 'matplotlib' in sys.modules:
        import matplotlib as mpl
        mpl.rcParams['xtick.labelsize'] = 20
        mpl.rcParams['font.size'] = 20
        mpl.use("Agg")
    import matplotlib.pyplot as plt # after setting the backend
    # Work on a local copy: figSize may alias the caller's list (even run()'s
    # default argument) and is modified below when autosizing.
    figSize = list(figSize)
    # find common min and max if not set
    flat = dataCube.filled(np.nan).flatten()
    if np.isnan(flat).all() or np.all(flat==0):
        minZ=-0.1; maxZ=0.1
    elif minZ == 0 and maxZ == 0:
        if datatype == 'phase':
            minZ = np.nanmin(flat)
            maxZ = np.nanmax(flat)
        elif datatype == 'amplitude' and len(axesInPlot) > 1:
            flat[np.isnan(flat)] = np.nanmedian(flat) # get rid of nans (problem in "<" below)
            maxZ = np.nanmedian( flat ) + 3*np.nanstd( flat[ (flat / np.nanmedian(flat) ) < 100 ] )
            maxZ = np.nanmin( [np.nanmax( flat ), maxZ] )
            minZ = np.nanmin( flat )
        else:
            minZ = np.nanmin(flat)
            maxZ = np.nanmax(flat)
        # prevent same min/max (still a problem at 0)
        if minZ == maxZ:
            if minZ > 0:
                minZ *= 0.99
                maxZ *= 1.01
            else:
                minZ *= 1.01
                maxZ *= 0.99
        # add some space for clock plots
        if datatype == 'Clock':
            minZ -= 1e-8
            maxZ += 1e-8
        logging.info("Autoset min: %f, max: %f" % (minZ, maxZ))
    # if user-defined number of col use that
    if NColFig != 0: Nc = NColFig
    else: Nc = int(np.ceil(np.sqrt(Nplots)))
    Nr = int(np.ceil(float(Nplots)/Nc))
    # autosize the figure (smaller for movies) when the user passed 0
    if figSize[0] == 0:
        if makeMovie: figSize[0]=5+2*Nc
        else: figSize[0]=10+3*Nc
    if figSize[1] == 0:
        if makeMovie: figSize[1]=4+1*Nr
        else: figSize[1]=8+2*Nr
    figgrid, axa = plt.subplots(Nr, Nc, sharex=True, sharey=True, figsize=figSize)
    if Nplots == 1: axa = np.array([axa])
    figgrid.subplots_adjust(hspace=0, wspace=0)
    axaiter = chain.from_iterable(axa)
    # axes label
    if len(axa.shape) == 1: # only one row
        [ax.set_xlabel(axesInPlot[0]+xlabelunit, fontsize=20) for ax in axa[:]]
        if cmesh:
            axa[0].set_ylabel(axesInPlot[1]+ylabelunit, fontsize=20)
        else:
            axa[0].set_ylabel(datatype+ylabelunit, fontsize=20)
    else:
        [ax.set_xlabel(axesInPlot[0]+xlabelunit, fontsize=20) for ax in axa[-1,:]]
        if cmesh:
            [ax.set_ylabel(axesInPlot[1]+ylabelunit, fontsize=20) for ax in axa[:,0]]
        else:
            [ax.set_ylabel(datatype+ylabelunit, fontsize=20) for ax in axa[:,0]]
    # if gaps in time, collapse and add a black vertical line on separation points
    if axesInPlot[0] == 'time' and cmesh == False:
        delta = np.abs(xvals[:-1] - xvals[1:])
        jumps = np.where( delta > 100*np.median(delta) )[0] # jump if larger than 100 times the median step
        # remove jumps
        for j in jumps: xvals[j+1:] -= delta[j]
        gap = xvals[-1] / 100 # 1%
        for j in jumps: xvals[j+1:] += gap
    im = None
    for Ntab, title in enumerate(titles):
        ax = axa.flatten()[Ntab]
        ax.text(.5, .9, title, horizontalalignment='center', fontsize=14, transform=ax.transAxes)
        # add vertical lines and numbers at jumps (numbers are the jump sizes)
        if axesInPlot[0] == 'time' and cmesh == False and not np.all(np.isnan(dataCube[Ntab].filled(np.nan))):
            flat = dataCube[Ntab].filled(np.nan).flatten()
            [ ax.axvline(xvals[j]+gap/2., color='k') for j in jumps ]
            if minZ != 0: texty = minZ + np.abs(np.nanmin(flat))*0.01
            else: texty = np.nanmin(dataCube[Ntab]) + np.abs(np.nanmin(flat))*0.01
            [ ax.text( xvals[j]+gap/2., texty, '%.0f' % delta[j], fontsize=10 ) for j in jumps ]
        # set log scales if activated
        if 'X' in log: ax.set_xscale('log')
        if 'Y' in log: ax.set_yscale('log')
        colors = cycle(['#377eb8','#b88637','#4daf4a','#984ea3','#ffff33','#f781bf'])
        for Ncol, data in enumerate(dataCube[Ntab]):
            # set color, use defined colors if a few lines, otherwise a continuum colormap
            if len(dataCube[Ntab]) <= 6:
                color = next(colors)
                colorFlag = '#e41a1c'
            else:
                color = plt.cm.jet(Ncol/float(len(dataCube[Ntab])-1)) # from 0 to 1
                colorFlag = 'k'
            vals = dataCube[Ntab][Ncol]
            # fully flagged: nothing to draw
            if np.ma.getmask(dataCube[Ntab][Ncol]).all():
                continue
            # 3D cmesh plot
            if cmesh:
                # stratch the imshow output to fill the plot size
                bbox = ax.get_window_extent().transformed(figgrid.dpi_scale_trans.inverted())
                aspect = ((xvals[-1]-xvals[0])*bbox.height)/((yvals[-1]-yvals[0])*bbox.width)
                if 'Z' in log:
                    if minZ == 0: minZ = np.log10(1e-6)
                    else: minZ = np.log10(minZ)
                    maxZ = np.log10(maxZ)
                    vals = np.log10(vals)
                if datatype == 'phase' or datatype == 'rotation':
                    cmap = plt.cm.hsv
                else:
                    try:
                        cmap = plt.cm.viridis
                    except AttributeError:
                        cmap = plt.cm.rainbow
                if not np.all(np.diff(yvals) == np.diff(yvals)[0]): # if not evenly spaced
                    gapsize = np.amax(np.abs(np.diff(yvals)))
                    gapidx = np.argmax(np.abs(np.diff(yvals))) # index BEFORE the gap
                    y_spacing = np.mean(np.diff(yvals)[np.diff(yvals) != gapsize]) # y-spacing outside of the gap
                    if gapsize > 2*y_spacing: # if gap greater 2 times y-spacing, pad NaNs
                        vals = np.insert(vals, gapidx+1, np.nan * np.ones((int(gapsize / y_spacing),len(vals[0]))), axis=0) # fill gap with NaNs
                # ugly fix to enforce min/max as imshow has some problems with very large numbers
                if not np.isnan(vals).all():
                    with np.errstate(invalid='ignore'): # silence warnings that occure for comparison to NaN values
                        vals.data[vals.filled(np.nanmedian(vals.data)) > maxZ] = maxZ
                        vals.data[vals.filled(np.nanmedian(vals.data)) < minZ] = minZ
                im = ax.imshow(vals.filled(np.nan), origin='lower', interpolation="none", cmap=cmap, norm=None, \
                        extent=[xvals[0],xvals[-1],yvals[0],yvals[-1]], aspect=str(aspect), vmin=minZ, vmax=maxZ)
            # make an antenna plot
            elif antCoords != []:
                ax.set_xlabel('')
                ax.set_ylabel('')
                ax.axes.get_xaxis().set_ticks([])
                ax.axes.get_yaxis().set_ticks([])
                #vals = (vals-0.9)/(1.1-0.9)
                areas = ( 5+vals*10 )**2 # normalize marker diameter in pts**2 to 15-30 pt - assumes vals are between 0 and 1!
                ax.scatter(antCoords[0], antCoords[1], c=vals, s=areas, cmap=plt.cm.jet, vmin=-0.5, vmax=0.5)
                size = np.max( [np.max(antCoords[0])-np.min(antCoords[0]), np.max(antCoords[1])-np.min(antCoords[1])] )*1.1 # make img squared
                ax.set_xlim( xmin=np.median(antCoords[0])-size/2., xmax=np.median(antCoords[0])+size/2. )
                ax.set_ylim( ymin=np.median(antCoords[1])-size/2., ymax=np.median(antCoords[1])+size/2. )
            # 2D scatter plot
            else:
                ax.plot(xvals[~vals.mask], vals[~vals.mask], 'o', color=color, markersize=markerSize, markeredgecolor='none') # flagged data are automatically masked
                if plotFlag:
                    ax.plot(xvals[vals.mask], vals.data[vals.mask], 'o', color=colorFlag, markersize=markerSize, markeredgecolor='none') # plot flagged points
                ax.set_xlim(xmin=min(xvals), xmax=max(xvals))
                ax.set_ylim(ymin=minZ, ymax=maxZ)
    if not im is None:
        # add a color bar to show scale
        figgrid.colorbar(im, ax=axa.ravel().tolist(), use_gridspec=True, fraction=0.02, pad=0.005, aspect=35)
    logging.info("Saving "+filename+'.png')
    try:
        figgrid.savefig(filename+'.png', bbox_inches='tight')
    except Exception:
        # bbox_inches='tight' can fail for some layouts: retry after tight_layout.
        # (narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate)
        figgrid.tight_layout()
        figgrid.savefig(filename+'.png')
    del im, figgrid
    plt.close()
def run(soltab, axesInPlot, axisInTable='', axisInCol='', axisDiff='', NColFig=0, figSize=[0,0], markerSize=2, minmax=[0,0], log='', \
        plotFlag=False, doUnwrap=False, refAnt='', refDir='', soltabsToAdd='', makeAntPlot=False, makeMovie=False, prefix='', ncpu=0):
    """
    This operation for LoSoTo implements basic plotting
    WEIGHT: flag-only compliant, no need for weight
    Parameters
    ----------
    axesInPlot : array of str
        1- or 2-element array which says the coordinates to plot (2 for 3D plots).
    axisInTable : str, optional
        the axis to plot on a page - e.g. ant to get all antenna's on one file. By default ''.
    axisInCol : str, optional
        The axis to plot in different colours - e.g. pol to get correlations with different colors. By default ''.
    axisDiff : str, optional
        This must be a len=2 axis and the plot will have the differential value - e.g. 'pol' to plot XX-YY. By default ''.
    NColFig : int, optional
        Number of columns in a multi-table image. By default is automatically chosen.
    figSize : array of int, optional
        Size of the image [x,y], if one of the values is 0, then it is automatically chosen. By default automatic set.
    markerSize : int, optional
        Size of the markers in the 2D plot. By default 2.
    minmax : array of float, optional
        Min max value for the independent variable (0 means automatic). By default 0.
    log : bool, optional
        Use Log='XYZ' to set which axes to put in Log. By default ''.
    plotFlag : bool, optional
        Whether to plot also flags as red points in 2D plots. By default False.
    doUnwrap : bool, optional
        Unwrap phases. By default False.
    refAnt : str, optional
        Reference antenna for phases. By default None.
    refDir : str, optional
        Reference direction for phases. By default None.
    soltabsToAdd : str, optional
        Tables to "add" (e.g. 'sol000/tec000'), it works only for tec and clock to be added to phases. By default None. [To Be Implemented]
    makeAntPlot : bool, optional
        Make a plot containing antenna coordinates in x,y and in color the value to plot, axesInPlot must be [ant]. By default False.
    makeMovie : bool, optional
        Make a movie summing up all the produced plots, by default False.
    prefix : str, optional
        Prefix to add before the self-generated filename, by default None.
    ncpu : int, optional
        Number of cpus, by default all available.
    """
    # NOTE(review): figSize and minmax are mutable list defaults shared across
    # calls to run(); confirm no caller relies on the defaults after a call
    # that modified them downstream.
    import os
    import numpy as np
    logging.info("Plotting soltab: "+soltab.name)
    # input check
    # str2list
    if axisInTable == '': axisInTable = []
    else: axisInTable = [axisInTable]
    if axisInCol == '': axisInCol = []
    else: axisInCol = [axisInCol]
    if axisDiff == '': axisDiff = []
    else: axisDiff = [axisDiff]
    # each axis may appear in at most one role
    if len(set(axisInTable+axesInPlot+axisInCol+axisDiff)) != len(axisInTable+axesInPlot+axisInCol+axisDiff):
        logging.error('Axis defined multiple times.')
        return 1
    # just because we use lists, check that they are 1-d
    if len(axisInTable) > 1 or len(axisInCol) > 1 or len(axisDiff) > 1:
        logging.error('Too many TableAxis/ColAxis/DiffAxis, they must be at most one each.')
        return 1
    for axis in axesInPlot+axisInCol+axisDiff:
        if axis not in soltab.getAxesNames():
            logging.error('Axis \"'+axis+'\" not found.')
            return 1
    if makeMovie:
        prefix = prefix+'__tmp__'
    if os.path.dirname(prefix) != '' and not os.path.exists(os.path.dirname(prefix)):
        logging.debug('Creating '+os.path.dirname(prefix)+'.')
        os.makedirs(os.path.dirname(prefix))
    if refAnt == '': refAnt = None
    elif refAnt != 'closest' and refAnt != 'auto' and not refAnt in soltab.getAxisValues('ant', ignoreSelection = True):
        logging.warning('Reference antenna '+refAnt+' not found. Using: atomatic search.')
        refAnt = 'auto'
    if refDir == '': refDir = None
    elif refDir != 'center' and not refDir in soltab.getAxisValues('dir', ignoreSelection = True):
        # NOTE(review): falls back to the direction at index 1, not 0 —
        # confirm the second direction is intended as the default reference.
        logging.error('Reference direction '+refDir+' not found. Using: '+soltab.getAxisValues('dir')[1])
        refDir = soltab.getAxisValues('dir')[1]
    minZ, maxZ = minmax
    solset = soltab.getSolset()
    soltabsToAdd = [ solset.getSoltab(soltabName) for soltabName in soltabsToAdd ]
    cmesh = False
    if len(axesInPlot) == 2:
        cmesh = True
        # not color possible in 3D
        axisInCol = []
    elif len(axesInPlot) != 1:
        logging.error('Axes must be a len 1 or 2 array.')
        return 1
    # end input check
    # all axes that are not iterated by anything else
    axesInFile = soltab.getAxesNames()
    for axis in axisInTable+axesInPlot+axisInCol+axisDiff:
        axesInFile.remove(axis)
    # set subplots scheme
    if axisInTable != []:
        Nplots = soltab.getAxisLen(axisInTable[0])
    else:
        Nplots = 1
    # prepare antennas coord in makeAntPlot case
    if makeAntPlot:
        if axesInPlot != ['ant']:
            logging.error('If makeAntPlot is selected the "Axes" values must be "ant"')
            return 1
        antCoords = [[],[]]
        for ant in soltab.getAxisValues('ant'): # select only user-selected antenna in proper order
            antCoords[0].append(+1*soltab.getSolset().getAnt()[ant][1])
            antCoords[1].append(-1*soltab.getSolset().getAnt()[ant][0])
    else:
        antCoords = []
    datatype = soltab.getType()
    # start processes for multi-thread
    mpm = multiprocManager(ncpu, _plot)
    # compute dataCube size
    shape = []
    if axisInTable != []: shape.append(soltab.getAxisLen(axisInTable[0]))
    else: shape.append(1)
    if axisInCol != []: shape.append(soltab.getAxisLen(axisInCol[0]))
    else: shape.append(1)
    if cmesh:
        shape.append(soltab.getAxisLen(axesInPlot[1]))
        shape.append(soltab.getAxisLen(axesInPlot[0]))
    else:
        shape.append(soltab.getAxisLen(axesInPlot[0]))
    # will contain the data to pass to each thread to make 1 image
    dataCube = np.ma.zeros( shape=shape, fill_value=np.nan )
    # cycle on files
    if makeMovie: pngs = [] # store png filenames
    for vals, coord, selection in soltab.getValuesIter(returnAxes=axisDiff+axisInTable+axisInCol+axesInPlot):
        # set filename
        filename = ''
        for axis in axesInFile:
            filename += axis+str(coord[axis])+'_'
        filename = filename[:-1] # remove last _
        if prefix+filename == '': filename = 'plot'
        # axis vals (they are always the same, regulat arrays)
        xvals = coord[axesInPlot[0]]
        # if plotting antenna - convert to number
        if axesInPlot[0] == 'ant':
            xvals = np.arange(len(xvals))
        # if plotting time - convert in h/min/s
        xlabelunit=''
        if axesInPlot[0] == 'time':
            if xvals[-1] - xvals[0] > 3600:
                xvals = (xvals-xvals[0])/3600. # hrs
                xlabelunit = ' [hr]'
            elif xvals[-1] - xvals[0] > 60:
                xvals = (xvals-xvals[0])/60. # mins
                xlabelunit = ' [min]'
            else:
                xvals = (xvals-xvals[0]) # sec
                xlabelunit = ' [s]'
        # if plotting freq convert in MHz
        elif axesInPlot[0] == 'freq':
            xvals = xvals/1.e6 # MHz
            xlabelunit = ' [MHz]'
        if cmesh:
            # axis vals (they are always the same, regular arrays)
            yvals = coord[axesInPlot[1]]
            # same as above but for y-axis
            if axesInPlot[1] == 'ant':
                yvals = np.arange(len(yvals))
            if len(xvals) <= 1 or len(yvals) <=1:
                logging.error('3D plot must have more then one value per axes.')
                mpm.wait()
                return 1
            ylabelunit=''
            if axesInPlot[1] == 'time':
                if yvals[-1] - yvals[0] > 3600:
                    yvals = (yvals-yvals[0])/3600. # hrs
                    ylabelunit = ' [hr]'
                elif yvals[-1] - yvals[0] > 60:
                    yvals = (yvals-yvals[0])/60. # mins
                    ylabelunit = ' [min]'
                else:
                    yvals = (yvals-yvals[0]) # sec
                    ylabelunit = ' [s]'
            elif axesInPlot[1] == 'freq': # Mhz
                yvals = yvals/1.e6
                ylabelunit = ' [MHz]'
        else:
            yvals = None
            # 2D plots: rename some datatypes for nicer y-axis labels
            if datatype == 'clock':
                datatype = 'Clock'
                ylabelunit = ' (s)'
            elif datatype == 'tec':
                datatype = 'dTEC'
                ylabelunit = ' (TECU)'
            elif datatype == 'rotationmeasure':
                datatype = 'dRM'
                ylabelunit = r' (rad m$^{-2}$)'
            elif datatype == 'tec3rd':
                datatype = r'dTEC$_3$'
                ylabelunit = r' (rad m$^{-3}$)'
            else:
                ylabelunit = ''
        # cycle on tables
        soltab1Selection = soltab.selection # save global selection and subselect only axex to iterate
        soltab.selection = selection
        titles = []
        for Ntab, (vals, coord, selection) in enumerate(soltab.getValuesIter(returnAxes=axisDiff+axisInCol+axesInPlot)):
            # set tile
            titles.append('')
            for axis in coord:
                if axis in axesInFile+axesInPlot+axisInCol: continue
                titles[Ntab] += axis+':'+str(coord[axis])+' '
            titles[Ntab] = titles[Ntab][:-1] # remove last ' '
            # cycle on colors
            soltab2Selection = soltab.selection
            soltab.selection = selection
            for Ncol, (vals, weight, coord, selection) in enumerate(soltab.getValuesIter(returnAxes=axisDiff+axesInPlot, weight=True, refAnt=refAnt, refDir=refDir)):
                # differential plot
                if axisDiff != []:
                    # find ordered list of axis
                    names = [axis for axis in soltab.getAxesNames() if axis in axisDiff+axesInPlot]
                    if axisDiff[0] not in names:
                        logging.error("Axis to differentiate (%s) not found." % axisDiff[0])
                        mpm.wait()
                        return 1
                    if len(coord[axisDiff[0]]) != 2:
                        logging.error("Axis to differentiate (%s) has too many values, only 2 is allowed." % axisDiff[0])
                        mpm.wait()
                        return 1
                    # find position of interesting axis
                    diff_idx = names.index(axisDiff[0])
                    # roll to first place
                    vals = np.rollaxis(vals,diff_idx,0)
                    vals = vals[0] - vals[1]
                    weight = np.rollaxis(weight,diff_idx,0)
                    # a point flagged in either of the two slices is flagged in the difference
                    weight[0][ weight[1]==0 ] = 0
                    weight = weight[0]
                    del coord[axisDiff[0]]
                # add tables if required (e.g. phase/tec)
                for soltabToAdd in soltabsToAdd:
                    logging.warning('soltabsToAdd not implemented. Ignoring.')
#                    newCoord = {}
#                    for axisName in coord.keys():
#                        # prepare selected on present axes
#                        if axisName in soltabToAdd.getAxesNames():
#                            if type(coord[axisName]) is np.ndarray:
#                                newCoord[axisName] = coord[axisName]
#                            else:
#                                newCoord[axisName] = [coord[axisName]] # avoid being interpreted as regexp, faster
#
#                    soltabToAdd.setSelection(**newCoord)
#                    valsAdd = np.squeeze(soltabToAdd.getValues(retAxesVals=False, weight=False, refAnt=refAnt))
#
#                    # add missing axes
#                    print ('shape:', vals.shape)
#                    for axisName in coord.keys():
#                        if not axisName in soltabToAdd.getAxesNames():
#                            # find axis positions
#                            axisPos = soltab.getAxesNames().index(axisName)
#                            # create a new axes for the table to add and duplicate the values
#                            valsAdd = np.expand_dims(valsAdd, axisPos)
#                            print ('shape to add:', valsAdd.shape)
#
#                    if soltabToAdd.getType() == 'clock':
#                        valsAdd = 2. * np.pi * valsAdd * coord['freq']
#                    elif soltabToAdd.getType() == 'tec':
#                        valsAdd = -8.44797245e9 * valsAdd / coord['freq']
#                    else:
#                        logging.warning('Only Clock or TEC can be added to solutions. Ignoring: '+soltabToAdd.getType()+'.')
#                        continue
#
#                    if valsAdd.shape != vals.shape:
#                        logging.error('Cannot combine the table '+soltabToAdd.getType()+' with '+soltab.getType()+'. Wrong shape.')
#                        mpm.wait()
#                        return 1
#
#                    vals += valsAdd
                # normalize
                if (soltab.getType() == 'phase'):
                    vals = normalize_phase(vals)
                if (soltab.getType() == 'rotation'):
                    vals = np.mod(vals + np.pi/2., np.pi) - np.pi/2.
                # is user requested axis in an order that is different from h5parm, we need to transpose
                if cmesh:
                    if soltab.getAxesNames().index(axesInPlot[0]) < soltab.getAxesNames().index(axesInPlot[1]):
                        vals = vals.T
                        weight = weight.T
                # unwrap if required
                if (soltab.getType() == 'phase') and doUnwrap:
                    if len(axesInPlot) == 1:
                        vals = unwrap(vals)
                    else:
                        flags = np.array((weight == 0), dtype=bool)
                        if not (flags == True).all():
                            vals = unwrap_2d(vals, flags, coord[axesInPlot[0]], coord[axesInPlot[1]])
                # store the slice and mask flagged (weight==0) and NaN points
                dataCube[Ntab,Ncol] = vals
                sel1 = np.where(weight == 0.)
                sel2 = np.where(np.isnan(vals))
                if cmesh:
                    dataCube[Ntab,Ncol,sel1[0],sel1[1]] = np.ma.masked
                    dataCube[Ntab,Ncol,sel2[0],sel2[1]] = np.ma.masked
                else:
                    dataCube[Ntab,Ncol,sel1[0]] = np.ma.masked
                    dataCube[Ntab,Ncol,sel2[0]] = np.ma.masked
            soltab.selection = soltab2Selection
            ### end cycle on colors
        # if dataCube too large (> 500 MB) do not go parallel
        if np.array(dataCube).nbytes > 1024*1024*500:
            logging.debug('Big plot, parallel not possible.')
            _plot(Nplots, NColFig, figSize, markerSize, cmesh, axesInPlot, axisInTable, xvals, yvals, xlabelunit, ylabelunit, datatype, prefix+filename, titles, log, dataCube, minZ, maxZ, plotFlag, makeMovie, antCoords, None)
        else:
            mpm.put([Nplots, NColFig, figSize, markerSize, cmesh, axesInPlot, axisInTable, xvals, yvals, xlabelunit, ylabelunit, datatype, prefix+filename, titles, log, np.ma.copy(dataCube), minZ, maxZ, plotFlag, makeMovie, antCoords]) # copy is necessary otherwise other cycles overwrite the dataCube
        if makeMovie: pngs.append(prefix+filename+'.png')
        soltab.selection = soltab1Selection
        ### end cycle on tables
    mpm.wait()
    del mpm
    if makeMovie:
        def long_substr(strings):
            """
            Find longest common substring
            """
            substr = ''
            if len(strings) > 1 and len(strings[0]) > 0:
                for i in range(len(strings[0])):
                    for j in range(len(strings[0])-i+1):
                        if j > len(substr) and all(strings[0][i:i+j] in x for x in strings):
                            substr = strings[0][i:i+j]
            return substr
        movieName = long_substr(pngs)
        assert movieName != '' # need a common prefix, use prefix keyword in case
        logging.info('Making movie: '+movieName)
        # make every movie last 20 sec, min one second per slide
        fps = np.ceil(len(pngs)/200.)
        ss="mencoder -ovc lavc -lavcopts vcodec=mpeg4:vpass=1:vbitrate=6160000:mbd=2:keyint=132:v4mv:vqmin=3:lumi_mask=0.07:dark_mask=0.2:"+\
            "mpeg_quant:scplx_mask=0.1:tcplx_mask=0.1:naq -mf type=png:fps="+str(fps)+" -nosound -o "+movieName.replace('__tmp__','')+".mpg mf://"+movieName+"* > mencoder.log 2>&1"
        os.system(ss)
        #for png in pngs: os.system('rm '+png)
    return 0
|
revoltekREPO_NAMElosotoPATH_START.@losoto_extracted@losoto-master@losoto@operations@plot.py@.PATH_END.py
|
{
"filename": "run_server.py",
"repo_name": "MichelleLochner/astronomaly",
"repo_path": "astronomaly_extracted/astronomaly-main/astronomaly/frontend/run_server.py",
"type": "Python"
}
|
from flask import Flask, render_template, request, Response
import json
from os.path import join
from astronomaly.frontend.interface import Controller
import logging
import argparse
# Main function to serve Astronomaly
# The single positional CLI argument is the analysis script that builds the
# pipeline Astronomaly should expose through this server.
parser = argparse.ArgumentParser(description='Run the Astronomaly server')
help_str = 'Location of the script Astronomaly should run. \
See the scripts folder for examples.'
parser.add_argument('script', help=help_str)
args = parser.parse_args()
script = args.script
# The compiled frontend lives in ../../webapp/public; it is used both as the
# static folder and as the template folder (for index.html).
webapp_dir = join('..', '..', 'webapp')
app = Flask(__name__,
            static_folder=join(webapp_dir, 'public'),
            template_folder=join(webapp_dir, 'public'))
# Silence werkzeug's per-request access logging; only errors are reported.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# Single Controller instance shared by all route handlers below.
controller = Controller(script)
@app.route('/')
def index():
    """Serve the single-page frontend's entry point."""
    page = 'index.html'
    return render_template(page)
@app.route('/getindex', methods=["POST"])
def get_index():
    """Translate a position in the ordered list (e.g. 0) into the actual
    instance index (e.g. "obj287")."""
    if request.method != "POST":
        return ""
    position = int(request.get_json())
    original_id = controller.get_original_id_from_index(position)
    return json.dumps(original_id)
@app.route('/searchObjID', methods=["POST"])
def search_original_id():
    """Look up an original object id and return its position in the current
    ordered list (as a string)."""
    if request.method != "POST":
        return ""
    object_name = request.get_json()
    position = controller.get_index_from_original_id(object_name)
    return json.dumps(str(position))
@app.route('/getdatatype', methods=["POST"])
def get_data_type():
    """Report the data type being analysed (e.g. "image", "light_curve",
    "raw_features")."""
    if request.method != "POST":
        return ""
    return json.dumps(controller.get_data_type())
@app.route('/getmetadata', methods=["POST"])
def get_metadata():
    """Return the metadata for the requested instance."""
    if request.method != "POST":
        return ""
    idx = str(request.get_json())
    metadata = controller.get_metadata(idx)
    return json.dumps(metadata)
@app.route('/getcoordinates', methods=["POST"])
def get_coordinates():
    """Return the coordinates (if available) of the requested object as a
    comma-separated string."""
    if request.method != "POST":
        return ""
    idx = str(request.get_json())
    coords = controller.get_coordinates(idx)
    return json.dumps(coords)
@app.route('/getlightcurve', methods=["POST"])
def get_light_curve():
    """Return the display data for a light curve."""
    if request.method != "POST":
        return ""
    idx = str(request.get_json())
    display_data = controller.get_display_data(idx)
    return json.dumps(display_data)
@app.route('/getfeatures', methods=["POST"])
def get_features():
    """Return the features of an instance, ready for display in a table."""
    if request.method != "POST":
        return ""
    idx = str(request.get_json())
    features = controller.get_features(idx)
    return json.dumps(features)
@app.route('/getrawfeatures', methods=["POST"])
def get_raw_features():
    """Return raw features of an instance, ready for basic plotting."""
    if request.method != "POST":
        return ""
    idx = str(request.get_json())
    raw_features = controller.get_display_data(idx)
    return json.dumps(raw_features)
@app.route('/getimage', methods=["GET", "POST"])
def get_image():
    """Serve the requested instance as a PNG image."""
    if request.method != "POST":
        return ""
    idx = str(request.get_json())
    image_buffer = controller.get_display_data(idx)
    if image_buffer is None:
        return ""
    return Response(image_buffer.getvalue(), mimetype='image/png')
@app.route('/getColumns', methods=["GET", "POST"])
def get_available_columns():
    """Tell the frontend which score columns exist (i.e. whether active
    learning has run) so it can offer the right colouring options."""
    if request.method != "POST":
        return ""
    columns = controller.get_active_learning_columns()
    return json.dumps(columns)
@app.route('/visualisation', methods=["GET", "POST"])
def get_visualisation():
    """
    Serves the data to be displayed on the visualisation tab.

    The POST body holds the column name to colour the points by. Returns the
    visualisation data as JSON, or an empty string for non-POST requests
    (previously a GET fell through and returned None, causing a server error).
    """
    if request.method == "POST":
        color_by_column = request.get_json()
        output = controller.get_visualisation_data(
            color_by_column=color_by_column)
        js = json.dumps(output)
        return js
    else:
        return ""
@app.route('/retrain', methods=["GET", "POST"])
def retrain():
    """
    Trigger the human-in-the-loop (active learning) retraining step.
    """
    retrain_result = controller.run_active_learning()
    return json.dumps(retrain_result)
@app.route('/checkFits', methods=["GET", "POST"])
def check_fits():
    """
    Report whether fits files are in use, so the frontend can decide whether
    to display the open-viewer button.
    """
    return json.dumps(controller.check_if_fits_file())
@app.route('/openViewer', methods=["GET", "POST"])
def open_viewer():
    """
    Launch the local fits viewer (such as ds9) for the requested object.
    """
    object_id = request.get_json()
    viewer_result = controller.open_local_fits_viewer(object_id)
    return json.dumps(viewer_result)
@app.route('/deletelabels', methods=["GET", "POST"])
def delete_labels():
    """
    Delete all existing labels, allowing the user to start again.
    """
    controller.delete_labels()
    return json.dumps("success")
@app.route('/sort', methods=["GET", "POST"])
def sort_data():
    """
    Sorts the data by a requested column.

    Expects a JSON payload of ``[column, show_unlabelled_first]``; the
    special column name "random" shuffles the scores instead of sorting.
    """
    if request.method == "POST":
        args = request.get_json()
        column = str(args[0])  # was (str)(args[0]) — C-style cast
        show_unlabelled_first = args[1]
        if column == "random":
            controller.randomise_ml_scores(show_unlabelled_first)
        else:
            controller.sort_ml_scores(column, show_unlabelled_first)
        return json.dumps("success")
    else:
        # GET previously returned None (Flask 500); return an empty body.
        return ""
@app.route('/label', methods=["GET", "POST"])
def get_label():
    """
    Records the label given to an instance by a human.

    Expects a JSON object with ``id`` and ``label`` keys.
    """
    if request.method == "POST":
        out_dict = request.get_json()
        idx = out_dict['id']
        label = float(out_dict['label'])  # was (float)(...) — C-style cast
        controller.set_human_label(idx, label)
        return json.dumps("success")
    else:
        # GET previously returned None (Flask 500); return an empty body.
        return ""
@app.route('/getmaxid', methods=["GET", "POST"])
def get_max_id():
    """
    Lets the frontend know how long the list of objects is.
    """
    if request.method == "POST":
        max_id = controller.get_max_id()
        return json.dumps(max_id)
    else:
        # GET previously returned None (Flask 500); return an empty body.
        return ""
@app.route('/getlistindex', methods=["GET", "POST"])
def get_list_index():
    """
    Serves the index of the currently selected object in the list.
    """
    if request.method == "POST":
        idx = controller.current_index
        return json.dumps(idx)
    else:
        # GET previously returned None (Flask 500); return an empty body.
        return ""
@app.route('/setlistindex', methods=["GET", "POST"])
def set_list_index():
    """
    Sets the index of the currently selected object in the list.
    """
    if request.method == "POST":
        idx = int(request.get_json())
        controller.current_index = idx
        return json.dumps("success")
    else:
        # GET previously returned None (Flask 500); return an empty body.
        return ""
@app.route('/close', methods=["GET", "POST"])
def close():
    """
    Cleans up and shuts down the Astronomaly server.
    """
    if request.method == "POST":
        controller.clean_up()
        print("Exiting Astronomaly... Goodbye!")
        # werkzeug's development-server shutdown hook; absent when running
        # under a different WSGI server, hence the None check.
        shutdown_hook = request.environ.get('werkzeug.server.shutdown')
        if shutdown_hook is not None:
            shutdown_hook()
        return Response("Bye", mimetype='text/plain')
    else:
        # GET previously returned None (Flask 500); return an empty body.
        return ""
if __name__ == "__main__":
    # Build the pipeline before serving any requests.
    controller.run_pipeline()
    server_url = 'http://127.0.0.1:5000/'
    print('##### Astronomaly server now running #####')
    print('Open this link in your browser:', server_url)
    print()
    app.run()
|
MichelleLochnerREPO_NAMEastronomalyPATH_START.@astronomaly_extracted@astronomaly-main@astronomaly@frontend@run_server.py@.PATH_END.py
|
{
"filename": "test_rank.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/frame/methods/test_rank.py",
"type": "Python"
}
|
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas._libs.algos import (
Infinity,
NegInfinity,
)
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
class TestRank:
    """Tests for DataFrame.rank: tie methods, NaN/inf handling, pct, dtypes."""

    # Shared fixture: a Series with ties and NaNs, duplicated into two
    # identical DataFrame columns so column-wise results can be compared.
    s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
    df = DataFrame({"A": s, "B": s})
    # Expected ranks of ``s`` for each tie-breaking method (NaNs stay NaN).
    results = {
        "average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]),
        "min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),
        "max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),
        "first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),
        "dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),
    }

    def test_rank(self, float_frame):
        # Ranks along both axes should match scipy.stats.rankdata, with the
        # NaN positions masked back to NaN afterwards.
        sp_stats = pytest.importorskip("scipy.stats")
        float_frame.loc[::2, "A"] = np.nan
        float_frame.loc[::3, "B"] = np.nan
        float_frame.loc[::4, "C"] = np.nan
        float_frame.loc[::5, "D"] = np.nan
        ranks0 = float_frame.rank()
        ranks1 = float_frame.rank(1)
        mask = np.isnan(float_frame.values)
        fvals = float_frame.fillna(np.inf).values
        exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fvals)
        exp0[mask] = np.nan
        exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fvals)
        exp1[mask] = np.nan
        tm.assert_almost_equal(ranks0.values, exp0)
        tm.assert_almost_equal(ranks1.values, exp1)
        # integers
        df = DataFrame(
            np.random.default_rng(2).integers(0, 5, size=40).reshape((10, 4))
        )
        result = df.rank()
        exp = df.astype(float).rank()
        tm.assert_frame_equal(result, exp)
        result = df.rank(1)
        exp = df.astype(float).rank(1)
        tm.assert_frame_equal(result, exp)

    def test_rank2(self):
        # pct=True divides the ranks by the group size
        df = DataFrame([[1, 3, 2], [1, 2, 3]])
        expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
        result = df.rank(1, pct=True)
        tm.assert_frame_equal(result, expected)
        df = DataFrame([[1, 3, 2], [1, 2, 3]])
        expected = df.rank(0) / 2.0
        result = df.rank(0, pct=True)
        tm.assert_frame_equal(result, expected)
        # strings rank lexicographically when numeric_only=False
        df = DataFrame([["b", "c", "a"], ["a", "c", "b"]])
        expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
        result = df.rank(1, numeric_only=False)
        tm.assert_frame_equal(result, expected)
        expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
        result = df.rank(0, numeric_only=False)
        tm.assert_frame_equal(result, expected)
        df = DataFrame([["b", np.nan, "a"], ["a", "c", "b"]])
        expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]])
        result = df.rank(1, numeric_only=False)
        tm.assert_frame_equal(result, expected)
        expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]])
        result = df.rank(0, numeric_only=False)
        tm.assert_frame_equal(result, expected)
        # f7u12, this does not work without extensive workaround
        data = [
            [datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)],
            [datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)],
        ]
        df = DataFrame(data)
        # check the rank
        expected = DataFrame([[2.0, np.nan, 1.0], [2.0, 3.0, 1.0]])
        result = df.rank(1, numeric_only=False, ascending=True)
        tm.assert_frame_equal(result, expected)
        expected = DataFrame([[1.0, np.nan, 2.0], [2.0, 1.0, 3.0]])
        result = df.rank(1, numeric_only=False, ascending=False)
        tm.assert_frame_equal(result, expected)
        # values spanning many orders of magnitude, including exact ties
        df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10, 1e60, 1e80, 1e-30]})
        exp = DataFrame({"a": [3.5, 1.0, 3.5, 5.0, 6.0, 7.0, 2.0]})
        tm.assert_frame_equal(df.rank(), exp)

    def test_rank_does_not_mutate(self):
        # GH#18521
        # Check rank does not mutate DataFrame
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 3)), dtype="float64"
        )
        expected = df.copy()
        df.rank()
        result = df
        tm.assert_frame_equal(result, expected)

    def test_rank_mixed_frame(self, float_string_frame):
        # mixed dtypes rank fine column-wise, but row-wise comparison of
        # incompatible types must raise
        float_string_frame["datetime"] = datetime.now()
        float_string_frame["timedelta"] = timedelta(days=1, seconds=1)
        float_string_frame.rank(numeric_only=False)
        with pytest.raises(TypeError, match="not supported between instances of"):
            float_string_frame.rank(axis=1)

    def test_rank_na_option(self, float_frame):
        # na_option="top"/"bottom" should place NaNs first/last; emulate by
        # filling NaNs with +/- extreme values before calling rankdata
        sp_stats = pytest.importorskip("scipy.stats")
        float_frame.loc[::2, "A"] = np.nan
        float_frame.loc[::3, "B"] = np.nan
        float_frame.loc[::4, "C"] = np.nan
        float_frame.loc[::5, "D"] = np.nan
        # bottom
        ranks0 = float_frame.rank(na_option="bottom")
        ranks1 = float_frame.rank(1, na_option="bottom")
        fvals = float_frame.fillna(np.inf).values
        exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fvals)
        exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fvals)
        tm.assert_almost_equal(ranks0.values, exp0)
        tm.assert_almost_equal(ranks1.values, exp1)
        # top
        ranks0 = float_frame.rank(na_option="top")
        ranks1 = float_frame.rank(1, na_option="top")
        fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
        fval1 = float_frame.T
        fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
        fval1 = fval1.fillna(np.inf).values
        exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fval0)
        exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fval1)
        tm.assert_almost_equal(ranks0.values, exp0)
        tm.assert_almost_equal(ranks1.values, exp1)
        # descending
        # bottom
        ranks0 = float_frame.rank(na_option="top", ascending=False)
        ranks1 = float_frame.rank(1, na_option="top", ascending=False)
        fvals = float_frame.fillna(np.inf).values
        exp0 = np.apply_along_axis(sp_stats.rankdata, 0, -fvals)
        exp1 = np.apply_along_axis(sp_stats.rankdata, 1, -fvals)
        tm.assert_almost_equal(ranks0.values, exp0)
        tm.assert_almost_equal(ranks1.values, exp1)
        # descending
        # top
        ranks0 = float_frame.rank(na_option="bottom", ascending=False)
        ranks1 = float_frame.rank(1, na_option="bottom", ascending=False)
        fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
        fval1 = float_frame.T
        fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
        fval1 = fval1.fillna(np.inf).values
        exp0 = np.apply_along_axis(sp_stats.rankdata, 0, -fval0)
        exp1 = np.apply_along_axis(sp_stats.rankdata, 1, -fval1)
        tm.assert_numpy_array_equal(ranks0.values, exp0)
        tm.assert_numpy_array_equal(ranks1.values, exp1)
        # bad values throw error
        msg = "na_option must be one of 'keep', 'top', or 'bottom'"
        with pytest.raises(ValueError, match=msg):
            float_frame.rank(na_option="bad", ascending=False)
        # invalid type
        with pytest.raises(ValueError, match=msg):
            float_frame.rank(na_option=True, ascending=False)

    def test_rank_axis(self):
        # check if using axes' names gives the same result
        df = DataFrame([[2, 1], [4, 3]])
        tm.assert_frame_equal(df.rank(axis=0), df.rank(axis="index"))
        tm.assert_frame_equal(df.rank(axis=1), df.rank(axis="columns"))

    @pytest.mark.parametrize("ax", [0, 1])
    def test_rank_methods_frame(self, ax, rank_method):
        # every tie method should agree with scipy's rankdata on both axes,
        # across different value magnitudes
        sp_stats = pytest.importorskip("scipy.stats")
        xs = np.random.default_rng(2).integers(0, 21, (100, 26))
        xs = (xs - 10.0) / 10.0
        cols = [chr(ord("z") - i) for i in range(xs.shape[1])]
        for vals in [xs, xs + 1e6, xs * 1e-6]:
            df = DataFrame(vals, columns=cols)
            result = df.rank(axis=ax, method=rank_method)
            sprank = np.apply_along_axis(
                sp_stats.rankdata,
                ax,
                vals,
                rank_method if rank_method != "first" else "ordinal",
            )
            sprank = sprank.astype(np.float64)
            expected = DataFrame(sprank, columns=cols).astype("float64")
            tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
    def test_rank_descending(self, rank_method, dtype):
        # ranking descending is equivalent to ranking (max - values) ascending
        if "i" in dtype:
            df = self.df.dropna().astype(dtype)
        else:
            df = self.df.astype(dtype)
        res = df.rank(ascending=False)
        expected = (df.max() - df).rank()
        tm.assert_frame_equal(res, expected)
        expected = (df.max() - df).rank(method=rank_method)
        if dtype != "O":
            res2 = df.rank(method=rank_method, ascending=False, numeric_only=True)
            tm.assert_frame_equal(res2, expected)
        res3 = df.rank(method=rank_method, ascending=False, numeric_only=False)
        tm.assert_frame_equal(res3, expected)

    @pytest.mark.parametrize("axis", [0, 1])
    @pytest.mark.parametrize("dtype", [None, object])
    def test_rank_2d_tie_methods(self, rank_method, axis, dtype):
        df = self.df

        # NOTE(review): the ``method`` parameter is shadowed by the
        # ``rank_method`` fixture inside the body (``method`` is unused) —
        # confirm this is intentional.
        def _check2d(df, expected, method="average", axis=0):
            exp_df = DataFrame({"A": expected, "B": expected})
            if axis == 1:
                df = df.T
                exp_df = exp_df.T
            result = df.rank(method=rank_method, axis=axis)
            tm.assert_frame_equal(result, exp_df)

        frame = df if dtype is None else df.astype(dtype)
        _check2d(frame, self.results[rank_method], method=rank_method, axis=axis)

    @pytest.mark.parametrize(
        "rank_method,exp",
        [
            ("dense", [[1.0, 1.0, 1.0], [1.0, 0.5, 2.0 / 3], [1.0, 0.5, 1.0 / 3]]),
            (
                "min",
                [
                    [1.0 / 3, 1.0, 1.0],
                    [1.0 / 3, 1.0 / 3, 2.0 / 3],
                    [1.0 / 3, 1.0 / 3, 1.0 / 3],
                ],
            ),
            (
                "max",
                [[1.0, 1.0, 1.0], [1.0, 2.0 / 3, 2.0 / 3], [1.0, 2.0 / 3, 1.0 / 3]],
            ),
            (
                "average",
                [[2.0 / 3, 1.0, 1.0], [2.0 / 3, 0.5, 2.0 / 3], [2.0 / 3, 0.5, 1.0 / 3]],
            ),
            (
                "first",
                [
                    [1.0 / 3, 1.0, 1.0],
                    [2.0 / 3, 1.0 / 3, 2.0 / 3],
                    [3.0 / 3, 2.0 / 3, 1.0 / 3],
                ],
            ),
        ],
    )
    def test_rank_pct_true(self, rank_method, exp):
        # see gh-15630.
        df = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]])
        result = df.rank(method=rank_method, pct=True)
        expected = DataFrame(exp)
        tm.assert_frame_equal(result, expected)

    @pytest.mark.single_cpu
    def test_pct_max_many_rows(self):
        # GH 18271
        df = DataFrame({"A": np.arange(2**24 + 1), "B": np.arange(2**24 + 1, 0, -1)})
        result = df.rank(pct=True).max()
        assert (result == 1).all()

    @pytest.mark.parametrize(
        "contents,dtype",
        [
            (
                [
                    -np.inf,
                    -50,
                    -1,
                    -1e-20,
                    -1e-25,
                    -1e-50,
                    0,
                    1e-40,
                    1e-20,
                    1e-10,
                    2,
                    40,
                    np.inf,
                ],
                "float64",
            ),
            (
                [
                    -np.inf,
                    -50,
                    -1,
                    -1e-20,
                    -1e-25,
                    -1e-45,
                    0,
                    1e-40,
                    1e-20,
                    1e-10,
                    2,
                    40,
                    np.inf,
                ],
                "float32",
            ),
            ([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max], "uint8"),
            (
                [
                    np.iinfo(np.int64).min,
                    -100,
                    0,
                    1,
                    9999,
                    100000,
                    1e10,
                    np.iinfo(np.int64).max,
                ],
                "int64",
            ),
            ([NegInfinity(), "1", "A", "BA", "Ba", "C", Infinity()], "object"),
            (
                [datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 5)],
                "datetime64",
            ),
        ],
    )
    def test_rank_inf_and_nan(self, contents, dtype, frame_or_series):
        dtype_na_map = {
            "float64": np.nan,
            "float32": np.nan,
            "object": None,
            "datetime64": np.datetime64("nat"),
        }
        # Insert nans at random positions if underlying dtype has missing
        # value. Then adjust the expected order by adding nans accordingly
        # This is for testing whether rank calculation is affected
        # when values are intertwined with nan values.
        values = np.array(contents, dtype=dtype)
        exp_order = np.array(range(len(values)), dtype="float64") + 1.0
        if dtype in dtype_na_map:
            na_value = dtype_na_map[dtype]
            nan_indices = np.random.default_rng(2).choice(range(len(values)), 5)
            values = np.insert(values, nan_indices, na_value)
            exp_order = np.insert(exp_order, nan_indices, np.nan)
        # Shuffle the testing array and expected results in the same way
        random_order = np.random.default_rng(2).permutation(len(values))
        obj = frame_or_series(values[random_order])
        expected = frame_or_series(exp_order[random_order], dtype="float64")
        result = obj.rank()
        tm.assert_equal(result, expected)

    def test_df_series_inf_nan_consistency(self):
        # GH#32593
        # ranking a frame column-wise must match ranking each column as a
        # Series, including inf/-inf/NaN entries
        index = [5, 4, 3, 2, 1, 6, 7, 8, 9, 10]
        col1 = [5, 4, 3, 5, 8, 5, 2, 1, 6, 6]
        col2 = [5, 4, np.nan, 5, 8, 5, np.inf, np.nan, 6, -np.inf]
        df = DataFrame(
            data={
                "col1": col1,
                "col2": col2,
            },
            index=index,
            dtype="f8",
        )
        df_result = df.rank()
        series_result = df.copy()
        series_result["col1"] = df["col1"].rank()
        series_result["col2"] = df["col2"].rank()
        tm.assert_frame_equal(df_result, series_result)

    def test_rank_both_inf(self):
        # GH#32593
        df = DataFrame({"a": [-np.inf, 0, np.inf]})
        expected = DataFrame({"a": [1.0, 2.0, 3.0]})
        result = df.rank()
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "na_option,ascending,expected",
        [
            ("top", True, [3.0, 1.0, 2.0]),
            ("top", False, [2.0, 1.0, 3.0]),
            ("bottom", True, [2.0, 3.0, 1.0]),
            ("bottom", False, [1.0, 3.0, 2.0]),
        ],
    )
    def test_rank_inf_nans_na_option(
        self, frame_or_series, rank_method, na_option, ascending, expected
    ):
        obj = frame_or_series([np.inf, np.nan, -np.inf])
        result = obj.rank(method=rank_method, na_option=na_option, ascending=ascending)
        expected = frame_or_series(expected)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "na_option,ascending,expected",
        [
            ("bottom", True, [1.0, 2.0, 4.0, 3.0]),
            ("bottom", False, [1.0, 2.0, 4.0, 3.0]),
            ("top", True, [2.0, 3.0, 1.0, 4.0]),
            ("top", False, [2.0, 3.0, 1.0, 4.0]),
        ],
    )
    def test_rank_object_first(self, frame_or_series, na_option, ascending, expected):
        obj = frame_or_series(["foo", "foo", None, "foo"])
        result = obj.rank(method="first", na_option=na_option, ascending=ascending)
        expected = frame_or_series(expected)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "data,expected",
        [
            (
                {"a": [1, 2, "a"], "b": [4, 5, 6]},
                DataFrame({"b": [1.0, 2.0, 3.0]}, columns=Index(["b"], dtype=object)),
            ),
            ({"a": [1, 2, "a"]}, DataFrame(index=range(3), columns=[])),
        ],
    )
    def test_rank_mixed_axis_zero(self, data, expected):
        # mixed-type columns raise unless numeric_only=True drops them
        df = DataFrame(data, columns=Index(list(data.keys()), dtype=object))
        with pytest.raises(TypeError, match="'<' not supported between instances of"):
            df.rank()
        result = df.rank(numeric_only=True)
        tm.assert_frame_equal(result, expected)

    def test_rank_string_dtype(self, string_dtype_no_object):
        # GH#55362
        obj = Series(["foo", "foo", None, "foo"], dtype=string_dtype_no_object)
        result = obj.rank(method="first")
        exp_dtype = (
            "Float64" if string_dtype_no_object == "string[pyarrow]" else "float64"
        )
        if string_dtype_no_object.storage == "python":
            # TODO nullable string[python] should also return nullable Int64
            exp_dtype = "float64"
        expected = Series([1, 2, None, 3], dtype=exp_dtype)
        tm.assert_series_equal(result, expected)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@frame@methods@test_rank.py@.PATH_END.py
|
{
"filename": "clean_2.py",
"repo_name": "CPM-project/MCPM",
"repo_path": "MCPM_extracted/MCPM-master/examples/asteroid_first_try/clean_2.py",
"type": "Python"
}
|
"""
Removes points which are not on the K2C9 superstamp.
"""
import sys
from K2fov.c9 import inMicrolensRegion
# Stream the input file line by line and keep only the points that fall on
# the K2C9 microlensing superstamp.
with open(sys.argv[1]) as data:
    for line in data:
        columns = line.split()
        # columns 1 and 2 are RA and Dec (see usage of inMicrolensRegion)
        ra = float(columns[1])
        dec = float(columns[2])
        if inMicrolensRegion(ra, dec):
            # rstrip('\n') instead of line[:-1]: the old slice chopped off a
            # real data character when the final line had no trailing newline
            print(line.rstrip('\n'))
|
CPM-projectREPO_NAMEMCPMPATH_START.@MCPM_extracted@MCPM-master@examples@asteroid_first_try@clean_2.py@.PATH_END.py
|
{
"filename": "MosvizNIRISSExample.ipynb",
"repo_name": "spacetelescope/jdaviz",
"repo_path": "jdaviz_extracted/jdaviz-main/notebooks/MosvizNIRISSExample.ipynb",
"type": "Jupyter Notebook"
}
|
# Mosviz NIRISS example notebook
Note: We silence most warnings for now. For debugging, you can comment out the next cell and then restart the kernel to re-enable warnings.
```python
from astropy.utils.data import download_file
import pathlib
import tempfile
import warnings
from zipfile import ZipFile
from jdaviz import Mosviz
warnings.simplefilter('ignore')
```
Next, start Mosviz.
```python
mosviz = Mosviz()
mosviz.show()
```
In the cells below, we download and extract an example dataset (a subset of the NIRISS
data from the JWST GLASS early release science program).
```python
# Run this cell if your desired data path is a temporary directory.
data_dir = tempfile.gettempdir()
```
# *** OR ***
# Change this cell's type to Code and run it instead if you want to use a different data path.
# Replace the placeholder below with your real path.
data_dir = "/Users/username/Data/"
```python
example_data = 'https://stsci.box.com/shared/static/txtjqn2wn0oowolf7pt4x11cz4w2x1ga.zip'
fn = download_file(example_data, cache=True)
with ZipFile(fn, 'r') as sample_data_zip:
sample_data_zip.extractall(data_dir)
level3_path = pathlib.Path(data_dir)
data_dir = level3_path
```
Once the data is loaded with the code below, you can click any of the rows in the MOS table
to display the data for the corresponding object.
```python
mosviz.load_data(directory=f"{data_dir}/niriss_1324_reprocessed/", instrument="niriss")
```
## MOS Table
The MOS table itself can be accessed and modified from the API. We can export the entire table to a `QTable` object:
```python
mosviz.to_table()
```
Here we see all the information shown in the UI table, in addition to a few additional columns. Redshift, for example, is hidden in the UI unless at least one entry has a non-zero redshift assigned. We can either assign the redshift from the Line List plugin, or programmatically (for the currently selected object).
```python
mosviz.set_redshift(0.896)
```
In addition to `to_table`, we can extract the array of a single column:
```python
mosviz.get_column('Redshift')
```
and can also pass an array to any column, or a value for a specific row:
```python
mosviz.update_column('Redshift', 1.158, row=4)
```
Additionally, we can add custom named columns. Here we'll add a notes column and then populate the value for the second row:
```python
mosviz.add_column('Notes')
```
```python
mosviz.update_column('Notes', 'low S/N', row=2)
```
Once created, these columns will display by default in the UI. To hide a column from the UI (but still include in the underlying table object to access with the API):
```python
mosviz.hide_column('Notes')
```
|
spacetelescopeREPO_NAMEjdavizPATH_START.@jdaviz_extracted@jdaviz-main@notebooks@MosvizNIRISSExample.ipynb@.PATH_END.py
|
{
"filename": "test_publisher.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/bridge/rest/test/test_publisher.py",
"type": "Python"
}
|
#####################################################################################
#
# Copyright (c) typedef int GmbH
# SPDX-License-Identifier: EUPL-1.2
#
#####################################################################################
import json
from twisted.internet.defer import inlineCallbacks, maybeDeferred
from autobahn.wamp.exception import ApplicationError
from crossbar.test import TestCase
from crossbar._compat import native_string
from crossbar._logging import LogCapturer
from crossbar._log_categories import log_categories
from crossbar.bridge.rest import PublisherResource
from crossbar.bridge.rest.test import MockPublisherSession, renderResource
class PublisherTestCase(TestCase):
    """
    Unit tests for L{PublisherResource}. These tests publish no real WAMP
    messages, but test the interaction of the HTTP request and the resource.
    """

    @inlineCallbacks
    def test_basic_publish(self):
        """
        Test a very basic publish to a topic.
        """
        # the mock session records published messages for later inspection
        session = MockPublisherSession(self)
        resource = PublisherResource({}, session)

        with LogCapturer() as l:
            request = yield renderResource(resource,
                                           b"/",
                                           method=b"POST",
                                           headers={b"Content-Type": [b"application/json"]},
                                           body=b'{"topic": "com.test.messages", "args": [1]}')

        self.assertEqual(len(session._published_messages), 1)
        self.assertEqual(session._published_messages[0]["args"], (1, ))

        self.assertEqual(request.code, 200)

        # exactly one success log entry in category AR200 is expected
        logs = l.get_category("AR200")
        self.assertEqual(len(logs), 1)
        self.assertEqual(logs[0]["code"], 200)

        # the response body echoes the id of the published message
        self.assertEqual(json.loads(native_string(request.get_written_data())),
                         {"id": session._published_messages[0]["id"]})

        # ensure we have all the format-keys AR200 asks for (can we
        # extract these from the _log_categories string instead?)
        self.assertIn('code', logs[0])
        self.assertIn('reason', logs[0])

    @inlineCallbacks
    def test_publish_error(self):
        """
        A publish that errors will return the error to the client.
        """

        class RejectingPublisherSession(object):
            """
            A mock WAMP session.
            """
            # wrap in maybeDeferred so the resource sees a Deferred failure,
            # as it would with a real session
            def publish(self, topic, *args, **kwargs):
                return maybeDeferred(self._publish, topic, *args, **kwargs)

            def _publish(self, topic, *args, **kwargs):
                raise ApplicationError('wamp.error.not_authorized', foo="bar")

        session = RejectingPublisherSession()
        resource = PublisherResource({}, session)

        with LogCapturer() as l:
            request = yield renderResource(resource,
                                           b"/",
                                           method=b"POST",
                                           headers={b"Content-Type": [b"application/json"]},
                                           body=b'{"topic": "com.test.messages", "args": [1]}')

        # an ApplicationError is reported to the client but still an HTTP 200
        self.assertEqual(request.code, 200)

        logs = l.get_category("AR456")
        self.assertEqual(len(logs), 1)
        self.assertEqual(logs[0]["code"], 200)

        self.assertEqual(json.loads(native_string(request.get_written_data())), {
            "error": "wamp.error.not_authorized",
            "args": [],
            "kwargs": {
                "foo": "bar"
            }
        })

    @inlineCallbacks
    def test_publish_cberror(self):
        """
        A publish that errors with a Crossbar failure will return a generic
        error to the client and log the exception.
        """

        class RejectingPublisherSession(object):
            """
            A mock WAMP session.
            """
            def publish(self, topic, *args, **kwargs):
                return maybeDeferred(self._publish, topic, *args, **kwargs)

            # a non-WAMP exception simulates an internal Crossbar failure
            def _publish(self, topic, *args, **kwargs):
                raise ValueError("ono")

        session = RejectingPublisherSession()
        resource = PublisherResource({}, session)

        with LogCapturer() as l:
            request = yield renderResource(resource,
                                           b"/",
                                           method=b"POST",
                                           headers={b"Content-Type": [b"application/json"]},
                                           body=b'{"topic": "com.test.messages", "args": [1]}')

        # internal errors surface as HTTP 500 plus an AR500 log entry
        self.assertEqual(request.code, 500)

        logs = l.get_category("AR456")
        self.assertEqual(len(logs), 1)
        self.assertEqual(logs[0]["code"], 500)

        logs = l.get_category("AR500")
        self.assertEqual(len(logs), 1)

        self.assertEqual(json.loads(native_string(request.get_written_data())), {
            "error": "wamp.error.runtime_error",
            "args": ["Sorry, Crossbar.io has encountered a problem."],
            "kwargs": {}
        })

        # We manually logged it, so this one is OK
        self.flushLoggedErrors(ValueError)

    @inlineCallbacks
    def test_publish_needs_topic(self):
        """
        Test that attempted publishes without a topic will be rejected.
        """
        session = MockPublisherSession(self)
        resource = PublisherResource({}, session)

        with LogCapturer() as l:
            request = yield renderResource(resource,
                                           b"/",
                                           method=b"POST",
                                           headers={b"Content-Type": [b"application/json"]},
                                           body=b'{}')

        # nothing published, request rejected with 400 and an AR455 log entry
        self.assertEqual(len(session._published_messages), 0)

        self.assertEqual(request.code, 400)
        errors = l.get_category("AR455")
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0]["code"], 400)

        self.assertEqual(json.loads(native_string(request.get_written_data())), {
            "error": log_categories["AR455"].format(key="topic"),
            "args": [],
            "kwargs": {}
        })
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@bridge@rest@test@test_publisher.py@.PATH_END.py
|
{
"filename": "find_mosaic_pointings.py",
"repo_name": "mhardcastle/ddf-pipeline",
"repo_path": "ddf-pipeline_extracted/ddf-pipeline-master/utils/find_mosaic_pointings.py",
"type": "Python"
}
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import range
from past.utils import old_div
import os,sys
import numpy as np
import argparse
from auxcodes import sepn
from surveys_db import SurveysDB
deg2rad = np.pi/180.0  # degrees -> radians conversion factor
rad2deg = 180.0/np.pi  # radians -> degrees conversion factor
def read_pointingfile(pointingfilename=None):
    """Build a {field id: [status, ra, decl]} mapping for all LoTSS fields.

    ``pointingfilename`` is retained for backward compatibility only and must
    be ``None``: positions are now always read from the surveys database.
    """
    assert pointingfilename is None
    with SurveysDB(readonly=True) as sdb:
        sdb.cur.execute('select * from fields')
        fields = sdb.cur.fetchall()
    # keep only LoTSS fields, keyed by their pointing name
    pointingdict = {}
    for field in fields:
        if field['lotss_field'] != 1:
            continue
        pointingdict[field['id']] = [field['status'], field['ra'], field['decl']]
    return pointingdict
def find_pointings_to_mosaic(pointingdict, mospointingname):
    """Select the pointings to include in a mosaic around a central pointing.

    Includes the 6 closest pointings, plus any further ones whose separation
    is within 1.1x that of the 6th closest (capped at the 19 nearest). At a
    later stage this could account for e.g. beam elongation at low
    declination.

    Parameters:
        pointingdict: mapping of pointing name -> [status, ra, decl]
                      (as returned by read_pointingfile)
        mospointingname: name of the central pointing

    Returns:
        (mosaicpointings, mosseps): arrays of selected pointing names and
        their separations from the centre in degrees.
    """
    ra1, dec1 = pointingdict[mospointingname][1], pointingdict[mospointingname][2]
    # build the name/separation arrays in one pass instead of the previous
    # O(n^2) repeated np.append
    names = np.array(list(pointingdict))
    seps = np.array([
        sepn(ra1 * deg2rad, dec1 * deg2rad,
             pointingdict[j][1] * deg2rad, pointingdict[j][2] * deg2rad) * rad2deg
        for j in pointingdict
    ])
    sortedseps = np.sort(seps)
    if len(sortedseps) <= 7:
        # too few pointings to apply the 6-closest selection (the old code
        # raised IndexError here); fall back to including everything
        return names, seps
    endmosindex = 7
    # cap at the available number of pointings (old code assumed >= 20)
    for i in range(7, min(20, len(sortedseps))):
        if sortedseps[i] / sortedseps[6] < 1.1:
            endmosindex = i
    selected = seps < sortedseps[endmosindex]
    return names[selected], seps[selected]
if __name__ == '__main__':
    # Print the pointings (and their separations) that make up the mosaic
    # centred on the requested pointing.
    parser = argparse.ArgumentParser()
    parser.add_argument('mospointingname', type=str, help='Mosaic central pointing name')
    args = parser.parse_args()
    pointingdict = read_pointingfile()
    mosaicpointings, mosseps = find_pointings_to_mosaic(pointingdict, args.mospointingname)
    seps_csv = ','.join('%s' % sep for sep in mosseps)
    names_csv = ','.join('%s' % name for name in mosaicpointings)
    print('Separations: %s' % seps_csv)
    print('Pointings: %s' % names_csv)
|
mhardcastleREPO_NAMEddf-pipelinePATH_START.@ddf-pipeline_extracted@ddf-pipeline-master@utils@find_mosaic_pointings.py@.PATH_END.py
|
{
"filename": "blockchain.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/document_loaders/blockchain.py",
"type": "Python"
}
|
import os
import re
import time
from enum import Enum
from typing import List, Optional
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class BlockchainType(Enum):
    """Enumerator of the supported blockchains."""

    # Values are the network slugs used to build the Alchemy API hostname,
    # e.g. https://eth-mainnet.g.alchemy.com/...
    # Ethereum
    ETH_MAINNET = "eth-mainnet"
    ETH_GOERLI = "eth-goerli"
    ETH_SEPOLIA = "eth-sepolia"
    ETH_HOLESKY = "eth-holesky"
    # Polygon
    POLYGON_MAINNET = "polygon-mainnet"
    POLYGON_MUMBAI = "polygon-mumbai"
    POLYGON_AMOY = "polygon-amoy"
    # Arbitrum
    ARB_MAINNET = "arb-mainnet"
    ARB_SEPOLIA = "arb-sepolia"
    # Optimism
    OP_MAINNET = "opt-mainnet"
    OP_SEPOLIA = "opt-sepolia"
    # Base
    BASE_MAINNET = "base-mainnet"
    BASE_SEPOLIA = "base-sepolia"
    # Blast
    BLAST_MAINNET = "blast-mainnet"
    BLAST_SEPOLIA = "blast-sepolia"
    # zkSync
    ZKSYNC_MAINNET = "zksync-mainnet"
    ZKSYNC_SEPOLIA = "zksync-sepolia"
    # Zora
    ZORA_MAINNET = "zora-mainnet"
    ZORA_SEPOLIA = "zora-sepolia"
class BlockchainDocumentLoader(BaseLoader):
    """Load elements from a blockchain smart contract.

    See supported blockchains here: https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.blockchain.BlockchainType.html

    If no BlockchainType is specified, the default is Ethereum mainnet.

    The Loader uses the Alchemy API to interact with the blockchain.
    ALCHEMY_API_KEY environment variable must be set to use this loader.

    The API returns 100 NFTs per request and can be paginated using the
    startToken parameter.

    If get_all_tokens is set to True, the loader will get all tokens
    on the contract. Note that for contracts with a large number of tokens,
    this may take a long time (e.g. 10k tokens is 100 requests).
    Default value is false for this reason.

    The max_execution_time (sec) can be set to limit the execution time
    of the loader.

    Future versions of this loader can:
      - Support additional Alchemy APIs (e.g. getTransactions, etc.)
      - Support additional blockchain APIs (e.g. Infura, Opensea, etc.)
    """  # noqa: E501

    def __init__(
        self,
        contract_address: str,
        blockchainType: BlockchainType = BlockchainType.ETH_MAINNET,
        api_key: str = "docs-demo",
        startToken: str = "",
        get_all_tokens: bool = False,
        max_execution_time: Optional[int] = None,
    ):
        """
        Args:
            contract_address: The address of the smart contract.
            blockchainType: The blockchain type.
            api_key: The Alchemy API key.
            startToken: The start token for pagination.
            get_all_tokens: Whether to get all tokens on the contract.
            max_execution_time: The maximum execution time (sec).
        """
        self.contract_address = contract_address
        self.blockchainType = blockchainType.value
        # environment variable takes precedence over the constructor default
        self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key
        self.startToken = startToken
        self.get_all_tokens = get_all_tokens
        self.max_execution_time = max_execution_time

        if not self.api_key:
            raise ValueError("Alchemy API key not provided.")

        # a contract address is "0x" followed by exactly 40 hex digits
        if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address):
            raise ValueError(f"Invalid contract address {self.contract_address}")

    def load(self) -> List[Document]:
        """Fetch NFTs for the contract and wrap each one in a Document.

        Raises:
            ValueError: on a non-200 API response or if no NFTs are found.
            RuntimeError: if max_execution_time is exceeded.
        """
        result = []

        current_start_token = self.startToken

        start_time = time.time()

        while True:
            url = (
                f"https://{self.blockchainType}.g.alchemy.com/nft/v2/"
                f"{self.api_key}/getNFTsForCollection?withMetadata="
                f"True&contractAddress={self.contract_address}"
                f"&startToken={current_start_token}"
            )

            response = requests.get(url)

            if response.status_code != 200:
                raise ValueError(
                    f"Request failed with status code {response.status_code}"
                )

            items = response.json()["nfts"]

            if not items:
                break

            for item in items:
                content = str(item)
                tokenId = item["id"]["tokenId"]
                metadata = {
                    "source": self.contract_address,
                    "blockchain": self.blockchainType,
                    "tokenId": tokenId,
                }
                result.append(Document(page_content=content, metadata=metadata))

            # exit after the first API call if get_all_tokens is False
            if not self.get_all_tokens:
                break

            # get the start token for the next API call from the last item in array
            current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"])

            if (
                self.max_execution_time is not None
                and (time.time() - start_time) > self.max_execution_time
            ):
                raise RuntimeError("Execution time exceeded the allowed time limit.")

        if not result:
            raise ValueError(
                f"No NFTs found for contract address {self.contract_address}"
            )

        return result

    # add one to the tokenId, ensuring the correct tokenId format is used
    def _get_next_tokenId(self, tokenId: str) -> str:
        value_type = self._detect_value_type(tokenId)

        if value_type == "hex_0x":
            value_int = int(tokenId, 16)
        elif value_type == "hex_0xbf":
            value_int = int(tokenId[2:], 16)
        else:
            value_int = int(tokenId)

        result = value_int + 1

        if value_type == "hex_0x":
            # preserve the original zero-padded width after the "0x" prefix
            return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x")
        elif value_type == "hex_0xbf":
            return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x")
        else:
            return str(result)

    # A smart contract can use different formats for the tokenId
    @staticmethod
    def _detect_value_type(tokenId: str) -> str:
        # NOTE(review): any "0xbf..." token matches the "0x" test first, so
        # the "hex_0xbf" branch is unreachable and such tokens get the
        # generic hex handling — confirm whether the special case is needed.
        if isinstance(tokenId, int):
            return "int"
        elif tokenId.startswith("0x"):
            return "hex_0x"
        elif tokenId.startswith("0xbf"):
            return "hex_0xbf"
        else:
            # Fix: plain decimal token ids previously fell into the
            # "hex_0xbf" branch, where int(tokenId[2:], 16) mangled or
            # crashed on them; treat them as plain integers instead.
            return "int"
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@document_loaders@blockchain.py@.PATH_END.py
|
{
"filename": "ccf.py",
"repo_name": "mlafarga/raccoon",
"repo_path": "raccoon_extracted/raccoon-master/raccoon/ccf.py",
"type": "Python"
}
|
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import glob
import os
from astropy.io import fits
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
# from scipy.interpolate import griddata
import ipdb
import colorcet as cc
import cmocean
from . import peakutils
from . import plotutils
from . import pyutils
# import ccflibfort77 as ccflibfort
# from . import ccflibfort as ccflibfort
from . import ccflibfort
# from raccoon import ccflibfort77 as ccflibfort
# distro = pyutils.get_distro()
# if 'ubuntu' in distro: from raccoon import ccflibfort77ubuntu as ccflibfort
# elif 'debian' in distro: from raccoon import ccflibfort77debian as ccflibfort
# else: import ccflibfort77 as ccflibfort
# dirhere = os.path.dirname(os.path.realpath(__file__))
dirhere = os.path.abspath(os.path.dirname(__file__))
# import pkg_resources
# # dirhere = pkg_resources.resource_filename(__name__, 'data/mask/CARM_VIS/')
# dirhere = pkg_resources.resource_filename('raccoon', 'data/')
###############################################################################
# CCF
def computeccf(w, f, c, wm, fm, rv, ron=None, forig=None):
    """
    Wrapper for the fortran functions `ccflibfort.computeccf`,
    `ccflibfort.computeccferr` and `ccflibfort.computeccferrfluxorig`.

    The CCF error is only computed when `ron` is not None (default); skipping
    it makes the execution slightly faster.

    Parameters
    ----------
    w, f : 1d array like, same shape
        Spectrum (1d, i.e. for 1 order). Wavelength units must be consistent
        with the mask wavelength `wm`.
    c : 1d array like, same shape as `w` and `f`
        Blaze function, to take into account the observed flux. If no blaze,
        input an array of ones, e.g. `c = np.ones_like(w)`.
    wm, fm : 1d array like, same shape
        Mask position (`wm`) and weight (`fm`).
    rv : 1d array-like
        RV array for which the mask is Doppler-shifted.
    ron : float
        Read out noise. If None (default), no CCF error is computed.
    forig : 1d array
        Original spectrum flux (before correcting for order ratios).

    Returns
    -------
    ccf, ccferr : 1d array
    """
    # Empty spectrum or empty mask: nothing to correlate -> all-NaN outputs
    if any(len(arr) == 0 for arr in (w, f, c, wm, fm)):
        return rv * np.nan, rv * np.nan
    if ron is None:
        # CCF only; the error column is filled with NaN
        ccf = ccflibfort.computeccf(w, f, c, wm, fm, rv)
        return ccf, np.ones_like(ccf) * np.nan
    if forig is None:
        return ccflibfort.computeccferr(w, f, c, wm, fm, rv, ron)
    return ccflibfort.computeccferrfluxorig(w, f, c, forig, wm, fm, rv, ron)
def computerverr(rv, ccf, ccferr, returnall=False):
    """Compute the total RV uncertainty from the CCF profile and its errors.

    The CCF slope is estimated at each point (one-sided at the array ends,
    average of the two adjacent slopes elsewhere). Each point contributes
    ``rverr_i = ccferr_i / |slope_i|`` and the total error is
    ``1 / sqrt(sum(1 / rverr_i**2))``.

    Parameters
    ----------
    rv, ccf, ccferr : 1d arrays
    returnall : bool (default False)
        If True also return the per-point derivative and RV errors.

    Returns
    -------
    rverrt : float
        Total RV error.
    der, rverr : 1d arrays (only if `returnall`)
    """
    # Per-interval slopes, then per-point derivative estimates
    slopes = np.diff(ccf) / np.diff(rv)
    der = np.ones_like(rv) * np.nan
    der[0] = np.abs(slopes[0])                        # forward difference
    der[-1] = np.abs(slopes[-1])                      # backward difference
    der[1:-1] = np.abs((slopes[1:] + slopes[:-1]) / 2.)  # average of both
    ccferr = np.array(ccferr)
    # Per-point RV error and inverse-variance-weighted total
    rverr = ccferr / der
    rverrt = 1. / np.sqrt(np.sum(1. / rverr**2))
    if returnall:
        return rverrt, der, rverr
    return rverrt
def sumccf(rv, ccf, ords):
    """
    Coadd the CCFs of the selected spectral orders.

    Parameters
    ----------
    rv : 1d array
        RV grid (only its shape/dtype is used, to initialise the sum).
    ccf : list of 1d arrays
        CCF of each order.
    ords : array
        Indices of the spectrum orders to coadd.

    Returns
    -------
    ccfsum : 1d array
    """
    ccfsum = np.zeros_like(rv)
    for onum in ords:
        ccfsum += ccf[onum]
    return ccfsum
###############################################################################
# Fit function
def fitgaussian(x, y):
    """
    Fit an inverted Gaussian G(x)=shift+amp*e^(-(x-cen)^2/(2*wid^2)) to the data.

    Thin wrapper around `peakutils.fit_gaussian_peak` with hints tuned for a
    CCF-like absorption profile (negative amplitude, continuum near 0.8).
    Returns the lmfit result object.
    """
    hints = dict(amp_hint=-0.5, cen_hint=None, wid_hint=0.01, shift_hint=0.8, minmax='min')
    return peakutils.fit_gaussian_peak(x, y, **hints)
def fitgaussianfortran(rv, ccf, fitrng, funcfitnam='gaussian', nfitpar=4):
    """Fit a Gaussian to the CCF using the fortran library `ccflibfort`.

    Gaussian definition:
        shift + amp * e**(-(x-cen)**2 / wid)
        FWHM = 2*sqrt(ln(2)*wid)

    Parameters
    ----------
    rv, ccf : 1d arrays
    fitrng : str or other
        Range-selection mode: 'max' -> `fitccfmax`, 'maxabs' -> `fitccfmaxabs`,
        anything else is forwarded to `fitccf`.
    funcfitnam : str, {'gaussian'}
    nfitpar : int, {4}
        Number of parameters to fit (4 in case of a Gaussian).

    Returns
    -------
    fitpar : dict
        Best-fit parameters 'amp', 'cen', 'wid', 'shift' and their errors
        ('amperr', 'cenerr', 'widerr', 'shifterr').
    """
    # Fit parameters and errors (NaN placeholders)
    a, da = [np.nan] * nfitpar, [np.nan] * nfitpar
    # Arbitrary per-point uncertainty for the chi^2 fit  #### TEST CHANGE VALUES
    sig = np.array([.01] * len(rv))
    # Initial guess [amp, cen, wid, shift] built from the CCF minimum
    ccfmin = min(ccf)
    rvccfmin = rv[np.argmin(ccf)]
    ain = np.array([ccfmin - 1, rvccfmin, 5., 1.])
    lista = np.array([1, 1, 1, 1], dtype='int')  # 1=fit, 0=fixed
    # Dispatch to the fortran fitter matching the requested range mode
    if fitrng == 'max':
        a, da = ccflibfort.fitccfmax(rv, ccf, sig, ain, lista, funcfitnam)
    elif fitrng == 'maxabs':
        a, da = ccflibfort.fitccfmaxabs(rv, ccf, sig, ain, lista, funcfitnam)
    else:
        a, da = ccflibfort.fitccf(rv, ccf, sig, fitrng, ain, lista, funcfitnam)
    # Pack parameters and errors into a dictionary
    fitpar = {}
    for i, name in enumerate(('amp', 'cen', 'wid', 'shift')):
        fitpar[name] = a[i]
        fitpar[name + 'err'] = da[i]
    return fitpar
###############################################################################
# Bisector
def computebisector(x, y, xerr, n=100):
    """
    Compute the bisector of a Gaussian-like line profile and its errors.

    Parameters
    ----------
    x, y : 1d arrays
        Data. Must have a Gaussian-like shape (single minimum).
    xerr : 1d array
        Errors of the x datapoints.
    n : int (default 100)
        Number of bisector points.

    Returns
    -------
    bx, by : 1d arrays
        Bisector x and y coordinates.
    bxerr : 1d array
        Bisector x datapoints error.
    bx1, bx2 : 1d arrays
        x values at the bisector heights `by` for each side of the line.
    bx1err, bx2err : 1d arrays
        Errors for the data `bx1` and `bx2`.
    """
    # Locate the minimum and the absolute maximum on each side of it
    imin = np.nanargmin(y)
    ileft = np.nanargmax(y[:imin])
    iright = imin + np.nanargmax(y[imin:])
    iright_end = iright if iright == len(y) else iright + 1
    # Bisector heights: from the line bottom up to the lower of the two maxima
    ytop = np.nanmin([y[ileft], y[iright]])
    by = np.linspace(y[imin], ytop, n)
    # For each height, interpolate the x position on each wing (y -> x)
    side1 = interp1d(y[ileft:imin+1], x[ileft:imin+1], kind='linear')
    side2 = interp1d(y[imin:iright_end], x[imin:iright_end], kind='linear')
    bx1 = side1(by)
    bx2 = side2(by)
    # Bisector: midpoint of the two wings at each height
    bx = (bx2 + bx1)/2.
    # -----------------------
    # Bisector error: no x errors exist at the bisector heights, only at the
    # original datapoints -> interpolate the errors to the heights as well
    side1err = interp1d(y[ileft:imin+1], xerr[ileft:imin+1], kind='linear')
    side2err = interp1d(y[imin:iright_end], xerr[imin:iright_end], kind='linear')
    bx1err = side1err(by)
    bx2err = side2err(by)
    # Propagate through the midpoint average
    bxerr = np.sqrt(bx1err**2 + bx2err**2) / 2.
    return bx, by, bxerr, bx1, bx2, bx1err, bx2err
def computebisector_bis(x, y, ybotmin_percent=10., ybotmax_percent=40., ytopmin_percent=60., ytopmax_percent=90., verb=True):
    """
    Compute bisector inverse slope (BIS).

    Note that the bisector is not interpolated in order to find the points
    closest to the region limits defined by `ybotmin_percent` etc. So if the
    bisector sampling is low the points won't be correctly selected.

    Parameters
    ----------
    x, y : 1d array like
        Bisector coordinates.
    ybotmin_percent, ybotmax_percent : float
        Bisector bottom region limits in percentage.
    ytopmin_percent, ytopmax_percent : float
        Bisector top region limits in percentage.
    verb : bool
        If True print the number of bisector points (with a warning when the
        sampling is too low).

    Returns
    -------
    bis : float
        Bisector inverse slope (mean x of top region minus bottom region).
    xmeantop, xmeanbot : float
        Mean x in the top and bottom regions.
    iybotmin, iybotmax, iytopmin, iytopmax : int
        Indices of the region limits.

    Notes
    -----
    BIS definition from Queloz et al. 2001
    """
    # Check bisector sampling
    s = len(y)
    warn = 'Not good sampling to compute BIS!' if s < 100. else ''
    # BUGFIX: the '{}' placeholder was previously never filled (the string
    # was printed literally); now matches the equivalent message in
    # `computebisector_biserr`.
    if verb: print(' {} points in bisector.'.format(s), warn)
    # Bisector up and down region limits -> absolute value
    y_min = np.nanmin(y)
    y_max = np.nanmax(y)
    y_delta = y_max - y_min
    ybotmin = y_min + y_delta * ybotmin_percent/100.
    ybotmax = y_min + y_delta * ybotmax_percent/100.
    ytopmin = y_min + y_delta * ytopmin_percent/100.
    ytopmax = y_min + y_delta * ytopmax_percent/100.
    # Bisector up and down region limits indices
    # Note: Approximate regions, depend on bisector sampling
    iybotmin = np.nanargmin(np.abs(y-ybotmin))
    iybotmax = np.nanargmin(np.abs(y-ybotmax))
    iytopmin = np.nanargmin(np.abs(y-ytopmin))
    iytopmax = np.nanargmin(np.abs(y-ytopmax))
    # Compute mean RV in each region
    #### Should it be a weighted mean?
    xmeantop = np.nanmean(x[iytopmin:iytopmax+1])
    xmeanbot = np.nanmean(x[iybotmin:iybotmax+1])
    # Compute bisector inverse slope BIS
    bis = xmeantop - xmeanbot
    return bis, xmeantop, xmeanbot, iybotmin, iybotmax, iytopmin, iytopmax
def computebisector_biserr(x, y, xerr, n=100, bybotmin_percent=10., bybotmax_percent=40., bytopmin_percent=60., bytopmax_percent=90., xrealsampling=None, verb=True, returnall=False):
    """
    Compute bisector, bisector inverse slope (BIS) and their errors.

    Same code as in `computebisector` (bisector) and `computebisector_bis`
    (BIS), copied here because the intermediate products are needed to
    compute the BIS error. If the BIS error is not needed, use the other
    functions:
    - `computebisector`
    - `computebisector_bis`

    Parameters
    ----------
    x, y : 1d arrays
        Line profile (Gaussian-like, single minimum).
    xerr : 1d array
        Errors of the x datapoints.
    n : int (default 100)
        Number of bisector points.
    bybotmin_percent, bybotmax_percent : float
        Bisector bottom region limits in percentage.
    bytopmin_percent, bytopmax_percent : float
        Bisector top region limits in percentage.
    xrealsampling : float (default None)
        Sampling of the real data. The input data is usually oversampled in
        order to correctly compute the bisector, so the real sampling is
        needed to compute the BIS error properly. If None, the sampling of
        the input `x` is used (with a warning).
    verb : bool
    returnall : bool (default False)
        Also output the intermediate products.

    Returns
    -------
    bx, by, bxerr : 1d arrays
        Bisector x, y coordinates and x errors.
    bis, biserr : float
        Bisector inverse slope and its error.
    If `returnall`, also: bx1, bx2, bx1err, bx2err, bxmeantop, bxmeanbot,
    ibybotmin, ibybotmax, ibytopmin, ibytopmax.
    """
    # Bisector
    # --------
    # y minimum and maxima (maxima: absolute maxima each side)
    imin = np.nanargmin(y)  # Minimum
    imax1 = np.nanargmax(y[:imin])  # Maximum left part
    imax2 = imin + np.nanargmax(y[imin:])  # Maximum right part
    if imax2 == len(y): imax2p = imax2
    else: imax2p = imax2 + 1  # plus one
    y_smallestmax = np.nanmin([y[imax1], y[imax2]])  # Smallest maximum
    # Bisector y heights
    by = np.linspace(y[imin], y_smallestmax, n)
    # Interpolate bisector y to x for both sides of the line (y -> x)
    interpolate_x1 = interp1d(y[imax1:imin+1], x[imax1:imin+1], kind='linear')
    interpolate_x2 = interp1d(y[imin:imax2p], x[imin:imax2p], kind='linear')
    # - Bisector x values
    bx1 = interpolate_x1(by)
    bx2 = interpolate_x2(by)
    # Compute bisector
    bx = (bx2 + bx1)/2.
    # -----------------------
    # Bisector error
    # --------------
    # No x errors exist at the bisector heights, only at the original
    # datapoints -> interpolate the errors too
    interpolate_x1err = interp1d(y[imax1:imin+1], xerr[imax1:imin+1], kind='linear')
    interpolate_x2err = interp1d(y[imin:imax2p], xerr[imin:imax2p], kind='linear')
    bx1err = interpolate_x1err(by)
    bx2err = interpolate_x2err(by)
    # Compute bisector error (error propagation)
    bxerr = np.sqrt(bx1err**2 + bx2err**2) / 2.
    # -----------------------
    # BIS
    # ---
    # Check bisector sampling
    s = len(by)
    warn = 'Not good sampling to compute BIS!' if s < 100. else ''
    if verb: print(' {} points in bisector.'.format(s), warn)
    # Bisector up and down region limits -> absolute value
    by_min = np.nanmin(by)
    by_max = np.nanmax(by)
    by_delta = by_max - by_min
    bybotmin = by_min + by_delta * bybotmin_percent/100.
    bybotmax = by_min + by_delta * bybotmax_percent/100.
    bytopmin = by_min + by_delta * bytopmin_percent/100.
    bytopmax = by_min + by_delta * bytopmax_percent/100.
    # Bisector up and down region limits indices
    # Note: Approximate regions, depend on bisector sampling
    ibybotmin = np.nanargmin(np.abs(by-bybotmin))
    ibybotmax = np.nanargmin(np.abs(by-bybotmax))
    ibytopmin = np.nanargmin(np.abs(by-bytopmin))
    ibytopmax = np.nanargmin(np.abs(by-bytopmax))
    # Compute mean RV in each region
    #### Should it be a weighted mean?
    bxmeantop = np.nanmean(bx[ibytopmin:ibytopmax+1])
    bxmeanbot = np.nanmean(bx[ibybotmin:ibybotmax+1])
    # Compute bisector inverse slope BIS
    bis = bxmeantop - bxmeanbot
    # -----------------------
    # BIS error
    # ---------
    # Compute number of points in top and bottom regions
    # Use regions x width and x sampling
    # - Original data x sampling
    if xrealsampling is None:
        dx_data = x[1] - x[0]
        if verb:
            print('No real sampling provided for bisector! Need to compute BIS error. Using sampling from input data: {}'.format(dx_data))
    else:
        dx_data = xrealsampling
    # - Top and bottom regions x width (use only one side of the line)
    dx_top = np.abs(interpolate_x1(bytopmax) - interpolate_x1(bytopmin))
    dx_bot = np.abs(interpolate_x1(bybotmax) - interpolate_x1(bybotmin))
    # - Number of points
    ntop = dx_top / dx_data
    nbot = dx_bot / dx_data
    # Mean error top and bottom regions
    # BUGFIX: the index ranges were swapped here -- the top mean error was
    # computed from the bottom-region bisector errors and vice versa.
    bxtopmeanerr = np.nanmean(bxerr[ibytopmin:ibytopmax+1]) / np.sqrt(ntop)
    bxbotmeanerr = np.nanmean(bxerr[ibybotmin:ibybotmax+1]) / np.sqrt(nbot)
    # BIS error
    biserr = np.sqrt(bxtopmeanerr**2 + bxbotmeanerr**2)
    if not returnall:
        return bx, by, bxerr, bis, biserr
    else:
        return bx, by, bxerr, bis, biserr, bx1, bx2, bx1err, bx2err, bxmeantop, bxmeanbot, ibybotmin, ibybotmax, ibytopmin, ibytopmax
###############################################################################
# Utils
def compute_wstd(x, e):
    """
    Compute the weighted standard deviation (and weighted mean) of `x`,
    weighting each datapoint by 1/e^2 and dropping non-finite entries.

    From: https://stackoverflow.com/questions/2413522/weighted-standard-deviation-in-numpy

    Returns
    -------
    wstd : float
    wmean : float
    """
    # Keep only datapoints where both value and error are finite
    keep = np.isfinite(x) & np.isfinite(e)
    x, e = x[keep], e[keep]
    # Inverse-variance weights
    w = 1. / np.array(e)**2
    wmean = np.average(x, weights=w)
    wstd = np.sqrt(np.average((x - wmean)**2, weights=w))
    return wstd, wmean
def determine_fitrng(fitrng, rv, ccf, imin=None, verb=True):
    """
    Determine which points of the CCF to use for the fit.

    Parameters
    ----------
    fitrng : str or float
        'maxabs': from the absolute maximum on each side of the minimum.
        'maxcl': from the closest maximum on each side of the minimum.
        'all': the full CCF.
        float (number or numeric string): CCF minimum +/- `fitrng`.
    rv, ccf : 1d array
        CCF datapoints. `rv` only needed if `fitrng` is float. `ccf` only
        needed for `fitrng` equal to `maxabs`, `maxcl`, float or `all`
        (actually for `all` only need the array size).
    imin : int or None (default None)
        Position (array index) of the minimum. If None (default) look for the
        absolute minimum in `ccf`.
    verb : bool
        Print a notice when a float range spills over the RV array and the
        full CCF is used instead.

    Returns
    -------
    ifit1, ifit2 : int
        Indices of the start and end datapoints of the RV and CCF arrays to
        use to fit. Select data using array slicing. No need to add `+ 1` to
        `ifit2` when doing array slicing.
    """
    # From absolute maximum at left side of the minimum to absolute maximum at right side
    if fitrng == 'maxabs':
        ifit1, ifit2, _ = peakutils.min_find_maxabs(ccf, imin=imin)
        # Handle array ends
        if ifit2 < len(ccf): ifit2 += 1
    # From closest maxima at each side of the minimum
    elif fitrng == 'maxcl':
        ifit1, ifit2, _ = peakutils.min_find_maxclosest(ccf, imin=imin)
        # Handle array ends
        if ifit2 < len(ccf): ifit2 = ifit2 + 1
    # All CCF
    elif fitrng == 'all':
        ifit1 = 0
        ifit2 = len(ccf)
    # CCF min +/- rng
    elif pyutils.isfloatnum(fitrng):
        rng = float(fitrng)
        # Minimum
        rvmin = rv[np.nanargmin(ccf)]
        # Range indices
        ifit1 = np.nanargmin(abs(rv - (rvmin - rng)))
        ifit2 = np.nanargmin(abs(rv - (rvmin + rng)))
        # Check if range is within RV array
        if ifit1 == 0 or ifit1 == 1 or ifit2 == len(rv) - 1 or ifit2 == len(rv):
            # BUGFIX: previously only the message was printed but the
            # truncated range was returned; now actually fit the full CCF.
            if verb: print(' Fit range too large, fitting all.')
            ifit1 = 0
            ifit2 = len(ccf)
    return ifit1, ifit2
def selectmask_carmenesgto(filmask, spt, vsini, sptdefault='M3.5', vsinidefault=2.0, verbose=True):
    """
    Select a CARMENES GTO mask matching the target spectral type and vsini.

    Parameters
    ----------
    filmask : str
        Mask-selection file, containing the information of the masks to be
        used. Columns:
            0 'objmask': CARMENES id of the object used to make the mask
            1 'spt': Spectral type of 'objmask'
            2 'vsini': vsini of 'objmask' [km/s]
            3 'file': Path to mask file
    spt : str
        Spectral type of the target, e.g. `M3.5`. Falls back to `sptdefault`
        if None or not 4 characters long.
    vsini : float
        vsini of the target [km/s]. Falls back to `vsinidefault` if None or
        not finite.
    sptdefault : str
        Value to use if `spt` is not valid.
    vsinidefault : float
        Value to use if `vsini` is not valid.
    verbose : bool
        Currently unused; kept for backward compatibility.

    Returns
    -------
    maskinfo : pandas Series
        File and information of the mask selected for the target.
        Labels: 'objmask', 'spt', 'vsini', 'file'.
        E.g. get the file of the mask: `maskinfo['file']`
    """
    # Fall back to defaults for invalid inputs.
    # BUGFIX: these assignments previously went to a typo variable `stp`, so
    # the defaults never took effect (and `spt=None` crashed further down).
    if spt is None or len(spt) != 4:
        spt = sptdefault
    if vsini is None or not np.isfinite(vsini):
        vsini = vsinidefault
    # Read the mask-selection table (whitespace-separated, '#' comments)
    df = pd.read_csv(filmask, sep=r'\s+', header=0, names=None, usecols=None, skip_blank_lines=True, comment='#')
    # Spectral type as a number: M3.5 -> 3.5
    spt = float(spt[1:])
    df['sptnum'] = [float(s[1:]) for s in df['spt']]
    # Available spectral types
    sptavailable = np.sort(np.unique(df['sptnum']))
    # Masks with the same spectral type as the object (or the closest one)
    if spt in sptavailable:
        dfspt = df[df['sptnum'] == spt]
    else:
        sptclosest = sptavailable[np.nanargmin(np.abs(sptavailable - spt))]
        dfspt = df[df['sptnum'] == sptclosest]
    # Mask with vsini closest to the target vsini (first occurrence on ties).
    # BUGFIX: the closest-vsini index was computed but `iloc[0]` was returned,
    # i.e. the vsini selection was never applied.
    idx = np.nanargmin(np.abs(np.asarray(dfspt['vsini'], dtype=float) - vsini))
    maskinfo = dfspt.iloc[idx]
    return maskinfo
def showmask_default():
    """Print the default masks shipped with the package, grouped by instrument."""
    print('List available masks for different instruments')
    # One directory per instrument under data/mask/
    for dirinst in glob.glob(os.path.join(dirhere, 'data/mask/*')):
        print(os.path.basename(dirinst))
        for filmask in glob.glob(os.path.join(dirinst, '*.mas')):
            print(' ', os.path.basename(filmask))
    return
def listmask_default():
    """Default masks, as a dict: instrument -> mask id -> mask info.

    Mask info keys: 'sptmask', 'vsinimask', 'objmask', 'filmask'.
    """
    # (CARMENES id, SpT, vsini [km/s]); the same targets are available for
    # both the VIS and NIR channels
    targets = [
        ('J12123+544S', 'M0.0V', '2.0'),
        ('J11033+359', 'M1.5V', '2.0'),
        ('J19169+051N', 'M2.5V', '2.0'),
        ('J07274+052', 'M3.5V', '2.0'),
        ('J13229+244', 'M4.0V', '2.0'),
        ('J20260+585', 'M5.0V', '2.0'),
        ('J10564+070', 'M6.0V', '2.9'),
        ('J02530+168', 'M7.0V', '2.0'),
    ]
    # All mask files share the same naming scheme: <objmask> + suffix
    suffix = 'serval_tellbervmax_fwhm2.00-30.00_contrminmin0.06_depthwq0.60_contrastmeanfwhm-1.mas'
    dictmask = {}
    for inst in ['CARM_VIS', 'CARM_NIR']:
        dictmask[inst] = {
            obj + 'default': {
                'sptmask': sptm,
                'vsinimask': vsinim,
                'objmask': obj,
                'filmask': os.path.join(dirhere, 'data/mask/{}/'.format(inst), obj + suffix),
            }
            for obj, sptm, vsinim in targets
        }
    # Instruments with no default mask (yet)
    for inst in ['HARPS', 'HARPN', 'EXPRES', 'ESPRESSO', 'ESPRESSO4x2', 'ESPRESSO8x4']:
        dictmask[inst] = {}
    return dictmask
def selectmask_default(maskid, inst):
    """Return the default mask info dict for `maskid` of instrument `inst`."""
    return listmask_default()[inst][maskid]
def selecttell_default(inst):
    """Return the default telluric mask file for the instrument `inst`."""
    telluric_files = {
        'CARM_VIS': os.path.join(dirhere, 'data/tellurics/CARM_VIS/telluric_mask_carm_short.dat'),
        'CARM_NIR': os.path.join(dirhere, 'data/tellurics/CARM_NIR/telluric_mask_nir4.dat'),
    }
    return telluric_files[inst]
def selectfilphoenixf(spt, dirin=os.path.join(dirhere, 'data/phoenix/')):
    """Select the PHOENIX spectrum file matching the spectral type `spt`.

    UNFINISHED STUB: currently always returns None (see TODO below).

    Example files expected under `dirin`:
    - lte02900-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits
    - lte03000-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits
    - lte03100-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits
    - lte03200-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits
    - lte03300-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits
    - lte03400-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits
    - lte03900-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits
    """
    # NOTE(review): this globs `dirin` itself (the directory path), not the
    # files inside it — presumably should be os.path.join(dirin, '*.fits');
    # harmless while the function is a stub. `lisfil` is currently unused.
    lisfil = np.sort(glob.glob(dirin))
    # M-dwarf spectral subtype -> effective temperature [K], to pick the
    # closest PHOENIX model once implemented. Currently unused.
    spt_T = {
        # TODO Check CARM data
        0.0: 3800,
        0.5: 3800,
        1.0: 3600,
        1.5: 3600,
        2.0: 3400,
        2.5: 3400,
        3.0: 3250,
        3.5: 3250,
        4.0: 3100,
        4.5: 3100,
        5.0: 2800,
        5.5: 2800,
        6.0: 2600,
        6.5: 2600,
        7.0: 2500,
        7.5: 2500,
        8.0: 2400,
        8.5: 2400,
        9.0: 2300,
    }
    # TODO: actual selection not implemented yet
    phoenixfilf = None
    return phoenixfilf
###############################################################################
# Input / Output
def outfits_ccfall(
    rv,
    ccfsum, ccfparsum,  # sum
    ccf, ccfpar,  # orders
    bxsum, bysum,  # sum
    bx, by,  # orders
    headerobs,
    filout,
):
    """
    Save CCF data in a FITS file.

    Parameters
    ----------
    rv : 1d array
        RV grid common to all CCFs [km/s]; stored as linear WCS-style
        keywords (CRPIX1/CRVAL1/CDELT1), not as data.
    ccfsum : 1d array
        Coadded CCF of all orders -> data of extension CCFSUM.
    ccfparsum : dict
        Parameters of the coadded CCF (fit parameters, RV corrections,
        observation data, mask info) -> header of extension CCFSUM.
    ccf : 2d array
        CCF of each order -> data of extension CCFO.
    ccfpar : pandas DataFrame
        Per-order CCF parameters -> table extension CCFPARO.
    bxsum, bysum : 1d arrays
        Bisector of the coadded CCF -> extension BISECTORSUM.
    bx, by : 2d arrays
        Per-order bisectors -> extensions BISECTORRVO / BISECTORCCFO.
    headerobs : astropy Header
        Header of the observation -> header of primary extension.
    filout : str
        Output FITS file name.

    Data saved:
    - Header of the observation -> Header of primary extension
    - RV -> axis
    - CCF sum -> data CCFSUM
    - CCF sum parameters -> header CCFSUM
    - CCF of each order -> data CCFO
    - CCF of each order parameters -> data ccfpar
    - Bisector (x and y) -> BISECTORSUM
    Note: Bisector data inverted wrt CCF data: axis contains flux and data
    contains RV (because bisector steps are constant in flux, not in RV).
    To read the data back use `infits_ccfall`.
    """
    # NOTE(review): `nord` is currently unused; `ords` drives the per-order
    # mask-line keywords below.
    nord = len(ccf)
    ords = np.arange(0, len(ccf), 1)
    # Observation header
    hdu0 = fits.PrimaryHDU(header=headerobs)
    # -----------------------
    # CCF sum: RV grid stored via linear WCS keywords (1-indexed ref pixel)
    hduccfsum = fits.ImageHDU(ccfsum, name='ccfsum')
    hduccfsum.header['CRPIX1'] = (1, 'Reference pixel')
    hduccfsum.header['CRVAL1'] = (rv[0], 'Value of reference pixel [km/s]')
    hduccfsum.header['CDELT1'] = (rv[1]-rv[0], 'Step [km/s]')
    hduccfsum.header['CUNIT1'] = ('km/s', 'Units')
    # -----------------------
    # CCF sum parameters, stored in the CCFSUM header
    # None to nan
    ccfparsum = {k: (v if v is not None else np.nan) for k, v in ccfparsum.items()}
    # NaN nor infinite not allowed in FITS header -> store as str(nan)/str(inf)
    # (existing strings are passed through the isfinite check as NaN so they
    # remain untouched)
    ccfparsum = {k: (v if np.isfinite(v if not isinstance(v, str) else np.nan) else str(v)) for k, v in ccfparsum.items()}
    # Gaussian fit parameters and errors
    hduccfsum.header['HIERARCH CCF FIT AMP'] = (ccfparsum['fitamp'], 'Fit')
    hduccfsum.header['HIERARCH CCF FIT CEN'] = (ccfparsum['fitcen'], 'Fit')
    hduccfsum.header['HIERARCH CCF FIT WID'] = (ccfparsum['fitwid'], 'Fit')
    hduccfsum.header['HIERARCH CCF FIT SHIFT'] = (ccfparsum['fitshift'], 'Fit')
    hduccfsum.header['HIERARCH CCF FIT AMPERR'] = (ccfparsum['fitamperr'], 'Fit')
    hduccfsum.header['HIERARCH CCF FIT CENERR'] = (ccfparsum['fitcenerr'], 'Fit')
    hduccfsum.header['HIERARCH CCF FIT WIDERR'] = (ccfparsum['fitwiderr'], 'Fit')
    hduccfsum.header['HIERARCH CCF FIT SHIFTERR'] = (ccfparsum['fitshifterr'], 'Fit')
    # Derived CCF quantities
    hduccfsum.header['HIERARCH CCF RV'] = (ccfparsum['rv'], 'Corrected [km/s]')
    hduccfsum.header['HIERARCH CCF FWHM'] = (ccfparsum['fwhm'], '[km/s]')
    hduccfsum.header['HIERARCH CCF CONTRAST'] = (ccfparsum['contrast'], '[%]')
    hduccfsum.header['HIERARCH CCF BIS'] = (ccfparsum['bis'], '[km/s]')
    hduccfsum.header['HIERARCH CCF RVERR'] = (ccfparsum['rverr'], '[km/s]')
    hduccfsum.header['HIERARCH CCF FWHMERR'] = (ccfparsum['fwhmerr'], '[km/s]')
    hduccfsum.header['HIERARCH CCF CONTRASTERR'] = (ccfparsum['contrasterr'], '[%]')
    hduccfsum.header['HIERARCH CCF BISERR'] = (ccfparsum['biserr'], '[km/s]')
    hduccfsum.header['HIERARCH CCF RVERRABS'] = (ccfparsum['rverrabs'], 'RV err + shift error [km/s]')
    # Comments on the parameters
    hduccfsum.header['COMMENT'] = 'Fit function Gaussian G(x)=shift+amp*e^(-(x-cen)^2/(2*wid^2))'
    hduccfsum.header['COMMENT'] = 'RV=cen'
    hduccfsum.header['COMMENT'] = 'FWHM=2*sqrt(2*ln(2))*wid'
    hduccfsum.header['COMMENT'] = 'CONTRAST=-amp/shift*100'
    hduccfsum.header['COMMENT'] = 'BIS (bisector inverse slope):'
    hduccfsum.header['COMMENT'] = ' Difference between the average bisector'
    hduccfsum.header['COMMENT'] = ' values in the CCF regions from 0.9% to 0.6%'
    hduccfsum.header['COMMENT'] = ' and from 0.4% to 0.1%'
    hduccfsum.header['COMMENT'] = ' (default values, may be different)'
    # RV corrections
    hduccfsum.header['HIERARCH berv'] = (ccfparsum['berv'], '[m/s]')
    hduccfsum.header['HIERARCH drift'] = (ccfparsum['drift'], '[m/s]')
    hduccfsum.header['HIERARCH sa'] = (ccfparsum['sa'], '[m/s]')
    hduccfsum.header['HIERARCH berverr'] = (ccfparsum['berverr'], '[m/s]')
    hduccfsum.header['HIERARCH drifterr'] = (ccfparsum['drifterr'], '[m/s]')
    hduccfsum.header['HIERARCH saerr'] = (ccfparsum['saerr'], '[m/s]')
    hduccfsum.header['HIERARCH shift'] = (ccfparsum['shift'], '-BERV+drift+sa+otherdrift [m/s]')
    hduccfsum.header['HIERARCH shifterr'] = (ccfparsum['shifterr'], '[m/s]')
    hduccfsum.header['HIERARCH otherdrift'] = (ccfparsum['otherdrift'], '[m/s]')
    hduccfsum.header['HIERARCH otherdrifterr'] = (ccfparsum['otherdrifterr'], '[m/s]')
    # Other observation data
    hduccfsum.header['HIERARCH BJD'] = (ccfparsum['bjd'], 'BJD')
    hduccfsum.header['HIERARCH REFERENCE ORDER'] = (ccfparsum['oref'], 'oref')
    hduccfsum.header['HIERARCH SNR REFERENCE ORDER'] = (ccfparsum['snroref'], 'snroref')
    hduccfsum.header['HIERARCH READOUT NOISE'] = (ccfparsum['ron'], 'ron')
    hduccfsum.header['HIERARCH EXPTIME'] = (ccfparsum['exptime'], 'exptime')
    # Mask and mask number of lines (one keyword per order)
    hduccfsum.header['MASKFIL'] = (ccfparsum['filmask'], 'Mask file')
    hduccfsum.header['MASKFILN'] = (ccfparsum['filmaskname'], 'Mask file name')
    hduccfsum.header['HIERARCH MASK TARGET'] = (ccfparsum['objmask'], 'Mask target')
    hduccfsum.header['HIERARCH MASK TARGET SPT'] = (ccfparsum['sptmask'], 'Mask target SpT')
    hduccfsum.header['HIERARCH MASK TARGET VSINI'] = (ccfparsum['vsinimask'], 'Mask target vsini')
    for o in ords:
        hduccfsum.header['HIERARCH MASK NLINO{}'.format(o)] = (ccfparsum['nlino{}'.format(o)], 'Num mask lines order {}'.format(o))
    hduccfsum.header['HIERARCH MASK NLIN TOTAL'] = (ccfparsum['nlint'], 'Total num of mask lines (taking into account BERV, mask shift, order overlap)')
    hduccfsum.header['HIERARCH MASK NLIN ORIGINAL'] = (ccfparsum['nlinoriginal'], 'Num of mask lines original mask')
    # -----------------------
    # CCF orders
    hduccfo = fits.ImageHDU(ccf, name='ccfo')
    # Cannot have nan in header. If all nan -> 0
    # NOTE(review): this guard runs after the CCFSUM header above was already
    # written from the same `rv`; if `rv` were all-NaN those keywords would
    # still get NaN — confirm whether the guard should be moved up.
    if (~np.isfinite(rv)).all(): rv = np.zeros_like(rv)
    hduccfo.header['CRPIX1'] = (1, 'Reference pixel')
    hduccfo.header['CRVAL1'] = (rv[0], 'Value of reference pixel [km/s]')
    hduccfo.header['CDELT1'] = (rv[1]-rv[0], 'Step [km/s]')
    hduccfo.header['CUNIT1'] = ('km/s', 'Units')
    # -----------------------
    # CCF orders parameters (DataFrame -> FITS ASCII table)
    hduccfpar = fits.TableHDU(ccfpar.to_records(), name='ccfparo')
    hduccfpar.header['COMMENT'] = 'fitamp, fitamperr, fitcen, fitcenerr, fitwid, fitwiderr, fitshift, fitshifterr'
    hduccfpar.header['COMMENT'] = 'RV, RVERR, FWHM, FWHMERR [km/s]'
    hduccfpar.header['COMMENT'] = 'CONTRAST, CONTRASTERR [%]'
    hduccfpar.header['COMMENT'] = 'BIS, BISERR [km/s]'
    # -----------------------
    # Bisector of the coadded CCF
    # Bisector steps constant in CCF (flux), not in RV -> the flux grid goes
    # into the header keywords and the RV values into the data
    hdubisectorsum = fits.ImageHDU(bxsum, name='bisectorsum')
    # Cannot have nan in header. If all nan -> 0
    if (~np.isfinite(bysum)).all(): bysum = np.zeros_like(bysum)
    hdubisectorsum.header['CRPIX1'] = (1, 'Reference pixel')
    hdubisectorsum.header['CRVAL1'] = (bysum[0], 'Value of reference pixel [flux]')
    hdubisectorsum.header['CDELT1'] = (bysum[1]-bysum[0], 'Step [flux]')
    hdubisectorsum.header['CUNIT1'] = ('flux', 'Units')
    # -----------------------
    # Bisector orders (RV and flux stored as separate image extensions)
    hdubisectorrvo = fits.ImageHDU(bx, name='bisectorrvo')
    hdubisectorccfo = fits.ImageHDU(by, name='bisectorccfo')
    # -----------------------
    # Save
    hdulist = fits.HDUList([hdu0, hduccfsum, hduccfo, hduccfpar, hdubisectorsum, hdubisectorrvo, hdubisectorccfo])
    # If problems with original header -> Remove it and retry with an empty
    # primary header
    try:
        hdulist.writeto(filout, overwrite=True, output_verify='silentfix+warn')
    except ValueError:
        hdu0 = fits.PrimaryHDU()  # replace original header
        hdulist = fits.HDUList([hdu0, hduccfsum, hduccfo, hduccfpar, hdubisectorsum, hdubisectorrvo, hdubisectorccfo])
        hdulist.writeto(filout, overwrite=True, output_verify='silentfix+warn')
    return
def infits_ccfall(filin):
    """Read CCF data from files created with `outfits_ccfall`.
    Returns
    -------
    rv : 1d-array
        RV grid of the CCFs.
    ccfsum : 1d-array
        CCF of all orders coadded (same length as `rv`).
    ccfparsum : dict
        Parameters of the coadded CCF.
    ccf : nd-array
        CCFs of each order. Shape: (number of orders, RV grid length).
    ccfpar : pandas DataFrame
        Parameters of the CCF of each order.
    bxsum : 1d-array
        Bisector of the coadded CCF (x-axis). Length: 100 points (unless changed when computing the bisector).
    bysum : 1d-array
        Bisector of the coadded CCF (y-axis). Length: 100 points (unless changed when computing the bisector).
    bx : nd-array
        Bisectors of the CCF of each order (x-axis). Shape: (number of orders, 100 (unless changed when computing the bisector)).
    by : nd-array
        Bisectors of the CCF of each order (y-axis). Shape: (number of orders, 100 (unless changed when computing the bisector)).
    headerobs : astropy Header
        Header of the original reduced spectrum file.
    FITS info example
    -----------------
    >>> hdulist.info()
    No. Name Ver Type Cards Dimensions Format
    0 PRIMARY 1 PrimaryHDU 340 ()
    1 CCFSUM 1 ImageHDU 36 (129,) float64
    2 CCFO 1 ImageHDU 12 (129, 61) float64
    3 CCFPAR 1 TableHDU 62 61R x 16C [I21, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17, D25.17]
    4 BISECTORSUM 1 ImageHDU 11 (100,) float64
    5 BISECTORRVO 1 ImageHDU 8 (100, 61) float64
    6 BISECTORCCFO 1 ImageHDU 8 (100, 61) float64
    """
    with fits.open(filin) as hdulist:
        # Number of orders = number of rows of the per-order CCF image
        nord = len(hdulist['ccfo'].data)
        ords = np.arange(0, nord, 1)
        # Observation header
        headerobs = hdulist[0].header
        # RV grid rebuilt from the WCS-like keywords written by `outfits_ccfall`
        h = hdulist['ccfsum'].header
        n = h['NAXIS1']
        stp = h['CDELT1']
        ini_pix = h['CRPIX1']  # read but unused below (reference pixel is written as 1)
        ini_val = h['CRVAL1']
        # fin_val = ini_val + (n - (ini_pix - 1)) * stp
        fin_val = ini_val + n * stp
        rv = np.arange(ini_val, fin_val, stp)
        # Issue with decimals: rv array can end up having an extra point
        # E.g. J09439+269: From FITS header, initial RV value is `ini_val = 19.1788947345789` but from the original RV array, initial RV value is `rv[0]=19.178894734578897`.
        if len(rv) == n+1: rv = rv[:-1]
        elif len(rv) != n: print('rv array length wrong')
        # CCF sum
        ccfsum = hdulist['ccfsum'].data
        # CCF sum parameters (Gaussian-fit values and derived quantities)
        ccfparsum = {}
        h = hdulist['ccfsum'].header
        ccfparsum['fitamp'] = h['HIERARCH CCF FIT AMP']
        ccfparsum['fitcen'] = h['HIERARCH CCF FIT CEN']
        ccfparsum['fitwid'] = h['HIERARCH CCF FIT WID']
        ccfparsum['fitshift'] = h['HIERARCH CCF FIT SHIFT']
        ccfparsum['fitamperr'] = h['HIERARCH CCF FIT AMPERR']
        ccfparsum['fitcenerr'] = h['HIERARCH CCF FIT CENERR']
        ccfparsum['fitwiderr'] = h['HIERARCH CCF FIT WIDERR']
        ccfparsum['fitshifterr'] = h['HIERARCH CCF FIT SHIFTERR']
        ccfparsum['rv'] = h['HIERARCH CCF RV']
        ccfparsum['fwhm'] = h['HIERARCH CCF FWHM']
        ccfparsum['contrast'] = h['HIERARCH CCF CONTRAST']
        ccfparsum['bis'] = h['HIERARCH CCF BIS']
        ccfparsum['rverr'] = h['HIERARCH CCF RVERR']
        ccfparsum['fwhmerr'] = h['HIERARCH CCF FWHMERR']
        ccfparsum['contrasterr'] = h['HIERARCH CCF CONTRASTERR']
        ccfparsum['biserr'] = h['HIERARCH CCF BISERR']
        ccfparsum['rverrabs'] = h['HIERARCH CCF RVERRABS']
        # RV corrections
        ccfparsum['berv'] = h['HIERARCH berv']
        ccfparsum['drift'] = h['HIERARCH drift']
        ccfparsum['sa'] = h['HIERARCH sa']
        ccfparsum['berverr'] = h['HIERARCH berverr']
        ccfparsum['drifterr'] = h['HIERARCH drifterr']
        ccfparsum['saerr'] = h['HIERARCH saerr']
        ccfparsum['shift'] = h['HIERARCH shift']
        ccfparsum['shifterr'] = h['HIERARCH shifterr']
        ccfparsum['otherdrift'] = h['HIERARCH otherdrift']
        ccfparsum['otherdrifterr'] = h['HIERARCH otherdrifterr']
        # Other observation data
        ccfparsum['bjd'] = h['HIERARCH BJD']
        ccfparsum['oref'] = h['HIERARCH REFERENCE ORDER']
        ccfparsum['snroref'] = h['HIERARCH SNR REFERENCE ORDER']
        ccfparsum['ron'] = h['HIERARCH READOUT NOISE']
        ccfparsum['exptime'] = h['HIERARCH EXPTIME']
        # Mask and mask number of lines
        ccfparsum['filmask'] = h['MASKFILN']
        # NOTE(review): 'filmaskname' reads the same header key as 'filmask';
        # a different key (the short mask name) looks intended -- confirm.
        ccfparsum['filmaskname'] = h['MASKFILN']
        ccfparsum['objmask'] = h['HIERARCH MASK TARGET']
        ccfparsum['sptmask'] = h['HIERARCH MASK TARGET SPT']
        ccfparsum['vsinimask'] = h['HIERARCH MASK TARGET VSINI']
        for o in ords:
            ccfparsum['nlino{}'.format(o)] = h['HIERARCH MASK NLINO{}'.format(o)]
        ccfparsum['nlint'] = h['HIERARCH MASK NLIN TOTAL']
        ccfparsum['nlinoriginal'] = h['HIERARCH MASK NLIN ORIGINAL']
        # Non-finite values are stored in the header as strings -> back to nan
        for k, v in ccfparsum.items():
            if v == 'nan' or v == 'inf' or v == '-inf':
                ccfparsum[k] = np.nan
        # CCF orders
        ccf = hdulist['ccfo'].data
        # CCF orders parameters
        ccfpar = pd.DataFrame.from_records(hdulist['ccfparo'].data, index='orders')
        ccfpar.index.set_names('orders', inplace=True)  # NOTE: index name already 'orders'; harmless
        # Bisector of the coadded CCF: x is stored as data, y rebuilt from header
        bxsum = hdulist['bisectorsum'].data
        h = hdulist['bisectorsum'].header
        n = h['NAXIS1']
        stp = h['CDELT1']
        ini_pix = h['CRPIX1']
        ini_val = h['CRVAL1']
        fin_val = ini_val + n * stp
        # NOTE(review): bare except -- presumably guards non-finite header values
        # (CRVAL1/CDELT1 written as 0 or strings); confirm intended scope.
        try: bysum = np.arange(ini_val, fin_val, stp)
        except: bysum = np.ones_like(bxsum) * np.nan
        # Bisector orders
        bx = hdulist['bisectorrvo'].data
        by = hdulist['bisectorccfo'].data
    return rv, ccfsum, ccfparsum, ccf, ccfpar, bxsum, bysum, bx, by, headerobs
def outdat_ccf(filout, rv, ccf):
    """Write a single CCF to a plain-text file.

    Two columns are saved, both with 8 decimal places: 0) RV, 1) CCF.
    """
    columns = np.column_stack([rv, ccf])
    np.savetxt(filout, columns, fmt='%.8f')
    return
def outdat_ccfparTS(filout, data, cols=('bjd', 'rv', 'fwhm', 'contrast', 'bis', 'rverr', 'fwhmerr', 'contrasterr', 'biserr'), sep=' ', na_rep=np.nan, header=False, index=False, float_format='%0.8f'):
    """Save CCF parameters TS in text file.

    Parameters
    ----------
    filout : str or file-like
        Destination passed straight to `DataFrame.to_csv`.
    data : pandas DataFrame
    cols : sequence of str
        Names of the `data` columns to be saved to file. The columns must exist
        in the DataFrame.
        Default: ('bjd', 'rv', 'fwhm', 'contrast', 'bis', 'rverr', 'fwhmerr', 'contrasterr', 'biserr')
    sep, na_rep, header, index, float_format
        Forwarded to `DataFrame.to_csv`.
    """
    # BUGFIX: the default used to be a mutable list shared across calls; a
    # tuple keeps the same default columns without that hazard.
    # NOTE(review): `na_rep` is documented by pandas as a string; np.nan is
    # rendered as 'nan' -- confirm that is the intended representation.
    data.to_csv(filout, columns=list(cols), sep=sep, na_rep=na_rep, header=header, index=index, float_format=float_format)
    return
###############################################################################
# Plots
# Plus/minus symbol used in plot annotation strings (LaTeX, rendered by matplotlib).
pmstr = r"$\pm$"
# pmstr = u'\u00B1'.encode('utf-8')
def plot_ccf(rv, ccf, ccfpar=None, title='', **kwargs):
    """Plot a single CCF (black points) and, if `ccfpar` is given, annotate
    the panel with observation, mask and CCF parameters.

    Parameters
    ----------
    rv : array-like
        RV grid [km/s].
    ccf : array-like
        CCF values.
    ccfpar : dict or None
        CCF parameters (keys 'bjd', 'rv', 'fwhm', ... as read by
        `infits_ccfall`). If None, no annotation is added.
    **kwargs
        Extra options forwarded to `ax.plot`.

    Returns
    -------
    fig, ax
    """
    fig, ax = plt.subplots()
    # CCF
    ax.plot(rv, ccf, 'ko', **kwargs)
    # Info
    if ccfpar is not None:
        infoobs = 'BJD {:.2f}\nSNR{} {:.0f}'.format(ccfpar['bjd'], ccfpar['oref'], ccfpar['snroref'])
        infomask = 'nlin {}, {}'.format(ccfpar['nlinoriginal'], ccfpar['nlint'])
        infoccfpar = 'RV {:.2f}{}{:.2f} ({:.2f}) m/s\nFWHM {:.2f}{}{:.2f} km/s\nContrast {:.2f}{}{:.2f} %\nBIS {:.2f}{}{:.2f} m/s'.format(ccfpar['rv']*1.e3, pmstr, ccfpar['rverr']*1.e3, ccfpar['rverrabs']*1.e3, ccfpar['fwhm'], pmstr, ccfpar['fwhmerr'], ccfpar['contrast'], pmstr, ccfpar['contrasterr'], ccfpar['bis']*1.e3, pmstr, ccfpar['biserr']*1.e3)
        # NOTE(review): `infoccffit` is built but never displayed in this
        # function (only `plot_ccf_fit` shows it); evaluating it still requires
        # the 'fit*' keys to be present in `ccfpar`.
        infoccffit = 'amp {:.2f}{}{:.2f}\ncen {:.2f}{}{:.2f}\nwid {:.2f}{}{:.2f}\nshift {:.2f}{}{:.2f}'.format(ccfpar['fitamp']*1.e3, pmstr, ccfpar['fitamperr']*1.e3, ccfpar['fitcen'], pmstr, ccfpar['fitcenerr'], ccfpar['fitwid'], pmstr, ccfpar['fitwiderr'], ccfpar['fitshift']*1.e3, pmstr, ccfpar['fitshifterr']*1.e3)
        # - infobs + infomask + infoccfpar lower left
        bbox_props = dict(boxstyle="square", fc="w", ec=None, lw=0, alpha=0.7)
        x, y = 0.03, 0.03
        ax.text(x, y, infoobs + '\n' + infomask + '\n\n' + infoccfpar, ha='left', va='bottom', transform=ax.transAxes, fontsize='xx-small', color='k', bbox=bbox_props)
    ax.set_xlabel('RV [km/s]')
    # ax.set_ylabel('CCF')
    # Labeloffset writes the y label with the scale offset factored out
    plotutils.Labeloffset(ax, label="CCF", axis="y")
    ax.set_title(title)
    ax.minorticks_on()
    return fig, ax
def plot_ccf_fit(rv, ccf, ccfpar, title='', fitpar='', **kwargs):
    """Plot a CCF (black points), its Gaussian fit (dashed line) and two info
    boxes: observation/mask/CCF parameters (lower left) and the raw fit
    parameters (lower right).

    Parameters
    ----------
    rv, ccf : array-like
        RV grid [km/s] and CCF values.
    ccfpar : dict
        CCF parameters including the Gaussian fit keys 'fitamp', 'fitcen',
        'fitwid', 'fitshift' (and their '*err' counterparts).
    fitpar : str
        Accepted but currently unused.
    **kwargs
        Extra options forwarded to the CCF `ax.plot` call.

    Returns
    -------
    fig, ax
    """
    fig, ax = plt.subplots()
    # CCF
    ax.plot(rv, ccf, 'ko', **kwargs)
    # Fit (Gaussian model evaluated on the same RV grid)
    fit = peakutils.gaussian(rv, amp=ccfpar['fitamp'], cen=ccfpar['fitcen'], wid=ccfpar['fitwid'], shift=ccfpar['fitshift'])
    ax.plot(rv, fit, 'C1--')
    # Info
    infoobs = 'BJD {:.2f}\nSNR{} {:.0f}'.format(ccfpar['bjd'], ccfpar['oref'], ccfpar['snroref'])
    infomask = 'nlin {}, {}'.format(ccfpar['nlinoriginal'], ccfpar['nlint'])
    infoccfpar = 'RV {:.2f}{}{:.2f} ({:.2f}) m/s\nFWHM {:.2f}{}{:.2f} km/s\nContrast {:.2f}{}{:.2f} %\nBIS {:.2f}{}{:.2f} m/s'.format(ccfpar['rv']*1.e3, pmstr, ccfpar['rverr']*1.e3, ccfpar['rverrabs']*1.e3, ccfpar['fwhm'], pmstr, ccfpar['fwhmerr'], ccfpar['contrast'], pmstr, ccfpar['contrasterr'], ccfpar['bis']*1.e3, pmstr, ccfpar['biserr']*1.e3)
    infoccffit = 'amp {:.2e}{}{:.2e}\ncen {:.2f}{}{:.2f}\nwid {:.2f}{}{:.2f}\nshift {:.2e}{}{:.2e}'.format(ccfpar['fitamp']*1.e3, pmstr, ccfpar['fitamperr']*1.e3, ccfpar['fitcen'], pmstr, ccfpar['fitcenerr'], ccfpar['fitwid'], pmstr, ccfpar['fitwiderr'], ccfpar['fitshift']*1.e3, pmstr, ccfpar['fitshifterr']*1.e3)
    # - infobs + infomask + infoccfpar lower left
    bbox_props = dict(boxstyle="square", fc="w", ec=None, lw=0, alpha=0.7)
    x, y = 0.03, 0.03
    ax.text(x, y, infoobs + '\n' + infomask + '\n\n' + infoccfpar, ha='left', va='bottom', transform=ax.transAxes, fontsize='xx-small', color='k', bbox=bbox_props)
    # - infoccffit lower right
    bbox_props = dict(boxstyle="square", fc="w", ec=None, lw=0, alpha=0.7)
    x, y = 1.-0.03, 0.03
    ax.text(x, y, infoccffit, ha='right', va='bottom', transform=ax.transAxes, fontsize='xx-small', color='k', bbox=bbox_props)
    ax.set_xlabel('RV [km/s]')
    plotutils.Labeloffset(ax, label="CCF", axis="y")
    #ax.set_ylabel('CCF')
    # Add fit params text
    # ...
    ax.set_title(title)
    ax.minorticks_on()
    return fig, ax
def plot_ccf_fit_line(rv, ccf, ccfpar, title='', fitpar='', **kwargs):
    """Same as `plot_ccf_fit` but the CCF is drawn as a solid line ('k-')
    instead of points.

    Parameters
    ----------
    rv, ccf : array-like
        RV grid [km/s] and CCF values.
    ccfpar : dict
        CCF parameters including the Gaussian fit keys.
    fitpar : str
        Accepted but currently unused.

    Returns
    -------
    fig, ax
    """
    fig, ax = plt.subplots()
    # CCF
    ax.plot(rv, ccf, 'k-', **kwargs)
    # Fit
    fit = peakutils.gaussian(rv, amp=ccfpar['fitamp'], cen=ccfpar['fitcen'], wid=ccfpar['fitwid'], shift=ccfpar['fitshift'])
    ax.plot(rv, fit, 'C1--')
    # Info
    infoobs = 'BJD {:.2f}\nSNR{} {:.0f}'.format(ccfpar['bjd'], ccfpar['oref'], ccfpar['snroref'])
    infomask = 'nlin {}, {}'.format(ccfpar['nlinoriginal'], ccfpar['nlint'])
    infoccfpar = 'RV {:.2f}{}{:.2f} ({:.2f}) m/s\nFWHM {:.2f}{}{:.2f} km/s\nContrast {:.2f}{}{:.2f} %\nBIS {:.2f}{}{:.2f} m/s'.format(ccfpar['rv']*1.e3, pmstr, ccfpar['rverr']*1.e3, ccfpar['rverrabs']*1.e3, ccfpar['fwhm'], pmstr, ccfpar['fwhmerr'], ccfpar['contrast'], pmstr, ccfpar['contrasterr'], ccfpar['bis']*1.e3, pmstr, ccfpar['biserr']*1.e3)
    infoccffit = 'amp {:.2e}{}{:.2e}\ncen {:.2f}{}{:.2f}\nwid {:.2f}{}{:.2f}\nshift {:.2e}{}{:.2e}'.format(ccfpar['fitamp']*1.e3, pmstr, ccfpar['fitamperr']*1.e3, ccfpar['fitcen'], pmstr, ccfpar['fitcenerr'], ccfpar['fitwid'], pmstr, ccfpar['fitwiderr'], ccfpar['fitshift']*1.e3, pmstr, ccfpar['fitshifterr']*1.e3)
    # - infobs + infomask + infoccfpar lower left
    bbox_props = dict(boxstyle="square", fc="w", ec=None, lw=0, alpha=0.7)
    x, y = 0.03, 0.03
    ax.text(x, y, infoobs + '\n' + infomask + '\n\n' + infoccfpar, ha='left', va='bottom', transform=ax.transAxes, fontsize='xx-small', color='k', bbox=bbox_props)
    # - infoccffit lower right
    bbox_props = dict(boxstyle="square", fc="w", ec=None, lw=0, alpha=0.7)
    x, y = 1.-0.03, 0.03
    ax.text(x, y, infoccffit, ha='right', va='bottom', transform=ax.transAxes, fontsize='xx-small', color='k', bbox=bbox_props)
    ax.set_xlabel('RV [km/s]')
    plotutils.Labeloffset(ax, label="CCF", axis="y")
    #ax.set_ylabel('CCF')
    # Add fit params text
    # ...
    ax.set_title(title)
    ax.minorticks_on()
    return fig, ax
def plot_ccf_fit_diff(rv, ccf, ccfpar, title='', fitpar='', diffzoom=True, parvelunit='kms', **kwargs):
    """Plot a CCF with its Gaussian fit (top panel) and the CCF-minus-fit
    residuals (bottom panel).

    Parameters
    ----------
    rv, ccf : array-like
        RV grid [km/s] and CCF values.
    ccfpar : dict
        CCF parameters including the Gaussian fit keys. Missing keys only
        blank the corresponding info box (see try/excepts below).
    diffzoom : bool
        If True, only show the central quarter of the RV grid in the
        residuals panel.
    parvelunit : {'kms', 'ms'}
        Unit of the fit-centre values in `ccfpar` (controls the conversion
        factor applied when displaying 'fitcen').
    fitpar : str
        Accepted but currently unused.

    Returns
    -------
    fig, ax[0], ax[1]
    """
    fig, ax = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios':[3,1]})
    # CCF
    ax[0].plot(rv, ccf, 'ko', **kwargs)
    # Fit
    fit = peakutils.gaussian(rv, amp=ccfpar['fitamp'], cen=ccfpar['fitcen'], wid=ccfpar['fitwid'], shift=ccfpar['fitshift'])
    ax[0].plot(rv, fit, 'C1--')
    # Data units
    # NOTE(review): for any `parvelunit` other than 'kms'/'ms', `facv` stays
    # undefined; the resulting NameError when formatting `infoccffit` below is
    # swallowed by its try/except, leaving the fit info box empty -- confirm
    # this silent fallback is intended.
    if parvelunit == 'kms': facv = 1.
    elif parvelunit == 'ms': facv = 1.e3
    # Info (each box degrades to an empty string if its keys are missing)
    try: infoobs = 'BJD {:.2f}\nSNR{} {:.0f}'.format(ccfpar['bjd'], ccfpar['oref'], ccfpar['snroref'])
    except: infoobs = ''
    try: infomask = 'nlin {}, {}'.format(ccfpar['nlinoriginal'], ccfpar['nlint'])
    except: infomask = ''
    try: infoccfpar = 'RV {:.2f}{}{:.2f} ({:.2f}) m/s\nFWHM {:.2f}{}{:.2f} km/s\nContrast {:.2f}{}{:.2f} %\nBIS {:.2f}{}{:.2f} m/s'.format(ccfpar['rv']*1.e3, pmstr, ccfpar['rverr']*1.e3, ccfpar['rverrabs']*1.e3, ccfpar['fwhm'], pmstr, ccfpar['fwhmerr'], ccfpar['contrast'], pmstr, ccfpar['contrasterr'], ccfpar['bis']*1.e3, pmstr, ccfpar['biserr']*1.e3)
    except: infoccfpar = ''
    try: infoccffit = 'amp {:.2e}{}{:.2e}\ncen {:.2f}{}{:.2f}\nwid {:.2f}{}{:.2f}\nshift {:.2e}{}{:.2e}'.format(ccfpar['fitamp']*1.e3, pmstr, ccfpar['fitamperr']*1.e3, ccfpar['fitcen']*facv, pmstr, ccfpar['fitcenerr']*facv, ccfpar['fitwid'], pmstr, ccfpar['fitwiderr'], ccfpar['fitshift']*1.e3, pmstr, ccfpar['fitshifterr']*1.e3)
    except: infoccffit = ''
    # - infobs + infomask + infoccfpar lower left
    bbox_props = dict(boxstyle="square", fc="w", ec=None, lw=0, alpha=0.7)
    x, y = 0.03, 0.03
    ax[0].text(x, y, infoobs + '\n' + infomask + '\n\n' + infoccfpar, ha='left', va='bottom', transform=ax[0].transAxes, fontsize='xx-small', color='k', bbox=bbox_props)
    # - infoccffit lower right
    bbox_props = dict(boxstyle="square", fc="w", ec=None, lw=0, alpha=0.7)
    x, y = 1.-0.03, 0.03
    ax[0].text(x, y, infoccffit, ha='right', va='bottom', transform=ax[0].transAxes, fontsize='xx-small', color='k', bbox=bbox_props)
    # Difference
    diff = ccf - fit
    if diffzoom:
        # Keep only +-1/8 of the grid around the centre
        icen = len(rv) / 2.
        di = len(rv) / 8.
        mask = [True if (i >= icen - di) and (i <= icen + di) else False for i in range(len(rv))]
        ax[1].plot(rv[mask], diff[mask], 'ko')
    else:
        ax[1].plot(rv, diff, 'ko')
    ax[-1].set_xlabel('RV [km/s]')
    # plotutils.Labeloffset(ax[0], label="CCF", axis="y")
    ax[0].set_ylabel('CCF')
    # Add fit params text
    # ...
    ax[0].set_title(title)
    for a in ax:
        a.minorticks_on()
    return fig, ax[0], ax[1]
def plot_ccfo_lines(rv, ccfo, lisord=None, printord=True, multiline=True, cb=True, cmap=cc.cm.rainbow4, lw=2, alpha=0.9, xlabel='RV [km/s]', ylabel='Order CCF', cblabel='Order', ax=None, **kwargs):
    """Plot order CCFs as given, no normalisation or offset.

    Parameters
    ----------
    rv : 1D array-like
        RV grid of the CCF.
    ccfo : 2D array-like
        CCFs of each order.
    lisord : 1D array-like
        Order number of each CCF in `ccfo`. If None, order numbers are 0 to
        `len(ccfo)`.
    printord : bool
        Accepted but currently unused (order-number labelling is a TODO).
    multiline : bool, default True
        If True plot lines colour-coded with `cmap` (default). If False,
        colour-code following the default colour cycle.
    cb : bool
        Add a colorbar (only meaningful when `multiline` is True).
    ax : matplotlib Axes or None
        Axes to plot on; falls back to the current axes.

    Returns
    -------
    ax : matplotlib Axes
    """
    # Alternative colormaps kept for reference:
    # cmap = cc.cm.CET_I1
    # cmap = 'Spectral'
    # Make sure we have an axis where to plot
    if not isinstance(ax, mpl.axes._subplots.Axes): ax = plt.gca()
    # Order number
    if lisord is None: lisord = np.arange(0, len(ccfo), 1)
    # Plot
    lc = None  # colour-mapped line collection; only set in the multiline branch
    if multiline == False:
        # Plot lines following default color cycle
        for o, ccf in zip(lisord, ccfo):
            # BUGFIX: `cmap` is not a Line2D property and used to be forwarded
            # to `ax.plot`, which raised on every call; colours now come from
            # the default cycle (or from `kwargs`).
            ax.plot(rv, ccf, lw=lw, alpha=alpha, **kwargs)
            # Order number TODO
            # ax.text()
    else:
        # Plot lines following a specific map
        # CCF
        xs = np.array([rv for i in ccfo])
        ys = np.array(ccfo)
        c = np.array(lisord)
        lc = plotutils.multiline(xs, ys, c, cmap=cmap, lw=lw, alpha=alpha, ax=ax, **kwargs)
    # Colorbar
    # BUGFIX: `lc` only exists in the multiline branch; guard so that
    # multiline=False with cb=True no longer raises NameError.
    if cb and lc is not None:
        cbar = plt.colorbar(lc, ax=ax, aspect=15, pad=0.02, label=cblabel)
        cbar.minorticks_on()
    # Labels
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
def plot_ccfo_map(rv, ccfo, lisord=None, cb=True, cmap=cmocean.cm.deep_r, extent='data', vmin=None, vmax=None, extend='neither', xlabel='RV [km/s]', ylabel='Order', cblabel='CCF', ax=None):
    """Plot map of order CCFs as given, no normalisation or offset.

    Parameters
    ----------
    rv : 1D array-like
        RV grid of the CCF.
    ccfo : 2D array-like
        CCFs of each order (one row per order).
    lisord : 1D array-like
        Order number of each CCF in `ccfo`. If None, order numbers are 0 to
        `len(ccfo)`. Used for the map's y-axis extent.
    extent : 'data' or list
        If 'data', the image extent is built from `rv` and `lisord`.
    ax : matplotlib Axes or None
        Axes to plot on; falls back to the current axes.

    Returns
    -------
    ax : matplotlib Axes
    """
    # Make sure we have an axis where to plot
    if not isinstance(ax, mpl.axes._subplots.Axes): ax = plt.gca()
    # Order number
    if lisord is None: lisord = np.arange(0, len(ccfo), 1)
    # Plot
    if extent == 'data': extent = [rv[0], rv[-1], lisord[0], lisord[-1]]
    nocb = not cb
    ax = plotutils.plot_map(ccfo, cblabel, interpolation='none', origin='lower', extent=extent, vmin=vmin, vmax=vmax, extend=extend, cmap=cmap, axcb=None, nocb=nocb, aspect=15, pad=0.02, fraction=0.15, ax=ax)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
def plot_ccfo_lines_map(rv, ccfo, ccfsum, ccferrsum, lisord=None,
    ylabelsum = 'Coadded\nCCF',
    multiline=True, cbline=True, cmapline=cc.cm.rainbow4, lw=2, alpha=0.9, ylabelline='Order CCF', cblabelline='Order',
    cbmap=True, cmapmap=cmocean.cm.deep_r, extent='data', vmin=None, vmax=None, extend='neither', ylabelmap='Order', cblabelmap='CCF',
    xlabel='RV [km/s]', title=''
    ):
    """3-panel plot with CCFsum, CCFo lines, CCFo map, one below each other.

    NOTE(review): `multiline`, `cbline` and `cbmap` are accepted but never
    forwarded to `plot_ccfo_lines` / `plot_ccfo_map` (their defaults are used
    instead) -- confirm whether they should be passed through.
    """
    fig, ax = plt.subplots(3,1, figsize=(8,7), sharex=True, gridspec_kw={'height_ratios': [1,2.5,2.5]})
    axsum = ax[0]
    axo = ax[1]
    axmap = ax[2]
    # CCF sum (coadded over orders, with errorbars)
    axsum.errorbar(rv, ccfsum, yerr=ccferrsum, fmt='k.')
    axsum.set_ylabel(ylabelsum)
    # CCFo
    axo = plot_ccfo_lines(rv, ccfo, lisord=lisord, cmap=cmapline, lw=lw, alpha=alpha, xlabel=None, ylabel=ylabelline, cblabel=cblabelline, ax=axo)
    # CCFo map
    axmap = plot_ccfo_map(rv, ccfo, lisord=lisord, cmap=cmapmap, extent=extent, vmin=vmin, vmax=vmax, extend=extend, xlabel=xlabel, ylabel=ylabelmap, cblabel=cblabelmap, ax=axmap)
    #
    ax[0].set_title(title)
    for a in ax.flatten():
        a.minorticks_on()
    plt.tight_layout()
    fig.subplots_adjust(hspace=0.1) # wspace=0.08, hspace=0.08
    # Adjust ccfsum panel size: the top panel has no colorbar, so match its
    # width to the (colorbar-carrying) lines panel below it
    boxo = axo.get_position()
    boxsum = axsum.get_position()
    # Set based on width
    axsum.set_position([boxsum.x0, boxsum.y0, boxo.width, boxsum.height]) # left, bottom, width, height
    # # Or set with Bbox and coordinates
    # boxsum_new = mpl.transforms.Bbox([[boxsum.x0, boxsum.y0], [boxo.x1, boxsum.y1]])
    # axsum.set_position(boxsum_new)
    #
    return fig, axsum, axo, axmap
def _pad_nonconsecutive_obs(time, lisccf, maskmap, time_min):
    """Insert nan-filled CCFs between non-consecutive observations, plus one at
    each end, so that imshow's implicit uniform grid is approximately right.

    `time_min` is the time gap above which two observations are considered
    non-consecutive. Returns the padded (time, lisccf, maskmap) arrays.
    """
    time_new = []
    lisccf_new = []
    maskmap_new = []
    nanccf = np.ones_like(lisccf[0])*np.nan
    # Fake obs beginning
    time_new.append(time[maskmap][0]-time_min)
    lisccf_new.append(nanccf)
    maskmap_new.append(True)
    for obs_i in range(len(time[maskmap])):
        time_new.append(time[maskmap][obs_i])
        lisccf_new.append(lisccf[obs_i])
        maskmap_new.append(maskmap[obs_i])
        # Last obs: nothing to compare with
        if obs_i == len(time[maskmap])-1:
            continue
        # Time diff with following observation
        dt = time[maskmap][obs_i+1] - time[maskmap][obs_i]
        # If difference larger than minimum time, assume non-consecutive obs, and fill with nan
        if dt > time_min:
            # How many new nan-filled obs are needed
            nobs_new = int(dt/time_min)
            for newobs in range(nobs_new):
                # NOTE(review): multiplying the time looks suspicious; a step of
                # `time[maskmap][obs_i] + time_min*(newobs+1)` may have been the
                # intent. Kept as-is to preserve existing output.
                time_new.append(time[maskmap][obs_i]*(newobs+2))
                lisccf_new.append(nanccf)
                maskmap_new.append(True)
    # Fake obs end
    time_new.append(time[maskmap][-1]+time_min)
    lisccf_new.append(nanccf)
    maskmap_new.append(True)
    return np.array(time_new), np.array(lisccf_new), np.array(maskmap_new)
def plot_lisccf_lines_map(rv, lisccf, lisccferr, time, maskgray=None, maskmap=None, labelrv='RV [km/s]', labelccf='CCF',
    cmapline=cc.cm.rainbow4, lw=2, alpha=0.9, cbline=True, labeltime='Time',
    interpolation='none', origin='lower', extent='data', vmin=None, vmax=None, extend='neither', cbmap=True, cmapmap=cmocean.cm.deep_r,
    title='',):
    """2-panel plot with list of CCF (coadded CCFs from different observations) lines color-coded as a function of time, and CCF map with time on y-axis and color-coded on CCF value, one below each other.

    Issues
    ------
    Assumes data (list of CCFs) is sorted by `time`, if not the map will be wrong.
    Works well for data equally spaced. Workaround if gaps, but issues if overlapping data.
    TODO: add errorbars `lisccferr` (parameter currently unused).

    Parameters
    ----------
    rv : 1D array-like
        RV grid shared by all CCFs.
    lisccf, lisccferr : 2D array-like
        CCF (and its errors, unused for now) of each observation.
    time : 1D array-like
        Time of each observation, same order as `lisccf`.
    maskgray : 1D array-like of bool
        Plot set of CCFs in gray (e.g. out-of-transit, or low S/N).
    maskmap : 1D array-like of bool
        Observations to include in the map panel (all by default).
    """
    if maskgray is None: maskgray = np.zeros_like(time, dtype=bool)
    fig, ax = plt.subplots(2,1, figsize=(8,6), sharex=True, gridspec_kw={'height_ratios': [3,1.2]})
    axline = ax[0]
    axmap = ax[1]
    # CCF lines colour-coded by time
    xs = np.array([rv for i in lisccf[~maskgray]])
    ys = np.array(lisccf[~maskgray])
    c = np.array(time[~maskgray])
    lc = plotutils.multiline(xs, ys, c, cmap=cmapline, lw=lw, alpha=alpha, ax=axline)
    # Colorbar
    if cbline:
        cbar = plt.colorbar(lc, ax=axline, aspect=15, pad=0.02, label=labeltime)
        cbar.minorticks_on()
    axline.set_ylabel(labelccf)
    # Grayed-out CCFs
    for ccf in lisccf[maskgray]:
        axline.plot(rv, ccf, '0.5', lw=1, alpha=alpha, zorder=0)
    # CCF map
    if maskmap is None: maskmap = np.ones_like(time, dtype=bool)
    nocb = not cbmap
    #
    # If the aspect of the small panel is set to be the same as the big one,
    # the cb width is narrower and it looks weird. Width of cb of small panel
    # to be the same as bigger panel:
    #   aspect_cb_bigpanel = 15
    #   bigpanel_height = 3
    #   smallpanel_height = 1.2
    #   aspect_cb_smallpanel = aspect_cb_bigpanel * smallpanel_height / bigpanel_height = 6
    #
    # Check if overlapping observations
    # Overlapping if time between obs is less than 10 s # HARDCODED, depends on exptime
    # NOTE(review): np.diff(...).min() raises on a single observation -- confirm
    # callers always pass >= 2 observations.
    overlappingobs = False
    time_min = np.diff(time[maskmap]).min()
    if time_min * 24 * 3600 < 10:
        overlappingobs = True
    #
    # Check if observations are non-consecutive (any gap larger than twice the
    # minimum time difference)
    consecutiveobs = True
    time_min = np.diff(time[maskmap]).min()
    time_min_range = 2
    for dt in np.diff(time[maskmap]):
        if dt > time_min*time_min_range:
            consecutiveobs = False
            break
    # If obs are non-consecutive, imshow will show the wrong "grid" because it assumes the values are equally spaced. Need to add "fake" nan-filled CCFs in between non-consecutive obs.
    # Add a nan-filled CCF at the beginning and end because if not the actual ones might be hidden by the borders.
    # For non-uniform grid spacing, plt.pcolor(rv, time, lisccf) or pcolormesh would be the alternative, but everything is coded with imshow, so it's easier to tweak the data.
    if (consecutiveobs is False) and (overlappingobs is False):
        # Define consecutive by twice the minimum difference in time
        time, lisccf, maskmap = _pad_nonconsecutive_obs(time, lisccf, maskmap, np.diff(time[maskmap]).min() * 2)
    if (consecutiveobs is False) and (overlappingobs is True):
        # Define consecutive by the mean difference in time
        # DOESN'T REALLY WORK
        time, lisccf, maskmap = _pad_nonconsecutive_obs(time, lisccf, maskmap, np.diff(time[maskmap]).mean())
    #
    # ipdb.set_trace()
    if extent == 'data': extent = [rv[0], rv[-1], time[maskmap][0], time[maskmap][-1]]
    axmap = plotutils.plot_map(lisccf[maskmap], labelccf, interpolation=interpolation, origin=origin, extent=extent, vmin=vmin, vmax=vmax, extend=extend, cmap=cmapmap, axcb=None, nocb=nocb, aspect=6, pad=0.02, fraction=0.15, ax=axmap)
    # # Contour plot of irregularly spaced data
    # xcontour = [rv for i in lisccf[maskmap]]
    # ycontour = [phase for i in rv]
    # zcontour = lisccf[maskmap].flatten()
    # axmap.tricontour(xcotour, ycotour, zcotour, levels=14, linewidths=0.5, colors='k')
    # cntr2 = axmap.tricontourf(xcontour, ycontour, zcontour, levels=14, cmap="RdBu_r")
    # fig.colorbar(cntr2, ax=axmap)
    axmap.set_ylabel(labeltime)
    ax[-1].set_xlabel(labelrv)
    ax[0].set_title(title)
    for a in ax.flatten():
        a.minorticks_on()
    plt.tight_layout()
    fig.subplots_adjust(hspace=0.1) # wspace=0.08, hspace=0.08
    # Adjust line panel size (if 1st panel has no colorbar) so both panels align.
    # BUGFIX: this branch used to reference undefined `axo`/`axsum` (copied from
    # `plot_ccfo_lines_map`) and raised NameError whenever cbline=False.
    if cbline == False:
        boxmap = axmap.get_position()
        boxline = axline.get_position()
        # Set based on width: [left, bottom, width, height]
        axline.set_position([boxline.x0, boxline.y0, boxmap.width, boxline.height])
    return fig, axline, axmap
def plot_ccf_bisector():
    """Placeholder for CCF bisector plotting (not implemented yet)."""
    return None
def printstats(stats, ax, data, err=None, x=0.97, y=0.93, ha='right', va='top', color='k', join='\n', fontsize='xx-small'):
    """Annotate `ax` with scatter statistics of `data`.

    Parameters
    ----------
    stats : list
        Statistics to show, e.g. ['std', 'wstd']. 'wstd' is only computed when
        `err` is also provided.
    join : str
        Separator placed between entries, e.g. whitespace or newline.

    Returns
    -------
    ax : the same Axes, with the text box added.
    """
    text = ''
    if 'std' in stats:
        text += 'std {:.2f}'.format(np.nanstd(data))
    if 'wstd' in stats and err is not None:
        # The separator is prepended even when no 'std' entry precedes it
        # (matches long-standing output).
        text += join + 'wstd {:.2f}'.format(compute_wstd(data, err)[0])
    box_style = dict(boxstyle="square", fc="w", ec=None, lw=0, alpha=0.7)
    ax.text(x, y, text, ha=ha, va=va, transform=ax.transAxes,
            fontsize=fontsize, color=color, bbox=box_style)
    return ax
def plot_ccfparbasic_servalrvc(data, plotserval=True, shiftserval=True, title='', stats=['std', 'wstd'], **kwargs):
    """
    Plot CCF parameters TS (parameter vs BJD): RV, FWHM, contrast and BIS in
    four stacked panels.
    Assume 'RV' and 'BIS' in [km/s] -> transform it to [m/s]
    Assume SERVAL merged with CCF data in `data`.
    stats : List with 'std', 'wstd'
        Show std of the parameter
    """
    # Plot style
    if kwargs is None:  # NOTE(review): never true -- **kwargs is always a dict
        kwargs = dict()
    kwargs['linestyle'] = kwargs.get('linestyle', 'None')
    kwargs['marker'] = kwargs.get('marker', 'o')
    # Plot
    fig, ax = plt.subplots(4, 1, figsize=(8, 12), sharex=True)
    # BJD (offset subtracted for readability)
    xsub = 2400000.
    bjd = data['bjd'] - xsub
    xlab = 'BJD - {:.0f}'.format(xsub)
    if plotserval:
        bjds = data['bjd'] - xsub
    # RV
    # - CCF
    # NOTE: errorbars use 'rverrabs' while the printed stats use 'rverr'
    if plotserval: lab = 'CCF'
    else: lab = ''
    ax[0].errorbar(bjd, data['rv']*1.e3, yerr=data['rverrabs']*1.e3, ms=5, elinewidth=2, capsize=2, capthick=2, zorder=0, label=lab, **kwargs)
    ax[0].set_ylabel('RV [m/s]')
    printstats(stats, ax[0], data['rv']*1.e3, err=data['rverr']*1.e3)
    # - SERVAL
    if plotserval:
        ys = data['servalrvc']
        lab = 'SERVAL RVC'
        if shiftserval:
            # Align SERVAL median with the CCF RV median
            ys = ys - np.nanmedian(ys) + np.nanmedian(data['rv']*1.e3)
            # lab = lab + ' shift'
        l = ax[0].errorbar(bjds, ys, yerr=data['servalrvcerr'], ms=5, elinewidth=2, capsize=2, capthick=2, zorder=1, label=lab, **kwargs)
        printstats(stats, ax[0], data['servalrvc'], err=data['servalrvcerr'], y=0.07, va='bottom', color=l.lines[0].get_color())
        ax[0].legend(fontsize='xx-small', loc='upper left', framealpha=0.7, edgecolor='None')
        # ax[0].legend(fontsize='x-small', loc='upper left', frameon=False)
    # FWHM
    ax[1].errorbar(bjd, data['fwhm'], yerr=data['fwhmerr'], ms=5, elinewidth=2, capsize=2, capthick=2, **kwargs)
    ax[1].set_ylabel('FWHM [km/s]')
    printstats(stats, ax[1], data['fwhm'], err=data['fwhmerr'])
    # Contrast
    ax[2].errorbar(bjd, data['contrast'], yerr=data['contrasterr'], ms=5, elinewidth=2, capsize=2, capthick=2, **kwargs)
    ax[2].set_ylabel('Contrast [%]')
    printstats(stats, ax[2], data['contrast'], err=data['contrasterr'])
    # BIS
    ax[3].errorbar(bjd, data['bis']*1.e3, yerr=data['biserr']*1.e3, ms=5, elinewidth=2, capsize=2, capthick=2, **kwargs)
    ax[3].set_ylabel('BIS [m/s]')
    printstats(stats, ax[3], data['bis']*1.e3, err=data['biserr']*1.e3)
    # General plot
    for a in ax:
        a.minorticks_on()
    ax[0].set_title(title)
    ax[-1].set_xlabel(xlab)
    return fig, ax
def plot_ccfparbasic_servalrvc_separated(data, dataserval=None, shiftserval=True, title='', stats=['std', 'wstd'], **kwargs):
    """
    Plot CCF parameters TS (parameter vs BJD): RV, FWHM, contrast and BIS in
    four stacked panels.
    Assume 'RV' and 'BIS' in [km/s] -> transform it to [m/s]
    SERVAL data not merged with CCF data `data` (can have different observations).
    stats : {'std', None}
        Show std of the parameter
    """
    # Plot style
    if kwargs is None:  # NOTE(review): never true -- **kwargs is always a dict
        kwargs = dict()
    kwargs['linestyle'] = kwargs.get('linestyle', 'None')
    kwargs['marker'] = kwargs.get('marker', 'o')
    # Plot
    fig, ax = plt.subplots(4, 1, figsize=(8, 12), sharex=True)
    # BJD (offset subtracted for readability)
    xsub = 2400000.
    bjd = data['bjd'] - xsub
    xlab = 'BJD - {:.0f}'.format(xsub)
    if dataserval is not None:
        bjds = dataserval['bjd'] - xsub
    # RV
    # - CCF
    # NOTE: errorbars use 'rverrabs' while the printed stats use 'rverr'
    if dataserval is not None: lab = 'CCF {} obs'.format(len(data.index))
    else: lab = ''
    ax[0].errorbar(bjd, data['rv']*1.e3, yerr=data['rverrabs']*1.e3, ms=5, elinewidth=2, capsize=2, capthick=2, zorder=0, label=lab, **kwargs)
    ax[0].set_ylabel('RV [m/s]')
    printstats(stats, ax[0], data['rv']*1.e3, err=data['rverr']*1.e3)
    # - SERVAL
    if dataserval is not None:
        ys = dataserval['servalrvc']
        lab = 'SERVAL RVC {} obs'.format(len(dataserval.index))
        if shiftserval:
            # Align SERVAL median with the CCF RV median
            ys = ys - np.nanmedian(ys) + np.nanmedian(data['rv']*1.e3)
            # lab = lab + ' shift'
        l = ax[0].errorbar(bjds, ys, yerr=dataserval['servalrvcerr'], ms=5, elinewidth=2, capsize=2, capthick=2, zorder=1, label=lab, **kwargs)
        printstats(stats, ax[0], dataserval['servalrvc'], err=dataserval['servalrvcerr'], y=0.07, va='bottom', color=l.lines[0].get_color())
        ax[0].legend(fontsize='xx-small', loc='upper left', framealpha=0.7, edgecolor='None')
        # ax[0].legend(fontsize='x-small', loc='upper left', frameon=False)
    # FWHM
    ax[1].errorbar(bjd, data['fwhm'], yerr=data['fwhmerr'], ms=5, elinewidth=2, capsize=2, capthick=2, **kwargs)
    ax[1].set_ylabel('FWHM [km/s]')
    printstats(stats, ax[1], data['fwhm'], err=data['fwhmerr'])
    # Contrast
    ax[2].errorbar(bjd, data['contrast'], yerr=data['contrasterr'], ms=5, elinewidth=2, capsize=2, capthick=2, **kwargs)
    ax[2].set_ylabel('Contrast [%]')
    printstats(stats, ax[2], data['contrast'], err=data['contrasterr'])
    # BIS
    ax[3].errorbar(bjd, data['bis']*1.e3, yerr=data['biserr']*1.e3, ms=5, elinewidth=2, capsize=2, capthick=2, **kwargs)
    ax[3].set_ylabel('BIS [m/s]')
    printstats(stats, ax[3], data['bis']*1.e3, err=data['biserr']*1.e3)
    # General plot
    for a in ax:
        a.minorticks_on()
    ax[0].set_title(title)
    ax[-1].set_xlabel(xlab)
    return fig, ax
def plot_ccfrv(data, title='', stats=['std', 'wstd'], **kwargs):
    """Plot the CCF RV time series (RV vs BJD).

    Assumes `data['rv']` and its errors are in [km/s]; values are displayed in
    [m/s]. The statistics named in `stats` (e.g. 'std', 'wstd') are printed on
    the panel via `printstats`.
    """
    # Default marker style: unconnected circles, unless overridden by caller
    kwargs.setdefault('linestyle', 'None')
    kwargs.setdefault('marker', 'o')
    fig, ax = plt.subplots(1, 1, figsize=(8, 6), sharex=True)
    # Time axis: BJD with a constant offset subtracted for readability
    offset = 2400000.
    xvals = data['bjd'] - offset
    xlabel = 'BJD - {:.0f}'.format(offset)
    # CCF RV in m/s, with absolute errors
    yvals = data['rv'] * 1.e3
    ax.errorbar(xvals, yvals, yerr=data['rverrabs'] * 1.e3, ms=5, elinewidth=2, capsize=2, capthick=2, **kwargs)
    ax.set_ylabel('RV [m/s]')
    printstats(stats, ax, data['rv'] * 1.e3, err=data['rverr'] * 1.e3)
    # Cosmetics
    ax.minorticks_on()
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    return fig, ax
def plot_ccfrv_servalrvc(data, shiftserval=True, title='', stats=['std', 'wstd'], **kwargs):
    """
    Plot the CCF RV and the SERVAL RVC time series on a single panel.
    Assume RVs in [km/s] for the CCF -> shown in [m/s]; SERVAL RVC assumed
    already merged into `data`.
    stats : {'std', None}
        Show std of the parameter
    """
    # Plot style
    if kwargs is None:  # NOTE(review): never true -- **kwargs is always a dict
        kwargs = dict()
    kwargs['linestyle'] = kwargs.get('linestyle', 'None')
    kwargs['marker'] = kwargs.get('marker', 'o')
    # Plot
    fig, ax = plt.subplots(1, 1, figsize=(8, 6), sharex=True)
    # BJD (offset subtracted for readability)
    xsub = 2400000.
    bjd = data['bjd'] - xsub
    xlab = 'BJD - {:.0f}'.format(xsub)
    # RV
    # - CCF (errorbars use 'rverrabs'; printed stats use 'rverr')
    y = data['rv']*1.e3
    lab = 'CCF'
    ax.errorbar(bjd, y, yerr=data['rverrabs']*1.e3, label=lab, ms=5, elinewidth=2, capsize=2, capthick=2, zorder=0, **kwargs)
    ax.set_ylabel('RV [m/s]')
    printstats(stats, ax, data['rv']*1.e3, err=data['rverr']*1.e3)
    # - SERVAL
    ys = data['servalrvc']
    lab = 'SERVAL RVC'
    if shiftserval:
        # Align SERVAL median with the CCF RV median
        ys = ys - np.nanmedian(ys) + np.nanmedian(data['rv']*1.e3)
        # lab = lab + ' shift'
    l = ax.errorbar(bjd, ys, yerr=data['servalrvcerr'], label=lab, ms=5, elinewidth=2, capsize=2, capthick=2, zorder=1, **kwargs)
    printstats(stats, ax, data['servalrvc'], err=data['servalrvcerr'], y=0.07, va='bottom', color=l.lines[0].get_color())
    ax.legend(fontsize='xx-small', loc='upper left', framealpha=0.7, edgecolor='None')
    # General plot
    ax.minorticks_on()
    ax.set_title(title)
    ax.set_xlabel(xlab)
    return fig, ax
def plot_ccfrv_servalrvc_diff(data, shiftserval=True, title='', stats=('std', 'wstd'), **kwargs):
    """
    Plot CCF RVs and SERVAL RVC values (top panel) plus their difference
    (bottom panel).

    Parameters
    ----------
    data : dict-like
        Must provide 'bjd', 'rv', 'rverr', 'rverrabs', 'servalrvc' and
        'servalrvcerr'. CCF RVs appear to be in km/s (converted to m/s);
        SERVAL RVCs are used as-is -- TODO confirm units match, otherwise
        the difference panel mixes units when shiftserval=False.
    shiftserval : bool
        If True, shift the SERVAL series so its median matches the median
        of the CCF RVs (in m/s).
    title : str
        Figure title (set on the top panel).
    stats : iterable of {'std', 'wstd'} or None
        Statistics to annotate on the plot (forwarded to `printstats`).
    **kwargs
        Extra style options forwarded to `ax.errorbar`/`ax.plot`.
        Defaults: linestyle='None', marker='o'.

    Returns
    -------
    fig, ax
        `ax` is an array of two axes (RVs, difference).
    """
    # Plot style defaults.
    # NOTE: with **kwargs the local `kwargs` is always a dict, so the old
    # `if kwargs is None` guard was dead code and has been removed. The
    # mutable-list default for `stats` was replaced by a tuple.
    kwargs.setdefault('linestyle', 'None')
    kwargs.setdefault('marker', 'o')
    # Two panels sharing the time axis; top panel 3x taller
    fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True, gridspec_kw={'height_ratios': [3, 1]})
    # BJD offset for a readable x-axis
    xsub = 2400000.
    bjd = data['bjd'] - xsub
    xlab = 'BJD - {:.0f}'.format(xsub)
    # RV
    # - CCF, converted to m/s
    y = data['rv']*1.e3
    lab = 'CCF RV'
    ax[0].errorbar(bjd, y, yerr=data['rverrabs']*1.e3, label=lab, ms=5, elinewidth=2, capsize=2, capthick=2, zorder=0, **kwargs)
    ax[0].set_ylabel('RV [m/s]')
    printstats(stats, ax[0], data['rv']*1.e3, err=data['rverr']*1.e3)
    # - SERVAL, optionally shifted to the CCF median level
    ys = data['servalrvc']
    lab = 'SERVAL RVC'
    if shiftserval:
        ys = ys - np.nanmedian(ys) + np.nanmedian(data['rv']*1.e3)
        # lab = lab + ' shift'
    l = ax[0].errorbar(bjd, ys, yerr=data['servalrvcerr'], label=lab, ms=5, elinewidth=2, capsize=2, capthick=2, zorder=1, **kwargs)
    # Reuse the SERVAL series' colour for its stats annotation
    printstats(stats, ax[0], data['servalrvc'], err=data['servalrvcerr'], y=0.07, va='bottom', color=l.lines[0].get_color())
    ax[0].legend(fontsize='xx-small', loc='upper left', framealpha=0.7, edgecolor='None')
    # Difference residuals: CCF minus (possibly shifted) SERVAL
    diff = y - ys
    ax[1].plot(bjd, diff, **kwargs)
    ax[1].hlines(0, np.nanmin(bjd), np.nanmax(bjd), colors='.6', linestyle='dashed')
    ax[1].set_ylabel('Diff\n[m/s]')
    printstats(stats, ax[1], diff)
    # General plot cosmetics
    for a in ax:
        a.minorticks_on()
    ax[0].set_title(title)
    ax[-1].set_xlabel(xlab)
    return fig, ax
|
mlafargaREPO_NAMEraccoonPATH_START.@raccoon_extracted@raccoon-master@raccoon@ccf.py@.PATH_END.py
|
{
"filename": "SPT_ACT_summer_school_2024_candl.ipynb",
"repo_name": "Lbalkenhol/candl",
"repo_path": "candl_extracted/candl-main/notebooks/SPT_ACT_summer_school_2024/SPT_ACT_summer_school_2024_candl.ipynb",
"type": "Jupyter Notebook"
}
|
<p align="center">
<img src="https://raw.githubusercontent.com/Lbalkenhol/candl/main/docs/logos/candl_wordmark%26symbol_col_RGB.png" width="750" alt="candl" />
</p>
---
This notebook will introduce you to `candl` (https://arxiv.org/abs/2401.13433). `candl` is a likelihood code that provides easy access to CMB data to allow you to perform more intuitive analysis. Out of the box, `candl` comes with the latest SPT and ACT data installed (primary CMB and lensing power spectrum measurements). Along with the likelihood code, `candl` features an interface module that allows you to easily connect the likelihoods to theory codes (e.g. CAMB, CLASS) and samplers (e.g. Cobaya, MontePython) as well as a tools module that provides short cuts for common calculations. You will explore these in part 1 of this notebook. Moreover, `candl` likelihoods are differentiable, i.e. they allow you to not only obtain the value of the likelihood function, but also its slope. You will learn what this means in detail, how to work with derivatives in practice, and why they are useful in parts 2 and 3 of this notebook.
__Resources:__
* `candl` documentation: https://candl.readthedocs.io/en/latest/
* `candl` GitHub repository: https://github.com/Lbalkenhol/candl
* your expert tutors!
__Overview:__
[Part I: `candl` Basics](#part1)
This part will run you through the basics of using `candl`: how to initialise a likelihood, access different data products, understand the data model, and evaluate the likelihood. For this part of the notebook you will be using the SPT-3G 2018 TT/TE/EE data set.
[Part II: Building a Differentiable Pipeline, Calculating Derivatives](#part2)
In this part you will build a differentiable pipeline from cosmological parameters all the way to the likelihood value. We will look at two useful applications using the ACT DR4 TT/TE/EE data set.
[Part III: Gradient-Powered Likelihood Exploration](#part3)
In this part you will find the best-fit point of the ACT DR4 TT/TE/EE data set using traditional and gradient-powered methods.
__Disambiguation__:
The word "likelihood" gets thrown around a lot. Depending on the context it can mean different things, such as:
* A piece of software: `candl` is a likelihood code.
* A mathematical function: the function $\mathcal{L}(\theta)$, where $\theta$ are some model parameters.
* Related: a concept in Bayesian statistics.
* A python function: the implementation of the mathematical function.
## Requirements
This notebook has a few requirements for packages and codes, beyond what you have installed so far. Execute the cell below, it will automatically install the required packages for you and download an archive with additional data into the directory where you have placed this notebook.
The second cell then features our required python imports and some setup for plotting.
```python
# Install required packages
# Assumes you have previously cloned and installed candl from GitHub
!pip install jax==0.4.24
!pip install jaxlib==0.4.24
!pip install cosmopower-jax
!pip install camb
# Grab the CosmoPower emulator models
!unzip cosmopower_models.zip
!rm cosmopower_models.zip
!rm -r __MACOSX
```
```python
# Prepare plotting
%matplotlib inline
# Standard imports
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from copy import deepcopy
# Import candl
import candl
import candl.data
# Make plots pretty
import candl.plots
candl.plots.set_plot_style()
```
---
<a id='part1'></a>
# PART I: `candl` Basics
Let's learn the basics of `candl`, how to access different parts of the data, evaluate the likelihood, and use transformations.
<span style="color:blue">
<b>Background:</b>
</span>
Generally, CMB likelihoods can be written as:
$$ -2\log{\mathcal{L}(\theta)} = \left\{\hat{D} - \mathcal{T}\left[D^{\mathrm{CMB}}(\theta), \theta \right]\right\}^T \mathcal{C}^{-1} \left\{\hat{D} - \mathcal{T}\left[D^{\mathrm{CMB}}(\theta), \theta \right]\right\}, $$
where $\mathcal{L}$ is the likelihood, $\theta$ are the (cosmological and nuisance) parameters, $D^{\mathrm{CMB}}$ is the CMB power spectrum, $\mathcal{C}$ the band power covariance matrix and $\mathcal{T}$ the data model. The data model represents a series of transformations that need to be applied to the CMB power spectrum so that it can be compared to the data; this includes accounting for e.g. foreground contamination or calibration uncertainty.
__`candl`__ likelihoods take a dictionary populated with (cosmological and nuisance) parameters, as well as CMB spectra as an input and return the negative log-likelihood value. The data model is implemented as a series of transformations that are applied to the theory spectra in a specific order. These transformations are specified in a `.yaml` file, along with all other necessary information (such as information where to find the band powers, what kind of spectra are being supplied, etc.).
We'll start by initialising a candl likelihood for the SPT-3G 2018 TT/TE/EE data set. Programatically, `candl` likelihoods are python classes and their `log_like` function implements the equation above. Execute the cell below and inspect the output produced by the initialisation paying attention to the information on spectra and the data model.
```python
# Initialise the SPT-3G 2018 TT/TE/EE likelihood
candl_like = candl.Like(candl.data.SPT3G_2018_TTTEEE)
```
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## Data Access
`candl` likelihoods have a lot of useful attributes that allow us to directly access different parts of the data set. Let's get an idea for the information that is available.
<span style="color:red">
<b>Exercise:</b>
</span>
Plot the band powers of the data set, including error bars. You can decide for yourself whether it is best to show all data together, or whether it is easier to look at a given spectrum individually.
<span style="color:green">
<b>Tips:</b>
</span>
The band powers in `candl` are stored as a long vector with all measurements concatenated. The order is specified by the attribute `.spec_order` (which matches the output of the initialisation above). The following attributes will also help you:
* `.spec_order`: a list specifying the order of the spectra in the data vector
* `.data_bandpowers`: a long vector of band powers
* `.covariance`: the covariance matrix
* `.bins_start_ix`, `.bins_stop_ix`: lists that contain start and stop indices for the spectra
* `.N_bins`: a list containing the number of bins of each spectrum
* `.effective_ells`: the weighted bin centres of all spectra
```python
```
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## Data Structure and Data Model
Let's peel back the curtain a bit. As mentioned before, `candl` likelihoods require `.yaml` files with some specific information inside them in order to be initialised. These `.yaml` files follow the following structure:
<pre style="margin-left: 25pt">
name: [name of data set]
band_power_file: [relative path of the band power file]
covariance_file: [relative path of the band power file]
window_functions_folder: [relative path of the window functions folder]
spectra_info:
- [spectrum id]: [number of bins]
data_model:
- [transformation]
priors:
- [par_names]: [name of parameter to apply the prior to]
[central_value]: [central value of the prior]
[prior_std]: [width of the prior (standard deviation)]
</pre>
Let's zoom in on the data model. Execute the cell below and look at the list of transformations modules.
```python
candl_like.data_model
```
<span style="color:blue">
<b>Background:</b>
</span>
We can access directly the transformations that make up the data model of the likelihood. Each transformation has an `.output()` function, which takes as the input a dictionary of parameters it requires to compute and returns a long vector of the changes to be applied. The returned vector contains the contributions for all spectra matching the order of the data vector, though it is unbinned, i.e. of length `candl_like.N_spectra_total x len(candl_like.ells)`. Most transformations (such as foregrounds) are additive, apart from calibration, which is multiplicative.
<span style="color:red">
<b>Exercise:</b>
</span>
Focus on the CIB clustering module. Inspect the `.yaml` file of the data set (you can see its location on your machine via `candl_like.data_set_file`) and identify the part that specifies the CIB clustering module. Find the names of the parameters required to evaluate it. Check for their default values in the prior block of the `.yaml` file and obtain predictions from the module for these parameter values. Plot the (non-zero) predictions for all spectra to compare their amplitudes.
Then, sample from the prior and plot different draws of the foreground contribution for one spectrum of your choice (e.g. TT 220x220). Show the chosen band powers with error bars in the same panel.
<span style="color:green">
<b>Tips:</b>
</span>
On google colab, click on the folder icon on the left to open the file browser. If you have trouble locating the file, you can also view it on [GitHub](https://github.com/Lbalkenhol/candl/blob/main/candl/data/SPT3G_2018_TTTEEE_v0/SPT3G_2018_TTTEEE.yaml).
```python
```
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## Evaluating the Likelihood, Calculating Residuals
Having familiarised ourselves with different aspects of the data and how the code functions, it's about time we obtain a likelihood value! As mentioned above, the `log_like` (and also the `chi_square`) functions of `candl` likelihoods take as input a dictionary with CMB spectra and (cosmological and nuisance) parameter values. We can obtain the CMB spectra from a Boltzmann solver; here, we will use `CAMB`, which you may already be familiar with from other sessions. If not, `CAMB` returns CMB spectra for a given combination of cosmological parameters. The cells below show you an example of how to interface `candl` with `CAMB`.
```python
import camb
import candl.interface
```
```python
# Set up CAMB with some baseline accuracy settings
CAMB_pars = camb.CAMBparams()
CAMB_pars.set_for_lmax(candl_like.ell_max+200, lens_potential_accuracy=2)
# This creates a function that takes a dictionary of cosmological parameters as an input and returns a dictionary of CMB spectra
CAMB_pars_to_theory_specs = candl.interface.get_CAMB_pars_to_theory_specs_func(CAMB_pars)
# Define fiducial parameters, cosmology matches Planck 2018 best fit
fid_pars = {'H0': 67.37,
'ombh2': 0.02233,
'omch2': 0.1198,
'logA': 3.043,
'ns': 0.9652,
'tau': 0.054}
# Nuisance parameters are set to the central values of their priors
for par_name in candl_like.required_nuisance_parameters:
for prior in candl_like.priors:
if par_name in prior.par_names:
fid_pars[par_name] = prior.central_value[prior.par_names.index(par_name)]
```
```python
# Get the theory spectra from CAMB
CAMB_CMB_Dls = CAMB_pars_to_theory_specs(fid_pars, candl_like.ell_max)
# Put things into the right format for candl
pars_for_like = deepcopy(fid_pars)
pars_for_like["Dl"] = CAMB_CMB_Dls
# Hand off to the likelihood
logl_value = candl_like.log_like(pars_for_like)
chisq_value = candl_like.chi_square(pars_for_like)
# Print results
# Note that for likelihoods with a varying beam covariance matrix (such as SPT-3G 2018 TT/TE/EE) the chi-square value is not twice the log-likelihood value,
# plus the logl function also applies any priors
print(f"logL = {float(logl_value)}")
print(f"chisq = {float(chisq_value)}")
```
Evaluating a likelihood is great, but often we want to get a deeper understanding of why the data likes or dislikes a certain model. For this purpose, it can be handy to calculate the difference between the data and the model and search for features in these residuals. The `candl.tools.pars_to_model_specs()` function is helpful here: it returns the binned and unbinned model spectra. Under the hood, it queries the `CAMB_pars_to_theory_specs` function, then applies all transformations, and finally, bins the spectra.
```python
import candl.tools
```
```python
# Obtain the model spectrum
model_spectrum, binned_model_spectrum = candl.tools.pars_to_model_specs(candl_like, fid_pars, CAMB_pars_to_theory_specs)
# Calculate error bars
# Note that we need to add the beam correlation matrix to the covariance - a peculiarity of the SPT-3G 2018 TT/TE/EE data set
error_bars = np.sqrt(np.diag(candl_like.covariance) + np.diag(candl_like.beam_correlation) * binned_model_spectrum**2)
# Calculate residuals: model - data
residuals = binned_model_spectrum - candl_like.data_bandpowers
relative_residuals = residuals / error_bars
```
```python
# Plot the residuals
plt.close()
# Set figure size
fig = plt.gcf()
fig.set_size_inches(3 * 3.464, 1 * 3.464)
# Plot residuals and horizonal line
plt.plot(relative_residuals)
plt.axhline(0, color="k", lw=0.5)
# Dividers for spectra
for i, spec in enumerate(candl_like.spec_order[:-1]):
plt.axvline(candl_like.bins_stop_ix[i], color="k", ls="--", lw=0.5)
# Finish plot with limits, ticks, labels
plt.xlim(0, len(relative_residuals)-1)
plt.ylim((-8, 8))
plt.xticks(candl_like.bins_start_ix+(candl_like.bins_stop_ix-candl_like.bins_start_ix)/2,
candl_like.spec_order,
rotation=45)
plt.subplots_adjust(bottom=0.2)
plt.ylabel(r"$\Delta D_\ell / \sigma$")
plt.show()
```
<span style="color:red">
<b>Exercise:</b>
</span>
Calculate residuals for two more sets of model spectra by varying your favourite cosmological parameter. Increase and decrease it from the fiducial value and plot the residuals for all three models on the same panel. Do you have an intuition for the shape the residuals take given the parameter you varied?
```python
```
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## Discussion
In this part, you have learned the basics of `candl` and carried out key parts of high-level CMB data analysis. Making (and interpreting) residual plots is a key part of analysing CMB data - you'll be hard-pressed to find a CMB power spectrum paper without such a plot. With the data band powers at your fingertips and the ability to access the data model directly, you have great tools at your disposal to develop a deeper understanding of the model constraints produced by a given data set.
The `candl.tools` module features a lot more functionality that goes beyond what we've accessed in this part. For example `candl.tools.get_foreground_contributions()` can return the contributions of all foreground modules in different formats, `candl.tools.undo_transformations` can be used to obtain a CMB-only (foreground-free) version of the data band powers, and you can use `candl.tools.generate_mock_data` to generate mock band powers.
---
<a id='part2'></a>
# PART II: Building a Differentiable Pipeline, Calculating Derivatives
`candl` can not only be used to evaluate $\mathcal{L}(\theta)$, but also $\partial\mathcal{L}/\partial \theta$ thanks to `JAX` and the magic of automatic differentiation. We will now compute derivatives of the likelihood and use them in two example calculations. In doing so, we will also learn how to use the neural network-based emulators of the CMB power spectrum `CosmoPower`.
<span style="color:blue">
<b>Background:</b>
</span>
__`JAX`__ (https://jax.readthedocs.io/en/latest/index.html) is a google-developed python library. For the purpose of this school you can think of it as a smart numpy replacement. `candl` optionally uses `JAX`, meaning that everything you have done up until now did not require `JAX`. However, if `JAX` is installed, `candl` likelihoods run faster and become differentiable through "auto-diff".
__Automatic differentiation__, or "auto-diff", is a computer science concept that is seeing more and more applications in scientific computing and in particular cosmology (see e.g. https://arxiv.org/abs/2302.05163). It is a way to obtain derivatives of functions you write with respect to their input parameters - without resorting to finite difference methods. How auto-diff works is beyond the scope of this school, though if you are interested take a look at https://jax.readthedocs.io/en/latest/automatic-differentiation.html.
__`CosmoPower`__ (https://arxiv.org/abs/2106.03846) is a framework for neural network-based emulators of the CMB power spectrum. You can think of it as a short-cut for evaluating Boltzmann solvers, such as CAMB. `CosmoPower` models are trained on a series of pre-calculated CMB spectra and perform effectively an interpolation between the known points in order to avoid slow CAMB evalutations. Not only do they provide a speed-up, but `CosmoPower` models are also differentiable. Below, we will use `CosmoPower-JAX` (https://arxiv.org/abs/2305.06347) to stay within the same code eco-system and use models trained on high-precision CAMB spectra for the SPT-3G 2018 TT/TE/EE analysis (https://arxiv.org/abs/2212.05642, https://github.com/alessiospuriomancini/cosmopower/tree/main/cosmopower/trained_models/SPT_high_accuracy).
The diagram belows how the different codes work together to create a pipeline that is differentiable. We start out on the left with our input parameters: some cosmological (top blue box) and some nuisance parameters (bottom blue box). The cosmological parameters are fed into `CosmoPower`, which produces a CMB power spectrum ($D_\ell^{CMB}$). This power spectrum in return is then passed to `candl` along with any nuisance parameters and `candl` calculates the likelihood value, $\mathcal{L}$. Since all codes in the black brackets are differentiable, we can also calculate the derivate of the likelihood with respect to any of the input parameters (cosmological and nuisance), $\partial\mathcal{L}/\partial \theta$.
<p align="center">
<img src="https://raw.githubusercontent.com/Lbalkenhol/candl/main/notebooks/SPT_ACT_summer_school_2024/diff_pipeline.png" width="80%" alt="Differentiable Pipeline"/>
</p>
Why do we bother with this? There are many useful calculations in cosmology that use derivatives of the observables with respect to cosmological parameters. Having access to reliable derivatives, without resorting to finite difference calculations, which can be plagued by numerical issues, trivialises a lot of these calculations. In this part, we will work through some examples that will give you a taste for how gradient information can be useful.
To get started, we will switch data sets to the ACT DR4 TT/TE/EE data. We start by loading it into `candl`. Inspect the output that is produces for a moment. It only features two sets of band powers for each spectrum, with `d` and `w` denoting observations from the ACT deep and wide fields, respectively. This likelihood is already marginalised over foregrounds, providing us with CMB-only band powers. The only nuisance parameter remaining is `yp`, a final calibration of the data. Then we load our CosmoPower models.
```python
# Initialise the ACT DR4 TT/TE/EE likelihood
candl_like = candl.Like(candl.data.ACT_DR4_TTTEEE)
# Define our fiducial parameters again - now without the SPT nuisance parameters, but adding ACT calibration
fid_pars = {'H0': 67.37,
'ombh2': 0.02233,
'omch2': 0.1198,
'ns': 0.9652,
'logA': 3.043,
'tau': 0.054,
'yp': 1.0}
```
```python
# Grab a theory calculator and initialise it
# Here, we use our differentiable, high-precision CosmoPower models
# These take care of the step of moving from cosmological parameters to theory CMB spectra
# There may be some warnings printed, but unless there are any errors you are all good
cp_emulator_filenames = {"TT": "./cosmopower_models/cmb_spt_TT_NN.npz",
"TE": "./cosmopower_models/cmb_spt_TE_PCAplusNN.npz",
"EE": "./cosmopower_models/cmb_spt_EE_NN.npz"}
CP_pars_to_theory_specs = candl.interface.get_CosmoPowerJAX_pars_to_theory_specs_func(cp_emulator_filenames)
```
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## Fisher Matrices and Forecasting
<span style="color:blue">
<b>Background:</b>
</span>
Forecasting parameter constraints given data error bars is a common task in cosmology. The most straightforward approach is to use the Fisher information matrix, which, when inverted, gives the expected parameter covariance matrix.
For CMB experiments, we can calculate the Fisher matrix as follows:
$$ F = \frac{\partial D_\ell}{\partial \theta}^T C^{-1} \frac{\partial D_\ell}{\partial \theta}, $$
where $F$ is the Fisher matrix, $\partial D_\ell / \partial \theta$ the derivative of the model spectra with respect to parameters, and $C$ is the band power covariance matrix. This encapsulates the constraining power of the data and the data only, i.e. it does not take into account the effect of priors. These need to be included manually, by adding the inverse covariance to the relevant parameters [1]. For the ACT data, the only parameter with a prior is the optical depth to reionisation, $\tau$.
<span style="color:red">
<b>Exercise:</b>
</span>
Use Fisher matrices to study the impact of the prior on $\tau$, the optical depth to reionisation, on constraints from the ACT DR4 data in $\Lambda \mathrm{CDM}$. Compare the default choice (a Gaussian prior of width $0.015$ (standard deviation)) to constraints when replacing the $\tau$ prior on the ACT data with the choice of the SPT-3G 2018 TT/TE/EE analysis (standard deviation of $0.0074$). Does the difference in the choice of prior width matter? Which parameters are most affected?
<span style="color:green">
<b>Tips:</b>
</span>
From `jax.jacrev(func)` you obtain a function that efficiently evaluates the derivative of `func`. For more information see: https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html
[1] If you are wondering why, consider that the contribution from a prior on parameter $\tau$ to the log-likelihood is $0.5*[(\tau-\tau_0)/\sigma_{\tau}]^2$, where $\tau_0$ is the central value of the prior and $\sigma_{\tau}$ its width. The Fisher matrix holds the second derivatives of the log-likelihood with respect to the parameters; taking the second derivative of the expression above, only $1/\sigma_{\tau}^2$ survives.
```python
import jax
```
```python
# This plotting routine can help you visualise the parameter covariance matrix
# Look at the doc string below to understand how to use it
def plot_confidence_ellipse(par1, par2, par_cov, par_order, colour):
"""
Adds 68% and 95% confidence ellipses to the plot for two parameters.
par1: str
The name of the parameter along the x-axis
par2: str
The name of the parameter along the y-axis
par_cov: array
The parameters covariance matrix
par_order: list of strings
The order of parameters in the covariance matrix
colour:
The colour of the contours - any valid matplotlib specification should work
"""
ax = plt.gca()
for i in [2,1]:
candl.plots.add_confidence_ellipse(ax,
par_cov,
par_order.index(par1),
par_order.index(par2),
fid_pars[par1],
fid_pars[par2],
i,
ec=colour,
facecolor="None")
```
```python
```
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## ACT DR4 TE Artificial Recalibration
<img src="https://raw.githubusercontent.com/Lbalkenhol/candl/main/notebooks/SPT_ACT_summer_school_2024/aiola20_fig14b.png" width="400" align="left" alt="Aiola et al. 2020 Figure 14"/>
<span style="color:blue">
<b>Background:</b>
</span>
The image on the left shows constraints placed by the ACT DR4 TT/TE/EE data in $\Lambda \mathrm{CDM}$ in the $n_s - \Omega_b h^2$ plane (blue contours) (taken from https://arxiv.org/abs/2007.07288). The authors note a preference for lower $\Omega_b h^2$ and higher $n_s$ data compared to Planck and WMAP results (purple and yellow/green contours, respectively).
The size of the difference of the ACT results w.r.t. Planck is compatible with a statistical fluctuation. Furthermore, the authors see no evidence for such a re-calibration of their $TE$ power spectrum in their data. Nevertheless, they show how their results shift when artificially multiplying their $TE$ band powers by 5% (light and dark blue contours) and can in such a way be brought to a closer agreement with the satellite data.
In order to produce these results, new MCMC runs with the TE power spectrum artificially multiplied by 0.95 and 1.05 are typically required. This is computationally expensive, especially bearing in mind that there may be multiple effects at the band power level that project along the same axis in parameter space. Without a good intuition, you can find yourself applying many different offsets to the band powers with varying amplitudes and running an MCMC analysis for each case.
With a differentiable pipeline, you can investigate how band power biases project to parameter space more efficiently. Consider the following formula (https://arxiv.org/abs/1908.01626):
$$ \Delta \theta = F^{-1} \frac{\partial D_\ell}{\partial \theta} C^{-1} \Delta \hat{D}_\ell, $$
where $\Delta \theta$ is the shift to best-fit parameters caused by a displacement $\Delta \hat{D}_\ell$ of the band powers, $C$ the band power covariance matrix, $F$ is the Fisher matrix and $\partial D_\ell / \partial \theta$ the derivative of the model spectra with respect to parameters.
<span style="color:red">
<b>Exercise:</b>
</span>
Create your own version of the plot above, focussing only on the ACT data (forgetting about WMAP and Planck). Do not worry about centering your plot on the best-fit point of the ACT data, you can keep using the `fid_pars` from above - what we are interested in are the degeneracies in parameter space. Indicate the shift to the best-fit point of the ACT data when re-calibrating the $TE$ power spectrum by 100 equally-spaced factors between 0.9 and 1.1. Are you able to recover the ACT results? What advantages or disadvantages do you see in your method compared to performing multiple MCMC analyses?
Optional: come up with a different systematic (e.g. $TT$ miscalibration, contamination of the $TE$ data by an $\ell^2$ term, temperature-to-polarisation leakage, ...). How do they compare to the case of a TE recalibration for the two parameters of interest? Can you come up with any intuition for the degeneracy directions you see?
```python
```
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## Discussion
In this part, you have learned how to combine `candl` with `CosmoPower-JAX` to create a differentiable pipeline from parameters to the likelihood value. You've used this to show how auto-diff can be a powerful tool for forecasting by trivialising the calculation of Fisher matrices. Moreover, you've explored how you can flexibly translate a bias at the band power level to parameters using an example from a recent paper.
---
<a id='part3'></a>
# PART III: Gradient-Powered Likelihood Exploration
We will now see how gradient information can be used to explore the likelihood surface. In particular, we will code a "traditional" and a gradient-based algorithm to find the best-fit point of the data set and compare the performance of the two algorithms.
<span style="color:blue">
<b>Background:</b>
</span>
Finding the best-fit point of the data amounts to finding the global extremum (that is not at infinity) of the likelihood function. Conventions on the sign of the likelihood differ. Some codes, such as `candl`, return the log likelihood, which is typically negative and we want to find the maximum. However, we conventionally speak about __minimisation__ when we want to find the best-fit point of the data.
In principle, the likelihood surface can take a complicated shape that can be difficult to explore. However, modern primary CMB data has become quite constraining, such that for $\Lambda \mathrm{CDM}$ we can typically approximate it as a (multivariate) Gaussian. Hence, in the algorithms below we will assume that the likelihood surface has a single peak, which we want to get to as quickly as possible.
The diagram below illustrates the idea. Imagine our likelihood compares our band powers (black points, left panel) to some model predictions (coloured lines, left panel) as we vary some cosmological parameter $\theta$. The (negative) log-likelihood is shown as a function of this parameter $\theta$ in the right panel. The best-fit point is the minimum of this function, and the goal of our minimisation algorithms will be to find it. If we only access the likelihood value, our information is limited to the function values (indicated by the coloured points in the right panel). However, if we also have gradient information we know the slope too (indicated by the coloured arrows in the right panel).
<p align="center">
<img src="https://raw.githubusercontent.com/Lbalkenhol/candl/main/notebooks/SPT_ACT_summer_school_2024/chi2_surface.png" width="80%" alt="Differentiable Pipeline"/>
</p>
We will first write a minimisation algorithm that does not use gradient information, which we will run with CAMB and CosmoPower as theory codes. We will then develop an algorithm that uses gradient information and compare the performance of the different minimisers.
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## Traditional Minimiser
<span style="color:red">
<b>Exercise:</b>
</span>
Write an algorithm that finds the best-fit point of the data. Do not use gradient information from automatic differentiation. There are many different possible solutions. If you're unsure how to start, look at the MCMC session and modify the sampling code in there to work as a minimiser.
First, run the algorithm with CAMB as a theory code and limit the maximum number of steps (i.e. CAMB evaluations) for the sake of your computer to 50. Then run the algorithm with CosmoPower as a theory code, without a hard limit to the number of steps. Does using CosmoPower help with the minimisation compared to CAMB?
The code in the two cells below will help you get started.
```python
import candl.tools
# Re-initialise CAMB - we need to grab theory spectra to higher ell than for the SPT case
CAMB_pars = camb.CAMBparams()
CAMB_pars.set_for_lmax(candl_like.ell_max+200, lens_potential_accuracy=2)
CAMB_pars_to_theory_specs = candl.interface.get_CAMB_pars_to_theory_specs_func(CAMB_pars)# this function takes a dictionary of cosmological parameters and returns a dictionary of CMB spectra
# Bundle CAMB and our likelihood together into a single function that moves from a parameter dictionary to a log like value
# Do the same for CosmoPower
pars_to_loglike_CAMB = candl.tools.get_params_to_logl_func(candl_like, CAMB_pars_to_theory_specs)
pars_to_loglike_CP = candl.tools.get_params_to_logl_func(candl_like, CP_pars_to_theory_specs)
# Evaluate the functions to make sure they run
# It's okay that these don't match exactly - the CP models were trained on CAMB spectra withs slightly higher accuracy settings
test_pars = deepcopy(fid_pars)
print(pars_to_loglike_CAMB(test_pars))
print(pars_to_loglike_CP(test_pars))
```
```python
# A proposal matrix for steps respecting degeneracy directions - can help you if you want to
par_order = ["H0", "ombh2", "omch2", "ns", "logA", "tau", "yp"]
proposal_cov = np.array([[ 2.25041962e+00, 1.30313866e-04, -5.37763426e-03, 1.01357518e-02, -8.31895908e-04, 5.38781094e-03, -1.68596642e-03],
[ 1.30313866e-04, 9.50311718e-08, -1.36037147e-07, -1.96156941e-06, 1.40972274e-06, 2.23821352e-07, 1.90509016e-07],
[-5.37763426e-03, -1.36037147e-07, 1.35070005e-05, -2.94150104e-05, 5.14036014e-06, -1.33453348e-05, 4.62296510e-06],
[ 1.01357518e-02, -1.96156941e-06, -2.94150104e-05, 2.37304854e-04, -1.47941044e-04, 2.90535404e-05, 1.74905120e-06],
[-8.31895908e-04, 1.40972274e-06, 5.14036014e-06, -1.47941044e-04, 9.09444696e-04, 3.80825486e-04, -9.50179538e-06],
[ 5.38781094e-03, 2.23821352e-07, -1.33453348e-05, 2.90535404e-05, 3.80825486e-04, 2.08304046e-04, -1.10725552e-06],
[-1.68596642e-03, 1.90509016e-07, 4.62296510e-06, 1.74905120e-06, -9.50179538e-06, -1.10725552e-06, 2.16146131e-05]])
# Your minimiser code
```
```python
# Compare the performance with CAMB and CosmoPower
```
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## Gradient-Based Minimiser
<span style="color:red">
<b>Exercise:</b>
</span>
Write an algorithm that finds the best-fit point of the data using gradient information from auto-diff. The functions in the cell below will help you with this. Again, many different solutions are possible, but a simple yet powerful algorithm is the Newton-Raphson method (https://en.wikipedia.org/wiki/Newton%27s_method_in_optimization). Does your gradient-based minimiser converge in fewer steps than the traditional one?
```python
# We grab short cuts for the derivative and the Hessian
pars_to_loglike_CP_deriv = jax.jit(jax.jacrev(pars_to_loglike_CP))
pars_to_loglike_CP_hessian = jax.jit(jax.hessian(pars_to_loglike_CP))
```
```python
# Your gradient-based minimiser
```
```python
# Compare their performance
```
```python
# Make a triangle plot and trace the journey of the minimisers!
# Plug your data into the variables below and you should get a nice triangle plot with the trajectories of the algorithms
# YOUR DATA GOES HERE
overall_bf_point = {} # a dictionary of the best-fit point
parameter_covariance_matrix = np.zeros((1,1)) # the parameter covariance matrix (used to draw the ellipses in the triangle plot)
parameter_covariance_matrix_order = [] # the order of parameters in the covariance matrix
traditional_minimiser_points = [{}] # a list of dictionaries - each entry is a step of the traditional minimiser
gradient_minimiser_points = [{}] # a list of dictionaries - each entry is a step of the gradient-based minimiser
# REMAINING PLOTTING CODE - DO NOT TOUCH
pars_to_plot = ["H0", "ombh2", "omch2", "ns", "logA"]# tau is prior dominated, yp is a nuisance parameters
plt.close()
# Plot parameters
sigma_plot_range = 3
sigma_levels = 2
ax = candl.plots.triangle_plot_from_cov(pars_to_plot = pars_to_plot,# which parameters to plot
bf_point = overall_bf_point,# Newton best-fit point
par_cov = parameter_covariance_matrix,# the parameter covariance matrix
pars_in_cov = parameter_covariance_matrix_order,# the order of parameters in the covariance
sigma_plot_range = sigma_plot_range,# sets the axes limits
sigma_levels = sigma_levels)# how many sigma levels to plot
# Plot the Traditional minimiser
candl.plots.add_min_trajectory(ax = ax,# axis instance
pars_to_plot = pars_to_plot,# which parameters to plot
eval_points = gradient_minimiser_points,# a list of all the points in the minimiser trajectory
par_cov = parameter_covariance_matrix,# covariance of the best run - this is used to set the correct height in the 1d panels
pars_in_cov = parameter_covariance_matrix_order,# the order of parameters in the covariance
bf_point = overall_bf_point,# the best-fit point - again used in the 1d panels
base_colour = "purple")# the base colour used to show the trajectory path
# Plot the Newton minimiser
candl.plots.add_min_trajectory(ax = ax,# axis instance
pars_to_plot = pars_to_plot,# which parameters to plot
eval_points = traditional_minimiser_points,# a list of all the points in the minimiser trajectory
par_cov = parameter_covariance_matrix,# covariance of the best run - this is used to set the correct height in the 1d panels
pars_in_cov = parameter_covariance_matrix_order,# the order of parameters in the covariance
bf_point = overall_bf_point,# the best-fit point - again used in the 1d panels
dark_colours=False,
base_colour = "orange")# the base colour used to show the trajectory path
# Add a make-shift legend
ax[0,0].text(overall_bf_point[pars_to_plot[0]]+6*sigma_levels*np.sqrt(parameter_covariance_matrix[parameter_covariance_matrix_order.index(pars_to_plot[0]),parameter_covariance_matrix_order.index(pars_to_plot[0])]),
0.9,
"Gradient-Based Minimiser (NR)",
color="orange")
ax[0,0].text(overall_bf_point[pars_to_plot[0]]+6*sigma_levels*np.sqrt(parameter_covariance_matrix[parameter_covariance_matrix_order.index(pars_to_plot[0]),parameter_covariance_matrix_order.index(pars_to_plot[0])]),
0.7,
"Traditional Minimiser with CosmoPower",
color="purple")
plt.show()
```
<hr width="100%" style="border-style: none none dotted" color="#FFFFFF" size="6">
## Discussion
In this part, you have explored minimisation algorithms and experienced how gradient information can be a real gamechanger for the exploration of the likelihood surface. A simple extension you may consider (that is often done as a pre-cautionary measure in practice) is to launch multiple independent minimiser runs from random points in parameter space to ensure they all converge on the same point. Furthermore, while we have assessed in how many steps the different algorithms converge, we have not profiled (timed) them.
You can find a simple implementation of the Newton-Raphson method in the `candl.tools` module. Additionally, we can interface `candl` with `Optax` (a google-developed optimisation library for JAX, https://github.com/google-deepmind/optax) and access fancier algorithms, such as ADAM (https://arxiv.org/abs/1412.6980). Moreover, gradient information is also helpful for MCMC sampling - allowing you to propose more informative points to efficiently build up a representation of the posterior. If you're curious check out the `differentiable_tutorial` on the `candl` GitHub repository that develops these ideas further.
---
# Closing Remarks
We touched on many subjects in this notebook: you started handling data, accessing the data model of a likelihood, and made residual plots. You then built a differentiable pipeline and used it on real applications of CMB data analysis. Finally, you developed different minimisation algorithms - with and without gradient information - and compared their performance.
Still, this is just the tip of the iceberg. There are many more features in `candl`. The tools library has a variety of functions that allow you to manipulate data and short-cut common analysis tasks. The interface library allows you to run `candl` likelihoods seamlessly with other cosmology codes, such as `Cobaya`, or `CLASS`. candl also includes the latest lensing likelihoods from SPT and ACT, which we haven't touched yet.
The pairing of `candl` with differentiable theory codes is particularly powerful. It trivialises the calculation of derivatives through auto-diff, which extends to Fisher matrices. This allows us to perform many calculations quickly and robustly. Moreover, for exploring the likelihood surface, gradient information is invaluable. This also extends from minimisation to MCMC sampling.
More generally, this is an exciting time for likelihood analysis in the CMB world. Many people are working on differentiable theory codes (be they emulators, or full Boltzmann solvers), gradient-based samplers, and other techniques. This allows us to scrutinise our data more closely, which will be key as new data points with smaller and smaller error bars come in.
<p align="center">
<img src="https://media.tenor.com/enoxxJtm0yMAAAAM/neo-plugging-to-matrix.gif" width="500" alt="Welcome, Neo."/>
</p>
|
LbalkenholREPO_NAMEcandlPATH_START.@candl_extracted@candl-main@notebooks@SPT_ACT_summer_school_2024@SPT_ACT_summer_school_2024_candl.ipynb@.PATH_END.py
|
{
"filename": "TIRAVEL_Quickstart.ipynb",
"repo_name": "SPARTA-dev/SPARTA",
"repo_path": "SPARTA_extracted/SPARTA-master/examples/examples/TIRAVEL_Quickstart.ipynb",
"type": "Jupyter Notebook"
}
|
# TIRAVEL_Quickstart
### The notebook contains an RV calculation refinement tool, based on Zucker & Mazeh 2006 TIRAVEL - Template Independent RAdial VELocity measurement (arXiv:astro-ph/0607293).
This partial implementation of the TIRAVEL method enables using initial observation RV values to create a template for more accurate RV extraction.
To demonstrate our RV extraction and refinement process, we use HARPS observations of LTT 9779 (TOI-193) used by Jenkins et al. to detect an ultra-hot Neptune with a period of 0.79 days (arXiv:2009.12832).
### 1 Imports & funcs
```python
import sys
from sparta.Auxil.PeriodicityDetector import PeriodicityDetector
from sparta.UNICOR.Spectrum import Spectrum
from sparta.UNICOR.Template import Template
from sparta.Auxil.TimeSeries import TimeSeries
from sparta.Observations import Observations
import numpy as np
import random
from scipy import interpolate
from PyAstronomy import pyasl
import matplotlib.pyplot as plt
from scipy import signal
from copy import deepcopy
```
### 2 Using the Observations class to read and process the TOI-193 HARPS observations.
`Observations` class enables one to load observation data from a given folder
and place it into a TimeSeries object.
```python
# Note we used sample_rate to sampple only half of the available observations
obs_data = Observations(survey='HARPS', target_visits_lib='data/TOI193/', min_wv=4990, max_wv=5090, sample_rate=2)
for i in obs_data.time_series.vals:
i = i.SpecPreProccess()
plt.figure(figsize=(7, 3.5))
plt.title("TOI-193 observations")
plt.ylabel("Flux")
plt.xlabel("Wavelength (Angstrom)")
for i, s in enumerate(obs_data.time_series.vals):
plt.plot(s.wv[0], s.sp[0], alpha=0.3)
plt.show()
print(len(obs_data.time_series.vals), "epochs.")
```

16 epochs.
### 3 Using the Template class to download and process a matching library template.
`Template` class enables downloading and processing a PHOENIX library template for RV extraction.
Here we use the published TOI-193 parameters. We also apply rotational and Gaussian broadening to the spectrum.
```python
# Loading a Phoenix synthetic spectrum
t = Template(temp=5500, log_g=4.5, metal=0, alpha=0, min_val=4980, max_val=5100, air=False)
template_star_broadend = Template(template=Spectrum(wv=t.model.wv, sp=t.model.sp).InterpolateSpectrum())
v_sin_i = 1.5
print('RotationalBroadening.', end=' ')
template_star_broadend.RotationalBroadening(epsilon=0.5, vsini=v_sin_i)
template_star_broadend = deepcopy(Template(template=Spectrum(wv=[template_star_broadend.model.wv[0]], sp=[template_star_broadend.model.sp[0]]).SpecPreProccess()))
template_star_broadend = Template(template=Spectrum(wv=template_star_broadend.model.wv, sp=template_star_broadend.model.sp).InterpolateSpectrum(delta=0.5))
print('GaussianBroadening.', end=' ')
template_star_broadend.GaussianBroadening(resolution=115_000)
template_star_final = Template(template=Spectrum(wv=template_star_broadend.model.wv, sp=template_star_broadend.model.sp).SpecPreProccess())
```
RotationalBroadening. GaussianBroadening.
```python
plt.figure(figsize=(20,6))
plt.title("TOI-193 HARRPS observations VS. Library Template")
plt.ylabel("Flux")
plt.xlabel("Wavelength (Angstrom)")
plt.plot(template_star_final.model.wv[0], template_star_final.model.sp[0], alpha=0.3, color="blue")
for i, s in enumerate(obs_data.time_series.vals):
plt.plot(s.wv[0], s.sp[0], alpha=0.1)
plt.legend(["library template"])
plt.show()
```

### 3 Using fastccf method to extract RV values for the observations using the template.
```python
calculated_vrad_list = obs_data.calc_rv_against_template(template_star_final, dv=0.1, # 0.01
VelBound=[-117, 117], combine_ccfs=False,
fastccf=True).vels
```
100%|█| 16/16 [00:00<00:00, 20.71it/s
```python
plt.figure(figsize=(8,5))
plt.title("TOI-193 Phase-Folded RVs")
plt.ylabel("RV (km/s)")
plt.xlabel("Phase")
plt.scatter([ttt % 0.79 for ttt in obs_data.time_series.times], calculated_vrad_list, alpha=0.4)
plt.show()
```

### 3 Using TIRAVEL to refine our RV measurements.
```python
template_TIRAVEL = obs_data.time_series.TIRAVEL([calculated_vrad_list])
for i in obs_data.time_series.vals:
i = i.TrimSpec(Ntrim=150)
```
```python
# tir = deepcopy(template_star_broadend)
```
```python
refined_vrad_list = obs_data.calc_rv_against_template(template_TIRAVEL, dv=0.1,
VelBound=[-117, 117], combine_ccfs=False,
fastccf=True).vels
```
100%|█| 16/16 [00:00<00:00, 42.91it/s
```python
plt.figure(figsize=(20,6))
plt.plot(template_star_final.model.wv[0], template_star_final.model.sp[0], alpha=0.5)
plt.plot(template_TIRAVEL.model.wv[0], template_TIRAVEL.model.sp[0], alpha=0.8)
plt.legend(["library template", "TIRAVEL template"])
plt.show()
```

```python
plt.figure(figsize=(8,5))
plt.title("TOI-193 Phase-Folded RVs")
plt.ylabel("RV (km/s)")
plt.xlabel("Phase")
plt.scatter([ttt % 0.79 for ttt in obs_data.time_series.times], calculated_vrad_list, alpha=0.4)
plt.scatter([ttt % 0.79 for ttt in obs_data.time_series.times], refined_vrad_list, alpha=0.4)
plt.legend(["orig_RVs", "TIRAVEL_RVs"])
plt.show()
```

### 2. Using the PeriodicityDetector class to run GLS on the spectra
```python
# PeriodicityDetector object initialization
obs_data.initialize_periodicity_detector(freq_range=(0.5, 2), periodogram_grid_resolution=1_000)
obs_data.periodicity_detector.period_truth = [0.792] # The published planetary period in days
```
First we will try running the GLS periodogram using our initial RV estimations.
```python
obs_data.time_series.calculated_vrad_list = calculated_vrad_list
```
```python
obs_data.periodicity_detector.calc_GLS()
obs_data.periodicity_detector.plot_periodograms(plot_vals=True)
plt.show()
```

The resulted periodogram doesn't present the highest peak in the expected published period.
Now we'll run the periodogram on the refined RV data.
```python
obs_data.time_series.calculated_vrad_list = refined_vrad_list
```
```python
obs_data.periodicity_detector.calc_GLS()
obs_data.periodicity_detector.plot_periodograms(plot_vals=True)
plt.show()
```

As expected, the refined RV estimation presents slightly better results in detecting the planetary period in the data.
Although this is not a significant enough detection example, it is a light-weight demonstration of the TIRAVEL advantages.
|
SPARTA-devREPO_NAMESPARTAPATH_START.@SPARTA_extracted@SPARTA-master@examples@examples@TIRAVEL_Quickstart.ipynb@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolargl/marker/colorbar/tickfont/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``shadow`` string property of
    ``scatterpolargl.marker.colorbar.tickfont``.

    Delegates validation to the generic ``StringValidator``; the only
    customization is the default ``edit_type`` of ``"calc"``, which callers
    may override through ``kwargs``.
    """

    def __init__(
        self,
        plotly_name="shadow",
        parent_name="scatterpolargl.marker.colorbar.tickfont",
        **kwargs,
    ):
        # Zero-argument super() is the modern Python 3 idiom; behavior is
        # identical to the explicit two-argument form.
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolargl@marker@colorbar@tickfont@_shadow.py@.PATH_END.py
|
{
"filename": "B_Model_Tutorial_7_Factor_Graphs.ipynb",
"repo_name": "jmschrei/pomegranate",
"repo_path": "pomegranate_extracted/pomegranate-master/docs/tutorials/B_Model_Tutorial_7_Factor_Graphs.ipynb",
"type": "Jupyter Notebook"
}
|
## Factor Graphs
author: Jacob Schreiber <br>
contact: jmschreiber91@gmail.com
Factor graphs are a powerful and, in my opinion, underused probabilistic model. Potentially, one reason that they are underutilized is that they are not as conceptually intuitive as Bayesian networks or mixture models. Here, we will alleviate any confusion.
Factor graphs are similar to Bayesian networks in that they consist of a set of probability distributions and a graph connecting them. However, unlike Bayesian networks, this graph is bipartite, and the set of probability distributions is not simply the variables in a data set; rather, the distributions are the union of the marginal distributions of each variable and the factor distributions, which are usually multivariate. In the graph, marginal distributions are on one side, factor distributions are on the other side, and the undirected edges only connect factor distributions with the marginal distributions of the variables that comprise them.
One way of thinking about this is to start with a Bayesian network. If you convert all the conditional probability distributions into joint probability distributions and keep the univariate distributions as is, you now have your set of factor distributions. Then, for each variable in your network, you add in a marginal distribution. This gives you the set of nodes in your factor graph. Finally, you add an edge between each factor distribution and the marginal distributions corresponding to the variables that make up that joint distribution. When the factor is a univariate distribution, you just add a single edge between that factor and its marginal distribution.
Once the factor graph is instantiated, inference involves an iterative message passing algorithm. In the first step, marginal distributions emit messages which are copies of themselves. Then, in the second step, factor distributions take in estimates of each marginal distribution, combine them with the joint probability parameters, and emit new estimates for each variable back to each marginal distribution. Finally, in the third step, the marginal distributions take in these estimates, average them together, and emit the new estimates back to each variable. The second and third steps are repeated until convergence.
Note: although parameter fitting is implemented for factor graphs, structure learning is not yet supported.
```python
%pylab inline
import seaborn; seaborn.set_style('whitegrid')
import torch
%load_ext watermark
%watermark -m -n -p torch,pomegranate
```
Populating the interactive namespace from numpy and matplotlib
torch : 1.13.0
pomegranate: 1.0.0
Compiler : GCC 11.2.0
OS : Linux
Release : 4.15.0-208-generic
Machine : x86_64
Processor : x86_64
CPU cores : 8
Architecture: 64bit
### Initialization and Fitting
A factor graph is comprised of two sets of distributions: the factor distributions, which are a mixture of joint probabilities and univariate probabilities, and the marginal distributions, which are one univariate probability distribution per variable in your data. Marginal distributions are usually set to the uniform distribution, whereas the factor distributions encode statistics about the underlying data. These two sets of distributions are connected using an undirected unweighted bipartite graph such that factors can be connected to marginals but not to other factors. Each variable in your data must have a corresponding marginal distribution and appear in at least one distribution on the factor side. Each variable can occur in multiple factor distributions.
Let's start off by implementing the simplest factor graph: two variables joined in a joint categorical distribution.
```python
from pomegranate.distributions import Categorical
from pomegranate.distributions import JointCategorical
from pomegranate.factor_graph import FactorGraph
m1 = Categorical([[0.5, 0.5]])
m2 = Categorical([[0.5, 0.5]])
f1 = JointCategorical([[0.1, 0.3], [0.2, 0.4]])
model = FactorGraph([f1], [m1, m2], [(m1, f1), (m2, f1)])
```
Note that the order that edges are added to the model denote the ordering of variables in the `JointCategorical` distribution, in that the first edge containing `f1` (just the first edge in this case) indicates that the parent of that edge is the first dimension in the factor tensor.
Similarly, the ordering that marginal distributions are added to the model correspond to the ordering of the variables in your data. Specifically, `m1` covers the first column, `m2` covers the second column, and accordingly `f1` is made up of data from the first and second columns. The ordering of variables in a factor do not need to be sorted. If the edges were added as `[(m2, f1), (m1, f1)]`, a valid factor graph would be produced with the only difference being that the first dimension in the `f1` tensor would correspond to the second column data.
If you are constructing the factor graph in a more programmatic way you might prefer to use the `add_edge`, `add_marginal`, and `add_factor` methods to build the graph slowly. These methods are used internally when these values are passed into the initialization but can also be called themselves.
```python
model = FactorGraph()
model.add_factor(f1)
model.add_marginal(m1)
model.add_marginal(m2)
model.add_edge(m1, f1)
model.add_edge(m2, f1)
```
If you have data, you can then train the factor parameters in the same way that you can fit other parameters.
```python
X = torch.randint(2, size=(11, 2))
X
```
tensor([[0, 0],
[1, 0],
[1, 0],
[0, 0],
[1, 1],
[0, 1],
[1, 0],
[1, 1],
[0, 0],
[1, 1],
[0, 1]])
```python
model.fit(X)
model.factors[0].probs
```
Parameter containing:
tensor([[0.2727, 0.1818],
[0.2727, 0.2727]])
Importantly, these updates DO NOT change the marginal distribution values. This is because the factor values encode everything that you can learn from previous data (the likelihood function), whereas the marginal distributions represent your prior probability across symbols for that variable. When doing inference when you know the symbol coming from some variable, you set the marginal distributions to be that value 100%.
```python
model.marginals[0].probs
```
Parameter containing:
tensor([[0.5000, 0.5000]])
### Probabilities and Log Probabilities
Like other methods, one can calculate the probability of each example given the model. Unlike other models, this probability is factorized across the factors and the marginal distributions to be $P(X) = \prod\limits_{i=0}^{f} P(X_{p_i} | F_i) \prod\limits_{i=0}^{d} P(X_i | M_i)$ when there are f factors and d dimensions to your data. Basically, you are evaluating the probability of the data under the likelihood of the model (the product over the factors) and multiplying that by the probability of the data under the marginals (the product over the marginals). Without any prior information, the marginal probability is constant and can be ignored.
```python
model.probability(X[:1])
```
tensor([0.0682])
Remember that the above value includes multiplication by marginal distributions, so will probably not directly match any of those in the factor.
```python
model.log_probability(X)
```
tensor([-2.6856, -2.6856, -2.6856, -2.6856, -2.6856, -3.0910, -2.6856, -2.6856,
-2.6856, -2.6856, -3.0910])
### Predictions
Similarly to Bayesian networks, factor graphs can make predictions for missing values in data sets. In fact, Bayesian networks and Markov networks both frequently construct factor graphs in the backend to do the actual inference. These approaches use the sum-product algorithm, also called loopy belief propagation. The algorithm works essentially as follows:
- Initialize messages TO each factor FROM each marginal that is a copy of the marginal distribution
- For each factor, iterate over the marginal distributions it is connected to and calculate the factor's marginal distribution using all OTHER messages to it, ignoring the message from the marginal distribution of interest, and send its belief of what that distribution should be back to it
- For each marginal, multiply all incoming messages together to get an estimate of what the new marginal value should be.
After the messages converge the new marginal distributions are returned representing the probabilities of each distribution
When one knows what some of the variables should be, such as when they have an incomplete matrix, you can set the initial marginal probability distributions (not the messages, the actual distributions) to be consistent with that (i.e., in the categorical case, observed a `2` means assigning 100% of the probability to that category instead of using uniform probabilities).
Specifying an incomplete matrix is done by using a `torch.masked.MaskedTensor`.
```python
X_torch = torch.tensor(X[:4])
mask = torch.tensor([[True, False],
[False, True],
[True, True],
[False, False]])
X_masked = torch.masked.MaskedTensor(X_torch, mask=mask)
```
/tmp/ipykernel_89475/916229149.py:1: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
X_torch = torch.tensor(X[:4])
/home/jmschr/anaconda3/lib/python3.9/site-packages/torch/masked/maskedtensor/core.py:156: UserWarning: The PyTorch API of MaskedTensors is in prototype stage and will change in the near future. Please open a Github issue for features requests and see our documentation on the torch.masked module for further information about the project.
warnings.warn(("The PyTorch API of MaskedTensors is in prototype stage "
The mask indicates with a `True` value which indices are known. It does not matter what data value is taken when the mask is `False`.
```python
model.predict(X_masked)
```
tensor([[0, 0],
[0, 0],
[1, 0],
[1, 0]])
This is a somewhat discrete view of the result of the sum-product algorithm though because a full distribution is calculated. If you would like to get the entire predicted probabilities you can do that:
```python
model.predict_proba(X_masked)
```
[tensor([[1.0000, 0.0000],
[0.5000, 0.5000],
[0.0000, 1.0000],
[0.4545, 0.5455]]),
tensor([[0.6000, 0.4000],
[1.0000, 0.0000],
[1.0000, 0.0000],
[0.5455, 0.4545]])]
Note that the output here is a list with two dimensions. Each tensor in the list corresponds to a dimension in the underlying data and the tensors have shape `(n_examples, n_categories)` when using categorical data types. Basically, the first row in the first tensor includes the probabilities that element `X_masked[0, 0]` takes value 0 and 1. Because `X_masked[0, 0]` is an observed value, i.e., has a mask value of `True`, this means that the probability is clamped to 1 at the observed value.
If we only want the log probabilities we can calculate those as well.
```python
model.predict_log_proba(X_masked)
```
[tensor([[ 0.0000, -inf],
[-0.6931, -0.6931],
[ -inf, 0.0000],
[-0.7885, -0.6061]]),
tensor([[-0.5108, -0.9163],
[ 0.0000, -inf],
[ 0.0000, -inf],
[-0.6061, -0.7885]])]
### Summarize
Factor graphs can make use of the summarization API for training just like the other models. Basically, you can summarize one or more batches of data using the `summarize` method and then call the `from_summaries` method to update the parameters of the distribution. This will only update the parameters of the factors and leave the marginal distributions the same.
```python
model.summarize(X[:3])
model.from_summaries()
```
Then, we can check the parameters.
```python
model.factors[0].probs
```
Parameter containing:
tensor([[0.3333, 0.0000],
[0.6667, 0.0000]])
|
jmschreiREPO_NAMEpomegranatePATH_START.@pomegranate_extracted@pomegranate-master@docs@tutorials@B_Model_Tutorial_7_Factor_Graphs.ipynb@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterternary/hoverlabel/font/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``variant`` enumerated property of
    ``scatterternary.hoverlabel.font``.

    Delegates validation to the generic ``EnumeratedValidator`` with the
    set of CSS ``font-variant`` values accepted by plotly. Defaults
    (``array_ok``, ``edit_type``, ``values``) may be overridden via
    ``kwargs``.
    """

    def __init__(
        self,
        plotly_name="variant",
        parent_name="scatterternary.hoverlabel.font",
        **kwargs,
    ):
        # Zero-argument super() is the modern Python 3 idiom; behavior is
        # identical to the explicit two-argument form.
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            values=kwargs.pop(
                "values",
                [
                    "normal",
                    "small-caps",
                    "all-small-caps",
                    "all-petite-caps",
                    "petite-caps",
                    "unicase",
                ],
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterternary@hoverlabel@font@_variant.py@.PATH_END.py
|
{
"filename": "_textfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/treemap/pathbar/_textfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "treemap.pathbar"
_path_str = "treemap.pathbar.textfont"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Human-readable property summaries consumed when building the
        # class/constructor help text.  NOTE: this is a runtime string
        # (not a docstring), so its content — including the generated
        # '",,"' artifact — is preserved exactly as emitted by the
        # code generator.
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """
    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Textfont object

        Sets the font used inside `pathbar`.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.treemap.pathbar.Textfont`
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .

        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__("textfont")

        # Internal fast path: when constructed as a child of an existing
        # figure, only record the parent and skip validation/population.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping keys below does not mutate the
            # caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.treemap.pathbar.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.pathbar.Textfont`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # For each property, an explicit keyword argument takes
        # precedence over the corresponding entry in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@treemap@pathbar@_textfont.py@.PATH_END.py
|
{
"filename": "test_suspectMasking.py",
"repo_name": "lsst/ip_isr",
"repo_path": "ip_isr_extracted/ip_isr-main/tests/test_suspectMasking.py",
"type": "Python"
}
|
#
# LSST Data Management System
# Copyright 2008, 2009, 2010 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
import unittest
import numpy as np
import lsst.utils.tests
import lsst.geom
import lsst.afw.image as afwImage
import lsst.afw.cameraGeom as cameraGeom
import lsst.ip.isr as ipIsr
class IsrTestCases(lsst.utils.tests.TestCase):
    """Tests for IsrTask.suspectDetection.

    suspectDetection is expected to flag pixels at or above the
    amplifier's suspect level in the mask plane, while leaving the
    image and variance planes unchanged.
    """

    def setUp(self):
        # Minimal amplifier description: only the raw bbox and the
        # suspect level are exercised by these tests.
        ampInfo = cameraGeom.Amplifier.Builder()
        ampInfo.setRawBBox(lsst.geom.Box2I(lsst.geom.Point2I(-5, 7), lsst.geom.Extent2I(53, 104)))
        ampInfo.setSuspectLevel(25000)
        self.ampInfo = ampInfo
        self.isrTask = ipIsr.IsrTask()

    def tearDown(self):
        self.ampInfo = None
        self.isrTask = None

    def testBasicMasking(self):
        """Test that masking works
        """
        maxVal = 32000
        fracSuspect = 0.3
        suspectLevel = maxVal*(1 - fracSuspect)

        bbox = self.ampInfo.getRawBBox()
        self.ampInfo.setSuspectLevel(suspectLevel)
        maskedImage = makeRampMaskedImage(bbox, 0, maxVal)
        imArr = maskedImage.getImage().getArray()
        # Pixels at or above the suspect level are expected to be flagged.
        desSetArr = imArr >= suspectLevel
        exposure = afwImage.ExposureF(maskedImage)
        inMaskedImage = maskedImage.Factory(maskedImage, True)  # deep copy
        self.isrTask.suspectDetection(exposure, self.ampInfo)
        maskArr = maskedImage.getMask().getArray()
        suspectMask = maskedImage.getMask().getPlaneBitMask("SUSPECT")
        measSetArr = maskArr == suspectMask
        self.assertImagesEqual(desSetArr, measSetArr)
        # Image and variance planes must be unchanged (mask excluded).
        self.assertMaskedImagesAlmostEqual(inMaskedImage, maskedImage, doMask=False)

    def testNanLevel(self):
        """Test that setting the suspect level to nan disables masking
        """
        bbox = self.ampInfo.getRawBBox()
        self.ampInfo.setSuspectLevel(float("nan"))
        maskedImage = makeRampMaskedImage(bbox, 0, 32000)
        exposure = afwImage.ExposureF(maskedImage)
        inMaskedImage = maskedImage.Factory(maskedImage, True)  # deep copy
        self.isrTask.suspectDetection(exposure, self.ampInfo)
        # Nothing should change, including the mask plane.
        self.assertMaskedImagesAlmostEqual(inMaskedImage, maskedImage)

    def testRenamedMasking(self):
        """Test that masking works using some other mask name instead of the
        default.
        """
        AltMaskName = "BAD"  # pick something that exists for simplicity
        isrConfig = ipIsr.IsrTask.ConfigClass()
        isrConfig.suspectMaskName = AltMaskName
        isrTask = ipIsr.IsrTask(config=isrConfig)

        maxVal = 32000
        fracSuspect = 0.3
        suspectLevel = maxVal*(1 - fracSuspect)

        bbox = self.ampInfo.getRawBBox()
        self.ampInfo.setSuspectLevel(suspectLevel)
        maskedImage = makeRampMaskedImage(bbox, 0, maxVal)
        imArr = maskedImage.getImage().getArray()
        desSetArr = imArr >= suspectLevel
        exposure = afwImage.ExposureF(maskedImage)
        inMaskedImage = maskedImage.Factory(maskedImage, True)  # deep copy
        isrTask.suspectDetection(exposure, self.ampInfo)
        maskArr = maskedImage.getMask().getArray()
        # The bit checked is the renamed plane, not the default SUSPECT.
        suspectMask = maskedImage.getMask().getPlaneBitMask(AltMaskName)
        measSetArr = maskArr == suspectMask
        self.assertImagesEqual(desSetArr, measSetArr)
        self.assertMaskedImagesAlmostEqual(inMaskedImage, maskedImage, doMask=False)
def makeRampMaskedImage(bbox, minVal, maxVal, imgClass=afwImage.MaskedImageF):
    """Make a ramp masked image of the specified size and image class.

    Image values increase linearly from ``minVal`` at the lower-left
    corner to ``maxVal``, filling along rows.  The variance plane equals
    the image plane plus 100 and the mask plane is cleared to 0.
    """
    ramp = imgClass(bbox)
    image_array = ramp.getImage().getArray()
    ramp_values = np.linspace(minVal, maxVal, image_array.size).reshape(
        bbox.getHeight(), bbox.getWidth())
    image_array[:] = ramp_values
    ramp.getVariance().getArray()[:] = ramp_values + 100
    ramp.getMask().getArray()[:] = 0
    return ramp
class MemoryTester(lsst.utils.tests.MemoryTestCase):
    """Standard LSST memory/leak check; no extra configuration needed."""
    pass
def setup_module(module):
    # pytest hook: initialize the LSST test framework before this
    # module's tests run.
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()
|
lsstREPO_NAMEip_isrPATH_START.@ip_isr_extracted@ip_isr-main@tests@test_suspectMasking.py@.PATH_END.py
|
{
"filename": "test_combine_fast_slow.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/flatfield/tests/test_combine_fast_slow.py",
"type": "Python"
}
|
"""
Test for flat_field.combine_fast_slow
"""
import numpy as np
import pytest
from scipy.integrate import quad
from astropy.modeling import polynomial
from stdatamodels.jwst.datamodels import dqflags
from jwst.flatfield.flat_field import combine_fast_slow
@pytest.mark.parametrize('flat_err_1,flat_err_2',
                         [(0.0, None), (np.nan, None), (np.nan, np.nan),
                          (0.0, 0.1), (np.nan, 0.1), (0, np.nan),
                          (0.1, 0.0), (0.1, np.nan),
                          (0.1, None), (0.1, 0.1)])
def test_combine_fast_slow(flat_err_1, flat_err_2):
    """Test combine_fast_slow against an exactly integrable polynomial.

    Assign an array of values to tab_wl; these are strictly increasing,
    but they're not uniformly spaced.

    The abscissas and weights in combine_fast_slow are for three-point
    Gaussian integration, which will (in principle) integrate a fifth-order
    polynomial exactly. So for a test, we use a set of six polynomial
    coefficients to create the tab_flat array by evaluating the polynomial
    with those coefficients over the tab_wl array.

    Parameters
    ----------
    flat_err_1 : float
        Error value filled into the tabular flat's error array; may be NaN.
    flat_err_2 : float or None
        Error value filled into the 2-D flat's error array; None means
        no error array is passed.
    """
    # Generate an array (tab_wl) of wavelengths, not uniformly spaced.
    wl_coeff = {'c0': 5., 'c1': 1., 'c2': -0.05}
    wl_poly = polynomial.Polynomial1D(degree=2, **wl_coeff)
    tab_index = np.arange(6000, dtype=np.float64) / 1000.
    tab_wl = wl_poly(tab_index)
    del tab_index
    # Fifth-order polynomial: integrated exactly by the three-point
    # Gauss rule used inside combine_fast_slow.
    coeff = {'c0': -41.9,
             'c1': 30.7,
             'c2': -8.3,
             'c3': 1.15,
             'c4': -7.8e-2,
             'c5': 2.0e-3}
    poly = polynomial.Polynomial1D(degree=5, **coeff)
    tab_flat = poly(tab_wl)
    tab_flat_err = np.full(tab_flat.shape, flat_err_1, dtype=tab_flat.dtype)

    wl0 = 7.37
    dwl0 = 2.99
    lower = wl0 - dwl0 / 2.
    upper = wl0 + dwl0 / 2.
    # Compute the integral of the polynomial over wavelength.
    # This function returns a tuple, the integral and an error estimate.
    integral = quad(poly, lower, upper)[0]
    # We want the average over the interval, not the integral itself.
    correct_value = integral / (upper - lower)

    # Make a simple flat with the expected bin width in wavelength
    wl_1d = np.arange(-5, 5) * dwl0 + wl0
    wl = np.tile(wl_1d, (10, 1))
    flat_2d = np.ones((10, 10), dtype=float)
    flat_dq = np.zeros((10, 10), dtype=np.uint32)

    # input 2d flat error
    if flat_err_2 is None:
        flat_err = None
        v2 = 0
    else:
        flat_err = np.full((10, 10), flat_err_2)
        v2 = flat_err_2**2

    # Expected output variance is:
    # v = v1 * f2^2 + v2 * f1^2, where f2 is always 1
    err_result = np.sqrt(np.nansum([flat_err_1**2, v2 * correct_value**2]))

    dispaxis = 1
    value, new_dq, new_err = combine_fast_slow(wl, flat_2d, flat_dq, flat_err, tab_wl, tab_flat, tab_flat_err, dispaxis)

    # Check output shapes
    assert value.shape == wl.shape
    assert new_dq.shape == wl.shape
    assert new_err.shape == wl.shape

    # Column 5 is the expected value
    assert np.allclose(value[:, 5], correct_value, rtol=1.e-8, atol=1.e-8)
    assert np.all(new_dq[:, 5] == 0)
    assert np.allclose(new_err[:, 5], err_result, rtol=1.e-8, atol=1.e-8)

    # Columns 0-2 are bad (negative wavelengths, not marked in DQ)
    assert np.all(value[:, :3] == 1)
    assert np.all(new_dq[:, :3] == 0)
    if flat_err is None or np.isnan(flat_err_2):
        assert np.all(new_err[:, :3] == 0)
    else:
        assert np.allclose(new_err[:, :3], flat_err[:, :3], equal_nan=True)

    # Columns 3, 4, 6-9 are not covered by the tabular data
    # (missing values, marked in DQ)
    bad_value = dqflags.pixel['NO_FLAT_FIELD'] | dqflags.pixel['DO_NOT_USE']
    assert np.all(value[:, (3, 4, 6, 7, 8, 9)] == 1)
    assert np.all(new_dq[:, (3, 4, 6, 7, 8, 9)] == bad_value)
    if flat_err is None or np.isnan(flat_err_2):
        assert np.all(new_err[:, (3, 4, 6, 7, 8, 9)] == 0)
    else:
        assert np.allclose(new_err[:, (3, 4, 6, 7, 8, 9)],
                           flat_err[:, (3, 4, 6, 7, 8, 9)])
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@flatfield@tests@test_combine_fast_slow.py@.PATH_END.py
|
{
"filename": "_y.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/mapbox/domain/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``layout.mapbox.domain.y`` property.

    The value is a 2-item array, each entry a number in [0, 1].
    """

    def __init__(self, plotly_name="y", parent_name="layout.mapbox.domain", **kwargs):
        super(YValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override edit_type/items via kwargs; pop so
            # they are not passed twice to the base class.
            edit_type=kwargs.pop("edit_type", "plot"),
            items=kwargs.pop(
                "items",
                [
                    {"editType": "plot", "max": 1, "min": 0, "valType": "number"},
                    {"editType": "plot", "max": 1, "min": 0, "valType": "number"},
                ],
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@mapbox@domain@_y.py@.PATH_END.py
|
{
"filename": "_tickwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contour/colorbar/_tickwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``contour.colorbar.tickwidth`` property.

    Accepts a non-negative number (min=0).
    """

    def __init__(
        self, plotly_name="tickwidth", parent_name="contour.colorbar", **kwargs
    ):
        super(TickwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override edit_type/min via kwargs; pop so they
            # are not passed twice to the base class.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contour@colorbar@_tickwidth.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "freelunchtheorem/Conditional_Density_Estimation",
"repo_path": "Conditional_Density_Estimation_extracted/Conditional_Density_Estimation-master/README.md",
"type": "Markdown"
}
|
[](https://travis-ci.org/freelunchtheorem/Conditional_Density_Estimation) [](https://pepy.tech/project/cde)
# Conditional Density Estimation (CDE)
## Description
Implementations of various methods for conditional density estimation
* **Parametric neural network based methods**
* Mixture Density Network (MDN)
* Kernel Mixture Network (KMN)
* Normalizing Flows (NF)
* **Nonparametric methods**
* Conditional Kernel Density Estimation (CKDE)
* Neighborhood Kernel Density Estimation (NKDE)
* **Semiparametric methods**
* Least Squares Conditional Density Estimation (LSCDE)
Beyond estimating conditional probability densities, the package features extensive functionality for computing:
* **Centered moments:** mean, covariance, skewness and kurtosis
* **Statistical divergences:** KL-divergence, JS-divergence, Hellinger distance
* **Percentiles and expected shortfall**
For the parametric models (MDN, KMN, NF), we recommend the usage of noise regularization which is supported by our implementation. For details, we refer to the paper [Noise Regularization for Conditional Density Estimation](https://arxiv.org/abs/1907.08982).
## Installation
To use the library, you can directly use the python package index:
```bash
pip install cde
```
or clone the GitHub repository and run
```bash
pip install .
```
Note that the package only supports tensorflow versions between 1.4 and 1.7.
## Documentation and paper
See the documentation [here](https://freelunchtheorem.github.io/Conditional_Density_Estimation). A paper on best practices and benchmarks on conditional density estimation with neural networks that makes extensive use of this library can be found [here](https://arxiv.org/abs/1903.00954).
## Usage
The following code snippet is a simple example that demonstrates how to use the cde package.
```python
from cde.density_simulation import SkewNormal
from cde.density_estimator import KernelMixtureNetwork
import numpy as np
""" simulate some data """
density_simulator = SkewNormal(random_seed=22)
X, Y = density_simulator.simulate(n_samples=3000)
""" fit density model """
model = KernelMixtureNetwork("KDE_demo", ndim_x=1, ndim_y=1, n_centers=50,
x_noise_std=0.2, y_noise_std=0.1, random_seed=22)
model.fit(X, Y)
""" query the conditional pdf and cdf """
x_cond = np.zeros((1, 1))
y_query = np.ones((1, 1)) * 0.1
prob = model.pdf(x_cond, y_query)
cum_prob = model.cdf(x_cond, y_query)
""" compute conditional moments & VaR """
mean = model.mean_(x_cond)[0][0]
std = model.std_(x_cond)[0][0]
skewness = model.skewness(x_cond)[0]
```
## Citing
If you use our CDE implementation in your research, you can cite it as follows:
```
@article{rothfuss2019conditional,
title={Conditional Density Estimation with Neural Networks: Best Practices and Benchmarks},
author={Rothfuss, Jonas and Ferreira, Fabio and Walther, Simon and Ulrich, Maxim},
journal={arXiv:1903.00954},
year={2019}
}
```
If you use noise regularization for regularizing the MDN, KMN or NF conditional density model, please cite
```
@article{rothfuss2019noisereg,
title={Noise Regularization for Conditional Density Estimation},
author={Jonas Rothfuss and Fabio Ferreira and Simon Boehm and Simon Walther
and Maxim Ulrich and Tamim Asfour and Andreas Krause},
year={2019},
journal={arXiv:1907.08982},
}
```
## Todo
- creating a branch just for our conditional estimators + python package
- support for TensorFlow versions > 1.7 (your help would be highly appreciated here)
|
freelunchtheoremREPO_NAMEConditional_Density_EstimationPATH_START.@Conditional_Density_Estimation_extracted@Conditional_Density_Estimation-master@README.md@.PATH_END.py
|
{
"filename": "testing plummers sphere-Copy2-checkpoint.ipynb",
"repo_name": "jan-rybizki/Galaxia_wrap",
"repo_path": "Galaxia_wrap_extracted/Galaxia_wrap-master/notebook/.ipynb_checkpoints/testing plummers sphere-Copy2-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```python
"""
Plummer model generator
This module contains a function used to create Plummer (1911) models, which
follow a spherically symmetric density profile of the form:
rho = c * (1 + r**2)**(-5/2)
"""
import numpy
import numpy.random
from math import pi, sqrt
from amuse.units import nbody_system
from amuse import datamodel
__all__ = ["new_plummer_sphere", "new_plummer_model"]
class MakePlummerModel(object):
    """Generate Plummer (1911) model initial conditions.

    The model follows the spherically symmetric density profile
    rho = c * (1 + r**2)**(-5/2).  Radii are drawn by inverting the
    cumulative mass profile; speeds are drawn by von Neumann rejection
    sampling of the isotropic velocity distribution.
    """

    def __init__(self, number_of_particles, convert_nbody = None, radius_cutoff = 22.8042468, mass_cutoff = 0.999,
            do_scale = False, random_state = None, random = None):
        # number_of_particles: number of equal-mass particles to generate
        # convert_nbody: optional converter; when given, `result` is
        #   returned in SI units
        # radius_cutoff / mass_cutoff: truncation of the (formally
        #   infinite) profile; the effective mass cutoff is the smaller
        #   of the two constraints
        # do_scale: rescale to exact n-body units (M=1, K=0.25, U=-0.5)
        # random: random number generator (defaults to numpy.random)
        self.number_of_particles = number_of_particles
        self.convert_nbody = convert_nbody
        self.mass_cutoff = min(mass_cutoff, self.calculate_mass_cuttof_from_radius_cutoff(radius_cutoff))
        self.do_scale = do_scale
        # random_state is deprecated and ignored.
        if random_state is not None:
            print("DO NOT USE RANDOM STATE")
        self.random_state = None
        if random is None:
            self.random = numpy.random
        else:
            self.random = random

    def calculate_mass_cuttof_from_radius_cutoff(self, radius_cutoff):
        """Return the enclosed-mass fraction at ``radius_cutoff``.

        (Method name, including the historical typo, is kept for
        backward compatibility.)
        """
        if radius_cutoff > 99999:
            return 1.0
        scale_factor = 16.0 / (3.0 * pi)
        rfrac = radius_cutoff * scale_factor
        denominator = pow(1.0 + rfrac ** 2, 1.5)
        numerator = rfrac ** 3
        return numerator / denominator

    def calculate_radius(self, index):
        """Return one radius drawn from the mass shell of particle ``index``."""
        mass_min = (index * self.mass_cutoff) / self.number_of_particles
        mass_max = ((index + 1) * self.mass_cutoff) / self.number_of_particles
        random_mass_fraction = self.random.uniform(mass_min, mass_max)
        radius = 1.0 / sqrt(pow(random_mass_fraction, -2.0 / 3.0) - 1.0)
        return radius

    def calculate_radius_uniform_distribution(self):
        """Return an (N, 1) array of radii from the inverted mass profile."""
        return 1.0 / numpy.sqrt(
            numpy.power(self.random.uniform(0, self.mass_cutoff, (self.number_of_particles, 1)), -2.0 / 3.0) - 1.0)

    def new_positions_spherical_coordinates(self):
        """Return (radius, theta, phi) position samples, each shaped (N, 1)."""
        pi2 = pi * 2
        radius = self.calculate_radius_uniform_distribution()
        # arccos of a uniform deviate gives isotropic polar angles.
        theta = numpy.arccos(self.random.uniform(-1.0, 1.0, (self.number_of_particles, 1)))
        phi = self.random.uniform(0.0, pi2, (self.number_of_particles, 1))
        return (radius, theta, phi)

    def new_velocities_spherical_coordinates(self, radius):
        """Return (speed, theta, phi) velocity samples for the given radii."""
        pi2 = pi * 2
        x, y = self.new_xy_for_velocity()
        # Speed scales with (1 + r^2)^(-1/4) — the Plummer local velocity
        # scale (presumably the escape-speed profile; see Aarseth et al.).
        velocity = x * sqrt(2.0) * numpy.power(1.0 + radius * radius, -0.25)
        theta = numpy.arccos(self.random.uniform(-1.0, 1.0, (self.number_of_particles, 1)))
        phi = self.random.uniform(0.0, pi2, (self.number_of_particles, 1))
        return (velocity, theta, phi)

    def coordinates_from_spherical(self, radius, theta, phi):
        """Convert spherical samples to cartesian (x, y, z)."""
        x = radius * numpy.sin(theta) * numpy.cos(phi)
        y = radius * numpy.sin(theta) * numpy.sin(phi)
        z = radius * numpy.cos(theta)
        return (x, y, z)

    def new_xy_for_velocity(self):
        """Rejection-sample the speed distribution.

        Returns two (N, 1) column arrays: x, the accepted abscissas, and
        y, the matching uniform deviates satisfying
        y <= g(x) = x**2 * (1 - x**2)**3.5.
        """
        number_of_selected_items = 0
        selected_values_for_x = numpy.zeros(0)
        selected_values_for_y = numpy.zeros(0)
        while number_of_selected_items < self.number_of_particles:
            x = self.random.uniform(0, 1.0, (self.number_of_particles - number_of_selected_items))
            y = self.random.uniform(0, 0.1, (self.number_of_particles - number_of_selected_items))
            g = (x ** 2) * numpy.power(1.0 - x ** 2, 3.5)
            compare = y <= g
            selected_values_for_x = numpy.concatenate((selected_values_for_x, x.compress(compare)))
            # BUG FIX: the original concatenated selected_values_for_x
            # (instead of selected_values_for_y) into the y accumulator,
            # corrupting the returned y samples and their length.
            selected_values_for_y = numpy.concatenate((selected_values_for_y, y.compress(compare)))
            number_of_selected_items = len(selected_values_for_x)
        return numpy.atleast_2d(selected_values_for_x).transpose(), numpy.atleast_2d(selected_values_for_y).transpose()

    def new_model(self):
        """Return raw (mass, position, velocity) arrays in rescaled units."""
        m = numpy.zeros((self.number_of_particles, 1)) + (1.0 / self.number_of_particles)
        radius, theta, phi = self.new_positions_spherical_coordinates()
        position = numpy.hstack(self.coordinates_from_spherical(radius, theta, phi))
        radius, theta, phi = self.new_velocities_spherical_coordinates(radius)
        velocity = numpy.hstack(self.coordinates_from_spherical(radius, theta, phi))
        # 1.695 rescales lengths (and 1/sqrt(1/1.695) speeds) to the
        # convention used in this notebook — TODO confirm the intended
        # length unit.
        position = position / 1.695
        velocity = velocity / sqrt(1 / 1.695)
        return (m, position, velocity)

    @property
    def result(self):
        """Return the model as an AMUSE particle set, centered on the origin.

        When ``convert_nbody`` was supplied, the set is converted to SI
        units; when ``do_scale`` is set, it is rescaled to exact n-body
        units first.
        """
        masses = numpy.ones(self.number_of_particles) / self.number_of_particles
        radius, theta, phi = self.new_positions_spherical_coordinates()
        x, y, z = self.coordinates_from_spherical(radius, theta, phi)
        radius, theta, phi = self.new_velocities_spherical_coordinates(radius)
        vx, vy, vz = self.coordinates_from_spherical(radius, theta, phi)

        result = datamodel.Particles(self.number_of_particles)
        result.mass = nbody_system.mass.new_quantity(masses)
        result.x = nbody_system.length.new_quantity(x.reshape(self.number_of_particles) / 1.695)
        result.y = nbody_system.length.new_quantity(y.reshape(self.number_of_particles) / 1.695)
        result.z = nbody_system.length.new_quantity(z.reshape(self.number_of_particles) / 1.695)
        result.vx = nbody_system.speed.new_quantity(vx.reshape(self.number_of_particles) / sqrt(1 / 1.695))
        result.vy = nbody_system.speed.new_quantity(vy.reshape(self.number_of_particles) / sqrt(1 / 1.695))
        result.vz = nbody_system.speed.new_quantity(vz.reshape(self.number_of_particles) / sqrt(1 / 1.695))
        result.radius = 0 | nbody_system.length

        result.move_to_center()
        if self.do_scale:
            result.scale_to_standard()

        if self.convert_nbody is not None:
            result = datamodel.ParticlesWithUnitsConverted(result, self.convert_nbody.as_converter_from_si_to_generic())
            result = result.copy()
        return result
def new_plummer_model(number_of_particles, *list_arguments, **keyword_arguments):
    """Create a Plummer sphere with the given number of particles.

    Returns a set of equal-mass stars whose positions and velocities
    follow the Plummer distribution, centered around the origin.
    Positions and velocities are optionally scaled so that the kinetic
    and potential energies are 0.25 and -0.5 in n-body units.

    :argument number_of_particles: Number of particles in the sphere
    :argument convert_nbody: When given, converts the result to SI units
    :argument radius_cutoff: Cutoff value for the radius (defaults to 22.8042468)
    :argument mass_cutoff: Mass percentage inside radius of 1
    :argument do_scale: scale the result to exact nbody units (M=1, K=0.25, U=-0.5)
    """
    model_maker = MakePlummerModel(number_of_particles, *list_arguments, **keyword_arguments)
    return model_maker.result


# Backward-compatible alias.
new_plummer_sphere = new_plummer_model
```
```python
import numpy as np
from amuse.units import units
```
```python
# Build an N-body <-> SI converter for a 100 MSun, 1 pc cluster.
Mcluster= 100. | units.MSun
Rcluster= 1. | units.parsec
converter= nbody_system.nbody_to_si(Mcluster,Rcluster)
N = 1000
stars = new_plummer_sphere(N,converter)
# Inspect a few attributes in convenient units.
stars.x.as_quantity_in(units.parsec)
stars.vx.as_quantity_in(units.kms)
stars.mass.as_quantity_in(units.MSun)
```
```python
print(stars)
```
key mass radius vx vy vz x y z
- kg m m * s**-1 m * s**-1 m * s**-1 m m m
==================== =========== =========== =========== =========== =========== =========== =========== ===========
8570000922529145642 1.989e+29 0.000e+00 -7.630e+01 -2.202e+02 -9.571e+01 -7.165e+15 1.175e+16 -1.924e+14
16866305370086961643 1.989e+29 0.000e+00 3.083e+02 -1.495e+02 -2.422e+02 -1.127e+16 1.692e+16 -2.547e+16
3019341560404727600 1.989e+29 0.000e+00 6.799e+01 2.047e+02 3.433e+02 5.700e+15 2.645e+16 -4.216e+16
9117812955890796496 1.989e+29 0.000e+00 -6.986e+01 -1.746e+02 1.463e+02 -9.282e+15 -2.395e+16 -3.681e+15
10494064922514935983 1.989e+29 0.000e+00 5.127e+01 4.360e+01 3.053e+02 1.462e+16 -1.553e+16 -1.161e+16
933562790889811644 1.989e+29 0.000e+00 -9.603e+01 -4.110e+02 3.400e+02 1.993e+16 -3.579e+16 -4.827e+15
2799784261108908469 1.989e+29 0.000e+00 -1.425e+02 -1.016e+02 -1.030e+02 4.252e+15 -6.147e+14 -1.468e+16
14204795523942641610 1.989e+29 0.000e+00 -3.458e+02 5.481e-01 3.205e+01 4.841e+16 -2.640e+16 2.751e+16
15968786880919290942 1.989e+29 0.000e+00 -7.995e+01 -4.651e+01 3.904e+02 -7.712e+15 2.825e+15 -1.334e+16
5384214454233089650 1.989e+29 0.000e+00 -8.957e+01 2.408e+02 -3.225e+02 5.975e+15 1.324e+16 1.037e+16
11694318008629110429 1.989e+29 0.000e+00 4.534e+01 4.962e+02 -1.972e+01 -6.504e+15 2.327e+16 -1.315e+15
10468859765627707184 1.989e+29 0.000e+00 2.029e+02 2.033e+02 1.601e+02 -1.545e+15 -6.771e+14 -4.466e+15
14037989317420485962 1.989e+29 0.000e+00 -2.562e+02 -3.550e+02 -1.981e+02 1.779e+16 -7.389e+15 -1.755e+16
14696957979004543443 1.989e+29 0.000e+00 -1.271e+02 -1.703e+02 -1.770e+02 -4.040e+15 7.984e+14 8.039e+15
17450387517227513179 1.989e+29 0.000e+00 -4.699e+01 -6.429e+01 2.053e+02 -4.004e+15 -9.026e+15 7.453e+15
16371813288870014278 1.989e+29 0.000e+00 -2.200e+02 -4.478e+02 3.243e+02 -7.467e+15 -1.510e+16 9.998e+14
18257101678874285164 1.989e+29 0.000e+00 -5.997e+01 2.142e+02 8.987e+01 -5.401e+15 -9.939e+16 2.597e+16
4235285509314623680 1.989e+29 0.000e+00 3.492e+02 2.605e+02 4.722e+02 2.494e+16 9.416e+15 -6.823e+15
6305373368182115250 1.989e+29 0.000e+00 3.111e+02 6.102e+02 -1.383e+02 4.630e+15 -8.373e+15 -3.081e+15
8658431730275604591 1.989e+29 0.000e+00 3.789e+02 1.334e+02 -4.368e+02 1.324e+16 -3.405e+15 -7.004e+14
... ... ... ... ... ... ... ... ...
9506530745109383956 1.989e+29 0.000e+00 -6.691e+01 -1.640e+02 -2.043e+02 7.108e+16 -2.860e+16 3.084e+15
4110116295055423995 1.989e+29 0.000e+00 -1.981e+02 -2.468e+02 -3.306e+02 5.974e+15 4.625e+16 2.774e+16
7576594238786647211 1.989e+29 0.000e+00 -3.030e+02 -8.113e+01 1.249e+02 -3.970e+15 1.992e+16 1.708e+15
15571495463014044435 1.989e+29 0.000e+00 -3.976e+02 3.159e+02 2.465e+02 2.743e+16 1.053e+16 -1.198e+16
2754897307865800261 1.989e+29 0.000e+00 1.492e+02 -8.151e+01 -4.655e+02 2.818e+15 7.633e+15 2.567e+15
12394952476165677204 1.989e+29 0.000e+00 -3.713e+02 2.364e+02 5.769e+02 -4.757e+15 1.174e+16 1.225e+15
2690794349631812192 1.989e+29 0.000e+00 2.197e+02 -1.236e+01 -2.966e+02 -3.550e+15 2.421e+15 1.187e+16
5015937241314305491 1.989e+29 0.000e+00 -1.377e+02 1.195e+02 -9.651e+01 1.181e+16 8.715e+16 1.038e+17
7917057405666164567 1.989e+29 0.000e+00 -8.997e+01 -3.953e+02 -8.788e+01 1.028e+16 1.083e+16 1.438e+16
5324560192936279476 1.989e+29 0.000e+00 -1.737e+02 2.662e+02 -9.893e+01 -1.963e+16 2.257e+15 3.271e+16
15426111458719525995 1.989e+29 0.000e+00 1.271e+02 2.942e+02 -4.832e+02 1.078e+16 -2.429e+16 -1.086e+16
3278817653049919000 1.989e+29 0.000e+00 -1.906e+02 2.904e+02 -4.827e+01 5.677e+15 9.446e+15 1.006e+16
15047600948894891976 1.989e+29 0.000e+00 -5.902e+02 4.129e+01 -2.340e+02 1.378e+16 1.762e+16 9.621e+15
9188816936628549142 1.989e+29 0.000e+00 -1.276e+02 -5.699e+02 -6.673e+02 -4.068e+15 -6.627e+15 -5.996e+15
17247018077410035510 1.989e+29 0.000e+00 -1.488e+02 -3.642e+02 1.602e+02 2.065e+16 -6.337e+15 -3.206e+16
15815574283011827068 1.989e+29 0.000e+00 -4.883e+01 -2.192e+02 -1.239e+02 4.104e+15 -7.949e+15 -1.618e+16
5017310566950932681 1.989e+29 0.000e+00 -8.746e+01 -2.139e+02 -9.114e+00 -1.263e+16 -1.132e+15 -2.200e+16
2326402129812388904 1.989e+29 0.000e+00 -3.115e+02 2.026e+02 3.376e+02 5.303e+16 -1.899e+16 2.696e+16
5624911068823610574 1.989e+29 0.000e+00 -3.928e+02 1.455e+02 8.931e+01 5.561e+15 1.716e+14 -1.316e+16
11054286897726687194 1.989e+29 0.000e+00 -6.441e+01 1.774e+02 9.861e+01 -3.865e+15 -2.176e+15 -1.329e+16
==================== =========== =========== =========== =========== =========== =========== =========== ===========
```python
np.mean(x)
```
quantity<-1.45519152284e-14 m>
```python
```
|
jan-rybizkiREPO_NAMEGalaxia_wrapPATH_START.@Galaxia_wrap_extracted@Galaxia_wrap-master@notebook@.ipynb_checkpoints@testing plummers sphere-Copy2-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "plotsettings.py",
"repo_name": "Anirbancosmo/limpy",
"repo_path": "limpy_extracted/limpy-master/limpy/plotsettings.py",
"type": "Python"
}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 23:25:50 2020
@author: anirbanroy
"""
import matplotlib as pl

# Global matplotlib defaults for limpy figures: square canvas, ticks on all
# four sides pointing inward, LaTeX serif text, consistent label sizing.
# NOTE: rcParams set *before* pl.style.use() can be overridden by the style
# sheet; the original ordering (params -> style -> LaTeX/font params) is
# preserved deliberately.
pl.rcParams.update({
    "figure.figsize": (5, 5),
    "xtick.top": True,
    "ytick.right": True,
    "ytick.minor.visible": True,
    "xtick.minor.visible": True,
    "font.size": "22",
    "legend.fontsize": "13",
    "figure.titlesize": "medium",
    "xtick.major.size": "10",
    "xtick.minor.size": "5",
    "xtick.major.width": "1",
    "xtick.minor.width": "1",
    "ytick.major.size": "7",
    "ytick.minor.size": "3",
    "ytick.major.width": "1",
    "ytick.minor.width": "1",
    "xtick.direction": "in",
    "ytick.direction": "in",
    "axes.labelpad": "5.0",
    "xtick.labelsize": "17",
    "ytick.labelsize": "17",
    "axes.labelsize": "17",
    "xtick.major.pad": "5",
    "xtick.minor.pad": "5",
})
pl.rc("axes", linewidth=1.5)
pl.rcParams["legend.borderaxespad"] = "1.6"
# "seaborn-bright" was renamed "seaborn-v0_8-bright" in matplotlib 3.6 and
# removed in 3.8; fall back so importing this module never crashes.
try:
    pl.style.use("seaborn-bright")
except (OSError, ValueError, KeyError):
    try:
        pl.style.use("seaborn-v0_8-bright")
    except (OSError, ValueError, KeyError):
        pass  # style sheet unavailable; keep the explicit rcParams above
# These intentionally override anything the style sheet set.
pl.rcParams['text.usetex'] = True
pl.rcParams['font.family'] = 'serif'
pl.rcParams['font.serif'] = 'cm'
pl.rcParams['font.size'] = '20'
pl.rcParams['axes.labelsize'] = '20'
|
AnirbancosmoREPO_NAMElimpyPATH_START.@limpy_extracted@limpy-master@limpy@plotsettings.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/choroplethmapbox/marker/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Auto-generated plotly package initializer: eager imports for Python < 3.7
# and for static type checkers, lazy attribute-based imports otherwise to
# keep `import plotly` fast.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._line import Line
else:
    from _plotly_utils.importers import relative_import

    # relative_import installs module-level __getattr__/__dir__ (PEP 562)
    # so ._line.Line is only imported on first access.
    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._line.Line"])
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@choroplethmapbox@marker@__init__.py@.PATH_END.py
|
{
"filename": "test_easyguide.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/contrib/easyguide/test_easyguide.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import io
import warnings
import pytest
import torch
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.contrib.easyguide import EasyGuide, easy_guide
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide.initialization import init_to_mean, init_to_median
from pyro.optim import Adam
from pyro.util import ignore_jit_warnings
# The model from tutorial/source/easyguide.ipynb
def model(batch, subsample, full_size):
    """Gaussian random walk over time with Bernoulli emissions.

    ``batch`` is a length-num_time_steps list of observed tensors (or Nones
    to sample synthetic data); ``subsample``/``full_size`` configure the
    subsampled ``data`` plate. Returns the stacked observations.
    """
    with ignore_jit_warnings():
        # len() on a traced input triggers jit warnings; suppress them.
        num_time_steps = len(batch)
        result = [None] * num_time_steps
    # Global step-size of the latent walk.
    drift = pyro.sample("drift", dist.LogNormal(-1, 0.5))
    with pyro.plate("data", full_size, subsample=subsample):
        z = 0.0
        for t in range(num_time_steps):
            # Latent state follows a random walk; each step emits a
            # Bernoulli observation with logits given by the state.
            z = pyro.sample("state_{}".format(t), dist.Normal(z, drift))
            result[t] = pyro.sample(
                "obs_{}".format(t), dist.Bernoulli(logits=z), obs=batch[t]
            )
    return torch.stack(result)
def check_guide(guide):
    """Generate synthetic data from ``model`` and run two SVI epochs.

    Acts as a smoke test: any shape or naming error in ``guide`` surfaces
    as an exception during ``svi.step``.
    """
    full_size = 50
    batch_size = 20
    num_time_steps = 8
    pyro.set_rng_seed(123456789)
    data = model([None] * num_time_steps, torch.arange(full_size), full_size)
    assert data.shape == (num_time_steps, full_size)
    pyro.get_param_store().clear()
    pyro.set_rng_seed(123456789)
    svi = SVI(model, guide, Adam({"lr": 0.02}), Trace_ELBO())
    for _epoch in range(2):
        for start in range(0, full_size, batch_size):
            stop = min(full_size, start + batch_size)
            indices = torch.arange(start, stop)
            minibatch = data[:, start:stop]
            svi.step(minibatch, indices, full_size=full_size)
@pytest.mark.parametrize("init_fn", [None, init_to_mean, init_to_median])
def test_delta_smoke(init_fn):
    """Smoke-test a MAP (Delta) easy guide, optionally overriding init."""

    @easy_guide(model)
    def guide(self, batch, subsample, full_size):
        # MAP-estimate the global drift and all per-timestep states.
        self.map_estimate("drift")
        with self.plate("data", full_size, subsample=subsample):
            self.group(match="state_[0-9]*").map_estimate()

    if init_fn is not None:
        guide.init = init_fn
    check_guide(guide)
class PickleGuide(EasyGuide):
    """EasyGuide subclass used to exercise torch.save/torch.load round-trips.

    Mirrors the MAP guide in test_delta_smoke, but as a named class so the
    instance itself can be serialized.
    """

    def __init__(self, model):
        super().__init__(model)
        # Initialization strategy consumed by the EasyGuide machinery.
        self.init = init_to_median

    def guide(self, batch, subsample, full_size):
        self.map_estimate("drift")
        with self.plate("data", full_size, subsample=subsample):
            self.group(match="state_[0-9]*").map_estimate()
def test_serialize():
    """Round-trip a trained PickleGuide through torch.save/load and re-check it."""
    guide = PickleGuide(model)
    check_guide(guide)
    # Work around https://github.com/pytorch/pytorch/issues/27972
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UserWarning)
        buffer = io.BytesIO()
        torch.save(guide, buffer)
        buffer.seek(0)
        reloaded = torch.load(buffer)
    assert type(reloaded) == type(guide)
    assert dir(reloaded) == dir(guide)
    check_guide(guide)
    check_guide(reloaded)
@pytest.mark.parametrize("init_fn", [None, init_to_mean, init_to_median])
def test_subsample_smoke(init_fn):
    """Smoke-test a low-rank multivariate-normal guide under subsampling."""
    rank = 2

    @easy_guide(model)
    def guide(self, batch, subsample, full_size):
        self.map_estimate("drift")
        group = self.group(match="state_[0-9]*")
        # Covariance parameters shared across all datapoints.
        cov_diag = pyro.param(
            "state_cov_diag",
            lambda: torch.full(group.event_shape, 0.01),
            constraint=constraints.positive,
        )
        cov_factor = pyro.param(
            "state_cov_factor", lambda: torch.randn(group.event_shape + (rank,)) * 0.01
        )
        with self.plate("data", full_size, subsample=subsample):
            # Per-datapoint location; event_dim=1 so the plate subsamples
            # the leading (full_size) dimension only.
            loc = pyro.param(
                "state_loc",
                lambda: torch.full((full_size,) + group.event_shape, 0.5),
                event_dim=1,
            )
            group.sample(
                "states", dist.LowRankMultivariateNormal(loc, cov_factor, cov_diag)
            )

    if init_fn is not None:
        guide.init = init_fn
    check_guide(guide)
@pytest.mark.parametrize("init_fn", [None, init_to_mean, init_to_median])
def test_amortized_smoke(init_fn):
    """Smoke-test an amortized guide whose loc is produced by a small NN."""
    rank = 2

    @easy_guide(model)
    def guide(self, batch, subsample, full_size):
        num_time_steps, batch_size = batch.shape
        self.map_estimate("drift")
        group = self.group(match="state_[0-9]*")
        cov_diag = pyro.param(
            "state_cov_diag",
            lambda: torch.full(group.event_shape, 0.01),
            constraint=constraints.positive,
        )
        cov_factor = pyro.param(
            "state_cov_factor", lambda: torch.randn(group.event_shape + (rank,)) * 0.01
        )
        # Lazily build the amortization network on first call and register
        # it with pyro so its weights are optimized.
        if not hasattr(self, "nn"):
            self.nn = torch.nn.Linear(
                group.event_shape.numel(), group.event_shape.numel()
            )
            self.nn.weight.data.fill_(1.0 / num_time_steps)
            self.nn.bias.data.fill_(-0.5)
        pyro.module("state_nn", self.nn)
        with self.plate("data", full_size, subsample=subsample):
            # Amortized per-datapoint location: one row per datapoint.
            loc = self.nn(batch.t())
            group.sample(
                "states", dist.LowRankMultivariateNormal(loc, cov_factor, cov_diag)
            )

    if init_fn is not None:
        guide.init = init_fn
    check_guide(guide)
def test_overlapping_plates_ok():
    """A guide group may span sites whose extra plates sit *right* of the shared plate."""

    def model(batch, subsample, full_size):
        # This is ok because the shared plate is left of the nonshared plate.
        with pyro.plate("shared", full_size, subsample=subsample, dim=-2):
            x = pyro.sample("x", dist.Normal(0, 1))
            with pyro.plate("nonshared", 2, dim=-1):
                y = pyro.sample("y", dist.Normal(0, 1))
            xy = x + y.sum(-1, keepdim=True)
            return pyro.sample("z", dist.Normal(xy, 1), obs=batch)

    @easy_guide(model)
    def guide(self, batch, subsample, full_size):
        with self.plate("shared", full_size, subsample=subsample, dim=-2):
            group = self.group(match="x|y")
            loc = pyro.param(
                "guide_loc",
                torch.zeros((full_size, 1) + group.event_shape),
                event_dim=1,
            )
            scale = pyro.param(
                "guide_scale",
                torch.ones((full_size, 1) + group.event_shape),
                constraint=constraints.positive,
                event_dim=1,
            )
            group.sample("xy", dist.Normal(loc, scale).to_event(1))

    # Generate data.
    full_size = 5
    batch_size = 2
    data = model(None, torch.arange(full_size), full_size)
    assert data.shape == (full_size, 1)
    # Train for one epoch.
    pyro.get_param_store().clear()
    svi = SVI(model, guide, Adam({"lr": 0.02}), Trace_ELBO())
    beg = 0
    while beg < full_size:
        end = min(full_size, beg + batch_size)
        subsample = torch.arange(beg, end)
        batch = data[beg:end]
        beg = end
        svi.step(batch, subsample, full_size=full_size)
def test_overlapping_plates_error():
    """Grouping must fail when a site's extra plate sits *left* of the shared plate."""

    def model(batch, subsample, full_size):
        # This is an error because the shared plate is right of the nonshared plate.
        with pyro.plate("shared", full_size, subsample=subsample, dim=-1):
            x = pyro.sample("x", dist.Normal(0, 1))
            with pyro.plate("nonshared", 2, dim=-2):
                y = pyro.sample("y", dist.Normal(0, 1))
            xy = x + y.sum(-2)
            return pyro.sample("z", dist.Normal(xy, 1), obs=batch)

    @easy_guide(model)
    def guide(self, batch, subsample, full_size):
        with self.plate("shared", full_size, subsample=subsample, dim=-1):
            group = self.group(match="x|y")
            loc = pyro.param(
                "guide_loc", torch.zeros((full_size,) + group.event_shape), event_dim=1
            )
            scale = pyro.param(
                "guide_scale",
                torch.ones((full_size,) + group.event_shape),
                constraint=constraints.positive,
                event_dim=1,
            )
            group.sample("xy", dist.Normal(loc, scale).to_event(1))

    # Generate data.
    full_size = 5
    batch_size = 2
    data = model(None, torch.arange(full_size), full_size)
    assert data.shape == (full_size,)
    # Set up a single minibatch; the error should fire on the first step.
    pyro.get_param_store().clear()
    svi = SVI(model, guide, Adam({"lr": 0.02}), Trace_ELBO())
    beg = 0
    end = min(full_size, beg + batch_size)
    subsample = torch.arange(beg, end)
    batch = data[beg:end]
    beg = end
    with pytest.raises(ValueError, match="Group expects all per-site plates"):
        svi.step(batch, subsample, full_size=full_size)
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@contrib@easyguide@test_easyguide.py@.PATH_END.py
|
{
"filename": "UVLFs.py",
"repo_name": "JulianBMunoz/Zeus21",
"repo_path": "Zeus21_extracted/Zeus21-main/zeus21/UVLFs.py",
"type": "Python"
}
|
"""
Compute UVLFs given our SFR and HMF models.
Author: Julian B. Muñoz
UT Austin - June 2023
Edited by Hector Afonso G. Cruz
JHU - July 2024
"""
from . import cosmology
from . import constants
from .sfrd import SFR_II
from .cosmology import bias_Tinker
import numpy as np
from scipy.special import erf
def MUV_of_SFR(SFRtab, kappaUV):
    'returns MUV, uses SFR. Dust added later in loglike.'
    # UV luminosity from the star-formation rate via the conversion factor
    # kappaUV, then the AB magnitude of that luminosity.
    luminosity = SFRtab / kappaUV
    return 51.63 - 2.5 * np.log10(luminosity)
#and combine to get UVLF:
def UVLF_binned(Astro_Parameters,Cosmo_Parameters,HMF_interpolator, zcenter, zwidth, MUVcenters, MUVwidths, DUST_FLAG=True, RETURNBIAS = False):
'Binned UVLF in units of 1/Mpc^3/mag, for bins at <zcenter> with a Gaussian width zwidth, centered at MUV centers with tophat width MUVwidths. z width only in HMF since that varies the most rapidly. If flag RETURNBIAS set to true it returns number-avgd bias instead of UVLF, still have to divide by UVLF'
if(constants.NZ_TOINT>1):
DZ_TOINT = np.linspace(-np.sqrt(constants.NZ_TOINT/3.),np.sqrt(constants.NZ_TOINT/3.),constants.NZ_TOINT) #in sigmas around zcenter
else:
DZ_TOINT = np.array([0.0])
WEIGHTS_TOINT = np.exp(-DZ_TOINT**2/2.)/np.sum(np.exp(-DZ_TOINT**2/2.)) #assumed Gaussian in z, fair
SFRlist = SFR_II(Astro_Parameters,Cosmo_Parameters,HMF_interpolator, HMF_interpolator.Mhtab, zcenter, zcenter)
sigmaUV = Astro_Parameters.sigmaUV
if (constants.FLAG_RENORMALIZE_LUV == True): #lower the LUV (or SFR) to recover the true avg, not log-avg
SFRlist/= np.exp((np.log(10)/2.5*sigmaUV)**2/2.0)
MUVbarlist = MUV_of_SFR(SFRlist, Astro_Parameters._kappaUV) #avg for each Mh
MUVbarlist = np.fmin(MUVbarlist,constants._MAGMAX)
if(RETURNBIAS==True): # weight by bias
biasM = np.array([bias_Tinker(Cosmo_Parameters, HMF_interpolator.sigma_int(HMF_interpolator.Mhtab,zcenter+dz*zwidth)) for dz in DZ_TOINT])
else: # do not weight by bias
biasM = np.ones_like(WEIGHTS_TOINT)
HMFtab = np.array([HMF_interpolator.HMF_int(HMF_interpolator.Mhtab,zcenter+dz*zwidth) for dz in DZ_TOINT])
HMFcurr = np.sum(WEIGHTS_TOINT * HMFtab.T * biasM.T,axis=1)
#cannot directly 'dust' the theory since the properties of the IRX-beta relation are calibrated on observed MUV. Recursion instead:
currMUV = MUVbarlist
if(DUST_FLAG==True):
currMUV2 = np.ones_like(currMUV)
while(np.sum(np.abs((currMUV2-currMUV)/currMUV)) > 0.02):
currMUV = MUVbarlist + AUV(Astro_Parameters,zcenter,currMUV)
currMUV2 = currMUV
MUVcuthi = MUVcenters + MUVwidths/2.
MUVcutlo = MUVcenters - MUVwidths/2.
xhi = np.subtract.outer(MUVcuthi , currMUV)/(np.sqrt(2) * sigmaUV)
xlo = np.subtract.outer(MUVcutlo, currMUV )/(np.sqrt(2) * sigmaUV)
weights = (erf(xhi) - erf(xlo)).T/(2.0 * MUVwidths)
UVLF_filtered = np.trapz(weights.T * HMFcurr, HMF_interpolator.Mhtab, axis=-1)
return UVLF_filtered
#####Here the dust attenuation
def AUV(Astro_Parameters, z, MUV, HIGH_Z_DUST = True, _zmaxdata=8.0):
'Average attenuation A as a function of OBSERVED z and magnitude. If using on theory iterate until convergence. HIGH_Z_DUST is whether to do dust at higher z than 0 or set to 0. Fix at \beta(z=8) result if so'
betacurr = beta(z,MUV)
C0, C1 = Astro_Parameters.C0dust, Astro_Parameters.C1dust
sigmabeta = 0.34 #from Bouwens 2014
Auv = C0 + 0.2*np.log(10)*sigmabeta**2 * C1**2 + C1 * betacurr
Auv=Auv.T
if not (HIGH_Z_DUST):
Auv*=np.heaviside(_zmaxdata - z,0.5)
Auv=Auv.T
return np.fmax(Auv, 0.0)
def beta(z, MUV):
    'Color as a function of redshift and mag, interpolated from Bouwens 2013-14 data.'
    # Bouwens 2013-14 measurements: UV slope at the pivot magnitude -19.5
    # and its magnitude derivative, tabulated vs redshift.
    z_nodes = [2.5,3.8,5.0,5.9,7.0,8.0]
    beta_at_pivot = [-1.7,-1.85,-1.91,-2.00,-2.05,-2.13]
    slope_nodes = [-0.20,-0.11,-0.14,-0.20,-0.20,-0.15]
    pivot_mag = -19.5
    beta_floor = -2.33
    beta0 = np.interp(z, z_nodes, beta_at_pivot, left=beta_at_pivot[0], right=beta_at_pivot[-1])
    delta = (MUV - pivot_mag).T * np.interp(z, z_nodes, slope_nodes, left=slope_nodes[0], right=slope_nodes[-1])
    # Faint side (MUV > pivot): exponential approach to the floor value.
    faint = (beta0 - beta_floor) * np.exp(delta/(beta0 - beta_floor)) + beta_floor
    # Bright side (MUV < pivot): plain linear extrapolation.
    bright = delta + beta0
    # Heaviside with 0.5 at the pivot averages the two branches there.
    return faint.T * np.heaviside(MUV - pivot_mag, 0.5) + bright.T * np.heaviside(pivot_mag - MUV, 0.5)
|
JulianBMunozREPO_NAMEZeus21PATH_START.@Zeus21_extracted@Zeus21-main@zeus21@UVLFs.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermap/legendgrouptitle/font/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Auto-generated plotly validator package: eager imports for Python < 3.7
# and for static type checkers, PEP 562 lazy imports otherwise so the
# validator classes are only loaded on first attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._weight import WeightValidator
    from ._variant import VariantValidator
    from ._textcase import TextcaseValidator
    from ._style import StyleValidator
    from ._size import SizeValidator
    from ._shadow import ShadowValidator
    from ._lineposition import LinepositionValidator
    from ._family import FamilyValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._weight.WeightValidator",
            "._variant.VariantValidator",
            "._textcase.TextcaseValidator",
            "._style.StyleValidator",
            "._size.SizeValidator",
            "._shadow.ShadowValidator",
            "._lineposition.LinepositionValidator",
            "._family.FamilyValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermap@legendgrouptitle@font@__init__.py@.PATH_END.py
|
{
"filename": "plot_utils.py",
"repo_name": "nanograv/pint_pal",
"repo_path": "pint_pal_extracted/pint_pal-main/src/pint_pal/plot_utils.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 4 09:30:59 2020
@author: bshapiroalbert
Code since butchered by many timers.
"""
import numpy as np
import matplotlib.pyplot as plt
import copy
from astropy import log
import astropy.units as u
import yaml
import pint.toa as toa
import pint.models as model
import pint.fitter as fitter
import pint.utils as pu
import subprocess
from pint_pal.utils import *
import os
from pint_pal.timingconfiguration import TimingConfiguration
import pint_pal.lite_utils as lu
PACKAGE_DIR = os.path.dirname(__file__)
# Load the per-flag color/marker/label configuration shipped with the package.
with open(os.path.join(PACKAGE_DIR, "plot_settings.yaml"), "r") as cf:
    config = yaml.safe_load(cf)
# plot_settings.yaml now has a NANOGrav 20-yr specific colorscheme (ng20_c).
# If you want to go back to the old colors (or are doing DR3), change this to
# colorschemes["febe"] = config["febe_c"] AND markers["febe"] = config["febe_m"]
colorschemes, markers, labels = {}, {}, {}
colorschemes["obs"] = config["obs_c"]
colorschemes["pta"] = config["pta_c"]
colorschemes["febe"] = config["ng20_c"]
markers["obs"] = config["obs_m"]
markers["pta"] = config["pta_m"]
markers["febe"] = config["ng20_m"]
# Human-readable names keyed by flag value, used in plot legends.
labels = config["label_names"]
def call(x):
    """Run the command string *x* through the shell.

    WARNING: shell=True passes *x* to the system shell unescaped; only call
    this with trusted, internally-constructed command strings
    (shell-injection risk with any external input).
    """
    subprocess.call(x, shell=True)
def set_color_and_marker(colorby):
    """Return the (colorscheme, markerscheme) dicts for a colorby mode.

    Parameters
    ----------
    colorby : str
        'pta' (color by PTA), 'obs' (color by observatory), or
        'f' (color by frontend/backend pair).

    Returns
    -------
    tuple of dict
        (colorscheme, markerscheme) mapping flag values to colors/markers.

    Raises
    ------
    ValueError
        If *colorby* is not one of the supported modes (previously this
        fell through to an UnboundLocalError).
    """
    if colorby == "pta":
        return colorschemes["pta"], markers["pta"]
    elif colorby == "obs":
        # BUG FIX: the module-level dicts are keyed "obs", not
        # "observatories"; the old lookup raised KeyError.
        return colorschemes["obs"], markers["obs"]
    elif colorby == "f":
        return colorschemes["febe"], markers["febe"]
    raise ValueError("Unrecognized colorby option: %r" % (colorby,))
def plot_residuals_time(
    fitter,
    restype="postfit",
    colorby="f",
    plotsig=False,
    avg=False,
    whitened=False,
    save=False,
    legend=True,
    title=True,
    axs=None,
    mixed_ecorr=False,
    **kwargs,
):
    """
    Make a plot of the residuals vs. time
    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    restype ['string'] : Type of residuals, pre or post fit, to plot from fitter object. Options are:
        'prefit' - plot the prefit residuals.
        'postfit' - plot the postfit residuals (default)
        'both' - overplot both the pre and post-fit residuals.
    colorby ['string']: What to use to determine color/markers
        'pta' - color residuals by PTA
        'obs' - color residuals by telescope
        'f' - color residuals by frontend/backend pair (default; flag not used by all PTAs).
    plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals
        [default: False].
    avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False].
    whitened [boolean] : If True will compute and plot whitened residuals [default: False].
    save [boolean] : If True will save plot with the name "resid_v_mjd.png" Will add averaged/whitened
         as necessary [default: False].
    legend [boolean] : If False, will not print legend with plot [default: True].
    title [boolean] : If False, will not print plot title [default: True].
    axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
         larger figure [default: None].
    mixed_ecorr [boolean]: If True, allows avging with mixed ecorr/no ecorr TOAs [default: False].
    Optional Arguments:
    --------------------
    res [list/array] : List or array of residual values to plot. Will override values from fitter object.
    errs [list/array] : List or array of residual error values to plot. Will override values from fitter object.
    mjds [list/array] : List or array of TOA MJDs to plot. Will override values from toa object.
    colorby [list/array] : List or array of flag values per TOA. Will override values from toa object.
    figsize [tuple] : Size of the figure passed to matplotlib [default: (10,5)].
    fmt ['string'] : matplotlib format option for markers [default: ('x')]
    color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file]
    alpha [float] : matplotlib alpha options for plot points [default: 0.5]
    """
    # Wideband residuals live in residual_objs['toa'] and cannot be epoch
    # averaged.
    if fitter.is_wideband:
        NB = False
        if avg == True:
            raise ValueError(
                "Cannot epoch average wideband residuals, please change 'avg' to False."
            )
    else:
        NB = True
    # Compute the requested epoch-averaged residual dictionaries up front.
    if avg == True and restype == "prefit" and mixed_ecorr == True:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(
            fitter.toas, fitter.resids_init, use_noise_model=True
        )
    elif avg == True and restype == "postfit" and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
    elif avg == True and restype == "both" and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict_pre = no_ecorr_average(
            fitter.toas, fitter.resids_init, use_noise_model=True
        )
    elif avg == True and restype == "prefit" and mixed_ecorr == False:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
    elif avg == True and restype == "postfit" and mixed_ecorr == False:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
    elif avg == True and restype == "both" and mixed_ecorr == False:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
    # Get residuals
    if "res" in kwargs.keys():
        res = kwargs["res"]
    else:
        if restype == "prefit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    res = avg_dict["time_resids"].to(u.us)
                else:
                    res = fitter.resids_init.time_resids.to(u.us)
            else:
                res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
        elif restype == "postfit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                elif avg == True:
                    res = avg_dict["time_resids"].to(u.us)
                else:
                    res = fitter.resids.time_resids.to(u.us)
            else:
                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
        elif restype == "both":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                    res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    res = avg_dict["time_resids"].to(u.us)
                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                else:
                    res = fitter.resids.time_resids.to(u.us)
                    res_pre = fitter.resids_init.time_resids.to(u.us)
            else:
                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
                res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
        else:
            raise ValueError(
                "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."
                % (restype)
            )
    # Check if we want whitened residuals
    if whitened == True and ("res" not in kwargs.keys()):
        if avg == True and mixed_ecorr == True:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
                res_no_avg = whiten_resids(no_avg_dict, restype=restype)
            else:
                # NOTE(review): res gets the *prefit* whitened residuals and
                # res_pre the *postfit* ones here, the opposite of the
                # unwhitened convention above; likewise res_no_avg is built
                # from avg_dict_pre rather than no_avg_dict. Preserved as-is
                # from upstream — confirm intent before changing.
                res = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = whiten_resids(avg_dict, restype="postfit")
                res_pre = res_pre.to(u.us)
                res_no_avg = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre_no_avg = whiten_resids(avg_dict, restype="postfit")
                res_pre_no_avg = res_pre_no_avg.to(u.us)
            res = res.to(u.us)
            res_no_avg = res_no_avg.to(u.us)
        elif avg == True and mixed_ecorr == False:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = whiten_resids(avg_dict, restype="postfit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
        else:
            if restype != "both":
                res = whiten_resids(fitter, restype=restype)
            else:
                res = whiten_resids(fitter, restype="prefit")
                res_pre = whiten_resids(fitter, restype="postfit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
    # Get errors
    if "errs" in kwargs.keys():
        errs = kwargs["errs"]
    else:
        if restype == "prefit":
            if avg == True and mixed_ecorr == True:
                errs = avg_dict["errors"].to(u.us)
                errs_no_avg = no_avg_dict["errors"].to(u.us)
            elif avg == True and mixed_ecorr == False:
                errs = avg_dict["errors"].to(u.us)
            else:
                errs = fitter.toas.get_errors().to(u.us)
        elif restype == "postfit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict["errors"].to(u.us)
                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
        elif restype == "both":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict["errors"].to(u.us)
                    errs_pre = avg_dict_pre["errors"].to(u.us)
                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                    errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict["errors"].to(u.us)
                    errs_pre = avg_dict_pre["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
                    errs_pre = fitter.toas.get_errors().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
                errs_pre = fitter.toas.get_errors().to(u.us)
    # Get MJDs
    if "mjds" in kwargs.keys():
        mjds = kwargs["mjds"]
    else:
        mjds = fitter.toas.get_mjds().value
        if avg == True and mixed_ecorr == True:
            mjds = avg_dict["mjds"].value
            mjds_no_avg = no_avg_dict["mjds"].value
            years_no_avg = (mjds_no_avg - 51544.0) / 365.25 + 2000.0
        elif avg == True and mixed_ecorr == False:
            mjds = avg_dict["mjds"].value
    # Convert to years
    years = (mjds - 51544.0) / 365.25 + 2000.0
    # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr.
    # Create combined arrays
    if avg == True and mixed_ecorr == True:
        combo_res = np.hstack((res, res_no_avg))
        combo_errs = np.hstack((errs, errs_no_avg))
        combo_years = np.hstack((years, years_no_avg))
        if restype == "both":
            combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre))
            combo_res_pre = np.hstack((res_pre, res_pre_no_avg))
    # Get colorby flag values (obs, PTA, febe, etc.)
    if "colorby" in kwargs.keys():
        cb = kwargs["colorby"]
    else:
        cb = np.array(fitter.toas[colorby])
    # . Seems to run a little faster but not robust to obs?
    # cb = np.array(fitter.toas.get_flag_value(colorby)[0])
    if avg == True:
        # One flag value per averaged epoch (taken from its first TOA).
        avg_cb = []
        for iis in avg_dict["indices"]:
            avg_cb.append(cb[iis[0]])
        if mixed_ecorr == True:
            no_avg_cb = []
            for jjs in no_avg_dict["indices"]:
                no_avg_cb.append(cb[jjs])
            no_ecorr_cb = np.array(no_avg_cb)
        cb = np.array(avg_cb)
    # Get the set of unique flag values
    if avg == True and mixed_ecorr == True:
        cb = np.hstack((cb, no_ecorr_cb))
    CB = set(cb)
    colorscheme, markerscheme = set_color_and_marker(colorby)
    if "figsize" in kwargs.keys():
        figsize = kwargs["figsize"]
    else:
        figsize = (10, 5)
    if axs is None:
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_subplot(111)
    else:
        fig = plt.gcf()
        ax1 = axs
    # One errorbar series per unique flag value.
    for i, c in enumerate(CB):
        inds = np.where(cb == c)[0]
        if not inds.tolist():
            cb_label = ""
        else:
            cb_label = cb[inds][0]
        # Get plot preferences
        if "fmt" in kwargs.keys():
            mkr = kwargs["fmt"]
            # BUG FIX: mkr_pre was previously undefined on this path and
            # restype="both" raised UnboundLocalError when 'fmt' was given.
            mkr_pre = "."
        else:
            try:
                mkr = markers[cb_label]
                if restype == "both":
                    mkr_pre = "."
            except Exception:
                mkr = "x"
                log.log(1, "Color by Flag doesn't have a marker label!!")
        if "color" in kwargs.keys():
            clr = kwargs["color"]
        else:
            try:
                clr = colorscheme[cb_label]
            except Exception:
                clr = "k"
                log.log(1, "Color by Flag doesn't have a color!!")
        if "alpha" in kwargs.keys():
            alpha = kwargs["alpha"]
        else:
            alpha = 0.5
        if avg == True and mixed_ecorr == True:
            if plotsig:
                # BUG FIX: was `combo_res[inds] / combo_[inds]` (NameError).
                combo_sig = combo_res[inds] / combo_errs[inds]
                ax1.errorbar(
                    combo_years[inds],
                    combo_sig,
                    yerr=len(combo_errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds]
                    ax1.errorbar(
                        combo_years[inds],
                        combo_sig_pre,
                        yerr=len(combo_errs_pre[inds]) * [1],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
            else:
                ax1.errorbar(
                    combo_years[inds],
                    combo_res[inds],
                    yerr=combo_errs[inds],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    ax1.errorbar(
                        combo_years[inds],
                        # BUG FIX: was `combo_res_rpe` (NameError).
                        combo_res_pre[inds],
                        yerr=combo_errs_pre[inds],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
        else:
            if plotsig:
                sig = res[inds] / errs[inds]
                ax1.errorbar(
                    years[inds],
                    sig,
                    yerr=len(errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    sig_pre = res_pre[inds] / errs_pre[inds]
                    ax1.errorbar(
                        years[inds],
                        sig_pre,
                        yerr=len(errs_pre[inds]) * [1],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
            else:
                ax1.errorbar(
                    years[inds],
                    res[inds],
                    yerr=errs[inds],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    ax1.errorbar(
                        years[inds],
                        res_pre[inds],
                        yerr=errs_pre[inds],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
    # Set second axis
    ax1.set_xlabel(r"Year")
    ax1.grid(True)
    ax2 = ax1.twiny()
    # Mirror the year axis in MJD on the top spine.
    mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0
    mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0
    ax2.set_xlim(mjd0, mjd1)
    if plotsig:
        if avg and whitened:
            ax1.set_ylabel(
                "Average Residual/Uncertainty \n (Whitened)", multialignment="center"
            )
        elif avg and not whitened:
            ax1.set_ylabel("Average Residual/Uncertainty")
        elif whitened and not avg:
            ax1.set_ylabel(
                "Residual/Uncertainty \n (Whitened)", multialignment="center"
            )
        else:
            ax1.set_ylabel("Residual/Uncertainty")
    else:
        if avg and whitened:
            ax1.set_ylabel(
                r"Average Residual ($\mu$s)" + " \n (Whitened)",
                multialignment="center",
            )
        elif avg and not whitened:
            ax1.set_ylabel(r"Average Residual ($\mu$s)")
        elif whitened and not avg:
            ax1.set_ylabel(
                r"Residual ($\mu$s)" + " \n (Whitened)", multialignment="center"
            )
        else:
            ax1.set_ylabel(r"Residual ($\mu$s)")
    if legend:
        if len(CB) > 5:
            ncol = int(np.ceil(len(CB) / 2))
            y_offset = 1.15
        else:
            ncol = len(CB)
            y_offset = 1.0
        ax1.legend(
            loc="upper center",
            bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
            ncol=ncol,
        )
    if title:
        if len(CB) > 5:
            y_offset = 1.1
        else:
            y_offset = 1.0
        plt.title(
            "%s %s timing residuals" % (fitter.model.PSR.value, restype),
            y=y_offset + 1.0 / figsize[1],
        )
    if axs is None:
        plt.tight_layout()
    if save:
        ext = ""
        if whitened:
            ext += "_whitened"
        if avg:
            ext += "_averaged"
        if NB:
            ext += "_NB"
        else:
            ext += "_WB"
        if restype == "prefit":
            ext += "_prefit"
        elif restype == "postfit":
            ext += "_postfit"
        elif restype == "both":
            ext += "_pre_post_fit"
        plt.savefig("%s_resid_v_mjd%s.png" % (fitter.model.PSR.value, ext))
    if axs is None:
        # Define clickable points
        text = ax2.text(0, 0, "")
        # Define point highlight color
        stamp_color = "#FD9927"

        def onclick(event):
            # Get X and Y axis data
            xdata = mjds
            if plotsig:
                ydata = (res / errs).decompose().value
            else:
                ydata = res.value
            # Get x and y data from click
            xclick = event.xdata
            yclick = event.ydata
            # Calculate scaled distance, find closest point index
            d = np.sqrt(((xdata - xclick) / 10.0) ** 2 + (ydata - yclick) ** 2)
            ind_close = np.where(np.min(d) == d)[0]
            # highlight clicked point
            ax2.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color)
            # Print point info
            text.set_position((xdata[ind_close], ydata[ind_close]))
            if plotsig:
                text.set_text(
                    "TOA Params:\n MJD: %s \n Res/Err: %.2f \n Index: %s"
                    % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
                )
            else:
                text.set_text(
                    "TOA Params:\n MJD: %s \n Res: %.2f \n Index: %s"
                    % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
                )

        fig.canvas.mpl_connect("button_press_event", onclick)
    return
def plot_FD_delay(
fitter=None,
model_object=None,
save=False,
title=True,
axs=None,
legend=True,
show_bin=True,
**kwargs,
):
"""
Make a plot of frequency (MHz) vs the time delay (us) implied by FD parameters.
Z. Arzoumanian, The NANOGrav Nine-year Data Set: Observations, Arrival
Time Measurements, and Analysis of 37 Millisecond Pulsars, The
Astrophysical Journal, Volume 813, Issue 1, article id. 65, 31 pp.(2015).
Eq.(2):
FDdelay = sum(c_i * (log(obs_freq/1GHz))^i)
This can be run with EITHER a PINT fitter object OR PINT model object. If run with a model object, the user will need to specify which frequencies they would like to plot FD delays over.
Arguments
----------
fitter[object] : The PINT fitter object.
model[object] : The PINT model object. Can be used instead of fitter
save [boolean] : If True will save plot with the name "FD_delay.png"[default: False].
title [boolean] : If False, will not print plot title [default: True].
axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
larger figure [default: None].
Optional Arguments:
--------------------
freqs [list/array] : List or array of frequencies (MHz) to plot. Will override values from toa object.
show_bin [boolean] : Show the delay corresponding to 1 bin of the profile for comparison. (requires fitter)
figsize [tuple] : Size of the figure passed to matplotlib [default: (8,4)].
ls ['string'] : matplotlib format option for linestyle [default: ('-')]
color ['string'] : matplotlib color option for plot [default: green]
alpha [float] : matplotlib alpha options for error regions [default: 0.2]
loc ['string'] : matplotlib legend location [default: 'upper right'] Only used when legend = True
"""
# Make sure that either a fitter or model object has been specified
if fitter == None and model_object == None:
raise Exception("Need to specify either a fitter or model object")
# Get frequencies
if "freqs" in kwargs.keys():
freqs = kwargs["freqs"]
elif model_object is not None:
raise Exception(
"Using a PINT model object. Need to add list/array of frequencies to calculate FD delay over"
)
else:
freqs = fitter.toas.get_freqs().value
freqs = np.sort(freqs)
# Get FD delay in units of milliseconds as a function of frequency. This will eventually by available in PINT and become redundant. PINT version may need to be modified to allow for calculation of error regions
def get_FD_delay(pint_model_object, freqs):
FD_map = model.TimingModel.get_prefix_mapping(pint_model_object, "FD")
FD_names = list(FD_map.values())
FD_names.reverse()
FD_vals = []
FD_uncert = []
for i in FD_names:
FD_vals.append(
pint_model_object.get_params_dict(which="all", kind="value")[i]
)
FD_uncert.append(
pint_model_object.get_params_dict(which="all", kind="uncertainty")[i]
)
FD_vals.append(0.0)
FD_uncert.append(0.0)
FD_vals = np.array(FD_vals)
FD_uncert = np.array(FD_uncert)
delay = np.polyval(FD_vals, np.log10(freqs))
delta_delay_plus = np.polyval(FD_uncert + FD_vals, np.log10(freqs))
delta_delay_minus = np.polyval(FD_vals - FD_uncert, np.log10(freqs))
if len(FD_vals) - 1 > 1:
FD_phrase = "FD1-%s" % (len(FD_vals) - 1)
else:
FD_phrase = "FD1"
return delay * 1e6, delta_delay_plus * 1e6, delta_delay_minus * 1e6, FD_phrase
# Get FD params if fitter object is given
if fitter is not None:
# Check if the fitter object has FD parameters
try:
FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(
fitter.model, freqs * 1e-3
)
psr_name = fitter.model.PSR.value
"""For when new version of PINT is default on pint_pal
FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs)
"""
if show_bin:
nbins = fitter.toas["nbin"].astype(int).min()
P0 = 1 / fitter.model.F0.value
P0_bin_max = P0 / nbins
except:
print("No FD parameters in this model! Exitting...")
# Get FD params if model object is given
if model_object is not None:
# Check if the model object has FD parameters
try:
FD_delay, FD_delay_err_plus, FD_delay_err_minus, legend_text = get_FD_delay(
model_object, freqs * 1e-3
)
psr_name = model_object.PSR.value
"""For when new version of PINT is default on pint_pal
FD_delay = pint.models.frequency_dependent.FD.FD_delay(fitter.model,freqs)
"""
if show_bin:
print(
"show_bin requires a fitter object, cannot be used with the model alone"
)
show_bin = False
except:
print("No FD parameters in this model! Exitting...")
# Get plotting preferences.
if "figsize" in kwargs.keys():
figsize = kwargs["figsize"]
else:
figsize = (8, 4)
if axs == None:
fig = plt.figure(figsize=figsize)
ax1 = fig.add_subplot(111)
else:
fig = plt.gcf()
ax1 = axs
if "ls" in kwargs.keys():
linestyle = kwargs["ls"]
else:
linestyle = "-"
if "color" in kwargs.keys():
clr = kwargs["color"]
else:
clr = "green"
if "alpha" in kwargs.keys():
alpha = kwargs["alpha"]
else:
alpha = 0.2
if "loc" in kwargs.keys():
loc = kwargs["loc"]
else:
loc = "upper right"
# Plot frequency (MHz) vs delay (microseconds)
ax1.plot(freqs, FD_delay, label=legend_text, color=clr, ls=linestyle)
ax1.fill_between(
freqs, FD_delay_err_plus, FD_delay_err_minus, color=clr, alpha=alpha
)
if show_bin:
if (FD_delay > 0).any():
ax1.axhline(P0_bin_max * 1e6, label="1 profile bin")
if (FD_delay < 0).any():
ax1.axhline(-P0_bin_max * 1e6, label="1 profile bin")
ax1.set_xlabel("Frequency (MHz)")
ax1.set_ylabel("Delay ($\mu$s)")
if title:
ax1.set_title("%s FD Delay" % psr_name)
if legend:
ax1.legend(loc=loc)
if axs == None:
plt.tight_layout()
if save:
plt.savefig("%s_fd_delay.png" % psr_name)
return
def plot_residuals_freq(
    fitter,
    restype="postfit",
    colorby="f",
    plotsig=False,
    avg=False,
    mixed_ecorr=False,
    whitened=False,
    save=False,
    legend=True,
    title=True,
    axs=None,
    **kwargs,
):
    """
    Make a plot of the residuals vs. frequency.

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    restype ['string'] : Type of residuals, pre or post fit, to plot from fitter object. Options are:
        'prefit' - plot the prefit residuals.
        'postfit' - plot the postfit residuals (default)
        'both' - overplot both the pre and post-fit residuals.
    colorby ['string']: What to use to determine color/markers
        'pta' - color residuals by PTA
        'obs' - color residuals by telescope
        'f' - color residuals by frontend/backend pair (default; flag not used by all PTAs).
    plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals
        [default: False].
    avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False].
    mixed_ecorr [boolean]: If True, supports combining already-epoch-averaged residuals with epoch-averaging [default: False]
    whitened [boolean] : If True will compute and plot whitened residuals [default: False].
    save [boolean] : If True will save plot with the name "resid_v_freq.png" Will add averaged/whitened
        as necessary [default: False].
    legend [boolean] : If False, will not print legend with plot [default: True].
    title [boolean] : If False, will not print plot title [default: True].
    axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
        larger figure [default: None].

    Optional Arguments:
    --------------------
    res [list/array] : List or array of residual values to plot. Will override values from fitter object.
    errs [list/array] : List or array of residual error values to plot. Will override values from fitter object.
    freqs [list/array] : List or array of frequencies (MHz) to plot. Will override values from toa object.
    rcvr_bcknds[list/array] : List or array of TOA receiver-backend combinations. Will override values from toa object.
    figsize [tuple] : Size of the figure passed to matplotlib [default: (10,4)].
    fmt ['string'] : matplotlib format option for markers [default: ('x')]
    color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file]
    alpha [float] : matplotlib alpha options for plot points [default: 0.5]
    """
    # Check if wideband; epoch averaging is only defined for narrowband residuals.
    if fitter.is_wideband:
        NB = False
        if avg == True:
            raise ValueError(
                "Cannot epoch average wideband residuals, please change 'avg' to False."
            )
    else:
        NB = True
    # Check if want epoch averaged residuals. With mixed_ecorr, TOAs without
    # ECORR are averaged separately via no_ecorr_average() and combined later.
    if avg == True and restype == "prefit" and mixed_ecorr == True:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(
            fitter.toas, fitter.resids_init, use_noise_model=True
        )
    elif avg == True and restype == "postfit" and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
    elif avg == True and restype == "both" and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict_pre = no_ecorr_average(
            fitter.toas, fitter.resids_init, use_noise_model=True
        )
    elif avg == True and restype == "prefit" and mixed_ecorr == False:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
    elif avg == True and restype == "postfit" and mixed_ecorr == False:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
    elif avg == True and restype == "both" and mixed_ecorr == False:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
    # Get residuals. Convention throughout: for restype "both", `res` holds the
    # postfit values and `res_pre` holds the prefit values.
    if "res" in kwargs.keys():
        res = kwargs["res"]
    else:
        if restype == "prefit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    res = avg_dict["time_resids"].to(u.us)
                else:
                    res = fitter.resids_init.time_resids.to(u.us)
            else:
                res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
        elif restype == "postfit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                elif avg == True:
                    res = avg_dict["time_resids"].to(u.us)
                else:
                    res = fitter.resids.time_resids.to(u.us)
            else:
                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
        elif restype == "both":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                    res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    res = avg_dict["time_resids"].to(u.us)
                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                else:
                    res = fitter.resids.time_resids.to(u.us)
                    res_pre = fitter.resids_init.time_resids.to(u.us)
            else:
                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
                res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
        else:
            raise ValueError(
                "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."
                % (restype)
            )
    # Check if we want whitened residuals.
    # BUGFIX: the "both" branches previously swapped prefit/postfit (assigning
    # whitened prefit residuals to `res` and postfit to `res_pre`, the opposite
    # of every other branch), and the mixed-ecorr branch passed the averaged
    # dictionaries where the un-averaged ones were required.
    if whitened == True and ("res" not in kwargs.keys()):
        if avg == True and mixed_ecorr == True:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
                res_no_avg = whiten_resids(no_avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict, restype="postfit")
                res_pre = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = res_pre.to(u.us)
                res_no_avg = whiten_resids(no_avg_dict, restype="postfit")
                res_pre_no_avg = whiten_resids(no_avg_dict_pre, restype="prefit")
                res_pre_no_avg = res_pre_no_avg.to(u.us)
            res = res.to(u.us)
            res_no_avg = res_no_avg.to(u.us)
        elif avg == True and mixed_ecorr == False:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict, restype="postfit")
                res_pre = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
        else:
            if restype != "both":
                res = whiten_resids(fitter, restype=restype)
            else:
                res = whiten_resids(fitter, restype="postfit")
                res_pre = whiten_resids(fitter, restype="prefit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
    # Get errors. NOTE(review): the prefit branch does not distinguish NB/WB
    # and always uses fitter.toas.get_errors() for the non-averaged case;
    # presumably raw TOA errors are the intended prefit uncertainties — confirm.
    if "errs" in kwargs.keys():
        errs = kwargs["errs"]
    else:
        if restype == "prefit":
            if avg == True and mixed_ecorr == True:
                errs = avg_dict["errors"].to(u.us)
                errs_no_avg = no_avg_dict["errors"].to(u.us)
            elif avg == True and mixed_ecorr == False:
                errs = avg_dict["errors"].to(u.us)
            else:
                errs = fitter.toas.get_errors().to(u.us)
        elif restype == "postfit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict["errors"].to(u.us)
                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
        elif restype == "both":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict["errors"].to(u.us)
                    errs_pre = avg_dict_pre["errors"].to(u.us)
                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                    errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict["errors"].to(u.us)
                    errs_pre = avg_dict_pre["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
                    errs_pre = fitter.toas.get_errors().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
                errs_pre = fitter.toas.get_errors().to(u.us)
    # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same time if we have mixed ecorr.
    # Create combined arrays
    if avg == True and mixed_ecorr == True:
        combo_res = np.hstack((res, res_no_avg))
        combo_errs = np.hstack((errs, errs_no_avg))
        if restype == "both":
            combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre))
            # BUGFIX: previously referenced undefined name `res_no_avg_pre`
            # (the variable is `res_pre_no_avg`), raising NameError here.
            combo_res_pre = np.hstack((res_pre, res_pre_no_avg))
    # Get freqs
    if "freqs" in kwargs.keys():
        freqs = kwargs["freqs"]
    else:
        freqs = fitter.toas.get_freqs().value
    # Get colorby flag values (obs, PTA, febe, etc.)
    # NOTE(review): "colorby" can never appear in kwargs since it is a named
    # parameter; this branch is effectively dead but kept for safety.
    if "colorby" in kwargs.keys():
        cb = kwargs["colorby"]
    else:
        cb = np.array(fitter.toas[colorby])
    # . Seems to run a little faster but not robust to obs?
    # cb = np.array(fitter.toas.get_flag_value(colorby)[0])
    if avg == True:
        # Collapse flag values to one per averaged epoch (first TOA's flag).
        avg_cb = []
        for iis in avg_dict["indices"]:
            avg_cb.append(cb[iis[0]])
        if mixed_ecorr == True:
            no_avg_cb = []
            for jjs in no_avg_dict["indices"]:
                no_avg_cb.append(cb[jjs])
            no_ecorr_cb = np.array(no_avg_cb)
        cb = np.array(avg_cb)
    # Get the set of unique flag values
    if avg == True and mixed_ecorr == True:
        cb = np.hstack((cb, no_ecorr_cb))
    CB = set(cb)
    colorscheme, markerscheme = set_color_and_marker(colorby)
    if "figsize" in kwargs.keys():
        figsize = kwargs["figsize"]
    else:
        figsize = (10, 4)
    if axs == None:
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_subplot(111)
    else:
        fig = plt.gcf()
        ax1 = axs
    # Plot each flag value as its own errorbar series so it gets its own
    # color/marker and legend entry.
    for i, c in enumerate(CB):
        inds = np.where(cb == c)[0]
        if not inds.tolist():
            cb_label = ""
        else:
            cb_label = cb[inds][0]
        # Get plot preferences
        if "fmt" in kwargs.keys():
            mkr = kwargs["fmt"]
        else:
            try:
                mkr = markerscheme[cb_label]
                if restype == "both":
                    mkr_pre = "."
            except Exception:
                mkr = "x"
                if restype == "both":
                    mkr_pre = "."
                log.log(1, "Color by Flag doesn't have a marker label!!")
        if "color" in kwargs.keys():
            clr = kwargs["color"]
        else:
            try:
                clr = colorscheme[cb_label]
            except Exception:
                clr = "k"
                log.log(1, "Color by Flag doesn't have a color!!")
        if "alpha" in kwargs.keys():
            alpha = kwargs["alpha"]
        else:
            alpha = 0.5
        if avg and mixed_ecorr:
            if plotsig:
                # Plot residual/uncertainty with unit error bars.
                combo_sig = combo_res[inds] / combo_errs[inds]
                ax1.errorbar(
                    freqs[inds],
                    combo_sig,
                    yerr=len(combo_errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds]
                    ax1.errorbar(
                        freqs[inds],
                        combo_sig_pre,
                        yerr=len(combo_errs_pre[inds]) * [1],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
            else:
                ax1.errorbar(
                    freqs[inds],
                    combo_res[inds],
                    yerr=combo_errs[inds],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    ax1.errorbar(
                        freqs[inds],
                        combo_res_pre[inds],
                        yerr=combo_errs_pre[inds],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
        else:
            if plotsig:
                sig = res[inds] / errs[inds]
                ax1.errorbar(
                    freqs[inds],
                    sig,
                    yerr=len(errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    sig_pre = res_pre[inds] / errs_pre[inds]
                    ax1.errorbar(
                        freqs[inds],
                        sig_pre,
                        yerr=len(errs_pre[inds]) * [1],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
            else:
                ax1.errorbar(
                    freqs[inds],
                    res[inds],
                    yerr=errs[inds],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                    picker=True,
                )
                if restype == "both":
                    ax1.errorbar(
                        freqs[inds],
                        res_pre[inds],
                        yerr=errs_pre[inds],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                        picker=True,
                    )
    # Set axis
    ax1.set_xlabel(r"Frequency (MHz)")
    ax1.grid(True)
    if plotsig:
        if avg and whitened:
            ax1.set_ylabel(
                "Average Residual/Uncertainty \n (Whitened)", multialignment="center"
            )
        elif avg and not whitened:
            ax1.set_ylabel("Average Residual/Uncertainty")
        elif whitened and not avg:
            ax1.set_ylabel(
                "Residual/Uncertainty \n (Whitened)", multialignment="center"
            )
        else:
            ax1.set_ylabel("Residual/Uncertainty")
    else:
        if avg and whitened:
            ax1.set_ylabel(
                "Average Residual ($\mu$s) \n (Whitened)", multialignment="center"
            )
        elif avg and not whitened:
            ax1.set_ylabel("Average Residual ($\mu$s)")
        elif whitened and not avg:
            ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center")
        else:
            ax1.set_ylabel("Residual ($\mu$s)")
    if legend:
        # Wide legends get split over two rows and pushed higher above the axes.
        if len(CB) > 5:
            ncol = int(np.ceil(len(CB) / 2))
            y_offset = 1.15
        else:
            ncol = len(CB)
            y_offset = 1.0
        ax1.legend(
            loc="upper center",
            bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
            ncol=ncol,
        )
    if title:
        if len(CB) > 5:
            y_offset = 1.1
        else:
            y_offset = 1.0
        plt.title(
            "%s %s frequency residuals" % (fitter.model.PSR.value, restype),
            y=y_offset + 1.0 / figsize[1],
        )
    if axs == None:
        plt.tight_layout()
    if save:
        # Build a filename suffix describing what was plotted.
        ext = ""
        if whitened:
            ext += "_whitened"
        if avg:
            ext += "_averaged"
        if NB:
            ext += "_NB"
        else:
            ext += "_WB"
        if restype == "prefit":
            ext += "_prefit"
        elif restype == "postfit":
            ext += "_postfit"
        elif restype == "both":
            ext += "_pre_post_fit"
        plt.savefig("%s_resid_v_freq%s.png" % (fitter.model.PSR.value, ext))
    if axs == None:
        # Define clickable points so a TOA can be identified interactively.
        text = ax1.text(0, 0, "")
        stamp_color = "#FD9927"

        def onclick(event):
            # Get X and Y axis data
            xdata = freqs
            if plotsig:
                ydata = (res / errs).decompose().value
            else:
                ydata = res.value
            # Get x and y data from click
            xclick = event.xdata
            yclick = event.ydata
            # Calculate scaled distance, find closest point index
            d = np.sqrt(((xdata - xclick) / 10.0) ** 2 + (ydata - yclick) ** 2)
            ind_close = np.where(np.min(d) == d)[0]
            # highlight clicked point
            ax1.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color)
            # Print point info
            text.set_position((xdata[ind_close], ydata[ind_close]))
            if plotsig:
                text.set_text(
                    "TOA Params:\n Frequency: %s \n Res/Err: %.2f \n Index: %s"
                    % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
                )
            else:
                text.set_text(
                    "TOA Params:\n Frequency: %s \n Res: %.2f \n Index: %s"
                    % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
                )

        fig.canvas.mpl_connect("button_press_event", onclick)
    return
def plot_dmx_time(
    fitter,
    savedmx=False,
    save=False,
    legend=True,
    axs=None,
    title=True,
    compare=False,
    **kwargs,
):
    """
    Make a plot of DMX vs. time.

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    savedmx [boolean] : If True will save dmxparse values to output file with `"psrname"_dmxparse.nb/wb/.out`.
    save [boolean] : If True will save plot with the name "dmx_v_time.png" Will add NB/WB/compare
        as necessary [default: False].
    legend [boolean] : If False, will not print legend with plot [default: True].
    title [boolean] : If False, will not print plot title [default: True].
    axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
        larger figure [default: None].
    compare [boolean] : If True, overplot the DMX values from the other (NB/WB) dmxparse output file
        found on disk [default: False].

    Optional Arguments:
    --------------------
    dmx [list/array] : List or array of DMX values to plot. Will override values from fitter object.
    errs [list/array] : List or array of DMX error values to plot. Will override values from fitter object.
    mjds [list/array] : List or array of DMX mjd epoch values to plot. Will override values from fitter object.
    figsize [tuple] : Size of the figure passed to matplotlib [default: (10,4)].
    fmt ['string'] : matplotlib format option for markers [default: ('x')]
    color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file]
    alpha [float] : matplotlib alpha options for plot points [default: 0.5]
    """
    # Get pulsar name
    psrname = fitter.model.PSR.value
    # Check if wideband; this determines which dmxparse output filename is used.
    if fitter.is_wideband:
        NB = False
        dmxname = "%s_dmxparse.wb.out" % (psrname)
    else:
        NB = True
        dmxname = "%s_dmxparse.nb.out" % (psrname)
    # Get plotting dmx and error values for WB
    if "dmx" in kwargs.keys():
        # NOTE(review): when 'dmx' is supplied, 'errs' and 'mjds' must also be
        # supplied (and savedmx left False), since dmxparse is not run here.
        DMXs = kwargs["dmx"]
    else:
        # get dmx dictionary from pint dmxparse function
        dmx_dict = pu.dmxparse(fitter, save="dmxparse.out")
        DMXs = dmx_dict["dmxs"].value
        DMX_vErrs = dmx_dict["dmx_verrs"].value
        DMX_center_MJD = dmx_dict["dmxeps"].value
        DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0
    # move file name
    if savedmx:
        os.rename("dmxparse.out", dmxname)
    # Double check/overwrite errors if necessary
    if "errs" in kwargs.keys():
        DMX_vErrs = kwargs["errs"]
    # Double check/overwrite dmx mjd epochs if necessary
    if "mjds" in kwargs.keys():
        DMX_center_MJD = kwargs["mjds"]
        DMX_center_Year = (DMX_center_MJD - 51544.0) / 365.25 + 2000.0
    # If we want to compare WB to NB, we need to look for the right output file
    if compare == True:
        # Look for other dmx file. Note: the nb_dmx* variables hold whichever
        # data set is being compared against (WB values when this fit is NB).
        if NB:
            # log.log(1, "Searching for file: %s_dmxparse.wb.out" % (psrname))
            if not os.path.isfile("%s_dmxparse.wb.out" % (psrname)):
                raise RuntimeError("Cannot find Wideband DMX parse output file.")
            else:
                # Get the values from the DMX parse file
                dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt(
                    "%s_dmxparse.wb.out" % (psrname),
                    unpack=True,
                    usecols=(0, 1, 2, 3, 4),
                )
        else:
            # log.log(1, "Searching for file: %s_dmxparse.nb.out" % (psrname))
            if not os.path.isfile("%s_dmxparse.nb.out" % (psrname)):
                raise RuntimeError("Cannot find Narrowband DMX parse output file.")
            else:
                # Get the values from the DMX parse file
                dmx_epochs, nb_dmx, nb_dmx_var, nb_dmx_r1, nb_dmx_r2 = np.loadtxt(
                    "%s_dmxparse.nb.out" % (psrname),
                    unpack=True,
                    usecols=(0, 1, 2, 3, 4),
                )
        dmx_mid_yr = (dmx_epochs - 51544.0) / 365.25 + 2000.0
    # BUGFIX: figsize is needed by the title block below regardless of whether
    # a new figure is created, so resolve it before the axs check (previously
    # it was only assigned when axs was None, raising NameError otherwise).
    if "figsize" in kwargs.keys():
        figsize = kwargs["figsize"]
    else:
        figsize = (10, 4)
    # Define the plotting function
    if axs == None:
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_subplot(111)
    else:
        ax1 = axs
    # Get plot preferences
    if "fmt" in kwargs.keys():
        mkr = kwargs["fmt"]
    else:
        mkr = "s"
        if compare:
            mkr_nb = "o"
    if "color" in kwargs.keys():
        clr = kwargs["color"]
    else:
        clr = "gray"
        if compare:
            clr_nb = "k"
    if "alpha" in kwargs.keys():
        alpha = kwargs["alpha"]
    else:
        alpha = 1.0
    # Now actually plot (DMX scaled to 10^-3 pc cm^-3)
    if NB and not compare:
        ax1.errorbar(
            DMX_center_Year,
            DMXs * 10**3,
            yerr=DMX_vErrs * 10**3,
            fmt=".",
            c=clr,
            marker=mkr,
            label="Narrowband",
        )
    elif not NB and not compare:
        ax1.errorbar(
            DMX_center_Year,
            DMXs * 10**3,
            yerr=DMX_vErrs * 10**3,
            fmt=".",
            c=clr,
            marker=mkr,
            label="Wideband",
        )
    elif compare:
        if NB:
            ax1.errorbar(
                DMX_center_Year,
                DMXs * 10**3,
                yerr=DMX_vErrs * 10**3,
                fmt=".",
                c=clr,
                marker=mkr,
                label="Narrowband",
            )
            ax1.errorbar(
                dmx_mid_yr,
                nb_dmx * 10**3,
                yerr=nb_dmx_var * 10**3,
                fmt=".",
                color=clr_nb,
                marker=mkr_nb,
                label="Wideband",
            )
        else:
            ax1.errorbar(
                DMX_center_Year,
                DMXs * 10**3,
                yerr=DMX_vErrs * 10**3,
                fmt=".",
                c=clr,
                marker=mkr,
                label="Wideband",
            )
            ax1.errorbar(
                dmx_mid_yr,
                nb_dmx * 10**3,
                yerr=nb_dmx_var * 10**3,
                fmt=".",
                color=clr_nb,
                marker=mkr_nb,
                label="Narrowband",
            )
    # Set second (MJD) axis derived from the year axis limits.
    ax1.set_xlabel(r"Year")
    ax1.grid(True)
    ax2 = ax1.twiny()
    mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0
    mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0
    ax2.set_xlim(mjd0, mjd1)
    ax1.set_ylabel(r"DMX ($10^{-3}$ pc cm$^{-3}$)")
    if legend:
        ax1.legend(loc="best")
    if title:
        if NB and not compare:
            plt.title("%s narrowband dmx" % (psrname), y=1.0 + 1.0 / figsize[1])
        elif not NB and not compare:
            plt.title("%s wideband dmx" % (psrname), y=1.0 + 1.0 / figsize[1])
        elif compare:
            plt.title(
                "%s narrowband and wideband dmx" % (psrname), y=1.0 + 1.0 / figsize[1]
            )
    if axs == None:
        plt.tight_layout()
    if save:
        ext = ""
        if NB and not compare:
            ext += "_NB"
        if not NB and not compare:
            ext += "_WB"
        if compare:
            ext += "_NB_WB_compare"
        plt.savefig("%s_dmx_v_time%s.png" % (psrname, ext))
    if axs == None:
        # Define clickable points so a DMX epoch can be identified interactively.
        text = ax1.text(0, 0, "")
        # Define color for highlighting points
        stamp_color = "#FD9927"

        def onclick(event):
            # Get X and Y axis data
            xdata = DMX_center_Year
            ydata = DMXs * 10**3
            # Get x and y data from click
            xclick = event.xdata
            yclick = event.ydata
            # Calculate scaled distance, find closest point index
            d = np.sqrt(((xdata - xclick) / 1000.0) ** 2 + (ydata - yclick) ** 2)
            ind_close = np.where(np.min(d) == d)[0]
            # highlight clicked point
            ax2.scatter(xdata[ind_close], ydata[ind_close], marker="s", c=stamp_color)
            # Print point info
            text.set_position((xdata[ind_close], ydata[ind_close]))
            text.set_text(
                "DMX Params:\n MJD: %s \n DMX: %.2f \n Index: %s"
                % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
            )

        fig.canvas.mpl_connect("button_press_event", onclick)
    return
def plot_dmxout(dmxout_files, labels, psrname=None, outfile=None, model=None):
    """Make simple dmx vs. time plot with dmxout file(s) as input

    Each dmxout file is a whitespace-separated table whose first five columns
    are (epoch MJD, DMX value, DMX error, window start r1, window end r2).

    Parameters
    ==========
    dmxout_files: list/str
        list of dmxout files to plot (or str if only one)
    labels: list/str
        list of labels for dmx timeseries (or str if only one)
    psrname: str, optional
        pulsar name
    outfile: str, optional
        save figure and write to outfile if set
    model: `pint.model.TimingModel` object, optional
        if provided, plot excess DM based on a basic SWM (NE_SW = 10.0)
    Returns
    =======
    dmxDict: dictionary
        dmxout information (mjd, val, err, r1, r2) for each label
    """
    from astropy.time import Time
    # Accept a single file/label as a plain string for convenience.
    if isinstance(dmxout_files, str):
        dmxout_files = [dmxout_files]
    if isinstance(labels, str):
        labels = [labels]
    figsize = (10, 4)
    fig = plt.figure(figsize=figsize)
    ax1 = fig.add_subplot(111)
    # ax1 carries the year axis; ax2 (twiny) carries the MJD axis the data
    # are actually plotted against.
    ax1.set_xlabel(r"Year")
    ax1.set_ylabel(r"DMX ($10^{-3}$ pc cm$^{-3}$)")
    ax1.grid(True)
    ax2 = ax1.twiny()
    ax2.set_xlabel("MJD")
    dmxDict = {}
    for ii, (df, lab) in enumerate(zip(dmxout_files, labels)):
        dmxmjd, dmxval, dmxerr, dmxr1, dmxr2 = np.loadtxt(
            df, unpack=True, usecols=range(0, 5)
        )
        idmxDict = {
            "mjd": dmxmjd,
            "val": dmxval,
            "err": dmxerr,
            "r1": dmxr1,
            "r2": dmxr2,
        }
        # Plot in units of 10^-3 pc cm^-3 (matches the y-axis label).
        ax2.errorbar(
            dmxmjd,
            dmxval * 10**3,
            yerr=dmxerr * 10**3,
            label=lab,
            marker="o",
            ls="",
            markerfacecolor="none",
        )
        dmxDict[lab] = idmxDict
    # set ax1 lims (year) based on ax2 lims (mjd)
    mjd_xlo, mjd_xhi = ax2.get_xlim()
    dy_xlo = Time(mjd_xlo, format="mjd").decimalyear
    dy_xhi = Time(mjd_xhi, format="mjd").decimalyear
    ax1.set_xlim(dy_xlo, dy_xhi)
    # capture ylim (so the optional excess-DM trace below cannot rescale it)
    orig_ylim = ax2.get_ylim()
    if psrname:
        # Watermark the pulsar name in the lower-right corner.
        ax1.text(
            0.975,
            0.05,
            psrname,
            transform=ax1.transAxes,
            size=18,
            c="lightgray",
            horizontalalignment="right",
            verticalalignment="bottom",
        )
    if model:
        from pint.simulation import make_fake_toas_fromMJDs
        from pint_pal.lite_utils import remove_noise
        # Daily sampling across the span of the *last* file read in the loop
        # above — NOTE(review): with multiple input files this uses only the
        # final file's MJD range; confirm that is intended.
        fake_mjds = np.linspace(
            np.min(dmxmjd), np.max(dmxmjd), num=int(np.max(dmxmjd) - np.min(dmxmjd))
        )
        fake_mjdTime = Time(fake_mjds, format="mjd")
        # copy the model and add sw component
        mo_swm = copy.deepcopy(model)
        remove_noise(mo_swm)  # Not necessary here and avoids lots of warnings
        mo_swm.NE_SW.value = 10.0
        # generate fake TOAs and calculate excess DM due to solar wind
        fake_toas = make_fake_toas_fromMJDs(fake_mjdTime, mo_swm)
        sun_dm_delays = mo_swm.solar_wind_dm(fake_toas) * 10**3  # same scaling as above
        ax2.plot(fake_mjds, sun_dm_delays, c="lightgray", label="Excess DM")
        # don't change ylim based on excess dm trace, if plotted
        ax2.set_ylim(orig_ylim)
    ax2.legend(loc="best")
    plt.tight_layout()
    if outfile:
        plt.savefig(outfile)
    return dmxDict
def plot_dmx_diffs_nbwb(dmxDict, show_missing=True, psrname=None, outfile=None):
    """Plot differences between simultaneous narrowband and wideband DMX values.

    Takes the dmxDict produced by plot_dmxout() (which must contain both 'nb'
    and 'wb' entries) and differences the DMX measurements at epochs common to
    the two data sets.

    Parameters
    ==========
    dmxDict: dictionary
        dmxout information (mjd, val, err, r1, r2) for nb, wb, respectively (check both exist)
    show_missing: bool, optional
        if one value is missing (nb/wb), indicate the epoch and which one
    psrname: str, optional
        pulsar name
    outfile: str, optional
        save figure and write to outfile if set

    Returns
    =======
    None
    """
    from astropy.time import Time

    # should check that both nb/wb entries exist first...
    mjds_nb = dmxDict["nb"]["mjd"]
    mjds_wb = dmxDict["wb"]["mjd"]
    all_epochs = set(list(mjds_nb) + list(mjds_wb))
    # May need slightly more curation if nb/wb mjds are *almost* identical
    wbonly = all_epochs - set(mjds_nb)
    nbonly = all_epochs - set(mjds_wb)
    both = set(mjds_nb).intersection(set(mjds_wb))
    # Pair up the two series: for every common epoch, take the first matching
    # index in each mjd array so nb/wb values can be differenced elementwise.
    nb_common_inds = np.array([np.flatnonzero(mjds_nb == epoch)[0] for epoch in both])
    wb_common_inds = np.array([np.flatnonzero(mjds_wb == epoch)[0] for epoch in both])
    vals_nb, errs_nb = dmxDict["nb"]["val"], dmxDict["nb"]["err"]
    vals_wb, errs_wb = dmxDict["wb"]["val"], dmxDict["wb"]["err"]
    # propagate errors as quadrature sum, though Michael thinks geometric mean might be better?
    nbwb_dmx_diffs = vals_nb[nb_common_inds] - vals_wb[wb_common_inds]
    nbwb_err_prop = np.sqrt(errs_nb[nb_common_inds] ** 2 + errs_wb[wb_common_inds] ** 2)
    # make the plot: ax1 holds the year axis, ax2 (twiny) the MJD axis
    fig = plt.figure(figsize=(10, 4))
    ax1 = fig.add_subplot(111)
    ax1.set_xlabel(r"Year")
    ax1.set_ylabel(r"$\Delta$DMX ($10^{-3}$ pc cm$^{-3}$)")
    ax1.grid(True)
    ax2 = ax1.twiny()
    ax2.set_xlabel("MJD")
    common_mjds = np.array(list(both))
    common_years = Time(common_mjds, format="mjd").decimalyear
    mjds_ordered = np.sort(common_mjds)
    ax2.set_xlim(mjds_ordered[0], mjds_ordered[-1])
    ax1.errorbar(
        common_years,
        nbwb_dmx_diffs * 1e3,
        yerr=nbwb_err_prop * 1e3,
        marker="o",
        ls="",
        markerfacecolor="none",
        label="nb - wb",
    )
    # want arrows indicating missing nb/wb DMX values to difference
    if show_missing:
        # Offset the markers from zero by the scatter of the diffs so they
        # sit above (nb-only) / below (wb-only) the differenced points.
        diff_scatter = np.std(nbwb_dmx_diffs)
        nbonly_years = Time(np.array(list(nbonly)), format="mjd").decimalyear
        ax1.scatter(
            nbonly_years,
            np.zeros(len(nbonly)) + diff_scatter * 1e3,
            marker="v",
            c="r",
            label="nb only",
        )
        nbonlystr = [str(no) for no in nbonly]
        if nbonlystr:
            log.warning(
                f"nb-only measurements available for MJDs: {', '.join(nbonlystr)}"
            )
        wbonly_years = Time(np.array(list(wbonly)), format="mjd").decimalyear
        ax1.scatter(
            wbonly_years,
            np.zeros(len(wbonly)) - diff_scatter * 1e3,
            marker="^",
            c="r",
            label="wb only",
        )
        wbonlystr = [str(wo) for wo in wbonly]
        if wbonlystr:
            log.warning(
                f"wb-only measurements available for MJDs: {', '.join(wbonlystr)}"
            )
    if psrname:
        # Watermark the pulsar name in the lower-right corner.
        ax1.text(
            0.975,
            0.05,
            psrname,
            transform=ax1.transAxes,
            size=18,
            c="lightgray",
            horizontalalignment="right",
            verticalalignment="bottom",
        )
    plt.tight_layout()
    ax1.legend(loc="best")
    if outfile:
        plt.savefig(outfile)
    return None
# Now we want to make wideband DM vs. time plot, this uses the premade dm_resids from PINT
def plot_dm_residuals(
    fitter,
    restype="postfit",
    plotsig=False,
    save=False,
    legend=True,
    title=True,
    axs=None,
    mean_sub=True,
    **kwargs,
):
    """
    Make a plot of Wideband timing DM residuals v. time.

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    restype ['string'] : Type of residuals, pre or post fit, to plot from fitter object. Options are:
        'prefit' - plot the prefit residuals.
        'postfit' - plot the postfit residuals (default)
        'both' - overplot both the pre and post-fit residuals.
    plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals
        [default: False].
    save [boolean] : If True will save plot with the name "resid_v_mjd.png" Will add averaged/whitened
        as necessary [default: False].
    legend [boolean] : If False, will not print legend with plot [default: True].
    title [boolean] : If False, will not print plot title [default: True].
    axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
        larger figure [default: None].
    mean_sub [boolean] : If False, will not mean subtract the DM residuals to be centered on zero [default: True]

    Optional Arguments:
    --------------------
    dmres [list/array] : List or array of DM residual values to plot. Will override values from fitter object.
    errs [list/array] : List or array of DM residual error values to plot. Will override values from fitter object.
    mjds [list/array] : List or array of DM residual mjds to plot. Will override values from fitter object.
    rcvr_bcknds[list/array] : List or array of TOA receiver-backend combinations. Will override values from toa object.
    figsize [tuple] : Size of the figure passed to matplotlib [default: (10,4)].
    fmt ['string'] : matplotlib format option for markers [default: ('x')]
    color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file]
    alpha [float] : matplotlib alpha options for plot points [default: 0.5]

    Raises
    ------
    RuntimeError : If the fitter is narrowband (narrowband TOAs carry no DM residuals).
    """
    markers, colorscheme = plot_settings()
    # DM residuals only exist for wideband fitters
    if not fitter.is_wideband:
        raise RuntimeError(
            "Error: Narrowband TOAs have no DM residuals, use `plot_dmx_time() instead."
        )
    # Get the DM residuals (a user-supplied `dmres` kwarg takes precedence)
    if "dmres" in kwargs.keys():
        dm_resids = kwargs["dmres"]
    else:
        if restype == "postfit":
            dm_resids = fitter.resids.residual_objs["dm"].resids.value
        elif restype == "prefit":
            dm_resids = fitter.resids_init.residual_objs["dm"].resids.value
        elif restype == "both":
            dm_resids = fitter.resids.residual_objs["dm"].resids.value
            dm_resids_init = fitter.resids_init.residual_objs["dm"].resids.value
    # Get the DM residual errors
    if "errs" in kwargs.keys():
        dm_error = kwargs["errs"]
    else:
        if restype == "postfit":
            dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
        elif restype == "prefit":
            dm_error = fitter.resids_init.residual_objs["dm"].get_data_error().value
        elif restype == "both":
            dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
            dm_error_init = (
                fitter.resids_init.residual_objs["dm"].get_data_error().value
            )
    # Get the MJDs; the primary x-axis is decimal years
    if "mjds" in kwargs.keys():
        mjds = kwargs["mjds"]
    else:
        mjds = fitter.toas.get_mjds().value
    years = (mjds - 51544.0) / 365.25 + 2000.0
    # Get the receiver-backend combos
    if "rcvr_bcknds" in kwargs.keys():
        rcvr_bcknds = kwargs["rcvr_bcknds"]
    else:
        rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0])
    # Get the set of unique receiver-backend combos
    RCVR_BCKNDS = set(rcvr_bcknds)
    # If we don't want mean subtracted data we add the weighted-mean DM back in
    if not mean_sub:
        if "dmres" in kwargs.keys():
            dm_avg = dm_resids
        else:
            dm_avg = fitter.resids.residual_objs["dm"].dm_data
        if "errs" in kwargs.keys():
            dm_avg_err = dm_error
        else:
            dm_avg_err = fitter.resids.residual_objs["dm"].get_data_error().value
        DM0 = np.average(dm_avg, weights=(dm_avg_err) ** -2)
        # BUG FIX: DM0 is a Quantity only when dm_avg came from the fitter;
        # a user-supplied `dmres` ndarray yields a plain float with no .value.
        DM0_val = getattr(DM0, "value", DM0)
        dm_resids += DM0_val
        if restype == "both":
            dm_resids_init += DM0_val
        if plotsig:
            ylabel = r"DM/Uncertainty"
        else:
            ylabel = r"DM [cm$^{-3}$ pc]"
    else:
        if plotsig:
            ylabel = r"$\Delta$DM/Uncertainty"
        else:
            ylabel = r"$\Delta$DM [cm$^{-3}$ pc]"
    # BUG FIX: determine figsize up front; it is also needed for legend/title
    # placement when an existing axes is supplied (previously a NameError).
    if "figsize" in kwargs.keys():
        figsize = kwargs["figsize"]
    else:
        figsize = (10, 4)
    if axs is None:
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_subplot(111)
    else:
        ax1 = axs
    for i, r_b in enumerate(RCVR_BCKNDS):
        inds = np.where(rcvr_bcknds == r_b)[0]
        if not inds.tolist():
            r_b_label = ""
        else:
            r_b_label = rcvr_bcknds[inds][0]
        # Get plot preferences; kwargs override the per-backend defaults
        if "fmt" in kwargs.keys():
            mkr = kwargs["fmt"]
        else:
            mkr = markers[r_b_label]
        if restype == "both":
            mkr_pre = "."
        if "color" in kwargs.keys():
            clr = kwargs["color"]
        else:
            clr = colorscheme[r_b_label]
        if "alpha" in kwargs.keys():
            alpha = kwargs["alpha"]
        else:
            alpha = 0.5
        # Do plotting command. BUG FIX: honor mkr/clr/alpha (the documented
        # fmt/color/alpha kwargs were previously computed but ignored).
        if restype == "both":
            if plotsig:
                dm_sig = dm_resids[inds] / dm_error[inds]
                # BUG FIX: prefit significances must use the prefit errors
                dm_sig_pre = dm_resids_init[inds] / dm_error_init[inds]
                ax1.errorbar(
                    years[inds],
                    dm_sig,
                    yerr=len(dm_error[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=r_b_label,
                    alpha=alpha,
                )
                ax1.errorbar(
                    years[inds],
                    dm_sig_pre,
                    yerr=len(dm_error_init[inds]) * [1],
                    fmt=mkr_pre,
                    color=clr,
                    label=r_b_label + " Prefit",
                    alpha=alpha,
                )
            else:
                ax1.errorbar(
                    years[inds],
                    dm_resids[inds],
                    yerr=dm_error[inds],
                    fmt=mkr,
                    color=clr,
                    label=r_b_label,
                    alpha=alpha,
                )
                ax1.errorbar(
                    years[inds],
                    dm_resids_init[inds],
                    yerr=dm_error_init[inds],
                    fmt=mkr_pre,
                    color=clr,
                    label=r_b_label + " Prefit",
                    alpha=alpha,
                )
        else:
            if plotsig:
                dm_sig = dm_resids[inds] / dm_error[inds]
                ax1.errorbar(
                    years[inds],
                    dm_sig,
                    yerr=len(dm_error[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=r_b_label,
                    alpha=alpha,
                )
            else:
                ax1.errorbar(
                    years[inds],
                    dm_resids[inds],
                    yerr=dm_error[inds],
                    fmt=mkr,
                    color=clr,
                    label=r_b_label,
                    alpha=alpha,
                )
    # Set labels and the secondary (MJD) x-axis mirroring the year axis
    ax1.set_ylabel(ylabel)
    ax1.set_xlabel(r"Year")
    ax1.grid(True)
    ax2 = ax1.twiny()
    mjd0 = ((ax1.get_xlim()[0]) - 2004.0) * 365.25 + 53005.0
    mjd1 = ((ax1.get_xlim()[1]) - 2004.0) * 365.25 + 53005.0
    ax2.set_xlim(mjd0, mjd1)
    if legend:
        if len(RCVR_BCKNDS) > 5:
            ncol = int(np.ceil(len(RCVR_BCKNDS) / 2))
            y_offset = 1.15
        else:
            ncol = len(RCVR_BCKNDS)
            y_offset = 1.0
        ax1.legend(
            loc="upper center",
            bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
            ncol=ncol,
        )
    if title:
        if len(RCVR_BCKNDS) > 5:
            y_offset = 1.1
        else:
            y_offset = 1.0
        plt.title(
            "%s %s DM residuals" % (fitter.model.PSR.value, restype),
            y=y_offset + 1.0 / figsize[1],
        )
    if axs is None:
        plt.tight_layout()
    if save:
        ext = ""
        if restype == "postfit":
            ext += "_postfit"
        elif restype == "prefit":
            ext += "_prefit"
        elif restype == "both":
            ext += "_prefit_and_postfit"
        plt.savefig("%s_dm_resids_v_time%s.png" % (fitter.model.PSR.value, ext))
    if axs is None:
        # Define clickable points (interactive figure only)
        text = ax2.text(0, 0, "")
        # Define point highlight color
        if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS:
            stamp_color = "#61C853"
        else:
            stamp_color = "#FD9927"

        def onclick(event):
            # Get X and Y axis data
            xdata = mjds
            if plotsig:
                ydata = dm_resids / dm_error
            else:
                ydata = dm_resids
            # Get x and y data from click
            xclick = event.xdata
            yclick = event.ydata
            # Calculate scaled distance, find closest point index
            d = np.sqrt(((xdata - xclick) / 1000.0) ** 2 + (ydata - yclick) ** 2)
            ind_close = np.where(np.min(d) == d)[0]
            # highlight clicked point
            ax2.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color)
            # Print point info
            text.set_position((xdata[ind_close], ydata[ind_close]))
            text.set_text(
                "DM Params:\n MJD: %s \n Res: %.6f \n Index: %s"
                % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
            )

        fig.canvas.mpl_connect("button_press_event", onclick)
    return
def plot_measurements_v_res(
    fitter,
    restype="postfit",
    plotsig=False,
    nbin=50,
    avg=False,
    whitened=False,
    save=False,
    legend=True,
    title=True,
    axs=None,
    **kwargs,
):
    """
    Make a histogram of number of measurements v. residuals

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    restype ['string'] : Type of residuals, pre or post fit, to plot from fitter object. Options are:
        'prefit' - plot the prefit residuals.
        'postfit' - plot the postfit residuals (default)
        'both' - overplot both the pre and post-fit residuals.
    plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals
        [default: False].
    nbin [int] : Number of bins to use in the histogram [default: 50]
    avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False].
    whitened [boolean] : If True will compute and plot whitened residuals [default: False].
    save [boolean] : If True will save plot with the name "resid_v_mjd.png" Will add averaged/whitened
        as necessary [default: False].
    legend [boolean] : If False, will not print legend with plot [default: True].
    title [boolean] : If False, will not print plot title [default: True].
    axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
        larger figure [default: None].

    Optional Arguments:
    --------------------
    res [list/array] : List or array of residual values to plot. Will override values from fitter object.
    errs [list/array] : List or array of residual error values to plot. Will override values from fitter object.
    rcvr_bcknds[list/array] : List or array of TOA receiver-backend combinations. Will override values from toa object.
    figsize [tuple] : Size of the figure passed to matplotlib [default: (10,4)].
    fmt ['string'] : matplotlib format option for markers [default: ('x')]
    color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file]
    alpha [float] : matplotlib alpha options for plot points [default: 0.5]

    Raises
    ------
    ValueError : If `avg` is requested on a wideband fitter, or `restype` is unrecognized.
    """
    markers, colorscheme = plot_settings()
    # Check if wideband; epoch averaging is only defined for narrowband
    if fitter.is_wideband:
        NB = False
        if avg == True:
            raise ValueError(
                "Cannot epoch average wideband residuals, please change 'avg' to False."
            )
    else:
        NB = True
    # Check if want epoch averaged residuals
    if avg == True and restype == "prefit":
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
    elif avg == True and restype == "postfit":
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
    elif avg == True and restype == "both":
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
    # Get residuals (a user-supplied `res` kwarg takes precedence)
    if "res" in kwargs.keys():
        res = kwargs["res"]
    else:
        if restype == "prefit":
            if NB == True:
                if avg == True:
                    res = avg_dict["time_resids"].to(u.us)
                else:
                    res = fitter.resids_init.time_resids.to(u.us)
            else:
                res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
        elif restype == "postfit":
            if NB == True:
                if avg == True:
                    res = avg_dict["time_resids"].to(u.us)
                else:
                    res = fitter.resids.time_resids.to(u.us)
            else:
                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
        elif restype == "both":
            if NB == True:
                if avg == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                else:
                    res = fitter.resids.time_resids.to(u.us)
                    res_pre = fitter.resids_init.time_resids.to(u.us)
            else:
                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
                res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
        else:
            raise ValueError(
                "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."
                % (restype)
            )
    # Check if we want whitened residuals
    if whitened == True and ("res" not in kwargs.keys()):
        if avg == True:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = whiten_resids(avg_dict, restype="postfit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
        else:
            if restype != "both":
                res = whiten_resids(fitter, restype=restype)
            else:
                res = whiten_resids(fitter, restype="prefit")
                res_pre = whiten_resids(fitter, restype="postfit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
    # Get errors
    if "errs" in kwargs.keys():
        errs = kwargs["errs"]
    else:
        if restype == "prefit":
            if avg == True:
                errs = avg_dict["errors"].to(u.us)
            else:
                errs = fitter.toas.get_errors().to(u.us)
        elif restype == "postfit":
            if NB == True:
                if avg == True:
                    errs = avg_dict["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
        elif restype == "both":
            if NB == True:
                if avg == True:
                    errs = avg_dict["errors"].to(u.us)
                    errs_pre = avg_dict_pre["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
                    errs_pre = fitter.toas.get_errors().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
                errs_pre = fitter.toas.get_errors().to(u.us)
    # Get receiver backends
    if "rcvr_bcknds" in kwargs.keys():
        rcvr_bcknds = kwargs["rcvr_bcknds"]
    else:
        rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0])
        if avg == True:
            # Epoch averaging collapses TOAs; use the first TOA of each epoch
            avg_rcvr_bcknds = []
            for iis in avg_dict["indices"]:
                avg_rcvr_bcknds.append(rcvr_bcknds[iis[0]])
            rcvr_bcknds = np.array(avg_rcvr_bcknds)
    # Get the set of unique receiver-backend combos
    RCVR_BCKNDS = set(rcvr_bcknds)
    # BUG FIX: determine figsize up front; it is also needed for legend/title
    # placement when an existing axes is supplied (previously a NameError).
    if "figsize" in kwargs.keys():
        figsize = kwargs["figsize"]
    else:
        figsize = (10, 4)
    if axs is None:
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_subplot(111)
    else:
        ax1 = axs
    xmax = 0
    for i, r_b in enumerate(RCVR_BCKNDS):
        inds = np.where(rcvr_bcknds == r_b)[0]
        if not inds.tolist():
            r_b_label = ""
        else:
            r_b_label = rcvr_bcknds[inds][0]
        # Get plot preferences; the `color` kwarg overrides the per-backend
        # default (previously computed but ignored)
        if "color" in kwargs.keys():
            clr = kwargs["color"]
        else:
            clr = colorscheme[r_b_label]
        if plotsig:
            sig = res[inds] / errs[inds]
            ax1.hist(
                sig,
                nbin,
                histtype="step",
                color=clr,
                label=r_b_label,
            )
            xmax = max(xmax, max(sig), max(-sig))
            if restype == "both":
                sig_pre = res_pre[inds] / errs_pre[inds]
                ax1.hist(
                    sig_pre,
                    nbin,
                    histtype="step",
                    color=clr,
                    linestyle="--",
                    label=r_b_label + " Prefit",
                )
        else:
            ax1.hist(
                res[inds],
                nbin,
                histtype="step",
                color=clr,
                label=r_b_label,
            )
            xmax = max(xmax, max(res[inds]), max(-res[inds]))
            if restype == "both":
                # BUG FIX: plot the prefit residuals here (the original
                # re-plotted the postfit residuals a second time)
                ax1.hist(
                    res_pre[inds],
                    nbin,
                    histtype="step",
                    color=clr,
                    linestyle="--",
                    label=r_b_label + " Prefit",
                )
    ax1.grid(True)
    ax1.set_ylabel("Number of measurements")
    if plotsig:
        if avg and whitened:
            ax1.set_xlabel(
                "Average Residual/Uncertainty \n (Whitened)", multialignment="center"
            )
        elif avg and not whitened:
            ax1.set_xlabel("Average Residual/Uncertainty")
        elif whitened and not avg:
            ax1.set_xlabel(
                "Residual/Uncertainty \n (Whitened)", multialignment="center"
            )
        else:
            ax1.set_xlabel("Residual/Uncertainty")
    else:
        if avg and whitened:
            ax1.set_xlabel(
                "Average Residual ($\mu$s) \n (Whitened)", multialignment="center"
            )
        elif avg and not whitened:
            ax1.set_xlabel("Average Residual ($\mu$s)")
        elif whitened and not avg:
            ax1.set_xlabel("Residual ($\mu$s) \n (Whitened)", multialignment="center")
        else:
            ax1.set_xlabel("Residual ($\mu$s)")
    # Symmetric x-limits around zero, padded 10% beyond the largest residual
    ax1.set_xlim(-1.1 * xmax, 1.1 * xmax)
    if legend:
        if len(RCVR_BCKNDS) > 5:
            ncol = int(np.ceil(len(RCVR_BCKNDS) / 2))
            y_offset = 1.15
        else:
            ncol = len(RCVR_BCKNDS)
            y_offset = 1.0
        ax1.legend(
            loc="upper center",
            bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
            ncol=ncol,
        )
    if title:
        if len(RCVR_BCKNDS) > 5:
            y_offset = 1.1
        else:
            y_offset = 1.0
        plt.title(
            "%s %s residual measurements" % (fitter.model.PSR.value, restype),
            y=y_offset + 1.0 / figsize[1],
        )
    if axs is None:
        plt.tight_layout()
    if save:
        ext = ""
        if whitened:
            ext += "_whitened"
        if avg:
            ext += "_averaged"
        if NB:
            ext += "_NB"
        else:
            ext += "_WB"
        if restype == "prefit":
            ext += "_prefit"
        elif restype == "postfit":
            ext += "_postfit"
        elif restype == "both":
            ext += "_pre_post_fit"
        plt.savefig("%s_resid_measurements%s.png" % (fitter.model.PSR.value, ext))
    return
def plot_measurements_v_dmres(
    fitter,
    restype="postfit",
    plotsig=False,
    nbin=50,
    save=False,
    legend=True,
    title=True,
    axs=None,
    mean_sub=True,
    **kwargs,
):
    """
    Make a histogram of number of measurements v. DM residuals (wideband only).

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    restype ['string'] : Type of residuals, pre or post fit, to plot from fitter object. Options are:
        'prefit' - plot the prefit residuals.
        'postfit' - plot the postfit residuals (default)
        'both' - overplot both the pre and post-fit residuals.
    plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals
        [default: False].
    nbin [int] : Number of bins to use in the histogram [default: 50]
    save [boolean] : If True will save plot with the name "resid_v_mjd.png" Will add averaged/whitened
        as necessary [default: False].
    legend [boolean] : If False, will not print legend with plot [default: True].
    title [boolean] : If False, will not print plot title [default: True].
    axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
        larger figure [default: None].
    mean_sub [boolean] : If False, will not mean subtract the DM residuals to be centered on zero [default: True]

    Optional Arguments:
    --------------------
    dmres [list/array] : List or array of residual values to plot. Will override values from fitter object.
    errs [list/array] : List or array of residual error values to plot. Will override values from fitter object.
    rcvr_bcknds[list/array] : List or array of TOA receiver-backend combinations. Will override values from toa object.
    figsize [tuple] : Size of the figure passed to matplotlib [default: (10,4)].
    fmt ['string'] : matplotlib format option for markers [default: ('x')]
    color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file]

    Raises
    ------
    ValueError : If the fitter is narrowband (no DM residuals available).
    """
    markers, colorscheme = plot_settings()
    # DM residuals only exist for wideband fitters.
    # BUG FIX: the message previously pointed back to this same function;
    # the narrowband equivalent is plot_measurements_v_res.
    if not fitter.is_wideband:
        raise ValueError(
            "Narrowband fitters have no DM residuals, please use `plot_measurements_v_res` instead."
        )
    # Get the DM residuals (a user-supplied `dmres` kwarg takes precedence)
    if "dmres" in kwargs.keys():
        dm_resids = kwargs["dmres"]
    else:
        if restype == "postfit":
            dm_resids = fitter.resids.residual_objs["dm"].resids.value
        elif restype == "prefit":
            dm_resids = fitter.resids_init.residual_objs["dm"].resids.value
        elif restype == "both":
            dm_resids = fitter.resids.residual_objs["dm"].resids.value
            dm_resids_init = fitter.resids_init.residual_objs["dm"].resids.value
    # Get the DM residual errors
    if "errs" in kwargs.keys():
        dm_error = kwargs["errs"]
    else:
        if restype == "postfit":
            dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
        elif restype == "prefit":
            dm_error = fitter.resids_init.residual_objs["dm"].get_data_error().value
        elif restype == "both":
            dm_error = fitter.resids.residual_objs["dm"].get_data_error().value
            dm_error_init = (
                fitter.resids_init.residual_objs["dm"].get_data_error().value
            )
    # Get the receiver-backend combos
    if "rcvr_bcknds" in kwargs.keys():
        rcvr_bcknds = kwargs["rcvr_bcknds"]
    else:
        rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0])
    # Get the set of unique receiver-backend combos
    RCVR_BCKNDS = set(rcvr_bcknds)
    # If we don't want mean subtracted data we add the weighted-mean DM back in
    if not mean_sub:
        if "dmres" in kwargs.keys():
            dm_avg = dm_resids
        else:
            dm_avg = fitter.resids.residual_objs["dm"].dm_data
        if "errs" in kwargs.keys():
            dm_avg_err = dm_error
        else:
            dm_avg_err = fitter.resids.residual_objs["dm"].get_data_error().value
        DM0 = np.average(dm_avg, weights=(dm_avg_err) ** -2)
        # BUG FIX: DM0 is a Quantity only when dm_avg came from the fitter;
        # a user-supplied `dmres` ndarray yields a plain float with no .value.
        DM0_val = getattr(DM0, "value", DM0)
        dm_resids += DM0_val
        if restype == "both":
            dm_resids_init += DM0_val
        if plotsig:
            xlabel = r"DM/Uncertainty"
        else:
            xlabel = r"DM [cm$^{-3}$ pc]"
    else:
        if plotsig:
            xlabel = r"$\Delta$DM/Uncertainty"
        else:
            xlabel = r"$\Delta$DM [cm$^{-3}$ pc]"
    # BUG FIX: determine figsize up front; it is also needed for legend/title
    # placement when an existing axes is supplied (previously a NameError).
    if "figsize" in kwargs.keys():
        figsize = kwargs["figsize"]
    else:
        figsize = (10, 4)
    if axs is None:
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_subplot(111)
    else:
        ax1 = axs
    for i, r_b in enumerate(RCVR_BCKNDS):
        inds = np.where(rcvr_bcknds == r_b)[0]
        if not inds.tolist():
            r_b_label = ""
        else:
            r_b_label = rcvr_bcknds[inds][0]
        # Get plot preferences; the `color` kwarg overrides the per-backend
        # default (previously computed but ignored)
        if "color" in kwargs.keys():
            clr = kwargs["color"]
        else:
            clr = colorscheme[r_b_label]
        if plotsig:
            sig = dm_resids[inds] / dm_error[inds]
            ax1.hist(
                sig,
                nbin,
                histtype="step",
                color=clr,
                label=r_b_label,
            )
            if restype == "both":
                sig_pre = dm_resids_init[inds] / dm_error_init[inds]
                ax1.hist(
                    sig_pre,
                    nbin,
                    histtype="step",
                    color=clr,
                    linestyle="--",
                    label=r_b_label + " Prefit",
                )
        else:
            ax1.hist(
                dm_resids[inds],
                nbin,
                histtype="step",
                color=clr,
                label=r_b_label,
            )
            if restype == "both":
                ax1.hist(
                    dm_resids_init[inds],
                    nbin,
                    histtype="step",
                    color=clr,
                    linestyle="--",
                    label=r_b_label + " Prefit",
                )
    ax1.grid(True)
    ax1.set_ylabel("Number of measurements")
    ax1.set_xlabel(xlabel)
    if legend:
        if len(RCVR_BCKNDS) > 5:
            ncol = int(np.ceil(len(RCVR_BCKNDS) / 2))
            y_offset = 1.15
        else:
            ncol = len(RCVR_BCKNDS)
            y_offset = 1.0
        ax1.legend(
            loc="upper center",
            bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
            ncol=ncol,
        )
    if title:
        if len(RCVR_BCKNDS) > 5:
            y_offset = 1.1
        else:
            y_offset = 1.0
        plt.title(
            "%s %s DM residual measurements" % (fitter.model.PSR.value, restype),
            y=y_offset + 1.0 / figsize[1],
        )
    if axs is None:
        plt.tight_layout()
    if save:
        ext = ""
        if restype == "prefit":
            ext += "_prefit"
        elif restype == "postfit":
            ext += "_postfit"
        elif restype == "both":
            ext += "_pre_post_fit"
        plt.savefig("%s_DM_resid_measurements%s.png" % (fitter.model.PSR.value, ext))
    return
def plot_residuals_orb(
    fitter,
    restype="postfit",
    colorby="f",
    plotsig=False,
    avg=False,
    mixed_ecorr=False,
    whitened=False,
    save=False,
    legend=True,
    title=True,
    axs=None,
    **kwargs,
):
    """
    Make a plot of the residuals vs. orbital phase.

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    restype ['string'] : Type of residuals, pre or post fit, to plot from fitter object. Options are:
        'prefit' - plot the prefit residuals.
        'postfit' - plot the postfit residuals (default)
        'both' - overplot both the pre and post-fit residuals.
    colorby ['string'] : TOA column/flag used to pick colors and markers [default: 'f'].
    plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals
        [default: False].
    avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False].
    mixed_ecorr [boolean] : If True (with avg), also plot TOAs that have no ECORR and hence cannot be
        epoch averaged [default: False].
    whitened [boolean] : If True will compute and plot whitened residuals [default: False].
    save [boolean] : If True will save plot with the name "resid_v_mjd.png" Will add averaged/whitened
        as necessary [default: False].
    legend [boolean] : If False, will not print legend with plot [default: True].
    title [boolean] : If False, will not print plot title [default: True].
    axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
        larger figure [default: None].

    Optional Arguments:
    --------------------
    res [list/array] : List or array of residual values to plot. Will override values from fitter object.
    errs [list/array] : List or array of residual error values to plot. Will override values from fitter object.
    orbphase [list/array] : List or array of orbital phases to plot. Will override values from model object.
    rcvr_bcknds[list/array] : List or array of TOA receiver-backend combinations. Will override values from toa object.
    figsize [tuple] : Size of the figure passed to matplotlib [default: (10,4)].
    fmt ['string'] : matplotlib format option for markers [default: ('x')]
    color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file]
    alpha [float] : matplotlib alpha options for plot points [default: 0.5]

    Raises
    ------
    ValueError : If `avg` is requested on a wideband fitter, or `restype` is unrecognized.
    """
    # Check if wideband; epoch averaging is only defined for narrowband
    if fitter.is_wideband:
        NB = False
        if avg == True:
            raise ValueError(
                "Cannot epoch average wideband residuals, please change 'avg' to False."
            )
    else:
        NB = True
    # Check if want epoch averaged residuals (optionally alongside the
    # no-ECORR subset when mixed_ecorr is requested)
    if avg == True and restype == "prefit" and mixed_ecorr == True:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(
            fitter.toas, fitter.resids_init, use_noise_model=True
        )
    elif avg == True and restype == "postfit" and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
    elif avg == True and restype == "both" and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict_pre = no_ecorr_average(
            fitter.toas, fitter.resids_init, use_noise_model=True
        )
    elif avg == True and restype == "prefit" and mixed_ecorr == False:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
    elif avg == True and restype == "postfit" and mixed_ecorr == False:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
    elif avg == True and restype == "both" and mixed_ecorr == False:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
    # Get residuals (a user-supplied `res` kwarg takes precedence)
    if "res" in kwargs.keys():
        res = kwargs["res"]
    else:
        if restype == "prefit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    res = avg_dict["time_resids"].to(u.us)
                else:
                    res = fitter.resids_init.time_resids.to(u.us)
            else:
                res = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
        elif restype == "postfit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                elif avg == True:
                    res = avg_dict["time_resids"].to(u.us)
                else:
                    res = fitter.resids.time_resids.to(u.us)
            else:
                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
        elif restype == "both":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict["time_resids"].to(u.us)
                    res_no_avg = no_avg_dict["time_resids"].to(u.us)
                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                    res_pre_no_avg = no_avg_dict_pre["time_resids"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    res = avg_dict["time_resids"].to(u.us)
                    res_pre = avg_dict_pre["time_resids"].to(u.us)
                else:
                    res = fitter.resids.time_resids.to(u.us)
                    res_pre = fitter.resids_init.time_resids.to(u.us)
            else:
                res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
                res_pre = fitter.resids_init.residual_objs["toa"].time_resids.to(u.us)
        else:
            raise ValueError(
                "Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."
                % (restype)
            )
    # Check if we want whitened residuals
    if whitened == True and ("res" not in kwargs.keys()):
        if avg == True and mixed_ecorr == True:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
                res_no_avg = whiten_resids(no_avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = whiten_resids(avg_dict, restype="postfit")
                res_pre = res_pre.to(u.us)
                # BUG FIX: whiten the *non-averaged* subset from the no_avg
                # dicts (the original re-whitened the averaged dicts, giving
                # arrays of the wrong length for the no-avg orbital phases)
                res_no_avg = whiten_resids(no_avg_dict_pre, restype="prefit")
                res_pre_no_avg = whiten_resids(no_avg_dict, restype="postfit")
                res_pre_no_avg = res_pre_no_avg.to(u.us)
            res = res.to(u.us)
            res_no_avg = res_no_avg.to(u.us)
        elif avg == True and mixed_ecorr == False:
            if restype != "both":
                res = whiten_resids(avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict_pre, restype="prefit")
                res_pre = whiten_resids(avg_dict, restype="postfit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
        else:
            if restype != "both":
                res = whiten_resids(fitter, restype=restype)
            else:
                res = whiten_resids(fitter, restype="prefit")
                res_pre = whiten_resids(fitter, restype="postfit")
                res_pre = res_pre.to(u.us)
            res = res.to(u.us)
    # Get errors
    if "errs" in kwargs.keys():
        errs = kwargs["errs"]
    else:
        if restype == "prefit":
            if avg == True and mixed_ecorr == True:
                errs = avg_dict["errors"].to(u.us)
                errs_no_avg = no_avg_dict["errors"].to(u.us)
            elif avg == True and mixed_ecorr == False:
                errs = avg_dict["errors"].to(u.us)
            else:
                errs = fitter.toas.get_errors().to(u.us)
        elif restype == "postfit":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict["errors"].to(u.us)
                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
        elif restype == "both":
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict["errors"].to(u.us)
                    errs_pre = avg_dict_pre["errors"].to(u.us)
                    errs_no_avg = no_avg_dict["errors"].to(u.us)
                    errs_no_avg_pre = no_avg_dict_pre["errors"].to(u.us)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict["errors"].to(u.us)
                    errs_pre = avg_dict_pre["errors"].to(u.us)
                else:
                    errs = fitter.resids.get_data_error().to(u.us)
                    errs_pre = fitter.toas.get_errors().to(u.us)
            else:
                errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
                errs_pre = fitter.toas.get_errors().to(u.us)
    # Get MJDs (only needed to compute orbital phases)
    if "orbphase" not in kwargs.keys():
        mjds = fitter.toas.get_mjds().value
        if avg == True:
            mjds = avg_dict["mjds"].value
            if mixed_ecorr == True:
                mjds_no_avg = no_avg_dict["mjds"].value
    # Now we need the orbital phases from the binary model
    if "orbphase" in kwargs.keys():
        orbphase = kwargs["orbphase"]
    else:
        orbphase = fitter.model.orbital_phase(mjds, radians=False)
        if avg and mixed_ecorr:
            no_avg_orbphase = fitter.model.orbital_phase(mjds_no_avg, radians=False)
    # In the end, we'll want to plot both ecorr avg & not ecorr avg at the same
    # time if we have mixed ecorr. Create combined arrays.
    if avg == True and mixed_ecorr == True:
        combo_res = np.hstack((res, res_no_avg))
        combo_errs = np.hstack((errs, errs_no_avg))
        combo_orbphase = np.hstack((orbphase, no_avg_orbphase))
        if restype == "both":
            combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre))
            # BUG FIX: the variable defined above is `res_pre_no_avg`;
            # `res_no_avg_pre` was an undefined name (NameError)
            combo_res_pre = np.hstack((res_pre, res_pre_no_avg))
    # Get colorby flag values (obs, PTA, febe, etc.)
    # NOTE(review): since `colorby` is a named parameter it can never appear
    # in **kwargs; this branch is kept for backward compatibility only.
    if "colorby" in kwargs.keys():
        cb = kwargs["colorby"]
    else:
        cb = np.array(fitter.toas[colorby])
        # . Seems to run a little faster but not robust to obs
        # cb = np.array(fitter.toas.get_flag_value(colorby)[0])
        if avg == True:
            avg_cb = []
            for iis in avg_dict["indices"]:
                avg_cb.append(cb[iis[0]])
            if mixed_ecorr == True:
                no_avg_cb = []
                for jjs in no_avg_dict["indices"]:
                    no_avg_cb.append(cb[jjs])
                no_ecorr_cb = np.array(no_avg_cb)
            cb = np.array(avg_cb)
    # Get the set of unique flag values
    if avg == True and mixed_ecorr == True:
        cb = np.hstack((cb, no_ecorr_cb))
    CB = set(cb)
    colorscheme, markerscheme = set_color_and_marker(colorby)
    if "figsize" in kwargs.keys():
        figsize = kwargs["figsize"]
    else:
        figsize = (10, 4)
    if axs is None:
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_subplot(111)
    else:
        fig = plt.gcf()
        ax1 = axs
    for i, c in enumerate(CB):
        inds = np.where(cb == c)[0]
        if not inds.tolist():
            cb_label = ""
        else:
            cb_label = cb[inds][0]
        # Get plot preferences; kwargs override the per-flag defaults
        if "fmt" in kwargs.keys():
            mkr = kwargs["fmt"]
        else:
            try:
                mkr = markerscheme[cb_label]
            except Exception:
                mkr = "x"
                log.log(1, "Color by flag value doesn't have a marker label!!")
        # BUG FIX: always define the prefit marker when overplotting; it was
        # previously skipped (NameError) when a `fmt` kwarg was supplied
        if restype == "both":
            mkr_pre = "."
        if "color" in kwargs.keys():
            clr = kwargs["color"]
        else:
            try:
                clr = colorscheme[cb_label]
            except Exception:
                clr = "k"
                log.log(1, "Color by flag value doesn't have a color!!")
        if "alpha" in kwargs.keys():
            alpha = kwargs["alpha"]
        else:
            alpha = 0.5
        if avg and mixed_ecorr:
            if plotsig:
                combo_sig = combo_res[inds] / combo_errs[inds]
                ax1.errorbar(
                    combo_orbphase[inds],
                    combo_sig,
                    yerr=len(combo_errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                )
                if restype == "both":
                    combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds]
                    ax1.errorbar(
                        combo_orbphase[inds],
                        combo_sig_pre,
                        yerr=len(combo_errs_pre[inds]) * [1],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                    )
            else:
                ax1.errorbar(
                    combo_orbphase[inds],
                    combo_res[inds],
                    yerr=combo_errs[inds],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                )
                if restype == "both":
                    ax1.errorbar(
                        combo_orbphase[inds],
                        combo_res_pre[inds],
                        yerr=combo_errs_pre[inds],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                    )
        else:
            if plotsig:
                sig = res[inds] / errs[inds]
                ax1.errorbar(
                    orbphase[inds],
                    sig,
                    yerr=len(errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                )
                if restype == "both":
                    sig_pre = res_pre[inds] / errs_pre[inds]
                    ax1.errorbar(
                        orbphase[inds],
                        sig_pre,
                        yerr=len(errs_pre[inds]) * [1],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                    )
            else:
                ax1.errorbar(
                    orbphase[inds],
                    res[inds],
                    yerr=errs[inds],
                    fmt=mkr,
                    color=clr,
                    label=cb_label,
                    alpha=alpha,
                )
                if restype == "both":
                    ax1.errorbar(
                        orbphase[inds],
                        res_pre[inds],
                        yerr=errs_pre[inds],
                        fmt=mkr_pre,
                        color=clr,
                        label=cb_label + " Prefit",
                        alpha=alpha,
                    )
    # Set axis labels
    ax1.set_xlabel(r"Orbital Phase")
    ax1.grid(True)
    if plotsig:
        if avg and whitened:
            ax1.set_ylabel(
                "Average Residual/Uncertainty \n (Whitened)", multialignment="center"
            )
        elif avg and not whitened:
            ax1.set_ylabel("Average Residual/Uncertainty")
        elif whitened and not avg:
            ax1.set_ylabel(
                "Residual/Uncertainty \n (Whitened)", multialignment="center"
            )
        else:
            ax1.set_ylabel("Residual/Uncertainty")
    else:
        if avg and whitened:
            ax1.set_ylabel(
                "Average Residual ($\mu$s) \n (Whitened)", multialignment="center"
            )
        elif avg and not whitened:
            ax1.set_ylabel("Average Residual ($\mu$s)")
        elif whitened and not avg:
            ax1.set_ylabel("Residual ($\mu$s) \n (Whitened)", multialignment="center")
        else:
            ax1.set_ylabel("Residual ($\mu$s)")
    if legend:
        if len(CB) > 5:
            ncol = int(np.ceil(len(CB) / 2))
            y_offset = 1.15
        else:
            ncol = len(CB)
            y_offset = 1.0
        ax1.legend(
            loc="upper center",
            bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]),
            ncol=ncol,
        )
    if title:
        if len(CB) > 5:
            y_offset = 1.1
        else:
            y_offset = 1.0
        plt.title(
            "%s %s timing residuals" % (fitter.model.PSR.value, restype),
            y=y_offset + 1.0 / figsize[1],
        )
    if axs is None:
        plt.tight_layout()
    if save:
        ext = ""
        if whitened:
            ext += "_whitened"
        if avg:
            ext += "_averaged"
        if NB:
            ext += "_NB"
        else:
            ext += "_WB"
        if restype == "prefit":
            ext += "_prefit"
        elif restype == "postfit":
            ext += "_postfit"
        elif restype == "both":
            ext += "_pre_post_fit"
        plt.savefig("%s_resid_v_orbphase%s.png" % (fitter.model.PSR.value, ext))
    if axs is None:
        # Define clickable points (interactive figure only)
        text = ax1.text(0, 0, "")
        stamp_color = "#FD9927"
        # Define color for highlighting points
        # if "430_ASP" in RCVR_BCKNDS or "430_PUPPI" in RCVR_BCKNDS:
        #    stamp_color = "#61C853"
        # else:
        #    stamp_color = "#FD9927"

        def onclick(event):
            # Get X and Y axis data
            xdata = orbphase
            if plotsig:
                ydata = (res / errs).decompose().value
            else:
                ydata = res.value
            # Get x and y data from click
            xclick = event.xdata
            yclick = event.ydata
            # Calculate scaled distance, find closest point index
            d = np.sqrt((xdata - xclick) ** 2 + ((ydata - yclick) / 100.0) ** 2)
            ind_close = np.where(np.min(d) == d)[0]
            # highlight clicked point
            ax1.scatter(xdata[ind_close], ydata[ind_close], marker="x", c=stamp_color)
            # Print point info
            text.set_position((xdata[ind_close], ydata[ind_close]))
            if plotsig:
                text.set_text(
                    "TOA Params:\n Phase: %.5f \n Res/Err: %.2f \n Index: %s"
                    % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
                )
            else:
                text.set_text(
                    "TOA Params:\n Phase: %.5f \n Res: %.2f \n Index: %s"
                    % (xdata[ind_close][0], ydata[ind_close], ind_close[0])
                )

        fig.canvas.mpl_connect("button_press_event", onclick)
    return
def plot_residuals_serial(fitter, restype='postfit', colorby='pta', plotsig=False, avg=False, whitened=False,
                          save=False, legend=True, title=True, axs=None, mixed_ecorr=False, epoch_lines=False,
                          milli=False, mjd_order=False, **kwargs):
    """
    Make a serial plot of the residuals (TOAs are evenly spaced along the x-axis).

    In the default setup this will emulate pintk/tempo2 and TOAs appear in the
    order they are listed in the tim file. If mjd_order=True, TOAs are first
    sorted according to MJD.

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    restype ['string'] : Type of residuals, pre or post fit, to plot from fitter object. Options are:
        'prefit' - plot the prefit residuals.
        'postfit' - plot the postfit residuals (default)
        'both' - overplot both the pre and post-fit residuals.
    colorby ['string']: What to use to determine color/markers
        'pta' - color residuals by PTA (default)
        'obs' - color residuals by telescope
        'f' - color residuals by frontend/backend pair (flag not used by all PTAs).
    plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals
        [default: False].
    avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False].
    whitened [boolean] : If True will compute and plot whitened residuals [default: False].
    save [boolean] : If True will save plot with the name "resid_v_mjd.png". Will add averaged/whitened
        as necessary [default: False].
    legend [boolean] : If False, will not print legend with plot [default: True].
    title [boolean/string] : If False, will not print plot title; if a string, it is used as the title [default: True].
    axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
        larger figure [default: None].
    mixed_ecorr [boolean]: If True, allows avging with mixed ecorr/no ecorr TOAs.
    epoch_lines [boolean]: If True, plot a vertical line at the first TOA of each observation file.
    milli [boolean]: If True, plot y-axis in milliseconds rather than microseconds.
    mjd_order [boolean]: If True, the TOAs are sorted by MJD before being plotted.

    Optional Arguments:
    --------------------
    res [list/array] : List or array of residual values to plot. Will override values from fitter object.
    errs [list/array] : List or array of residual error values to plot. Will override values from fitter object.
    mjds [list/array] : List or array of TOA MJDs to plot. Will override values from toa object.
    colorby [list/array] : List or array of colorby flag values. Will override values from toa object.
    figsize [tuple] : Size of the figure passed to matplotlib [default: (10,5)].
    fmt ['string'] : matplotlib format option for markers [default: ('x')]
    color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file]
    alpha [float] : matplotlib alpha options for plot points [default: 0.5]
    label ['string'] : legend label override.
    """
    # Check if wideband; epoch averaging is undefined for wideband residuals.
    if fitter.is_wideband:
        NB = False
        if avg == True:
            raise ValueError("Cannot epoch average wideband residuals, please change 'avg' to False.")
    else:
        NB = True
    # Compute the epoch-averaged residual dictionaries as needed.
    if avg == True and restype == 'prefit' and mixed_ecorr == True:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids_init, use_noise_model=True)
    elif avg == True and restype == 'postfit' and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
    elif avg == True and restype == 'both' and mixed_ecorr == True:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        no_avg_dict = no_ecorr_average(fitter.toas, fitter.resids, use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
        no_avg_dict_pre = no_ecorr_average(fitter.toas, fitter.resids_init, use_noise_model=True)
    elif avg == True and restype == 'prefit' and mixed_ecorr == False:
        avg_dict = fitter.resids_init.ecorr_average(use_noise_model=True)
    elif avg == True and restype == 'postfit' and mixed_ecorr == False:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
    elif avg == True and restype == 'both' and mixed_ecorr == False:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
        avg_dict_pre = fitter.resids_init.ecorr_average(use_noise_model=True)
    # Choose the plotting unit (microseconds by default).
    if milli:
        unit = u.ms
        unitstr = "ms"
    else:
        unit = u.us
        unitstr = "$\mu$s"
    # Get residuals (kwarg override takes precedence).
    if 'res' in kwargs.keys():
        res = kwargs['res']
    else:
        if restype == 'prefit':
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict['time_resids'].to(unit)
                    res_no_avg = no_avg_dict['time_resids'].to(unit)
                elif avg == True and mixed_ecorr == False:
                    res = avg_dict['time_resids'].to(unit)
                else:
                    res = fitter.resids_init.time_resids.to(unit)
            else:
                res = fitter.resids_init.residual_objs['toa'].time_resids.to(unit)
        elif restype == 'postfit':
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict['time_resids'].to(unit)
                    res_no_avg = no_avg_dict['time_resids'].to(unit)
                elif avg == True:
                    res = avg_dict['time_resids'].to(unit)
                else:
                    res = fitter.resids.time_resids.to(unit)
            else:
                res = fitter.resids.residual_objs['toa'].time_resids.to(unit)
        elif restype == 'both':
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    res = avg_dict['time_resids'].to(unit)
                    res_no_avg = no_avg_dict['time_resids'].to(unit)
                    res_pre = avg_dict_pre['time_resids'].to(unit)
                    res_pre_no_avg = no_avg_dict_pre['time_resids'].to(unit)
                elif avg == True and mixed_ecorr == False:
                    res = avg_dict['time_resids'].to(unit)
                    res_pre = avg_dict_pre['time_resids'].to(unit)
                else:
                    res = fitter.resids.time_resids.to(unit)
                    res_pre = fitter.resids_init.time_resids.to(unit)
            else:
                res = fitter.resids.residual_objs['toa'].time_resids.to(unit)
                res_pre = fitter.resids_init.residual_objs['toa'].time_resids.to(unit)
        else:
            raise ValueError("Unrecognized residual type: %s. Please choose from 'prefit', 'postfit', or 'both'."\
                %(restype))
    # Check if we want whitened residuals (skipped when residuals were supplied via kwargs).
    if whitened == True and ('res' not in kwargs.keys()):
        if avg == True and mixed_ecorr == True:
            if restype != 'both':
                res = whiten_resids(avg_dict, restype=restype)
                res_no_avg = whiten_resids(no_avg_dict, restype=restype)
            else:
                # Mirrors the non-mixed 'both' branch below: 'res' carries the
                # prefit-whitened values and 'res_pre' the postfit-whitened ones.
                res = whiten_resids(avg_dict_pre, restype='prefit')
                res_pre = whiten_resids(avg_dict, restype='postfit')
                res_pre = res_pre.to(unit)
                # BUGFIX: the un-averaged results must come from the no-ecorr
                # dictionaries (previously computed from the averaged ones,
                # yielding arrays of the wrong length).
                res_no_avg = whiten_resids(no_avg_dict_pre, restype='prefit')
                res_pre_no_avg = whiten_resids(no_avg_dict, restype='postfit')
                res_pre_no_avg = res_pre_no_avg.to(unit)
            res = res.to(unit)
            res_no_avg = res_no_avg.to(unit)
        elif avg == True and mixed_ecorr == False:
            if restype != 'both':
                res = whiten_resids(avg_dict, restype=restype)
            else:
                res = whiten_resids(avg_dict_pre, restype='prefit')
                res_pre = whiten_resids(avg_dict, restype='postfit')
                res_pre = res_pre.to(unit)
            res = res.to(unit)
        else:
            if restype != 'both':
                res = whiten_resids(fitter, restype=restype)
            else:
                res = whiten_resids(fitter, restype='prefit')
                res_pre = whiten_resids(fitter, restype='postfit')
                res_pre = res_pre.to(unit)
            res = res.to(unit)
    # Get errors (kwarg override takes precedence).
    if 'errs' in kwargs.keys():
        errs = kwargs['errs']
    else:
        if restype == 'prefit':
            # NOTE(review): unlike the 'postfit' branch, this one does not
            # special-case wideband fitters -- confirm this is intentional.
            if avg == True and mixed_ecorr == True:
                errs = avg_dict['errors'].to(unit)
                errs_no_avg = no_avg_dict['errors'].to(unit)
            elif avg == True and mixed_ecorr == False:
                errs = avg_dict['errors'].to(unit)
            else:
                errs = fitter.toas.get_errors().to(unit)
        elif restype == 'postfit':
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict['errors'].to(unit)
                    errs_no_avg = no_avg_dict['errors'].to(unit)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict['errors'].to(unit)
                else:
                    errs = fitter.resids.get_data_error().to(unit)
            else:
                errs = fitter.resids.residual_objs['toa'].get_data_error().to(unit)
        elif restype == 'both':
            if NB == True:
                if avg == True and mixed_ecorr == True:
                    errs = avg_dict['errors'].to(unit)
                    errs_pre = avg_dict_pre['errors'].to(unit)
                    errs_no_avg = no_avg_dict['errors'].to(unit)
                    errs_no_avg_pre = no_avg_dict_pre['errors'].to(unit)
                elif avg == True and mixed_ecorr == False:
                    errs = avg_dict['errors'].to(unit)
                    errs_pre = avg_dict_pre['errors'].to(unit)
                else:
                    errs = fitter.resids.get_data_error().to(unit)
                    errs_pre = fitter.toas.get_errors().to(unit)
            else:
                errs = fitter.resids.residual_objs['toa'].get_data_error().to(unit)
                errs_pre = fitter.toas.get_errors().to(unit)
    # Get MJDs (only needed for mjd_order sorting; kwarg override takes precedence).
    if 'mjds' in kwargs.keys():
        mjds = kwargs['mjds']
    else:
        mjds = fitter.toas.get_mjds().value
        if avg == True and mixed_ecorr == True:
            # BUGFIX: stack averaged-first so the length and ordering match the
            # combined residual/error/flag arrays built below.
            mjds = np.hstack((avg_dict['mjds'].value, no_avg_dict['mjds'].value))
        elif avg == True and mixed_ecorr == False:
            mjds = avg_dict['mjds'].value
    # With mixed ecorr we plot both the ecorr-averaged and un-averaged TOAs at
    # once, so build combined arrays (averaged entries first).
    if avg == True and mixed_ecorr == True:
        combo_res = np.hstack((res, res_no_avg))
        combo_errs = np.hstack((errs, errs_no_avg))
        if restype == 'both':
            combo_errs_pre = np.hstack((errs_pre, errs_no_avg_pre))
            combo_res_pre = np.hstack((res_pre, res_no_avg_pre))
    # Get colorby flag values (obs, PTA, febe, etc.).
    if 'colorby' in kwargs.keys():
        cb = kwargs['colorby']
    else:
        cb = np.array(fitter.toas[colorby])
    if avg == True:
        # Collapse flags onto the averaged epochs (first TOA of each epoch).
        avg_cb = [cb[iis[0]] for iis in avg_dict['indices']]
        if mixed_ecorr == True:
            no_ecorr_cb = np.array([cb[jjs] for jjs in no_avg_dict['indices']])
        cb = np.array(avg_cb)
    if avg == True and mixed_ecorr == True:
        cb = np.hstack((cb, no_ecorr_cb))
    # Get the set of unique flag values.
    CB = set(cb)
    # NOTE(review): for any other colorby value 'colorscheme' stays undefined;
    # the per-point lookup below then falls back to black via its except clause.
    if colorby == 'pta':
        colorscheme = colorschemes['pta']
    elif colorby == 'obs':
        colorscheme = colorschemes['observatories']
    elif colorby == 'f':
        colorscheme = colorschemes['febe']
    if 'figsize' in kwargs.keys():
        figsize = kwargs['figsize']
    else:
        figsize = (10, 5)
    if axs == None:
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_subplot(111)
    else:
        fig = plt.gcf()
        ax1 = axs
    # x-axis values: serial index in tim-file order (tempo2/pintk style), or
    # MJD-sorted order. Must be an ndarray so it can be fancy-indexed below.
    if not mjd_order:
        # BUGFIX: was range(len(inds)) -- 'inds' is undefined at this point and
        # a range cannot be indexed with an integer array.
        x = np.arange(len(cb))
    else:
        x = np.argsort(mjds)
    # Plot a vertical line at the first TOA of each observation file.
    if epoch_lines and not avg:
        names = fitter.toas["name"]
        for nm in np.unique(names):
            inds_name = np.where(names == nm)[0]
            ax1.axvline(min(x[inds_name]), c="k", alpha=0.1)
    for i, c in enumerate(CB):
        inds = np.where(cb == c)[0]
        if not inds.tolist():
            cb_label = ""
        else:
            cb_label = cb[inds][0]
        # BUGFIX: always define the prefit marker for restype='both' (it was
        # previously only set when the marker lookup succeeded).
        if restype == 'both':
            mkr_pre = '.'
        # Marker: user override, else per-flag lookup with an 'x' fallback.
        if 'fmt' in kwargs.keys():
            mkr = kwargs['fmt']
        else:
            try:
                mkr = markers[cb_label]
            except Exception:
                mkr = 'x'
                log.log(1, "Color by Flag doesn't have a marker label!!")
        # Color: user override, else per-flag lookup with a black fallback.
        if 'color' in kwargs.keys():
            clr = kwargs['color']
        else:
            try:
                clr = colorscheme[cb_label]
            except Exception:
                clr = 'k'
                log.log(1, "Color by Flag doesn't have a color!!")
        if 'alpha' in kwargs.keys():
            alpha = kwargs['alpha']
        else:
            alpha = 0.5
        if 'label' in kwargs.keys():
            label = kwargs['label']
        else:
            label = cb_label
        x_subsec = x[inds]
        if avg == True and mixed_ecorr == True:
            if plotsig:
                combo_sig = combo_res[inds] / combo_errs[inds]
                ax1.errorbar(x_subsec, combo_sig, yerr=len(combo_errs[inds]) * [1], fmt=mkr,
                             color=clr, label=label, alpha=alpha, picker=True)
                if restype == 'both':
                    combo_sig_pre = combo_res_pre[inds] / combo_errs_pre[inds]
                    ax1.errorbar(x_subsec, combo_sig_pre, yerr=len(combo_errs_pre[inds]) * [1], fmt=mkr_pre,
                                 color=clr, label=label + " Prefit", alpha=alpha, picker=True)
            else:
                ax1.errorbar(x_subsec, combo_res[inds], yerr=combo_errs[inds], fmt=mkr,
                             color=clr, label=label, alpha=alpha, picker=True)
                if restype == 'both':
                    ax1.errorbar(x_subsec, combo_res_pre[inds], yerr=combo_errs_pre[inds], fmt=mkr_pre,
                                 color=clr, label=label + " Prefit", alpha=alpha, picker=True)
        else:
            if plotsig:
                sig = res[inds] / errs[inds]
                ax1.errorbar(x_subsec, sig, yerr=len(errs[inds]) * [1], fmt=mkr,
                             color=clr, label=label, alpha=alpha, picker=True)
                if restype == 'both':
                    sig_pre = res_pre[inds] / errs_pre[inds]
                    ax1.errorbar(x_subsec, sig_pre, yerr=len(errs_pre[inds]) * [1], fmt=mkr_pre,
                                 color=clr, label=label + " Prefit", alpha=alpha, picker=True)
            else:
                ax1.errorbar(x_subsec, res[inds], yerr=errs[inds], fmt=mkr,
                             color=clr, label=label, alpha=alpha, picker=True)
                if restype == 'both':
                    ax1.errorbar(x_subsec, res_pre[inds], yerr=errs_pre[inds], fmt=mkr_pre,
                                 color=clr, label=label + " Prefit", alpha=alpha, picker=True)
    # Axis labels.
    ax1.set_xlabel(r'TOA Number')
    if plotsig:
        if avg and whitened:
            ax1.set_ylabel('Average Residual/Uncertainty \n (Whitened)', multialignment='center')
        elif avg and not whitened:
            ax1.set_ylabel('Average Residual/Uncertainty')
        elif whitened and not avg:
            ax1.set_ylabel('Residual/Uncertainty \n (Whitened)', multialignment='center')
        else:
            ax1.set_ylabel('Residual/Uncertainty')
    else:
        if avg and whitened:
            ax1.set_ylabel(f'Average Residual ({unitstr}) \n (Whitened)', multialignment='center')
        elif avg and not whitened:
            ax1.set_ylabel(f'Average Residual ({unitstr})')
        elif whitened and not avg:
            ax1.set_ylabel(f'Residual ({unitstr}) \n (Whitened)', multialignment='center')
        else:
            ax1.set_ylabel(f'Residual ({unitstr})')
    if legend:
        # Wide legends get two rows and extra vertical offset.
        if len(CB) > 5:
            ncol = int(np.ceil(len(CB) / 2))
            y_offset = 1.15
        else:
            ncol = len(CB)
            y_offset = 1.0
        ax1.legend(loc='upper center', bbox_to_anchor=(0.5, y_offset + 1.0 / figsize[1]), ncol=ncol)
    if title:
        if len(CB) > 5:
            y_offset = 1.1
        else:
            y_offset = 1.0
        if isinstance(title, str):
            title_str = title
        else:
            title_str = "%s %s timing residuals" % (fitter.model.PSR.value, restype)
        plt.title(title_str, y=y_offset + 1.0 / figsize[1])
    if axs == None:
        plt.tight_layout()
    if save:
        ext = ""
        if whitened:
            ext += "_whitened"
        if avg:
            ext += "_averaged"
        if NB:
            ext += "_NB"
        else:
            ext += "_WB"
        if restype == 'prefit':
            ext += "_prefit"
        elif restype == 'postfit':
            ext += "_postfit"
        elif restype == "both":
            ext += "_pre_post_fit"
        plt.savefig("%s_resid_v_mjd%s.png" % (fitter.model.PSR.value, ext))
    if axs == None:
        # Define clickable points.
        text = ax1.text(0, 0, "")
        # Color used to highlight a clicked point.
        stamp_color = "#FD9927"

        def onclick(event):
            # BUGFIX: with mixed ecorr the plotted data are the combined
            # arrays, so use them here to match the x-axis length.
            if avg == True and mixed_ecorr == True:
                ydata_res = combo_res
                ydata_errs = combo_errs
            else:
                ydata_res = res
                ydata_errs = errs
            xdata = x
            if plotsig:
                ydata = (ydata_res / ydata_errs).decompose().value
            else:
                ydata = ydata_res.value
            # Get x and y data from click.
            xclick = event.xdata
            yclick = event.ydata
            # Calculate scaled distance, find closest point index.
            d = np.sqrt(((xdata - xclick) / 10.0) ** 2 + (ydata - yclick) ** 2)
            ind_close = np.where(np.min(d) == d)[0]
            # Highlight clicked point.
            ax1.scatter(xdata[ind_close], ydata[ind_close], marker='x', c=stamp_color)
            # Print point info.
            text.set_position((xdata[ind_close], ydata[ind_close]))
            if plotsig:
                text.set_text("TOA Params:\n MJD: %s \n Res/Err: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0]))
            else:
                text.set_text("TOA Params:\n MJD: %s \n Res: %.2f \n Index: %s" % (xdata[ind_close][0], ydata[ind_close], ind_close[0]))
        fig.canvas.mpl_connect('button_press_event', onclick)
    return
def plot_fd_res_v_freq(
    fitter,
    plotsig=False,
    comp_FD=True,
    avg=False,
    whitened=False,
    save=False,
    legend=True,
    title=True,
    axs=None,
    **kwargs,
):
    """
    Make a plot of the residuals vs. frequency, can do WB as well. Note, if WB fitter, comp_FD may not work.
    If comp_FD is True, the panels are organized as follows:
    Top: Residuals with FD parameters subtracted and the FD curve plotted as a dashed line.
    Middle: Best fit residuals with no FD parameters.
    Bottom: Residuals with FD correction included.
    Note - This function may take a while to run if there are many TOAs.
    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    plotsig [boolean] : If True plot number of measurements v. residuals/uncertainty, else v. residuals
        [default: False].
    comp_FD [boolean]: If True, will plot the residuals v. frequency with FD included, FD subtracted, and best fit
        without FD.
    avg [boolean] : If True and not wideband fitter, will compute and plot epoch-average residuals [default: False].
    whitened [boolean] : If True will compute and plot whitened residuals [default: False].
    save [boolean] : If True will save plot with the name "resid_v_mjd.png" Will add averaged/whitened
        as necessary [default: False].
    legend [boolean] : If False, will not print legend with plot [default: True].
    title [boolean] : If False, will not print plot title [default: True].
    axs [string] : If not None, should be defined subplot value and the figure will be used as part of a
        larger figure [default: None].
    Optional Arguments:
    --------------------
    res [list/array] : List or array of residual values to plot. Will override values from fitter object.
    errs [list/array] : List or array of residual error values to plot. Will override values from fitter object.
    freqs [list/array] : List or array of TOA frequencies to plot. Will override values from toa object.
    rcvr_bcknds[list/array] : List or array of TOA receiver-backend combinations. Will override values from toa object.
    figsize [tuple] : Size of the figure passed to matplotlib [default: (10,4)].
    fmt ['string'] : matplotlib format option for markers [default: ('x')]
    color ['string'] : matplotlib color option for plot [default: color dictionary in plot_utils.py file]
    alpha [float] : matplotlib alpha options for plot points [default: 0.5]
    """
    # Check if fitter is wideband or not; epoch averaging is only defined for narrowband.
    if fitter.is_wideband:
        NB = False
        if avg == True:
            raise ValueError(
                "Cannot epoch average wideband residuals, please change 'avg' to False."
            )
    else:
        NB = True
    # Check if want epoch averaged residuals
    if avg:
        avg_dict = fitter.resids.ecorr_average(use_noise_model=True)
    # Get residuals (kwarg override takes precedence)
    if "res" in kwargs.keys():
        res = kwargs["res"]
    else:
        if NB == True:
            if avg == True:
                res = avg_dict["time_resids"].to(u.us)
            else:
                res = fitter.resids.time_resids.to(u.us)
        else:
            res = fitter.resids.residual_objs["toa"].time_resids.to(u.us)
    # Check if we want whitened residuals (skipped when residuals came from kwargs)
    if whitened == True and ("res" not in kwargs.keys()):
        if avg == True:
            res = whiten_resids(avg_dict)
            res = res.to(u.us)
        else:
            res = whiten_resids(fitter)
            res = res.to(u.us)
    # Get errors (kwarg override takes precedence)
    if "errs" in kwargs.keys():
        errs = kwargs["errs"]
    else:
        if NB == True:
            if avg == True:
                errs = avg_dict["errors"].to(u.us)
            else:
                errs = fitter.resids.get_data_error().to(u.us)
        else:
            errs = fitter.resids.residual_objs["toa"].get_data_error().to(u.us)
    # Get receiver backends (one label per TOA, from the -f flag)
    if "rcvr_bcknds" in kwargs.keys():
        rcvr_bcknds = kwargs["rcvr_bcknds"]
    else:
        rcvr_bcknds = np.array(fitter.toas.get_flag_value("f")[0])
        if avg == True:
            # Collapse to one label per averaged epoch (first TOA of each epoch)
            avg_rcvr_bcknds = []
            for iis in avg_dict["indices"]:
                avg_rcvr_bcknds.append(rcvr_bcknds[iis[0]])
            rcvr_bcknds = np.array(avg_rcvr_bcknds)
    # Get the set of unique receiver-bandend combos
    RCVR_BCKNDS = set(rcvr_bcknds)
    # get frequencies (kwarg override takes precedence)
    if "freqs" in kwargs.keys():
        freqs = kwargs["freqs"]
    else:
        if avg == True:
            freqs = avg_dict["freqs"].value
        else:
            freqs = fitter.toas.get_freqs().value
    # Check if comparing the FD parameters; three stacked panels are needed,
    # so an externally supplied axis cannot be honored.
    if comp_FD:
        if axs != None:
            log.warn("Cannot do full comparison with three panels")
            axs = None
        if "figsize" in kwargs.keys():
            figsize = kwargs["figsize"]
        else:
            figsize = (4, 12)
        fig = plt.figure(figsize=figsize)
        # ax1: FD included (bottom), ax2: no-FD fit (middle), ax3: FD subtracted (top)
        ax1 = fig.add_subplot(313)
        ax2 = fig.add_subplot(312)
        ax3 = fig.add_subplot(311)
    else:
        if "figsize" in kwargs.keys():
            figsize = kwargs["figsize"]
        else:
            figsize = (4, 4)
        if axs == None:
            fig = plt.figure(figsize=figsize)
            ax1 = fig.add_subplot(111)
        else:
            ax1 = axs
    # Make the plot of residual vs. frequency
    for i, r_b in enumerate(RCVR_BCKNDS):
        inds = np.where(rcvr_bcknds == r_b)[0]
        if not inds.tolist():
            r_b_label = ""
        else:
            r_b_label = rcvr_bcknds[inds][0]
        # Get plot preferences
        # NOTE(review): 'markers' and 'colorscheme' are not defined in this
        # function -- presumed module-level lookup tables; confirm
        # 'colorscheme' exists at module scope (other functions in this file
        # derive it from 'colorschemes').
        if "fmt" in kwargs.keys():
            mkr = kwargs["fmt"]
        else:
            mkr = markers[r_b_label]
        if "color" in kwargs.keys():
            clr = kwargs["color"]
        else:
            clr = colorscheme[r_b_label]
        if "alpha" in kwargs.keys():
            alpha = kwargs["alpha"]
        else:
            alpha = 1.0
        if plotsig:
            # Residual divided by uncertainty; unit y-error bars
            sig = res[inds] / errs[inds]
            ax1.errorbar(
                freqs[inds],
                sig,
                yerr=len(errs[inds]) * [1],
                fmt=mkr,
                color=clr,
                label=r_b_label,
                alpha=alpha,
            )
        else:
            ax1.errorbar(
                freqs[inds],
                res[inds],
                yerr=errs[inds],
                fmt=mkr,
                color=clr,
                label=r_b_label,
                alpha=alpha,
            )
    # assign axis labels
    ax1.set_xlabel(r"Frequency (MHz)")
    ax1.grid(True)
    if plotsig:
        if avg and whitened:
            ylabel = "Average Residual/Uncertainty \n (Whitened)"
        elif avg and not whitened:
            ylabel = "Average Residual/Uncertainty"
        elif whitened and not avg:
            ylabel = "Residual/Uncertainty \n (Whitened)"
        else:
            ylabel = "Residual/Uncertainty"
    else:
        if avg and whitened:
            ylabel = "Average Residual ($\mu$s) \n (Whitened)"
        elif avg and not whitened:
            ylabel = "Average Residual ($\mu$s)"
        elif whitened and not avg:
            ylabel = "Residual ($\mu$s) \n (Whitened)"
        else:
            ylabel = "Residual ($\mu$s)"
    ax1.set_ylabel(ylabel)
    # Now if we want to show the other plots, we plot them
    if comp_FD:
        # Plot the residuals with FD parameters subtracted
        cur_fd = [param for param in fitter.model.params if "FD" in param]
        # Get the FD offsets
        FD_offsets = np.zeros(np.size(res))
        # Also need FD line (smooth curve sampled over the frequency range)
        sorted_freqs = np.linspace(np.min(freqs), np.max(freqs), 1000)
        FD_line = np.zeros(np.size(sorted_freqs))
        # FD delay model: sum_i FD_i * log(nu/1 GHz)^i, converted to microseconds
        for i, fd in enumerate(cur_fd):
            fd_val = getattr(fitter.model, fd).value * 10**6  # convert to microseconds
            FD_offsets += fd_val * np.log(freqs / 1000.0) ** (i + 1)
            FD_line += fd_val * np.log(sorted_freqs / 1000.0) ** (i + 1)
        # Now edit residuals (add the FD delay back in, i.e. subtract the correction)
        fd_cor_res = res.value + FD_offsets
        # Now we need to redo the fit without the FD parameters
        psr_fitter_nofd = copy.deepcopy(fitter)
        try:
            psr_fitter_nofd.model.remove_component("FD")
        except:
            log.warning("No FD parameters in the initial timing model...")
        # Check if fitter is wideband or not
        # NOTE(review): 'resids' is captured here, *before* fit_toas() below --
        # this assumes the residuals object is updated in place by the fit;
        # confirm against the PINT fitter implementation.
        if psr_fitter_nofd.is_wideband:
            resids = psr_fitter_nofd.resids.residual_objs["toa"]
        else:
            resids = psr_fitter_nofd.resids
        psr_fitter_nofd.fit_toas(1)
        # Now we need to figure out if these need to be whitened and/or averaged
        if avg:
            # NOTE(review): this rebinds the boolean 'avg' argument to the
            # averaging dictionary; later truthiness checks (the save-suffix
            # logic) still behave, but the shadowing is fragile.
            avg = psr_fitter_nofd.resids.ecorr_average(use_noise_model=True)
            avg_rcvr_bcknds = rcvr_bcknds
            if whitened:
                # need to whiten and average
                wres_avg = whiten_resids(avg)
                res_nofd = wres_avg.to(u.us).value
            else:
                # need to average
                res_nofd = avg["time_resids"].to(u.us).value
        elif whitened:
            # Need to whiten
            wres_nofd = whiten_resids(psr_fitter_nofd)
            res_nofd = wres_nofd.to(u.us).value
        else:
            res_nofd = resids.time_resids.to(u.us).value
        # Now plot the top (FD-subtracted) and middle (no-FD fit) panels
        for i, r_b in enumerate(RCVR_BCKNDS):
            inds = np.where(rcvr_bcknds == r_b)[0]
            if not inds.tolist():
                r_b_label = ""
            else:
                r_b_label = rcvr_bcknds[inds][0]
            # Get plot preferences
            if "fmt" in kwargs.keys():
                mkr = kwargs["fmt"]
            else:
                mkr = markers[r_b_label]
            if "color" in kwargs.keys():
                clr = kwargs["color"]
            else:
                clr = colorscheme[r_b_label]
            if "alpha" in kwargs.keys():
                alpha = kwargs["alpha"]
            else:
                alpha = 1.0
            if plotsig:
                sig = fd_cor_res[inds] / errs[inds]
                ax3.errorbar(
                    freqs[inds],
                    sig.value,
                    yerr=len(errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=r_b_label,
                    alpha=alpha,
                )
                sig_nofd = res_nofd[inds] / errs[inds].value
                ax2.errorbar(
                    freqs[inds],
                    sig_nofd,
                    yerr=len(errs[inds]) * [1],
                    fmt=mkr,
                    color=clr,
                    label=r_b_label,
                    alpha=alpha,
                )
            else:
                ax3.errorbar(
                    freqs[inds],
                    fd_cor_res[inds],
                    yerr=errs[inds].value,
                    fmt=mkr,
                    color=clr,
                    label=r_b_label,
                    alpha=alpha,
                )
                ax2.errorbar(
                    freqs[inds],
                    res_nofd[inds],
                    yerr=errs[inds].value,
                    fmt=mkr,
                    color=clr,
                    label=r_b_label,
                    alpha=alpha,
                )
        # Overlay the smooth FD curve on the FD-subtracted panel
        ax3.plot(sorted_freqs, FD_line, c="k", ls="--")
        # assign axis labels
        ax3.set_xlabel(r"Frequency (MHz)")
        ax3.set_ylabel(ylabel)
        ax3.grid(True)
        ax2.set_xlabel(r"Frequency (MHz)")
        ax2.set_ylabel(ylabel)
        ax2.grid(True)
    if legend:
        if comp_FD:
            ax3.legend(
                loc="upper center",
                bbox_to_anchor=(0.5, 1.0 + 1.0 / figsize[1]),
                ncol=int(len(RCVR_BCKNDS) / 2),
            )
        else:
            ax1.legend(
                loc="upper center",
                bbox_to_anchor=(0.5, 1.0 + 1.0 / figsize[1]),
                ncol=int(len(RCVR_BCKNDS) / 2),
            )
    if title:
        plt.title(
            "%s FD Paramter Check" % (fitter.model.PSR.value), y=1.0 + 1.0 / figsize[1]
        )
    plt.tight_layout()
    if save:
        ext = ""
        if whitened:
            ext += "_whitened"
        if avg:
            ext += "_averaged"
        if NB:
            ext += "_NB"
        else:
            ext += "_WB"
        plt.savefig("%s_FD_resid_v_freq%s.png" % (fitter.model.PSR.value, ext))
    return
"""
We also offer some options for convenience plotting functions, one that will show all possible summary plots, and
another that will show just the summary plots that are typically created in finalize_timing.py in that order.
"""
def summary_plots(
    fitter, title=None, legends=False, save=False, avg=True, whitened=True
):
    """
    Make a composite set of summary plots for a set of TOAs.

    NOTE - this is not the same set of plots as produced by the pdf writer.

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    title [string] : If not None, string added as the figure title [default: None].
    legends [boolean] : If True, will add legends to ALL plots [default: False].
    save [boolean] : If True will save plot with the name "psrname_summary_plots.png" [default: False].
    avg [boolean] : If True and not wideband fitter, also make epoch-averaged residual plots [default: True].
    whitened [boolean] : If True also make whitened residual plots [default: True].
    """
    # Epoch averaging is not defined for wideband residuals.
    if fitter.is_wideband and avg == True:
        raise ValueError(
            "Cannot epoch average wideband residuals, please change 'avg' to False."
        )
    # Each optional family of panels (whitened, averaged, whitened+averaged)
    # adds the same amount of figure height and gridspec rows.
    n_extra = sum([bool(whitened), bool(avg), bool(whitened and avg)])
    figlength = 18 + 18 * n_extra
    gs_rows = 6 + 4 * n_extra
    # Pulsars without a binary model skip one orbital-phase panel per family.
    if not hasattr(fitter.model, "binary_model_name"):
        figlength -= 3 * (1 + n_extra)
        gs_rows -= 1 + n_extra
    fig = plt.figure(figsize=(12, figlength))
    if title != None:
        plt.title(title, y=1.015, size=16)
    gs = fig.add_gridspec(gs_rows, 2)
    row = 0
    # Residuals vs. time.
    ax = fig.add_subplot(gs[row, :])
    plot_residuals_time(fitter, title=False, axs=ax, figsize=(12, 3))
    row += 1
    # Residual/uncertainty vs. time.
    ax = fig.add_subplot(gs[row, :])
    plot_residuals_time(
        fitter, title=False, legend=False, plotsig=True, axs=ax, figsize=(12, 3)
    )
    row += 1
    # Residuals vs. orbital phase (binary pulsars only).
    if hasattr(fitter.model, "binary_model_name"):
        ax = fig.add_subplot(gs[row, :])
        plot_residuals_orb(fitter, title=False, legend=False, axs=ax, figsize=(12, 3))
        row += 1
    # Side-by-side measurement histograms (vs. residual, vs. residual/uncertainty).
    ax_l = fig.add_subplot(gs[row, 0])
    ax_r = fig.add_subplot(gs[row, 1])
    plot_measurements_v_res(
        fitter,
        nbin=50,
        plotsig=False,
        title=False,
        legend=False,
        axs=ax_l,
        figsize=(6, 3),
    )
    plot_measurements_v_res(
        fitter,
        nbin=50,
        plotsig=True,
        title=False,
        legend=False,
        axs=ax_r,
        figsize=(6, 3),
    )
    row += 1
    # DMX vs. time.
    ax = fig.add_subplot(gs[row, :])
    plot_dmx_time(
        fitter,
        savedmx="dmxparse.out",
        legend=False,
        title=False,
        axs=ax,
        figsize=(12, 3),
    )
    row += 1
    # Residuals vs. frequency.
    ax = fig.add_subplot(gs[row, :])
    plot_residuals_freq(fitter, title=False, legend=False, axs=ax, figsize=(12, 3))
    row += 1
    # Whitened-residual family.
    if whitened:
        ax = fig.add_subplot(gs[row, :])
        plot_residuals_time(
            fitter, title=False, whitened=True, axs=ax, figsize=(12, 3)
        )
        row += 1
        ax = fig.add_subplot(gs[row, :])
        plot_residuals_time(
            fitter,
            title=False,
            legend=False,
            plotsig=True,
            whitened=True,
            axs=ax,
            figsize=(12, 3),
        )
        row += 1
        if hasattr(fitter.model, "binary_model_name"):
            ax = fig.add_subplot(gs[row, :])
            plot_residuals_orb(
                fitter,
                title=False,
                legend=False,
                whitened=True,
                axs=ax,
                figsize=(12, 3),
            )
            row += 1
        ax_l = fig.add_subplot(gs[row, 0])
        ax_r = fig.add_subplot(gs[row, 1])
        plot_measurements_v_res(
            fitter,
            nbin=50,
            plotsig=False,
            title=False,
            legend=False,
            whitened=True,
            axs=ax_l,
            figsize=(6, 3),
        )
        plot_measurements_v_res(
            fitter,
            nbin=50,
            plotsig=True,
            title=False,
            legend=False,
            whitened=True,
            axs=ax_r,
            figsize=(6, 3),
        )
        row += 1
    # Epoch-averaged family.
    if avg:
        ax = fig.add_subplot(gs[row, :])
        plot_residuals_time(fitter, title=False, avg=True, axs=ax, figsize=(12, 3))
        row += 1
        ax = fig.add_subplot(gs[row, :])
        plot_residuals_time(
            fitter,
            title=False,
            legend=False,
            plotsig=True,
            avg=True,
            axs=ax,
            figsize=(12, 3),
        )
        row += 1
        if hasattr(fitter.model, "binary_model_name"):
            ax = fig.add_subplot(gs[row, :])
            plot_residuals_orb(
                fitter, title=False, legend=False, avg=True, axs=ax, figsize=(12, 3)
            )
            row += 1
        ax_l = fig.add_subplot(gs[row, 0])
        ax_r = fig.add_subplot(gs[row, 1])
        plot_measurements_v_res(
            fitter,
            nbin=50,
            plotsig=False,
            title=False,
            legend=False,
            avg=True,
            axs=ax_l,
            figsize=(6, 3),
        )
        plot_measurements_v_res(
            fitter,
            nbin=50,
            plotsig=True,
            title=False,
            legend=False,
            avg=True,
            axs=ax_r,
            figsize=(6, 3),
        )
        row += 1
    # Whitened + epoch-averaged family.
    if avg and whitened:
        ax = fig.add_subplot(gs[row, :])
        plot_residuals_time(fitter, avg=True, whitened=True, axs=ax, figsize=(12, 3))
        row += 1
        ax = fig.add_subplot(gs[row, :])
        plot_residuals_time(
            fitter,
            title=False,
            legend=False,
            plotsig=True,
            avg=True,
            whitened=True,
            axs=ax,
            figsize=(12, 3),
        )
        row += 1
        if hasattr(fitter.model, "binary_model_name"):
            ax = fig.add_subplot(gs[row, :])
            plot_residuals_orb(
                fitter,
                title=False,
                legend=False,
                avg=True,
                whitened=True,
                axs=ax,
                figsize=(12, 3),
            )
            row += 1
        ax_l = fig.add_subplot(gs[row, 0])
        ax_r = fig.add_subplot(gs[row, 1])
        plot_measurements_v_res(
            fitter,
            nbin=50,
            plotsig=False,
            title=False,
            legend=False,
            avg=True,
            whitened=True,
            axs=ax_l,
            figsize=(6, 3),
        )
        plot_measurements_v_res(
            fitter,
            nbin=50,
            plotsig=True,
            title=False,
            legend=False,
            avg=True,
            whitened=True,
            axs=ax_r,
            figsize=(6, 3),
        )
        row += 1
    plt.tight_layout()
    if save:
        plt.savefig("%s_summary_plots.png" % (fitter.model.PSR.value))
    return
"""We also define a function to output the summary plots exactly as is done in finalize_timing.py (for now)"""
def summary_plots_ft(fitter, title=None, legends=False, save=False):
    """
    Make a composite set of summary plots for a set of TOAs, matching the
    layout produced by finalize_timing.py.

    NOTE - This is not the same set of plots as will be in the pdf writer.

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    title [string or None] : If not None, used as the figure title
        [default: None].
    legends [boolean] : Currently unused; kept for interface compatibility
        [default: False].
    save [boolean] : If True will save plot with the name
        "psrname_summary_plots_FT.png" [default: False].
    """
    # Define the figure
    # Determine how long the figure size needs to be
    # 13 gridspec rows at ~4.5 in each when all panel families are present.
    figlength = 18 * 3
    gs_rows = 13
    # Non-binary pulsars have no orbital-phase panels: drop 3 rows.
    if not hasattr(fitter.model, "binary_model_name"):
        figlength -= 9
        gs_rows -= 3
    # Wideband fits skip the epoch-averaged panels: drop 3 more rows.
    if fitter.is_wideband:
        figlength -= 9
        gs_rows -= 3
    fig = plt.figure(figsize=(12, figlength)) # not sure what we'll need for a fig size
    # NOTE(review): prefer "title is not None" over "!= None".
    if title != None:
        plt.title(title, y=1.015, size=16)
    gs = fig.add_gridspec(gs_rows, 2)
    # k tracks the next free gridspec row; count is a fixed base offset.
    count = 0
    k = 0
    # First plot is all residuals vs. time.
    ax0 = fig.add_subplot(gs[count, :])
    plot_residuals_time(fitter, title=False, axs=ax0, figsize=(12, 3))
    k += 1
    # Then the epoch averaged residuals v. time
    if not fitter.is_wideband:
        ax10 = fig.add_subplot(gs[count + k, :])
        plot_residuals_time(
            fitter, title=False, legend=False, avg=True, axs=ax10, figsize=(12, 3)
        )
        k += 1
    # Epoch averaged vs. orbital phase
    if hasattr(fitter.model, "binary_model_name"):
        if not fitter.is_wideband:
            ax12 = fig.add_subplot(gs[count + k, :])
            plot_residuals_orb(
                fitter, title=False, legend=False, avg=True, axs=ax12, figsize=(12, 3)
            )
            k += 1
        else:
            # Wideband: no epoch averaging available, plot all residuals.
            ax12 = fig.add_subplot(gs[count + k, :])
            plot_residuals_orb(
                fitter, title=False, legend=False, axs=ax12, figsize=(12, 3)
            )
            k += 1
    # And DMX vs. time
    ax4 = fig.add_subplot(gs[count + k, :])
    plot_dmx_time(
        fitter,
        savedmx="dmxparse.out",
        legend=False,
        title=False,
        axs=ax4,
        figsize=(12, 3),
    )
    k += 1
    # Whitened residuals v. time
    ax6 = fig.add_subplot(gs[count + k, :])
    plot_residuals_time(fitter, whitened=True, axs=ax6, figsize=(12, 3))
    k += 1
    # Whitened epoch averaged residuals v. time
    if not fitter.is_wideband:
        ax15 = fig.add_subplot(gs[count + k, :])
        plot_residuals_time(
            fitter,
            title=False,
            legend=False,
            plotsig=False,
            avg=True,
            whitened=True,
            axs=ax15,
            figsize=(12, 3),
        )
        k += 1
    # Whitened epoch averaged residuals v. orbital phase
    if hasattr(fitter.model, "binary_model_name"):
        if not fitter.is_wideband:
            ax16 = fig.add_subplot(gs[count + k, :])
            plot_residuals_orb(
                fitter,
                title=False,
                legend=False,
                avg=True,
                whitened=True,
                axs=ax16,
                figsize=(12, 3),
            )
            k += 1
        else:
            ax16 = fig.add_subplot(gs[count + k, :])
            plot_residuals_orb(
                fitter,
                title=False,
                legend=False,
                avg=False,
                whitened=True,
                axs=ax16,
                figsize=(12, 3),
            )
            k += 1
    # Now add the measurement vs. uncertainty for both all residuals and epoch averaged
    # (two half-width histograms sharing one gridspec row).
    ax3_0 = fig.add_subplot(gs[count + k, 0])
    ax3_1 = fig.add_subplot(gs[count + k, 1])
    plot_measurements_v_res(
        fitter,
        nbin=50,
        title=False,
        legend=False,
        plotsig=False,
        whitened=True,
        axs=ax3_0,
        figsize=(6, 3),
    )
    if not fitter.is_wideband:
        plot_measurements_v_res(
            fitter,
            nbin=50,
            title=False,
            legend=False,
            avg=True,
            whitened=True,
            axs=ax3_1,
            figsize=(6, 3),
        )
        k += 1
    else:
        # NOTE(review): wideband fallback uses whitened=False here, unlike the
        # whitened=True left panel — confirm this asymmetry is intended.
        plot_measurements_v_res(
            fitter,
            nbin=50,
            title=False,
            legend=False,
            avg=False,
            whitened=False,
            axs=ax3_1,
            figsize=(6, 3),
        )
        k += 1
    # Whitened residual/uncertainty v. time
    ax26 = fig.add_subplot(gs[count + k, :])
    plot_residuals_time(
        fitter,
        plotsig=True,
        title=False,
        legend=False,
        whitened=True,
        axs=ax26,
        figsize=(12, 3),
    )
    k += 1
    # Epoch averaged Whitened residual/uncertainty v. time
    if not fitter.is_wideband:
        ax25 = fig.add_subplot(gs[count + k, :])
        plot_residuals_time(
            fitter,
            title=False,
            legend=False,
            plotsig=True,
            avg=True,
            whitened=True,
            axs=ax25,
            figsize=(12, 3),
        )
        k += 1
    # Epoch averaged Whitened residual/uncertainty v. orbital phase
    if hasattr(fitter.model, "binary_model_name"):
        if not fitter.is_wideband:
            ax36 = fig.add_subplot(gs[count + k, :])
            plot_residuals_orb(
                fitter,
                title=False,
                legend=False,
                plotsig=True,
                avg=True,
                whitened=True,
                axs=ax36,
                figsize=(12, 3),
            )
            k += 1
        else:
            ax36 = fig.add_subplot(gs[count + k, :])
            plot_residuals_orb(
                fitter,
                title=False,
                legend=False,
                plotsig=True,
                avg=False,
                whitened=True,
                axs=ax36,
                figsize=(12, 3),
            )
            k += 1
    # Now add the measurement vs. uncertainty for both all residuals/uncertainty and epoch averaged/uncertainty
    ax17_0 = fig.add_subplot(gs[count + k, 0])
    ax17_1 = fig.add_subplot(gs[count + k, 1])
    plot_measurements_v_res(
        fitter,
        nbin=50,
        plotsig=True,
        title=False,
        legend=False,
        whitened=True,
        axs=ax17_0,
        figsize=(6, 3),
    )
    if not fitter.is_wideband:
        plot_measurements_v_res(
            fitter,
            nbin=50,
            title=False,
            plotsig=True,
            legend=False,
            avg=True,
            whitened=True,
            axs=ax17_1,
            figsize=(6, 3),
        )
        k += 1
    else:
        plot_measurements_v_res(
            fitter,
            nbin=50,
            title=False,
            plotsig=True,
            legend=False,
            avg=False,
            whitened=False,
            axs=ax17_1,
            figsize=(6, 3),
        )
        k += 1
    # Now plot the frequencies of the TOAs vs. time
    ax5 = fig.add_subplot(gs[count + k, :])
    plot_residuals_freq(fitter, title=False, legend=False, axs=ax5, figsize=(12, 3))
    k += 1
    plt.tight_layout()
    if save:
        plt.savefig("%s_summary_plots_FT.png" % (fitter.model.PSR.value))
    return
# JUST THE PLOTS FOR THE PDF WRITERS LEFT
def plots_for_summary_pdf_nb(fitter, title=None, legends=False):
    """
    Make a composite set of summary plots for sets of TOAs to be put into a
    summary pdf.

    This is for Narrowband timing only. For Wideband timing, use
    `plots_for_summary_pdf_wb`. This function saves four figures named
    "psrname"_summary_plot_#_nb.png, where # is an integer from 1-4.

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    title [string or None] : If not None, used as the figure title
        [default: None].
    legends [boolean] : Currently unused; kept for interface compatibility
        [default: False].

    Raises
    ------
    ValueError : if `fitter` is a wideband fitter.
    """
    if fitter.is_wideband:
        raise ValueError(
            "Cannot use this function with WidebandTOAFitter, please use `plots_for_summary_pdf_wb` instead."
        )
    # Need to make four sets of plots; page 4 (frequencies) is half-height.
    for ii in range(4):
        if ii != 3:
            fig = plt.figure(figsize=(8, 10.0), dpi=100)
        else:
            fig = plt.figure(figsize=(8, 5), dpi=100)
        # NOTE(review): prefer "title is not None" over "!= None".
        if title != None:
            plt.title(title, y=1.08, size=14)
        if ii == 0:
            # Page 1: raw/averaged residuals, orbital phase, DMX.
            gs = fig.add_gridspec(nrows=4, ncols=1)
            ax0 = fig.add_subplot(gs[0, :])
            ax1 = fig.add_subplot(gs[1, :])
            ax2 = fig.add_subplot(gs[2, :])
            ax3 = fig.add_subplot(gs[3, :])
            # Plot residuals v. time
            plot_residuals_time(fitter, title=False, axs=ax0, figsize=(8, 2.5))
            # Plot averaged residuals v. time (epoch averaging needs ECORR).
            if "ecorr_noise" in fitter.model.get_components_by_category().keys():
                plot_residuals_time(
                    fitter,
                    avg=True,
                    axs=ax1,
                    title=False,
                    legend=False,
                    figsize=(8, 2.5),
                )
            else:
                log.warning(
                    "ECORR not in model, cannot generate epoch averaged residuals. Plots will show all residuals."
                )
                plot_residuals_time(
                    fitter,
                    avg=False,
                    axs=ax1,
                    title=False,
                    legend=False,
                    figsize=(8, 2.5),
                )
            # Plot residuals v orbital phase
            if hasattr(fitter.model, "binary_model_name"):
                if "ecorr_noise" in fitter.model.get_components_by_category().keys():
                    plot_residuals_orb(
                        fitter,
                        title=False,
                        legend=False,
                        avg=True,
                        axs=ax2,
                        figsize=(8, 2.5),
                    )
                else:
                    plot_residuals_orb(
                        fitter,
                        title=False,
                        legend=False,
                        avg=False,
                        axs=ax2,
                        figsize=(8, 2.5),
                    )
            # plot dmx v. time
            if "dispersion_dmx" in fitter.model.get_components_by_category().keys():
                plot_dmx_time(
                    fitter,
                    savedmx="dmxparse.out",
                    legend=False,
                    title=False,
                    axs=ax3,
                    figsize=(8, 2.5),
                )
            else:
                log.warning("No DMX bins in timing model, cannot plot DMX v. Time.")
            plt.tight_layout()
            plt.savefig("%s_summary_plot_1_nb.png" % (fitter.model.PSR.value))
            plt.close()
        elif ii == 1:
            # Page 2: whitened residuals; the orbital-phase row only exists
            # for binaries, so the grid shrinks by one row otherwise.
            if hasattr(fitter.model, "binary_model_name"):
                gs = fig.add_gridspec(4, 2)
                ax2 = fig.add_subplot(gs[2, :])
                ax3 = fig.add_subplot(gs[3, 0])
                ax4 = fig.add_subplot(gs[3, 1])
            else:
                gs = fig.add_gridspec(3, 2)
                ax3 = fig.add_subplot(gs[2, 0])
                ax4 = fig.add_subplot(gs[2, 1])
            ax0 = fig.add_subplot(gs[0, :])
            ax1 = fig.add_subplot(gs[1, :])
            # plot whitened residuals v time
            plot_residuals_time(
                fitter, title=False, whitened=True, axs=ax0, figsize=(8, 2.5)
            )
            # plot whitened, epoch averaged residuals v time
            if "ecorr_noise" in fitter.model.get_components_by_category().keys():
                plot_residuals_time(
                    fitter,
                    title=False,
                    legend=False,
                    avg=True,
                    whitened=True,
                    axs=ax1,
                    figsize=(8, 2.5),
                )
            else:
                plot_residuals_time(
                    fitter,
                    title=False,
                    legend=False,
                    avg=False,
                    whitened=True,
                    axs=ax1,
                    figsize=(8, 2.5),
                )
            # Plot whitened, epoch averaged residuals v orbital phase
            if hasattr(fitter.model, "binary_model_name"):
                if "ecorr_noise" in fitter.model.get_components_by_category().keys():
                    plot_residuals_orb(
                        fitter,
                        title=False,
                        legend=False,
                        avg=True,
                        whitened=True,
                        axs=ax2,
                        figsize=(8, 2.5),
                    )
                else:
                    plot_residuals_orb(
                        fitter,
                        title=False,
                        legend=False,
                        avg=False,
                        whitened=True,
                        axs=ax2,
                        figsize=(8, 2.5),
                    )
            # plot number of whitened residuals histogram
            plot_measurements_v_res(
                fitter,
                nbin=50,
                title=False,
                legend=False,
                whitened=True,
                axs=ax3,
                figsize=(4, 2.5),
            )
            # plot number of whitened, epoch averaged residuals histogram
            if "ecorr_noise" in fitter.model.get_components_by_category().keys():
                plot_measurements_v_res(
                    fitter,
                    nbin=50,
                    title=False,
                    legend=False,
                    avg=True,
                    whitened=True,
                    axs=ax4,
                    figsize=(4, 2.5),
                )
            else:
                plot_measurements_v_res(
                    fitter,
                    nbin=50,
                    title=False,
                    legend=False,
                    avg=False,
                    whitened=True,
                    axs=ax4,
                    figsize=(4, 2.5),
                )
            plt.tight_layout()
            plt.savefig("%s_summary_plot_2_nb.png" % (fitter.model.PSR.value))
            plt.close()
        elif ii == 2:
            # Page 3: same layout as page 2 but residuals divided by
            # uncertainty (plotsig=True).
            if hasattr(fitter.model, "binary_model_name"):
                gs = fig.add_gridspec(4, 2)
                ax2 = fig.add_subplot(gs[2, :])
                ax3 = fig.add_subplot(gs[3, 0])
                ax4 = fig.add_subplot(gs[3, 1])
            else:
                gs = fig.add_gridspec(3, 2)
                ax3 = fig.add_subplot(gs[2, 0])
                ax4 = fig.add_subplot(gs[2, 1])
            ax0 = fig.add_subplot(gs[0, :])
            ax1 = fig.add_subplot(gs[1, :])
            # plot whitened residuals/uncertainty v. time
            plot_residuals_time(
                fitter,
                plotsig=True,
                title=False,
                whitened=True,
                axs=ax0,
                figsize=(8, 2.5),
            )
            # plot whitened, epoch averaged residuals/uncertainty v. time
            if "ecorr_noise" in fitter.model.get_components_by_category().keys():
                plot_residuals_time(
                    fitter,
                    title=False,
                    legend=False,
                    plotsig=True,
                    avg=True,
                    whitened=True,
                    axs=ax1,
                    figsize=(8, 2.5),
                )
            else:
                plot_residuals_time(
                    fitter,
                    title=False,
                    legend=False,
                    plotsig=True,
                    avg=False,
                    whitened=True,
                    axs=ax1,
                    figsize=(8, 2.5),
                )
            # plot whitened, epoch averaged residuals/uncertainty v. orbital phase
            if hasattr(fitter.model, "binary_model_name"):
                if "ecorr_noise" in fitter.model.get_components_by_category().keys():
                    plot_residuals_orb(
                        fitter,
                        title=False,
                        legend=False,
                        plotsig=True,
                        avg=True,
                        whitened=True,
                        axs=ax2,
                        figsize=(8, 2.5),
                    )
                else:
                    plot_residuals_orb(
                        fitter,
                        title=False,
                        legend=False,
                        plotsig=True,
                        avg=False,
                        whitened=True,
                        axs=ax2,
                        figsize=(8, 2.5),
                    )
            # plot number of whitened residuals/uncertainty histogram
            plot_measurements_v_res(
                fitter,
                nbin=50,
                plotsig=True,
                title=False,
                legend=False,
                whitened=True,
                axs=ax3,
                figsize=(4, 2.5),
            )
            # plot number of whitened, epoch averaged residuals/uncertainties histogram
            if "ecorr_noise" in fitter.model.get_components_by_category().keys():
                plot_measurements_v_res(
                    fitter,
                    nbin=50,
                    plotsig=True,
                    title=False,
                    legend=False,
                    avg=True,
                    whitened=True,
                    axs=ax4,
                    figsize=(4, 2.5),
                )
            else:
                plot_measurements_v_res(
                    fitter,
                    nbin=50,
                    plotsig=True,
                    title=False,
                    legend=False,
                    avg=False,
                    whitened=True,
                    axs=ax4,
                    figsize=(4, 2.5),
                )
            plt.tight_layout()
            plt.savefig("%s_summary_plot_3_nb.png" % (fitter.model.PSR.value))
            plt.close()
        elif ii == 3:
            # Page 4: TOA frequencies vs. time.
            gs = fig.add_gridspec(1, 1)
            ax0 = fig.add_subplot(gs[0])
            plot_residuals_freq(
                fitter, title=False, legend=True, axs=ax0, figsize=(8, 4)
            )
            plt.tight_layout()
            plt.savefig("%s_summary_plot_4_nb.png" % (fitter.model.PSR.value))
            plt.close()
def plots_for_summary_pdf_wb(fitter, title=None, legends=False):
    """
    Make a composite set of summary plots for sets of TOAs to be put into a
    summary pdf.

    This is for Wideband timing only. For Narrowband timing, use
    `plots_for_summary_pdf_nb`. This function saves four figures named
    "psrname"_summary_plot_#_wb.png, where # is an integer from 1-4.

    Arguments
    ---------
    fitter [object] : The PINT fitter object.
    title [string or None] : If not None, used as the figure title
        [default: None].
    legends [boolean] : Currently unused; kept for interface compatibility
        [default: False].

    Raises
    ------
    ValueError : if `fitter` is not a wideband fitter.
    """
    if not fitter.is_wideband:
        raise ValueError(
            "Cannot use this function with non-WidebandTOAFitter, please use `plots_for_summary_pdf_nb` instead."
        )
    # Need to make four sets of plots; page 4 (frequencies) is half-height.
    for ii in range(4):
        if ii != 3:
            fig = plt.figure(figsize=(8, 10.0), dpi=100)
        else:
            fig = plt.figure(figsize=(8, 5), dpi=100)
        # NOTE(review): prefer "title is not None" over "!= None".
        if title != None:
            plt.title(title, y=1.08, size=14)
        if ii == 0:
            # Page 1: time residuals, DM residuals, orbital phase (binaries
            # only — grid shrinks by one row otherwise), DMX.
            if hasattr(fitter.model, "binary_model_name"):
                gs = fig.add_gridspec(nrows=4, ncols=1)
                ax2 = fig.add_subplot(gs[2, :])
                ax3 = fig.add_subplot(gs[3, :])
            else:
                gs = fig.add_gridspec(nrows=3, ncols=1)
                ax3 = fig.add_subplot(gs[2, :])
            ax0 = fig.add_subplot(gs[0, :])
            ax1 = fig.add_subplot(gs[1, :])
            # Plot time residuals v. time
            plot_residuals_time(fitter, title=False, axs=ax0, figsize=(8, 2.5))
            # Plot DM residuals v. time
            plot_dm_residuals(
                fitter, save=False, legend=False, title=False, axs=ax1, figsize=(8, 2.5)
            )
            # Plot time residuals v. orbital phase
            if hasattr(fitter.model, "binary_model_name"):
                plot_residuals_orb(
                    fitter, title=False, legend=False, axs=ax2, figsize=(8, 2.5)
                )
            plot_dmx_time(
                fitter,
                savedmx="dmxparse.out",
                legend=False,
                title=False,
                axs=ax3,
                figsize=(8, 2.5),
            )
            plt.tight_layout()
            plt.savefig("%s_summary_plot_1_wb.png" % (fitter.model.PSR.value))
            plt.close()
        elif ii == 1:
            # Page 2: whitened residuals and histograms.
            if hasattr(fitter.model, "binary_model_name"):
                gs = fig.add_gridspec(3, 2)
                ax2 = fig.add_subplot(gs[1, :])
                ax3 = fig.add_subplot(gs[2, 0])
                ax4 = fig.add_subplot(gs[2, 1])
            else:
                gs = fig.add_gridspec(2, 2)
                ax3 = fig.add_subplot(gs[1, 0])
                ax4 = fig.add_subplot(gs[1, 1])
            ax0 = fig.add_subplot(gs[0, :])
            # ax1 = fig.add_subplot(gs[1,:])
            # Plot whitened time residuals v. time
            plot_residuals_time(
                fitter, title=False, whitened=True, axs=ax0, figsize=(8, 2.5)
            )
            # Plot whitened time residuals v. orbital phase
            if hasattr(fitter.model, "binary_model_name"):
                plot_residuals_orb(
                    fitter,
                    title=False,
                    legend=False,
                    whitened=True,
                    axs=ax2,
                    figsize=(8, 2.5),
                )
            # Plot number of whitened residuals histograms
            plot_measurements_v_res(
                fitter,
                nbin=50,
                title=False,
                plotsig=False,
                legend=False,
                whitened=True,
                axs=ax3,
                figsize=(4, 2.5),
            )
            # plot number of DM residuals histograms
            plot_measurements_v_dmres(
                fitter, nbin=50, legend=False, title=False, axs=ax4
            )
            plt.tight_layout()
            plt.savefig("%s_summary_plot_2_wb.png" % (fitter.model.PSR.value))
            plt.close()
        elif ii == 2:
            # Page 3: same as page 2 but residuals divided by uncertainty.
            if hasattr(fitter.model, "binary_model_name"):
                gs = fig.add_gridspec(4, 2)
                ax2 = fig.add_subplot(gs[2, :])
                ax3 = fig.add_subplot(gs[3, 0])
                ax4 = fig.add_subplot(gs[3, 1])
            else:
                gs = fig.add_gridspec(3, 2)
                ax3 = fig.add_subplot(gs[2, 0])
                ax4 = fig.add_subplot(gs[2, 1])
            ax0 = fig.add_subplot(gs[0, :])
            ax1 = fig.add_subplot(gs[1, :])
            # plot whitened time residuals/uncertainty v time
            plot_residuals_time(
                fitter,
                plotsig=True,
                title=False,
                whitened=True,
                axs=ax0,
                figsize=(8, 2.5),
            )
            # Plot DM residuals/uncertainty v. time
            plot_dm_residuals(
                fitter,
                plotsig=True,
                save=False,
                legend=False,
                title=False,
                axs=ax1,
                figsize=(8, 2.5),
            )
            # Plot whitened time residuals/uncertainty v orbital phase
            if hasattr(fitter.model, "binary_model_name"):
                plot_residuals_orb(
                    fitter,
                    title=False,
                    legend=False,
                    plotsig=True,
                    whitened=True,
                    axs=ax2,
                    figsize=(8, 2.5),
                )
            # plot number of whitened time residuals/uncertainty histograms
            plot_measurements_v_res(
                fitter,
                nbin=50,
                title=False,
                plotsig=True,
                legend=False,
                whitened=True,
                axs=ax3,
                figsize=(4, 2.5),
            )
            # plot number of DM residuals/uncertainty histograms
            plot_measurements_v_dmres(
                fitter, plotsig=True, nbin=50, legend=False, title=False, axs=ax4
            )
            plt.tight_layout()
            plt.savefig("%s_summary_plot_3_wb.png" % (fitter.model.PSR.value))
            plt.close()
        elif ii == 3:
            # Page 4: TOA frequencies vs. time.
            gs = fig.add_gridspec(1, 1)
            ax0 = fig.add_subplot(gs[0])
            plot_residuals_freq(
                fitter, title=False, legend=True, axs=ax0, figsize=(8, 4)
            )
            plt.tight_layout()
            plt.savefig("%s_summary_plot_4_wb.png" % (fitter.model.PSR.value))
            plt.close()
def plot_settings(colorby="f"):
    """
    Initialize matplotlib rc params and define the color/marker scheme.

    Parameters
    ==========
    colorby: str, optional
        Flag used to group TOAs for coloring; passed through to
        set_color_and_marker [default: "f"].

    Returns
    =======
    markerscheme, colorscheme: the marker and color mappings returned by
        set_color_and_marker (marker mapping first).
    """
    fig_width_pt = 620
    inches_per_pt = 1.0 / 72.27  # Convert pt to inches
    golden_mean = (np.sqrt(5) - 1.0) / 2.0  # Aesthetic ratio
    fig_width = fig_width_pt * inches_per_pt  # width in inches
    fig_height = fig_width * golden_mean * 2  # height in inches
    fig_size = [fig_width, fig_height]
    plotting_params = {
        "backend": "pdf",
        "axes.labelsize": 12,
        "lines.markersize": 4,
        "font.size": 12,
        "xtick.major.size": 6,
        "xtick.minor.size": 3,
        "ytick.major.size": 6,
        "ytick.minor.size": 3,
        "xtick.major.width": 0.5,
        "ytick.major.width": 0.5,
        "xtick.minor.width": 0.5,
        "ytick.minor.width": 0.5,
        "lines.markeredgewidth": 1,
        "axes.linewidth": 1.2,
        "legend.fontsize": 10,
        "xtick.labelsize": 12,
        "ytick.labelsize": 10,
        "savefig.dpi": 400,
        "path.simplify": True,
        "font.family": "serif",
        "font.serif": "Times",
        # NOTE: usetex requires a working LaTeX installation at plot time.
        "text.usetex": True,
        "figure.figsize": fig_size,
        "text.latex.preamble": r"\usepackage{amsmath}",
    }
    plt.rcParams.update(plotting_params)
    colorscheme, markerscheme = set_color_and_marker(colorby)
    return markerscheme, colorscheme
def get_fitter(yaml):
    """
    Get the fitter and model from a given YAML

    Parameters
    ==========
    yaml: str
        yaml to use for locating latest results

    Returns
    =======
    fo: fitter object constructed from the configuration
    mo: timing model object (with per-receiver JUMPs added)
    """
    tc = TimingConfiguration(yaml)
    # Load excised, pickled TOAs and apply any manual cuts from the config.
    mo, to = tc.get_model_and_toas(excised=True, usepickle=True)
    tc.manual_cuts(to)
    receivers = lu.get_receivers(to)
    # Wideband data gets DM JUMPs per receiver; narrowband gets phase JUMPs.
    if tc.get_toa_type() == "WB":
        lu.add_feDMJumps(mo, receivers)
    else:
        lu.add_feJumps(mo, receivers)
    fo = tc.construct_fitter(to, mo)
    return fo, mo
def _mjd_to_year(mjd):
    """Convert MJD value(s) to decimal calendar years (MJD 51544.0 -> 2000.0)."""
    return (mjd - 51544.0) / 365.25 + 2000.0


def get_avg_years(fo_nb, fo_wb, avg_dict):
    """
    Get MJDs for each data set, converted to decimal years.

    Parameters
    ==========
    fo_nb: narrowband fitter object
    fo_wb: wideband fitter object
    avg_dict: dict from fo.resids.ecorr_average(); must contain "mjds"

    Returns
    =======
    years_nb, years_wb, years_avg: arrays of decimal years for the
        narrowband, wideband, and epoch-averaged TOAs, respectively.
    """
    years_nb = _mjd_to_year(fo_nb.toas.get_mjds().value)
    years_wb = _mjd_to_year(fo_wb.toas.get_mjds().value)
    years_avg = _mjd_to_year(avg_dict["mjds"].value)
    return years_nb, years_wb, years_avg
def get_backends(fo_nb, fo_wb, avg_dict):
    """
    Grab receiver/backend flags ("-f") to make plotting easier.

    Parameters
    ==========
    fo_nb: narrowband fitter object
    fo_wb: wideband fitter object
    avg_dict: dict from fo.resids.ecorr_average(); must contain "indices"

    Returns
    =======
    rcvr_bcknds_nb, rcvr_bcknds_wb, rcvr_bcknds_avg: arrays of backend
        flags for the narrowband, wideband, and epoch-averaged TOAs.
    """
    rcvr_bcknds_nb = np.array(fo_nb.toas.get_flag_value("f")[0])
    rcvr_bcknds_wb = np.array(fo_wb.toas.get_flag_value("f")[0])
    # Each epoch-averaged point inherits the backend of its first member TOA.
    rcvr_bcknds_avg = np.array(
        [rcvr_bcknds_nb[iis[0]] for iis in avg_dict["indices"]]
    )
    return rcvr_bcknds_nb, rcvr_bcknds_wb, rcvr_bcknds_avg
def get_DMX_info(fo):
    """
    Get the DMX timeseries from dmxparse.

    Parameters
    ==========
    fo: fitter object

    Returns
    =======
    DMX values, their vertical errors, and the bin-center epochs in
    decimal years.
    """
    parsed = pint.utils.dmxparse(fo)
    dmx_values = parsed["dmxs"].value
    dmx_errors = parsed["dmx_verrs"].value
    center_mjds = parsed["dmxeps"].value
    # Convert bin-center MJDs to decimal years (MJD 51544.0 -> 2000.0).
    center_years = (center_mjds - 51544.0) / 365.25 + 2000.0
    return dmx_values, dmx_errors, center_years
def plot_by_color(ax, x, y, err, bknds, rn_off, be_legend, be_format):
    """
    Plot color-divided-by-receiver/BE points on any axis

    Parameters
    ==========
    ax: axis for plotting
    x: x values to plot
    y: y values to plot
    err: error bars to plot
    bknds: list of backend flags associated with TOAs
    rn_off: the DC red noise offset to subtract (prior to PINT fix)
    be_legend: if True, draw a backend legend on the axis
    be_format: legend layout, "vert" (outside right) or "horiz"
        (single row inside lower left)
    """
    markers, colorscheme = plot_settings()
    for i, r_b in enumerate(set(bknds)):
        inds = np.where(bknds == r_b)[0]
        # NOTE(review): if inds is empty, r_b_label becomes "" and the
        # markers/colorscheme lookups below would KeyError — confirm the
        # scheme dicts have a "" entry or that this branch is unreachable.
        if not inds.tolist():
            r_b_label = ""
        else:
            r_b_label = bknds[inds][0]
        mkr = markers[r_b_label]
        clr = colorscheme[r_b_label]
        ax.errorbar(
            x[inds],
            # Subtract the DC red-noise offset (in microseconds).
            y[inds] - (rn_off * u.us),
            yerr=err[inds],
            fmt=mkr,
            color=clr,
            label=r_b_label,
            alpha=0.5,
        )
    # Symmetric y-limits padded by a fraction of the largest error bar.
    ylim = max(np.abs(y - (rn_off * u.us))).value + 0.6 * max(np.abs(err)).value
    ax.set_ylim(-1 * ylim * 1.08, ylim * 1.08)
    if be_legend:
        handles, labels = ax.get_legend_handles_labels()
        # Sort legend entries alphabetically by label.
        labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))
        fixed_labels = [label_names[l] for l in labels]
        if be_format == "vert":
            plt.legend(handles, fixed_labels, loc=(1.005, 0), fontsize=12)
        if be_format == "horiz":
            plt.legend(
                handles,
                fixed_labels,
                loc="lower left",
                ncol=len(fixed_labels),
                borderpad=0.1,
                columnspacing=0.1,
            )
            # Extend the lower limit to make room for the in-axes legend.
            ax.set_ylim(-1 * ylim * 1.2, ylim * 1.08)
def rec_labels(axs, bcknds, years_avg):
    """
    Mark transitions between backends with vertical dashed lines and label
    the instrument eras along the top of the first axis.

    Parameters
    ==========
    axs: list of axes to draw the transition lines on (era labels are
        drawn on axs[0] only)
    bcknds: list of backend flags associated with TOAs
    years_avg: decimal years of the (epoch-averaged) TOAs, used to center
        the era labels
    """
    # Approximate transition epochs (decimal years): GASP->GUPPI at GBT,
    # ASP->PUPPI at Arecibo.
    guppi = 2010.1
    puppi = 2012.1
    has_asp = False
    has_puppi = False
    has_gasp = False
    has_guppi = False
    has_ao = False
    has_gbt = False
    has_yuppi = False
    # NOTE(review): substring tests — "ASP" in r is also True for "GASP"
    # flags, so has_asp may be set by GASP data; confirm flag formats.
    for r in bcknds:
        if "ASP" in r:
            has_asp = True
        if "PUPPI" in r:
            has_puppi = True
        if "GASP" in r:
            has_gasp = True
        if "GUPPI" in r:
            has_guppi = True
        if "YUPPI" in r:
            has_yuppi = True
    # Both eras present at an observatory -> draw the transition line.
    if has_asp and has_puppi:
        for a in axs:
            has_ao = True
            a.axvline(puppi, linewidth=0.75, color="k", linestyle="--", alpha=0.6)
    if has_gasp and has_guppi:
        for a in axs:
            has_gbt = True
            a.axvline(guppi, linewidth=0.75, color="k", linestyle="--", alpha=0.6)
    # Era labels are drawn just above the axes (y=1.1 in axis coords).
    ycoord = 1.1
    x_min_yr = min(years_avg)
    x_max_yr = max(years_avg)
    tform = axs[0].get_xaxis_transform()
    va = ha = "center"
    if has_ao and has_gbt:
        if has_yuppi:
            axs[0].text(
                (puppi + x_max_yr) / 2.0,
                ycoord,
                "PUPPI/GUPPI/YUPPI",
                transform=tform,
                va=va,
                ha=ha,
            )
        else:
            axs[0].text(
                (puppi + x_max_yr) / 2.0,
                ycoord,
                "PUPPI/GUPPI",
                transform=tform,
                va=va,
                ha=ha,
            )
        axs[0].text(
            (guppi + x_min_yr) / 2.0, ycoord, "ASP/GASP", transform=tform, va=va, ha=ha
        )
        axs[0].text(
            (guppi + puppi) / 2.0, ycoord, "ASP/GUPPI", transform=tform, va=va, ha=ha
        )
    elif has_ao and not has_gbt:
        if has_yuppi:
            axs[0].text(
                (puppi + x_max_yr) / 2.0,
                ycoord,
                "PUPPI/YUPPI",
                transform=tform,
                va=va,
                ha=ha,
            )
        else:
            axs[0].text(
                (puppi + x_max_yr) / 2.0, ycoord, "PUPPI", transform=tform, va=va, ha=ha
            )
        axs[0].text(
            (puppi + x_min_yr) / 2.0 - 0.2, ycoord, "ASP", transform=tform, va=va, ha=ha
        )
    elif not has_ao and has_gbt:
        # NOTE(review): this GBT branch centers the YUPPI label on `puppi`
        # while the non-YUPPI case uses `guppi` — possible copy-paste;
        # confirm which epoch is intended.
        if has_yuppi:
            axs[0].text(
                (puppi + x_max_yr) / 2.0,
                ycoord,
                "GUPPI/YUPPI",
                transform=tform,
                va=va,
                ha=ha,
            )
        else:
            axs[0].text(
                (guppi + x_max_yr) / 2.0, ycoord, "GUPPI", transform=tform, va=va, ha=ha
            )
        axs[0].text(
            (guppi + x_min_yr) / 2.0, ycoord, "GASP", transform=tform, va=va, ha=ha
        )
    # Single-era data sets: one label spanning the whole time range.
    if has_puppi and not has_asp and not has_gasp and not has_guppi:
        if has_yuppi:
            axs[0].text(
                (x_min_yr + x_max_yr) / 2.0,
                ycoord,
                "PUPPI/YUPPI",
                transform=tform,
                va=va,
                ha=ha,
            )
        else:
            axs[0].text(
                (x_min_yr + x_max_yr) / 2.0,
                ycoord,
                "PUPPI",
                transform=tform,
                va=va,
                ha=ha,
            )
    if has_guppi and not has_asp and not has_gasp and not has_puppi:
        if has_yuppi:
            axs[0].text(
                (x_min_yr + x_max_yr) / 2.0,
                ycoord,
                "GUPPI/YUPPI",
                transform=tform,
                va=va,
                ha=ha,
            )
        else:
            axs[0].text(
                (x_min_yr + x_max_yr) / 2.0,
                ycoord,
                "GUPPI",
                transform=tform,
                va=va,
                ha=ha,
            )
    if has_yuppi and not has_guppi and not has_puppi:
        axs[0].text(
            (x_min_yr + x_max_yr) / 2.0, ycoord, "YUPPI", transform=tform, va=va, ha=ha
        )
def rn_sub(testing, rn_subtract, fo_nb, fo_wb):
    """
    Compute the DC red-noise offsets (in microseconds) to subtract from the
    narrowband and wideband residuals (workaround prior to the PINT fix).

    Parameters
    ==========
    testing: bool
        If True, skip the offset computation and return zeros.
    rn_subtract: bool
        If False, no offset is subtracted (zeros returned).
    fo_nb: narrowband fitter object (must expose current_state.xhat / .M)
    fo_wb: wideband fitter object (same requirement)

    Returns
    =======
    rn_nb, rn_wb: float offsets in microseconds (0.0 when disabled).
    """
    if rn_subtract and not testing:
        # First fit-basis component rescaled by its design-matrix norm,
        # converted to microseconds.
        rn_nb = fo_nb.current_state.xhat[0] * fo_nb.current_state.M[0, 0] * 1e6
        rn_wb = fo_wb.current_state.xhat[0] * fo_wb.current_state.M[0, 0] * 1e6
    else:
        rn_nb = rn_wb = 0.0
    return rn_nb, rn_wb
|
nanogravREPO_NAMEpint_palPATH_START.@pint_pal_extracted@pint_pal-main@src@pint_pal@plot_utils.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "halomod/hmf",
"repo_path": "hmf_extracted/hmf-main/src/hmf/mass_function/__init__.py",
"type": "Python"
}
|
"""Subpackage for determining the halo mass function in Spherical Collapse."""
from . import fitting_functions, hmf, integrate_hmf
from .fitting_functions import PS, SMT, FittingFunction, Tinker08
from .hmf import MassFunction
|
halomodREPO_NAMEhmfPATH_START.@hmf_extracted@hmf-main@src@hmf@mass_function@__init__.py@.PATH_END.py
|
{
"filename": "example.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/document_loaders/example_data/source_code/example.py",
"type": "Python"
}
|
class MyClass:
    """Minimal example class that stores a name and greets its owner.

    NOTE(review): this file is example_data for the code-splitting document
    loaders; if loader tests assert exact file content, keep edits in sync.
    """
    def __init__(self, name):
        # Name to use in the greeting.
        self.name = name
    def greet(self):
        """Print a greeting that includes the stored name."""
        print(f"Hello, {self.name}!")
def main():
    """Prompt the user for a name and print a greeting via MyClass."""
    name = input("Enter your name: ")
    obj = MyClass(name)
    obj.greet()
# Run the interactive demo only when executed as a script.
if __name__ == "__main__":
    main()
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@document_loaders@example_data@source_code@example.py@.PATH_END.py
|
{
"filename": "_montage.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/skimage/util/_montage.py",
"type": "Python"
}
|
import numpy as np
from .._shared import utils
from .. import exposure
__all__ = ['montage']
@utils.channel_as_last_axis(multichannel_output=False)
def montage(
    arr_in,
    fill='mean',
    rescale_intensity=False,
    grid_shape=None,
    padding_width=0,
    *,
    channel_axis=None,
):
    """Create a montage of several single- or multichannel images.

    Create a rectangular montage from an input array representing an ensemble
    of equally shaped single- (gray) or multichannel (color) images.

    For example, ``montage(arr_in)`` called with the following `arr_in`

    +---+---+---+
    | 1 | 2 | 3 |
    +---+---+---+

    will return

    +---+---+
    | 1 | 2 |
    +---+---+
    | 3 | * |
    +---+---+

    where the '*' patch will be determined by the `fill` parameter.

    Parameters
    ----------
    arr_in : ndarray, shape (K, M, N[, C])
        An array representing an ensemble of `K` images of equal shape.
    fill : float or array-like of floats or 'mean', optional
        Value to fill the padding areas and/or the extra tiles in
        the output array. Has to be `float` for single channel collections.
        For multichannel collections has to be an array-like of shape of
        number of channels. If `mean`, uses the mean value over all images.
    rescale_intensity : bool, optional
        Whether to rescale the intensity of each image to [0, 1].
    grid_shape : tuple, optional
        The desired grid shape for the montage `(ntiles_row, ntiles_column)`.
        The default aspect ratio is square.
    padding_width : int, optional
        The size of the spacing between the tiles and between the tiles and
        the borders. If non-zero, makes the boundaries of individual images
        easier to perceive.
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel) image.
        Otherwise, this parameter indicates which axis of the array corresponds
        to channels.

    Returns
    -------
    arr_out : (K*(M+p)+p, K*(N+p)+p[, C]) ndarray
        Output array with input images glued together (including padding `p`).

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util import montage
    >>> arr_in = np.arange(3 * 2 * 2).reshape(3, 2, 2)
    >>> arr_in  # doctest: +NORMALIZE_WHITESPACE
    array([[[ 0,  1],
            [ 2,  3]],
           [[ 4,  5],
            [ 6,  7]],
           [[ 8,  9],
            [10, 11]]])
    >>> arr_out = montage(arr_in)
    >>> arr_out.shape
    (4, 4)
    >>> arr_out
    array([[ 0,  1,  4,  5],
           [ 2,  3,  6,  7],
           [ 8,  9,  5,  5],
           [10, 11,  5,  5]])
    >>> arr_in.mean()
    5.5
    >>> arr_out_nonsquare = montage(arr_in, grid_shape=(1, 3))
    >>> arr_out_nonsquare
    array([[ 0,  1,  4,  5,  8,  9],
           [ 2,  3,  6,  7, 10, 11]])
    >>> arr_out_nonsquare.shape
    (2, 6)
    """
    # The decorator has already moved channels to the last axis when
    # channel_axis is given; otherwise append a singleton channel so both
    # cases share the same (K, M, N, C) code path below.
    if channel_axis is not None:
        arr_in = np.asarray(arr_in)
    else:
        arr_in = np.asarray(arr_in)[..., np.newaxis]
    if arr_in.ndim != 4:
        raise ValueError(
            'Input array has to be 3-dimensional for grayscale '
            'images, or 4-dimensional with a `channel_axis` '
            'specified.'
        )
    n_images, n_rows, n_cols, n_chan = arr_in.shape
    # Default grid: smallest square that holds all K images.
    if grid_shape:
        ntiles_row, ntiles_col = (int(s) for s in grid_shape)
    else:
        ntiles_row = ntiles_col = int(np.ceil(np.sqrt(n_images)))
    # Rescale intensity if necessary
    if rescale_intensity:
        for i in range(n_images):
            arr_in[i] = exposure.rescale_intensity(arr_in[i])
    # Calculate the fill value
    # 'mean' sentinel: per-channel mean over all images and pixels.
    if fill == 'mean':
        fill = arr_in.mean(axis=(0, 1, 2))
    fill = np.atleast_1d(fill).astype(arr_in.dtype)
    # Pre-allocate an array with padding for montage
    n_pad = padding_width
    arr_out = np.empty(
        (
            (n_rows + n_pad) * ntiles_row + n_pad,
            (n_cols + n_pad) * ntiles_col + n_pad,
            n_chan,
        ),
        dtype=arr_in.dtype,
    )
    # Fill everything first; unused (extra) tiles then keep the fill value.
    for idx_chan in range(n_chan):
        arr_out[..., idx_chan] = fill[idx_chan]
    # Per-tile row/column slices, offset by the padding width.
    slices_row = [
        slice(n_pad + (n_rows + n_pad) * n, n_pad + (n_rows + n_pad) * n + n_rows)
        for n in range(ntiles_row)
    ]
    slices_col = [
        slice(n_pad + (n_cols + n_pad) * n, n_pad + (n_cols + n_pad) * n + n_cols)
        for n in range(ntiles_col)
    ]
    # Copy the data to the output array
    # Images are placed row-major: left-to-right, then top-to-bottom.
    for idx_image, image in enumerate(arr_in):
        idx_sr = idx_image // ntiles_col
        idx_sc = idx_image % ntiles_col
        arr_out[slices_row[idx_sr], slices_col[idx_sc], :] = image
    # Strip the synthetic channel axis for grayscale input.
    if channel_axis is not None:
        return arr_out
    else:
        return arr_out[..., 0]
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@skimage@util@_montage.py@.PATH_END.py
|
{
"filename": "plot_image_denoising.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/examples/decomposition/plot_image_denoising.py",
"type": "Python"
}
|
"""
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image using firstly online :ref:`DictionaryLearning` and
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Generate distorted image
# ------------------------
import numpy as np
try:  # Scipy >= 1.10
    from scipy.datasets import face
except ImportError:
    from scipy.misc import face
raccoon_face = face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
raccoon_face = raccoon_face / 255.0
# downsample for higher speed (2x2 block average)
raccoon_face = (
    raccoon_face[::4, ::4]
    + raccoon_face[1::4, ::4]
    + raccoon_face[::4, 1::4]
    + raccoon_face[1::4, 1::4]
)
raccoon_face /= 4.0
height, width = raccoon_face.shape
# Distort the right half of the image
# NOTE(review): np.random.randn is unseeded, so the noise (and all numbers
# downstream) differ between runs — seed for reproducibility if needed.
print("Distorting image...")
distorted = raccoon_face.copy()
distorted[:, width // 2 :] += 0.075 * np.random.randn(height, width // 2)
# %%
# Display the distorted image
# ---------------------------
import matplotlib.pyplot as plt
def show_with_diff(image, reference, title):
    """Display an image next to its signed difference from a reference.

    The difference panel's title reports the Frobenius norm of the
    difference, so a perfect reconstruction shows norm 0.
    """
    plt.figure(figsize=(5, 3.3))
    # Left panel: the image itself.
    plt.subplot(1, 2, 1)
    plt.title("Image")
    plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation="nearest")
    plt.xticks(())
    plt.yticks(())
    # Right panel: signed difference on a diverging colormap.
    plt.subplot(1, 2, 2)
    diff = image - reference
    plt.title("Difference (norm: %.2f)" % np.sqrt(np.sum(diff**2)))
    plt.imshow(diff, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr, interpolation="nearest")
    plt.xticks(())
    plt.yticks(())
    plt.suptitle(title, size=16)
    plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, raccoon_face, "Distorted image")
# %%
# Extract reference patches
# ----------------------------
from time import time
from sklearn.feature_extraction.image import extract_patches_2d
# Extract all reference patches from the left half of the image
print("Extracting reference patches...")
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, : width // 2], patch_size)
# Flatten each 7x7 patch to a 49-vector, then standardize per feature
# (dictionary learning expects centered, scaled data).
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print(f"{data.shape[0]} patches extracted in %.2fs." % (time() - t0))
# %%
# Learn the dictionary from reference patches
# -------------------------------------------
from sklearn.decomposition import MiniBatchDictionaryLearning
print("Learning the dictionary...")
t0 = time()
dico = MiniBatchDictionaryLearning(
# increase to 300 for higher quality results at the cost of slower
# training times.
n_components=50,
batch_size=200,
alpha=1.0,
max_iter=10,
)
V = dico.fit(data).components_
dt = time() - t0
print(f"{dico.n_iter_} iterations / {dico.n_steps_} steps in {dt:.2f}.")
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r, interpolation="nearest")
plt.xticks(())
plt.yticks(())
plt.suptitle(
"Dictionary learned from face patches\n"
+ "Train time %.1fs on %d patches" % (dt, len(data)),
fontsize=16,
)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
# %%
# Extract noisy patches and reconstruct them using the dictionary
# ---------------------------------------------------------------
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
print("Extracting noisy patches... ")
t0 = time()
data = extract_patches_2d(distorted[:, width // 2 :], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print("done in %.2fs." % (time() - t0))
transform_algorithms = [
("Orthogonal Matching Pursuit\n1 atom", "omp", {"transform_n_nonzero_coefs": 1}),
("Orthogonal Matching Pursuit\n2 atoms", "omp", {"transform_n_nonzero_coefs": 2}),
("Least-angle regression\n4 atoms", "lars", {"transform_n_nonzero_coefs": 4}),
("Thresholding\n alpha=0.1", "threshold", {"transform_alpha": 0.1}),
]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + "...")
reconstructions[title] = raccoon_face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == "threshold":
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2 :] = reconstruct_from_patches_2d(
patches, (height, width // 2)
)
dt = time() - t0
print("done in %.2fs." % dt)
show_with_diff(reconstructions[title], raccoon_face, title + " (time: %.1fs)" % dt)
plt.show()
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@examples@decomposition@plot_image_denoising.py@.PATH_END.py
|
{
"filename": "_textcase.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymap/colorbar/tickfont/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``densitymap.colorbar.tickfont.textcase``."""

    def __init__(
        self,
        plotly_name="textcase",
        parent_name="densitymap.colorbar.tickfont",
        **kwargs,
    ):
        # Fill in schema defaults unless the caller overrides them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("values", ["normal", "word caps", "upper", "lower"])
        super(TextcaseValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymap@colorbar@tickfont@_textcase.py@.PATH_END.py
|
{
"filename": "_fgcolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter/fillpattern/_fgcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``scatter.fillpattern.fgcolor``."""

    def __init__(
        self, plotly_name="fgcolor", parent_name="scatter.fillpattern", **kwargs
    ):
        # Fill in schema defaults unless the caller overrides them.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "style")
        super(FgcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter@fillpattern@_fgcolor.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "gomesdasilva/ACTIN",
"repo_path": "ACTIN_extracted/ACTIN-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""Packaging script for ACTIN, the Activity Indices Calculator."""
import os
import subprocess

# Prefer setuptools; fall back to distutils only when setuptools is absent.
# (Narrowed from a bare ``except:``, which would also swallow SystemExit
# and KeyboardInterrupt.)
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# The package version lives in a plain-text file at actin/VERSION.
path = os.path.dirname(os.path.realpath(__file__))
version_file = os.path.join(path, "actin", "VERSION")
try:
    with open(version_file, 'r') as file:
        version = file.read()
except OSError:
    # A missing or unreadable VERSION file should not abort packaging.
    version = "unknown"

setup(name = 'actin',
      version = version,
      description = 'Activity Indices Calculator',
      url = 'http://github.com/gomesdasilva/ACTIN',
      download_url = 'https://github.com/gomesdasilva/ACTIN/archive/v1.3.2.tar.gz',
      author = 'Joao Gomes da Silva',
      author_email = 'Joao.Silva@astro.up.pt',
      license = 'MIT',
      keywords = ['astronomy', 'activity', 'fits', 'harps', 'harps-n', 'espresso','radial velocity', 'exoplanets'],
      packages = ['actin'],
      entry_points = {
        "console_scripts": ['actin = actin.actin:main']
        },
      include_package_data = True,
      install_requires = ['appdirs']
      )

# This runs ACTIN from terminal, creates ACTIN directory with config file in Application Support and gives its location
#try: subprocess.call(["actin"])
#except: pass
|
gomesdasilvaREPO_NAMEACTINPATH_START.@ACTIN_extracted@ACTIN-master@setup.py@.PATH_END.py
|
{
"filename": "test_machine_independent.py",
"repo_name": "dtamayo/reboundx",
"repo_path": "reboundx_extracted/reboundx-main/reboundx/test/test_machine_independent.py",
"type": "Python"
}
|
import rebound
import reboundx
import unittest
import os
class TestMachineIndependent(unittest.TestCase):
    """Check that REBOUNDx effects are machine independent: integrating from
    the first Simulationarchive snapshot must reproduce the final state of
    the original run bit-for-bit."""

    def setUp(self):
        # A throwaway sim/rebx pair kept on the instance (API parity with
        # other test modules).
        self.sim = rebound.Simulation()
        self.sim.add(m=1.)
        self.sim.add(a=1., e=0.2)
        self.rebx = reboundx.Extras(self.sim)
        # Write the common two-planet initial conditions used by every test.
        sim = rebound.Simulation()
        sim.add(m=1.)
        sim.add(m=1.e-4, a=1., e=0.1)
        sim.add(m=1.e-4, a=2, e=0.1, inc=0.2)
        sim.integrator="whfast"
        sim.dt = sim.particles[1].P/100
        sim.move_to_com()
        sim.save_to_file('twoplanets.bin', delete_file=True)

    def _check_reproducible(self, name, configure, loader='force'):
        """Run one effect for 1e4 time units, replay from the archive's first
        snapshot, and assert the final states are bit-identical.

        Parameters
        ----------
        name : str
            REBOUNDx effect name; also the basename for the .sa/.rebx files.
        configure : callable
            ``configure(sim, effect)`` sets effect/particle parameters.
        loader : str
            'force' attaches via load_force/add_force,
            'operator' via load_operator/add_operator.
        """
        sim = rebound.Simulation('twoplanets.bin')
        sim.save_to_file(name + '.sa', interval=1e3, delete_file=True)
        rebx = reboundx.Extras(sim)
        if loader == 'operator':
            effect = rebx.load_operator(name)
            rebx.add_operator(effect)
        else:
            effect = rebx.load_force(name)
            rebx.add_force(effect)
        configure(sim, effect)
        rebx.save(name + '.rebx')
        sim.integrate(1.e4)
        sa = reboundx.Simulationarchive(name + '.sa', name + '.rebx')
        simf, rebx = sa[-1]
        sim, rebx = sa[0]
        sim.integrate(simf.t)
        self.assertEqual(sim.particles[0].x, simf.particles[0].x)

    @staticmethod
    def _set_c(sim, force):
        # Shared configuration for the three GR variants: only the speed of
        # light (in sim units) needs to be set.
        force.params['c'] = 1.e4

    def test_modify_orbits_forces(self):
        def configure(sim, force):
            ps = sim.particles
            ps[1].params['tau_a'] = -1e4
            ps[1].params['tau_e'] = -1e3
            ps[2].params['tau_e'] = -1e3
            ps[2].params['tau_inc'] = -1e3
        self._check_reproducible('modify_orbits_forces', configure)

    def test_gr_full(self):
        self._check_reproducible('gr_full', self._set_c)

    def test_gr(self):
        self._check_reproducible('gr', self._set_c)

    def test_gr_potential(self):
        self._check_reproducible('gr_potential', self._set_c)

    def test_radiation_forces(self):
        def configure(sim, force):
            force.params['c'] = 1.e4
            ps = sim.particles
            ps[1].params['beta'] = 0.5
            ps[2].params['beta'] = 0.3
        self._check_reproducible('radiation_forces', configure)

    def test_central_force(self):
        def configure(sim, force):
            ps = sim.particles
            ps[0].params['Acentral'] = 1.e-3
            ps[0].params['gammacentral'] = -1
        self._check_reproducible('central_force', configure)

    def test_gravitational_harmonics(self):
        def configure(sim, force):
            ps = sim.particles
            ps[0].params['J2'] = 1.e-3
            ps[0].params['J4'] = 1.e-3
            ps[0].params['R_eq'] = 1.e-3
        self._check_reproducible('gravitational_harmonics', configure)

    def test_modify_mass(self):
        def configure(sim, operator):
            sim.particles[0].params['tau_mass'] = -1e4
        self._check_reproducible('modify_mass', configure, loader='operator')

if __name__ == '__main__':
    unittest.main()
|
dtamayoREPO_NAMEreboundxPATH_START.@reboundx_extracted@reboundx-main@reboundx@test@test_machine_independent.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/box/unselected/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Eager import for Python < 3.7 (no PEP 562 module-level __getattr__) and for
# static type checkers; otherwise defer the submodule import until first
# attribute access to keep plotly's import time low.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._marker import MarkerValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._marker.MarkerValidator"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@box@unselected@__init__.py@.PATH_END.py
|
{
"filename": "2d-histogram-contour.md",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/doc/python/2d-histogram-contour.md",
"type": "Markdown"
}
|
---
jupyter:
jupytext:
notebook_metadata_filter: all
text_representation:
extension: .md
format_name: markdown
format_version: '1.2'
jupytext_version: 1.3.1
kernelspec:
display_name: Python 3
language: python
name: python3
language_info:
codemirror_mode:
name: ipython
version: 3
file_extension: .py
mimetype: text/x-python
name: python
nbconvert_exporter: python
pygments_lexer: ipython3
version: 3.6.8
plotly:
description: How to make 2D Histogram Contour plots in Python with Plotly.
display_as: statistical
language: python
layout: base
name: 2D Histogram Contour
order: 11
page_type: u-guide
permalink: python/2d-histogram-contour/
redirect_from: python/2d-density-plots/
thumbnail: thumbnail/hist2dcontour.png
---
## 2D Histogram Contours or Density Contours
A 2D histogram contour plot, also known as a density contour plot, is a 2-dimensional generalization of a [histogram](/python/histograms/) which resembles a [contour plot](/python/contour-plots/) but is computed by grouping a set of points specified by their `x` and `y` coordinates into bins, and applying an aggregation function such as `count` or `sum` (if `z` is provided) to compute the value to be used to compute contours. This kind of visualization (and the related [2D histogram, or density heatmap](/python/2d-histogram/)) is often used to manage over-plotting, or situations where showing large data sets as [scatter plots](/python/line-and-scatter/) would result in points overlapping each other and hiding patterns.
## Density Contours with Plotly Express
[Plotly Express](/python/plotly-express/) is the easy-to-use, high-level interface to Plotly, which [operates on a variety of types of data](/python/px-arguments/) and produces [easy-to-style figures](/python/styling-plotly-express/). The Plotly Express function `density_contour()` can be used to produce density contours.
```python
import plotly.express as px
df = px.data.tips()
fig = px.density_contour(df, x="total_bill", y="tip")
fig.show()
```
Marginal plots can be added to visualize the 1-dimensional distributions of the two variables. Here we use a marginal [`histogram`](/python/histograms/). Other allowable values are `violin`, `box` and `rug`.
```python
import plotly.express as px
df = px.data.tips()
fig = px.density_contour(df, x="total_bill", y="tip", marginal_x="histogram", marginal_y="histogram")
fig.show()
```
Density contours can also be [faceted](/python/facet-plots/) and [discretely colored](/python/discrete-color/):
```python
import plotly.express as px
df = px.data.tips()
fig = px.density_contour(df, x="total_bill", y="tip", facet_col="sex", color="smoker")
fig.show()
```
Plotly Express density contours can be [continuously-colored](/python/colorscales/) and labeled:
```python
import plotly.express as px
df = px.data.tips()
fig = px.density_contour(df, x="total_bill", y="tip")
fig.update_traces(contours_coloring="fill", contours_showlabels = True)
fig.show()
```
### Other aggregation functions than `count`
By passing in a `z` value and a `histfunc`, density contours can perform basic aggregation operations. Here we show average Sepal Length grouped by Petal Length and Petal Width for the Iris dataset.
```python
import plotly.express as px
df = px.data.iris()
fig = px.density_contour(df, x="petal_length", y="petal_width", z="sepal_length", histfunc="avg")
fig.show()
```
### 2D Histogram Contours with Graph Objects
To build this kind of figure with [graph objects](/python/graph-objects/) without using Plotly Express, we can use the `go.Histogram2dContour` class.
#### Basic 2D Histogram Contour
```python
import plotly.graph_objects as go
import numpy as np
np.random.seed(1)
x = np.random.uniform(-1, 1, size=500)
y = np.random.uniform(-1, 1, size=500)
fig = go.Figure(go.Histogram2dContour(
x = x,
y = y
))
fig.show()
```
#### 2D Histogram Contour Colorscale
```python
import plotly.graph_objects as go
import numpy as np
x = np.random.uniform(-1, 1, size=500)
y = np.random.uniform(-1, 1, size=500)
fig = go.Figure(go.Histogram2dContour(
x = x,
y = y,
colorscale = 'Blues'
))
fig.show()
```
#### 2D Histogram Contour Styled
```python
import plotly.graph_objects as go
import numpy as np
x = np.random.uniform(-1, 1, size=500)
y = np.random.uniform(-1, 1, size=500)
fig = go.Figure(go.Histogram2dContour(
x = x,
y = y,
colorscale = 'Jet',
contours = dict(
showlabels = True,
labelfont = dict(
family = 'Raleway',
color = 'white'
)
),
hoverlabel = dict(
bgcolor = 'white',
bordercolor = 'black',
font = dict(
family = 'Raleway',
color = 'black'
)
)
))
fig.show()
```
#### 2D Histogram Contour Subplot
```python
import plotly.graph_objects as go
import numpy as np
t = np.linspace(-1, 1.2, 2000)
x = (t**3) + (0.3 * np.random.randn(2000))
y = (t**6) + (0.3 * np.random.randn(2000))
fig = go.Figure()
fig.add_trace(go.Histogram2dContour(
x = x,
y = y,
colorscale = 'Blues',
reversescale = True,
xaxis = 'x',
yaxis = 'y'
))
fig.add_trace(go.Scatter(
x = x,
y = y,
xaxis = 'x',
yaxis = 'y',
mode = 'markers',
marker = dict(
color = 'rgba(0,0,0,0.3)',
size = 3
)
))
fig.add_trace(go.Histogram(
y = y,
xaxis = 'x2',
marker = dict(
color = 'rgba(0,0,0,1)'
)
))
fig.add_trace(go.Histogram(
x = x,
yaxis = 'y2',
marker = dict(
color = 'rgba(0,0,0,1)'
)
))
fig.update_layout(
autosize = False,
xaxis = dict(
zeroline = False,
domain = [0,0.85],
showgrid = False
),
yaxis = dict(
zeroline = False,
domain = [0,0.85],
showgrid = False
),
xaxis2 = dict(
zeroline = False,
domain = [0.85,1],
showgrid = False
),
yaxis2 = dict(
zeroline = False,
domain = [0.85,1],
showgrid = False
),
height = 600,
width = 600,
bargap = 0,
hovermode = 'closest',
showlegend = False
)
fig.show()
```
#### Reference
See https://plotly.com/python/reference/histogram2dcontour/ for more information and chart attribute options!
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@doc@python@2d-histogram-contour.md@.PATH_END.py
|
{
"filename": "setup_package.py",
"repo_name": "zblz/naima",
"repo_path": "naima_extracted/naima-main/src/naima/tests/setup_package.py",
"type": "Python"
}
|
def get_package_data():
    """Return the package-data mapping for the astropy-helpers build system.

    ``_ASTROPY_PACKAGE_NAME_`` is not defined in this file; it is injected
    into the module namespace by the build machinery at setup time.
    """
    return {_ASTROPY_PACKAGE_NAME_ + ".tests": ["coveragerc", "data/*.dat"]}
|
zblzREPO_NAMEnaimaPATH_START.@naima_extracted@naima-main@src@naima@tests@setup_package.py@.PATH_END.py
|
{
"filename": "_gbd.py",
"repo_name": "Jashcraf/poke",
"repo_path": "poke_extracted/poke-main/poke/_gbd.py",
"type": "Python"
}
|
import os

# NumExpr reads NUMEXPR_MAX_THREADS / NUMEXPR_NUM_THREADS when the module is
# first imported, so these caps must be exported *before* ``import numexpr``
# below; setting them afterwards (as the code previously did) has no effect.
os.environ['NUMEXPR_MAX_THREADS'] = '64'
os.environ['NUMEXPR_NUM_THREADS'] = '32'

import numpy as np
import matplotlib.pyplot as plt
import numexpr as ne
def disp_array(array,cmap='viridis'):
    """Convenience helper: show a 2D array with ``imshow`` plus a colorbar.

    Parameters
    ----------
    array : array_like
        2D image data, passed straight to ``plt.imshow``.
    cmap : str, optional
        Matplotlib colormap name (default 'viridis').
    """
    plt.figure()
    plt.imshow(array,cmap=cmap)
    plt.colorbar()
    plt.show()
def Matmulvec(x2,y2,M,x1,y1):
    """Evaluate the bilinear form r2^T @ M @ r1 with r2 = [x2, y2], r1 = [x1, y1].

    Parameters
    ----------
    x2, y2 : float or numpy.ndarray
        Components of the left-hand vector r2.
    M : numpy.ndarray
        2x2 matrix, indexed as ``M[i, j]``.
    x1, y1 : float or numpy.ndarray
        Components of the right-hand vector r1.

    Returns
    -------
    float or numpy.ndarray
        The (elementwise) value of r2^T @ M @ r1.
    """
    # Left-multiply r2 into M first, then dot the result with r1.
    row_x = x2 * M[0, 0] + y2 * M[1, 0]
    row_y = x2 * M[0, 1] + y2 * M[1, 1]
    return row_x * x1 + row_y * y1
def ComputeGouyPhase(Q):
    """Compute the Gouy phase of the complex curvature matrix Q.

    Parameters
    ----------
    Q : numpy.ndarray
        2x2 complex curvature matrix, or a stack of them with shape
        (..., 2, 2) following numpy's matrix broadcasting rules.

    Returns
    -------
    float or numpy.ndarray
        0.5 * (arctan(Re q1 / Im q1) + arctan(Re q2 / Im q2)), where
        q1, q2 are the eigenvalues of Q.
    """
    eigvals = np.linalg.eigvals(Q)
    # Index the eigenvalue axis explicitly: eigvals has shape (..., 2), so
    # plain eigvals[0]/eigvals[1] would index the batch axis for stacked
    # inputs and break the broadcasting behavior promised above.
    q1, q2 = eigvals[..., 0], eigvals[..., 1]
    gouy = .5*(np.arctan(np.real(q1)/np.imag(q1)) + np.arctan(np.real(q2)/np.imag(q2)))
    return gouy
def EvalField(xData,yData,zData,lData,mData,nData,opd,dPx,dPy,dHx,dHy,detsize,npix,normal=np.array([0.,0.,1.]),wavelength=1.65e-6):
    """Evaluate the coherent scalar field of a Gaussian beamlet decomposition
    on a planar detector.

    Parameters
    ----------
    x/y/zData : numpy.ndarray
        the position coordinates of the rays at the final field of evaluation
    l/m/nData : numpy.ndarray
        the direction cosine coordinates of the rays at the final field of evaluation
    opd : numpy.ndarray
        Optical path difference for each ray
    dPx/y : float
        The ray differential in position used to compute the ABCD matrix
    dHx/y : float
        The ray differential in direction cosine used to compute the ABCD matrix
    detsize : float
        The side length of the square detector to evaluate the field on
    npix : int
        The number of pixels across detsize
    normal : numpy.ndarray
        Direction cosine vector describing the normal of the detector
        (assuming a planar detector). Defaults to [0,0,1]
    wavelength : float
        Distance in meters corresponding to the optical wavelength of interest

    Returns
    -------
    Field : complex128 numpy.ndarray
        The simulated scalar field at the detector using GBD

    Notes
    -----
    This will evaluate the field as gaussian beamlets at the last surface the
    rays are traced to.
    RECALL: The ray data shape is [len(raysets),len(surflist),maxrays] from
    TraceThroughZOS. Steps:
    1) read ray data
    2) compute transversal plane basis vectors
    3) find plane normal to z and including detector coordinate
    4) Find where ray intersects plane
    5) Propagate to the plane
    6) Compute the ABCD matrix on the plane
    7) Compute field
    TODO: Make more flexible, this assumes that all beamlets are fundamental
    mode gaussians
    """
    """Set up complex curvature
    """
    wo = dPx  # beamlet waist radius taken equal to the positional differential
    zr = (np.pi*wo**2)/wavelength  # Rayleigh range of each beamlet
    qinv = 1/(1j*zr)
    # Waist-plane inverse curvature matrix (identical in x and y).
    Qinv = np.array([[qinv,0],
                     [0,qinv]])
    k = 2*np.pi/wavelength  # wavenumber
    """Set up Detector
    """
    # Set up detector vector
    x = np.linspace(-detsize/2,detsize/2,npix)
    x,y = np.meshgrid(x,x)
    r0 = np.array([x.ravel(),y.ravel(),0*x.ravel()]) # detector plane is the z = 0 plane
    print('r0 shape = ',r0.shape)
    """
    Read Ray Data
    """
    ## 1) Read Ray Data, now shape [len(raysets),maxrays]
    xEnd = xData[:,-1] # Positions
    yEnd = yData[:,-1]
    zEnd = zData[:,-1]
    lEnd = lData[:,-1] # Direction Cosines
    mEnd = mData[:,-1]
    nEnd = nData[:,-1]
    # Try to keep variables in the parlance of Ashcraft et al 2022
    # These are now shape 3 x len(raysets) x maxrays, so we move the spatial index to the last axis
    # These are the central rays
    rdet = np.moveaxis(np.array([xEnd,yEnd,zEnd]),0,-1)
    # Re-center positions on the centroid of the central rayset.
    mean_base = np.mean(rdet[0],axis=0)
    rdet -= mean_base
    print('centroid at ',mean_base)
    kdet = np.moveaxis(np.array([lEnd,mEnd,nEnd]),0,-1)
    ## 2) Compute Transversal Plane Basis Vectors, only use central ray
    n = kdet[0]
    l = np.cross(n,-normal) # normal is eta in Ashcraft et al 2022
    lx = l[...,0]
    ly = l[...,1]
    lz = l[...,2]
    lnorm = np.sqrt(lx**2 + ly**2 + lz**2) # np.linalg.norm(l,axis=-1)
    # Normalize l in place, component by component.
    l[:,0] /= lnorm
    l[:,1] /= lnorm
    l[:,2] /= lnorm
    m = np.cross(n,l)
    # print('Determine Norms (should be 1)')
    # print('-------------------------')
    # print(np.linalg.norm(l,axis=-1))
    # print(np.linalg.norm(m,axis=-1))
    # print(np.linalg.norm(n,axis=-1))
    # print('-------------------------')
    # Wrong shape for matmul, gotta moveaxis
    # O rotates lab-frame vectors into the (l, m, n) transversal frame.
    O = np.array([[l[...,0],l[...,1],l[...,2]],
                  [m[...,0],m[...,1],m[...,2]],
                  [n[...,0],n[...,1],n[...,2]]])
    # Clear the prior variables
    del l, m
    print('O before the reshape = ',O.shape)
    O = np.moveaxis(O,-1,0)
    ## Compute the Position to Update
    # Solve n·(rdet + Delta*kdet) = n·r0 for the propagation distance Delta
    # of every (beamlet, pixel) pair; the broadcast_to/moveaxis calls align
    # the operands' axes for that elementwise solve.
    RHS = n @ r0 # n dot r0, broadcast for every pixel and beamlet
    RHS = np.broadcast_to(RHS,(rdet.shape[0],RHS.shape[0],RHS.shape[1]))
    LHS = np.sum(n*rdet,axis=-1) # n dot rdet
    LHS = np.broadcast_to(LHS,(RHS.shape[-1],LHS.shape[0],LHS.shape[1]))
    LHS = np.moveaxis(LHS,0,-1)
    DEN = np.sum(n*kdet,axis=-1) # n dot kdet
    DEN = np.broadcast_to(DEN,(LHS.shape[-1],DEN.shape[0],DEN.shape[1]))
    DEN = np.moveaxis(DEN,0,-1)
    Delta = (RHS-LHS)/DEN
    Delta = Delta[...,np.newaxis]
    del RHS,LHS,DEN,n
    kdet = np.broadcast_to(kdet,(Delta.shape[-2],kdet.shape[0],kdet.shape[1],kdet.shape[2]))
    kdet = np.moveaxis(kdet,0,-2)
    rdet = np.broadcast_to(rdet,(Delta.shape[-2],rdet.shape[0],rdet.shape[1],rdet.shape[2]))
    rdet = np.moveaxis(rdet,0,-2)
    O = np.broadcast_to(O,(rdet.shape[0],rdet.shape[2],rdet.shape[1],3,3))
    O = np.moveaxis(O,1,2)
    # Get a bunch of updated ray positions, remember the broadcasting rules
    rdetprime = rdet + kdet*Delta
    rdetprime = rdetprime[...,np.newaxis]
    # Rotate propagated positions and directions into the transversal frame.
    rtransprime = O @ rdetprime
    # rayset, beamlet, pixel, dimension, extra
    kdet = kdet[...,np.newaxis]
    ktrans = O @ kdet
    r0 = np.moveaxis(r0,0,-1)[...,np.newaxis]
    ktrans = np.broadcast_to(ktrans,rtransprime.shape)
    del kdet,rdet,rdetprime
    ## Now compute the ray transfer matrix from the data
    # Rayset index 0 is the central ray; 1-4 are the waist/divergence
    # perturbations in x and y used for the finite differences below.
    central_r = rtransprime[0]
    central_k = ktrans[0]
    waistx_r = rtransprime[1]
    waistx_k = ktrans[1]
    waisty_r = rtransprime[2]
    waisty_k = ktrans[2]
    divergex_r = rtransprime[3]
    divergex_k = ktrans[3]
    divergey_r = rtransprime[4]
    divergey_k = ktrans[4]
    # Finite differences of each perturbed rayset against the central rayset
    # give the elements of the 4x4 ABCD (ray transfer) matrix.
    Axx = (waistx_r[...,0,0] - central_r[...,0,0])/dPx
    Ayx = (waistx_r[...,1,0] - central_r[...,1,0])/dPx
    Cxx = (waistx_k[...,0,0] - central_k[...,0,0])/dPx
    Cyx = (waistx_k[...,1,0] - central_k[...,1,0])/dPx
    Axy = (waisty_r[...,0,0] - central_r[...,0,0])/dPy
    Ayy = (waisty_r[...,1,0] - central_r[...,1,0])/dPy
    Cxy = (waisty_k[...,0,0] - central_k[...,0,0])/dPy
    Cyy = (waisty_k[...,1,0] - central_k[...,1,0])/dPy
    Bxx = (divergex_r[...,0,0] - central_r[...,0,0])/dHx
    Byx = (divergex_r[...,1,0] - central_r[...,1,0])/dHx
    Dxx = (divergex_k[...,0,0] - central_k[...,0,0])/dHx
    Dyx = (divergex_k[...,1,0] - central_k[...,1,0])/dHx
    Bxy = (divergey_r[...,0,0] - central_r[...,0,0])/dHy
    Byy = (divergey_r[...,1,0] - central_r[...,1,0])/dHy
    Dxy = (divergey_k[...,0,0] - central_k[...,0,0])/dHy
    Dyy = (divergey_k[...,1,0] - central_k[...,1,0])/dHy
    del waistx_k,waistx_r,waisty_k,waisty_r,divergex_k,divergex_r,divergey_r,divergey_k
    ABCD = np.array([[Axx,Axy,Bxx,Bxy],
                     [Ayx,Ayy,Byx,Byy],
                     [Cxx,Cxy,Dxx,Dxy],
                     [Cyx,Cyy,Dyx,Dyy]])
    ABCD = np.moveaxis(ABCD,0,-1)
    ABCD = np.moveaxis(ABCD,0,-1)
    # Detector coordinates relative to the central ray, in the transversal frame.
    r0prime = O @ r0
    r = r0prime - central_r
    del r0,r0prime
    # Grab the central
    r = r[0,...,0]
    """
    # For Displaying the Ray Transfer Matrix
    from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
    beamid = 0
    fig,ax = plt.subplots(nrows=4,ncols=4,figsize=[10,10])
    for i in range(4):
        for j in range(4):
            im = ax[i,j].scatter(r[beamid,:,0],r[beamid,:,1],c=ABCD[beamid,:,i,j])
            div = make_axes_locatable(ax[i,j])
            cax = div.append_axes("right",size='5%',pad="2%")
            cb = fig.colorbar(im,cax=cax)
            ax[i,j].get_xaxis().set_visible(False)
            ax[i,j].get_yaxis().set_visible(False)
    """
    """
    Propagate the Q Parameter
    """
    A = ABCD[...,0:2,0:2]
    B = ABCD[...,0:2,2:4]
    C = ABCD[...,2:4,0:2]
    D = ABCD[...,2:4,2:4]
    del ABCD, Axx,Axy,Ayx,Ayy,Bxx,Bxy,Byx,Byy,Cxx,Cxy,Cyx,Cyy,Dxx,Dxy,Dyx,Dyy
    # Propagate the inverse complex curvature through the ABCD system:
    # Q'^{-1} = (C + D Q^{-1}) (A + B Q^{-1})^{-1}
    Num = (C + np.matmul(D , Qinv))
    Den = np.linalg.inv(A + np.matmul(B , Qinv))
    Qpinv = np.matmul(Num,Den)
    del Num,Den
    Amplitude = 1/(np.sqrt(np.linalg.det(A + B @ Qpinv)))
    # Quadratic transversal phase: (-ik/2) r^T Q'^{-1} r, expanded elementwise.
    transversal = (-1j*k/2)*((r[...,0]*Qpinv[...,0,0] + r[...,1]*Qpinv[...,1,0])*r[...,0] + (r[...,0]*Qpinv[...,0,1] + r[...,1]*Qpinv[...,1,1])*r[...,1])
    # Accumulated optical path: traced OPD plus the final propagation Delta.
    opticalpath = (-1j*k)*(opd[0,-1] + np.moveaxis(Delta[0,...,0],0,-1))
    opticalpath = np.moveaxis(opticalpath,0,-1)
    result,vecs = np.linalg.eig(Qpinv)
    eig1 = result[...,0]
    eig2 = result[...,1]
    guoy = 1j*0.5*(np.arctan(np.real(eig1)/np.imag(eig1)) + np.arctan(np.real(eig2)/np.imag(eig2)))
    del result,vecs,eig1,eig2
    Phase = transversal+opticalpath+guoy
    del transversal,opd,guoy,Delta,Qpinv,r
    """
    Compute the Gaussian Field
    """
    # numexpr evaluates the large complex exponential with threaded C loops.
    Field = Amplitude*ne.evaluate('exp(Phase)')
    # print('Field Shape = ',Field.shape)
    # Coherent Superposition
    Field = np.sum(Field,axis=0)
    # print('Coherent field shape = ',Field.shape)
    return Field
|
JashcrafREPO_NAMEpokePATH_START.@poke_extracted@poke-main@poke@_gbd.py@.PATH_END.py
|
{
"filename": "astropyconst13.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/constants/astropyconst13.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants for Astropy v1.3 and earlier.
See :mod:`astropy.constants` for a complete listing of constants
defined in Astropy.
"""
from astropy.utils import find_current_module
from . import codata2010, iau2012
from . import utils as _utils
codata = codata2010
iaudata = iau2012
_utils._set_c(codata, iaudata, find_current_module())
# Clean up namespace
del find_current_module
del _utils
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@constants@astropyconst13.py@.PATH_END.py
|
{
"filename": "legendre_jax.py",
"repo_name": "martinjameswhite/directsht",
"repo_path": "directsht_extracted/directsht-main/sht/legendre_jax.py",
"type": "Python"
}
|
import numpy as np
import jax.numpy as jnp
from jax import vmap, jit, devices
from functools import partial
from jax.lax import fori_loop
from sht.utils_jax import move_to_device
import sht.utils_jax as utils
# Choose the number of devices we'll be parallelizing across
# (jax.devices() lists every accelerator/CPU device visible to this process).
N_devices = len(devices())
def null_unphys(Yv, Yd):
    '''
    Zero-out spurious entries (artefacts of our implementation)
    :param Yv: jnp array of size (Nell, Nell) with the Ylms
    :param Yd: jnp array of size (Nell, Nell) with the derivatives of Ylms
    :return: tuple (Yv, Yd) where we've zeroed out the unphysical entries
    '''
    # TODO: Make this act in place by donating buffers
    n_rows, n_cols = Yv.shape[0], Yv.shape[1]
    # Upper-triangular mask, with each row r shifted left by r so it matches
    # the (m, ell-m) storage layout used throughout this module.
    base = jnp.triu(jnp.ones((n_rows, n_cols)))
    valid = jnp.stack([jnp.roll(base[r, :], -r) for r in range(n_rows)])
    # Replace NaNs (uninitialized slots) and mask off the invalid entries.
    cleaned = tuple(jnp.nan_to_num(Y) * valid[:, :, None] for Y in (Yv, Yd))
    return cleaned
def compute_Plm_table(Nl, xx):
    """Use recurrence relations to compute a table of Ylm[cos(theta),0]
    for ell>=0, m>=0, x>=0. Can use symmetries to get m<0 and/or x<0,
    viz. (-1)^m for m<0 and (-1)^(ell-m) for x<0.

    :param Nl: Number of ells (and hence m's) in the grid.
    :param xx: Array of x points (non-negative and increasing).
    :return Y[ell,m,x=Cos[theta],0] without the sqrt{(2ell+1)/4pi}
        normalization (that is applied in __init__)
    """
    # We donate argnums to enforce in-place array updates
    @partial(jit, donate_argnums=(1,))
    def get_mhigh(m, Plm, sx):
        # Diagonal recurrence: fill P_{m,m} from P_{m-1,m-1}.
        indx = lambda ell, m: (m, ell - m)
        i0, i1 = indx(m, m), indx(m - 1, m - 1)
        return Plm.at[i0[0], i0[1], :].set(-jnp.sqrt(1.0 - 1. / (2 * m)) * sx * Plm[i1[0], i1[1], :])
    #
    @partial(jit, donate_argnums=(1,))
    def get_misellm1(m, Plm, xx):
        # First off-diagonal: fill P_{m+1,m} from P_{m,m}.
        indx = lambda ell, m: (m, ell - m)
        i0, i1 = indx(m, m), indx(m + 1, m)
        return Plm.at[i1[0], i1[1], :].set(jnp.sqrt(2 * m + 1.) * xx * Plm[i0[0], i0[1], :])
    #
    @jit
    def ext_slow_recurrence(xx, Plm):
        # Note we use Plm.shape[0] instead of Nl to allow padding
        return vmap(partial_fun_Ylm, (0, 0, None))(jnp.arange(0, Plm.shape[0], dtype='int32'), Plm, xx)
    #
    @partial(jit, donate_argnums=(1,))
    def partial_fun_Ylm(m, Ylm_row, xx):
        # Run the three-term ell-recursion along a single fixed-m row.
        body_fun = lambda ell, Ylm_at_m: full_fun_Ylm(ell, m, Ylm_at_m, xx)
        return fori_loop(0, len(Ylm_row) - 2, body_fun, Ylm_row)
    #
    @partial(jit, donate_argnums=(2,))
    def full_fun_Ylm(i, m, Ylm_at_m, xx):
        # Our indexing scheme is (m, ell-m), so since the loops start at the third column
        # (i.e. ell=m+2) we can get ell from the loop index as
        ell = m + i + 2
        # The recursion relies on the previous two elements on this row
        i0, i1, i2 = i + 2, i + 1, i
        fact1, fact2 = jnp.sqrt((ell - m) * 1. / (ell + m)), \
                       jnp.sqrt((ell - m - 1.) / (ell + m - 1.))
        Ylm_at_m = Ylm_at_m.at[i0, :].set(((2 * ell - 1) * xx * Ylm_at_m[i1, :]
                                           - (ell + m - 1) * Ylm_at_m[i2, :] * fact2)
                                          * fact1 / (ell - m))
        return Ylm_at_m
    #
    # This should match the convention used in the SHT class below.
    # We shift all the entries so that rows start at ell=m. This helps recursion.
    indx = lambda ell, m: (m, ell - m)
    Nx = len(xx)
    sx = jnp.sqrt(1 - xx ** 2)
    # Distribute the grid across devices if possible
    Plm = jnp.zeros((Nl, Nl, Nx))
    # First we do the l=m=0 and l=1, m=0 cases
    Plm = Plm.at[indx(0, 0)[0], indx(0, 0)[1], :].set(jnp.ones_like(xx))
    Plm = Plm.at[indx(1, 0)[0], indx(1, 0)[1], :].set(xx.copy())
    # Now we fill in m>0.
    # To keep the recurrences stable, we treat "high m" and "low m"
    # separately. Start with the highest value of m allowed:
    Plm = fori_loop(1, Nl, lambda m, Plms: get_mhigh(m, Plms, sx), Plm)
    # Now do m=ell-1
    Plm = fori_loop(1, Nl - 1, lambda m, Plms: get_misellm1(m, Plms, xx), Plm)
    # Now we distribute/shard it across GPUS. Note that we should only do this
    # once we've computed the diagonals, which happens in a direction orthogonal
    # to our row-based sharding!
    Plm = move_to_device(Plm, pad_axes=[0, 1])
    # Finally fill in ell>m+1:
    Plm = ext_slow_recurrence(xx, Plm)
    return (Plm)
def compute_der_table(Nl, xx, Yv):
    """Use recurrence relations to compute a table of derivatives of
    Ylm[cos(theta),0] for ell>=0, m>=0, x=>0.
    Assumes the Ylm table has already been built (passed as Yv).

    :param Nl: Number of ells in the derivative grid.
    :param xx: Values of cos(theta) at which to evaluate derivs.
    :param Yv: Already computed Ylm values.
    :return Yd: The table of first derivatives.
    """
    #
    def ext_der_slow_recurrence(xx, Yv, Yd):
        # Vectorize the single-entry derivative update over both rows (m)
        # and columns (ell-m) of the tables.
        omx2 = 1.0 - xx ** 2
        # Note we use Yv.shape[0] instead of Nl to allow padding
        rows = jnp.arange(0, Yv.shape[0], dtype='int32')
        cols = jnp.arange(0, Yv.shape[1], dtype='int32')
        return vmap(vmap(full_fun_dYlm, (0, None, 0, 0, None, None)),
                    (None, 0, None, 0, None, None), 1)(rows, cols, Yv, Yd, xx, omx2)

    @partial(jit, donate_argnums=(3,))
    def full_fun_dYlm(m, i, Yv_at_m, Yd_at_ell_m, xx, omx2):
        """Derivative at one (m, ell) from the Ylm values at ell and ell-1."""
        # Our indexing scheme is (m, ell-m), so we can get ell from the loop index as
        ell = m + i
        i0, i1 = i, i - 1
        # indx = lambda ell, m: (m, ell - m)
        # i0, i1 = indx(ell, m), indx(ell - 1, m)
        fact = jnp.sqrt(1.0 * (ell - m) / (ell + m))
        Yd_at_ell_m = Yd_at_ell_m.at[:].set(((ell + m) * fact * Yv_at_m[i1, :] - ell * xx * Yv_at_m[i0, :]) / omx2)
        return Yd_at_ell_m

    @partial(jit, donate_argnums=(3,))
    def fill_dYmm(m, i, Yv_at_m, Yd_at_m, xx, omx2):
        """Derivative on the ell == m diagonal (column i == 0 of each row)."""
        # Our indexing scheme is (m, ell-m), so we can get ell from the loop index as
        ell = m + i
        return Yd_at_m.at[i, :].set((- ell * xx * Yv_at_m[i, :]) / omx2)

    def fill_dYmm_ext(Yv, Yd, xx):
        # Apply fill_dYmm to the first entry of every row.
        omx2 = 1.0 - xx ** 2
        # Note we use Yv.shape[0] instead of Nl to allow padding
        rows = jnp.arange(0, Yv.shape[0], dtype='int32')
        return vmap(fill_dYmm, (0, None, 0, 0, None, None))(rows, 0, Yv, Yd, xx, omx2)
    #
    Nx = len(xx)
    # This should match the convention used in the SHT class below.
    indx = lambda ell, m: (m, ell - m)
    # Distribute the grid across devices if possible
    Yd = utils.init_array(Nl, Nx, N_devices)
    # then build the m>0 tables.
    Yd = ext_der_slow_recurrence(xx, Yv, Yd)
    # Do ell=1, m=0 and ell=0, m=0, which the recursion can't give us.
    Yd = Yd.at[indx(1, 0)[0], indx(1, 0)[1], :].set(jnp.ones_like(xx))
    Yd = Yd.at[indx(0, 0)[0], indx(0, 0)[1], :].set(jnp.zeros_like(xx))
    # The zeroth column (i.e. ell=m) is pathological in our implementation
    # so do it again
    Yd = fill_dYmm_ext(Yv, Yd, xx)
    return (Yd)
@partial(jit, donate_argnums=(2,))
def norm(m, i, Ylm_at_m_ell):
    """Scale one (m, ell) table entry by sqrt((2*ell + 1) / (4*pi)).

    In the table's (m, ell-m) indexing scheme the column index ``i`` equals
    ell - m, so the degree is recovered as ell = m + i.  The input buffer is
    donated and may be updated in place.
    """
    degree = m + i
    scale = jnp.sqrt((2 * degree + 1) / 4. / np.pi)
    return Ylm_at_m_ell.at[:].multiply(scale)
def norm_ext(Y, Nl):
    '''
    Normalize the Plm's by the (2ell+1)/4pi factor relating Plm to Ylm.

    :param Y: jnp.ndarray of shape (Nl, Nl, Nx) containing the Plm's,
        indexed as (m, ell-m, x).
    :param Nl: number of ells; not read directly, since the array shape
        is used instead (this allows padded tables).
    :return: jnp.ndarray of shape (Nl, Nl, Nx) containing the (2ell+1)/4pi Ylm
    '''
    # Note we use Y.shape[0] instead of Nl to allow padding
    rows = jnp.arange(0, Y.shape[0], dtype='int32')
    cols = jnp.arange(0, Y.shape[1], dtype='int32')
    # This is effectively a loop over ms and ells, multiplying each entry by the norm
    return vmap(vmap(norm, (0, None, 0)),
                (None, 0, 0))(rows, cols, Y)
|
martinjameswhiteREPO_NAMEdirectshtPATH_START.@directsht_extracted@directsht-main@sht@legendre_jax.py@.PATH_END.py
|
{
"filename": "psrindex.py",
"repo_name": "demorest/nanopipe",
"repo_path": "nanopipe_extracted/nanopipe-master/src/psrindex.py",
"type": "Python"
}
|
#! /usr/bin/env python
# Class to deal with indexing pulsar data files in an arbitrary
# directory structure.
import os
import sys
import fnmatch
from collections import namedtuple, OrderedDict
import sqlite3
# Note, need to be careful about setting/re-setting no_amps
# if this will be imported by other code that needs to load the
# data. Could use "with" style context.
import psrchive
#psrchive.Profile.no_amps.fset(True)
class psrchive_no_amps(object):
    """Context manager that temporarily overrides the global PSRCHIVE
    ``Profile.no_amps`` setting, restoring the previous value on exit.
    Probably not thread-safe, since the setting is process-global!"""

    def __init__(self, mode=True):
        # Value that no_amps will hold for the duration of the with-block.
        self.mode = mode

    def __enter__(self):
        flag = psrchive.Profile.no_amps
        # Remember the current value so __exit__ can put it back.
        self.previous_setting = flag.fget()
        flag.fset(self.mode)

    def __exit__(self, t, v, tb):
        psrchive.Profile.no_amps.fset(self.previous_setting)
# dict of info to store and their sql type defs
_infotypes = OrderedDict()
_infotypes["fname"] = "text unique" # file name
_infotypes["path"] = "text" # path to file
_infotypes["source"] = "text" # source name
_infotypes["rcvr"] = "text" # receiver name
_infotypes["backend"] = "text" # backend name
_infotypes["type"] = "text" # pulsar/cal/etc
_infotypes["mjd"] = "real" # MJD
_infotypes["bad"] = "int" # bad file flag
_infotypes["reason"] = "text" # reason for badness
_infofields = list(_infotypes.keys())
class PSRFile(namedtuple('PSRFile', _infofields)):
    """A single raw pulsar data file plus the metadata recorded about it
    in the index database (fields defined by ``_infotypes``)."""

    @classmethod
    def _fromfile(cls, fname_path):
        """Generate a PSRFile instance from a data file.

        fname_path should be the full path to the file.  If PSRCHIVE fails
        to load the file, the entry is built with placeholder metadata and
        flagged bad instead of raising.
        """
        (path, fname) = os.path.split(fname_path)
        try:
            # no_amps skips loading the profile amplitudes; only header
            # metadata is needed here.
            with psrchive_no_amps():
                arch = psrchive.Archive_load(fname_path)
                source = arch.get_source()
                rcvr = arch.get_receiver_name()
                backend = arch.get_backend_name()
                type = arch.get_type()
                mjd = arch[0].get_epoch().in_days()
            bad = 0
            reason = ""
        except Exception:
            # Was a bare "except:", which also swallowed KeyboardInterrupt
            # and SystemExit; keep the best-effort fallback but only for
            # ordinary errors.
            source = "unk"
            rcvr = "unk"
            backend = "unk"
            type = "unk"
            mjd = 0.0
            bad = 1
            reason = "import error"
        return super(PSRFile, cls).__new__(cls,
            fname=fname, path=path,
            source=source, rcvr=rcvr, backend=backend,
            type=type, mjd=mjd, bad=bad, reason=reason)

    @classmethod
    def _fromdb(cls, cursor, row):
        """sqlite3 row_factory: build a PSRFile directly from a db row.

        Note, requires fields in db be in same order as in this class;
        should be guaranteed if everything is done consistently via this
        interface.
        """
        return cls._make(row)

    @property
    def full_path(self):
        """Full path (directory + file name) of the data file."""
        return self.path + '/' + self.fname
class PSRIndex(object):
    """An index of pulsar data files, backed by a sqlite database."""

    def __init__(self, filename="index.db", create=False):
        """Open (or, with create=True, create) the index database.

        :param filename: path of the sqlite database file.
        :param create: if False, raise RuntimeError when the file does not
            already exist; if True, create the file/table as needed.
        """
        if not create:
            if not os.path.exists(filename):
                raise RuntimeError("PSRIndex file '%s' not found" % filename)
        self.dbfilename = filename
        self.db = sqlite3.connect(self.dbfilename)
        # Rows come back directly as PSRFile instances.
        self.db.row_factory = PSRFile._fromdb
        self.cur = self.db.cursor()
        # Build the table from the shared field/type definitions so the
        # schema always matches PSRFile.
        create_qry = "create table if not exists files ("
        for f in _infofields:
            create_qry += f + " " + _infotypes[f] + ","
        create_qry = create_qry.rstrip(',') + ")"
        self.cur.execute(create_qry)
        self.verbose = False
        # These filters determine which files are considered for
        # inclusion.
        self.file_filters = [
            "guppi_*.fits",
            "puppi_*.fits",
            "vegas_*.fits",
            "5????.*.asp",
            "c??????.dat",
            "ABPP_*.ar",
            "*.[AB][CD]_????.ar",
            "*.[AB][CD]_????.cf",
            "*.[AB][CD][12]_????.ar",
            "*.[AB][CD][12]_????.cf",
        ]

    def list_files(self, basedir='.'):
        """Return a list of all matching files under the given basedir.

        File names must be unique across the tree; a later file with an
        already-seen name is skipped (with a warning when verbose)."""
        basedir_full = os.path.abspath(basedir)
        out = []
        uniq_names = set()
        for (path, subdirs, fnames) in os.walk(basedir_full):
            # 'pattern' rather than 'filter' to avoid shadowing the builtin.
            for pattern in self.file_filters:
                for fname in fnmatch.filter(fnames, pattern):
                    if fname not in uniq_names:
                        uniq_names.add(fname)
                        out.append(os.path.join(path, fname))
                    else:
                        if self.verbose:
                            print("Warning: file name collision %s" % fname)
        return out

    def add_file(self, fname_path, replace=False, commit=True):
        """Add a single file, given with full path, to the db.
        If file already exists in the db nothing will be done.
        If replace=True existing info will be overwritten."""
        if not replace:
            (path, fname) = os.path.split(fname_path)
            self.cur.execute("select * from files where fname=?", (fname,))
            if self.cur.fetchone() is not None:
                # file exists
                if self.verbose: print("File exists: %s" % fname)
                return
        info = PSRFile._fromfile(fname_path)
        if self.verbose: print("Adding: %s" % info.fname)
        # "insert or replace" so that replace=True actually overwrites the
        # existing row; a plain "insert" would raise IntegrityError on the
        # unique fname constraint.
        qry = "insert or replace into files (" \
            + str.join(', ', _infofields) \
            + ") values (" \
            + str.join(', ', [':' + x for x in _infofields]) + ")"
        self.cur.execute(qry, info._asdict())
        if commit: self.db.commit()

    def add_files(self, basedir='.'):
        """Index every matching file under basedir, committing once."""
        for fname in self.list_files(basedir):
            self.add_file(fname, commit=False)
        self.db.commit()

    def flag_bad(self, fname, reason="", unflag=False):
        """Mark (or with unflag=True, clear) a file's bad flag."""
        if unflag: badval = 0
        else: badval = 1
        qry = "update files set bad=?, reason=? where fname=?"
        self.cur.execute(qry, (badval, reason, fname))
        self.db.commit()

    def select(self, where=None, include_bad=False, as_dict=False):
        """Shortcut to select / fetchall.

        :param where: optional SQL condition appended verbatim to the
            query.  NOTE: this is string-interpolated into the SQL, so it
            must come from trusted code, never from untrusted input.
        :param include_bad: if False, rows flagged bad are excluded.
        :param as_dict: if True, return {fname: PSRFile} instead of a list.
        """
        qry = "select * from files"
        if (where is None) and include_bad:
            pass
        else:
            qry += " where "
            if not include_bad:
                qry += "not bad"
                if where is not None:
                    qry += " and (" + where + ")"
            else:
                qry += where
        qry += " order by mjd"
        if self.verbose: print(qry)
        self.cur.execute(qry)
        if as_dict:
            output = {}
            for f in self.cur.fetchall():
                output[f.fname] = f
            return output
        else:
            return self.cur.fetchall()
|
demorestREPO_NAMEnanopipePATH_START.@nanopipe_extracted@nanopipe-master@src@psrindex.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sankey/node/_line.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``sankey.node.line`` compound property.

    Auto-generated plotly.py validator: all checking is delegated to
    CompoundValidator with the Line data class and its property docs.
    """

    def __init__(self, plotly_name="line", parent_name="sankey.node", **kwargs):
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Line"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the color of the `line` around each
                `node`.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for  color .
            width
                Sets the width (in px) of the `line` around
                each `node`.
            widthsrc
                Sets the source reference on Chart Studio Cloud
                for  width .
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sankey@node@_line.py@.PATH_END.py
|
{
"filename": "dusty_galaxy.py",
"repo_name": "bwvdnbro/CMacIonize",
"repo_path": "CMacIonize_extracted/CMacIonize-master/benchmarks/dusty_galaxy.py",
"type": "Python"
}
|
#! /usr/bin/python
################################################################################
# This file is part of CMacIonize
# Copyright (C) 2017 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
#
# CMacIonize is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CMacIonize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with CMacIonize. If not, see <http://www.gnu.org/licenses/>.
################################################################################
##
# @file dusty_galaxy.py
#
# @brief Script used to plot the binary output file generated by the dusty
# galaxy test.
#
# @author Bert Vandenbroucke (bv7@st-andrews.ac.uk)
##
## load necessary modules
# for reading in the binary file
import numpy as np
# for plotting
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as pl
# Load the raw binary image written by the dusty galaxy benchmark and
# reshape the flat array into the 200x200 pixel grid.
image = np.fromfile("galaxy_image.dat", dtype=np.float64).reshape((200, 200))
# CMacIonize and Python order the pixels differently, so transpose the
# image before we can plot it.
image = image.T
# Render the image with a colour bar and write it out.
fig, ax = pl.subplots(1, 1)
cax = ax.imshow(image)
fig.colorbar(cax)
pl.savefig("galaxy_image.png")
|
bwvdnbroREPO_NAMECMacIonizePATH_START.@CMacIonize_extracted@CMacIonize-master@benchmarks@dusty_galaxy.py@.PATH_END.py
|
{
"filename": "functions.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/how_to/functions.ipynb",
"type": "Jupyter Notebook"
}
|
---
sidebar_position: 3
keywords: [RunnableLambda, LCEL]
---
# How to run custom functions
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)
- [Chaining runnables](/docs/how_to/sequence/)
:::
You can use arbitrary functions as [Runnables](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable). This is useful for formatting or when you need functionality not provided by other LangChain components, and custom functions used as Runnables are called [`RunnableLambdas`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableLambda.html).
Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single dict input and unpacks it into multiple arguments.
This guide will cover:
- How to explicitly create a runnable from a custom function using the `RunnableLambda` constructor and the convenience `@chain` decorator
- Coercion of custom functions into runnables when used in chains
- How to accept and use run metadata in your custom function
- How to stream with custom functions by having them return generators
## Using the constructor
Below, we explicitly wrap our custom logic using the `RunnableLambda` constructor:
```python
%pip install -qU langchain langchain_openai
import os
from getpass import getpass
if "OPENAI_API_KEY" not in os.environ:
os.environ["OPENAI_API_KEY"] = getpass()
```
```python
from operator import itemgetter
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI
def length_function(text):
return len(text)
def _multiple_length_function(text1, text2):
return len(text1) * len(text2)
def multiple_length_function(_dict):
return _multiple_length_function(_dict["text1"], _dict["text2"])
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_template("what is {a} + {b}")
chain1 = prompt | model
chain = (
{
"a": itemgetter("foo") | RunnableLambda(length_function),
"b": {"text1": itemgetter("foo"), "text2": itemgetter("bar")}
| RunnableLambda(multiple_length_function),
}
| prompt
| model
)
chain.invoke({"foo": "bar", "bar": "gah"})
```
AIMessage(content='3 + 9 equals 12.', response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 14, 'total_tokens': 22}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-73728de3-e483-49e3-ad54-51bd9570e71a-0')
## The convenience `@chain` decorator
You can also turn an arbitrary function into a chain by adding a `@chain` decorator. This is functionally equivalent to wrapping the function in a `RunnableLambda` constructor as shown above. Here's an example:
```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import chain
prompt1 = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
prompt2 = ChatPromptTemplate.from_template("What is the subject of this joke: {joke}")
@chain
def custom_chain(text):
prompt_val1 = prompt1.invoke({"topic": text})
output1 = ChatOpenAI().invoke(prompt_val1)
parsed_output1 = StrOutputParser().invoke(output1)
chain2 = prompt2 | ChatOpenAI() | StrOutputParser()
return chain2.invoke({"joke": parsed_output1})
custom_chain.invoke("bears")
```
'The subject of the joke is the bear and his girlfriend.'
Above, the `@chain` decorator is used to convert `custom_chain` into a runnable, which we invoke with the `.invoke()` method.
If you are using tracing with [LangSmith](https://docs.smith.langchain.com/), you should see a `custom_chain` trace in there, with the calls to OpenAI nested underneath.
## Automatic coercion in chains
When using custom functions in chains with the pipe operator (`|`), you can omit the `RunnableLambda` or `@chain` constructor and rely on coercion. Here's a simple example with a function that takes the output from the model and returns the first five letters of it:
```python
prompt = ChatPromptTemplate.from_template("tell me a story about {topic}")
model = ChatOpenAI()
chain_with_coerced_function = prompt | model | (lambda x: x.content[:5])
chain_with_coerced_function.invoke({"topic": "bears"})
```
'Once '
Note that we didn't need to wrap the custom function `(lambda x: x.content[:5])` in a `RunnableLambda` constructor because the `model` on the left of the pipe operator is already a Runnable. The custom function is **coerced** into a runnable. See [this section](/docs/how_to/sequence/#coercion) for more information.
## Passing run metadata
Runnable lambdas can optionally accept a [RunnableConfig](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.config.RunnableConfig.html#langchain_core.runnables.config.RunnableConfig) parameter, which they can use to pass callbacks, tags, and other configuration information to nested runs.
```python
import json
from langchain_core.runnables import RunnableConfig
def parse_or_fix(text: str, config: RunnableConfig):
fixing_chain = (
ChatPromptTemplate.from_template(
"Fix the following text:\n\n```text\n{input}\n```\nError: {error}"
" Don't narrate, just respond with the fixed data."
)
| model
| StrOutputParser()
)
for _ in range(3):
try:
return json.loads(text)
except Exception as e:
text = fixing_chain.invoke({"input": text, "error": e}, config)
return "Failed to parse"
from langchain_community.callbacks import get_openai_callback
with get_openai_callback() as cb:
output = RunnableLambda(parse_or_fix).invoke(
"{foo: bar}", {"tags": ["my-tag"], "callbacks": [cb]}
)
print(output)
print(cb)
```
{'foo': 'bar'}
Tokens Used: 62
Prompt Tokens: 56
Completion Tokens: 6
Successful Requests: 1
Total Cost (USD): $9.6e-05
```python
from langchain_community.callbacks import get_openai_callback
with get_openai_callback() as cb:
output = RunnableLambda(parse_or_fix).invoke(
"{foo: bar}", {"tags": ["my-tag"], "callbacks": [cb]}
)
print(output)
print(cb)
```
{'foo': 'bar'}
Tokens Used: 62
Prompt Tokens: 56
Completion Tokens: 6
Successful Requests: 1
Total Cost (USD): $9.6e-05
## Streaming
:::note
[RunnableLambda](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableLambda.html) is best suited for code that does not need to support streaming. If you need to support streaming (i.e., be able to operate on chunks of inputs and yield chunks of outputs), use [RunnableGenerator](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableGenerator.html) instead as in the example below.
:::
You can use generator functions (i.e. functions that use the `yield` keyword and behave like iterators) in a chain.
The signature of these generators should be `Iterator[Input] -> Iterator[Output]`. Or for async generators: `AsyncIterator[Input] -> AsyncIterator[Output]`.
These are useful for:
- implementing a custom output parser
- modifying the output of a previous step, while preserving streaming capabilities
Here's an example of a custom output parser for comma-separated lists. First, we create a chain that generates such a list as text:
```python
from typing import Iterator, List
prompt = ChatPromptTemplate.from_template(
"Write a comma-separated list of 5 animals similar to: {animal}. Do not include numbers"
)
str_chain = prompt | model | StrOutputParser()
for chunk in str_chain.stream({"animal": "bear"}):
print(chunk, end="", flush=True)
```
lion, tiger, wolf, gorilla, panda
Next, we define a custom function that will aggregate the currently streamed output and yield it when the model generates the next comma in the list:
```python
# This is a custom parser that splits an iterator of llm tokens
# into a list of strings separated by commas
def split_into_list(input: Iterator[str]) -> Iterator[List[str]]:
# hold partial input until we get a comma
buffer = ""
for chunk in input:
# add current chunk to buffer
buffer += chunk
# while there are commas in the buffer
while "," in buffer:
# split buffer on comma
comma_index = buffer.index(",")
# yield everything before the comma
yield [buffer[:comma_index].strip()]
# save the rest for the next iteration
buffer = buffer[comma_index + 1 :]
# yield the last chunk
yield [buffer.strip()]
list_chain = str_chain | split_into_list
for chunk in list_chain.stream({"animal": "bear"}):
print(chunk, flush=True)
```
['lion']
['tiger']
['wolf']
['gorilla']
['raccoon']
Invoking it gives a full array of values:
```python
list_chain.invoke({"animal": "bear"})
```
['lion', 'tiger', 'wolf', 'gorilla', 'raccoon']
## Async version
If you are working in an `async` environment, here is an `async` version of the above example:
```python
from typing import AsyncIterator
async def asplit_into_list(
input: AsyncIterator[str],
) -> AsyncIterator[List[str]]: # async def
buffer = ""
async for (
chunk
) in input: # `input` is a `async_generator` object, so use `async for`
buffer += chunk
while "," in buffer:
comma_index = buffer.index(",")
yield [buffer[:comma_index].strip()]
buffer = buffer[comma_index + 1 :]
yield [buffer.strip()]
list_chain = str_chain | asplit_into_list
async for chunk in list_chain.astream({"animal": "bear"}):
print(chunk, flush=True)
```
['lion']
['tiger']
['wolf']
['gorilla']
['panda']
```python
await list_chain.ainvoke({"animal": "bear"})
```
['lion', 'tiger', 'wolf', 'gorilla', 'panda']
## Next steps
Now you've learned a few different ways to use custom logic within your chains, and how to implement streaming.
To learn more, see the other how-to guides on runnables in this section.
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@how_to@functions.ipynb@.PATH_END.py
|
{
"filename": "test_arithmetic.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/scalar/period/test_arithmetic.py",
"type": "Python"
}
|
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas import (
NaT,
Period,
Timedelta,
Timestamp,
offsets,
)
class TestPeriodArithmetic:
    """Arithmetic (+, -) between Period scalars and ints, timedeltas,
    DateOffsets, NaT and other Periods, including overflow and
    mismatched-frequency error cases."""

    def test_add_overflow_raises(self):
        # GH#55503
        per = Timestamp.max.to_period("ns")

        msg = "|".join(
            [
                "Python int too large to convert to C long",
                # windows, 32bit linux builds
                "int too big to convert",
            ]
        )
        with pytest.raises(OverflowError, match=msg):
            per + 1

        msg = "value too large"
        with pytest.raises(OverflowError, match=msg):
            per + Timedelta(1)
        with pytest.raises(OverflowError, match=msg):
            per + offsets.Nano(1)

    def test_period_add_integer(self):
        # Adding an int advances by that many periods (commutatively).
        per1 = Period(freq="D", year=2008, month=1, day=1)
        per2 = Period(freq="D", year=2008, month=1, day=2)
        assert per1 + 1 == per2
        assert 1 + per1 == per2

    def test_period_add_invalid(self):
        # GH#4731
        per1 = Period(freq="D", year=2008, month=1, day=1)
        per2 = Period(freq="D", year=2008, month=1, day=2)

        msg = "|".join(
            [
                r"unsupported operand type\(s\)",
                "can only concatenate str",
                "must be str, not Period",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            per1 + "str"
        with pytest.raises(TypeError, match=msg):
            "str" + per1
        with pytest.raises(TypeError, match=msg):
            per1 + per2

    def test_period_sub_period_annual(self):
        # Difference of same-freq Periods is a multiple of the freq offset.
        left, right = Period("2011", freq="Y"), Period("2007", freq="Y")
        result = left - right
        assert result == 4 * right.freq

        msg = r"Input has different freq=M from Period\(freq=Y-DEC\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            left - Period("2007-01", freq="M")

    def test_period_sub_period(self):
        per1 = Period("2011-01-01", freq="D")
        per2 = Period("2011-01-15", freq="D")

        off = per1.freq
        assert per1 - per2 == -14 * off
        assert per2 - per1 == 14 * off

        msg = r"Input has different freq=M from Period\(freq=D\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            per1 - Period("2011-02", freq="M")

    @pytest.mark.parametrize("n", [1, 2, 3, 4])
    def test_sub_n_gt_1_ticks(self, tick_classes, n):
        # GH#23878
        # Subtraction with n>1 Tick freqs matches the base-freq result.
        p1 = Period("19910905", freq=tick_classes(n))
        p2 = Period("19920406", freq=tick_classes(n))

        expected = Period(str(p2), freq=p2.freq.base) - Period(
            str(p1), freq=p1.freq.base
        )

        assert (p2 - p1) == expected

    @pytest.mark.parametrize("normalize", [True, False])
    @pytest.mark.parametrize("n", [1, 2, 3, 4])
    @pytest.mark.parametrize(
        "offset, kwd_name",
        [
            (offsets.YearEnd, "month"),
            (offsets.QuarterEnd, "startingMonth"),
            (offsets.MonthEnd, None),
            (offsets.Week, "weekday"),
        ],
    )
    def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):
        # GH#23878
        # As above, but for anchored DateOffset frequencies.
        kwds = {kwd_name: 3} if kwd_name is not None else {}
        p1_d = "19910905"
        p2_d = "19920406"
        p1 = Period(p1_d, freq=offset(n, normalize, **kwds))
        p2 = Period(p2_d, freq=offset(n, normalize, **kwds))

        expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)

        assert (p2 - p1) == expected

    def test_period_add_offset(self):
        # freq is DateOffset
        for freq in ["Y", "2Y", "3Y"]:
            per = Period("2011", freq=freq)
            exp = Period("2013", freq=freq)
            assert per + offsets.YearEnd(2) == exp
            assert offsets.YearEnd(2) + per == exp

            for off in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(365, "D"),
                timedelta(365),
            ]:
                msg = "Input has different freq|Input cannot be converted to Period"
                with pytest.raises(IncompatibleFrequency, match=msg):
                    per + off
                with pytest.raises(IncompatibleFrequency, match=msg):
                    off + per

        for freq in ["M", "2M", "3M"]:
            per = Period("2011-03", freq=freq)
            exp = Period("2011-05", freq=freq)
            assert per + offsets.MonthEnd(2) == exp
            assert offsets.MonthEnd(2) + per == exp

            exp = Period("2012-03", freq=freq)
            assert per + offsets.MonthEnd(12) == exp
            assert offsets.MonthEnd(12) + per == exp

            msg = "|".join(
                [
                    "Input has different freq",
                    "Input cannot be converted to Period",
                ]
            )

            for off in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(365, "D"),
                timedelta(365),
            ]:
                with pytest.raises(IncompatibleFrequency, match=msg):
                    per + off
                with pytest.raises(IncompatibleFrequency, match=msg):
                    off + per

        # freq is Tick
        for freq in ["D", "2D", "3D"]:
            per = Period("2011-04-01", freq=freq)

            exp = Period("2011-04-06", freq=freq)
            assert per + offsets.Day(5) == exp
            assert offsets.Day(5) + per == exp

            exp = Period("2011-04-02", freq=freq)
            assert per + offsets.Hour(24) == exp
            assert offsets.Hour(24) + per == exp

            exp = Period("2011-04-03", freq=freq)
            assert per + np.timedelta64(2, "D") == exp
            assert np.timedelta64(2, "D") + per == exp

            exp = Period("2011-04-02", freq=freq)
            assert per + np.timedelta64(3600 * 24, "s") == exp
            assert np.timedelta64(3600 * 24, "s") + per == exp

            exp = Period("2011-03-30", freq=freq)
            assert per + timedelta(-2) == exp
            assert timedelta(-2) + per == exp

            exp = Period("2011-04-03", freq=freq)
            assert per + timedelta(hours=48) == exp
            assert timedelta(hours=48) + per == exp

            msg = "|".join(
                [
                    "Input has different freq",
                    "Input cannot be converted to Period",
                ]
            )

            for off in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(4, "h"),
                timedelta(hours=23),
            ]:
                with pytest.raises(IncompatibleFrequency, match=msg):
                    per + off
                with pytest.raises(IncompatibleFrequency, match=msg):
                    off + per

        for freq in ["h", "2h", "3h"]:
            per = Period("2011-04-01 09:00", freq=freq)

            exp = Period("2011-04-03 09:00", freq=freq)
            assert per + offsets.Day(2) == exp
            assert offsets.Day(2) + per == exp

            exp = Period("2011-04-01 12:00", freq=freq)
            assert per + offsets.Hour(3) == exp
            assert offsets.Hour(3) + per == exp

            # NOTE(review): this msg is overwritten before it is ever used
            # below; presumably leftover from an older version of the test.
            msg = "cannot use operands with types"
            exp = Period("2011-04-01 12:00", freq=freq)
            assert per + np.timedelta64(3, "h") == exp
            assert np.timedelta64(3, "h") + per == exp

            exp = Period("2011-04-01 10:00", freq=freq)
            assert per + np.timedelta64(3600, "s") == exp
            assert np.timedelta64(3600, "s") + per == exp

            exp = Period("2011-04-01 11:00", freq=freq)
            assert per + timedelta(minutes=120) == exp
            assert timedelta(minutes=120) + per == exp

            exp = Period("2011-04-05 12:00", freq=freq)
            assert per + timedelta(days=4, minutes=180) == exp
            assert timedelta(days=4, minutes=180) + per == exp

            msg = "|".join(
                [
                    "Input has different freq",
                    "Input cannot be converted to Period",
                ]
            )

            for off in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(3200, "s"),
                timedelta(hours=23, minutes=30),
            ]:
                with pytest.raises(IncompatibleFrequency, match=msg):
                    per + off
                with pytest.raises(IncompatibleFrequency, match=msg):
                    off + per

    def test_period_sub_offset(self):
        # freq is DateOffset
        msg = "|".join(
            [
                "Input has different freq",
                "Input cannot be converted to Period",
            ]
        )

        for freq in ["Y", "2Y", "3Y"]:
            per = Period("2011", freq=freq)
            assert per - offsets.YearEnd(2) == Period("2009", freq=freq)

            for off in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(365, "D"),
                timedelta(365),
            ]:
                with pytest.raises(IncompatibleFrequency, match=msg):
                    per - off

        for freq in ["M", "2M", "3M"]:
            per = Period("2011-03", freq=freq)
            assert per - offsets.MonthEnd(2) == Period("2011-01", freq=freq)
            assert per - offsets.MonthEnd(12) == Period("2010-03", freq=freq)

            for off in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(365, "D"),
                timedelta(365),
            ]:
                with pytest.raises(IncompatibleFrequency, match=msg):
                    per - off

        # freq is Tick
        for freq in ["D", "2D", "3D"]:
            per = Period("2011-04-01", freq=freq)
            assert per - offsets.Day(5) == Period("2011-03-27", freq=freq)
            assert per - offsets.Hour(24) == Period("2011-03-31", freq=freq)
            assert per - np.timedelta64(2, "D") == Period("2011-03-30", freq=freq)
            assert per - np.timedelta64(3600 * 24, "s") == Period(
                "2011-03-31", freq=freq
            )
            assert per - timedelta(-2) == Period("2011-04-03", freq=freq)
            assert per - timedelta(hours=48) == Period("2011-03-30", freq=freq)

            for off in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(4, "h"),
                timedelta(hours=23),
            ]:
                with pytest.raises(IncompatibleFrequency, match=msg):
                    per - off

        for freq in ["h", "2h", "3h"]:
            per = Period("2011-04-01 09:00", freq=freq)
            assert per - offsets.Day(2) == Period("2011-03-30 09:00", freq=freq)
            assert per - offsets.Hour(3) == Period("2011-04-01 06:00", freq=freq)
            assert per - np.timedelta64(3, "h") == Period("2011-04-01 06:00", freq=freq)
            assert per - np.timedelta64(3600, "s") == Period(
                "2011-04-01 08:00", freq=freq
            )
            assert per - timedelta(minutes=120) == Period("2011-04-01 07:00", freq=freq)
            assert per - timedelta(days=4, minutes=180) == Period(
                "2011-03-28 06:00", freq=freq
            )

            for off in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(3200, "s"),
                timedelta(hours=23, minutes=30),
            ]:
                with pytest.raises(IncompatibleFrequency, match=msg):
                    per - off

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_period_addsub_nat(self, freq):
        # GH#13071
        per = Period("2011-01", freq=freq)

        # For subtraction, NaT is treated as another Period object
        assert NaT - per is NaT
        assert per - NaT is NaT

        # For addition, NaT is treated as offset-like
        assert NaT + per is NaT
        assert per + NaT is NaT

    @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "m"])
    def test_period_add_sub_td64_nat(self, unit):
        # GH#47196
        per = Period("2022-06-01", "D")
        nat = np.timedelta64("NaT", unit)

        assert per + nat is NaT
        assert nat + per is NaT
        assert per - nat is NaT
        with pytest.raises(TypeError, match="unsupported operand"):
            nat - per

    def test_period_ops_offset(self):
        per = Period("2011-04-01", freq="D")
        result = per + offsets.Day()
        exp = Period("2011-04-02", freq="D")
        assert result == exp

        result = per - offsets.Day(2)
        exp = Period("2011-03-30", freq="D")
        assert result == exp

        msg = r"Input cannot be converted to Period\(freq=D\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            per + offsets.Hour(2)

        with pytest.raises(IncompatibleFrequency, match=msg):
            per - offsets.Hour(2)

    def test_period_add_timestamp_raises(self):
        # GH#17983
        ts = Timestamp("2017")
        per = Period("2017", freq="M")

        msg = r"unsupported operand type\(s\) for \+: 'Timestamp' and 'Period'"
        with pytest.raises(TypeError, match=msg):
            ts + per

        msg = r"unsupported operand type\(s\) for \+: 'Period' and 'Timestamp'"
        with pytest.raises(TypeError, match=msg):
            per + ts
class TestPeriodComparisons:
    """Comparison semantics of Period scalars."""

    def test_period_comparison_same_freq(self):
        # A strictly earlier period orders below a later one.
        earlier = Period("2000-01", "M")
        later = Period("2000-02", "M")
        assert earlier < later
        assert earlier <= later
        assert earlier != later
        assert not earlier == later
        assert not earlier > later
        assert not earlier >= later

    def test_period_comparison_same_period_different_object(self):
        # Two distinct objects representing the identical period compare equal.
        left = Period("2000-01", "M")
        right = Period("2000-01", "M")
        assert left == right
        assert left <= right
        assert left >= right
        assert not (left < right or left > right)

    def test_period_comparison_mismatched_freq(self):
        monthly = Period("2000-01", "M")
        daily = Period("2012-01-01", "D")
        # Equality across frequencies is simply False; ordering raises.
        assert monthly != daily
        assert not monthly == daily
        msg = r"Input has different freq=D from Period\(freq=M\)"
        for compare in (
            lambda: monthly < daily,
            lambda: monthly <= daily,
            lambda: monthly > daily,
            lambda: monthly >= daily,
        ):
            with pytest.raises(IncompatibleFrequency, match=msg):
                compare()

    def test_period_comparison_invalid_type(self):
        per = Period("2000-01", "M")
        # Equality against a plain int is False, never an error.
        assert per != 1
        assert not per == 1
        # Ordering against a plain int raises, in either operand order.
        int_or_per = "'(Period|int)'"
        msg = f"not supported between instances of {int_or_per} and {int_or_per}"
        for left, right in [(per, 1), (1, per)]:
            for compare in (
                lambda a, b: a > b,
                lambda a, b: a >= b,
                lambda a, b: a < b,
                lambda a, b: a <= b,
            ):
                with pytest.raises(TypeError, match=msg):
                    compare(left, right)

    def test_period_comparison_nat(self):
        per = Period("2011-01-01", freq="D")
        ts = Timestamp("2011-01-01")
        # Period('NaT') behaves identically to Timestamp('NaT'): every
        # comparison is False except inequality.
        for left, right in [(NaT, per), (per, NaT), (NaT, ts), (ts, NaT)]:
            assert left != right
            for outcome in (
                left == right,
                left < right,
                left > right,
                left <= right,
                left >= right,
            ):
                assert not outcome

    @pytest.mark.parametrize(
        "scalar, expected",
        ((0, False), (Period("2000-01", "M"), True)),
    )
    def test_period_comparison_numpy_zerodim_arr(self, scalar, expected):
        # A 0-d ndarray is unwrapped to its scalar before comparing,
        # symmetrically in both operand orders.
        wrapped = np.array(scalar)
        per = Period("2000-01", "M")
        assert (per == wrapped) is expected
        assert (wrapped == per) is expected
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@scalar@period@test_arithmetic.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "AndreasKvammen/ML_dust_detection",
"repo_path": "ML_dust_detection_extracted/ML_dust_detection-main/Dust_impact_rates/README.md",
"type": "Markdown"
}
|
The folder "Dust_impact_rates" contains the MATLAB script and files to reproduce the daily dust impact rates (Figure 11 from the article)
1. dust_impact_rates.m - is a MATLAB script to read the daily dust count (classified by the TDS, SVM and CNN approach) and convert it to impact rates using the RPW-TDS duty cycle. The script plots the daily impact rates along with the associated fit using the dust flux
model from Zaslavsky et al. (2021) with an included offset
2. TDS_ddc.txt - is a table containing the date and the daily dust count using the TDS approach
3. SVM_ddc.txt - is a table containing the date, the median of the daily dust count (using the SVM approach) and the standard deviation of the daily dust count (calculated using 10 different training/testing data splits)
4. CNN_ddc.txt - is a table containing the date, the median of the daily dust count (using the CNN approach) and the standard deviation of the daily dust count (calculated using 10 different training/testing data splits)
5. fits.csv - is a table containing the date, the RPW-TDS duty cycle and the TDS/SVM/CNN fit to data using the dust flux model from Zaslavsky et al. (2021) with an included offset
6. dust_impact_rates.pdf - presents the MatLab code and resulting plots in pdf format
|
AndreasKvammenREPO_NAMEML_dust_detectionPATH_START.@ML_dust_detection_extracted@ML_dust_detection-main@Dust_impact_rates@README.md@.PATH_END.py
|
{
"filename": "hashing.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/pyro/contrib/tracking/hashing.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import heapq
import itertools
from collections import defaultdict
from numbers import Number
import torch
class LSH:
    """
    Implements locality-sensitive hashing for low-dimensional euclidean space.

    Points are binned into an axis-aligned grid with cell size ``radius``;
    :meth:`nearby` scans the 3**D surrounding cells. This provides two
    guarantees:

    - Points *not* returned by :meth:`nearby` differ from the input point by
      more than ``radius`` in at least one coordinate.
    - Points returned by :meth:`nearby` differ from the input point by
      less than ``2 * radius`` in every coordinate.

    Example:
        >>> radius = 1
        >>> lsh = LSH(radius)
        >>> a = torch.tensor([-0.51, -0.51])  # hash(a)=(-1,-1)
        >>> b = torch.tensor([-0.49, -0.49])  # hash(b)=(0,0)
        >>> c = torch.tensor([1.0, 1.0])      # hash(c)=(1,1)
        >>> lsh.add('a', a)
        >>> lsh.add('b', b)
        >>> lsh.add('c', c)
        >>> # even though c is within 2*radius of a
        >>> lsh.nearby('a')  # doctest: +SKIP
        {'b'}
        >>> lsh.nearby('b')  # doctest: +SKIP
        {'a', 'c'}
        >>> lsh.remove('b')
        >>> lsh.nearby('a')  # doctest: +SKIP
        set()

    :param float radius: Scaling parameter used in hash function. Determines the size of the neighbourhood.
    """

    def __init__(self, radius):
        if not (isinstance(radius, Number) and radius > 0):
            raise ValueError(
                "radius must be float greater than 0, given: {}".format(radius)
            )
        self._radius = radius
        self._hash_to_key = defaultdict(set)  # grid cell -> keys in that cell
        self._key_to_hash = {}  # key -> grid cell, for O(1) removal/lookup

    def _hash(self, point):
        # Bin each coordinate in units of the radius.
        coords = (point / self._radius).round()
        return tuple(map(int, coords))

    def add(self, key, point):
        """
        Adds (``key``, ``point``) pair to the hash.

        If ``key`` is already present, its previous point is replaced.

        :param key: Key used identify ``point``.
        :param torch.Tensor point: data, should be detached and on cpu.
        """
        _hash = self._hash(point)
        if key in self._key_to_hash:
            self.remove(key)
        self._key_to_hash[key] = _hash
        self._hash_to_key[_hash].add(key)

    def remove(self, key):
        """
        Removes ``key`` and corresponding point from the hash.

        Raises :exc:`KeyError` if key is not in hash.

        :param key: key used to identify point.
        """
        _hash = self._key_to_hash.pop(key)
        bucket = self._hash_to_key[_hash]
        bucket.remove(key)
        if not bucket:
            # Drop empty cells so the table does not accumulate garbage.
            del self._hash_to_key[_hash]

    def nearby(self, key):
        r"""
        Returns a set of keys which are neighbours of the point identified by ``key``.

        Two points are nearby if difference of each element of their hashes is smaller than 2. In euclidean space, this
        corresponds to all points :math:`\mathbf{p}` where :math:`|\mathbf{p}_k-(\mathbf{p_{key}})_k|<r`,
        and some points (all points not guaranteed) where :math:`|\mathbf{p}_k-(\mathbf{p_{key}})_k|<2r`.

        :param key: key used to identify input point.
        :return: a set of keys identifying neighbours of the input point.
        :rtype: set
        """
        _hash = self._key_to_hash[key]
        result = set()
        for nearby_hash in itertools.product(*[[i - 1, i, i + 1] for i in _hash]):
            # Use .get() rather than indexing: probing an absent cell must
            # not insert an empty set into the defaultdict, which would
            # grow the table without bound under repeated queries.
            bucket = self._hash_to_key.get(nearby_hash)
            if bucket:
                result |= bucket
        result.remove(key)
        return result
class ApproxSet:
    """
    Queries low-dimensional euclidean space for approximate occupancy.

    Space is partitioned into axis-aligned bins of size ``radius`` (same
    hashing scheme as :class:`LSH`); at most one point is ever recorded
    per bin.

    :param float radius: scaling parameter used in hash function. Determines the size of the bin.
        See :class:`LSH` for details.
    """

    def __init__(self, radius):
        if not (isinstance(radius, Number) and radius > 0):
            raise ValueError(
                "radius must be float greater than 0, given: {}".format(radius)
            )
        self._radius = radius
        self._bins = set()

    def _hash(self, point):
        # Identify the bin by rounding each coordinate in units of the radius.
        scaled = (point / self._radius).round()
        return tuple(int(c) for c in scaled)

    def try_add(self, point):
        """
        Attempts to add ``point`` to set. Only adds if there are no points in the ``point``'s bin.

        :param torch.Tensor point: Point to be queried, should be detached and on cpu.
        :return: ``True`` if point is successfully added, ``False`` if there is already a point in ``point``'s bin.
        :rtype: bool
        """
        bin_id = self._hash(point)
        occupied = bin_id in self._bins
        if not occupied:
            self._bins.add(bin_id)
        return not occupied
def merge_points(points, radius):
    """
    Greedily merge points that are closer than given radius.

    This uses :class:`LSH` to achieve complexity that is linear in the number
    of merged clusters and quadratic in the size of the largest merged cluster.

    :param torch.Tensor points: A tensor of shape ``(K,D)`` where ``K`` is
        the number of points and ``D`` is the number of dimensions.
    :param float radius: The minimum distance nearer than which
        points will be merged.
    :return: A tuple ``(merged_points, groups)`` where ``merged_points`` is a
        tensor of shape ``(J,D)`` where ``J <= K``, and ``groups`` is a list of
        tuples of indices mapping merged points to original points. Note that
        ``len(groups) == J`` and ``sum(len(group) for group in groups) == K``.
    :rtype: tuple
    """
    if points.dim() != 2:
        raise ValueError(
            "Expected points.shape == (K,D), but got {}".format(points.shape)
        )
    if not (isinstance(radius, Number) and radius > 0):
        raise ValueError(
            "Expected radius to be a positive number, but got {}".format(radius)
        )
    radius = (
        0.99 * radius
    )  # avoid merging points exactly radius apart, e.g. grid points
    # Compare squared distances throughout, avoiding a sqrt per pair.
    threshold = radius**2

    # Setup data structures to cheaply search for nearest pairs.
    lsh = LSH(radius)
    priority_queue = []  # min-heap of (squared distance, index, index) triples
    groups = [(i,) for i in range(len(points))]
    for i, point in enumerate(points):
        lsh.add(i, point)
        # Only indices j < i are in the LSH at this moment, so each close
        # pair is pushed exactly once.
        for j in lsh.nearby(i):
            d2 = (point - points[j]).pow(2).sum().item()
            if d2 < threshold:
                heapq.heappush(priority_queue, (d2, j, i))
    if not priority_queue:
        return points, groups

    # Convert from dense to sparse representation. Merged clusters get
    # fresh ids (>= K), so stale heap entries referencing consumed ids can
    # be detected by a simple membership test below.
    next_id = len(points)
    points = dict(enumerate(points))
    groups = dict(enumerate(groups))

    # Greedily merge the closest remaining pair, replacing the two points
    # by their midpoint, until no pair is nearer than the threshold.
    while priority_queue:
        d1, i, j = heapq.heappop(priority_queue)
        if i not in points or j not in points:
            # Stale entry: at least one endpoint was already merged away.
            continue
        k = next_id
        next_id += 1
        points[k] = (points.pop(i) + points.pop(j)) / 2
        groups[k] = groups.pop(i) + groups.pop(j)
        lsh.remove(i)
        lsh.remove(j)
        lsh.add(k, points[k])
        # Queue any pairs the new merged point forms with its neighbours.
        for i in lsh.nearby(k):
            if i == k:
                # Defensive: nearby() is not expected to return k itself.
                continue
            d2 = (points[i] - points[k]).pow(2).sum().item()
            if d2 < threshold:
                heapq.heappush(priority_queue, (d2, i, k))

    # Convert from sparse back to dense representation.
    ids = sorted(points.keys())
    points = torch.stack([points[i] for i in ids])
    groups = [groups[i] for i in ids]
    return points, groups
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@pyro@contrib@tracking@hashing.py@.PATH_END.py
|
{
"filename": "advice.md",
"repo_name": "tigerchenlu98/rebound",
"repo_path": "rebound_extracted/rebound-main/.github/ISSUE_TEMPLATE/advice.md",
"type": "Markdown"
}
|
---
name: Ask for advice
about: Ask for advice on how to use REBOUND for the specific science problem that you have in mind. Get suggestions for choosing the best integrator and settings, or inquire about ways to optimize runtime.
title: ''
labels: question
assignees: ''
---
**Environment**
Which version of REBOUND are you using and on what operating system?
- REBOUND Version: [e.g. 28.7.0]
- API interface: [C or Python]
- Operating System (including version): [e.g. Linux, Windows, MacOS]
**Physics**
Describe the physical system you're interested in simulating.
What are the characteristic time and length scales in your problem?
How many particles do you want to simulate?
How long do you need to run the simulation?
**Goal**
Describe what you're ultimately trying to achieve?
What are the science questions you want to answer?
What quantities do you want to measure with your simulation?
**Progress**
What have you tried so far?
How is that working out?
What are the stumbling blocks?
What areas would you like to improve?
**Name/Affiliation**
It helps to know your name and your affiliation (if you have one).
|
tigerchenlu98REPO_NAMEreboundPATH_START.@rebound_extracted@rebound-main@.github@ISSUE_TEMPLATE@advice.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/EXOSIMS/PlanetPhysicalModel/__init__.py",
"type": "Python"
}
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@EXOSIMS@PlanetPhysicalModel@__init__.py@.PATH_END.py
|
|
{
"filename": "_offsetsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/_offsetsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OffsetsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``offsetsrc`` property of ``bar`` traces."""

    def __init__(self, plotly_name="offsetsrc", parent_name="bar", **kwargs):
        # Changing the source reference alone does not trigger a re-draw.
        edit_type = kwargs.pop("edit_type", "none")
        super(OffsetsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@_offsetsrc.py@.PATH_END.py
|
{
"filename": "create_mumpower_bdf_file.py",
"repo_name": "nuc-astro/winnet",
"repo_path": "winnet_extracted/winnet-master/bin/create_mumpower_bdf_file.py",
"type": "Python"
}
|
# Author: M. Reichert
# Date  : 03.06.2024
# Convert beta delayed fission file of Mumpower et al. 2022 into a WinNet readable format.
# The Mumpower et al. 2022 file can be accessed at :
# https://journals.aps.org/prc/supplemental/10.1103/PhysRevC.106.065805
import numpy as np
import pandas as pd
from class_files.nucleus_class import nucleus

# Input file with the beta-delayed fission data.
path = "mumpower_beoh350_bdf_frdm2012_bstr12-gtff_frldm2015.dat"

# Columns of the file.
# NOTE(review): P0..P10 / P0f..P10f channel semantics are inferred from the
# column names only — confirm against the Mumpower et al. 2022 description.
columns = ["name", "Z", "N", "A", "P0", "P1", "P2", "P3", "P4", "P5", "P6", "P7", "P8", "P9", "P10",
           "P0f", "P1f", "P2f", "P3f", "P4f", "P5f", "P6f", "P7f", "P8f", "P9f", "P10f"]

total_data =[]
rate_counter = 0

# Open and parse through the file. Each nucleus is spread over two data
# lines: a 16-value line (identifiers plus probabilities) followed by a
# second line holding the fission probabilities.
with open(path,"r") as f:
    lines = f.readlines()
    for l in lines:
        # Skip comment and blank lines.
        if (l[0] == "#") or (l.strip()==""):
            continue
        else:
            data = np.array(l.split()).astype(float)
            if len(data) == 16:
                # Get the nucleus name from its proton and neutron numbers.
                nucleus_name = nucleus(Z=int(data[0]), N=int(data[1])).get_name()
                # Create the first part of the entry.
                # NOTE(review): only the first 14 of the 16 values are kept
                # (Z, N, A, P0..P10) — confirm the trailing two columns are
                # intentionally unused.
                entry = [nucleus_name]+list(data[0:14])
            else:
                # Create the second part of the entry (P0f..P10f).
                entry = entry+list(data)
                # Add the completed row to the total data.
                total_data.append(entry)
                rate_counter += 1

# Create a pandas dataframe from the parsed rows.
df = pd.DataFrame(total_data, columns=columns)

# Calculate the total fission probability per nucleus.
df["Total_pf"] = df["P0f"]+df["P1f"]+df["P2f"]+df["P3f"]+df["P4f"]+df["P5f"]+df["P6f"]+df["P7f"]+df["P8f"]+df["P9f"]+df["P10f"]

# Drop entries that have no fission probability at all.
df = df[df["Total_pf"]>0]

# Create the file content: one header line per nucleus followed by its
# channel probabilities.
fcont = ""
# Go through the dataframe
for it, row in df.iterrows():
    fcont = fcont + row["name"].rjust(5) + 4*" "+4*" "+"mp22\n"
    # NOTE(review): only P0f..P9f are written (range(10)); P10f is omitted
    # even though it contributes to Total_pf above — confirm the output
    # format expects exactly ten channels.
    for i in range(10):
        fcont = fcont + f"{row[f'P{i}f']:.3f}"+4*" "
    fcont = fcont + "\n"

# Write to file
with open("fission_rates_bdf_mp22.dat","w") as f:
    f.write(fcont)

# Say something.
# NOTE(review): rate_counter counts all parsed nuclei, not the filtered
# number actually written to the file — the message may overstate it.
print(f"File fission_rates_bdf_mp22.dat created with {rate_counter} entries.")
|
nuc-astroREPO_NAMEwinnetPATH_START.@winnet_extracted@winnet-master@bin@create_mumpower_bdf_file.py@.PATH_END.py
|
{
"filename": "_textfont.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/scatterpolar/selected/_textfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
    """Text font settings for selected points of a scatterpolar trace."""

    # Location of this node within the figure hierarchy.
    _parent_path_str = "scatterpolar.selected"
    _path_str = "scatterpolar.selected.textfont"
    _valid_props = {"color"}

    @property
    def color(self):
        """
        Sets the text font color of selected points.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def _prop_descriptions(self):
        # Text consumed by the base class when building help/error output.
        return """\
        color
            Sets the text font color of selected points.
        """

    def __init__(self, arg=None, color=None, **kwargs):
        """
        Construct a new Textfont object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatterpolar.s
            elected.Textfont`
        color
            Sets the text font color of selected points.

        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__("textfont")

        # Internal construction path: adopt the parent and skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain dict we can consume destructively.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatterpolar.selected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.selected.Textfont`"""
            )

        # Behavior flags forwarded by the figure-construction machinery.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over values in ``arg``.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v

        # Any remaining entries go through the generic kwarg processor.
        self._process_kwargs(**dict(arg, **kwargs))

        # Construction finished; stop skipping invalid properties.
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@scatterpolar@selected@_textfont.py@.PATH_END.py
|
{
"filename": "e3b50f666fbb_the_first_alembic_revision.py",
"repo_name": "transientskp/tkp",
"repo_path": "tkp_extracted/tkp-master/alembic/versions/e3b50f666fbb_the_first_alembic_revision.py",
"type": "Python"
}
|
"""the first alembic revision
Revision ID: e3b50f666fbb
Revises:
Create Date: 2016-05-30 11:32:22.585690
"""
# revision identifiers, used by Alembic.
revision = 'e3b50f666fbb'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision. The initial revision makes no schema changes."""
    pass
def downgrade():
    """Revert this revision. Nothing to undo for the initial revision."""
    pass
|
transientskpREPO_NAMEtkpPATH_START.@tkp_extracted@tkp-master@alembic@versions@e3b50f666fbb_the_first_alembic_revision.py@.PATH_END.py
|
{
"filename": "databricks.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/tools/databricks.ipynb",
"type": "Jupyter Notebook"
}
|
# Databricks Unity Catalog (UC)
This notebook shows how to use UC functions as LangChain tools, with both LangChain and LangGraph agent APIs.
See Databricks documentation ([AWS](https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-sql-function.html)|[Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/language-manual/sql-ref-syntax-ddl-create-sql-function)|[GCP](https://docs.gcp.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-sql-function.html)) to learn how to create SQL or Python functions in UC. Do not skip function and parameter comments, which are critical for LLMs to call functions properly.
In this example notebook, we create a simple Python function that executes arbitrary code and use it as a LangChain tool:
```sql
CREATE FUNCTION main.tools.python_exec (
code STRING COMMENT 'Python code to execute. Remember to print the final result to stdout.'
)
RETURNS STRING
LANGUAGE PYTHON
COMMENT 'Executes Python code and returns its stdout.'
AS $$
import sys
from io import StringIO
stdout = StringIO()
sys.stdout = stdout
exec(code)
return stdout.getvalue()
$$
```
It runs in a secure and isolated environment within a Databricks SQL warehouse.
```python
%pip install --upgrade --quiet databricks-sdk langchain-community databricks-langchain langgraph mlflow
```
Note: you may need to restart the kernel to use updated packages.
```python
from databricks_langchain import ChatDatabricks
llm = ChatDatabricks(endpoint="databricks-meta-llama-3-70b-instruct")
```
```python
from databricks.sdk import WorkspaceClient
from langchain_community.tools.databricks import UCFunctionToolkit
tools = (
UCFunctionToolkit(
# You can find the SQL warehouse ID in its UI after creation.
warehouse_id="xxxx123456789"
)
.include(
# Include functions as tools using their qualified names.
# You can use "{catalog_name}.{schema_name}.*" to get all functions in a schema.
"main.tools.python_exec",
)
.get_tools()
)
```
(Optional) To increase the retry time for getting a function execution response, set environment variable UC_TOOL_CLIENT_EXECUTION_TIMEOUT. Default retry time value is 120s.
### Optional: increase the client execution timeout
```python
import os
os.environ["UC_TOOL_CLIENT_EXECUTION_TIMEOUT"] = "200"
```
## LangGraph agent example
```python
from langgraph.prebuilt import create_react_agent
agent = create_react_agent(
llm,
tools,
state_modifier="You are a helpful assistant. Make sure to use tool for information.",
)
agent.invoke({"messages": [{"role": "user", "content": "36939 * 8922.4"}]})
```
{'messages': [HumanMessage(content='36939 * 8922.4', additional_kwargs={}, response_metadata={}, id='1a10b10b-8e37-48c7-97a1-cac5006228d5'),
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_a8f3986f-4b91-40a3-8d6d-39f431dab69b', 'type': 'function', 'function': {'name': 'main__tools__python_exec', 'arguments': '{"code": "print(36939 * 8922.4)"}'}}]}, response_metadata={'prompt_tokens': 771, 'completion_tokens': 29, 'total_tokens': 800}, id='run-865c3613-20ba-4e80-afc8-fde1cfb26e5a-0', tool_calls=[{'name': 'main__tools__python_exec', 'args': {'code': 'print(36939 * 8922.4)'}, 'id': 'call_a8f3986f-4b91-40a3-8d6d-39f431dab69b', 'type': 'tool_call'}]),
ToolMessage(content='{"format": "SCALAR", "value": "329584533.59999996\\n", "truncated": false}', name='main__tools__python_exec', id='8b63d4c8-1a3d-46a5-a719-393b2ef36770', tool_call_id='call_a8f3986f-4b91-40a3-8d6d-39f431dab69b'),
AIMessage(content='The result of the multiplication is:\n\n329584533.59999996', additional_kwargs={}, response_metadata={'prompt_tokens': 846, 'completion_tokens': 22, 'total_tokens': 868}, id='run-22772404-611b-46e4-9956-b85e4a385f0f-0')]}
## LangChain agent example
```python
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a helpful assistant. Make sure to use tool for information.",
),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
agent = create_tool_calling_agent(llm, tools, prompt)
```
```python
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "36939 * 8922.4"})
```
[1m> Entering new AgentExecutor chain...[0m
[32;1m[1;3m
Invoking: `main__tools__python_exec` with `{'code': 'print(36939 * 8922.4)'}`
[0m[36;1m[1;3m{"format": "SCALAR", "value": "329584533.59999996\n", "truncated": false}[0m[32;1m[1;3mThe result of the multiplication is:
329584533.59999996[0m
[1m> Finished chain.[0m
{'input': '36939 * 8922.4',
'output': 'The result of the multiplication is:\n\n329584533.59999996'}
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@tools@databricks.ipynb@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "brinckmann/montepython_public",
"repo_path": "montepython_public_extracted/montepython_public-master/montepython/likelihoods/kids450_cf_likelihood_public/README.md",
"type": "Markdown"
}
|
This repository contains the likelihood module for the KiDS-450 correlation function measurements from [Hildebrandt et al. 2017 (MNRAS, 465, 1454)](http://adsabs.harvard.edu/abs/2017MNRAS.465.1454H). Note that this is NOT the original likelihood used for their analysis. However, this likelihood was used for the analysis of [Köhlinger et al. 2019 (MNRAS, 484, 3126)](http://adsabs.harvard.edu/abs/2019MNRAS.484.3126K) and differences with respect to the original CosmoMC likelihood were found to be negligible (please refer to Section 4.1 in that reference for details).
The module will be working 'out-of-the-box' within a [MontePython](https://github.com/brinckmann/montepython_public) and [CLASS](https://github.com/lesgourg/class_public) (version >= 2.6!) setup. The required KiDS-450 data files can be downloaded from the [KiDS science data webpage](http://kids.strw.leidenuniv.nl/sciencedata.php) and the parameter file for reproducing the fiducial run of [Köhlinger et al. 2019 (MNRAS, 484, 3126)](http://adsabs.harvard.edu/abs/2019MNRAS.484.3126K) is supplied in the subfolder `INPUT` within this repository.
Assuming that MontePython (with CLASS version >= 2.6) is set up (we recommend to use the MultiNest sampler!), please proceed as follows:
1) Clone this repository
`git clone https://github.com/fkoehlin/kids450_cf_likelihood_public.git`
2) Copy `__init__.py` and `kids450_cf_likelihood_public.data` from this repository into a folder named `kids450_cf_likelihood_public` within `/your/path/to/montepython_public/montepython/likelihoods/`.
(you can rename the folder to whatever you like, but you must use this name then consistently for the whole likelihood which implies to rename the `*.data`-file, including the prefixes of the parameters defined in there, the name of the likelihood in the `__init__.py`-file and also in the `*.param`-file.)
3) Set the path to the data folder (i.e. `KiDS-450_COSMIC_SHEAR_DATA_RELEASE` from the tarball available from the [KiDS science data webpage](http://kids.strw.leidenuniv.nl/sciencedata.php)) in `kids450_cf_likelihood_public.data` and modify parameters as you please (note that everything is set up to work with `kids450_cf.param`).
4) Copy the folder `CUT_VALUES` from this repository also into the root data folder (`KiDS-450_COSMIC_SHEAR_DATA_RELEASE`).
5) Start your runs using e.g. the `kids450_cf.param` supplied in the subfolder `INPUT` within this repository.
6) Contribute your developments/bugfixes to this likelihood (please use a dedicated branch per fix/feature).
7) If you publish your results based on using this likelihood, please consider citing [Köhlinger et al. 2019 (MNRAS, 484, 3126)](http://adsabs.harvard.edu/abs/2019MNRAS.484.3126K) and please follow the instructions on the [KiDS science data webpage](http://kids.strw.leidenuniv.nl/sciencedata.php) for referencing all relevant sources for the KiDS-450 data. Please cite also all relevant references for MontePython and CLASS.
Refer to `run_with_multinest.sh` within the subfolder `INPUT` for all MultiNest-related settings that were used for the fiducial runs.
WARNING: This likelihood only produces valid results for `\Omega_k = 0`, i.e. flat cosmologies!
For questions/comments please use the issue-tracking system!
|
brinckmannREPO_NAMEmontepython_publicPATH_START.@montepython_public_extracted@montepython_public-master@montepython@likelihoods@kids450_cf_likelihood_public@README.md@.PATH_END.py
|
{
"filename": "coupling_layers.py",
"repo_name": "vislearn/FrEIA",
"repo_path": "FrEIA_extracted/FrEIA-master/FrEIA/modules/coupling_layers.py",
"type": "Python"
}
|
from . import InvertibleModule
from typing import Callable, Union
import torch
class _BaseCouplingBlock(InvertibleModule):
'''Base class to implement various coupling schemes. It takes care of
checking the dimensions, conditions, clamping mechanism, etc.
Each child class only has to implement the _coupling1 and _coupling2 methods
for the left and right coupling operations.
(In some cases below, forward() is also overridden)
'''
def __init__(self, dims_in, dims_c=[],
clamp: float = 2.,
clamp_activation: Union[str, Callable] = "ATAN",
split_len: Union[float, int] = 0.5):
'''
Additional args in docstring of base class.
Args:
clamp: Soft clamping for the multiplicative component. The
amplification or attenuation of each input dimension can be at most
exp(±clamp).
clamp_activation: Function to perform the clamping. String values
"ATAN", "TANH", and "SIGMOID" are recognized, or a function of
object can be passed. TANH behaves like the original realNVP paper.
A custom function should take tensors and map -inf to -1 and +inf to +1.
split_len: Specify the dimension where the data should be split.
If given as int, directly indicates the split dimension.
If given as float, must fulfil 0 <= split_len <= 1 and number of
unchanged dimensions is set to `round(split_len * dims_in[0, 0])`.
'''
super().__init__(dims_in, dims_c)
self.channels = dims_in[0][0]
# ndims means the rank of tensor strictly speaking.
# i.e. 1D, 2D, 3D tensor, etc.
self.ndims = len(dims_in[0])
if isinstance(split_len, float):
if not (0 <= split_len <= 1):
raise ValueError(f"Float split_len must be in range [0, 1], "
f"but is: {split_len}")
split_len = round(self.channels * split_len)
else:
if not (0 <= split_len <= self.channels):
raise ValueError(f"Integer split_len must be in range "
f"0 <= split_len <= {self.channels}, "
f"but is: {split_len}")
self.split_len1 = split_len
self.split_len2 = self.channels - split_len
self.clamp = clamp
assert all([tuple(dims_c[i][1:]) == tuple(dims_in[0][1:]) for i in range(len(dims_c))]), \
F"Dimensions of input {dims_in} and one or more conditions {dims_c} don't agree."
self.conditional = (len(dims_c) > 0)
self.condition_length = sum([dims_c[i][0] for i in range(len(dims_c))])
if isinstance(clamp_activation, str):
if clamp_activation == "ATAN":
self.f_clamp = (lambda u: 0.636 * torch.atan(u))
elif clamp_activation == "TANH":
self.f_clamp = torch.tanh
elif clamp_activation == "SIGMOID":
self.f_clamp = (lambda u: 2. * (torch.sigmoid(u) - 0.5))
else:
raise ValueError(f'Unknown clamp activation "{clamp_activation}"')
else:
self.f_clamp = clamp_activation
def forward(self, x, c=[], rev=False, jac=True):
'''See base class docstring'''
# notation:
# x1, x2: two halves of the input
# y1, y2: two halves of the output
# *_c: variable with condition concatenated
# j1, j2: Jacobians of the two coupling operations
x1, x2 = torch.split(x[0], [self.split_len1, self.split_len2], dim=1)
if not rev:
x2_c = torch.cat([x2, *c], 1) if self.conditional else x2
y1, j1 = self._coupling1(x1, x2_c)
y1_c = torch.cat([y1, *c], 1) if self.conditional else y1
y2, j2 = self._coupling2(x2, y1_c)
else:
# names of x and y are swapped for the reverse computation
x1_c = torch.cat([x1, *c], 1) if self.conditional else x1
y2, j2 = self._coupling2(x2, x1_c, rev=True)
y2_c = torch.cat([y2, *c], 1) if self.conditional else y2
y1, j1 = self._coupling1(x1, y2_c, rev=True)
return (torch.cat((y1, y2), 1),), j1 + j2
def _coupling1(self, x1, u2, rev=False):
    '''The first/left coupling operation in a two-sided coupling block.

    Args:
        x1 (Tensor): the 'active' half being transformed.
        u2 (Tensor): the 'passive' half, including the conditions, from
            which the transformation is computed.
        rev (bool): if True, apply the inverse of the transformation.

    Returns:
        y1 (Tensor): same shape as x1, the transformed 'active' half.
        j1 (float or Tensor): the Jacobian, only has batch dimension.
            If the Jacobian is zero or fixed, may also return float.
    '''
    # Subclasses must implement the concrete coupling transform.
    raise NotImplementedError()
def _coupling2(self, x2, u1, rev=False):
    '''The second/right coupling operation in a two-sided coupling block.

    Args:
        x2 (Tensor): the 'active' half being transformed.
        u1 (Tensor): the 'passive' half, including the conditions, from
            which the transformation is computed.
        rev (bool): if True, apply the inverse of the transformation.

    Returns:
        y2 (Tensor): same shape as x2, the transformed 'active' half.
        j2 (float or Tensor): the Jacobian, only has batch dimension.
            If the Jacobian is zero or fixed, may also return float.
    '''
    # Subclasses must implement the concrete coupling transform.
    raise NotImplementedError()
def output_dims(self, input_dims):
    '''See base class for docstring.

    Coupling blocks are shape-preserving, so the output dimensions are
    simply the input dimensions, for exactly one input.
    '''
    n_inputs = len(input_dims)
    if n_inputs != 1:
        raise ValueError("Can only use 1 input")
    return input_dims
class NICECouplingBlock(_BaseCouplingBlock):
    '''Coupling Block following the NICE (Dinh et al, 2015) design.

    The input is split into two halves (along the channel dimension for 2D,
    3D, 4D inputs).  Each half in turn is shifted by a residual that a
    subnetwork predicts from the respective other half, which makes the block
    trivially invertible with a log-det Jacobian of zero.
    '''

    def __init__(self, dims_in, dims_c=[],
                 subnet_constructor: callable = None,
                 split_len: Union[float, int] = 0.5):
        '''
        Additional args in docstring of base class.

        Args:
          subnet_constructor:
            Callable function, class, or factory object, with signature
            constructor(dims_in, dims_out), returning a torch nn.Module that
            maps dims_in input channels to dims_out output channels.  Two of
            these subnetworks will be initialized inside the block.
        '''
        # NICE is purely additive, so soft clamping is irrelevant: pass a
        # zero clamp and an identity activation to the base class.
        super().__init__(dims_in, dims_c,
                         clamp=0., clamp_activation=(lambda u: u),
                         split_len=split_len)

        n_cond = self.condition_length
        self.F = subnet_constructor(self.split_len2 + n_cond, self.split_len1)
        self.G = subnet_constructor(self.split_len1 + n_cond, self.split_len2)

    def _coupling1(self, x1, u2, rev=False):
        # Additive coupling: shift the active half by a residual predicted
        # from the passive half.  Volume preserving, so the Jacobian is 0.
        shift = self.F(u2)
        y1 = x1 - shift if rev else x1 + shift
        return y1, 0.

    def _coupling2(self, x2, u1, rev=False):
        # Mirror image of _coupling1, using the second subnetwork.
        shift = self.G(u1)
        y2 = x2 - shift if rev else x2 + shift
        return y2, 0.
class RNVPCouplingBlock(_BaseCouplingBlock):
    '''Coupling Block following the RealNVP design (Dinh et al, 2017) with some
    minor differences. The inputs are split in two halves. For 2D, 3D, 4D
    inputs, the split is performed along the channel dimension. For
    checkerboard-splitting, prepend an i_RevNet_downsampling module. Two affine
    coupling operations are performed in turn on both halves of the input.
    '''

    def __init__(self, dims_in, dims_c=[],
                 subnet_constructor: Callable = None,
                 clamp: float = 2.,
                 clamp_activation: Union[str, Callable] = "ATAN",
                 split_len: Union[float, int] = 0.5):
        '''
        Additional args in docstring of base class.

        Args:
          subnet_constructor: function or class, with signature
            constructor(dims_in, dims_out).  The result should be a torch
            nn.Module, that takes dims_in input channels, and dims_out output
            channels. See tutorial for examples. Four of these subnetworks will be
            initialized in the block.
          clamp: Soft clamping for the multiplicative component. The
            amplification or attenuation of each input dimension can be at most
            exp(±clamp).
          clamp_activation: Function to perform the clamping. String values
            "ATAN", "TANH", and "SIGMOID" are recognized, or a function
            object can be passed. TANH behaves like the original realNVP paper.
            A custom function should take tensors and map -inf to -1 and +inf to +1.
        '''
        super().__init__(dims_in, dims_c, clamp, clamp_activation,
                         split_len=split_len)

        # Separate subnetworks for the multiplicative (s) and additive (t)
        # coefficients.  *1 networks read the first half (plus conditions),
        # *2 networks read the second half (plus conditions).
        self.subnet_s1 = subnet_constructor(self.split_len1 + self.condition_length, self.split_len2)
        self.subnet_t1 = subnet_constructor(self.split_len1 + self.condition_length, self.split_len2)
        self.subnet_s2 = subnet_constructor(self.split_len2 + self.condition_length, self.split_len1)
        self.subnet_t2 = subnet_constructor(self.split_len2 + self.condition_length, self.split_len1)

    def _coupling1(self, x1, u2, rev=False):
        # notation (same for _coupling2):
        # x: inputs (i.e. 'x-side' when rev is False, 'z-side' when rev is True)
        # y: outputs (same scheme)
        # *_c: variables with condition appended
        # *1, *2: left half, right half
        # a: all affine coefficients
        # s, t: multiplicative and additive coefficients
        # j: log det Jacobian
        s2, t2 = self.subnet_s2(u2), self.subnet_t2(u2)
        # soft-clamp the log-scales to +/- self.clamp
        s2 = self.clamp * self.f_clamp(s2)
        # per-sample log-det Jacobian: sum over all non-batch dimensions
        j1 = torch.sum(s2, dim=tuple(range(1, self.ndims + 1)))

        if rev:
            # inverse of the affine transform below; Jacobian flips sign
            y1 = (x1 - t2) * torch.exp(-s2)
            return y1, -j1
        else:
            y1 = torch.exp(s2) * x1 + t2
            return y1, j1

    def _coupling2(self, x2, u1, rev=False):
        # Mirror image of _coupling1, driven by the s1/t1 subnetworks.
        s1, t1 = self.subnet_s1(u1), self.subnet_t1(u1)
        s1 = self.clamp * self.f_clamp(s1)
        j2 = torch.sum(s1, dim=tuple(range(1, self.ndims + 1)))

        if rev:
            y2 = (x2 - t1) * torch.exp(-s1)
            return y2, -j2
        else:
            y2 = torch.exp(s1) * x2 + t1
            return y2, j2
class GLOWCouplingBlock(_BaseCouplingBlock):
    '''Coupling Block following the GLOW design.  This is only the coupling
    part itself — ActNorm, invertible 1x1 convolutions, etc. are not included
    (see AllInOneBlock for a block combining all of these).

    Unlike RNVPCouplingBlock, a single subnetwork jointly predicts the
    concatenated coefficients [s_i, t_i] instead of two separate subnetworks,
    which reduces computational cost and speeds up learning.
    '''

    def __init__(self, dims_in, dims_c=[],
                 subnet_constructor: Callable = None,
                 clamp: float = 2.,
                 clamp_activation: Union[str, Callable] = "ATAN",
                 split_len: Union[float, int] = 0.5):
        '''
        Additional args in docstring of base class.

        Args:
          subnet_constructor: function or class, with signature
            constructor(dims_in, dims_out), returning a torch nn.Module that
            maps dims_in input channels to dims_out output channels.  Two of
            these subnetworks will be initialized in the block.
          clamp: Soft clamping for the multiplicative component; each input
            dimension can be amplified or attenuated by at most exp(±clamp).
          clamp_activation: Function performing the clamping.  "ATAN", "TANH"
            and "SIGMOID" are recognized strings, or a function object can be
            passed.  TANH behaves like the original realNVP paper.  A custom
            function should take tensors and map -inf to -1 and +inf to +1.
        '''
        super().__init__(dims_in, dims_c, clamp, clamp_activation,
                         split_len=split_len)

        # Each subnetwork reads one half (plus conditions) and jointly
        # predicts [s, t] for the other half, hence the doubled output size.
        n_cond = self.condition_length
        self.subnet1 = subnet_constructor(self.split_len1 + n_cond, 2 * self.split_len2)
        self.subnet2 = subnet_constructor(self.split_len2 + n_cond, 2 * self.split_len1)

    def _coupling1(self, x1, u2, rev=False):
        # The first split_len1 output channels are the raw log-scales, the
        # remainder are the shifts.
        coeffs = self.subnet2(u2)
        raw_scale = coeffs[:, :self.split_len1]
        shift = coeffs[:, self.split_len1:]

        # soft-clamp the log-scales to +/- self.clamp
        scale = self.clamp * self.f_clamp(raw_scale)
        # per-sample log-det Jacobian: sum over all non-batch dimensions
        log_jac = torch.sum(scale, dim=tuple(range(1, self.ndims + 1)))

        if rev:
            return (x1 - shift) * torch.exp(-scale), -log_jac
        return torch.exp(scale) * x1 + shift, log_jac

    def _coupling2(self, x2, u1, rev=False):
        # Mirror image of _coupling1, driven by subnet1.
        coeffs = self.subnet1(u1)
        raw_scale = coeffs[:, :self.split_len2]
        shift = coeffs[:, self.split_len2:]

        scale = self.clamp * self.f_clamp(raw_scale)
        log_jac = torch.sum(scale, dim=tuple(range(1, self.ndims + 1)))

        if rev:
            return (x2 - shift) * torch.exp(-scale), -log_jac
        return torch.exp(scale) * x2 + shift, log_jac
class GINCouplingBlock(_BaseCouplingBlock):
    '''Coupling Block following the GIN design. The difference from
    GLOWCouplingBlock (and other affine coupling blocks) is that the Jacobian
    determinant is constrained to be 1. This constrains the block to be
    volume-preserving. Volume preservation is achieved by subtracting the mean
    of the output of the s subnetwork from itself. While volume preserving, GIN
    is still more powerful than NICE, as GIN is not volume preserving within
    each dimension.

    Note: this implementation differs slightly from the originally published
    implementation, which scales the final component of the s subnetwork so the
    sum of the outputs of s is zero. There was no difference found between the
    implementations in practice, but subtracting the mean guarantees that all
    outputs of s are at most ±exp(clamp), which might be more stable in certain
    cases.
    '''

    def __init__(self, dims_in, dims_c=[],
                 subnet_constructor: Callable = None,
                 clamp: float = 2.,
                 clamp_activation: Union[str, Callable] = "ATAN",
                 split_len: Union[float, int] = 0.5):
        '''
        Additional args in docstring of base class.

        Args:
          subnet_constructor: function or class, with signature
            constructor(dims_in, dims_out).  The result should be a torch
            nn.Module, that takes dims_in input channels, and dims_out output
            channels. See tutorial for examples. Two of these subnetworks will be
            initialized in the block.
          clamp: Soft clamping for the multiplicative component. The
            amplification or attenuation of each input dimension can be at most
            exp(±clamp).
          clamp_activation: Function to perform the clamping. String values
            "ATAN", "TANH", and "SIGMOID" are recognized, or a function
            object can be passed. TANH behaves like the original realNVP paper.
            A custom function should take tensors and map -inf to -1 and +inf to +1.
        '''
        super().__init__(dims_in, dims_c, clamp, clamp_activation,
                         split_len=split_len)

        # Each subnetwork jointly predicts [s, t] for the other half,
        # hence the doubled output channel count.
        self.subnet1 = subnet_constructor(self.split_len1 + self.condition_length, self.split_len2 * 2)
        self.subnet2 = subnet_constructor(self.split_len2 + self.condition_length, self.split_len1 * 2)

    def _coupling1(self, x1, u2, rev=False):
        # notation (same for _coupling2):
        # x: inputs (i.e. 'x-side' when rev is False, 'z-side' when rev is True)
        # y: outputs (same scheme)
        # *_c: variables with condition appended
        # *1, *2: left half, right half
        # a: all affine coefficients
        # s, t: multiplicative and additive coefficients
        # j: log det Jacobian
        a2 = self.subnet2(u2)
        s2, t2 = a2[:, :self.split_len1], a2[:, self.split_len1:]
        s2 = self.clamp * self.f_clamp(s2)
        # Subtract the mean over dim 1 so the log-scales sum to zero along
        # that dimension; this is what makes the transform volume
        # preserving, so 0. is returned as the log-det Jacobian below.
        s2 = s2 - s2.mean(1, keepdim=True) # changed inplace op
        if rev:
            y1 = (x1 - t2) * torch.exp(-s2)
            return y1, 0.
        else:
            y1 = torch.exp(s2) * x1 + t2
            return y1, 0.

    def _coupling2(self, x2, u1, rev=False):
        # Mirror image of _coupling1, driven by subnet1.
        a1 = self.subnet1(u1)
        s1, t1 = a1[:, :self.split_len2], a1[:, self.split_len2:]
        s1 = self.clamp * self.f_clamp(s1)
        # Same zero-mean trick as in _coupling1.
        s1 = s1 - s1.mean(1, keepdim=True) # changed inplace op
        if rev:
            y2 = (x2 - t1) * torch.exp(-s1)
            return y2, 0.
        else:
            y2 = torch.exp(s1) * x2 + t1
            return y2, 0.
class AffineCouplingOneSided(_BaseCouplingBlock):
    '''Half of a coupling block following the GLOWCouplingBlock design: only a
    single affine transformation is applied, to half of the inputs.  When
    random permutations or orthogonal transforms are used after every block,
    this is not a restriction and simplifies the design.
    '''

    def __init__(self, dims_in, dims_c=[],
                 subnet_constructor: Callable = None,
                 clamp: float = 2.,
                 clamp_activation: Union[str, Callable] = "ATAN",
                 split_len: Union[float, int] = 0.5):
        '''
        Additional args in docstring of base class.

        Args:
          subnet_constructor: function or class, with signature
            constructor(dims_in, dims_out), returning a torch nn.Module that
            maps dims_in input channels to dims_out output channels.  One
            such subnetwork will be initialized in the block.
          clamp: Soft clamping for the multiplicative component; each input
            dimension can be amplified or attenuated by at most exp(±clamp).
          clamp_activation: Function performing the clamping.  "ATAN", "TANH"
            and "SIGMOID" are recognized strings, or a function object can be
            passed.  TANH behaves like the original realNVP paper.  A custom
            function should take tensors and map -inf to -1 and +inf to +1.
        '''
        super().__init__(dims_in, dims_c, clamp, clamp_activation,
                         split_len=split_len)
        # single subnetwork: reads the passive half (plus conditions) and
        # jointly predicts [s, t] for the active half
        self.subnet = subnet_constructor(self.split_len1 + self.condition_length, 2 * self.split_len2)

    def forward(self, x, c=[], rev=False, jac=True):
        '''Transform only the second half of the input; the first half is
        passed through unchanged and (with the conditions) drives the
        subnetwork.  Returns the output tuple and the log-det Jacobian.'''
        passive, active = torch.split(x[0], [self.split_len1, self.split_len2], dim=1)
        subnet_in = torch.cat([passive, *c], 1) if self.conditional else passive

        # first split_len2 channels are the raw log-scales, the rest are the
        # shifts; soft-clamp the log-scales to +/- self.clamp
        coeffs = self.subnet(subnet_in)
        scale = self.clamp * self.f_clamp(coeffs[:, :self.split_len2])
        shift = coeffs[:, self.split_len2:]

        # per-sample log-det Jacobian: sum over all non-batch dimensions
        log_jac = torch.sum(scale, dim=tuple(range(1, self.ndims + 1)))

        if rev:
            transformed = (active - shift) * torch.exp(-scale)
            log_jac = -log_jac
        else:
            transformed = active * torch.exp(scale) + shift

        return (torch.cat((passive, transformed), 1),), log_jac
class ConditionalAffineTransform(_BaseCouplingBlock):
    '''Similar to the conditioning layers from SPADE (Park et al, 2019): Perform
    an affine transformation on the whole input, where the affine coefficients
    are predicted from only the condition.
    '''

    def __init__(self, dims_in, dims_c=[],
                 subnet_constructor: Callable = None,
                 clamp: float = 2.,
                 clamp_activation: Union[str, Callable] = "ATAN",
                 split_len: Union[float, int] = 0.5):
        '''
        Additional args in docstring of base class.

        Args:
          subnet_constructor: function or class, with signature
            constructor(dims_in, dims_out).  The result should be a torch
            nn.Module, that takes dims_in input channels, and dims_out output
            channels. See tutorial for examples. One subnetwork will be
            initialized in the block.
          clamp: Soft clamping for the multiplicative component. The
            amplification or attenuation of each input dimension can be at most
            exp(±clamp).
          clamp_activation: Function to perform the clamping. String values
            "ATAN", "TANH", and "SIGMOID" are recognized, or a function
            object can be passed. TANH behaves like the original realNVP paper.
            A custom function should take tensors and map -inf to -1 and +inf to +1.
        '''
        super().__init__(dims_in, dims_c, clamp, clamp_activation,
                         split_len=split_len)

        if not self.conditional:
            raise ValueError("ConditionalAffineTransform must have a condition")

        # The subnetwork sees only the condition and predicts one scale and
        # one shift coefficient per input channel (hence 2 * channels).
        self.subnet = subnet_constructor(self.condition_length, 2 * self.channels)

    def forward(self, x, c=[], rev=False, jac=True):
        '''Affine-transform the whole input with coefficients predicted from
        the condition(s) only.  Returns the output tuple and the log-det
        Jacobian.'''
        # multiple conditions are concatenated along the channel dimension;
        # the constructor guarantees at least one condition exists
        if len(c) > 1:
            cond = torch.cat(c, 1)
        else:
            cond = c[0]

        # notation:
        # x: inputs (i.e. 'x-side' when rev is False, 'z-side' when rev is True)
        # y: outputs (same scheme)
        # *_c: variables with condition appended
        # *1, *2: left half, right half
        # a: all affine coefficients
        # s, t: multiplicative and additive coefficients
        # j: log det Jacobian
        a = self.subnet(cond)
        s, t = a[:, :self.channels], a[:, self.channels:]
        # soft-clamp the log-scales to +/- self.clamp
        s = self.clamp * self.f_clamp(s)
        # per-sample log-det Jacobian: sum over all non-batch dimensions
        j = torch.sum(s, dim=tuple(range(1, self.ndims + 1)))

        if rev:
            y = (x[0] - t) * torch.exp(-s)
            return (y,), -j
        else:
            y = torch.exp(s) * x[0] + t
            return (y,), j
|
vislearnREPO_NAMEFrEIAPATH_START.@FrEIA_extracted@FrEIA-master@FrEIA@modules@coupling_layers.py@.PATH_END.py
|
{
"filename": "source.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/_pytest/_code/source.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, generators, print_function
import ast
from ast import PyCF_ONLY_AST as _AST_FLAG
from bisect import bisect_right
import sys
import six
import inspect
import tokenize
import py
cpy_compile = compile
class Source(object):
    """ an immutable object holding a source code fragment,
        possibly deindenting it.
    """
    # class-wide counter used by compile() to generate unique artificial
    # filenames for the linecache
    _compilecounter = 0

    def __init__(self, *parts, **kwargs):
        # Collect lines from each part.  Parts may be other Source objects,
        # sequences of lines, strings, or arbitrary python objects whose
        # source is looked up via getsource().
        self.lines = lines = []
        de = kwargs.get('deindent', True)
        rstrip = kwargs.get('rstrip', True)
        for part in parts:
            if not part:
                # falsy parts (e.g. "" or []) default to contributing no
                # lines; note the isinstance chain below still runs and may
                # overwrite partlines for falsy strings/sequences
                partlines = []
            if isinstance(part, Source):
                partlines = part.lines
            elif isinstance(part, (tuple, list)):
                partlines = [x.rstrip("\n") for x in part]
            elif isinstance(part, six.string_types):
                partlines = part.split('\n')
                if rstrip:
                    # drop trailing blank lines of string parts
                    while partlines:
                        if partlines[-1].strip():
                            break
                        partlines.pop()
            else:
                # not a known container type: treat as a python object and
                # retrieve its source via inspect
                partlines = getsource(part, deindent=de).lines
            if de:
                partlines = deindent(partlines)
            lines.extend(partlines)

    def __eq__(self, other):
        # compare line lists; also allow comparison against a plain string
        try:
            return self.lines == other.lines
        except AttributeError:
            if isinstance(other, str):
                return str(self) == other
            return False

    # defining __eq__ without __hash__ deliberately makes Source objects
    # unhashable
    __hash__ = None

    def __getitem__(self, key):
        # integer index returns a single line; a slice returns a new Source
        if isinstance(key, int):
            return self.lines[key]
        else:
            if key.step not in (None, 1):
                raise IndexError("cannot slice a Source with a step")
            newsource = Source()
            newsource.lines = self.lines[key.start:key.stop]
            return newsource

    def __len__(self):
        return len(self.lines)

    def strip(self):
        """ return new source object with trailing
            and leading blank lines removed.
        """
        start, end = 0, len(self)
        while start < end and not self.lines[start].strip():
            start += 1
        while end > start and not self.lines[end - 1].strip():
            end -= 1
        source = Source()
        source.lines[:] = self.lines[start:end]
        return source

    def putaround(self, before='', after='', indent=' ' * 4):
        """ return a copy of the source object with
            'before' and 'after' wrapped around it.
        """
        before = Source(before)
        after = Source(after)
        newsource = Source()
        # the wrapped body is indented one level relative to before/after
        lines = [(indent + line) for line in self.lines]
        newsource.lines = before.lines + lines + after.lines
        return newsource

    def indent(self, indent=' ' * 4):
        """ return a copy of the source object with
            all lines indented by the given indent-string.
        """
        newsource = Source()
        newsource.lines = [(indent + line) for line in self.lines]
        return newsource

    def getstatement(self, lineno, assertion=False):
        """ return Source statement which contains the
            given linenumber (counted from 0).
        """
        start, end = self.getstatementrange(lineno, assertion)
        return self[start:end]

    def getstatementrange(self, lineno, assertion=False):
        """ return (start, end) tuple which spans the minimal
            statement region which containing the given lineno.
        """
        if not (0 <= lineno < len(self)):
            raise IndexError("lineno out of range")
        # NOTE: the local name 'ast' shadows the module-level ast import
        # inside this method; only start/end are used
        ast, start, end = getstatementrange_ast(lineno, self)
        return start, end

    def deindent(self, offset=None):
        """ return a new source object deindented by offset.
            If offset is None then guess an indentation offset from
            the first non-blank line.  Subsequent lines which have a
            lower indentation offset will be copied verbatim as
            they are assumed to be part of multilines.
        """
        # XXX maybe use the tokenizer to properly handle multiline
        #     strings etc.pp?
        newsource = Source()
        newsource.lines[:] = deindent(self.lines, offset)
        return newsource

    def isparseable(self, deindent=True):
        """ return True if source is parseable, heuristically
            deindenting it by default.
        """
        try:
            import parser
        except ImportError:
            # fall back to compile() when the 'parser' module is unavailable
            def syntax_checker(x):
                return compile(x, 'asd', 'exec')
        else:
            syntax_checker = parser.suite

        if deindent:
            source = str(self.deindent())
        else:
            source = str(self)
        try:
            # compile(source+'\n', "x", "exec")
            syntax_checker(source + '\n')
        except KeyboardInterrupt:
            raise
        except Exception:
            return False
        else:
            return True

    def __str__(self):
        return "\n".join(self.lines)

    def compile(self, filename=None, mode='exec',
                flag=generators.compiler_flag,
                dont_inherit=0, _genframe=None):
        """ return compiled code object. if filename is None
            invent an artificial filename which displays
            the source/line position of the caller frame.
        """
        if not filename or py.path.local(filename).check(file=0):
            if _genframe is None:
                _genframe = sys._getframe(1)  # the caller
            fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno
            base = "<%d-codegen " % self._compilecounter
            # bump the counter on the class so every generated filename is
            # unique
            self.__class__._compilecounter += 1
            if not filename:
                filename = base + '%s:%d>' % (fn, lineno)
            else:
                filename = base + '%r %s:%d>' % (filename, fn, lineno)
        source = "\n".join(self.lines) + '\n'
        try:
            # cpy_compile is the builtin compile(), aliased at module level
            # because this method shadows the name
            co = cpy_compile(source, filename, mode, flag)
        except SyntaxError:
            ex = sys.exc_info()[1]
            # re-represent syntax errors from parsing python strings
            msglines = self.lines[:ex.lineno]
            if ex.offset:
                msglines.append(" " * ex.offset + '^')
            msglines.append("(code was compiled probably from here: %s)" % filename)
            newex = SyntaxError('\n'.join(msglines))
            newex.offset = ex.offset
            newex.lineno = ex.lineno
            newex.text = ex.text
            raise newex
        else:
            if flag & _AST_FLAG:
                return co
            # populate the linecache so tracebacks can display this source
            lines = [(x + "\n") for x in self.lines]
            py.std.linecache.cache[filename] = (1, None, lines, filename)
            return co
#
# public API shortcut functions
#
def compile_(source, filename=None, mode='exec', flags=generators.compiler_flag, dont_inherit=0):
    """ compile the given source to a raw code object,
        and maintain an internal cache which allows later
        retrieval of the source code for the code object
        and any recursively created code objects.
    """
    if isinstance(source, ast.AST):
        # XXX should Source support having AST?
        return cpy_compile(source, filename, mode, flags, dont_inherit)
    # capture the caller's frame so Source.compile can attribute the
    # generated filename to the real call site
    _genframe = sys._getframe(1)  # the caller
    s = Source(source)
    co = s.compile(filename, mode, flags, _genframe=_genframe)
    return co
def getfslineno(obj):
    """ Return source location (path, lineno) for the given object.
    If the source cannot be determined return ("", -1)
    """
    # imported lazily to avoid an import cycle with _pytest._code
    import _pytest._code
    try:
        code = _pytest._code.Code(obj)
    except TypeError:
        # obj is not something Code() understands; fall back to inspect
        try:
            fn = (py.std.inspect.getsourcefile(obj) or
                  py.std.inspect.getfile(obj))
        except TypeError:
            return "", -1

        # old-style conditional expression: local path object or None
        fspath = fn and py.path.local(fn) or None
        lineno = -1
        if fspath:
            try:
                _, lineno = findsource(obj)
            except IOError:
                # source file unreadable: keep lineno = -1
                pass
    else:
        fspath = code.path
        lineno = code.firstlineno
    assert isinstance(lineno, int)
    return fspath, lineno
#
# helper functions
#
def findsource(obj):
    """ Return (Source, lineno) for the given object, or (None, -1)
        if the source cannot be found.
    """
    try:
        sourcelines, lineno = py.std.inspect.findsource(obj)
    except py.builtin._sysex:
        # never swallow system-exiting exceptions (KeyboardInterrupt etc.)
        raise
    except: # noqa
        # deliberate best-effort: any other failure means "no source found"
        return None, -1
    source = Source()
    source.lines = [line.rstrip() for line in sourcelines]
    return source, lineno
def getsource(obj, **kwargs):
    """ Return a Source object for the given object's source code,
        resolving function/code objects via _pytest._code.getrawcode.
    """
    import _pytest._code
    obj = _pytest._code.getrawcode(obj)
    try:
        strsrc = inspect.getsource(obj)
    except IndentationError:
        # workaround for a buggy inspect on old interpreters
        strsrc = "\"Buggy python version consider upgrading, cannot get source\""
    assert isinstance(strsrc, str)
    return Source(strsrc, **kwargs)
def deindent(lines, offset=None):
    """ Remove `offset` leading whitespace columns from `lines`.
        If offset is None, it is guessed from the first non-blank line.
        Continuation lines of multiline tokens (e.g. triple-quoted
        strings) are copied verbatim.
    """
    if offset is None:
        # guess the offset from the first line with content
        for line in lines:
            line = line.expandtabs()
            s = line.lstrip()
            if s:
                offset = len(line) - len(s)
                break
        else:
            # all lines blank: nothing to deindent
            offset = 0
    if offset == 0:
        return list(lines)
    newlines = []

    def readline_generator(lines):
        # feed the tokenizer line by line, then empty strings forever so it
        # sees EOF instead of raising StopIteration
        for line in lines:
            yield line + '\n'
        while True:
            yield ''

    it = readline_generator(lines)

    try:
        for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
            if sline > len(lines):
                break # End of input reached
            if sline > len(newlines):
                line = lines[sline - 1].expandtabs()
                # only strip the prefix if it is actually whitespace
                if line.lstrip() and line[:offset].isspace():
                    line = line[offset:] # Deindent
                newlines.append(line)

            for i in range(sline, eline):
                # Don't deindent continuing lines of
                # multiline tokens (i.e. multiline strings)
                newlines.append(lines[i])
    except (IndentationError, tokenize.TokenError):
        # tolerate unparseable input; leftovers are appended below
        pass
    # Add any lines we didn't see. E.g. if an exception was raised.
    newlines.extend(lines[len(newlines):])
    return newlines
def get_statement_startend2(lineno, node):
    """ Return (start, end) 0-based line numbers of the statement
        containing `lineno`, derived from the AST `node`.
        `end` is None when the statement is the last one found.
    """
    import ast
    # flatten all statements and except handlers into one lineno-list
    # AST's line numbers start indexing at 1
    values = []
    for x in ast.walk(node):
        if isinstance(x, ast.stmt) or isinstance(x, ast.ExceptHandler):
            values.append(x.lineno - 1)
            for name in "finalbody", "orelse":
                val = getattr(x, name, None)
                if val:
                    # treat the finally/orelse part as its own statement
                    values.append(val[0].lineno - 1 - 1)
    values.sort()
    # the statement containing lineno starts at the closest boundary at or
    # before it, and ends at the next boundary (if any)
    insert_index = bisect_right(values, lineno)
    start = values[insert_index - 1]
    if insert_index >= len(values):
        end = None
    else:
        end = values[insert_index]
    return start, end
def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
    """ Return (astnode, start, end) for the smallest statement in `source`
        containing `lineno`, using AST statement boundaries and then
        tokenizer-based corrections.
    """
    if astnode is None:
        content = str(source)
        try:
            astnode = compile(content, "source", "exec", 1024)  # 1024 for AST
        except ValueError:
            # source not compilable to AST: fall back to the legacy scanner
            start, end = getstatementrange_old(lineno, source, assertion)
            return None, start, end
    start, end = get_statement_startend2(lineno, astnode)
    # we need to correct the end:
    # - ast-parsing strips comments
    # - there might be empty lines
    # - we might have lesser indented code blocks at the end
    if end is None:
        end = len(source.lines)

    if end > start + 1:
        # make sure we don't span differently indented code blocks
        # by using the BlockFinder helper used which inspect.getsource() uses itself
        block_finder = inspect.BlockFinder()
        # if we start with an indented line, put blockfinder to "started" mode
        block_finder.started = source.lines[start][0].isspace()
        it = ((x + "\n") for x in source.lines[start:end])
        try:
            for tok in tokenize.generate_tokens(lambda: next(it)):
                block_finder.tokeneater(*tok)
        except (inspect.EndOfBlock, IndentationError):
            # EndOfBlock marks where the indented block really stops
            end = block_finder.last + start
        except Exception:
            # best-effort correction only; keep the AST-derived end
            pass

    # the end might still point to a comment or empty line, correct it
    while end:
        line = source.lines[end - 1].lstrip()
        if line.startswith("#") or not line:
            end -= 1
        else:
            break
    return astnode, start, end
def getstatementrange_old(lineno, source, assertion=False):
    """ return (start, end) tuple which spans the minimal
        statement region which containing the given lineno.
        raise an IndexError if no such statementrange can be found.
    """
    # XXX this logic is only used on python2.4 and below
    # 1. find the start of the statement
    from codeop import compile_command
    # scan backwards from lineno for a line at which the region starts
    # compiling cleanly
    for start in range(lineno, -1, -1):
        if assertion:
            line = source.lines[start]
            # the following lines are not fully tested, change with care
            if 'super' in line and 'self' in line and '__init__' in line:
                raise IndexError("likely a subclass")
            if "assert" not in line and "raise" not in line:
                continue
        trylines = source.lines[start:lineno + 1]
        # quick hack to prepare parsing an indented line with
        # compile_command() (which errors on "return" outside defs)
        trylines.insert(0, 'def xxx():')
        trysource = '\n '.join(trylines)
        #             ^ space here
        try:
            compile_command(trysource)
        except (SyntaxError, OverflowError, ValueError):
            continue

        # 2. find the end of the statement
        for end in range(lineno + 1, len(source) + 1):
            trysource = source[start:end]
            if trysource.isparseable():
                return start, end
    raise SyntaxError("no valid source range around line %d " % (lineno,))
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@_pytest@_code@source.py@.PATH_END.py
|
{
"filename": "py311.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/importlib-metadata/py3/importlib_metadata/compat/py311.py",
"type": "Python"
}
|
import os
import pathlib
import sys
import types
def wrap(path):  # pragma: no cover
    """
    Workaround for https://github.com/python/cpython/issues/84538
    to add backward compatibility for walk_up=True.

    An example affected package is dask-labextension, which uses
    jupyter-packaging to install JupyterLab javascript files outside
    of site-packages.
    """

    def relative_to(root, *, walk_up=False):
        # os.path.relpath already emits '..' components, so walk_up is
        # implicitly supported regardless of the flag's value.
        rel = os.path.relpath(path, root)
        return pathlib.Path(rel)

    return types.SimpleNamespace(relative_to=relative_to)


if sys.version_info < (3, 12):  # pragma: no cover
    relative_fix = wrap
else:
    def relative_fix(x):
        """On Python 3.12+, pathlib supports walk_up natively; pass through."""
        return x
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@importlib-metadata@py3@importlib_metadata@compat@py311.py@.PATH_END.py
|
{
"filename": "_legendgrouptitle.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcoords/_legendgrouptitle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Compound validator for the `legendgrouptitle` property of the
    # `parcoords` trace.

    def __init__(
        self, plotly_name="legendgrouptitle", parent_name="parcoords", **kwargs
    ):
        # Callers may override the data class name and docs; otherwise the
        # generated defaults are used.  pop() keeps them out of **kwargs so
        # they are not passed twice to the base class.
        data_class_str = kwargs.pop("data_class_str", "Legendgrouptitle")
        data_docs = kwargs.pop(
            "data_docs",
            """
            font
                Sets this legend group's title font.
            text
                Sets the title of the legend group.
""",
        )
        super(LegendgrouptitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@parcoords@_legendgrouptitle.py@.PATH_END.py
|
{
"filename": "test_libpython_in_gdb.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/cython/Cython/Debugger/Tests/test_libpython_in_gdb.py",
"type": "Python"
}
|
# -*- coding: UTF-8 -*-
"""
Test libpython.py. This is already partly tested by test_libcython_in_gdb and
Lib/test/test_gdb.py in the Python source. These tests are run in gdb and
called from test_libcython_in_gdb.main()
"""
import os
import sys
import gdb
from Cython.Debugger import libcython
from Cython.Debugger import libpython
from . import test_libcython_in_gdb
from .test_libcython_in_gdb import _debug, inferior_python_version
class TestPrettyPrinters(test_libcython_in_gdb.DebugTestCase):
    """
    Test whether types of Python objects are correctly inferred and that
    the right libpython.PySomeTypeObjectPtr classes are instantiated.

    Also test whether values are appropriately formatted (don't be too
    laborious as Lib/test/test_gdb.py already covers this extensively).

    Don't take care of decreffing newly allocated objects as a new
    interpreter is started for every test anyway.
    """

    def setUp(self):
        # run the inferior process up to a known source line so objects can
        # be allocated in a live interpreter
        super(TestPrettyPrinters, self).setUp()
        self.break_and_run('b = c = d = 0')

    def get_pyobject(self, code):
        # evaluate a C expression in the inferior and assert the resulting
        # pointer is non-NULL
        value = gdb.parse_and_eval(code)
        assert libpython.pointervalue(value) != 0
        return value

    def pyobject_fromcode(self, code, gdbvar=None):
        # optionally stash the result in a gdb convenience variable so that
        # later expressions can refer to it by name
        if gdbvar is not None:
            d = {'varname':gdbvar, 'code':code}
            gdb.execute('set $%(varname)s = %(code)s' % d)
            code = '$' + gdbvar

        return libpython.PyObjectPtr.from_pyobject_ptr(self.get_pyobject(code))

    def get_repr(self, pyobject):
        # repr as produced by the pretty printer, truncated like gdb would
        return pyobject.get_truncated_repr(libpython.MAX_OUTPUT_LEN)

    def alloc_bytestring(self, string, gdbvar=None):
        # PyString on python 2 inferiors, PyBytes on python 3
        if inferior_python_version < (3, 0):
            funcname = 'PyString_FromStringAndSize'
        else:
            funcname = 'PyBytes_FromStringAndSize'

        assert b'"' not in string

        # ensure double quotes
        code = '(PyObject *) %s("%s", %d)' % (funcname, string.decode('iso8859-1'), len(string))
        return self.pyobject_fromcode(code, gdbvar=gdbvar)

    def alloc_unicodestring(self, string, gdbvar=None):
        postfix = libpython.get_inferior_unicode_postfix()
        funcname = 'PyUnicode%s_DecodeUnicodeEscape' % (postfix,)

        # escape the payload so it survives embedding in a C string literal
        data = string.encode("unicode_escape").decode('iso8859-1')
        return self.pyobject_fromcode(
            '(PyObject *) %s("%s", %d, "strict")' % (
                funcname, data.replace('"', r'\"').replace('\\', r'\\'), len(data)),
            gdbvar=gdbvar)

    def test_bytestring(self):
        bytestring = self.alloc_bytestring(b"spam")

        if inferior_python_version < (3, 0):
            bytestring_class = libpython.PyStringObjectPtr
            expected = repr(b"spam")
        else:
            bytestring_class = libpython.PyBytesObjectPtr
            expected = "b'spam'"

        self.assertEqual(type(bytestring), bytestring_class)
        self.assertEqual(self.get_repr(bytestring), expected)

    def test_unicode(self):
        unicode_string = self.alloc_unicodestring(u"spam ἄλφα")

        expected = u"'spam ἄλφα'"
        if inferior_python_version < (3, 0):
            # python 2 reprs of unicode carry a u prefix
            expected = 'u' + expected

        self.assertEqual(type(unicode_string), libpython.PyUnicodeObjectPtr)
        self.assertEqual(self.get_repr(unicode_string), expected)

    def test_int(self):
        # PyInt only exists on python 2 inferiors
        if inferior_python_version < (3, 0):
            intval = self.pyobject_fromcode('PyInt_FromLong(100)')
            self.assertEqual(type(intval), libpython.PyIntObjectPtr)
            self.assertEqual(self.get_repr(intval), '100')

    def test_long(self):
        longval = self.pyobject_fromcode('PyLong_FromLong(200)',
                                         gdbvar='longval')
        # double-check the inferior object really is a PyLong
        assert gdb.parse_and_eval('$longval->ob_type == &PyLong_Type')

        self.assertEqual(type(longval), libpython.PyLongObjectPtr)
        self.assertEqual(self.get_repr(longval), '200')

    def test_frame_type(self):
        frame = self.pyobject_fromcode('PyEval_GetFrame()')

        self.assertEqual(type(frame), libpython.PyFrameObjectPtr)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@cython@Cython@Debugger@Tests@test_libpython_in_gdb.py@.PATH_END.py
|
{
"filename": "misc.py",
"repo_name": "sunpy/ndcube",
"repo_path": "ndcube_extracted/ndcube-main/ndcube/utils/misc.py",
"type": "Python"
}
|
import astropy.units as u
__all__ = ['convert_quantities_to_units', 'unique_sorted']
def unique_sorted(iterable):
    """
    Return unique values in the order they are first encountered in the iterable.
    """
    seen = set()  # hashable values already emitted
    ordered_unique = []
    for item in iterable:
        if item not in seen:
            seen.add(item)
            ordered_unique.append(item)
    return ordered_unique
def convert_quantities_to_units(coords, units):
    """Converts a sequence of Quantities to units used in the WCS.
    Non-Quantity types in the sequence are allowed and ignored.
    Parameters
    ----------
    coords: iterable of `astropy.units.Quantity` or `None`
        The coordinates to be converted.
    units: iterable of `astropy.units.Unit` or `str`
        The units to which the coordinates should be converted.
    Returns
    -------
    converted_coords: iterable of `astropy.units.Quantity` or `None`
        The coordinates converted to the units.
        Non-quantity types remain.
    """
    converted = []
    for coord, unit in zip(coords, units):
        if isinstance(coord, u.Quantity):
            converted.append(coord.to(unit))
        else:
            converted.append(coord)
    return converted
|
sunpyREPO_NAMEndcubePATH_START.@ndcube_extracted@ndcube-main@ndcube@utils@misc.py@.PATH_END.py
|
{
"filename": "angsep.py",
"repo_name": "backyardworlds/wiseview",
"repo_path": "wiseview_extracted/wiseview-master/wv/common/angsep.py",
"type": "Python"
}
|
#!/usr/bin/python
# angsep.py
# Program to calculate the angular separation between two points
# whose coordinates are given in RA and Dec
# From angsep.py Written by Enno Middelberg 2001
from numpy import *
def angsep(ra1deg, dec1deg, ra2deg, dec2deg):
    """Determine separation in degrees between two celestial objects.

    Arguments are RA and Dec of both points in decimal degrees.
    Returns the angular separation in decimal degrees.
    """
    # Coincident (to within ~0.4 mas) coordinates: short-circuit so no
    # round-off noise reaches the arccos below.
    if abs(ra1deg - ra2deg) < 0.0000001 and abs(dec1deg - dec2deg) < 0.0000001:
        return 0.0
    ra1rad = deg2rad(ra1deg)
    dec1rad = deg2rad(dec1deg)
    ra2rad = deg2rad(ra2deg)
    dec2rad = deg2rad(dec2deg)
    # Scalar product of the two unit vectors gives cos(separation).
    x = cos(ra1rad) * cos(dec1rad) * cos(ra2rad) * cos(dec2rad)
    y = sin(ra1rad) * cos(dec1rad) * sin(ra2rad) * cos(dec2rad)
    z = sin(dec1rad) * sin(dec2rad)
    # BUG FIX: rounding can push the dot product marginally outside [-1, 1],
    # which made arccos emit RuntimeWarnings / NaN for (near-)coincident or
    # antipodal coordinates.  Clip into the valid domain first.
    rad = arccos(clip(x + y + z, -1.0, 1.0))
    # Angular separation, back to degrees.
    return rad2deg(rad)
|
backyardworldsREPO_NAMEwiseviewPATH_START.@wiseview_extracted@wiseview-master@wv@common@angsep.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sdss/lvmagp",
"repo_path": "lvmagp_extracted/lvmagp-main/container/__init__.py",
"type": "Python"
}
|
import subprocess
# import sys
import os
# import time
# import pexpect
import shlex
import click
import socket
from pathlib import PosixPath
# Container runtime and image configuration defaults.
container_bin = 'podman'  # container engine CLI used for all invocations
lvmt_root = os.environ["PWD"]  # repo root; assumes invocation from repo top
lvmt_image_source_local = "localhost/sdss"  # registry prefix for locally built images
lvmt_image_source_remote = "ghcr.io/sdss"  # fallback remote registry prefix
lvmt_image_name = 'lvmagp'  # image name appended to either prefix
lvmt_rmq = socket.gethostname()  # default RabbitMQ host (this machine)
default_agp = 'lvm.agp'  # default container name
# def isRunning(name: str = default_agp):
# command = subprocess.run(shlex.split(f"{container_bin} container exists {name}"))
# return not command.returncode # True if running
# def next_free_port( port=5900, max_port=5909 ):
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# while port <= max_port:
# try:
# sock.bind(('', port))
# sock.close()
# return port
# except OSError:
# port += 1
# print('no free ports')
# return None
@click.command()
@click.option("--lvmt_root", default=lvmt_root, type=str)
@click.option("--use-cache/--no-cache", default=True)
def build(lvmt_root: str, use_cache: bool):
    """Build the sdss/lvmagp container image from the in-repo Dockerfile."""
    cache_flag = "" if use_cache else " --no-cache"
    cmd = f"{container_bin} build --tag sdss/{lvmt_image_name} {cache_flag} --rm {lvmt_root}/container --format docker"
    print(cmd)
    subprocess.run(shlex.split(cmd))
@click.command()
@click.option("--lvmt_root", default=lvmt_root, type=str)
@click.option("--name", "-n", default=default_agp, type=str)
@click.option("--debug/--no-debug", "-d", default=False)
@click.option("--kill/--no-kill", default=False)
@click.option("--rmq", default=lvmt_rmq, type=str)
@click.option("--simulator/--aravisagp", default=False)
def start(lvmt_root: str, name: str, debug: bool, kill: bool, rmq: str, simulator: bool):
    """Start the lvmagp container, pulling the image from ghcr.io if needed."""
    # Prefer a locally built image; otherwise fall back to the remote
    # registry, pulling on demand.
    if not subprocess.run(shlex.split(f"{container_bin} image exists {lvmt_image_source_local}/{lvmt_image_name}")).returncode:
        lvmt_image = f"{lvmt_image_source_local}/{lvmt_image_name}"
    else:
        if subprocess.run(shlex.split(f"{container_bin} image exists {lvmt_image_source_remote}/{lvmt_image_name}")).returncode:
            subprocess.run(shlex.split(f"{container_bin} pull {lvmt_image_source_remote}/{lvmt_image_name}:latest"))
        lvmt_image = f"{lvmt_image_source_remote}/{lvmt_image_name}"
    if kill:
        # Remove any stale container with the same name first.
        subprocess.run(shlex.split(f"{container_bin} kill {name}"))
        subprocess.run(shlex.split(f"{container_bin} rm {name} -f"))
    run_base = f"--rm -d --name={name}"
    # BUG FIX: honour the --rmq option. Previously the module-level default
    # lvmt_rmq was interpolated here, silently ignoring the CLI value.
    if rmq:
        run_base += f" -e LVMT_RMQ={rmq}"
    else:
        run_base += f" -e LVMT_RMQ={socket.gethostname()}"
    if debug:
        run_base += " -e LVMT_DEBUG=true"
    if simulator:
        run_base += " -e LVMT_CAM_TYPE=skymakeragp"
    # Mount the live source tree so code edits are visible in the container.
    run_agp = f"-v {lvmt_root}/python/lvmagp:/root/lvmagp/python/lvmagp:rw -e LVMT_CAM={name}"
    run = f"{container_bin} run {run_base} {run_agp} {lvmt_image}"
    print(run)
    subprocess.run(shlex.split(run))
    # Follow the container log until interrupted.
    subprocess.run(shlex.split(f"{container_bin} logs -f {name}"))
    print("done")
@click.command()
@click.option("--name", "-n", default=default_agp, type=str)
def stop(name: str):
    """Kill the named lvmagp container."""
    subprocess.run(shlex.split(f"{container_bin} kill {name}"))
|
sdssREPO_NAMElvmagpPATH_START.@lvmagp_extracted@lvmagp-main@container@__init__.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmap/hoverlabel/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``font`` property of ``heatmap.hoverlabel``."""

    def __init__(self, plotly_name="font", parent_name="heatmap.hoverlabel", **kwargs):
        # Fallback documentation used when the caller does not supply
        # ``data_docs``; the text mirrors the generated plotly schema.
        default_docs = """
            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for color .
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for family .
            size
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for size .
            """
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop("data_docs", default_docs),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmap@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/gemini_instruments/cirpass/tests/__init__.py",
"type": "Python"
}
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@gemini_instruments@cirpass@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "_y.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/legend/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``layout.legend.y``, bounded to [-2, 3]."""

    def __init__(self, plotly_name="y", parent_name="layout.legend", **kwargs):
        # Apply schema defaults only when the caller has not overridden them.
        for key, value in (("edit_type", "legend"),
                           ("max", 3),
                           ("min", -2),
                           ("role", "style")):
            kwargs.setdefault(key, value)
        super(YValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@legend@_y.py@.PATH_END.py
|
{
"filename": "test_distributions.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/distributions/test_distributions.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
import pyro
import pyro.distributions as dist
from pyro.distributions import TorchDistribution
from pyro.distributions.testing.gof import auto_goodness_of_fit
from pyro.distributions.util import broadcast_shape
from tests.common import (
TEST_FAILURE_RATE,
assert_close,
assert_equal,
xfail_if_not_implemented,
)
def _log_prob_shape(dist, x_size=torch.Size()):
    """Expected shape of ``dist.log_prob(x)`` for a value of shape *x_size*."""
    shape = broadcast_shape(dist.shape(), x_size, strict=True)
    n_event_dims = len(dist.event_shape)
    # log_prob collapses the event dimensions; only batch dims remain.
    return shape[:-n_event_dims] if n_event_dims > 0 else shape
# Distribution tests - all distributions
def test_support_shape(dist):
    """``support.check`` returns an all-True mask with the broadcast batch shape."""
    for idx in range(dist.get_num_test_data()):
        dist_params = dist.get_dist_params(idx)
        d = dist.pyro_dist(**dist_params)
        assert d.support.event_dim == d.event_dim
        x = dist.get_test_data(idx)
        ok = d.support.check(x)
        assert ok.shape == broadcast_shape(
            d.batch_shape, x.shape[: x.dim() - d.event_dim]
        )
        assert ok.all()
def test_infer_shapes(dist):
    """``infer_shapes`` statically predicts batch/event shapes from arg shapes."""
    if "LKJ" in dist.pyro_dist.__name__ or "SineSkewed" in dist.pyro_dist.__name__:
        pytest.xfail(reason="cannot statically compute shape")
    for idx in range(dist.get_num_test_data()):
        dist_params = dist.get_dist_params(idx)
        arg_shapes = {
            k: v.shape if isinstance(v, torch.Tensor) else ()
            for k, v in dist_params.items()
        }
        batch_shape, event_shape = dist.pyro_dist.infer_shapes(**arg_shapes)
        d = dist.pyro_dist(**dist_params)
        assert d.batch_shape == batch_shape
        assert d.event_shape == event_shape
def test_batch_log_prob(dist):
    """Summed log_prob agrees with the scipy reference implementation."""
    if dist.scipy_arg_fn is None:
        pytest.skip(
            "{}.log_prob_sum has no scipy equivalent".format(dist.pyro_dist.__name__)
        )
    for idx in dist.get_batch_data_indices():
        dist_params = dist.get_dist_params(idx)
        d = dist.pyro_dist(**dist_params)
        test_data = dist.get_test_data(idx)
        log_prob_sum_pyro = d.log_prob(test_data).sum().item()
        log_prob_sum_np = np.sum(dist.get_scipy_batch_logpdf(-1))
        assert_equal(log_prob_sum_pyro, log_prob_sum_np)
def test_batch_log_prob_shape(dist):
    """``log_prob`` output shape matches the broadcast batch shape."""
    for idx in range(dist.get_num_test_data()):
        dist_params = dist.get_dist_params(idx)
        d = dist.pyro_dist(**dist_params)
        x = dist.get_test_data(idx)
        with xfail_if_not_implemented():
            # Get log_prob shape after broadcasting.
            expected_shape = _log_prob_shape(d, x.size())
            log_p_obj = d.log_prob(x)
            assert log_p_obj.size() == expected_shape
def test_batch_entropy_shape(dist):
    """``entropy`` output shape matches the distribution's batch shape."""
    for idx in range(dist.get_num_test_data()):
        dist_params = dist.get_dist_params(idx)
        d = dist.pyro_dist(**dist_params)
        with xfail_if_not_implemented():
            # Get entropy shape after broadcasting.
            expected_shape = _log_prob_shape(d)
            entropy_obj = d.entropy()
            assert entropy_obj.size() == expected_shape
def test_score_errors_event_dim_mismatch(dist):
    """``log_prob`` raises when the value carries an extra trailing event dim."""
    for idx in dist.get_batch_data_indices():
        dist_params = dist.get_dist_params(idx)
        d = dist.pyro_dist(**dist_params)
        test_data_wrong_dims = torch.ones(d.shape() + (1,))
        if len(d.event_shape) > 0:
            if dist.get_test_distribution_name() == "MultivariateNormal":
                pytest.skip(
                    "MultivariateNormal does not do shape validation in log_prob."
                )
            elif dist.get_test_distribution_name() == "LowRankMultivariateNormal":
                pytest.skip(
                    "LowRankMultivariateNormal does not do shape validation in log_prob."
                )
            with pytest.raises((ValueError, RuntimeError)):
                d.log_prob(test_data_wrong_dims)
def test_score_errors_non_broadcastable_data_shape(dist):
    """``log_prob`` raises when the value cannot broadcast with the batch shape."""
    for idx in dist.get_batch_data_indices():
        dist_params = dist.get_dist_params(idx)
        d = dist.pyro_dist(**dist_params)
        if dist.get_test_distribution_name() == "LKJCholesky":
            pytest.skip("https://github.com/pytorch/pytorch/issues/52724")
        shape = d.shape()
        non_broadcastable_shape = (shape[0] + 1,) + shape[1:]
        test_data_non_broadcastable = torch.ones(non_broadcastable_shape)
        with pytest.raises((ValueError, RuntimeError)):
            d.log_prob(test_data_non_broadcastable)
# Distributions tests - continuous distributions
def test_support_is_not_discrete(continuous_dist):
    """Continuous distributions must advertise a non-discrete support."""
    Dist = continuous_dist.pyro_dist
    for i in range(continuous_dist.get_num_test_data()):
        d = Dist(**continuous_dist.get_dist_params(i))
        assert not d.support.is_discrete
def test_gof(continuous_dist):
    """Samples pass a goodness-of-fit test against the density, per batch."""
    Dist = continuous_dist.pyro_dist
    if Dist in [dist.LKJ, dist.LKJCholesky]:
        pytest.xfail(reason="incorrect submanifold scaling")
    num_samples = 50000
    for i in range(continuous_dist.get_num_test_data()):
        d = Dist(**continuous_dist.get_dist_params(i))
        with torch.random.fork_rng():
            samples = d.sample(torch.Size([num_samples]))
        with xfail_if_not_implemented():
            probs = d.log_prob(samples).exp()
        dim = None
        if "ProjectedNormal" in Dist.__name__:
            dim = samples.size(-1) - 1
        # Test each batch independently.
        probs = probs.reshape(num_samples, -1)
        samples = samples.reshape(probs.shape + d.event_shape)
        if "Dirichlet" in Dist.__name__:
            # The Dirichlet density is over all but one of the probs.
            samples = samples[..., :-1]
        for b in range(probs.size(-1)):
            gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)
            assert gof > TEST_FAILURE_RATE
def test_mean(continuous_dist):
    """Analytic mean matches a Monte-Carlo estimate where it is defined."""
    Dist = continuous_dist.pyro_dist
    if Dist.__name__ in [
        "Cauchy",
        "HalfCauchy",
        "SineBivariateVonMises",
        "VonMises",
        "ProjectedNormal",
        "Stable",
    ]:
        pytest.xfail(reason="Euclidean mean is not defined")
    for i in range(continuous_dist.get_num_test_data()):
        d = Dist(**continuous_dist.get_dist_params(i))
        with xfail_if_not_implemented():
            actual = d.mean
        num_samples = 100000
        if Dist.__name__ == "LogNormal":
            # Heavy-tailed: needs more samples for a stable estimate.
            num_samples = 200000
        expected = d.sample((num_samples,)).mean(0)
        assert_close(actual, expected, atol=0.02, rtol=0.05)
def test_variance(continuous_dist):
    """Analytic variance matches a Monte-Carlo estimate where it is defined."""
    Dist = continuous_dist.pyro_dist
    if Dist.__name__ in [
        "Cauchy",
        "HalfCauchy",
        "MultivariateStudentT",
        "ProjectedNormal",
        "Stable",
        "VonMises",
    ]:
        pytest.xfail(reason="Euclidean variance is not defined")
    if Dist.__name__ in ["LogNormal"]:
        pytest.xfail(reason="high variance estimator")
    for i in range(continuous_dist.get_num_test_data()):
        d = Dist(**continuous_dist.get_dist_params(i))
        with xfail_if_not_implemented():
            actual = d.variance
        expected = d.sample((100000,)).var(0)
        assert_close(actual, expected, atol=0.02, rtol=0.05)
def test_cdf_icdf(continuous_dist):
    """``icdf`` inverts ``cdf`` for univariate distributions."""
    Dist = continuous_dist.pyro_dist
    for i in range(continuous_dist.get_num_test_data()):
        d = Dist(**continuous_dist.get_dist_params(i))
        if d.event_shape.numel() != 1:
            continue  # only valid for univariate distributions
        u = torch.empty((100,) + d.shape()).uniform_()
        with xfail_if_not_implemented():
            x = d.icdf(u)
            u2 = d.cdf(x)
            assert_equal(u, u2)
# Distributions tests - discrete distributions
def test_support_is_discrete(discrete_dist):
    """Discrete distributions must advertise a discrete support."""
    Dist = discrete_dist.pyro_dist
    for i in range(discrete_dist.get_num_test_data()):
        d = Dist(**discrete_dist.get_dist_params(i))
        assert d.support.is_discrete
def test_enumerate_support(discrete_dist):
    """``enumerate_support`` matches expectations, vectorized and not."""
    expected_support = discrete_dist.expected_support
    expected_support_non_vec = discrete_dist.expected_support_non_vec
    if not expected_support:
        pytest.skip("enumerate_support not tested for distribution")
    Dist = discrete_dist.pyro_dist
    actual_support_non_vec = Dist(
        **discrete_dist.get_dist_params(0)
    ).enumerate_support()
    actual_support = Dist(**discrete_dist.get_dist_params(-1)).enumerate_support()
    assert_equal(actual_support.data, torch.tensor(expected_support))
    assert_equal(actual_support_non_vec.data, torch.tensor(expected_support_non_vec))
def test_enumerate_support_shape(dist):
    """Expanded and unexpanded enumerated supports have consistent shapes."""
    if not dist.pyro_dist.has_enumerate_support:
        pytest.skip()
    for idx in range(dist.get_num_test_data()):
        dist_params = dist.get_dist_params(idx)
        d = dist.pyro_dist(**dist_params)
        with xfail_if_not_implemented():
            support = d.enumerate_support()
            n = support.shape[0]
            assert support.shape == (n,) + d.batch_shape + d.event_shape
            support_expanded = d.enumerate_support(expand=True)
            assert_equal(support, support_expanded)
            support_unexpanded = d.enumerate_support(expand=False)
            assert (
                support_unexpanded.shape
                == (n,) + (1,) * len(d.batch_shape) + d.event_shape
            )
            assert (support_expanded == support_unexpanded).all()
@pytest.mark.parametrize(
    "dist_class, args",
    [
        (dist.Normal, {"loc": torch.tensor(0.0), "scale": torch.tensor(-1.0)}),
        (dist.Gamma, {"concentration": -1.0, "rate": 1.0}),
        (dist.Exponential, {"rate": -2}),
    ],
)
@pytest.mark.parametrize("validate_args", [True, False])
def test_distribution_validate_args(dist_class, args, validate_args):
    """Invalid parameters raise ValueError iff validation is enabled."""
    with pyro.validation_enabled(validate_args):
        if not validate_args:
            dist_class(**args)
        else:
            with pytest.raises(ValueError):
                dist_class(**args)
def check_sample_shapes(small, large):
    """Assert ``log_prob`` agrees between a distribution and its expansion."""
    dist_instance = small
    if isinstance(
        dist_instance,
        (
            dist.LogNormal,
            dist.LowRankMultivariateNormal,
            dist.VonMises,
            dist.LogNormalNegativeBinomial,
        ),
    ):
        # Ignore broadcasting bug in LogNormal:
        # https://github.com/pytorch/pytorch/pull/7269
        # LogNormalNegativeBinomial has no sample method
        return
    x = small.sample()
    assert_equal(small.log_prob(x).expand(large.batch_shape), large.log_prob(x))
    x = large.sample()
    assert_equal(small.log_prob(x), large.log_prob(x))
@pytest.mark.parametrize("sample_shape", [(), (2,), (2, 3)])
@pytest.mark.parametrize("shape_type", [torch.Size, tuple, list])
def test_expand_by(dist, sample_shape, shape_type):
    """``expand_by`` prepends the sample shape to the batch shape."""
    for idx in range(dist.get_num_test_data()):
        small = dist.pyro_dist(**dist.get_dist_params(idx))
        large = small.expand_by(shape_type(sample_shape))
        assert large.batch_shape == sample_shape + small.batch_shape
        check_sample_shapes(small, large)
@pytest.mark.parametrize("sample_shape", [(), (2,), (2, 3)])
@pytest.mark.parametrize("shape_type", [torch.Size, tuple, list])
@pytest.mark.parametrize("default", [False, True])
def test_expand_new_dim(dist, sample_shape, shape_type, default):
    """``expand`` can add new leading batch dimensions."""
    for idx in range(dist.get_num_test_data()):
        small = dist.pyro_dist(**dist.get_dist_params(idx))
        if default:
            # Exercise the TorchDistribution base implementation directly.
            large = TorchDistribution.expand(
                small, shape_type(sample_shape + small.batch_shape)
            )
        else:
            with xfail_if_not_implemented():
                large = small.expand(shape_type(sample_shape + small.batch_shape))
        assert large.batch_shape == sample_shape + small.batch_shape
        check_sample_shapes(small, large)
@pytest.mark.parametrize("shape_type", [torch.Size, tuple, list])
@pytest.mark.parametrize("default", [False, True])
def test_expand_existing_dim(dist, shape_type, default):
    """``expand`` can broadcast singleton batch dimensions."""
    for idx in range(dist.get_num_test_data()):
        small = dist.pyro_dist(**dist.get_dist_params(idx))
        for dim, size in enumerate(small.batch_shape):
            if size != 1:
                continue
            batch_shape = list(small.batch_shape)
            batch_shape[dim] = 5
            batch_shape = torch.Size(batch_shape)
            if default:
                large = TorchDistribution.expand(small, shape_type(batch_shape))
            else:
                with xfail_if_not_implemented():
                    large = small.expand(shape_type(batch_shape))
            assert large.batch_shape == batch_shape
            check_sample_shapes(small, large)
@pytest.mark.parametrize(
    "sample_shapes",
    [
        [(2, 1), (2, 3)],
        [(2, 1, 1), (2, 1, 3), (2, 5, 3)],
    ],
)
@pytest.mark.parametrize("default", [False, True])
def test_subsequent_expands_ok(dist, sample_shapes, default):
    """Repeated expansions compose and keep ``log_prob`` consistent."""
    for idx in range(dist.get_num_test_data()):
        d = dist.pyro_dist(**dist.get_dist_params(idx))
        original_batch_shape = d.batch_shape
        for shape in sample_shapes:
            proposed_batch_shape = torch.Size(shape) + original_batch_shape
            if default:
                n = TorchDistribution.expand(d, proposed_batch_shape)
            else:
                with xfail_if_not_implemented():
                    n = d.expand(proposed_batch_shape)
            assert n.batch_shape == proposed_batch_shape
            with xfail_if_not_implemented():
                check_sample_shapes(d, n)
            d = n
@pytest.mark.parametrize(
    "initial_shape, proposed_shape",
    [
        [(2, 1), (4, 3)],
        [(2, 4), (2, 2, 1)],
        [(1, 2, 1), (2, 1)],
    ],
)
@pytest.mark.parametrize("default", [False, True])
def test_expand_error(dist, initial_shape, proposed_shape, default):
    """Incompatible expansion targets raise."""
    for idx in range(dist.get_num_test_data()):
        small = dist.pyro_dist(**dist.get_dist_params(idx))
        if default:
            large = TorchDistribution.expand(small, initial_shape + small.batch_shape)
        else:
            with xfail_if_not_implemented():
                large = small.expand(torch.Size(initial_shape) + small.batch_shape)
        proposed_batch_shape = torch.Size(proposed_shape) + small.batch_shape
        with pytest.raises((RuntimeError, ValueError)):
            large.expand(proposed_batch_shape)
@pytest.mark.parametrize(
    "extra_event_dims,expand_shape",
    [
        (0, [4, 3, 2, 1]),
        (0, [4, 3, 2, 2]),
        (1, [5, 4, 3, 2]),
        (2, [5, 4, 3]),
    ],
)
@pytest.mark.parametrize("default", [False, True])
def test_expand_reshaped_distribution(extra_event_dims, expand_shape, default):
    """Expanding a ``to_event``-reshaped distribution honours the batch/event split."""
    probs = torch.ones(1, 6) / 6
    d = dist.OneHotCategorical(probs)
    full_shape = torch.Size([4, 1, 1, 1, 6])
    if default:
        reshaped_dist = TorchDistribution.expand(d, [4, 1, 1, 1]).to_event(
            extra_event_dims
        )
    else:
        reshaped_dist = d.expand_by([4, 1, 1]).to_event(extra_event_dims)
    cut = 4 - extra_event_dims
    batch_shape, event_shape = full_shape[:cut], full_shape[cut:]
    assert reshaped_dist.batch_shape == batch_shape
    assert reshaped_dist.event_shape == event_shape
    large = reshaped_dist.expand(expand_shape)
    assert large.batch_shape == torch.Size(expand_shape)
    assert large.event_shape == torch.Size(event_shape)
    # Throws error when batch shape cannot be broadcasted
    with pytest.raises((RuntimeError, ValueError)):
        reshaped_dist.expand(expand_shape + [3])
    # Throws error when trying to shrink existing batch shape
    with pytest.raises((RuntimeError, ValueError)):
        large.expand(expand_shape[1:])
def test_expand_enumerate_support():
    """Expansion composes with ``enumerate_support`` shapes."""
    probs = torch.ones(3, 6) / 6
    d = dist.Categorical(probs)
    actual_enum_shape = (
        TorchDistribution.expand(d, (4, 3)).enumerate_support(expand=True).shape
    )
    assert actual_enum_shape == (6, 4, 3)
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@distributions@test_distributions.py@.PATH_END.py
|
{
"filename": "_tickprefix.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergeo/marker/colorbar/_tickprefix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickprefixValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``scattergeo.marker.colorbar.tickprefix``."""

    def __init__(
        self,
        plotly_name="tickprefix",
        parent_name="scattergeo.marker.colorbar",
        **kwargs,
    ):
        # Schema default; callers may override via kwargs.
        kwargs.setdefault("edit_type", "calc")
        super(TickprefixValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergeo@marker@colorbar@_tickprefix.py@.PATH_END.py
|
{
"filename": "tiny_vit_sam.py",
"repo_name": "ESA-Datalabs/XAMI-model",
"repo_path": "XAMI-model_extracted/XAMI-model-main/xami_model/mobile_sam/mobile_sam/modeling/tiny_vit_sam.py",
"type": "Python"
}
|
# --------------------------------------------------------
# TinyViT Model Architecture
# Copyright (c) 2022 Microsoft
# Adapted from LeViT and Swin Transformer
# LeViT: (https://github.com/facebookresearch/levit)
# Swin: (https://github.com/microsoft/swin-transformer)
# Build the TinyViT Model
# --------------------------------------------------------
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath as TimmDropPath,\
to_2tuple, trunc_normal_
from timm.models.registry import register_model
from typing import Tuple
class Conv2d_BN(torch.nn.Sequential):
    """A bias-free Conv2d followed by BatchNorm2d, fusable for inference."""
    def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
                 groups=1, bn_weight_init=1):
        super().__init__()
        # Conv bias is omitted: the BatchNorm shift makes it redundant.
        self.add_module('c', torch.nn.Conv2d(
            a, b, ks, stride, pad, dilation, groups, bias=False))
        bn = torch.nn.BatchNorm2d(b)
        # bn_weight_init=0 lets a residual branch start as identity.
        torch.nn.init.constant_(bn.weight, bn_weight_init)
        torch.nn.init.constant_(bn.bias, 0)
        self.add_module('bn', bn)
    @torch.no_grad()
    def fuse(self):
        """Fold the BN statistics into the conv and return a single Conv2d."""
        c, bn = self._modules.values()
        # Per-output-channel scale gamma / sqrt(var + eps).
        w = bn.weight / (bn.running_var + bn.eps)**0.5
        w = c.weight * w[:, None, None, None]
        b = bn.bias - bn.running_mean * bn.weight / \
            (bn.running_var + bn.eps)**0.5
        m = torch.nn.Conv2d(w.size(1) * self.c.groups, w.size(
            0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups)
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m
class DropPath(TimmDropPath):
    """timm DropPath whose repr also reports the drop probability."""

    def __init__(self, drop_prob=None):
        super().__init__(drop_prob=drop_prob)
        self.drop_prob = drop_prob

    def __repr__(self):
        return super().__repr__() + f'(drop_prob={self.drop_prob})'
class PatchEmbed(nn.Module):
    """Embed an image into a 4x-downsampled feature map via two strided convs."""

    def __init__(self, in_chans, embed_dim, resolution, activation):
        super().__init__()
        img_size: Tuple[int, int] = to_2tuple(resolution)
        # Overall stride is 4 (two stride-2 convolutions).
        self.patches_resolution = (img_size[0] // 4, img_size[1] // 4)
        self.num_patches = self.patches_resolution[0] * self.patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.seq = nn.Sequential(
            Conv2d_BN(in_chans, embed_dim // 2, 3, 2, 1),
            activation(),
            Conv2d_BN(embed_dim // 2, embed_dim, 3, 2, 1),
        )

    def forward(self, x):
        return self.seq(x)
class MBConv(nn.Module):
    """MobileNet-style inverted-residual block: expand, depthwise, project."""

    def __init__(self, in_chans, out_chans, expand_ratio,
                 activation, drop_path):
        super().__init__()
        self.in_chans = in_chans
        self.hidden_chans = int(in_chans * expand_ratio)
        self.out_chans = out_chans
        # 1x1 expansion.
        self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1)
        self.act1 = activation()
        # 3x3 depthwise.
        self.conv2 = Conv2d_BN(self.hidden_chans, self.hidden_chans,
                               ks=3, stride=1, pad=1, groups=self.hidden_chans)
        self.act2 = activation()
        # 1x1 projection; BN weight zero-init so the block starts as identity.
        self.conv3 = Conv2d_BN(
            self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0)
        self.act3 = activation()
        self.drop_path = DropPath(
            drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        shortcut = x
        out = self.act1(self.conv1(x))
        out = self.act2(self.conv2(out))
        out = self.conv3(out)
        out = self.drop_path(out)
        out = out + shortcut
        return self.act3(out)
class PatchMerging(nn.Module):
    """Merge patches with conv1x1 -> depthwise conv (usually stride 2) -> conv1x1."""

    def __init__(self, input_resolution, dim, out_dim, activation):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.out_dim = out_dim
        self.act = activation()
        self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0)
        # For these stage widths the spatial size is kept (stride 1).
        stride_c = 1 if out_dim in (320, 448, 576) else 2
        self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim)
        self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0)

    def forward(self, x):
        if x.ndim == 3:
            # Token sequence (B, L, C) -> feature map (B, C, H, W).
            H, W = self.input_resolution
            x = x.view(len(x), H, W, -1).permute(0, 3, 1, 2)
        x = self.act(self.conv1(x))
        x = self.act(self.conv2(x))
        x = self.conv3(x)
        # Back to a token sequence (B, L', C').
        return x.flatten(2).transpose(1, 2)
class ConvLayer(nn.Module):
    """A stage of MBConv blocks followed by an optional downsample layer."""

    def __init__(self, dim, input_resolution, depth,
                 activation,
                 drop_path=0., downsample=None, use_checkpoint=False,
                 out_dim=None,
                 conv_expand_ratio=4.,
                 ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # Stochastic-depth rate may be a scalar or a per-block list.
        self.blocks = nn.ModuleList([
            MBConv(dim, dim, conv_expand_ratio, activation,
                   drop_path[i] if isinstance(drop_path, list) else drop_path)
            for i in range(depth)])
        # Optional patch-merging layer at the end of the stage.
        if downsample is None:
            self.downsample = None
        else:
            self.downsample = downsample(
                input_resolution, dim=dim, out_dim=out_dim, activation=activation)

    def forward(self, x):
        for block in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(block, x)
            else:
                x = block(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
class Mlp(nn.Module):
    """Pre-norm MLP: LayerNorm -> Linear -> act -> dropout -> Linear -> dropout."""

    def __init__(self, in_features, hidden_features=None,
                 out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.norm = nn.LayerNorm(in_features)
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.act = act_layer()
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        h = self.norm(x)
        h = self.drop(self.act(self.fc1(h)))
        return self.drop(self.fc2(h))
class Attention(torch.nn.Module):
    """Multi-head self-attention with learned relative position biases.

    The bias table has one parameter per unique (|dx|, |dy|) offset within
    the ``resolution`` window; ``attention_bias_idxs`` maps every
    (query, key) pair to its table entry.
    """
    def __init__(self, dim, key_dim, num_heads=8,
                 attn_ratio=4,
                 resolution=(14, 14),
                 ):
        super().__init__()
        # (h, w)
        assert isinstance(resolution, tuple) and len(resolution) == 2
        self.num_heads = num_heads
        self.scale = key_dim ** -0.5
        self.key_dim = key_dim
        self.nh_kd = nh_kd = key_dim * num_heads
        # Value head width is attn_ratio * key_dim (wider than q/k heads).
        self.d = int(attn_ratio * key_dim)
        self.dh = int(attn_ratio * key_dim) * num_heads
        self.attn_ratio = attn_ratio
        # One linear projection produces q, k and v concatenated.
        h = self.dh + nh_kd * 2
        self.norm = nn.LayerNorm(dim)
        self.qkv = nn.Linear(dim, h)
        self.proj = nn.Linear(self.dh, dim)
        points = list(itertools.product(
            range(resolution[0]), range(resolution[1])))
        N = len(points)
        attention_offsets = {}
        idxs = []
        for p1 in points:
            for p2 in points:
                offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        self.attention_biases = torch.nn.Parameter(
            torch.zeros(num_heads, len(attention_offsets)))
        self.register_buffer('attention_bias_idxs',
                             torch.LongTensor(idxs).view(N, N),
                             persistent=False)
    @torch.no_grad()
    def train(self, mode=True):
        """In eval mode cache the gathered bias table as ``ab``; drop it for training."""
        super().train(mode)
        if mode and hasattr(self, 'ab'):
            del self.ab
        else:
            self.register_buffer('ab',
                                 self.attention_biases[:, self.attention_bias_idxs],
                                 persistent=False)
    def forward(self, x):  # x (B,N,C)
        B, N, _ = x.shape
        # Normalization
        x = self.norm(x)
        qkv = self.qkv(x)
        # (B, N, num_heads, d)
        q, k, v = qkv.view(B, N, self.num_heads, -
                           1).split([self.key_dim, self.key_dim, self.d], dim=3)
        # (B, num_heads, N, d)
        q = q.permute(0, 2, 1, 3)
        k = k.permute(0, 2, 1, 3)
        v = v.permute(0, 2, 1, 3)
        # Scaled dot-product scores plus per-head relative position biases.
        attn = (
            (q @ k.transpose(-2, -1)) * self.scale
            +
            (self.attention_biases[:, self.attention_bias_idxs]
             if self.training else self.ab)
        )
        attn = attn.softmax(dim=-1)
        x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
        x = self.proj(x)
        return x
class TinyViTBlock(nn.Module):
    r""" TinyViT Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int, int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        local_conv_size (int): the kernel size of the convolution between
            Attention and MLP. Default: 3
        activation: the activation function. Default: nn.GELU
    """
    def __init__(self, dim, input_resolution, num_heads, window_size=7,
                 mlp_ratio=4., drop=0., drop_path=0.,
                 local_conv_size=3,
                 activation=nn.GELU,
                 ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        assert window_size > 0, 'window_size must be greater than 0'
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.drop_path = DropPath(
            drop_path) if drop_path > 0. else nn.Identity()
        assert dim % num_heads == 0, 'dim must be divisible by num_heads'
        head_dim = dim // num_heads
        window_resolution = (window_size, window_size)
        self.attn = Attention(dim, head_dim, num_heads,
                              attn_ratio=1, resolution=window_resolution)
        mlp_hidden_dim = int(dim * mlp_ratio)
        mlp_activation = activation
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
                       act_layer=mlp_activation, drop=drop)
        # Depthwise conv between attention and MLP injects local context.
        pad = local_conv_size // 2
        self.local_conv = Conv2d_BN(
            dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
    def forward(self, x):
        """Windowed attention + local depthwise conv + MLP, each with residuals."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        # Residual for the attention branch.
        res_x = x
        if H == self.window_size and W == self.window_size:
            # Single window covers the whole map: attend directly.
            x = self.attn(x)
        else:
            x = x.view(B, H, W, C)
            # Pad bottom/right so H and W are multiples of the window size.
            pad_b = (self.window_size - H %
                     self.window_size) % self.window_size
            pad_r = (self.window_size - W %
                     self.window_size) % self.window_size
            padding = pad_b > 0 or pad_r > 0
            if padding:
                x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
            pH, pW = H + pad_b, W + pad_r
            nH = pH // self.window_size
            nW = pW // self.window_size
            # window partition
            x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape(
                B * nH * nW, self.window_size * self.window_size, C)
            x = self.attn(x)
            # window reverse
            x = x.view(B, nH, nW, self.window_size, self.window_size,
                       C).transpose(2, 3).reshape(B, pH, pW, C)
            if padding:
                # Drop the padded rows/columns again.
                x = x[:, :H, :W].contiguous()
            x = x.view(B, L, C)
        x = res_x + self.drop_path(x)
        # Local depthwise conv operates on the (B, C, H, W) layout.
        x = x.transpose(1, 2).reshape(B, C, H, W)
        x = self.local_conv(x)
        x = x.view(B, C, L).transpose(1, 2)
        # MLP branch with its own residual.
        x = x + self.drop_path(self.mlp(x))
        return x
class BasicLayer(nn.Module):
    """ A basic TinyViT layer for one stage.

    Runs ``depth`` TinyViTBlocks at a fixed resolution, optionally
    followed by a patch-merging downsample.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3
        activation: the activation function. Default: nn.GELU
        out_dim: the output dimension of the layer. Default: dim
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., drop=0.,
                 drop_path=0., downsample=None, use_checkpoint=False,
                 local_conv_size=3,
                 activation=nn.GELU,
                 out_dim=None,
                 ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks
        # drop_path may be a per-block list (stochastic depth schedule)
        # or a single float shared by every block.
        self.blocks = nn.ModuleList([
            TinyViTBlock(dim=dim, input_resolution=input_resolution,
                         num_heads=num_heads, window_size=window_size,
                         mlp_ratio=mlp_ratio,
                         drop=drop,
                         drop_path=drop_path[i] if isinstance(
                             drop_path, list) else drop_path,
                         local_conv_size=local_conv_size,
                         activation=activation,
                         )
            for i in range(depth)])
        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(
                input_resolution, dim=dim, out_dim=out_dim, activation=activation)
        else:
            self.downsample = None
    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint:
                # Trade compute for memory: recompute activations on backward.
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
class LayerNorm2d(nn.Module):
    """Channel-wise LayerNorm for NCHW tensors.

    Normalizes each spatial position over the channel dimension, then
    applies a learned per-channel scale and shift.
    """
    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        # Learnable per-channel affine parameters.
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.eps)
        # Broadcast the (C,) parameters over the spatial dimensions.
        return self.weight[:, None, None] * normed + self.bias[:, None, None]
class TinyViT(nn.Module):
    """TinyViT backbone (MobileSAM variant).

    A convolutional stem stage followed by transformer stages. Instead of
    classification logits, ``forward`` returns the last stage's feature
    map projected to 256 channels by ``self.neck``; the classifier head
    is created but not applied (see ``forward``).
    """
    def __init__(self, img_size=224, in_chans=3, num_classes=1000,
                 embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2],
                 num_heads=[3, 6, 12, 24],
                 window_sizes=[7, 7, 14, 7],
                 mlp_ratio=4.,
                 drop_rate=0.,
                 drop_path_rate=0.1,
                 use_checkpoint=False,
                 mbconv_expand_ratio=4.0,
                 local_conv_size=3,
                 layer_lr_decay=1.0,
                 ):
        super().__init__()
        self.img_size=img_size
        self.num_classes = num_classes
        self.depths = depths
        self.num_layers = len(depths)
        self.mlp_ratio = mlp_ratio
        activation = nn.GELU
        self.patch_embed = PatchEmbed(in_chans=in_chans,
                                      embed_dim=embed_dims[0],
                                      resolution=img_size,
                                      activation=activation)
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate,
                                                sum(depths))]  # stochastic depth decay rule
        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            # NOTE(review): the last stage reuses the previous stage's
            # resolution (i_layer-1 when i_layer == 3) instead of halving
            # again -- presumably to keep a higher-resolution feature map
            # for SAM; the original formula is kept commented out below.
            kwargs = dict(dim=embed_dims[i_layer],
                          input_resolution=(patches_resolution[0] // (2 ** (i_layer-1 if i_layer == 3 else i_layer)),
                                patches_resolution[1] // (2 ** (i_layer-1 if i_layer == 3 else i_layer))),
                          # input_resolution=(patches_resolution[0] // (2 ** i_layer),
                          #                   patches_resolution[1] // (2 ** i_layer)),
                          depth=depths[i_layer],
                          drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                          downsample=PatchMerging if (
                              i_layer < self.num_layers - 1) else None,
                          use_checkpoint=use_checkpoint,
                          out_dim=embed_dims[min(
                              i_layer + 1, len(embed_dims) - 1)],
                          activation=activation,
                          )
            if i_layer == 0:
                # Stage 0 is convolutional (MBConv blocks).
                layer = ConvLayer(
                    conv_expand_ratio=mbconv_expand_ratio,
                    **kwargs,
                )
            else:
                layer = BasicLayer(
                    num_heads=num_heads[i_layer],
                    window_size=window_sizes[i_layer],
                    mlp_ratio=self.mlp_ratio,
                    drop=drop_rate,
                    local_conv_size=local_conv_size,
                    **kwargs)
            self.layers.append(layer)
        # Classifier head
        self.norm_head = nn.LayerNorm(embed_dims[-1])
        self.head = nn.Linear(
            embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
        # init weights
        self.apply(self._init_weights)
        self.set_layer_lr_decay(layer_lr_decay)
        # SAM neck: project the final feature map to 256 channels.
        self.neck = nn.Sequential(
            nn.Conv2d(
                embed_dims[-1],
                256,
                kernel_size=1,
                bias=False,
            ),
            LayerNorm2d(256),
            nn.Conv2d(
                256,
                256,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            LayerNorm2d(256),
        )
    def set_layer_lr_decay(self, layer_lr_decay):
        """Attach a ``lr_scale`` attribute to every parameter, implementing
        layer-wise learning-rate decay (earlier blocks get smaller scales)."""
        decay_rate = layer_lr_decay
        # layers -> blocks (depth)
        depth = sum(self.depths)
        lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]
        #print("LR SCALES:", lr_scales)
        def _set_lr_scale(m, scale):
            for p in m.parameters():
                p.lr_scale = scale
        self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))
        i = 0
        for layer in self.layers:
            for block in layer.blocks:
                # apply() runs the lambda immediately, so capturing `i`
                # inside the loop is safe despite late binding.
                block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))
                i += 1
            if layer.downsample is not None:
                # The downsample shares the scale of the layer's last block.
                layer.downsample.apply(
                    lambda x: _set_lr_scale(x, lr_scales[i - 1]))
        assert i == depth
        for m in [self.norm_head, self.head]:
            m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))
        for k, p in self.named_parameters():
            p.param_name = k
        def _check_lr_scale(m):
            # Sanity check: every parameter must have received a scale.
            for p in m.parameters():
                assert hasattr(p, 'lr_scale'), p.param_name
        self.apply(_check_lr_scale)
    def _init_weights(self, m):
        """Truncated-normal init for Linear layers; unit/zero for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        # Parameter-name keywords to exclude from weight decay.
        return {'attention_biases'}
    def forward_features(self, x):
        # x: (N, C, H, W)
        x = self.patch_embed(x)
        x = self.layers[0](x)
        start_i = 1
        for i in range(start_i, len(self.layers)):
            layer = self.layers[i]
            x = layer(x)
        B,_,C=x.size()
        # NOTE(review): the 64x64 spatial size is hard-coded; this only
        # matches the sequence length when img_size == 1024 -- confirm
        # before using other input sizes.
        x = x.view(B, 64, 64, C)
        x=x.permute(0, 3, 1, 2)
        x=self.neck(x)
        return x
    def forward(self, x):
        x = self.forward_features(x)
        # The classification head is not applied (commented out); the
        # backbone returns the 256-channel neck feature map instead.
        #x = self.norm_head(x)
        #x = self.head(x)
        return x
# URL template for downloading pretrained TinyViT checkpoints.
_checkpoint_url_format = \
    'https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/{}.pth'
# Maps model entrypoint names to their published checkpoint file names.
_provided_checkpoints = {
    'tiny_vit_5m_224': 'tiny_vit_5m_22kto1k_distill',
    'tiny_vit_11m_224': 'tiny_vit_11m_22kto1k_distill',
    'tiny_vit_21m_224': 'tiny_vit_21m_22kto1k_distill',
    'tiny_vit_21m_384': 'tiny_vit_21m_22kto1k_384_distill',
    'tiny_vit_21m_512': 'tiny_vit_21m_22kto1k_512_distill',
}
def register_tiny_vit_model(fn):
    '''Register a TinyViT model

    It is a wrapper of `register_model` with loading the pretrained checkpoint.
    '''
    def fn_wrapper(pretrained=False, **kwargs):
        """Build the model, optionally loading the published checkpoint."""
        # Forward caller kwargs (e.g. num_classes, drop_path_rate) to the
        # model factory; previously `fn()` was called with no arguments,
        # silently ignoring every keyword the user passed.
        model = fn(**kwargs)
        if pretrained:
            model_name = fn.__name__
            assert model_name in _provided_checkpoints, \
                f'Sorry that the checkpoint `{model_name}` is not provided yet.'
            url = _checkpoint_url_format.format(
                _provided_checkpoints[model_name])
            checkpoint = torch.hub.load_state_dict_from_url(
                url=url,
                map_location='cpu', check_hash=False,
            )
            model.load_state_dict(checkpoint['model'])
        return model

    # rename the name of fn_wrapper so the registry sees the factory name
    fn_wrapper.__name__ = fn.__name__
    return register_model(fn_wrapper)
@register_tiny_vit_model
def tiny_vit_5m_224(pretrained=False, num_classes=1000, drop_path_rate=0.0):
    """TinyViT-5M for 224x224 inputs.

    `pretrained` is consumed by the register_tiny_vit_model wrapper.
    """
    config = dict(
        num_classes=num_classes,
        embed_dims=[64, 128, 160, 320],
        depths=[2, 2, 6, 2],
        num_heads=[2, 4, 5, 10],
        window_sizes=[7, 7, 14, 7],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**config)
@register_tiny_vit_model
def tiny_vit_11m_224(pretrained=False, num_classes=1000, drop_path_rate=0.1):
    """TinyViT-11M for 224x224 inputs.

    `pretrained` is consumed by the register_tiny_vit_model wrapper.
    """
    config = dict(
        num_classes=num_classes,
        embed_dims=[64, 128, 256, 448],
        depths=[2, 2, 6, 2],
        num_heads=[2, 4, 8, 14],
        window_sizes=[7, 7, 14, 7],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**config)
@register_tiny_vit_model
def tiny_vit_21m_224(pretrained=False, num_classes=1000, drop_path_rate=0.2):
    """TinyViT-21M for 224x224 inputs.

    `pretrained` is consumed by the register_tiny_vit_model wrapper.
    """
    config = dict(
        num_classes=num_classes,
        embed_dims=[96, 192, 384, 576],
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 18],
        window_sizes=[7, 7, 14, 7],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**config)
@register_tiny_vit_model
def tiny_vit_21m_384(pretrained=False, num_classes=1000, drop_path_rate=0.1):
    """TinyViT-21M for 384x384 inputs (larger windows than the 224 model).

    `pretrained` is consumed by the register_tiny_vit_model wrapper.
    """
    config = dict(
        img_size=384,
        num_classes=num_classes,
        embed_dims=[96, 192, 384, 576],
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 18],
        window_sizes=[12, 12, 24, 12],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**config)
@register_tiny_vit_model
def tiny_vit_21m_512(pretrained=False, num_classes=1000, drop_path_rate=0.1):
    """TinyViT-21M for 512x512 inputs (largest windows).

    `pretrained` is consumed by the register_tiny_vit_model wrapper.
    """
    config = dict(
        img_size=512,
        num_classes=num_classes,
        embed_dims=[96, 192, 384, 576],
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 18],
        window_sizes=[16, 16, 32, 16],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**config)
|
ESA-DatalabsREPO_NAMEXAMI-modelPATH_START.@XAMI-model_extracted@XAMI-model-main@xami_model@mobile_sam@mobile_sam@modeling@tiny_vit_sam.py@.PATH_END.py
|
{
"filename": "misc.py",
"repo_name": "fjdu/rac-2d",
"repo_path": "rac-2d_extracted/rac-2d-master/utils_python/draw/misc.py",
"type": "Python"
}
|
def load_data_as_dic(filepath, comments='!', returnOriginalKeys=False):
    """Read a whitespace-separated table into a dict of column arrays.

    The first line of the file must be a header whose first character is
    the comment marker; the remaining whitespace-separated tokens name
    the columns.

    Parameters
    ----------
    filepath : str
        Path of the text file to read.
    comments : str
        Comment character understood by ``numpy.loadtxt``.
    returnOriginalKeys : bool
        If True, also return the column names in file order.

    Returns
    -------
    dict mapping column name -> 1-D array, or ``(names, dict)`` when
    ``returnOriginalKeys`` is True.
    """
    import numpy as np
    # ndmin=2 keeps the column structure even for single-row or
    # single-column files, where plain loadtxt would return a 1-D array
    # and the per-column slicing below would fail.
    data = np.loadtxt(filepath, comments=comments, ndmin=2)
    # Column names come from the header line, first (comment) character
    # dropped; the context manager guarantees the handle is closed even
    # if reading raises.
    with open(filepath, 'r') as ftmp:
        str_comment = ftmp.readline()[1:].split()
    dic = {name: data[:, i] for i, name in enumerate(str_comment)}
    if returnOriginalKeys:
        return str_comment, dic
    else:
        return dic
def to_spherical(d, r_grid, theta_grid, phi_grid, items):
    """Export model data for selected items on a spherical grid.

    d: model data dictionary, as returned by load_data_as_dic.
    r_grid: radial grid boundary points; unit: AU
    theta_grid: theta grid boundary points; unit: radian
    phi_grid: phi grid boundary points; unit: radian
    items: dictionary mapping item names to {'filename': <output path>};
        one output file (RADMC 3D input) is written per item, e.g.

        items = {
            'rhodus_1': {'filename': 'rhodus_1.inp'},
            'Tdust1':   {'filename': 'Tdust1.inp'},
            'C2H':      {'filename': 'C2H.inp'},
        }
    """
    import numpy as np
    # One output file per requested item, in the items dict order.
    files = {key: open(spec['filename'], 'w') for key, spec in items.items()}
    state = query_state()
    # Walk cell centers (midpoints of the boundary grids), phi-major,
    # then theta, then r, writing one value per cell to each file.
    for iphi in range(len(phi_grid) - 1):
        phi = 0.5 * (phi_grid[iphi] + phi_grid[iphi + 1])
        for ith in range(len(theta_grid) - 1):
            theta = 0.5 * (theta_grid[ith] + theta_grid[ith + 1])
            for ir in range(len(r_grid) - 1):
                r = 0.5 * (r_grid[ir] + r_grid[ir + 1])
                # Cylindrical coordinates of the cell center.
                rho = r * np.sin(theta)
                z = r * np.cos(theta)
                for key in items:
                    files[key].write('{0:.6e}\n'.format(state.query(d, rho, z, key)))
    for key in items:
        files[key].close()
def num2str4fig(num, scientific=True):
    r"""Format *num* as a LaTeX math string for figure labels.

    Chooses between fixed-point (e.g. ``${12.5}$``) and scientific
    notation (e.g. ``${3}{\times}10^{4}$``), preferring the shorter form.
    """
    import re
    # s1: fixed-point rendering with one decimal place.
    s1 = '{0:16.1f}'.format(num).strip()
    # s2: [mantissa, exponent] from the scientific rendering, with '+'
    # signs removed and leading zeros in each part stripped.
    s2 = [re.sub(r'^([\-+]*)(0+)([1-9]+)', r'\1\3', item) for item in \
        [item.replace('+','') for item in '{0:16.1e}'.format(num).strip().split('e')]]
    len1 = len(s1)
    len2 = 1
    for item in s2:
        len2 += len(item)
    # choose == 1 -> fixed point is acceptable; 2 -> scientific is forced
    # (the fixed-point rendering underflowed to zero for a nonzero num).
    choose = 1
    if (num != 0.0):
        if float(s1) == 0.0:
            choose = 2
    # Drop a negligible fractional part (e.g. '3.0' -> '3').
    int_0 = int(float(s1))
    if abs(int_0 - float(s1)) < 0.01:
        s1 = '{:d}'.format(int_0)
    if (not scientific) or (len1 <= len2 and choose==1):
        return r'${' + s1 + r'}$'
    else:
        # NOTE(review): exponents 0 and 1 fall back to the fixed-point
        # string -- presumably 10^0 and 10^1 are not worth scientific form.
        if float(s2[1]) == 0.0 or float(s2[1]) == 1.0:
            return r'${' + s1 + r'}$'
        else:
            int_ = int(float(s2[0]))
            if abs(float(s2[0])-int_) < 0.01:
                if int_ == 1:
                    # Mantissa of 1: write just the power of ten.
                    return r'$10^{' + s2[1] + r'}$'
                else:
                    return r'${' + '{:d}'.format(int_) + r'}{\times}10^{' + s2[1] + r'}$'
            else:
                return r'${' + s2[0] + r'}{\times}10^{' + s2[1] + r'}$'
def clip_fraction(num_str, th=0.01):
    """Return *num_str* without its fractional part when the value is
    within relative tolerance *th* of an integer; otherwise unchanged.
    """
    num = float(num_str)
    num_round = round(num)
    # Compare |rounded - value| against th * |value|.  Using abs() keeps
    # the relative test meaningful for negative numbers (the original
    # signed division made the condition always true for num < 0), and
    # the exact-equality branch avoids dividing by zero for num == 0.
    if num == num_round or abs(num_round - num) < th * abs(num):
        return num_str.split('.')[0]
    else:
        return num_str
# Element symbols recognized when parsing chemical species names.
all_elements = ['H', 'D', 'He', 'C', 'N', 'O', 'Si', 'S', 'Fe', 'Na', 'Mg', 'Cl', 'P', 'F', 'Ne', 'Ar', 'K']
def get_all_matches(el, word):
    """Return the start indices of all (possibly overlapping) occurrences
    of the substring *el* in *word*.

    An empty *el* matches at every position, including len(word), which
    matches the original scanning behaviour.
    """
    len_el = len(el)
    # Slide a window of len(el) across word; the range is empty when el
    # is longer than word, reproducing the original early exit.
    return [i for i in range(len(word) - len_el + 1) if word[i:i + len_el] == el]
def get_leading_digits(word):
    """Return the run of ASCII digits at the start of *word* ('' if none)."""
    collected = []
    for ch in word:
        # Explicit ASCII range check (str.isdigit would also accept
        # non-ASCII digits, changing behaviour).
        if not ('0' <= ch <= '9'):
            break
        collected.append(ch)
    return ''.join(collected)
def get_ele_counts(el, word, all_el):
    """Count atoms of element *el* in the species name *word*.

    A match is skipped when it is actually the prefix of a longer element
    symbol from *all_el* (e.g. 'C' inside 'Cl'); a digit run following a
    symbol gives its multiplicity (e.g. 'H2' counts 2), otherwise 1.
    """
    len_el = len(el)
    len_w = len(word)
    # Longer element symbols that begin with el (e.g. 'He' for el='H').
    el_c = [c for c in all_el if (c.startswith(el) and c!=el)]
    idx = get_all_matches(el, word)
    ct = 0
    for i in idx:
        # Skip if the text at this position is really a longer symbol.
        if len([_ for _ in filter(word[i:].startswith, el_c)]) != 0:
            continue
        ii = i+len_el
        if i+len_el == len_w:
            # Symbol at end of the name: counts once.
            ct += 1
            continue
        Ldig = get_leading_digits(word[ii:])
        if Ldig == '':
            ct += 1
            continue
        else:
            # Trailing digits give the multiplicity.
            ct += int(Ldig)
    return ct
def get_molecule_names_from_file(fname, col_start=None):
    """Return the column (molecule) names from the header line of *fname*.

    The header's first character (the comment marker) is dropped.  When
    *col_start* is None the list starts at the 'H' column.
    """
    with open(fname, 'r') as f:
        str_comment = f.readline()[1:].split()
    # PEP 8: identity comparison with None, not '== None'.
    if col_start is None:
        col_start = str_comment.index('H')
    return str_comment[col_start:]
def update_elemental_abundance(ele, d, fname, all_elements):
    """Add the total abundance of element *ele* to *d* under key 'X[<ele>]'.

    Sums count(ele in molecule) * d[molecule] over every molecule column
    named in the header of *fname*.

    Raises
    ------
    KeyError
        With the offending molecule name when it is missing from *d*.
    """
    from numpy import zeros
    molecule_names = get_molecule_names_from_file(fname)
    n = len(d['H'])
    X_ele = zeros(n)
    for k in molecule_names:
        c = get_ele_counts(ele, k, all_elements)
        if c != 0:
            try:
                X_ele += c * d[k]
            except KeyError:
                # Narrowed from a bare 'except': only a missing species
                # key is re-reported; unrelated errors now propagate.
                raise KeyError(k)
    d['X['+ele+']'] = X_ele
    return
def update_density_of_X(X, d):
    """Store the number density n(X) = abundance * total gas density in *d*."""
    key = 'n(' + X + ')'
    d[key] = d[X] * d['n_gas']
def update_stuff(ca, filename):
    """Derive commonly used columns on the model dict *ca* in place:
    number densities of key species, elemental O and C abundances from
    the header of *filename*, and the gas-dust temperature difference."""
    update_density_of_X('H2O', ca)
    update_density_of_X('OH', ca)
    update_density_of_X('CO', ca)
    update_density_of_X('O', ca)
    update_elemental_abundance('O', ca, filename, all_elements)
    update_elemental_abundance('C', ca, filename, all_elements)
    ca['Tgas-Tdust'] = ca['Tgas'] - ca['Tdust']
def is_hydrocarbon(name):
    """True when *name* contains both H and C and no other element."""
    if get_ele_counts('H', name, all_elements) == 0:
        return False
    if get_ele_counts('C', name, all_elements) == 0:
        return False
    # Any other element present disqualifies the species.
    return all(get_ele_counts(el, name, all_elements) == 0
               for el in all_elements if el not in ('H', 'C'))
def has_nitrogen(name):
    """True when the species *name* contains at least one nitrogen atom."""
    return get_ele_counts('N', name, all_elements) > 0
def get_hydrocarbons(all_names):
    """Return the subset of *all_names* that are pure hydrocarbons,
    preserving order."""
    return [name for name in all_names if is_hydrocarbon(name)]
def get_nitrogens(all_names):
    """Return the subset of *all_names* containing nitrogen, preserving order."""
    return [name for name in all_names if has_nitrogen(name)]
|
fjduREPO_NAMErac-2dPATH_START.@rac-2d_extracted@rac-2d-master@utils_python@draw@misc.py@.PATH_END.py
|
{
"filename": "test_catalogmesh.py",
"repo_name": "bccp/nbodykit",
"repo_path": "nbodykit_extracted/nbodykit-master/nbodykit/source/mesh/tests/test_catalogmesh.py",
"type": "Python"
}
|
from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import set_options
from nbodykit import setup_logging
from numpy.testing import assert_array_equal, assert_allclose
import pytest
# Emit debug-level nbodykit log output for every test in this module.
setup_logging("debug")
@MPITest([1])
def test_tsc_interlacing(comm):
    """TSC painting with interlacing + compensation should yield a flat
    shot-noise power spectrum for a uniform random catalog."""
    source = UniformCatalog(nbar=3e-4, BoxSize=512., seed=42, comm=comm)
    # interlacing with TSC
    mesh = source.to_mesh(resampler='tsc', Nmesh=64, interlaced=True, compensated=True)
    # compute the power spectrum -- should be flat shot noise
    # if the compensation worked
    r = FFTPower(mesh, mode='1d', kmin=0.02)
    # skip a few large scale modes that are noisier (fewer modes)
    assert_allclose(r.power['power'][5:], 1 / (3e-4), rtol=1e-1)
@MPITest([1])
def test_paint_empty(comm):
    """Painting a zero-object catalog gives a uniform field: 1 when
    normalized, 0 when not."""
    source = UniformCatalog(nbar=3e-4, BoxSize=512., seed=42, comm=comm)
    source = source[:0]
    assert source.csize == 0
    # interlacing with TSC
    mesh = source.to_mesh(resampler='tsc', Nmesh=64, interlaced=True, compensated=True)
    # paint to a real field with no objects in the catalog
    real = mesh.to_real_field(normalize=True)
    assert_allclose(real, 1.0)
    real = mesh.to_real_field(normalize=False)
    assert_allclose(real, 0.0)
@MPITest([1])
def test_paint_chunksize(comm):
    """The painted mesh must not depend on the paint_chunk_size option."""
    source = UniformCatalog(nbar=3e-4, BoxSize=512., seed=42, comm=comm)
    # interlacing with TSC
    mesh = source.to_mesh(resampler='tsc', Nmesh=64, interlaced=True, compensated=True)
    # paint in four chunks vs a single chunk; results must agree
    with set_options(paint_chunk_size=source.csize // 4):
        r1 = mesh.compute()
    with set_options(paint_chunk_size=source.csize):
        r2 = mesh.compute()
    assert_allclose(r1, r2)
@MPITest([1, 4])
def test_shotnoise(comm):
    """Weighted painting: chunked and unchunked paints agree, and the
    shot noise matches the expectation for uniform random weights."""
    source = UniformCatalog(nbar=3e-4, BoxSize=512., seed=42, comm=comm)
    source['Weight'] = source.rng.uniform()
    # interlacing with TSC
    mesh = source.to_mesh(resampler='tsc', Nmesh=64, interlaced=True, compensated=True, weight='Weight')
    with set_options(paint_chunk_size=source.csize // 4):
        r1 = mesh.compute()
    with set_options(paint_chunk_size=source.csize):
        r2 = mesh.compute()
    assert_allclose(r1, r2)
    # expected shotnoise for uniform weights between 0 and 1:
    # <w^2>/<w>^2 = (1/3)/(1/4) = 4/3 for w ~ U(0, 1)
    SN = 4 / 3.0 * 1 / (3e-4)
    assert_allclose(r1.attrs['shotnoise'], SN, rtol=1e-2)
    assert_allclose(r2.attrs['shotnoise'], SN, rtol=1e-2)
@MPITest([1])
def test_cic_interlacing(comm):
    """CIC painting with interlacing + compensation runs end-to-end.

    NOTE(review): no assertion is made on the resulting power spectrum,
    so this is a smoke test only -- consider asserting flat shot noise
    as the TSC variant does.
    """
    source = UniformCatalog(nbar=3e-4, BoxSize=512., seed=42, comm=comm)
    # interlacing with CIC
    mesh = source.to_mesh(resampler='cic', Nmesh=64, interlaced=True, compensated=True)
    # compute the power spectrum -- should be flat shot noise
    # if the compensation worked
    r = FFTPower(mesh, mode='1d', kmin=0.02)
@MPITest([1])
def test_setters(comm):
    """The compensated/interlaced/window attributes can be toggled after
    the mesh is created and report the new values."""
    source = UniformCatalog(nbar=3e-4, BoxSize=512., seed=42, comm=comm)
    # make the mesh
    mesh = source.to_mesh(resampler='cic', Nmesh=64, interlaced=True, compensated=True)
    assert mesh.compensated == True
    mesh.compensated = False
    assert mesh.compensated == False
    assert mesh.interlaced == True
    mesh.interlaced = False
    assert mesh.interlaced == False
    assert mesh.window == 'cic'
    mesh.window = 'tsc'
    assert mesh.window == 'tsc'
@MPITest([1])
def test_bad_window(comm):
    """Assigning an unknown resampler name to mesh.window must raise."""
    source = UniformCatalog(nbar=3e-4, BoxSize=512., seed=42, comm=comm)
    # make the mesh
    mesh = source.to_mesh(resampler='cic', Nmesh=64, interlaced=True, compensated=True)
    # no such window
    with pytest.raises(Exception):
        mesh.window = "BAD"
@MPITest([1])
def test_no_compensation(comm):
    """Requesting compensation with a window that has no known
    compensation (db6) must raise when the actions are built."""
    source = UniformCatalog(nbar=3e-4, BoxSize=512., seed=42, comm=comm)
    # make the mesh
    mesh = source.to_mesh(resampler='cic', Nmesh=64, interlaced=True, compensated=True)
    # no compensation for this window
    mesh.window = 'db6'
    # cannot compute compensation
    with pytest.raises(ValueError):
        actions = mesh.actions
@MPITest([4])
def test_odd_chunksize(comm):
    """Painting with a chunk size that does not evenly divide the catalog
    must not error."""
    # no errors shall occur. This is a regression test.
    source = ArrayCatalog({
        'Position': numpy.ones((2000, 3)),
    }, BoxSize=512., comm=comm)
    # make the mesh
    mesh = source.to_mesh(resampler='cic', Nmesh=64, interlaced=True, compensated=True)
    with set_options(paint_chunk_size=1111):
        mesh.compute()
@MPITest([1, 4])
def test_view(comm):
    """mesh.view() returns a MeshSource wrapping the original mesh and
    carrying over its attrs."""
    # the CatalogSource
    source = UniformCatalog(nbar=2e-4, BoxSize=512., seed=42, comm=comm)
    source['TEST'] = 10.
    source.attrs['TEST'] = 10.0
    # the mesh
    mesh = source.to_mesh(Nmesh=32)
    # view
    view = mesh.view()
    assert view.base is mesh
    from nbodykit.base.mesh import MeshSource
    assert isinstance(view, MeshSource)
    # check meta-data
    for k in mesh.attrs:
        assert k in view.attrs
@MPITest([1, 4])
def test_apply_nocompensation(comm):
    """A function added via mesh.apply() is invoked during compute() on an
    uncompensated mesh; views of the applied mesh keep the attrs."""
    # the CatalogSource
    source = UniformCatalog(nbar=2e-4, BoxSize=512, seed=42, comm=comm)
    source['TEST'] = 10.
    source['Position2'] = source['Position']
    source.attrs['TEST'] = 10.0
    # the mesh
    mesh = source.to_mesh(position='Position2', Nmesh=32, compensated=False)
    # the applied function raises, proving it actually ran
    def raisefunc(k, v):
        raise StopIteration
    mesh = mesh.apply(raisefunc)
    with pytest.raises(StopIteration):
        mesh.compute()
    # view
    view = mesh.view()
    assert view.base is mesh
    assert isinstance(view, mesh.__class__)
    # check meta-data
    for k in mesh.attrs:
        assert k in view.attrs
@MPITest([1])
def test_apply_compensated(comm):
    """A function added via mesh.apply() is invoked during compute() on a
    compensated mesh."""
    # the CatalogSource
    source = UniformCatalog(nbar=2e-4, BoxSize=512., seed=42, comm=comm)
    source['TEST'] = 10.
    source['Position2'] = source['Position']
    source.attrs['TEST'] = 10.0
    # the mesh
    mesh = source.to_mesh(position='Position2', Nmesh=32, compensated=True)
    # the applied function raises, proving it actually ran
    def raisefunc(k, v):
        raise StopIteration
    mesh = mesh.apply(raisefunc)
    with pytest.raises(StopIteration):
        mesh.compute()
|
bccpREPO_NAMEnbodykitPATH_START.@nbodykit_extracted@nbodykit-master@nbodykit@source@mesh@tests@test_catalogmesh.py@.PATH_END.py
|
{
"filename": "test_npairs_jackknife_3d.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/mock_observables/pair_counters/test_pair_counters/test_npairs_jackknife_3d.py",
"type": "Python"
}
|
"""
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
# load pair counters
from ..npairs_jackknife_3d import npairs_jackknife_3d, _npairs_jackknife_3d_process_weights_jtags
from ....custom_exceptions import HalotoolsError
import pytest
from astropy.utils.misc import NumpyRNGContext
slow = pytest.mark.slow

__all__ = ('test_npairs_jackknife_3d_periodic', 'test_npairs_jackknife_3d_nonperiodic')

# Fixed seed so the random test data is reproducible across runs.
fixed_seed = 43

# set up random points to test pair counters
Npts = 1000
with NumpyRNGContext(fixed_seed):
    random_sample = np.random.random((Npts, 3))
period = np.array([1.0, 1.0, 1.0])
num_threads = 2

# set up a regular grid of points to test pair counters
Npts2 = 10
epsilon = 0.001
gridx = np.linspace(0, 1-epsilon, Npts2)
gridy = np.linspace(0, 1-epsilon, Npts2)
gridz = np.linspace(0, 1-epsilon, Npts2)
xx, yy, zz = np.array(np.meshgrid(gridx, gridy, gridz))
xx = xx.flatten()
yy = yy.flatten()
zz = zz.flatten()
grid_points = np.vstack([xx, yy, zz]).T

# Assign each grid point to a jackknife subvolume of side
# grid_jackknife_spacing; labels are flattened 3-d cell indices.
grid_jackknife_spacing = 0.5
grid_jackknife_ncells = int(1/grid_jackknife_spacing)
ix = np.floor(gridx/grid_jackknife_spacing).astype(int)
iy = np.floor(gridy/grid_jackknife_spacing).astype(int)
iz = np.floor(gridz/grid_jackknife_spacing).astype(int)
ixx, iyy, izz = np.array(np.meshgrid(ix, iy, iz))
ixx = ixx.flatten()
iyy = iyy.flatten()
izz = izz.flatten()
grid_indices = np.ravel_multi_index([ixx, iyy, izz],
    [grid_jackknife_ncells, grid_jackknife_ncells, grid_jackknife_ncells])
# Jackknife sample labels must be >= 1, so shift the 0-based indices.
grid_indices += 1
def test_npairs_jackknife_3d_periodic():
    """
    test npairs_jackknife_3d with periodic boundary conditions.

    Checks the (N_samples+1, N_rbins) result shape, and that on a regular
    grid every jackknife subvolume yields identical pair counts.
    """
    rbins = np.array([0.0, 0.1, 0.2, 0.3])

    # define the jackknife sample labels
    Npts = len(random_sample)
    N_jsamples = 10
    with NumpyRNGContext(fixed_seed):
        jtags1 = np.sort(np.random.randint(1, N_jsamples+1, size=Npts))

    # define weights
    weights1 = np.random.random(Npts)

    result = npairs_jackknife_3d(random_sample, random_sample, rbins, period=period,
        jtags1=jtags1, jtags2=jtags1, N_samples=10,
        weights1=weights1, weights2=weights1, num_threads=num_threads)

    msg = 'The returned result is an unexpected shape.'
    assert np.shape(result) == (N_jsamples+1, len(rbins)), msg

    # Now verify that when computing jackknife pairs on a regularly spaced grid,
    # the counts in all subvolumes are identical
    grid_result = npairs_jackknife_3d(grid_points, grid_points, rbins, period=period,
        jtags1=grid_indices, jtags2=grid_indices, N_samples=grid_jackknife_ncells**3,
        num_threads=num_threads)

    for icell in range(1, grid_jackknife_ncells**3-1):
        assert np.all(grid_result[icell, :] == grid_result[icell+1, :])
def test_npairs_jackknife_3d_nonperiodic():
    """
    test npairs_jackknife_3d without periodic boundary conditions.

    Same checks as the periodic variant, with period=None.
    """
    rbins = np.array([0.0, 0.1, 0.2, 0.3])

    # define the jackknife sample labels
    Npts = len(random_sample)
    N_jsamples = 10
    with NumpyRNGContext(fixed_seed):
        jtags1 = np.sort(np.random.randint(1, N_jsamples+1, size=Npts))

    # define weights
    weights1 = np.random.random(Npts)

    result = npairs_jackknife_3d(random_sample, random_sample, rbins, period=None,
        jtags1=jtags1, jtags2=jtags1, N_samples=10,
        weights1=weights1, weights2=weights1, num_threads=num_threads)

    msg = 'The returned result is an unexpected shape.'
    assert np.shape(result) == (N_jsamples+1, len(rbins)), msg

    grid_result = npairs_jackknife_3d(grid_points, grid_points, rbins, period=None,
        jtags1=grid_indices, jtags2=grid_indices, N_samples=grid_jackknife_ncells**3,
        num_threads=num_threads)

    for icell in range(1, grid_jackknife_ncells**3-1):
        assert np.all(grid_result[icell, :] == grid_result[icell+1, :])
def test_process_weights1():
    """Consistent weights and jackknife tags are processed without error."""
    npts1, npts2 = 100, 120
    N_samples = 5
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((npts1, 3))
        sample2 = np.random.random((npts2, 3))
        weights1 = np.random.rand(npts1)
        weights2 = np.random.rand(npts2)
        jtags1 = np.random.randint(1, N_samples+1, npts1)
        jtags2 = np.random.randint(1, N_samples+1, npts2)

    __ = _npairs_jackknife_3d_process_weights_jtags(sample1, sample2,
        weights1, weights2, jtags1, jtags2, N_samples)
def test_process_weights2():
    """A weights1 array shorter than sample1 must raise a HalotoolsError."""
    npts1, npts2 = 10, 10
    N_samples = 5
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((npts1, 3))
        sample2 = np.random.random((npts2, 3))
        weights1 = np.random.rand(npts1-1)
        weights2 = np.random.rand(npts2)
        jtags1 = np.random.randint(1, N_samples, npts1)
        jtags2 = np.random.randint(1, N_samples, npts2)

    with pytest.raises(HalotoolsError) as err:
        __ = _npairs_jackknife_3d_process_weights_jtags(sample1, sample2,
            weights1, weights2, jtags1, jtags2, N_samples)
    substr = "weights1 should have same len as sample1"
    assert substr in err.value.args[0]
def test_process_weights3():
    """A weights2 array shorter than sample2 must raise a HalotoolsError."""
    npts1, npts2 = 10, 10
    N_samples = 5
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((npts1, 3))
        sample2 = np.random.random((npts2, 3))
        weights1 = np.random.rand(npts1)
        weights2 = np.random.rand(npts2-1)
        jtags1 = np.random.randint(1, N_samples, npts1)
        jtags2 = np.random.randint(1, N_samples, npts2)

    with pytest.raises(HalotoolsError) as err:
        __ = _npairs_jackknife_3d_process_weights_jtags(sample1, sample2,
            weights1, weights2, jtags1, jtags2, N_samples)
    substr = "weights2 should have same len as sample2"
    assert substr in err.value.args[0]
def test_process_weights4():
    """A jtags1 array shorter than sample1 must raise a HalotoolsError."""
    npts1, npts2 = 10, 10
    N_samples = 5
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((npts1, 3))
        sample2 = np.random.random((npts2, 3))
        weights1 = np.random.rand(npts1)
        weights2 = np.random.rand(npts2)
        jtags1 = np.random.randint(1, N_samples, npts1-1)
        jtags2 = np.random.randint(1, N_samples, npts2)

    with pytest.raises(HalotoolsError) as err:
        __ = _npairs_jackknife_3d_process_weights_jtags(sample1, sample2,
            weights1, weights2, jtags1, jtags2, N_samples)
    substr = "jtags1 should have same len as sample1"
    assert substr in err.value.args[0]
def test_process_weights5():
    """A jtags2 array shorter than sample2 must raise a HalotoolsError."""
    npts1, npts2 = 10, 10
    N_samples = 5
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((npts1, 3))
        sample2 = np.random.random((npts2, 3))
        weights1 = np.random.rand(npts1)
        weights2 = np.random.rand(npts2)
        jtags1 = np.random.randint(1, N_samples, npts1)
        jtags2 = np.random.randint(1, N_samples, npts2-1)

    with pytest.raises(HalotoolsError) as err:
        __ = _npairs_jackknife_3d_process_weights_jtags(sample1, sample2,
            weights1, weights2, jtags1, jtags2, N_samples)
    substr = "jtags2 should have same len as sample2"
    assert substr in err.value.args[0]
def test_process_weights6():
    """jtags1 values below 1 must raise a HalotoolsError."""
    npts1, npts2 = 10, 10
    N_samples = 5
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((npts1, 3))
        sample2 = np.random.random((npts2, 3))
        weights1 = np.random.rand(npts1)
        weights2 = np.random.rand(npts2)
        jtags1 = np.random.randint(0, 2, npts1)
        jtags2 = np.random.randint(1, 2, npts2)

    with pytest.raises(HalotoolsError) as err:
        __ = _npairs_jackknife_3d_process_weights_jtags(sample1, sample2,
            weights1, weights2, jtags1, jtags2, N_samples)
    substr = "jtags1 must be >= 1"
    assert substr in err.value.args[0]
def test_process_weights7():
    """jtags2 values below 1 must raise a HalotoolsError."""
    npts1, npts2 = 10, 10
    N_samples = 5
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((npts1, 3))
        sample2 = np.random.random((npts2, 3))
        weights1 = np.random.rand(npts1)
        weights2 = np.random.rand(npts2)
        jtags1 = np.random.randint(1, 2, npts1)
        jtags2 = np.random.randint(0, 2, npts2)

    with pytest.raises(HalotoolsError) as err:
        __ = _npairs_jackknife_3d_process_weights_jtags(sample1, sample2,
            weights1, weights2, jtags1, jtags2, N_samples)
    substr = "jtags2 must be >= 1"
    assert substr in err.value.args[0]
def test_process_weights8():
    """jtags1 values above N_samples must raise a HalotoolsError."""
    npts1, npts2 = 10, 10
    N_samples = 5
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((npts1, 3))
        sample2 = np.random.random((npts2, 3))
        weights1 = np.random.rand(npts1)
        weights2 = np.random.rand(npts2)
        jtags1 = np.random.randint(N_samples+1, N_samples+10, npts1)
        jtags2 = np.random.randint(1, 2, npts2)

    with pytest.raises(HalotoolsError) as err:
        __ = _npairs_jackknife_3d_process_weights_jtags(sample1, sample2,
            weights1, weights2, jtags1, jtags2, N_samples)
    substr = "jtags1 must be <= N_samples"
    assert substr in err.value.args[0]
def test_process_weights9():
    """jtags2 values above N_samples must raise a HalotoolsError."""
    npts1, npts2 = 10, 10
    N_samples = 5
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((npts1, 3))
        sample2 = np.random.random((npts2, 3))
        weights1 = np.random.rand(npts1)
        weights2 = np.random.rand(npts2)
        jtags1 = np.random.randint(1, 2, npts1)
        jtags2 = np.random.randint(N_samples+1, N_samples+10, npts2)

    with pytest.raises(HalotoolsError) as err:
        __ = _npairs_jackknife_3d_process_weights_jtags(sample1, sample2,
            weights1, weights2, jtags1, jtags2, N_samples)
    substr = "jtags2 must be <= N_samples"
    assert substr in err.value.args[0]
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@mock_observables@pair_counters@test_pair_counters@test_npairs_jackknife_3d.py@.PATH_END.py
|
{
"filename": "cachedmodel.py",
"repo_name": "JohannesBuchner/BXA",
"repo_path": "BXA_extracted/BXA-master/bxa/sherpa/cachedmodel.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
BXA (Bayesian X-ray Analysis) for Sherpa
Copyright: Johannes Buchner (C) 2013-2016
"""
import os
if 'MAKESPHINXDOC' not in os.environ:
    from sherpa.models import ArithmeticModel, CompositeModel
    from sherpa.models.parameter import Parameter
else:
    # Lightweight stand-ins so this module can be imported while building
    # the Sphinx documentation without sherpa installed.
    ArithmeticModel = object
    CompositeModel = list
    Parameter = None
class VariableCachedModel(CompositeModel, ArithmeticModel):
    """Wrapper that caches the most recent model call.

    The wrapped model is re-evaluated only when the parameter values
    change; repeated calls with identical parameters return the cached
    result.
    """
    def __init__(self, othermodel):
        """*othermodel* can be any sherpa model"""
        self.othermodel = othermodel
        self.cache = None   # values from the most recent evaluation
        self.lastp = None   # snapshot of the parameters used for `cache`
        print('calling CompositeModel...')
        CompositeModel.__init__(self, name='cached(%s)' % othermodel.name, parts=(othermodel,))
    def calc(self, p, left, right, *args, **kwargs):
        # Snapshot the parameter sequence: storing only a reference would
        # miss in-place mutations of the caller's array/list and serve a
        # stale cache on the next call.
        p = list(p)
        if self.cache is None or self.lastp != p:
            self.cache = self.othermodel.calc(p, left, right, *args, **kwargs)
            self.lastp = p
        return self.cache
    def startup(self):
        self.othermodel.startup()
        CompositeModel.startup(self)
    def teardown(self):
        self.othermodel.teardown()
        CompositeModel.teardown(self)
    def guess(self, dep, *args, **kwargs):
        self.othermodel.guess(dep, *args, **kwargs)
        CompositeModel.guess(self, dep, *args, **kwargs)
class CachedModel(CompositeModel, ArithmeticModel):
    """Wrapper that caches the first model call forever.

    The wrapped model is evaluated exactly once; later calls return the
    same values scaled by the ``relnorm`` parameter.
    """
    def __init__(self, othermodel):
        """*othermodel* can be any sherpa model"""
        self.othermodel = othermodel
        self.cache = None  # result of the first calc() call, kept forever
        # Overall normalisation applied to the cached values.
        self.relnorm = Parameter('cache', 'relnorm', 1, 0, 1e10, 0, 1e10)
        CompositeModel.__init__(self, name='cached(%s)' % othermodel.name, parts=(othermodel,))
    def calc(self, *args, **kwargs):
        # NOTE(review): the cache is never invalidated, so parameter
        # changes in the wrapped model (other than relnorm) have no
        # effect after the first call -- presumably intentional.
        if self.cache is None:
            print(' computing cached model ... ')
            self.cache = self.othermodel.calc(*args, **kwargs)
        return self.cache * self.relnorm.val
    def startup(self):
        self.othermodel.startup()
        CompositeModel.startup(self)
    def teardown(self):
        self.othermodel.teardown()
        CompositeModel.teardown(self)
    def guess(self, dep, *args, **kwargs):
        self.othermodel.guess(dep, *args, **kwargs)
        CompositeModel.guess(self, dep, *args, **kwargs)
|
JohannesBuchnerREPO_NAMEBXAPATH_START.@BXA_extracted@BXA-master@bxa@sherpa@cachedmodel.py@.PATH_END.py
|
{
"filename": "_zsmooth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmapgl/_zsmooth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ZsmoothValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the `heatmapgl.zsmooth` attribute."""

    def __init__(self, plotly_name="zsmooth", parent_name="heatmapgl", **kwargs):
        # Pull overridable defaults out of **kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", ["fast", False])
        super(ZsmoothValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmapgl@_zsmooth.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "inonchiu/ComEst",
"repo_path": "ComEst_extracted/ComEst-master/comest/templates/filters/__init__.py",
"type": "Python"
}
|
inonchiuREPO_NAMEComEstPATH_START.@ComEst_extracted@ComEst-master@comest@templates@filters@__init__.py@.PATH_END.py
|
|
{
"filename": "initializers_v2.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/keras/initializers/initializers_v2.py",
"type": "Python"
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializers for TF 2."""
# pylint: disable=g-classes-have-attributes
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
_PARTITION_SHAPE = 'partition_shape'
_PARTITION_OFFSET = 'partition_offset'
class Initializer(object):
  """Base class for all Keras initializers.

  Subclasses implement `__call__(shape, dtype=None, **kwargs)` and return a
  tensor of the requested shape and dtype filled with values drawn from the
  initializer's distribution.  To support serialization they may also
  override `get_config` and, if the constructor arguments do not match the
  config keys, `from_config`.

  Example of a custom initializer:

  ```python
  import tensorflow as tf

  class ExampleRandomNormal(tf.keras.initializers.Initializer):

    def __init__(self, mean, stddev):
      self.mean = mean
      self.stddev = stddev

    def __call__(self, shape, dtype=None, **kwargs):
      return tf.random.normal(
          shape, mean=self.mean, stddev=self.stddev, dtype=dtype)

    def get_config(self):  # To support serialization
      return {"mean": self.mean, "stddev": self.stddev}
  ```
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor.
      **kwargs: Additional keyword arguments.
    """
    raise NotImplementedError

  def get_config(self):
    """Returns the initializer's configuration as a JSON-serializable dict."""
    return {}

  @classmethod
  def from_config(cls, config):
    """Instantiates an initializer from a configuration dictionary.

    Args:
      config: A Python dictionary, typically the output of `get_config`.

    Returns:
      A `tf.keras.initializers.Initializer` instance.
    """
    # Older configs may carry a `dtype` entry; it is not a constructor
    # argument, so drop it before instantiation.
    if 'dtype' in config:
      del config['dtype']
    return cls(**config)
class Zeros(Initializer):
  """Initializer that generates tensors filled with zeros.

  Also available via the shortcut function `tf.keras.initializers.zeros`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Zeros()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Zeros()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns an all-zeros tensor of the given shape.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
        are supported. Defaults to `tf.keras.backend.floatx()`.
      **kwargs: Additional keyword arguments (partition info).
    """
    _validate_kwargs(self.__class__.__name__, kwargs)
    dtype = _get_dtype(dtype)
    if dtype == dtypes.string or not dtype.is_numpy_compatible:
      raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)
    # When the variable is partitioned, only materialize this partition.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return array_ops.zeros(shape, dtype)
class Ones(Initializer):
  """Initializer that generates tensors filled with ones.

  Also available via the shortcut function `tf.keras.initializers.ones`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Ones()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Ones()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns an all-ones tensor of the given shape.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
        are supported. Defaults to `tf.keras.backend.floatx()`.
      **kwargs: Additional keyword arguments (partition info).
    """
    _validate_kwargs(self.__class__.__name__, kwargs)
    dtype = _get_dtype(dtype)
    if dtype == dtypes.string or not dtype.is_numpy_compatible:
      raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)
    # When the variable is partitioned, only materialize this partition.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return array_ops.ones(shape, dtype)
class Constant(Initializer):
  """Initializer that generates tensors with a single constant value.

  Also available via the shortcut function `tf.keras.initializers.constant`.

  Only scalar values are allowed; the value must be convertible to the
  dtype requested when calling the initializer.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Constant(3.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Constant(3.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    value: A Python scalar.
  """

  def __init__(self, value=0):
    self.value = value

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of the given shape filled with `self.value`.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Defaults to
        `tf.keras.backend.floatx()`.
      **kwargs: Additional keyword arguments (ignored).
    """
    del kwargs  # unused; accepted only for interface compatibility
    resolved_dtype = _get_dtype(dtype)
    return constant_op.constant(self.value, dtype=resolved_dtype, shape=shape)

  def get_config(self):
    return {'value': self.value}
class RandomUniform(Initializer):
  """Initializer that draws samples from a uniform distribution.

  Also available via the shortcut function
  `tf.keras.initializers.random_uniform`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    minval: A python scalar or a scalar tensor. Lower bound of the range of
      random values to generate (inclusive).
    maxval: A python scalar or a scalar tensor. Upper bound of the range of
      random values to generate (exclusive).
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.
  """

  def __init__(self, minval=-0.05, maxval=0.05, seed=None):
    self.minval = minval
    self.maxval = maxval
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor initialized with uniform random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point and integer
        types are supported. Defaults to `tf.keras.backend.floatx()`.
      **kwargs: Additional keyword arguments (partition info).
    """
    _validate_kwargs(self.__class__.__name__, kwargs)
    dtype = _get_dtype(dtype)
    if not (dtype.is_floating or dtype.is_integer):
      raise ValueError('Expected float or integer dtype, got %s.' % dtype)
    # When the variable is partitioned, only materialize this partition.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return self._random_generator.random_uniform(shape, self.minval,
                                                 self.maxval, dtype)

  def get_config(self):
    return {
        'minval': self.minval,
        'maxval': self.maxval,
        'seed': self.seed
    }
class RandomNormal(Initializer):
  """Initializer that draws samples from a normal distribution.

  Also available via the shortcut function
  `tf.keras.initializers.random_normal`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values to
      generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.
  """

  def __init__(self, mean=0.0, stddev=0.05, seed=None):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor initialized with normal random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. Defaults to `tf.keras.backend.floatx()`.
      **kwargs: Additional keyword arguments (partition info).
    """
    _validate_kwargs(self.__class__.__name__, kwargs)
    dtype = _assert_float_dtype(_get_dtype(dtype))
    # When the variable is partitioned, only materialize this partition.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return self._random_generator.random_normal(shape, self.mean, self.stddev,
                                                dtype)

  def get_config(self):
    return {
        'mean': self.mean,
        'stddev': self.stddev,
        'seed': self.seed
    }
class TruncatedNormal(Initializer):
  """Initializer that draws samples from a truncated normal distribution.

  Also available via the shortcut function
  `tf.keras.initializers.truncated_normal`.

  Like `tf.keras.initializers.RandomNormal`, except that values more than
  two standard deviations from the mean are discarded and re-drawn.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values
      to generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate before truncation.
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.
  """

  def __init__(self, mean=0.0, stddev=0.05, seed=None):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor initialized with truncated-normal random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. Defaults to `tf.keras.backend.floatx()`.
      **kwargs: Additional keyword arguments (partition info).
    """
    _validate_kwargs(self.__class__.__name__, kwargs)
    dtype = _assert_float_dtype(_get_dtype(dtype))
    # When the variable is partitioned, only materialize this partition.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return self._random_generator.truncated_normal(shape, self.mean,
                                                   self.stddev, dtype)

  def get_config(self):
    return {
        'mean': self.mean,
        'stddev': self.stddev,
        'seed': self.seed
    }
class VarianceScaling(Initializer):
  """Initializer capable of adapting its scale to the shape of weights tensors.

  Also available via the shortcut function
  `tf.keras.initializers.variance_scaling`.

  With `distribution="truncated_normal" or "untruncated_normal"`, samples are
  drawn from a truncated/untruncated normal distribution with a mean of zero
  and a standard deviation (after truncation, if used)
  `stddev = sqrt(scale / n)`, where `n` is:

  - number of input units in the weight tensor, if `mode="fan_in"`
  - number of output units, if `mode="fan_out"`
  - average of the numbers of input and output units, if `mode="fan_avg"`

  With `distribution="uniform"`, samples are drawn from a uniform distribution
  within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.VarianceScaling(
  ...     scale=0.1, mode='fan_in', distribution='uniform')
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.VarianceScaling(
  ...     scale=0.1, mode='fan_in', distribution='uniform')
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    scale: Scaling factor (positive float).
    mode: One of "fan_in", "fan_out", "fan_avg".
    distribution: Random distribution to use. One of "truncated_normal",
      "untruncated_normal" and "uniform".
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.
  """

  def __init__(self,
               scale=1.0,
               mode='fan_in',
               distribution='truncated_normal',
               seed=None):
    # Validate eagerly so misconfiguration fails at construction time,
    # not at the first __call__.
    if scale <= 0.:
      raise ValueError('`scale` must be positive float.')
    if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
      raise ValueError('Invalid `mode` argument:', mode)
    distribution = distribution.lower()
    # Compatibility with keras-team/keras.
    if distribution == 'normal':
      distribution = 'truncated_normal'
    if distribution not in {'uniform', 'truncated_normal',
                            'untruncated_normal'}:
      raise ValueError('Invalid `distribution` argument:', distribution)
    self.scale = scale
    self.mode = mode
    self.distribution = distribution
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. If not specified, `tf.keras.backend.floatx()` is used,
        which default to `float32` unless you configured it otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`)
      **kwargs: Additional keyword arguments.
    """
    _validate_kwargs(self.__class__.__name__, kwargs)
    dtype = _assert_float_dtype(_get_dtype(dtype))
    scale = self.scale
    # NOTE: fans are computed from the *full* variable shape before the
    # partition shape (if any) is substituted below, so every partition of
    # a partitioned variable draws from the same distribution as the
    # unpartitioned variable would.
    fan_in, fan_out = _compute_fans(shape)
    if _PARTITION_SHAPE in kwargs:
      shape = kwargs[_PARTITION_SHAPE]
    if self.mode == 'fan_in':
      scale /= max(1., fan_in)
    elif self.mode == 'fan_out':
      scale /= max(1., fan_out)
    else:
      scale /= max(1., (fan_in + fan_out) / 2.)
    if self.distribution == 'truncated_normal':
      # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
      # Dividing by it compensates for the variance lost to truncation so
      # the post-truncation stddev equals sqrt(scale).
      stddev = math.sqrt(scale) / .87962566103423978
      return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)
    elif self.distribution == 'untruncated_normal':
      stddev = math.sqrt(scale)
      return self._random_generator.random_normal(shape, 0.0, stddev, dtype)
    else:
      limit = math.sqrt(3.0 * scale)
      return self._random_generator.random_uniform(shape, -limit, limit, dtype)

  def get_config(self):
    return {
        'scale': self.scale,
        'mode': self.mode,
        'distribution': self.distribution,
        'seed': self.seed
    }
class Orthogonal(Initializer):
  """Initializer that generates an orthogonal matrix.

  Also available via the shortcut function `tf.keras.initializers.orthogonal`.

  If the shape of the tensor to initialize is two-dimensional, it is
  initialized with an orthogonal matrix obtained from the QR decomposition of
  a matrix of random numbers drawn from a normal distribution. If the matrix
  has fewer rows than columns then the output will have orthogonal rows.
  Otherwise, the output will have orthogonal columns.

  If the shape of the tensor to initialize is more than two-dimensional,
  a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
  is initialized, where `n` is the length of the shape vector.
  The matrix is subsequently reshaped to give a tensor of the desired shape.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Orthogonal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Orthogonal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    gain: multiplicative factor to apply to the orthogonal matrix
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
    [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
    ([pdf](https://arxiv.org/pdf/1312.6120.pdf))
  """

  def __init__(self, gain=1.0, seed=None):
    self.gain = gain
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized to an orthogonal matrix.

    Args:
      shape: Shape of the tensor. Must be at least rank 2.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. If not specified, `tf.keras.backend.floatx()` is used.
      **kwargs: Additional keyword arguments. Partition-related arguments
        are rejected (orthogonality cannot be preserved per-partition).
    """
    _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)
    dtype = _assert_float_dtype(_get_dtype(dtype))
    # Check the shape
    if len(shape) < 2:
      raise ValueError('The tensor to initialize must be '
                       'at least two-dimensional')
    # Flatten the input shape with the last dimension remaining
    # its original shape so it works for conv2d
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    num_cols = shape[-1]
    # QR requires a tall (rows >= cols) matrix; transpose back later if the
    # requested matrix is wide.
    flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))

    # Generate a random matrix
    a = self._random_generator.random_normal(flat_shape, dtype=dtype)
    # Compute the qr factorization
    q, r = gen_linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform: multiplying each column by the sign of the matching
    # diagonal element of R fixes the sign ambiguity of the QR
    # decomposition.
    d = array_ops.tensor_diag_part(r)
    q *= math_ops.sign(d)
    if num_rows < num_cols:
      q = array_ops.matrix_transpose(q)
    return self.gain * array_ops.reshape(q, shape)

  def get_config(self):
    return {'gain': self.gain, 'seed': self.seed}
class Identity(Initializer):
  """Initializer that generates the identity matrix.

  Also available via the shortcut function `tf.keras.initializers.identity`.

  Only usable for generating 2D matrices.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Identity()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Identity()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    gain: Multiplicative factor to apply to the identity matrix.
  """

  def __init__(self, gain=1.0):
    self.gain = gain

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a 2D identity matrix scaled by `self.gain`.

    Args:
      shape: Shape of the tensor. Must have exactly rank 2.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. Defaults to `tf.keras.backend.floatx()`.
      **kwargs: Additional keyword arguments. Partition-related arguments
        are rejected.
    """
    _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)
    dtype = _assert_float_dtype(_get_dtype(dtype))
    if len(shape) != 2:
      raise ValueError(
          'Identity matrix initializer can only be used for 2D matrices.')
    return self.gain * linalg_ops.eye(*shape, dtype=dtype)

  def get_config(self):
    return {'gain': self.gain}
class GlorotUniform(VarianceScaling):
  """The Glorot uniform initializer, also called Xavier uniform initializer.

  Also available via the shortcut function
  `tf.keras.initializers.glorot_uniform`.

  Samples are drawn from a uniform distribution within `[-limit, limit]`,
  where `limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` / `fan_out` are the
  numbers of input / output units in the weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.GlorotUniform()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.GlorotUniform()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
    [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None):
    super().__init__(
        scale=1.0, mode='fan_avg', distribution='uniform', seed=seed)

  def get_config(self):
    return {'seed': self.seed}
class GlorotNormal(VarianceScaling):
  """The Glorot normal initializer, also called Xavier normal initializer.

  Also available via the shortcut function
  `tf.keras.initializers.glorot_normal`.

  Samples are drawn from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / (fan_in + fan_out))` (`fan_in` / `fan_out` are the
  numbers of input / output units in the weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.GlorotNormal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.GlorotNormal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
    [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None):
    super().__init__(
        scale=1.0, mode='fan_avg', distribution='truncated_normal', seed=seed)

  def get_config(self):
    return {'seed': self.seed}
class LecunNormal(VarianceScaling):
  """Lecun normal initializer.

  Also available via the shortcut function
  `tf.keras.initializers.lecun_normal`.

  Samples are drawn from a truncated normal distribution centered on 0 with
  `stddev = sqrt(1 / fan_in)` (`fan_in` is the number of input units in the
  weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.LecunNormal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.LecunNormal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. Used to seed the random generator.

  References:
    - Self-Normalizing Neural Networks,
    [Klambauer et al., 2017]
    (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
    ([pdf]
    (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
    - Efficient Backprop,
    [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """

  def __init__(self, seed=None):
    super().__init__(
        scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)

  def get_config(self):
    return {'seed': self.seed}
class LecunUniform(VarianceScaling):
  """Lecun uniform initializer.

  Also available via the shortcut function
  `tf.keras.initializers.lecun_uniform`.

  Samples are drawn from a uniform distribution within `[-limit, limit]`,
  where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in
  the weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.LecunUniform()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.LecunUniform()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
    - Self-Normalizing Neural Networks,
    [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long
    ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
    - Efficient Backprop,
    [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """

  def __init__(self, seed=None):
    super().__init__(
        scale=1., mode='fan_in', distribution='uniform', seed=seed)

  def get_config(self):
    return {'seed': self.seed}
class HeNormal(VarianceScaling):
  """He normal initializer.

  Also available via the shortcut function
  `tf.keras.initializers.he_normal`.

  Samples are drawn from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / fan_in)` (`fan_in` is the number of input units in the
  weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.HeNormal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.HeNormal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
    [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
    ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """

  def __init__(self, seed=None):
    super().__init__(
        scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)

  def get_config(self):
    return {'seed': self.seed}
class HeUniform(VarianceScaling):
  """He uniform variance scaling initializer.

  Also available via the shortcut function
  `tf.keras.initializers.he_uniform`.

  Samples are drawn from a uniform distribution within `[-limit, limit]`,
  where `limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in
  the weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.HeUniform()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.HeUniform()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
    [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
    ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """

  def __init__(self, seed=None):
    super().__init__(
        scale=2., mode='fan_in', distribution='uniform', seed=seed)

  def get_config(self):
    return {'seed': self.seed}
def _get_dtype(dtype):
  """Resolves *dtype*, falling back to the Keras default float type."""
  return dtypes.as_dtype(backend.floatx() if dtype is None else dtype)
def _assert_float_dtype(dtype):
  """Validates and returns a floating point type based on `dtype`.

  Args:
    dtype: The data type to validate.

  Returns:
    The validated `tf.DType`.

  Raises:
    ValueError: if `dtype` is not a floating point type.
  """
  dtype = dtypes.as_dtype(dtype)
  if dtype.is_floating:
    return dtype
  raise ValueError('Expected floating point type, got %s.' % dtype)
class _RandomGenerator(object):
  """Random generator that selects appropriate random ops.

  With a seed it dispatches to the stateless random ops (deterministic for a
  given shape/dtype); without one it uses the stateful ops.
  """

  def __init__(self, seed=None):
    super(_RandomGenerator, self).__init__()
    # Stateless random ops require a seed made of two integers.
    self.seed = [seed, 0] if seed is not None else None

  def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):
    """A deterministic random normal if seed is passed."""
    op = (stateless_random_ops.stateless_random_normal
          if self.seed else random_ops.random_normal)
    return op(
        shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)

  def random_uniform(self, shape, minval, maxval, dtype):
    """A deterministic random uniform if seed is passed."""
    op = (stateless_random_ops.stateless_random_uniform
          if self.seed else random_ops.random_uniform)
    return op(
        shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)

  def truncated_normal(self, shape, mean, stddev, dtype):
    """A deterministic truncated normal if seed is passed."""
    op = (stateless_random_ops.stateless_truncated_normal
          if self.seed else random_ops.truncated_normal)
    return op(
        shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of integer scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return int(fan_in), int(fan_out)
def _validate_kwargs(cls_name, kwargs, support_partition=True):
for kwarg in kwargs:
if kwarg not in [_PARTITION_SHAPE, _PARTITION_OFFSET]:
raise TypeError('Unknown keyword arguments: %s' % kwarg)
elif not support_partition:
raise ValueError('%s initializer doesn\'t support partition-related '
'arguments' % cls_name)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@keras@initializers@initializers_v2.py@.PATH_END.py
|
{
"filename": "tfsa-2022-159.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2022-159.md",
"type": "Markdown"
}
|
## TFSA-2022-159: `FractionalMaxPoolGrad` Heap OOB
### CVE Number
CVE-2022-41897
### Impact
If [`FractionalMaxPoolGrad`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/fractional_max_pool_op.cc) is given oversized inputs `row_pooling_sequence` and `col_pooling_sequence`, TensorFlow will crash.
```python
import tensorflow as tf
tf.raw_ops.FractionalMaxPoolGrad(
orig_input = [[[[1, 1, 1, 1, 1]]]],
orig_output = [[[[1, 1, 1]]]],
out_backprop = [[[[3], [3], [6]]]],
row_pooling_sequence = [-0x4000000, 1, 1],
col_pooling_sequence = [-0x4000000, 1, 1],
overlapping = False
)
```
### Patches
We have patched the issue in GitHub commit [d71090c3e5ca325bdf4b02eb236cfb3ee823e927](https://github.com/tensorflow/tensorflow/commit/d71090c3e5ca325bdf4b02eb236cfb3ee823e927).
The fix will be included in TensorFlow 2.11. We will also cherrypick this commit on TensorFlow 2.10.1, 2.9.3, and TensorFlow 2.8.4, as these are also affected and still in supported range.
### For more information
Please consult [our security guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for more information regarding the security model and how to contact us with issues and questions.
### Attribution
This vulnerability has been reported by Yu Tian from Qihoo 360 AIVul Team.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2022-159.md@.PATH_END.py
|
{
"filename": "runtests.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/runtests.py",
"type": "Python"
}
|
import unittest
import xmlrunner
from coverage import Coverage
import sys
"""
Unittest-compatible test-running script
Sonny Rappaport, Cornell, 7/6/2021. Written in conjunction with .circleci/config.yml
to work with circleci's parallelization, coverage.py, and coveralls.
"""
def format_path(file_path):
    """Converts a bash-style file path such as ``dir/sub/test.py`` into the
    dotted module form ``dir.sub.test`` that unittest's loader expects.

    Args:
        file_path (str): The path of the test file, e.g. ``tests/test_x.py``.

    Returns:
        str: The dotted module name corresponding to ``file_path``.
    """
    # Only strip a trailing ".py"; the original replace(".py", "") would also
    # mangle a ".py" occurring elsewhere in the path.
    if file_path.endswith(".py"):
        file_path = file_path[: -len(".py")]
    return file_path.replace("/", ".")
if __name__ == "__main__":
    """When called via bash with a list of file names, creates and runs a
    unittest suite over all the test files passed into this script, generating
    both an XML report and a .coverage file for each parallel run on circleci.
    The XML reports are placed in exosims/test-reports and the .coverage file
    is placed in the exosims root folder.

    Command line usage example:

    python runtests.py [list of test-file paths]
    """
    cov = Coverage()
    cov.start()
    loader = unittest.TestLoader()
    # sys.argv[0] is this script's own path, not a test file; only the
    # arguments that follow it are test-file names to load.
    module_names = [format_path(path) for path in sys.argv[1:]]
    suites = [loader.loadTestsFromName(name) for name in module_names]
    # Create one combined suite of all tests.
    combosuite = unittest.TestSuite(suites)
    # Generate XML files containing the times of each test for circle-ci's
    # test-splitting via time.
    runner = xmlrunner.XMLTestRunner(output="test-reports")
    runner.run(combosuite)
    cov.stop()
    cov.save()
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@runtests.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/datasets/copper/__init__.py",
"type": "Python"
}
|
# Public API of the copper dataset package: the loader functions plus the
# dataset's metadata strings (copyright, title, source, descriptions, notes).
__all__ = ["load", "load_pandas",
           "COPYRIGHT", "TITLE", "SOURCE", "DESCRSHORT", "DESCRLONG", "NOTE"]
from .data import (
    load, load_pandas,
    COPYRIGHT, TITLE, SOURCE, DESCRSHORT, DESCRLONG, NOTE)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@datasets@copper@__init__.py@.PATH_END.py
|
{
"filename": "gamma_sampler.py",
"repo_name": "CU-NESS/pylinex",
"repo_path": "pylinex_extracted/pylinex-master/examples/nonlinear/gamma_sampler.py",
"type": "Python"
}
|
"""
File: examples/nonlinear/gamma_sampler.py
Author: Keith Tauscher
Date: 23 Nov 2018
Description: Example showing use of the GammaLoglikelihood class in an MCMC
sampling context and comparison with the Fisher matrix formalism.
"""
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as pl
from distpy import GammaDistribution, TruncatedGaussianDistribution,\
TruncatedGaussianJumpingDistribution, DistributionSet,\
JumpingDistributionSet
from pylinex import ConstantModel, GammaLoglikelihood, Sampler, BurnRule,\
NLFitter
# Reproducibility and plot styling.
seed = 3
np.random.seed(seed)
linewidth = 3
fontsize = 24
# Gamma-noise model: each data point is an average of num_averaged samples.
true_value = 1
num_averaged = 10
num_points = 100
total_number = num_averaged * num_points
# MCMC configuration.
num_walkers = 100
steps_per_checkpoint = 100
num_checkpoints = 100
model = ConstantModel(num_points)
true_parameters = np.ones(1) * true_value
true_variance = (true_value ** 2) / num_averaged
mean = model(true_parameters)
# Mock data drawn from the gamma distribution. (A dead assignment of
# constant data that was immediately overwritten here has been removed.)
data =\
    GammaDistribution(num_averaged, true_value / num_averaged).draw(num_points)
loglikelihood = GammaLoglikelihood(data, model, num_averaged)
file_name = 'TESTINGGAMMALOGLIKELIHOODDELETEIFYOUSEETHIS.TEMP'
restart_mode = None
guess_distribution_set = DistributionSet([(TruncatedGaussianDistribution(\
    true_value, true_variance / 1000, low=0), 'a', None)])
jumping_distribution_set = JumpingDistributionSet([\
    (TruncatedGaussianJumpingDistribution(true_variance, low=0), 'a', None)])
# Round-trip the loglikelihood through disk to verify (de)serialization,
# removing the temporary file whether or not the check succeeds.
try:
    loglikelihood.save(file_name)
    assert(loglikelihood == GammaLoglikelihood.load(file_name))
except:
    os.remove(file_name)
    raise
else:
    os.remove(file_name)
sampler = Sampler(file_name, num_walkers, loglikelihood,\
    jumping_distribution_set=jumping_distribution_set,\
    guess_distribution_set=guess_distribution_set,\
    steps_per_checkpoint=steps_per_checkpoint, restart_mode=restart_mode)
sampler.run_checkpoints(num_checkpoints, silence_error=True)
# Discard the first half of the chain as burn-in before fitting.
burn_rule = BurnRule(desired_fraction=0.5)
fitter = NLFitter(file_name, burn_rule)
fig = pl.figure(figsize=(12,9))
ax = fig.add_subplot(111)
ax = fitter.plot_univariate_histogram('a', ax=ax,\
    apply_transforms_to_chain=True, fontsize=28, matplotlib_function='plot',\
    show_intervals=False, bins=100, xlabel='$a$', ylabel='PDF',\
    title='Gamma loglikelihood test', linewidth=linewidth,\
    label='MCMC w. noise', linestyle='--')
# Analytic distribution in the no-noise limit, for visual comparison.
x_values = true_value +\
    (np.linspace(-1, 1, 1000) * (4 * np.sqrt(true_variance)))
x_values = x_values[x_values > 0]
true_distribution = GammaDistribution(total_number, true_value / total_number)
pdf_values = np.exp(true_distribution.log_value(x_values))
pdf_values = pdf_values / np.max(pdf_values)
ax.plot(x_values, pdf_values, color='r', linewidth=linewidth, linestyle='--',\
    label='No noise scatter')
# Fisher-matrix approximation around the maximum-probability parameters.
transform_list = None
max_standard_deviations = np.inf
prior_distribution = None
larger_differences = 1e-5
smaller_differences = 1e-6
fisher_distribution_set =\
    loglikelihood.parameter_distribution_fisher_formalism(\
    fitter.maximum_probability_parameters, transform_list=transform_list,\
    max_standard_deviations=max_standard_deviations,\
    prior_to_impose_in_transformed_space=prior_distribution,\
    larger_differences=larger_differences,\
    smaller_differences=smaller_differences)
fisher_distribution = fisher_distribution_set._data[0][0]
pdf_values = np.exp([fisher_distribution.log_value(x_value)\
    for x_value in x_values])
pdf_values = pdf_values / np.max(pdf_values)
ax.plot(x_values, pdf_values, color='g', linewidth=linewidth, linestyle='--',\
    label='Fisher from MCMC')
ax.legend(fontsize=fontsize)
os.remove(file_name)
pl.show()
|
CU-NESSREPO_NAMEpylinexPATH_START.@pylinex_extracted@pylinex-master@examples@nonlinear@gamma_sampler.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choroplethmapbox/hoverlabel/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``choroplethmapbox.hoverlabel.font.shadow``."""

    def __init__(
        self,
        plotly_name="shadow",
        parent_name="choroplethmapbox.hoverlabel.font",
        **kwargs,
    ):
        # Fill in plotly schema defaults unless the caller overrode them.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choroplethmapbox@hoverlabel@font@_shadow.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "n-claes/legolas",
"repo_path": "legolas_extracted/legolas-master/post_processing/pylbo/visualisation/eigenfunctions/__init__.py",
"type": "Python"
}
|
n-claesREPO_NAMElegolasPATH_START.@legolas_extracted@legolas-master@post_processing@pylbo@visualisation@eigenfunctions@__init__.py@.PATH_END.py
|
|
{
"filename": "quantization.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/references/training-parameters/quantization.md",
"type": "Markdown"
}
|
# Quantization settings
## target_border {#target_border}
Command-line: `--target-border`
#### Description
If set, defines the border for converting target values to 0 and 1.
Depending on the specified value:
- $target\_value \le border\_value$ the target is converted to 0
- $target\_value > border\_value$ the target is converted to 1
**Type**
{{ python-type--float }}
**Default value**
{% cut "Python package, R package" %}
None
{% endcut %}
{% cut "Command line" %}
The value is not set
{% endcut %}
**Supported processing units**
{{ cpu-gpu }}
## border_count {#border_count}
Command-line: `-x`, `--border-count`
_Alias:_ `max_bin`
#### Description
The number of splits for numerical features. Allowed values are integers from 1 to 65535 inclusively.
**Type**
{{ python-type--int }}
**Default value**
{% include [reusage-default-values-border_count](../../_includes/work_src/reusage-default-values/border_count.md) %}
**Supported processing units**
{{ cpu-gpu }}
## feature_border_type {#feature_border_type}
Command-line: `--feature-border-type`
#### Description
The [quantization mode](../../concepts/quantization.md) for numerical features.
Possible values:
- Median
- Uniform
- UniformAndQuantiles
- MaxLogSum
- MinEntropy
- GreedyLogSum
**Type**
{{ python-type--string }}
**Default value**
{{ fit--feature_border_type }}
**Supported processing units**
{{ cpu-gpu }}
## per_float_feature_quantization {#per_float_feature_quantization}
Command-line: `--per-float-feature-quantization`
#### Description
The quantization description for the specified feature or list of features.
Description format for a single feature:
```
FeatureId[:border_count=BorderCount][:nan_mode=BorderType][:border_type=border_selection_method]
```
Examples:
- ```
per_float_feature_quantization='0:border_count=1024'
```
In this example, the feature indexed 0 has 1024 borders.
- ```python
per_float_feature_quantization=['0:border_count=1024', '1:border_count=1024']
```
In this example, features indexed 0 and 1 have 1024 borders.
**Type**
- {{ python-type--string }}
- {{ python-type--list-of-strings }}
**Default value**
{% cut "Python package, R package" %}
None
{% endcut %}
{% cut "Command-line" %}
Omitted
{% endcut %}
**Supported processing units**
{{ cpu-gpu }}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@references@training-parameters@quantization.md@.PATH_END.py
|
{
"filename": "_hoverinfosrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/barpolar/_hoverinfosrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-array validator for ``barpolar.hoverinfosrc``."""

    def __init__(self, plotly_name="hoverinfosrc", parent_name="barpolar", **kwargs):
        # Fill in plotly schema defaults unless the caller overrode them.
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(HoverinfosrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@barpolar@_hoverinfosrc.py@.PATH_END.py
|
{
"filename": "_colorsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/marker/line/_colorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-array validator for ``scattergeo.marker.line.colorsrc``."""

    def __init__(
        self, plotly_name="colorsrc", parent_name="scattergeo.marker.line", **kwargs
    ):
        # Fill in plotly schema defaults unless the caller overrode them.
        kwargs.setdefault("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@marker@line@_colorsrc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "icecube/TauRunner",
"repo_path": "TauRunner_extracted/TauRunner-master/taurunner/__init__.py",
"type": "Python"
}
|
from .body import __init__
from .track import __init__
from .utils import __init__
from .cross_sections import __init__
from .particle import __init__
from .proposal_interface import __init__
from .Casino import Propagate
|
icecubeREPO_NAMETauRunnerPATH_START.@TauRunner_extracted@TauRunner-master@taurunner@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sonoUnoTeam/sonoUno-desktop",
"repo_path": "sonoUno-desktop_extracted/sonoUno-desktop-master/sonoUno/data_transform/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
|
sonoUnoTeamREPO_NAMEsonoUno-desktopPATH_START.@sonoUno-desktop_extracted@sonoUno-desktop-master@sonoUno@data_transform@__init__.py@.PATH_END.py
|
{
"filename": "tests.py",
"repo_name": "RafiKueng/SpaghettiLens",
"repo_path": "SpaghettiLens_extracted/SpaghettiLens-master/_backup2/apps/lenses/tests.py",
"type": "Python"
}
|
from django.test import TestCase
# Create your tests here.
|
RafiKuengREPO_NAMESpaghettiLensPATH_START.@SpaghettiLens_extracted@SpaghettiLens-master@_backup2@apps@lenses@tests.py@.PATH_END.py
|
{
"filename": "_matfuncs.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/sparse/linalg/_matfuncs.py",
"type": "Python"
}
|
"""
Sparse matrix functions
"""
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
__all__ = ['expm', 'inv']
import numpy as np
from scipy.linalg._basic import solve, solve_triangular
from scipy.sparse._base import issparse
from scipy.sparse.linalg import spsolve
from scipy.sparse._sputils import is_pydata_spmatrix
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg._interface import LinearOperator
from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm
UPPER_TRIANGULAR = 'upper_triangular'
def inv(A):
    """
    Compute the inverse of a sparse matrix.

    Parameters
    ----------
    A : (M, M) sparse matrix
        square matrix to be inverted

    Returns
    -------
    Ainv : (M, M) sparse matrix
        inverse of `A`

    Notes
    -----
    This computes the sparse inverse of `A`.  If the inverse of `A` is
    expected to be non-sparse, it will likely be faster to convert `A`
    to dense and use `scipy.linalg.inv`.

    .. versionadded:: 0.12.0
    """
    # Only sparse inputs are supported here; dense callers should use
    # scipy.linalg.inv instead.
    if not (scipy.sparse.issparse(A) or is_pydata_spmatrix(A)):
        raise TypeError('Input must be a sparse matrix')
    # Solving "A X = I" with a sparse direct solver yields the inverse
    # columns accurately.
    identity = _ident_like(A)
    return spsolve(A, identity)
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# Check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = np.ones((A.shape[0], 1), dtype=float)
M = A.T
for i in range(p):
v = M.dot(v)
return np.max(v)
def _is_upper_triangular(A):
# This function could possibly be of wider interest.
if issparse(A):
lower_part = scipy.sparse.tril(A, -1)
# Check structural upper triangularity,
# then coincidental upper triangularity if needed.
return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
elif is_pydata_spmatrix(A):
import sparse
lower_part = sparse.tril(A, -1)
return lower_part.nnz == 0
else:
return not np.tril(A, -1).any()
def _smart_matrix_product(A, B, alpha=None, structure=None):
    """
    A matrix product that knows about sparse and structured matrices.

    Parameters
    ----------
    A : 2d ndarray
        First matrix.
    B : 2d ndarray
        Second matrix.
    alpha : float
        The matrix product will be scaled by this constant.
    structure : str, optional
        A string describing the structure of both matrices `A` and `B`.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    M : 2d ndarray
        Matrix product of A and B.
    """
    if len(A.shape) != 2:
        raise ValueError('expected A to be a rectangular matrix')
    if len(B.shape) != 2:
        raise ValueError('expected B to be a rectangular matrix')
    # Dense upper-triangular operands can use the BLAS trmm fast path.
    f = None
    if structure == UPPER_TRIANGULAR:
        both_dense = not (issparse(A) or issparse(B)
                          or is_pydata_spmatrix(A) or is_pydata_spmatrix(B))
        if both_dense:
            f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
    if f is not None:
        return f(1. if alpha is None else alpha, A, B)
    out = A.dot(B)
    return out if alpha is None else alpha * out
class MatrixPowerOperator(LinearOperator):
    """Linear operator representing A**p without forming the matrix power."""

    def __init__(self, A, p, structure=None):
        if A.ndim != 2 or A.shape[0] != A.shape[1]:
            raise ValueError('expected A to be like a square matrix')
        if p < 0:
            raise ValueError('expected p to be a non-negative integer')
        self._A = A
        self._p = p
        self._structure = structure
        self.dtype = A.dtype
        self.ndim = A.ndim
        self.shape = A.shape

    def _matvec(self, x):
        # Apply A to the vector p times.
        for _ in range(self._p):
            x = self._A.dot(x)
        return x

    def _rmatvec(self, x):
        AT = self._A.T
        x = x.ravel()
        for _ in range(self._p):
            x = AT.dot(x)
        return x

    def _matmat(self, X):
        for _ in range(self._p):
            X = _smart_matrix_product(self._A, X, structure=self._structure)
        return X

    @property
    def T(self):
        return MatrixPowerOperator(self._A.T, self._p)
class ProductOperator(LinearOperator):
    """
    For now, this is limited to products of multiple square matrices.
    """

    def __init__(self, *args, **kwargs):
        self._structure = kwargs.get('structure', None)
        for A in args:
            if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
                raise ValueError(
                    'For now, the ProductOperator implementation is '
                    'limited to the product of multiple square matrices.')
        if args:
            n = args[0].shape[0]
            for A in args:
                for d in A.shape:
                    if d != n:
                        raise ValueError(
                            'The square matrices of the ProductOperator '
                            'must all have the same shape.')
            self.shape = (n, n)
            self.ndim = len(self.shape)
        self.dtype = np.result_type(*[x.dtype for x in args])
        self._operator_sequence = args

    def _matvec(self, x):
        # Rightmost factor applies first, as in an ordinary matrix product.
        for A in reversed(self._operator_sequence):
            x = A.dot(x)
        return x

    def _rmatvec(self, x):
        x = x.ravel()
        for A in self._operator_sequence:
            x = A.T.dot(x)
        return x

    def _matmat(self, X):
        for A in reversed(self._operator_sequence):
            X = _smart_matrix_product(A, X, structure=self._structure)
        return X

    @property
    def T(self):
        transposed = [A.T for A in reversed(self._operator_sequence)]
        return ProductOperator(*transposed)
def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of A^p.

    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    Notes
    -----
    NOTE(review): `t`, `itmax`, `compute_v` and `compute_w` are accepted for
    interface compatibility but are not forwarded to `onenormest` below, so
    they currently have no effect.
    """
    # Delegate to onenormest with a lazy operator representing A**p.
    return scipy.sparse.linalg.onenormest(
            MatrixPowerOperator(A, p, structure=structure))
def _onenormest_product(operator_seq,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of the matrix product of the args.

    Parameters
    ----------
    operator_seq : linear operator sequence
        Matrices whose 1-norm of product is to be computed.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    structure : str, optional
        A string describing the structure of all operators.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    Notes
    -----
    NOTE(review): `t`, `itmax`, `compute_v` and `compute_w` are accepted for
    interface compatibility but are not forwarded to `onenormest` below, so
    they currently have no effect; `structure` is forwarded.
    """
    # Delegate to onenormest with a lazy operator representing the product.
    return scipy.sparse.linalg.onenormest(
            ProductOperator(*operator_seq, structure=structure))
class _ExpmPadeHelper:
    """
    Help lazily evaluate a matrix exponential.
    The idea is to not do more work than we need for high expm precision,
    so we lazily compute matrix powers and store or precompute
    other properties of the matrix.
    """
    def __init__(self, A, structure=None, use_exact_onenorm=False):
        """
        Initialize the object.
        Parameters
        ----------
        A : a dense or sparse square numpy matrix or ndarray
            The matrix to be exponentiated.
        structure : str, optional
            A string describing the structure of matrix `A`.
            Only `upper_triangular` is currently supported.
        use_exact_onenorm : bool, optional
            If True then only the exact one-norm of matrix powers and products
            will be used. Otherwise, the one-norm of powers and products
            may initially be estimated.
        """
        self.A = A
        # Caches for even powers of A; filled lazily by the properties below.
        self._A2 = None
        self._A4 = None
        self._A6 = None
        self._A8 = None
        self._A10 = None
        # Caches for d_k = ||A^k||_1 ** (1/k): exact values and estimates.
        self._d4_exact = None
        self._d6_exact = None
        self._d8_exact = None
        self._d10_exact = None
        self._d4_approx = None
        self._d6_approx = None
        self._d8_approx = None
        self._d10_approx = None
        self.ident = _ident_like(A)
        self.structure = structure
        self.use_exact_onenorm = use_exact_onenorm
    @property
    def A2(self):
        # A**2, computed once on first access and cached.
        if self._A2 is None:
            self._A2 = _smart_matrix_product(
                    self.A, self.A, structure=self.structure)
        return self._A2
    @property
    def A4(self):
        # A**4 = (A**2)**2, cached.
        if self._A4 is None:
            self._A4 = _smart_matrix_product(
                    self.A2, self.A2, structure=self.structure)
        return self._A4
    @property
    def A6(self):
        # A**6 = A**4 * A**2, cached.
        if self._A6 is None:
            self._A6 = _smart_matrix_product(
                    self.A4, self.A2, structure=self.structure)
        return self._A6
    @property
    def A8(self):
        # A**8 = A**6 * A**2, cached.
        if self._A8 is None:
            self._A8 = _smart_matrix_product(
                    self.A6, self.A2, structure=self.structure)
        return self._A8
    @property
    def A10(self):
        # A**10 = A**4 * A**6, cached.
        if self._A10 is None:
            self._A10 = _smart_matrix_product(
                    self.A4, self.A6, structure=self.structure)
        return self._A10
    @property
    def d4_tight(self):
        # Exact ||A^4||_1 ** (1/4) (forces computation of A4).
        if self._d4_exact is None:
            self._d4_exact = _onenorm(self.A4)**(1/4.)
        return self._d4_exact
    @property
    def d6_tight(self):
        # Exact ||A^6||_1 ** (1/6) (forces computation of A6).
        if self._d6_exact is None:
            self._d6_exact = _onenorm(self.A6)**(1/6.)
        return self._d6_exact
    @property
    def d8_tight(self):
        # Exact ||A^8||_1 ** (1/8) (forces computation of A8).
        if self._d8_exact is None:
            self._d8_exact = _onenorm(self.A8)**(1/8.)
        return self._d8_exact
    @property
    def d10_tight(self):
        # Exact ||A^10||_1 ** (1/10) (forces computation of A10).
        if self._d10_exact is None:
            self._d10_exact = _onenorm(self.A10)**(1/10.)
        return self._d10_exact
    @property
    def d4_loose(self):
        # Exact value if available or requested; otherwise a one-norm
        # estimate that avoids forming A4 explicitly.
        if self.use_exact_onenorm:
            return self.d4_tight
        if self._d4_exact is not None:
            return self._d4_exact
        else:
            if self._d4_approx is None:
                self._d4_approx = _onenormest_matrix_power(self.A2, 2,
                        structure=self.structure)**(1/4.)
            return self._d4_approx
    @property
    def d6_loose(self):
        # Exact value if available or requested; otherwise an estimate.
        if self.use_exact_onenorm:
            return self.d6_tight
        if self._d6_exact is not None:
            return self._d6_exact
        else:
            if self._d6_approx is None:
                self._d6_approx = _onenormest_matrix_power(self.A2, 3,
                        structure=self.structure)**(1/6.)
            return self._d6_approx
    @property
    def d8_loose(self):
        # Exact value if available or requested; otherwise an estimate.
        if self.use_exact_onenorm:
            return self.d8_tight
        if self._d8_exact is not None:
            return self._d8_exact
        else:
            if self._d8_approx is None:
                self._d8_approx = _onenormest_matrix_power(self.A4, 2,
                        structure=self.structure)**(1/8.)
            return self._d8_approx
    @property
    def d10_loose(self):
        # Exact value if available or requested; otherwise an estimate.
        if self.use_exact_onenorm:
            return self.d10_tight
        if self._d10_exact is not None:
            return self._d10_exact
        else:
            if self._d10_approx is None:
                self._d10_approx = _onenormest_product((self.A4, self.A6),
                        structure=self.structure)**(1/10.)
            return self._d10_approx
    def pade3(self):
        # Degree-3 Pade approximant numerator/denominator pieces (U, V).
        b = (120., 60., 12., 1.)
        U = _smart_matrix_product(self.A,
                b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[2]*self.A2 + b[0]*self.ident
        return U, V
    def pade5(self):
        # Degree-5 Pade approximant pieces (U, V).
        b = (30240., 15120., 3360., 420., 30., 1.)
        U = _smart_matrix_product(self.A,
                b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V
    def pade7(self):
        # Degree-7 Pade approximant pieces (U, V).
        b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
        U = _smart_matrix_product(self.A,
                b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V
    def pade9(self):
        # Degree-9 Pade approximant pieces (U, V).
        b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
                2162160., 110880., 3960., 90., 1.)
        U = _smart_matrix_product(self.A,
                (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
                    b[3]*self.A2 + b[1]*self.ident),
                structure=self.structure)
        V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
                b[2]*self.A2 + b[0]*self.ident)
        return U, V
    def pade13_scaled(self, s):
        # Degree-13 Pade approximant pieces (U, V) for the scaled matrix
        # B = A / 2**s; powers of B reuse the cached powers of A.
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
                1187353796428800., 129060195264000., 10559470521600.,
                670442572800., 33522128640., 1323241920., 40840800., 960960.,
                16380., 182., 1.)
        B = self.A * 2**-s
        B2 = self.A2 * 2**(-2*s)
        B4 = self.A4 * 2**(-4*s)
        B6 = self.A6 * 2**(-6*s)
        U2 = _smart_matrix_product(B6,
                b[13]*B6 + b[11]*B4 + b[9]*B2,
                structure=self.structure)
        U = _smart_matrix_product(B,
                (U2 + b[7]*B6 + b[5]*B4 +
                    b[3]*B2 + b[1]*self.ident),
                structure=self.structure)
        V2 = _smart_matrix_product(B6,
                b[12]*B6 + b[10]*B4 + b[8]*B2,
                structure=self.structure)
        V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
        return U, V
def expm(A):
    """
    Compute the matrix exponential using Pade approximation.

    Parameters
    ----------
    A : (M,M) array_like or sparse matrix
        2D Array or Matrix (sparse or dense) to be exponentiated

    Returns
    -------
    expA : (M,M) ndarray
        Matrix exponential of `A`

    Notes
    -----
    Implements algorithm (6.1), a simplification of algorithm (5.1),
    from the reference below.

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm
    >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
    >>> expm(A).toarray()
    array([[ 2.71828183,  0.        ,  0.        ],
           [ 0.        ,  7.3890561 ,  0.        ],
           [ 0.        ,  0.        , 20.08553692]])
    """
    # 'auto' lets the core routine choose between exact and estimated
    # one-norms based on the matrix order.
    return _expm(A, use_exact_onenorm='auto')
def _expm(A, use_exact_onenorm):
    """Compute expm(A) via scaling-and-squaring with Pade approximants.

    `use_exact_onenorm` may be True, False, or "auto" (choose based on the
    matrix order). Separated from `expm` to allow testing exact and
    approximate one-norm algorithms independently.
    """
    # Core of expm, separated to allow testing exact and approximate
    # algorithms.
    # Avoid indiscriminate asarray() to allow sparse or other strange arrays.
    if isinstance(A, (list, tuple, np.matrix)):
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    # gracefully handle size-0 input,
    # carefully handling sparse scenario
    if A.shape == (0, 0):
        out = np.zeros([0, 0], dtype=A.dtype)
        if issparse(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return out
    # Trivial case
    if A.shape == (1, 1):
        out = [[np.exp(A[0, 0])]]
        # Avoid indiscriminate casting to ndarray to
        # allow for sparse or other strange arrays
        if issparse(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return np.array(out)
    # Ensure input is of float type, to avoid integer overflows etc.
    if ((isinstance(A, np.ndarray) or issparse(A) or is_pydata_spmatrix(A))
            and not np.issubdtype(A.dtype, np.inexact)):
        A = A.astype(float)
    # Detect upper triangularity.
    structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
    if use_exact_onenorm == "auto":
        # Hardcode a matrix order threshold for exact vs. estimated one-norms.
        use_exact_onenorm = A.shape[0] < 200
    # Track functions of A to help compute the matrix exponential.
    h = _ExpmPadeHelper(
            A, structure=structure, use_exact_onenorm=use_exact_onenorm)
    # Try Pade order 3. The eta thresholds below are the theta_m constants
    # of the scaling-and-squaring algorithm cited in `expm`'s docstring.
    eta_1 = max(h.d4_loose, h.d6_loose)
    if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
        U, V = h.pade3()
        return _solve_P_Q(U, V, structure=structure)
    # Try Pade order 5.
    eta_2 = max(h.d4_tight, h.d6_loose)
    if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
        U, V = h.pade5()
        return _solve_P_Q(U, V, structure=structure)
    # Try Pade orders 7 and 9.
    eta_3 = max(h.d6_tight, h.d8_loose)
    if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
        U, V = h.pade7()
        return _solve_P_Q(U, V, structure=structure)
    if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
        U, V = h.pade9()
        return _solve_P_Q(U, V, structure=structure)
    # Use Pade order 13.
    eta_4 = max(h.d8_loose, h.d10_loose)
    eta_5 = min(eta_3, eta_4)
    theta_13 = 4.25
    # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
    if eta_5 == 0:
        # Nilpotent special case
        s = 0
    else:
        s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
    s = s + _ell(2**-s * h.A, 13)
    U, V = h.pade13_scaled(s)
    X = _solve_P_Q(U, V, structure=structure)
    if structure == UPPER_TRIANGULAR:
        # Invoke Code Fragment 2.1.
        X = _fragment_2_1(X, h.A, s)
    else:
        # X = r_13(A)^(2^s) by repeated squaring.
        for i in range(s):
            X = X.dot(X)
    return X
def _solve_P_Q(U, V, structure=None):
    """
    A helper function for expm_2009.

    Solve the Pade linear system (V - U) X = (U + V), dispatching on the
    matrix representation (sparse vs. dense) and the declared structure.

    Parameters
    ----------
    U : ndarray
        Pade numerator.
    V : ndarray
        Pade denominator.
    structure : str, optional
        A string describing the structure of both matrices `U` and `V`.
        Only `upper_triangular` is currently supported.

    Notes
    -----
    The `structure` argument is inspired by similar args
    for theano and cvxopt functions.
    """
    numerator = U + V
    denominator = -U + V
    # Sparse inputs always take the sparse solver, regardless of structure.
    if issparse(U) or is_pydata_spmatrix(U):
        return spsolve(denominator, numerator)
    if structure is None:
        return solve(denominator, numerator)
    if structure == UPPER_TRIANGULAR:
        # U and V share the triangular structure, so a triangular solve suffices.
        return solve_triangular(denominator, numerator)
    raise ValueError('unsupported matrix structure: ' + str(structure))
def _exp_sinch(a, x):
"""
Stably evaluate exp(a)*sinh(x)/x
Notes
-----
The strategy of falling back to a sixth order Taylor expansion
was suggested by the Spallation Neutron Source docs
which was found on the internet by google search.
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
The details of the cutoff point and the Horner-like evaluation
was picked without reference to anything in particular.
Note that sinch is not currently implemented in scipy.special,
whereas the "engineer's" definition of sinc is implemented.
The implementation of sinc involves a scaling factor of pi
that distinguishes it from the "mathematician's" version of sinc.
"""
# If x is small then use sixth order Taylor expansion.
# How small is small? I am using the point where the relative error
# of the approximation is less than 1e-14.
# If x is large then directly evaluate sinh(x) / x.
if abs(x) < 0.0135:
x2 = x*x
return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))))
else:
return (np.exp(a + x) - np.exp(a - x)) / (2*x)
def _eq_10_42(lam_1, lam_2, t_12):
    """
    Equation (10.42) of Functions of Matrices: Theory and Computation.

    Notes
    -----
    This is a helper function for _fragment_2_1 of expm_2009.
    Equation (10.42) is on page 251 in the section on Schur algorithms.
    In particular, section 10.4.3 explains the Schur-Parlett algorithm.

    expm([[lam_1, t_12], [0, lam_2]])
    =
    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
     [0, exp(lam_2)]]
    """
    # The plain difference quotient t_12*(exp(lam_1)-exp(lam_2))/(lam_1-lam_2)
    # apparently suffers from cancellation, according to Higham's textbook.
    # Writing it via sinch(x) = sinh(x)/x works around the cancellation.
    mean = 0.5 * (lam_1 + lam_2)
    half_diff = 0.5 * (lam_1 - lam_2)
    return t_12 * _exp_sinch(mean, half_diff)
def _fragment_2_1(X, T, s):
    """
    A helper function for expm_2009.

    During the repeated-squaring phase, recompute the diagonal and first
    superdiagonal of X directly from the triangular matrix T (via exact
    formulas) instead of trusting the squared values, for accuracy.

    Notes
    -----
    The argument X is modified in-place, but this modification is not the same
    as the returned value of the function.
    This function also takes pains to do things in ways that are compatible
    with sparse matrices, for example by avoiding fancy indexing
    and by using methods of the matrices whenever possible instead of
    using functions of the numpy or scipy libraries themselves.
    """
    # Form X = r_m(2^-s T)
    # Replace diag(X) by exp(2^-s diag(T)).
    n = X.shape[0]
    diag_T = np.ravel(T.diagonal().copy())

    # Replace diag(X) by exp(2^-s diag(T)).
    scale = 2 ** -s
    exp_diag = np.exp(scale * diag_T)
    for k in range(n):
        X[k, k] = exp_diag[k]

    # Square s times; after iteration i, X approximates exp(2^-i T).
    for i in range(s-1, -1, -1):
        X = X.dot(X)

        # Replace diag(X) by exp(2^-i diag(T)).
        scale = 2 ** -i
        exp_diag = np.exp(scale * diag_T)
        for k in range(n):
            X[k, k] = exp_diag[k]

        # Replace (first) superdiagonal of X by explicit formula
        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
        # the author's 2008 textbook
        # Functions of Matrices: Theory and Computation.
        for k in range(n-1):
            lam_1 = scale * diag_T[k]
            lam_2 = scale * diag_T[k+1]
            t_12 = scale * T[k, k+1]
            value = _eq_10_42(lam_1, lam_2, t_12)
            X[k, k+1] = value

    # Return the updated X matrix.
    return X
def _ell(A, m):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator.

    Returns
    -------
    value : int
        A value related to a bound.
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
    # They are coefficients of terms of a generating function series expansion.
    abs_c_recip = {
        3: 100800.,
        5: 10059033600.,
        7: 4487938430976000.,
        9: 5914384781877411840000.,
        13: 113250775606021113483283660800000000.,
    }[m]

    # The "unit roundoff" of IEEE double precision arithmetic,
    # explained after Eq. (1.2) of the 2009 expm paper.
    u = 2 ** -53

    # Compute the one-norm of matrix power p of abs(A).
    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2 * m + 1)

    # Treat zero norm as a special case.
    if not A_abs_onenorm:
        return 0

    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
    log2_alpha_div_u = np.log2(alpha / u)
    return max(int(np.ceil(log2_alpha_div_u / (2 * m))), 0)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@sparse@linalg@_matfuncs.py@.PATH_END.py
|
{
"filename": "test_update_traces.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/tests/test_core/test_update_objects/test_update_traces.py",
"type": "Python"
}
|
from unittest import TestCase
import inspect
import copy
import pytest
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from functools import reduce
class TestSelectForEachUpdateTraces(TestCase):
    """Exercise Figure.select_traces / for_each_trace / update_traces
    against a subplot grid mixing cartesian, 3d, polar, secondary-y
    and domain trace types."""

    def setUp(self):
        # 3x2 grid: (1,2) is a 3d scene, (2,2) polar, (2,1) has a
        # secondary y axis, and (3,1) is a domain subplot spanning both columns.
        fig = make_subplots(
            rows=3,
            cols=2,
            specs=[
                [{}, {"type": "scene"}],
                [{"secondary_y": True}, {"type": "polar"}],
                [{"type": "domain", "colspan": 2}, None],
            ],
        ).update(layout={"height": 800})

        # data[0], (1, 1)
        fig.add_scatter(
            mode="markers",
            y=[2, 3, 1],
            name="A",
            marker={"color": "green", "size": 10},
            row=1,
            col=1,
        )

        # data[1], (1, 1)
        fig.add_bar(y=[2, 3, 1], row=1, col=1, name="B")

        # data[2], (2, 1)
        fig.add_scatter(
            mode="lines", y=[1, 2, 0], line={"color": "purple"}, name="C", row=2, col=1
        )

        # data[3], (2, 1)
        fig.add_heatmap(z=[[2, 3, 1], [2, 1, 3], [3, 2, 1]], row=2, col=1, name="D")

        # data[4], (1, 2)
        fig.add_scatter3d(
            x=[0, 0, 0],
            y=[0, 0, 0],
            z=[0, 1, 2],
            mode="markers",
            marker={"color": "green", "size": 10},
            name="E",
            row=1,
            col=2,
        )

        # data[5], (1, 2)
        fig.add_scatter3d(
            x=[0, 0, -1],
            y=[-1, 0, 0],
            z=[0, 1, 2],
            mode="lines",
            line={"color": "purple", "width": 4},
            name="F",
            row=1,
            col=2,
        )

        # data[6], (2, 2)
        fig.add_scatterpolar(
            mode="markers",
            r=[0, 3, 2],
            theta=[0, 20, 87],
            marker={"color": "green", "size": 8},
            name="G",
            row=2,
            col=2,
        )

        # data[7], (2, 2)
        fig.add_scatterpolar(
            mode="lines", r=[0, 3, 2], theta=[20, 87, 111], name="H", row=2, col=2
        )

        # data[8], (3, 1)
        fig.add_parcoords(
            dimensions=[{"values": [1, 2, 3, 2, 1]}, {"values": [3, 2, 1, 3, 2, 1]}],
            line={"color": "purple"},
            name="I",
            row=3,
            col=1,
        )

        # data[9], (2, 1) with secondary_y
        fig.add_scatter(
            mode="lines",
            y=[1, 2, 0],
            line={"color": "purple"},
            name="C",
            row=2,
            col=1,
            secondary_y=True,
        )

        self.fig = fig
        # Same figure round-tripped through a dict: loses the subplot grid
        # bookkeeping that make_subplots maintains.
        self.fig_no_grid = go.Figure(self.fig.to_dict())

    # select_traces and for_each_trace
    # --------------------------------
    def assert_select_traces(
        self,
        expected_inds,
        selector=None,
        row=None,
        col=None,
        secondary_y=None,
        test_no_grid=False,
    ):
        # Select traces on figure initialized with make_subplots
        trace_generator = self.fig.select_traces(
            selector=selector, row=row, col=col, secondary_y=secondary_y
        )
        self.assertTrue(inspect.isgenerator(trace_generator))

        trace_list = list(trace_generator)
        self.assertEqual(trace_list, [self.fig.data[i] for i in expected_inds])

        # Select traces on figure not containing subplot info
        if test_no_grid:
            trace_generator = self.fig_no_grid.select_traces(
                selector=selector, row=row, col=col, secondary_y=secondary_y
            )
            trace_list = list(trace_generator)
            self.assertEqual(
                trace_list, [self.fig_no_grid.data[i] for i in expected_inds]
            )

        # for_each_trace must visit the same traces, in the same order,
        # and return the figure itself for chaining.
        trace_list = []
        for_each_res = self.fig.for_each_trace(
            lambda t: trace_list.append(t),
            selector=selector,
            row=row,
            col=col,
            secondary_y=secondary_y,
        )
        self.assertIs(for_each_res, self.fig)

        self.assertEqual(trace_list, [self.fig.data[i] for i in expected_inds])

    def test_select_by_type(self):
        self.assert_select_traces(
            [0, 2, 9], selector={"type": "scatter"}, test_no_grid=True
        )
        self.assert_select_traces([1], selector={"type": "bar"}, test_no_grid=True)
        self.assert_select_traces([3], selector={"type": "heatmap"}, test_no_grid=True)
        self.assert_select_traces(
            [4, 5], selector={"type": "scatter3d"}, test_no_grid=True
        )
        self.assert_select_traces(
            [6, 7], selector={"type": "scatterpolar"}, test_no_grid=True
        )
        self.assert_select_traces(
            [8], selector={"type": "parcoords"}, test_no_grid=True
        )
        # No pie trace exists in the fixture figure.
        self.assert_select_traces([], selector={"type": "pie"}, test_no_grid=True)

    def test_select_by_grid(self):
        # Row and column
        self.assert_select_traces([0, 1], row=1, col=1)
        self.assert_select_traces([2, 3, 9], row=2, col=1)
        self.assert_select_traces([4, 5], row=1, col=2)
        self.assert_select_traces([6, 7], row=2, col=2)
        self.assert_select_traces([8], row=3, col=1)

        # Row only
        self.assert_select_traces([0, 1, 4, 5], row=1)
        self.assert_select_traces([2, 3, 6, 7, 9], row=2)
        self.assert_select_traces([8], row=3)

        # Col only
        self.assert_select_traces([0, 1, 2, 3, 8, 9], col=1)
        self.assert_select_traces([4, 5, 6, 7], col=2)

    def test_select_by_secondary_y(self):
        # secondary_y=None matches traces on both axes; True/False filter.
        self.assert_select_traces([2, 3, 9], row=2, col=1)
        self.assert_select_traces([2, 3], row=2, col=1, secondary_y=False)
        self.assert_select_traces([9], row=2, col=1, secondary_y=True)

    def test_select_by_property_across_trace_types(self):
        self.assert_select_traces(
            [0, 4, 6], selector={"mode": "markers"}, test_no_grid=True
        )
        self.assert_select_traces(
            [2, 5, 7, 9], selector={"mode": "lines"}, test_no_grid=True
        )
        self.assert_select_traces(
            [0, 4],
            selector={"marker": {"color": "green", "size": 10}},
            test_no_grid=True,
        )

        # Several traces have 'marker.color' == 'green', but they all have
        # additional marker properties so there should be no exact match.
        self.assert_select_traces(
            [], selector={"marker": {"color": "green"}}, test_no_grid=True
        )
        self.assert_select_traces(
            [0, 4, 6], selector={"marker.color": "green"}, test_no_grid=True
        )
        self.assert_select_traces(
            [2, 5, 8, 9], selector={"line.color": "purple"}, test_no_grid=True
        )

    def test_select_property_and_grid(self):
        # (1, 1)
        self.assert_select_traces([0], selector={"mode": "markers"}, row=1, col=1)
        self.assert_select_traces([1], selector={"type": "bar"}, row=1, col=1)

        # (2, 1)
        self.assert_select_traces([2, 9], selector={"mode": "lines"}, row=2, col=1)

        # (1, 2)
        self.assert_select_traces([4], selector={"marker.color": "green"}, row=1, col=2)

        # Valid row/col and valid selector but the intersection is empty
        self.assert_select_traces([], selector={"type": "markers"}, row=3, col=1)

    def test_select_with_function(self):
        # Selectors may also be callables: trace -> bool.
        def _check_trace_key(k, v):
            def f(t):
                try:
                    return t[k] == v
                except LookupError:
                    # Property not present on this trace type.
                    return False

            return f

        # (1, 1)
        self.assert_select_traces(
            [0], selector=_check_trace_key("mode", "markers"), row=1, col=1
        )
        self.assert_select_traces(
            [1], selector=_check_trace_key("type", "bar"), row=1, col=1
        )

        # (2, 1)
        self.assert_select_traces(
            [2, 9], selector=_check_trace_key("mode", "lines"), row=2, col=1
        )

        # (1, 2)
        self.assert_select_traces(
            [4], selector=_check_trace_key("marker.color", "green"), row=1, col=2
        )

        # Valid row/col and valid selector but the intersection is empty
        self.assert_select_traces(
            [], selector=_check_trace_key("type", "markers"), row=3, col=1
        )

    def test_select_traces_type_error(self):
        # A selector that is neither dict, callable, int, nor str is rejected.
        with self.assertRaises(TypeError):
            self.assert_select_traces([0], selector=123.456, row=1, col=1)

    def test_for_each_trace_lowercase_names(self):
        # Names are all uppercase to start
        original_names = [t.name for t in self.fig.data]
        self.assertTrue([str.isupper(n) for n in original_names])

        # Lower case names
        result_fig = self.fig.for_each_trace(lambda t: t.update(name=t.name.lower()))

        # Check chaining
        self.assertIs(result_fig, self.fig)

        # Check that names were altered
        self.assertTrue(
            all([t.name == n.lower() for t, n in zip(result_fig.data, original_names)])
        )

    # test update_traces
    # ------------------
    def assert_update_traces(
        self,
        expected_inds,
        patch=None,
        selector=None,
        row=None,
        col=None,
        secondary_y=None,
        **kwargs,
    ):
        # Save off original figure
        fig_orig = copy.deepcopy(self.fig)
        for trace1, trace2 in zip(fig_orig.data, self.fig.data):
            # uids are regenerated by deepcopy; realign them so trace
            # comparisons below reflect real property differences only.
            trace1.uid = trace2.uid

        # Perform update
        update_res = self.fig.update_traces(
            patch,
            selector=selector,
            row=row,
            col=col,
            secondary_y=secondary_y,
            **kwargs,
        )

        # Check chaining support
        self.assertIs(update_res, self.fig)

        # Check resulting traces
        for i, (t_orig, t) in enumerate(zip(fig_orig.data, self.fig.data)):
            if i in expected_inds:
                # A selected trace must actually have changed...
                self.assertNotEqual(t_orig, t)

                # ...and applying the same patch to the pristine copy
                t_orig.update(patch, **kwargs)

                # ...must reproduce the updated trace exactly.
                self.assertEqual(t_orig, t)

    def test_update_traces_by_type(self):
        self.assert_update_traces(
            [0, 2, 9], {"visible": "legendonly"}, selector={"type": "scatter"}
        )
        self.assert_update_traces(
            [0, 2, 9], selector={"type": "scatter"}, visible=False
        )
        self.assert_update_traces(
            [1], {"visible": "legendonly"}, selector={"type": "bar"}
        )
        self.assert_update_traces(
            [3], {"colorscale": "Viridis"}, selector={"type": "heatmap"}
        )

        # Nested dictionaries
        self.assert_update_traces(
            [4, 5],
            {"marker": {"line": {"color": "yellow"}}},
            selector={"type": "scatter3d"},
        )

        # dot syntax
        self.assert_update_traces(
            [4, 5], {"marker.line.color": "cyan"}, selector={"type": "scatter3d"}
        )

        # underscore syntax
        self.assert_update_traces(
            [4, 5], dict(marker_line_color="pink"), selector={"type": "scatter3d"}
        )

        # underscore syntax with kwarg
        self.assert_update_traces(
            [4, 5], selector={"type": "scatter3d"}, marker_line_color="red"
        )

        self.assert_update_traces(
            [6, 7], {"line": {"dash": "dot"}}, selector={"type": "scatterpolar"}
        )

        # Nested dictionaries
        self.assert_update_traces(
            [8],
            {"dimensions": {1: {"label": "Dimension 1"}}},
            selector={"type": "parcoords"},
        )

        # Dot syntax
        self.assert_update_traces(
            [8], {"dimensions[1].label": "Dimension A"}, selector={"type": "parcoords"}
        )

        # underscore syntax
        self.assert_update_traces(
            [8], dict(dimensions_1_label="Dimension X"), selector={"type": "parcoords"}
        )

        # No pie traces exist, so nothing should be updated.
        self.assert_update_traces(
            [], {"hoverinfo": "label+percent"}, selector={"type": "pie"}
        )

    def test_update_traces_by_grid_and_selector(self):
        self.assert_update_traces(
            [4, 6], {"marker.size": 5}, selector={"marker.color": "green"}, col=2
        )
        self.assert_update_traces(
            [0, 4], {"marker.size": 6}, selector={"marker.color": "green"}, row=1
        )
        self.assert_update_traces(
            [6], {"marker.size": 6}, selector={"marker.color": "green"}, row=2, col=2
        )
        self.assert_update_traces([9], {"marker.size": 6}, col=1, secondary_y=True)

    def test_update_traces_overwrite(self):
        # overwrite=True replaces the marker object wholesale instead of
        # merging, so the pre-existing marker.line.color is dropped.
        fig = go.Figure(
            data=[go.Scatter(marker_line_color="red"), go.Bar(marker_line_color="red")]
        )

        fig.update_traces(overwrite=True, marker={"line": {"width": 10}})

        self.assertEqual(
            fig.to_plotly_json()["data"],
            [
                {"type": "scatter", "marker": {"line": {"width": 10}}},
                {"type": "bar", "marker": {"line": {"width": 10}}},
            ],
        )
@pytest.fixture
def select_traces_fixture():
    """Figure with three scatters at (2, 3) and scatter/bar/bar at (1, 3)."""
    fig = make_subplots(2, 3)
    # Three scatter traces on the (2, 3) subplot with y[1] = 0, 1, 2.
    for idx in range(3):
        fig.add_trace(go.Scatter(x=[1, 2], y=[3, idx]), row=2, col=3)
    # A scatter and two bars on the (1, 3) subplot with y[1] = 0, 10, 20.
    for idx, trace_cls in zip(range(3), [go.Scatter, go.Bar, go.Bar]):
        fig.add_trace(trace_cls(x=[1, 2], y=[3, 10 * idx]), row=1, col=3)
    return fig
def test_select_traces_integer(select_traces_fixture):
    """Integer selectors index into the (optionally grid-filtered) trace list."""
    fig = select_traces_fixture

    # Selector 0 picks exactly the first trace.
    selected = list(fig.select_traces(selector=0))
    assert len(selected) == 1
    assert selected[0].y[1] == 0

    # Negative indices count from the end of all traces...
    last_overall = list(fig.select_traces(selector=-1))[0]
    assert last_overall.y[1] == 20

    # ...and from the end of the traces on a given subplot.
    last_in_cell = list(fig.select_traces(selector=-1, row=2, col=3))[0]
    assert last_in_cell.y[1] == 2

    # Out-of-bounds indices raise IndexError.
    with pytest.raises(IndexError):
        list(fig.select_traces(selector=6))[0]
def test_select_traces_string(select_traces_fixture):
    """String selectors match traces by their `type` property."""
    fig = select_traces_fixture

    def _matches(traces, trace_type, expected_y1):
        # Every selected trace has the requested type and the expected y[1].
        return all(
            t["type"] == trace_type and t["y"][1] == want
            for t, want in zip(traces, expected_y1)
        )

    # Selecting by a bare type string finds both bar traces.
    bars = list(fig.select_traces(selector="bar"))
    assert len(bars) == 2 and _matches(bars, "bar", [10, 20])

    # Scatter traces are found regardless of the subplot they are on.
    scatters = list(fig.select_traces(selector="scatter"))
    assert len(scatters) == 4 and _matches(scatters, "scatter", [0, 1, 2, 0])

    # Row/col restricts the type match to a single subplot.
    cell_scatters = list(fig.select_traces(row=2, col=3, selector="scatter"))
    assert len(cell_scatters) == 3 and _matches(cell_scatters, "scatter", [0, 1, 2])

    # A type string matching no trace yields an empty selection.
    assert len(list(fig.select_traces(selector="bogus"))) == 0
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@tests@test_core@test_update_objects@test_update_traces.py@.PATH_END.py
|
{
"filename": "version.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/setuptools/py3/setuptools/version.py",
"type": "Python"
}
|
from ._importlib import metadata

# Resolve the installed setuptools version from package metadata, falling
# back to a PEP 440 dev placeholder when metadata is unavailable or empty
# (e.g. when running from an unbuilt source checkout).
try:
    __version__ = metadata.version('setuptools') or '0.dev0+unknown'
except Exception:
    __version__ = '0.dev0+unknown'
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@setuptools@py3@setuptools@version.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.