metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "__init__.py",
"repo_name": "PrincetonUniversity/athena",
"repo_path": "athena_extracted/athena-master/tst/regression/scripts/tests/scalars/__init__.py",
"type": "Python"
}
|
PrincetonUniversityREPO_NAMEathenaPATH_START.@athena_extracted@athena-master@tst@regression@scripts@tests@scalars@__init__.py@.PATH_END.py
|
|
{
"filename": "_textcase.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/coloraxis/colorbar/tickfont/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``textcase`` property of
    ``layout.coloraxis.colorbar.tickfont``.

    Accepts one of: "normal", "word caps", "upper", "lower".
    """

    def __init__(
        self,
        plotly_name="textcase",
        parent_name="layout.coloraxis.colorbar.tickfont",
        **kwargs,
    ):
        # Callers may override the defaults via kwargs; pop them so the
        # remaining kwargs pass through to the base validator untouched.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["normal", "word caps", "upper", "lower"])
        super(TextcaseValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@coloraxis@colorbar@tickfont@_textcase.py@.PATH_END.py
|
{
"filename": "numeric.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/typing/tests/data/pass/numeric.py",
"type": "Python"
}
|
"""
Tests for :mod:`numpy._core.numeric`.
Does not include tests which fall under ``array_constructors``.
"""
from __future__ import annotations
from typing import cast
import numpy as np
import numpy.typing as npt
class SubClass(npt.NDArray[np.float64]):
...
i8 = np.int64(1)
A = cast(
np.ndarray[tuple[int, int, int], np.dtype[np.intp]],
np.arange(27).reshape(3, 3, 3),
)
B: list[list[list[int]]] = A.tolist()
C = np.empty((27, 27)).view(SubClass)
np.count_nonzero(i8)
np.count_nonzero(A)
np.count_nonzero(B)
np.count_nonzero(A, keepdims=True)
np.count_nonzero(A, axis=0)
np.isfortran(i8)
np.isfortran(A)
np.argwhere(i8)
np.argwhere(A)
np.flatnonzero(i8)
np.flatnonzero(A)
np.correlate(B[0][0], A.ravel(), mode="valid")
np.correlate(A.ravel(), A.ravel(), mode="same")
np.convolve(B[0][0], A.ravel(), mode="valid")
np.convolve(A.ravel(), A.ravel(), mode="same")
np.outer(i8, A)
np.outer(B, A)
np.outer(A, A)
np.outer(A, A, out=C)
np.tensordot(B, A)
np.tensordot(A, A)
np.tensordot(A, A, axes=0)
np.tensordot(A, A, axes=(0, 1))
np.isscalar(i8)
np.isscalar(A)
np.isscalar(B)
np.roll(A, 1)
np.roll(A, (1, 2))
np.roll(B, 1)
np.rollaxis(A, 0, 1)
np.moveaxis(A, 0, 1)
np.moveaxis(A, (0, 1), (1, 2))
np.cross(B, A)
np.cross(A, A)
np.indices([0, 1, 2])
np.indices([0, 1, 2], sparse=False)
np.indices([0, 1, 2], sparse=True)
np.binary_repr(1)
np.base_repr(1)
np.allclose(i8, A)
np.allclose(B, A)
np.allclose(A, A)
np.isclose(i8, A)
np.isclose(B, A)
np.isclose(A, A)
np.array_equal(i8, A)
np.array_equal(B, A)
np.array_equal(A, A)
np.array_equiv(i8, A)
np.array_equiv(B, A)
np.array_equiv(A, A)
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@typing@tests@data@pass@numeric.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/bar/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# On Python < 3.7 there is no module-level __getattr__ (PEP 562), and static
# type checkers need the names visible, so import everything eagerly in those
# cases. Otherwise install a lazy loader so importing this package stays cheap.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._error_x import ErrorX
    from ._error_y import ErrorY
    from ._hoverlabel import Hoverlabel
    from ._insidetextfont import Insidetextfont
    from ._legendgrouptitle import Legendgrouptitle
    from ._marker import Marker
    from ._outsidetextfont import Outsidetextfont
    from ._selected import Selected
    from ._stream import Stream
    from ._textfont import Textfont
    from ._unselected import Unselected
    from . import hoverlabel
    from . import legendgrouptitle
    from . import marker
    from . import selected
    from . import unselected
else:
    # relative_import builds __all__ plus lazy __getattr__/__dir__ hooks that
    # resolve the listed submodules and classes on first attribute access.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".hoverlabel", ".legendgrouptitle", ".marker", ".selected", ".unselected"],
        [
            "._error_x.ErrorX",
            "._error_y.ErrorY",
            "._hoverlabel.Hoverlabel",
            "._insidetextfont.Insidetextfont",
            "._legendgrouptitle.Legendgrouptitle",
            "._marker.Marker",
            "._outsidetextfont.Outsidetextfont",
            "._selected.Selected",
            "._stream.Stream",
            "._textfont.Textfont",
            "._unselected.Unselected",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@bar@__init__.py@.PATH_END.py
|
{
"filename": "hubconf.py",
"repo_name": "ultralytics/yolov5",
"repo_path": "yolov5_extracted/yolov5-master/hubconf.py",
"type": "Python"
}
|
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
"""
PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5.
Usage:
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model
model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch
model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model
model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo
"""
import torch
def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """
    Creates or loads a YOLOv5 model, with options for pretrained weights and model customization.

    Args:
        name (str): Model name (e.g., 'yolov5s') or path to the model checkpoint (e.g., 'path/to/best.pt').
        pretrained (bool, optional): If True, loads pretrained weights into the model. Defaults to True.
        channels (int, optional): Number of input channels the model expects. Defaults to 3.
        classes (int, optional): Number of classes the model is expected to detect. Defaults to 80.
        autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper for various input formats. Defaults to True.
        verbose (bool, optional): If True, prints detailed information during the model creation/loading process. Defaults to True.
        device (str | torch.device | None, optional): Device to use for model parameters (e.g., 'cpu', 'cuda'). If None, selects
            the best available device. Defaults to None.

    Returns:
        (DetectMultiBackend | AutoShape): The loaded YOLOv5 model, potentially wrapped with AutoShape if specified.

    Raises:
        Exception: Re-raised with a cache/force_reload hint if any step of loading fails.

    Notes:
        For more information on model loading and customization, visit the
        [YOLOv5 PyTorch Hub Documentation](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading).
    """
    # Imports are deferred so that `import hubconf` stays cheap and torch.hub
    # can install requirements before the heavy project modules load.
    from pathlib import Path

    from models.common import AutoShape, DetectMultiBackend
    from models.experimental import attempt_load
    from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
    from utils.downloads import attempt_download
    from utils.general import LOGGER, ROOT, check_requirements, intersect_dicts, logging
    from utils.torch_utils import select_device

    if not verbose:
        LOGGER.setLevel(logging.WARNING)
    check_requirements(ROOT / "requirements.txt", exclude=("opencv-python", "tensorboard", "thop"))
    name = Path(name)
    path = name.with_suffix(".pt") if name.suffix == "" and not name.is_dir() else name  # checkpoint path
    try:
        device = select_device(device)
        if pretrained and channels == 3 and classes == 80:
            # Default configuration: try the multi-backend loader first and
            # fall back to attempt_load for arbitrary checkpoints.
            try:
                model = DetectMultiBackend(path, device=device, fuse=autoshape)  # detection model
                if autoshape:
                    if model.pt and isinstance(model.model, ClassificationModel):
                        LOGGER.warning(
                            "WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. "
                            "You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224)."
                        )
                    elif model.pt and isinstance(model.model, SegmentationModel):
                        LOGGER.warning(
                            "WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. "
                            "You will not be able to run inference with this model."
                        )
                    else:
                        model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
            except Exception:
                model = attempt_load(path, device=device, fuse=False)  # arbitrary model
        else:
            # Custom channels/classes: build the model from its YAML config,
            # then optionally transfer compatible pretrained weights.
            cfg = list((Path(__file__).parent / "models").rglob(f"{path.stem}.yaml"))[0]  # model.yaml path
            model = DetectionModel(cfg, channels, classes)  # create model
            if pretrained:
                ckpt = torch.load(attempt_download(path), map_location=device)  # load
                csd = ckpt["model"].float().state_dict()  # checkpoint state_dict as FP32
                csd = intersect_dicts(csd, model.state_dict(), exclude=["anchors"])  # intersect
                model.load_state_dict(csd, strict=False)  # load
                if len(ckpt["model"].names) == classes:
                    model.names = ckpt["model"].names  # set class names attribute
        if not verbose:
            # NOTE(review): restores INFO unconditionally rather than the
            # logger's previous level — confirm that is the intended default.
            LOGGER.setLevel(logging.INFO)  # reset to default
        return model.to(device)
    except Exception as e:
        help_url = "https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading"
        s = f"{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help."
        raise Exception(s) from e
def custom(path="path/to/model.pt", autoshape=True, _verbose=True, device=None):
    """Load a custom or local YOLOv5 model from a checkpoint path.

    Args:
        path (str): Path to the custom model file (e.g., 'path/to/model.pt').
        autoshape (bool): Wrap the model with AutoShape for flexible input
            formats (file/URI/PIL/cv2/np). Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device to load the model on; None
            selects the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The loaded YOLOv5 model.

    Example:
        ```python
        model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')
        ```
    """
    model = _create(path, autoshape=autoshape, verbose=_verbose, device=device)
    return model
def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-nano model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-nano model.
    """
    return _create(
        "yolov5n",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-small model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-small model.

    Example:
        ```python
        model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
        ```
    """
    return _create(
        "yolov5s",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-medium model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-medium model.
    """
    return _create(
        "yolov5m",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-large model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-large model.
    """
    return _create(
        "yolov5l",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-xlarge model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-xlarge model.
    """
    return _create(
        "yolov5x",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-nano-P6 model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-nano-P6 model.
    """
    return _create(
        "yolov5n6",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-small-P6 model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-small-P6 model.
    """
    return _create(
        "yolov5s6",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-medium-P6 model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-medium-P6 model.
    """
    return _create(
        "yolov5m6",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-large-P6 model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-large-P6 model.
    """
    return _create(
        "yolov5l6",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    """Instantiate the YOLOv5-xlarge-P6 model.

    Args:
        pretrained (bool): Load pretrained weights into the model. Defaults to True.
        channels (int): Number of input channels. Defaults to 3.
        classes (int): Number of detection classes. Defaults to 80.
        autoshape (bool): Wrap the model with AutoShape for flexible input formats. Defaults to True.
        _verbose (bool): Print detailed information during loading. Defaults to True.
        device (str | torch.device | None): Device for model parameters; None selects
            the best available device. Defaults to None.

    Returns:
        torch.nn.Module: The configured YOLOv5-xlarge-P6 model.
    """
    return _create(
        "yolov5x6",
        pretrained=pretrained,
        channels=channels,
        classes=classes,
        autoshape=autoshape,
        verbose=_verbose,
        device=device,
    )
if __name__ == "__main__":
import argparse
from pathlib import Path
import numpy as np
from PIL import Image
from utils.general import cv2, print_args
# Argparser
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="yolov5s", help="model name")
opt = parser.parse_args()
print_args(vars(opt))
# Model
model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)
# model = custom(path='path/to/model.pt') # custom
# Images
imgs = [
"data/images/zidane.jpg", # filename
Path("data/images/zidane.jpg"), # Path
"https://ultralytics.com/images/zidane.jpg", # URI
cv2.imread("data/images/bus.jpg")[:, :, ::-1], # OpenCV
Image.open("data/images/bus.jpg"), # PIL
np.zeros((320, 640, 3)),
] # numpy
# Inference
results = model(imgs, size=320) # batched inference
# Results
results.print()
results.save()
|
ultralyticsREPO_NAMEyolov5PATH_START.@yolov5_extracted@yolov5-master@hubconf.py@.PATH_END.py
|
{
"filename": "ClassCOMPAS.py",
"repo_name": "FloorBroekgaarden/Double-Compact-Object-Mergers",
"repo_path": "Double-Compact-Object-Mergers_extracted/Double-Compact-Object-Mergers-main/demo_read_hdf5_file/ClassCOMPAS.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import numpy as np
import h5py as h5
import os
import totalMassEvolvedPerZ as MPZ
import astropy.units as u
class COMPASData(object):
    def __init__(
        self,
        path=None,
        lazyData=True,
        Mlower=None,
        Mupper=None,
        m2_min=None,
        binaryFraction=None,
        suppress_reminder=False,
    ):
        """Container for double-compact-object data read from a COMPAS h5 file.

        Args:
            path: Path to the COMPAS h5 file; None creates an empty template object.
            lazyData: If True, also store convenience arrays (mChirp, q, Hubble flag).
            Mlower: Lower limit of the sampled primary-mass range [Msun].
            Mupper: Upper limit of the sampled primary-mass range [Msun].
            m2_min: Minimum secondary mass [Msun].
            binaryFraction: Assumed binary fraction of the simulated population.
            suppress_reminder: If True, skip the usage-reminder printout.

        Raises:
            ValueError: If ``path`` is given but is not an existing file.
        """
        self.path = path
        if self.path is None:
            print("Template COMPASData object created with no data path")
        elif not os.path.isfile(path):
            raise ValueError( "h5 file not found. Wrong path given? {}".format(path))

        # Crucial values to be able to calculate MSSFR
        self.metallicityGrid = None
        self.metallicitySystems = None
        self.delayTimes = None  # Myr

        # Crucial values I need for selection effects
        self.mass1 = None  # Msun
        self.mass2 = None  # Msun
        self.DCOmask = None
        self.allTypesMask = None
        self.BBHmask = None
        self.DNSmask = None
        self.BHNSmask = None
        self.CHE_mask = None
        self.CHE_BBHmask = None
        self.NonCHE_BBHmask = None
        self.initialZ = None
        self.sw_weights = None
        self.n_systems = None

        # Additional arrays that might be nice to store
        # to more quickly make some plots.
        # If you need more memory might help a tiny bit to not do
        self.lazyData = lazyData
        self.mChirp = None  # Msun
        self.q = None
        self.optimisticmask = None

        # Needed to recover true solar mass evolved
        self.Mlower = Mlower  # Msun
        self.Mupper = Mupper  # Msun
        self.m2_min = m2_min  # Msun
        self.binaryFraction = binaryFraction
        self.totalMassEvolvedPerZ = None  # Msun
        self.mass_evolved_per_binary = None  # Msun

        if not suppress_reminder:
            print("ClassCOMPAS: Remember to self.setCOMPASDCOmask()")
            print("             then self.setCOMPASData()")
            print("             and optionally self.setGridAndMassEvolved() if using a metallicity grid")
    def setCOMPASDCOmask(
        self, types="BBH", withinHubbleTime=True, pessimistic=True, noRLOFafterCEE=True
    ):
        """Build boolean masks over the doubleCompactObjects table and store them
        on the instance (DCOmask, BBHmask, DNSmask, BHNSmask, CHE masks, ...).

        Args:
            types: DCO type to select — "all", "BBH", "BHNS", "BNS",
                "CHE_BBH" or "NON_CHE_BBH".
            withinHubbleTime: If True, keep only systems merging within a Hubble time.
            pessimistic: If True, drop systems flagged with an optimistic CE.
            noRLOFafterCEE: If True, drop systems with immediate RLOF after a CEE.
        """
        # By default, we mask for BBHs that merge within a Hubble time, assumming
        # the pessimistic CEE prescription (HG donors cannot survive a CEE) and
        # not allowing immediate RLOF post-CEE
        stellar_type_1, stellar_type_2, hubble_flag, dco_seeds = \
            self.get_COMPAS_variables("doubleCompactObjects", ["stellarType1", "stellarType2", "mergesInHubbleTimeFlag", "seed"])
        if types == "CHE_BBH" or types == "NON_CHE_BBH":
            # NOTE(review): five unpack targets but only three variable names
            # requested — this raises ValueError as written. The CH-on-MS flag
            # columns appear to be missing from the list; confirm the correct
            # COMPAS column names and add them.
            stellar_type_1_zams, stellar_type_2_zams, che_ms_1, che_ms_2, sys_seeds = \
                self.get_COMPAS_variables("systems", ["Stellar_Type@ZAMS(1)", "Stellar_Type@ZAMS(2)", "SEED"])
            # CHE systems: both stars are type 16 at ZAMS and both CH on the MS.
            che_mask = np.logical_and.reduce((stellar_type_1_zams == 16, stellar_type_2_zams == 16, che_ms_1 == True, che_ms_2 == True))
            che_seeds = sys_seeds[()][che_mask]
        self.CHE_mask = np.in1d(dco_seeds, che_seeds) if types == "CHE_BBH" or types == "NON_CHE_BBH" else np.repeat(False, len(dco_seeds)) #// Floor

        # if user wants to mask on Hubble time use the flag, otherwise just set all to True
        hubble_mask = hubble_flag.astype(bool) if withinHubbleTime else np.repeat(True, len(dco_seeds))

        # mask on stellar types (where 14=BH and 13=NS), BHNS can be BHNS or NSBH
        type_masks = {
            "all": np.repeat(True, len(dco_seeds)),
            "BBH": np.logical_and(stellar_type_1 == 14, stellar_type_2 == 14),
            "BHNS": np.logical_or(np.logical_and(stellar_type_1 == 14, stellar_type_2 == 13), np.logical_and(stellar_type_1 == 13, stellar_type_2 == 14)),
            "BNS": np.logical_and(stellar_type_1 == 13, stellar_type_2 == 13),
        }
        type_masks["CHE_BBH"] = np.logical_and(self.CHE_mask, type_masks["BBH"]) if types == "CHE_BBH" else np.repeat(False, len(dco_seeds))
        type_masks["NON_CHE_BBH"] = np.logical_and(np.logical_not(self.CHE_mask), type_masks["BBH"]) if types == "NON_CHE_BBH" else np.repeat(True, len(dco_seeds))

        # if the user wants to make RLOF or optimistic CEs
        if noRLOFafterCEE or pessimistic:
            # get the flags and unique seeds from the Common Envelopes file
            ce_seeds = self.get_COMPAS_variables("commonEnvelopes", "randomSeed")
            dco_from_ce = np.in1d(ce_seeds, dco_seeds)
            dco_ce_seeds = ce_seeds[dco_from_ce]

            # if masking on RLOF, get flag and match seeds to dco seeds
            if noRLOFafterCEE:
                rlof_flag = self.get_COMPAS_variables("commonEnvelopes", "immediateRLOFAfterCEE")[dco_from_ce].astype(bool)
                rlof_seeds = np.unique(dco_ce_seeds[rlof_flag])
                rlof_mask = np.logical_not(np.in1d(dco_seeds, rlof_seeds))
            else:
                rlof_mask = np.repeat(True, len(dco_seeds))

            # if masking on pessimistic CE, get flag and match seeds to dco seeds
            if pessimistic:
                pessimistic_flag = self.get_COMPAS_variables("commonEnvelopes", "optimisticCommonEnvelopeFlag")[dco_from_ce].astype(bool)
                pessimistic_seeds = np.unique(dco_ce_seeds[pessimistic_flag])
                pessimistic_mask = np.logical_not(np.in1d(dco_seeds, pessimistic_seeds))
            else:
                pessimistic_mask = np.repeat(True, len(dco_seeds))
        else:
            rlof_mask = np.repeat(True, len(dco_seeds))
            pessimistic_mask = np.repeat(True, len(dco_seeds))

        # create a mask for each dco type supplied
        self.DCOmask = type_masks[types] * hubble_mask * rlof_mask * pessimistic_mask
        self.BBHmask = type_masks["BBH"] * hubble_mask * rlof_mask * pessimistic_mask
        self.BHNSmask = type_masks["BHNS"] * hubble_mask * rlof_mask * pessimistic_mask
        self.DNSmask = type_masks["BNS"] * hubble_mask * rlof_mask * pessimistic_mask
        self.CHE_BBHmask = type_masks["CHE_BBH"] * hubble_mask * rlof_mask * pessimistic_mask
        self.NonCHE_BBHmask = type_masks["NON_CHE_BBH"] * hubble_mask * rlof_mask * pessimistic_mask
        self.allTypesMask = type_masks["all"] * hubble_mask * rlof_mask * pessimistic_mask
        self.optimisticmask = pessimistic_mask
def setGridAndMassEvolved(self):
# The COMPAS simulation does not evolve all stars
# give me the correction factor for the total mass evolved
# I assume each metallicity has the same limits, and does correction
# factor, but the total mass evolved might be different.
# This does not change when we change types and other masks this is
# general to the entire simulation so calculate once
_, self.totalMassEvolvedPerZ = MPZ.totalMassEvolvedPerZ(
path=self.path,
Mlower=self.Mlower,
Mupper=self.Mupper,
binaryFraction=self.binaryFraction,
)
# Want to recover entire metallicity grid, assume that every metallicity
# evolved shows in all systems again should not change within same run
# so dont redo if we reset the data
Data = h5.File(self.path, "r")
if self.initialZ is None:
self.initialZ = Data["systems"]["Metallicity1"][()]
self.metallicityGrid = np.unique(self.initialZ)
Data.close()
def setCOMPASData(self):
primary_masses, secondary_masses, formation_times, coalescence_times, dco_seeds = \
self.get_COMPAS_variables('doubleCompactObjects', ['M1', "M2", "tform", "tc", "seed"])
initial_seeds, initial_Z = self.get_COMPAS_variables("systems", ["SEED", "Metallicity1"])
# Get metallicity grid of DCOs
self.seedsDCO = dco_seeds[self.DCOmask]
if self.initialZ is None:
self.initialZ = initial_Z
maskMetallicity = np.in1d(initial_seeds, self.seedsDCO)
self.metallicitySystems = self.initialZ[maskMetallicity]
self.n_systems = len(initial_seeds)
self.delayTimes = np.add(formation_times[self.DCOmask], coalescence_times[self.DCOmask])
self.mass1 = primary_masses[self.DCOmask]
self.mass2 = secondary_masses[self.DCOmask]
# Stuff of data I dont need for integral
# but I might be to laze to read in myself
# and often use. Might turn it of for memory efficiency
if self.lazyData:
self.q = np.divide(self.mass2, self.mass1)
boolq = self.mass2 > self.mass1
self.q[boolq] = np.divide(self.mass1[boolq], self.mass2[boolq])
self.mChirp = np.divide(
(np.multiply(self.mass2, self.mass1) ** (3.0 / 5.0)),
(np.add(self.mass2, self.mass1) ** (1.0 / 5.0)),
)
self.Hubble = self.get_COMPAS_variables("doubleCompactObjects", "mergesInHubbleTimeFlag")[self.DCOmask]
def recalculateTrueSolarMassEvolved(self, Mlower, Mupper, binaryFraction):
# Possibility to test assumptions of True solar mass evolved
self.Mlower = Mlower
self.Mupper = Mupper
self.binaryFraction = binaryFraction
_, self.totalMassEvolvedPerZ = MPZ.totalMassEvolvedPerZ(
pathCOMPASh5=self.path,
Mlower=self.Mlower,
Mupper=self.Mupper,
binaryFraction=self.binaryFraction,
)
def get_COMPAS_variables(self, hdf5_file, var_names):
"""
Get a variable or variables from a COMPAS file
Args:
hdf5_file --> [string] Name of HDF5 subfile (e.g. "doubleCompactObjects")
var_names --> [string or string list] A variable name or list of variables names to return
Returns:
var_list --> [list of lists] A list of variables (or a single variable if only one name supplied)
"""
# open the COMPAS file
with h5.File(self.path, "r") as compas_file:
# if the list is only a string (i.e. one variable) then don't return a list
if isinstance(var_names, str):
return compas_file[hdf5_file][var_names][...].squeeze()
# else return each variable in a list
else:
return [compas_file[hdf5_file][var_name][...].squeeze() for var_name in var_names]
def set_sw_weights(self, column_name):
""" Set STROOPWAFEL adaptive sampling weights given a column name in the BSE_Double_Compact_Objects file """
if column_name is not None:
self.sw_weights = self.get_COMPAS_variables("doubleCompactObjects", column_name)[self.DCOmask]
def find_star_forming_mass_per_binary_sampling(self, m1=0.01, m2=0.08, m3=0.5, m4=200.0, a12=0.3, a23=1.3, a34=2.3,
primary_mass_inverse_CDF=None, mass_ratio_inverse_CDF=None, SAMPLES=20000000):
"""
Calculate the star forming mass evolved for each binary in the file.
This function does this by sampling from the IMF and mass ratio distributions
Args:
mi --> [float] masses at which to transition the slope of the IMF (ignored if primary_mass_inverse_CDF is not None)
aij --> [float] slope of the IMF between mi and mj (ignored if primary_mass_inverse_CDF is not None)
primary_mass_inverse_CDF --> [function] a function that computes the inverse CDF functoin for the primary mass distribution
this defaults to the Kroupa IMF (which can be varied using mi, aij)
mass_ratio_inverse_CDF --> [function] a function that computes the inverse CDF function for the mass ratio distribution
this defaults to assuming a uniform mass ratio on [0, 1]
SAMPLES --> [int] number of samples to draw when creating a mock universe
"""
# if primary mass inverse CDF is None, assume the Kroupa IMF
if primary_mass_inverse_CDF is None:
primary_mass_inverse_CDF = lambda U: inverse_CDF_IMF(U, m1=m1, m2=m2, m3=m3, m4=m4, a12=a12, a23=a23, a34=a34)
# if mass ratio inverse CDF function is None, assume uniform
if mass_ratio_inverse_CDF is None:
mass_ratio_inverse_CDF = lambda q: q
# randomly sample a large number of masses from IMF, mass ratios from supplied function, binary for boolean
primary_mass = primary_mass_inverse_CDF(np.random.rand(SAMPLES)) * u.Msun
mass_ratio = mass_ratio_inverse_CDF(np.random.rand(SAMPLES))
binary = np.random.rand(SAMPLES)
# only fbin fraction of stars have a secondary (in a binary)
binary_mask = binary < self.binaryFraction
# assign each a random secondary mass, default 0 because single stars have m2=0 (surprisingly :P)
secondary_mass = np.zeros(SAMPLES) * u.Msun
secondary_mass[binary_mask] = primary_mass[binary_mask] * mass_ratio[binary_mask]
# find the total mass of the whole population
total_mass = np.sum(primary_mass) + np.sum(secondary_mass)
# apply the COMPAS cuts on primary and secondary mass
primary_mask = np.logical_and(primary_mass >= self.Mlower, primary_mass <= self.Mupper)
secondary_mask = secondary_mass > self.m2_min
full_mask = np.logical_and(primary_mask, secondary_mask)
# find the total mass with COMPAS cuts
total_mass_COMPAS = np.sum(primary_mass[full_mask]) + np.sum(secondary_mass[full_mask])
# use the totals to find the ratio and return the average mass as well
f_mass_sampled = total_mass_COMPAS / total_mass
average_mass_COMPAS = total_mass_COMPAS / len(primary_mass[full_mask])
# find the average star forming mass evolved per binary in the Universe
self.mass_evolved_per_binary = average_mass_COMPAS / f_mass_sampled
# ============================================== #
# Initial Mass Function PDF, CDF and inverse CDF #
# ============================================== #
def IMF(m, m1=0.01, m2=0.08, m3=0.5, m4=200.0, a12=0.3, a23=1.3, a34=2.3):
    """
    Evaluate a three-part broken power-law initial mass function,
    zeta(m) ~ m^(-a_ij). Default parameters follow Kroupa (2001).

    Args:
        m   --> [scalar or array-like of floats] mass or masses at which to evaluate
        mi  --> [float] masses at which to transition the slope
        aij --> [float] slope of the IMF between mi and mj

    Returns:
        zeta(m) --> [float or numpy array] IMF value(s) at m; 0 outside [m1, m4)
    """
    # normalisation constants that ensure the IMF is continuous at m2 and m3
    b1 = 1 / (
        (m2**(1 - a12) - m1**(1 - a12)) / (1 - a12) \
        + m2**(-(a12 - a23)) * (m3**(1 - a23) - m2**(1 - a23)) / (1 - a23) \
        + m2**(-(a12 - a23)) * m3**(-(a23 - a34)) * (m4**(1 - a34) - m3**(1 - a34)) / (1 - a34)
    )
    b2 = b1 * m2**(-(a12 - a23))
    b3 = b2 * m3**(-(a23 - a34))
    # FIX: detect scalars with np.isscalar so ints and numpy scalar types are
    # handled; the original tested isinstance(m, float) only, so e.g. IMF(1)
    # fell into the array branch and crashed on len(m).
    if np.isscalar(m):
        if m < m1:
            return 0
        elif m < m2:
            return b1 * m**(-a12)
        elif m < m3:
            return b2 * m**(-a23)
        elif m < m4:
            return b3 * m**(-a34)
        else:
            return 0
    else:
        # FIX: coerce to ndarray so plain lists work with boolean indexing
        m = np.asarray(m)
        imf_vals = np.zeros(len(m))
        seg12 = np.logical_and(m >= m1, m < m2)
        seg23 = np.logical_and(m >= m2, m < m3)
        seg34 = np.logical_and(m >= m3, m < m4)
        imf_vals[seg12] = b1 * m[seg12]**(-a12)
        imf_vals[seg23] = b2 * m[seg23]**(-a23)
        imf_vals[seg34] = b3 * m[seg34]**(-a34)
        return imf_vals
def CDF_IMF(m, m1=0.01, m2=0.08, m3=0.5, m4=200.0, a12=0.3, a23=1.3, a34=2.3):
    """
    Calculate the fraction of stellar mass between 0 and m for a three-part
    broken power-law IMF, F(m) = int_0^m zeta(m') dm'. Defaults follow
    Kroupa (2001).

    Args:
        m   --> [scalar or array-like of floats] mass or masses at which to evaluate
        mi  --> [float] masses at which to transition the slope
        aij --> [float] slope of the IMF between mi and mj

    Returns:
        F(m) --> [float or numpy array] CDF value(s) at m; 0 below m1, 1 above m4
    """
    # normalisation constants that ensure the IMF is continuous at m2 and m3
    b1 = 1 / (
        (m2**(1 - a12) - m1**(1 - a12)) / (1 - a12) \
        + m2**(-(a12 - a23)) * (m3**(1 - a23) - m2**(1 - a23)) / (1 - a23) \
        + m2**(-(a12 - a23)) * m3**(-(a23 - a34)) * (m4**(1 - a34) - m3**(1 - a34)) / (1 - a34)
    )
    b2 = b1 * m2**(-(a12 - a23))
    b3 = b2 * m3**(-(a23 - a34))
    # CDF values at the breakpoints, computed with THIS call's parameters.
    # FIX: the original recursed into CDF_IMF(m2)/CDF_IMF(m3) WITHOUT
    # forwarding the parameters, so non-default mi/aij gave a wrong,
    # discontinuous CDF.
    F2 = b1 / (1 - a12) * (m2**(1 - a12) - m1**(1 - a12))
    F3 = F2 + b2 / (1 - a23) * (m3**(1 - a23) - m2**(1 - a23))
    # FIX: np.isscalar instead of isinstance(m, float) so int inputs work
    if np.isscalar(m):
        if m <= m1:
            return 0
        elif m <= m2:
            return b1 / (1 - a12) * (m**(1 - a12) - m1**(1 - a12))
        elif m <= m3:
            return F2 + b2 / (1 - a23) * (m**(1 - a23) - m2**(1 - a23))
        elif m <= m4:
            return F3 + b3 / (1 - a34) * (m**(1 - a34) - m3**(1 - a34))
        else:
            # FIX: the CDF above the upper mass limit is 1 (the original
            # returned 0 here, disagreeing with the array branch)
            return 1
    else:
        m = np.asarray(m)
        CDF = np.zeros(len(m))
        seg12 = np.logical_and(m >= m1, m < m2)
        seg23 = np.logical_and(m >= m2, m < m3)
        seg34 = np.logical_and(m >= m3, m < m4)
        CDF[seg12] = b1 / (1 - a12) * (m[seg12]**(1 - a12) - m1**(1 - a12))
        CDF[seg23] = F2 + b2 / (1 - a23) * (m[seg23]**(1 - a23) - m2**(1 - a23))
        CDF[seg34] = F3 + b3 / (1 - a34) * (m[seg34]**(1 - a34) - m3**(1 - a34))
        CDF[m >= m4] = 1
        return CDF
def inverse_CDF_IMF(U, m1=0.01, m2=0.08, m3=0.5, m4=200, a12=0.3, a23=1.3, a34=2.3):
    """
    Invert the CDF of a three-part broken power-law IMF, mapping uniform
    random variables on [0, 1] to masses. Defaults follow Kroupa (2001).

    Args:
        U   --> [numpy array of floats] uniform random variable(s) on [0, 1]
        mi  --> [float] masses at which to transition the slope
        aij --> [float] slope of the IMF between mi and mj

    Returns:
        masses --> [numpy array of floats] masses m such that CDF_IMF(m) = U
    """
    # normalisation constants that ensure the IMF is continuous at m2 and m3
    b1 = 1 / (
        (m2**(1 - a12) - m1**(1 - a12)) / (1 - a12) \
        + m2**(-(a12 - a23)) * (m3**(1 - a23) - m2**(1 - a23)) / (1 - a23) \
        + m2**(-(a12 - a23)) * m3**(-(a23 - a34)) * (m4**(1 - a34) - m3**(1 - a34)) / (1 - a34)
    )
    b2 = b1 * m2**(-(a12 - a23))
    b3 = b2 * m3**(-(a23 - a34))
    # CDF values at which the power-law slope changes.
    # FIX: forward the *given* parameters to CDF_IMF; the original passed the
    # hard-coded defaults (m1=0.01, m2=0.08, ...), so custom IMF parameters
    # were silently ignored when locating the breakpoints.
    F1, F2, F3, F4 = CDF_IMF(np.array([m1, m2, m3, m4]),
                             m1=m1, m2=m2, m3=m3, m4=m4, a12=a12, a23=a23, a34=a34)
    # invert each power-law segment analytically
    masses = np.zeros(len(U))
    seg1 = np.logical_and(U > F1, U <= F2)
    seg2 = np.logical_and(U > F2, U <= F3)
    seg3 = np.logical_and(U > F3, U <= F4)
    masses[seg1] = np.power((1 - a12) / b1 * (U[seg1] - F1) + m1**(1 - a12), 1 / (1 - a12))
    masses[seg2] = np.power((1 - a23) / b2 * (U[seg2] - F2) + m2**(1 - a23), 1 / (1 - a23))
    masses[seg3] = np.power((1 - a34) / b3 * (U[seg3] - F3) + m3**(1 - a34), 1 / (1 - a34))
    return masses
|
FloorBroekgaardenREPO_NAMEDouble-Compact-Object-MergersPATH_START.@Double-Compact-Object-Mergers_extracted@Double-Compact-Object-Mergers-main@demo_read_hdf5_file@ClassCOMPAS.py@.PATH_END.py
|
{
"filename": "logoguidelines.md",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/branding/logo/logoguidelines.md",
"type": "Markdown"
}
|
# NumPy Logo Guidelines
These guidelines are meant to help keep the NumPy logo consistent and recognizable across all its uses. They also provide a common language for referring to the logos and their components.
The primary logo is the horizontal option (logomark and text next to each other) and the secondary logo is the stacked version (logomark over text). I’ve also provided the logomark on its own (meaning it doesn’t have text). When in doubt, it’s preferable to use primary or secondary options over the logomark alone.
## Color
The full color options are a combo of two shades of blue, rgb(77, 171, 207) and rgb(77, 119, 207), while light options are rgb(255, 255, 255) and dark options are rgb(1, 50, 67).
Whenever possible, use the full-color logos. One-color logos (light or dark) are to be used when full color will not have enough contrast, usually when logos must be placed on colored backgrounds.
## Minimum Size
Please do not make the primary logo smaller than 50px wide, secondary logo smaller than 35px wide, or logomark smaller than 20px wide.
## Logo Integrity
A few other notes to keep in mind when using the logo:
- Make sure to scale the logo proportionally.
- Maintain a good amount of space around the logo. Don’t let it overlap with text, images, or other elements.
- Do not try to recreate or modify the logo. For example, do not use the logomark and then try to write NumPy in another font.
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@branding@logo@logoguidelines.md@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/ternary/aaxis/title/font/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the "style" property of
    layout.ternary.aaxis.title.font (accepts "normal" or "italic")."""

    def __init__(
        self,
        plotly_name="style",
        parent_name="layout.ternary.aaxis.title.font",
        **kwargs,
    ):
        # pull the overridable defaults out of kwargs before delegating
        edit_type = kwargs.pop("edit_type", "plot")
        values = kwargs.pop("values", ["normal", "italic"])
        super(StyleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@ternary@aaxis@title@font@_style.py@.PATH_END.py
|
{
"filename": "read_data.py",
"repo_name": "lucatelli/morphen",
"repo_path": "morphen_extracted/morphen-main/libs/read_data.py",
"type": "Python"
}
|
"""
___
|_ _|_ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
| || | | | | | (_| | (_| | __/
|___|_| |_| |_|\__,_|\__, |\___|
|___/
___ _ _
/ _ \ _ __ ___ _ __ __ _| |_(_) ___ _ __ ___
| | | | '_ \ / _ \ '__/ _` | __| |/ _ \| '_ \/ __|
| |_| | |_) | __/ | | (_| | |_| | (_) | | | \__ \
\___/| .__/ \___|_| \__,_|\__|_|\___/|_| |_|___/
|_|
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.ticker as mticker
from astropy import units as u
import astropy.io.fits as pf
from casatools import image as IA
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
import scipy.ndimage as nd
def ctn(image):
    '''
    ctn ("CASA to numpy"): read an image file and return its data as a 2D
    numpy array.

    Tries the CASA image reader (casatools) first, then falls back to
    reading the file as FITS (this also handles wsclean images).

    Parameters
    ----------
    image : str
        Path to the image (CASA image directory or FITS file).

    Returns
    -------
    numpy.ndarray
        The 2D image data. If both readers fail, prints an error and
        returns the ValueError class (legacy behaviour, kept for callers).
    '''
    try:
        ia = IA()
        ia.open(image)
        try:
            # 4D cube (e.g. ra, dec, stokes, freq): take the first plane
            numpy_array = ia.getchunk()[:, :, 0, 0]
        except Exception:
            # FIX: narrowed from a bare except; plain 2D image fallback
            numpy_array = ia.getchunk()[:, :]
        ia.close()
        # casa gives a mirrored and 90-degree rotated image, so undo it
        data_image = np.rot90(numpy_array)[::-1, ::]
        return (data_image)
    except Exception:
        # FIX: narrowed from bare except clauses, which also swallowed
        # SystemExit/KeyboardInterrupt
        try:
            data_image = pf.getdata(image)
            return (data_image)
        except Exception:
            print('Error loading fits file')
            # NOTE(review): returning the ValueError *class* (not raising)
            # looks like a bug, but callers may depend on it, so the
            # behaviour is preserved here.
            return(ValueError)
|
lucatelliREPO_NAMEmorphenPATH_START.@morphen_extracted@morphen-main@libs@read_data.py@.PATH_END.py
|
{
"filename": "Fitter.py",
"repo_name": "CU-NESS/pylinex",
"repo_path": "pylinex_extracted/pylinex-master/pylinex/fitter/Fitter.py",
"type": "Python"
}
|
"""
Module containing class which computes fits of data using linear models through
analytical calculations. It has functions to output the signal estimate (with
errors), parameter covariance, and more. It can accept the noise level either
as standard deviations of channels (if uncorrelated) or as a covariance matrix
in the form of a
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`.
**File**: $PYLINEX/pylinex/fitter/Fitter.py
**Author**: Keith Tauscher
**Date**: 25 May 2021
"""
from __future__ import division
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as pl
from distpy import GaussianDistribution, ChiSquaredDistribution
from ..util import Savable, create_hdf5_dataset, psi_squared
from .TrainingSetIterator import TrainingSetIterator
from .BaseFitter import BaseFitter
try:
    # Python 2 defines the builtin `basestring`; referencing it raises
    # NameError on Python 3.
    basestring
except NameError:
    # FIX: catch only NameError (was a bare except). On Python 3, alias
    # basestring to str so isinstance(x, basestring) checks work on both
    # major versions.
    basestring = str
class Fitter(BaseFitter, Savable):
"""
Class which computes fits of data using linear models through analytical
calculations. It has functions to output the signal estimate (with errors),
parameter covariance, and more. It can accept the noise level either as
standard deviations of channels (if uncorrelated) or as a covariance matrix
in the form of a
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`.
"""
def __init__(self, basis_sum, data, error=None, **priors):
"""
Initializes a new `Fitter` object using the given inputs. The
likelihood used by the fit is of the form \\(\\mathcal{L}\
(\\boldsymbol{x}) \\propto \\exp{\\left\\{-\\frac{1}{2}\
[\\boldsymbol{y}-(\\boldsymbol{G}\\boldsymbol{x} +\
\\boldsymbol{\\mu})]^T\\boldsymbol{C}^{-1}[\\boldsymbol{y}-\
(\\boldsymbol{G}\\boldsymbol{x}+\\boldsymbol{\\mu})]\\right\\}}\\) and
the prior used is \\(\\pi(\\boldsymbol{x}) \\propto\
\\exp{\\left\\{-\\frac{1}{2}(\\boldsymbol{x}-\\boldsymbol{\\nu})^T\
\\boldsymbol{\\Lambda}^{-1}(\\boldsymbol{x}-\\boldsymbol{\\nu})\
\\right\\}}\\). The posterior distribution explored is
\\(p(\\boldsymbol{x})=\
\\mathcal{L}(\\boldsymbol{x})\\times\\pi(\\boldsymbol{x})\\).
Parameters
----------
basis_sum : `pylinex.basis.BasisSum.BasisSum` or\
`pylinex.basis.Basis.Basis`
the basis used to model the data, represented in equations by
\\(\\boldsymbol{G}\\) alongside the translation component
\\(\\boldsymbol{\\mu}\\). Two types of inputs are accepted:
- If `basis_sum` is a `pylinex.basis.BasisSum.BasisSum`, then it is
assumed to have constituent bases for each modeled component
alongside `pylinex.expander.Expander.Expander` objects determining
how those components enter into the data
- If `basis_sum` is a `pylinex.basis.Basis.Basis`, then it is
assumed that this single basis represents the only component that
needs to be modeled. The
`pylinex.fitter.BaseFitter.BaseFitter.basis_sum` property will be
set to a `pylinex.basis.BasisSum.BasisSum` object with this
`pylinex.basis.Basis.Basis` as its only component, labeled with the
string name `"sole"`
data : numpy.ndarray
the data to fit, represented in equations by \\(\\boldsymbol{y}\\)
- if `data` is 1D, then its length should be the same as the
(expanded) vectors in `basis_sum`, i.e. the number of rows of
\\(\\boldsymbol{G}\\), `nchannels`
- if `data` is 2D, then it should have shape `(ncurves, nchannels)`
and it will be interpreted as a list of data vectors to fit
independently
error : numpy.ndarray or\
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`
the noise level of the data that determines the covariance matrix,
represented in equations by \\(\\boldsymbol{C}\\):
- if `error` is a 1D `numpy.ndarray`, it should have the same
length as the (expanded) vectors in `basis_sum`, i.e. the number of
rows of \\(\\boldsymbol{G}\\), `nchannels` and should only contain
positive numbers. In this case, \\(\\boldsymbol{C}\\) is a diagonal
matrix whose elements are the squares of the values in `error`
- if `error` is a
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`,
then it is assumed to represent a block diagonal
\\(\\boldsymbol{C}\\) directly
priors : dict
keyword arguments where the keys are exactly the names of the
`basis_sum` with `'_prior'` appended to them and the values are
`distpy.distribution.GaussianDistribution.GaussianDistribution`
objects. Priors are optional and can be included or excluded for
any given component. If `basis_sum` was given as a
`pylinex.basis.Basis.Basis`, then `priors` should either be empty
or a dictionary of the form
`{'sole_prior': gaussian_distribution}`. The means and inverse
covariances of all priors are combined into a full parameter prior
mean and full parameter prior inverse covariance, represented in
equations by \\(\\boldsymbol{\\nu}\\) and
\\(\\boldsymbol{\\Lambda}^{-1}\\), respectively. Having no prior is
equivalent to having an infinitely wide prior, i.e. a prior with an
inverse covariance matrix of \\(\\boldsymbol{0}\\)
"""
self.basis_sum = basis_sum
self.priors = priors
self.data = data
self.error = error
@property
def prior_significance(self):
"""
The prior significance, represented mathematically as
\\(\\boldsymbol{\\nu}^T\\boldsymbol{\\Lambda}^{-1}\\boldsymbol{\\nu}.
"""
if not hasattr(self, '_prior_significance'):
self._prior_significance = np.dot(self.prior_mean,\
np.dot(self.prior_inverse_covariance, self.prior_mean))
return self._prior_significance
@property
def log_prior_covariance_determinant(self):
"""
The logarithm (base e) of the determinant of the prior
parameter covariance matrix, \\(|\\boldsymbol{\\Lambda}|\\). Note that
if a given prior is not given, it is simply not used here (to avoid
getting 0 or \\(\\infty\\) as the determinant).
"""
if not hasattr(self, '_log_prior_covariance_determinant'):
self._log_prior_covariance_determinant = 0
for key in self.priors:
this_prior_covariance = self.priors[key].covariance.A
self._log_prior_covariance_determinant +=\
la.slogdet(this_prior_covariance)[1]
return self._log_prior_covariance_determinant
@property
def data_significance(self):
"""
The data significance, represented mathematically as
\\((\\boldsymbol{y}-\\boldsymbol{\\mu})^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y} - \\boldsymbol{\\mu})\\). It is either a single number
(if `Fitter.multiple_data_curves` is True) or a 1D `numpy.ndarray` (if
`Fitter.multiple_data_curves` is False)
"""
if not hasattr(self, '_data_significance'):
if self.multiple_data_curves:
self._data_significance =\
np.sum(self.weighted_translated_data ** 2, axis=1)
else:
self._data_significance =\
np.dot(self.weighted_translated_data,\
self.weighted_translated_data)
return self._data_significance
@property
def num_parameters(self):
"""
The number of parameters of the fit. This is the same as the
`num_basis_vectors` property of `Fitter.basis_sum`.
"""
return self.basis_sum.num_basis_vectors
@property
def posterior_covariance_times_prior_inverse_covariance(self):
"""
The posterior covariance multiplied on the right by the prior inverse
covariance, represented mathematically as
\\(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1}\\). This is a matrix
measure of the effect of the data on the distribution of parameters
(i.e. it approaches the zero matrix if the data constrains parameters
much more powerfully than the prior and approaches the identity matrix
if the prior constrains parameters much more powerfully than the data).
"""
if not hasattr(self,\
'_posterior_covariance_times_prior_inverse_covariance'):
self._posterior_covariance_times_prior_inverse_covariance =\
np.dot(self.parameter_covariance,\
self.prior_inverse_covariance)
return self._posterior_covariance_times_prior_inverse_covariance
@property
def model_complexity_mean_to_peak_logL(self):
"""
A measure of the model complexity that is computed by taking the
difference between the mean and peak values of the log likelihood. If
this `Fitter` has no priors, then this property will always simply
return the number of parameters, \\(p\\). It is represented
mathematically as
\\(p-\\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})\\).
"""
if not hasattr(self, '_model_complexity_mean_to_peak_logL'):
self._model_complexity_mean_to_peak_logL = self.num_parameters
if self.has_priors:
self._model_complexity_mean_to_peak_logL -= np.trace(\
self.posterior_covariance_times_prior_inverse_covariance)
return self._model_complexity_mean_to_peak_logL
    @property
    def model_complexity_logL_variance(self):
        """
        A measure of the model complexity which is computed by finding the
        variance of the log likelihood function. It is represented
        mathematically as \\(p+2\\ \\boldsymbol{\\delta}\\boldsymbol{C}^{-1}\
        \\boldsymbol{G}\\boldsymbol{S}\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
        \\boldsymbol{\\delta} + \\text{tr}(\\boldsymbol{S}\
        \\boldsymbol{\\Lambda}^{-1}\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})\
        -2\\ \\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})\\).

        Computed lazily and cached on first access.
        """
        if not hasattr(self, '_model_complexity_logL_variance'):
            # start from p, the number of parameters
            self._model_complexity_logL_variance = self.num_parameters
            # delta^T C^-1 G (one row per curve when fitting multiple curves)
            bias_term = np.dot(self.weighted_basis, self.weighted_bias.T).T
            if self.multiple_data_curves:
                covariance_times_bias_term =\
                    np.dot(bias_term, self.parameter_covariance)
                # row-wise quadratic form: one value per data curve
                bias_term =\
                    np.sum(bias_term * covariance_times_bias_term, axis=1)
                # free the large intermediate product as soon as possible
                del covariance_times_bias_term
            else:
                bias_term = np.dot(bias_term,\
                    np.dot(self.parameter_covariance, bias_term))
            self._model_complexity_logL_variance += (2 * bias_term)
            if self.has_priors:
                # add tr(S Lambda^-1 S Lambda^-1)
                self._model_complexity_logL_variance += np.trace(np.dot(\
                    self.posterior_covariance_times_prior_inverse_covariance,\
                    self.posterior_covariance_times_prior_inverse_covariance))
                # subtract 2 tr(S Lambda^-1)
                self._model_complexity_logL_variance -= (2 * np.trace(\
                    self.posterior_covariance_times_prior_inverse_covariance))
        return self._model_complexity_logL_variance
@property
def basis_dot_products(self):
"""
The dot products between the `pylinex.basis.Basis.Basis` objects
underlying the `Fitter.basis_sum` this object stores. See the
`pylinex.basis.Basis.Basis.dot` method for details on this calculation.
"""
if not hasattr(self, '_basis_dot_products'):
if self.non_diagonal_noise_covariance:
raise NotImplementedError("Basis dot products are not yet " +\
"implemented for non diagonal noise covariance matrices.")
else:
self._basis_dot_products =\
self.basis_sum.basis_dot_products(error=self.error)
return self._basis_dot_products
@property
def basis_dot_product_sum(self):
"""
The sum of all off diagonal elements of the upper triangle of
`Fitter.basis_dot_products`.
"""
if not hasattr(self, '_basis_dot_product_sum'):
self._basis_dot_product_sum = np.sum(self.basis_dot_products)
self._basis_dot_product_sum = self._basis_dot_product_sum -\
np.trace(self.basis_dot_products)
self._basis_dot_product_sum = self._basis_dot_product_sum / 2.
return self._basis_dot_product_sum
@property
def parameter_inverse_covariance(self):
"""
The inverse of the posterior distribution's covariance matrix. This is
represented mathematically as \\(\\boldsymbol{S}^{-1}=\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G} +\
\\boldsymbol{\\Lambda}^{-1}\\).
"""
if not hasattr(self, '_parameter_inverse_covariance'):
self._parameter_inverse_covariance = self.basis_overlap_matrix
if self.has_priors:
self._parameter_inverse_covariance =\
self._parameter_inverse_covariance +\
self.prior_inverse_covariance
return self._parameter_inverse_covariance
@property
def likelihood_parameter_covariance(self):
"""
The parameter covariance implied only by the likelihood, represented
mathematically as
\\((\\boldsymbol{G}^T\\boldsymbol{C}\\boldsymbol{G})^{-1}\\).
"""
if not hasattr(self, '_likelihood_parameter_covariance'):
if self.has_priors:
self._likelihood_parameter_covariance =\
la.inv(self.basis_overlap_matrix)
else:
self._likelihood_parameter_covariance =\
self.parameter_covariance
return self._likelihood_parameter_covariance
@property
def likelihood_parameter_mean(self):
"""
Property storing the parameter mean implied by the likelihood (i.e.
disregarding priors). It is represented mathematically as
\\((\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y}-\\boldsymbol{\\mu})\\).
"""
if not hasattr(self, '_likelihood_parameter_mean'):
if self.has_priors:
self._likelihood_parameter_mean =\
np.dot(self.likelihood_parameter_covariance,\
np.dot(self.weighted_basis,\
self.weighted_translated_data.T)).T
else:
self._likelihood_parameter_mean = self.parameter_mean
return self._likelihood_parameter_mean
@property
def likelihood_channel_mean(self):
"""
Property storing the channel mean associated with the likelihood
parameter mean (i.e. the result if there are no priors). It is
represented mathematically as \\(\\boldsymbol{G}\
(\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y}-\\boldsymbol{\\mu}) + \\boldsymbol{\\mu}\\).
"""
if not hasattr(self, '_likelihood_channel_mean'):
if self.has_priors:
self._likelihood_channel_mean = self.basis_sum.translation +\
np.dot(self.basis_sum.basis.T,\
self.likelihood_parameter_mean.T).T
else:
self._likelihood_channel_mean = self.channel_mean
return self._likelihood_channel_mean
@property
def likelihood_channel_bias(self):
"""
Property storing the channel-space bias associated with the likelihood
parameter mean (i.e. the result if there are no priors). It is
represented mathematically as \\(\\boldsymbol{\\delta}_{\\text{NP}}=\
\\left[\\boldsymbol{I}-\\boldsymbol{G}\
(\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\right]\
(\\boldsymbol{y}-\\boldsymbol{\\mu})\\).
"""
if not hasattr(self, '_likelihood_channel_bias'):
if self.has_priors:
self._likelihood_channel_bias =\
self.data - self.likelihood_channel_mean
else:
self._likelihood_channel_bias = self.channel_bias
return self._likelihood_channel_bias
@property
def likelihood_weighted_bias(self):
"""
The likelihood channel bias weighted by the error, represented
mathematically as
\\(\\boldsymbol{C}^{-1/2}\\boldsymbol{\\delta}_{\\text{NP}}\\).
"""
if not hasattr(self, '_likelihood_weighted_bias'):
if self.has_priors:
self._likelihood_weighted_bias =\
self.weight(self.likelihood_channel_bias, -1)
else:
self._likelihood_weighted_bias = self.weighted_bias
return self._likelihood_weighted_bias
@property
def likelihood_bias_statistic(self):
"""
The maximum value of the loglikelihood, represented mathematically as
\\(\\boldsymbol{\\delta}_{\\text{NP}}^T \\boldsymbol{C}^{-1}\
\\boldsymbol{\\delta}_{\\text{NP}}\\). It is equal to -2 times the peak
value of the loglikelihood.
"""
if not hasattr(self, '_likelihood_bias_statistic'):
if self.has_priors:
if self.multiple_data_curves:
self._likelihood_bias_statistic =\
np.sum(self.likelihood_weighted_bias ** 2, axis=1)
else:
self._likelihood_bias_statistic = np.dot(\
self.likelihood_weighted_bias,\
self.likelihood_weighted_bias)
else:
self._likelihood_bias_statistic = self.bias_statistic
return self._likelihood_bias_statistic
@property
def degrees_of_freedom(self):
"""
The difference between the number of channels and the number of
parameters.
"""
if not hasattr(self, '_degrees_of_freedom'):
self._degrees_of_freedom = self.num_channels - self.num_parameters
return self._degrees_of_freedom
@property
def normalized_likelihood_bias_statistic(self):
"""
The normalized version of the likelihood bias statistic. This is a
statistic that should be close to 1 which measures how well the total
data is fit and is represented mathematically as
\\(\\frac{1}{\\text{dof}}\\boldsymbol{\\delta}_{\\text{NP}}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}_{\\text{NP}}\\), where
\\(\\text{dof}\\) and is the number of degrees of freedom.
"""
if not hasattr(self, '_normalized_likelihood_bias_statistic'):
self._normalized_likelihood_bias_statistic =\
self.likelihood_bias_statistic / self.degrees_of_freedom
return self._normalized_likelihood_bias_statistic
@property
def chi_squared(self):
"""
The (non-reduced) chi-squared value(s) of the fit(s) in this `Fitter`,
represented mathematically as
\\(\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
return self.bias_statistic
@property
def reduced_chi_squared(self):
"""
The reduced chi-squared value(s) of the fit(s) in this `Fitter`,
represented mathematically as \\(\\frac{1}{\\text{dof}}\
\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
return self.normalized_bias_statistic
@property
def reduced_chi_squared_expected_mean(self):
"""
The expected mean of `Fitter.reduced_chi_squared`, represented
mathematically as \\(\\frac{1}{\\text{dof}}[\\text{dof} +\
\\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})]\\).
"""
if not hasattr(self, '_reduced_chi_squared_expected_mean'):
if self.has_priors:
mean = np.sum(np.diag(\
self.posterior_covariance_times_prior_inverse_covariance))
else:
mean = 0
self._reduced_chi_squared_expected_mean =\
(mean + self.degrees_of_freedom) / self.degrees_of_freedom
return self._reduced_chi_squared_expected_mean
@property
def reduced_chi_squared_expected_variance(self):
"""
The expected variance of `Fitter.reduced_chi_squared`, represented
mathematically as \\(\\frac{2}{\\text{dof}^2}[\\text{dof} +\
\\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1}\
\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})]\\).
"""
if not hasattr(self, '_reduced_chi_squared_expected_variance'):
if self.has_priors:
variance =\
self.posterior_covariance_times_prior_inverse_covariance
variance = np.sum(variance * variance.T)
else:
variance = 0
self._reduced_chi_squared_expected_variance =\
(2 * (variance + self.degrees_of_freedom)) /\
(self.degrees_of_freedom ** 2)
return self._reduced_chi_squared_expected_variance
@property
def reduced_chi_squared_expected_distribution(self):
"""
A `distpy.distribution.GaussianDistribution.GaussianDistribution` with
mean given by `Fitter.reduced_chi_squared_expected_mean` and variance
given by `Fitter.reduced_chi_squared_expected_variance`.
"""
if not hasattr(self, '_reduced_chi_squared_expected_distribution'):
if self.has_priors:
self._reduced_chi_squared_expected_distribution =\
GaussianDistribution(\
self.reduced_chi_squared_expected_mean,\
self.reduced_chi_squared_expected_variance)
else:
self._reduced_chi_squared_expected_distribution =\
ChiSquaredDistribution(self.degrees_of_freedom,\
reduced=True)
return self._reduced_chi_squared_expected_distribution
@property
def psi_squared(self):
"""
Property storing the reduced psi-squared values of the fit(s) in this
Fitter.
"""
if not hasattr(self, '_psi_squared'):
if self.multiple_data_curves:
self._psi_squared =\
np.array([psi_squared(bias, error=None)\
for bias in self.weighted_bias])
else:
self._psi_squared = psi_squared(self.weighted_bias, error=None)
return self._psi_squared
@property
def maximum_loglikelihood(self):
"""
The maximum value of the Gaussian loglikelihood (when the normalizing
constant outside the exponential is left off).
"""
if not hasattr(self, '_maximum_loglikelihood'):
self._maximum_loglikelihood =\
(-(self.likelihood_bias_statistic / 2.))
return self._maximum_loglikelihood
@property
def parameter_covariance(self):
"""
The covariance matrix of the posterior parameter distribution,
represented mathematically as \\(\\boldsymbol{S}=(\\boldsymbol{G}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{G} +\
\\boldsymbol{\\Lambda}^{-1})^{-1}\\).
"""
if not hasattr(self, '_parameter_covariance'):
self._parameter_covariance =\
la.inv(self.parameter_inverse_covariance)
return self._parameter_covariance
@property
def log_parameter_covariance_determinant(self):
"""
The logarithm (base e) of the determinant of the posterior parameter
covariance matrix, represented mathematically as
\\(\\Vert\\boldsymbol{S}\\Vert\\).
"""
if not hasattr(self, '_log_parameter_covariance_determinant'):
self._log_parameter_covariance_determinant =\
la.slogdet(self.parameter_covariance)[1]
return self._log_parameter_covariance_determinant
@property
def log_parameter_covariance_determinant_ratio(self):
"""
The logarithm (base e) of the ratio of the determinant of the posterior
parameter covariance matrix to the determinant of the prior parameter
covariance matrix. This can be thought of as the log of the ratio of
the hypervolume of the 1 sigma posterior ellipse to the hypervolume of
the 1 sigma prior ellipse. It is represented mathematically as
\\(\\ln{\\left(\\frac{\\Vert\\boldsymbol{S}\\Vert}{\
\\Vert\\boldsymbol{\\Lambda}\\Vert}\\right)}\\).
"""
if not hasattr(self, '_log_parameter_covariance_determinant_ratio'):
self._log_parameter_covariance_determinant_ratio =\
self.log_parameter_covariance_determinant -\
self.log_prior_covariance_determinant
return self._log_parameter_covariance_determinant_ratio
@property
def channel_error(self):
"""
The error on the estimate of the full data in channel space,
represented mathematically as
\\(\\boldsymbol{G}\\boldsymbol{S}\\boldsymbol{G}^T\\).
"""
if not hasattr(self, '_channel_error'):
SAT = np.dot(self.parameter_covariance, self.basis_sum.basis)
self._channel_error =\
np.sqrt(np.einsum('ab,ab->b', self.basis_sum.basis, SAT))
return self._channel_error
@property
def channel_RMS(self):
"""
The RMS error on the estimate of the full data in channel space.
"""
if not hasattr(self, '_channel_RMS'):
self._channel_RMS =\
np.sqrt(np.mean(np.power(self.channel_error, 2)))
return self._channel_RMS
    @property
    def parameter_mean(self):
        """
        The posterior mean parameter vector(s). It is represented
        mathematically as
        \\(\\boldsymbol{\\gamma} =\
        (\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G} +\
        \\boldsymbol{\\Lambda}^{-1})[\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
        (\\boldsymbol{y}-\\boldsymbol{\\mu}) +\
        \\boldsymbol{\\Lambda}^{-1}\\boldsymbol{\\nu}]\\) and is stored in a
        `numpy.ndarray` of shape `(nparams,)` or `(ncurves, nparams)`.
        """
        if not hasattr(self, '_parameter_mean'):
            # start with G^T C^{-1} (y - mu) for one or many data curves
            self._parameter_mean =\
                np.dot(self.weighted_basis, self.weighted_translated_data.T).T
            if self.has_priors:
                # add Lambda^{-1} nu, broadcasting over curves if necessary
                if self.multiple_data_curves:
                    self._parameter_mean = self._parameter_mean +\
                        self.prior_inverse_covariance_times_mean[np.newaxis,:]
                else:
                    self._parameter_mean = self._parameter_mean +\
                        self.prior_inverse_covariance_times_mean
            # finish by left-multiplying with the posterior covariance S
            self._parameter_mean =\
                np.dot(self.parameter_covariance, self._parameter_mean.T).T
        return self._parameter_mean
@property
def parameter_distribution(self):
"""
Property storing a
`distpy.distribution.GaussianDistribution.GaussianDistribution`
representing a distribution with the mean and covariance stored in
`Fitter.parameter_mean` and `Fitter.parameter_covariance`,
respectively.
"""
if not hasattr(self, '_parameter_distribution'):
if self.multiple_data_curves:
raise ValueError("parameter_distribution only makes sense " +\
"if the Fitter has only one data curve.")
else:
self._parameter_distribution = GaussianDistribution(\
self.parameter_mean, self.parameter_covariance)
return self._parameter_distribution
@property
def posterior_significance(self):
"""
The posterior significance, represented mathematically as
\\(\\boldsymbol{z}^T \\boldsymbol{S}^{-1} \\boldsymbol{z}\\),
where \\(z\\) is `Fitter.parameter_mean`.
"""
if not hasattr(self, '_posterior_significance'):
if self.multiple_data_curves:
inverse_covariance_times_mean = np.dot(self.parameter_mean,\
self.parameter_inverse_covariance)
self._posterior_significance = np.sum(\
self.parameter_mean * inverse_covariance_times_mean,\
axis=1)
del inverse_covariance_times_mean
else:
self._posterior_significance =\
np.dot(self.parameter_mean,\
np.dot(self.parameter_inverse_covariance,\
self.parameter_mean))
return self._posterior_significance
@property
def channel_mean(self):
"""
The posterior estimate of the modeled data in channel space.
"""
if not hasattr(self, '_channel_mean'):
self._channel_mean = self.basis_sum.translation +\
np.dot(self.basis_sum.basis.T, self.parameter_mean.T).T
return self._channel_mean
@property
def channel_bias(self):
"""
The bias of the estimate of the data (i.e. the posterior estimate of
the data minus the data), represented mathematically as
\\(\\boldsymbol{\\delta}\\).
"""
if not hasattr(self, '_channel_bias'):
self._channel_bias = self.data - self.channel_mean
return self._channel_bias
@property
def channel_bias_RMS(self):
"""
The RMS of `Fitter.channel_bias`.
"""
if not hasattr(self, '_channel_bias_RMS'):
if self.multiple_data_curves:
self._channel_bias_RMS = np.sqrt(\
np.sum(self.channel_bias ** 2, axis=1) / self.num_channels)
else:
self._channel_bias_RMS =\
np.sqrt(np.dot(self.channel_bias, self.channel_bias) /\
self.num_channels)
return self._channel_bias_RMS
@property
def weighted_bias(self):
"""
The posterior channel bias weighted down by the errors, represented
mathematically as \\(\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
if not hasattr(self, '_weighted_bias'):
self._weighted_bias = self.weight(self.channel_bias, -1)
return self._weighted_bias
@property
def bias_statistic(self):
"""
A statistic known as the "bias statistic", represented mathematically
as
\\(\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
It is a measure of the bias of the full model being fit. It should have
a \\(\\chi^2(N)\\) distribution where \\(N\\) is the number of degrees
of freedom.
"""
if not hasattr(self, '_bias_statistic'):
if self.multiple_data_curves:
self._bias_statistic = np.sum(self.weighted_bias ** 2, axis=1)
else:
self._bias_statistic =\
np.dot(self.weighted_bias, self.weighted_bias)
return self._bias_statistic
@property
def loglikelihood_at_posterior_maximum(self):
"""
The value of the Gaussian loglikelihood (without the normalizing factor
outside the exponential) at the maximum of the posterior distribution.
"""
if not hasattr(self, '_loglikelihood_at_posterior_maximum'):
self._loglikelihood_at_posterior_maximum =\
(-(self.bias_statistic / 2.))
return self._loglikelihood_at_posterior_maximum
@property
def normalized_bias_statistic(self):
"""
The reduced chi-squared value(s) of the fit(s) in this `Fitter`,
represented mathematically as \\(\\frac{1}{\\text{dof}}\
\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
if not hasattr(self, '_normalized_bias_statistic'):
self._normalized_bias_statistic =\
self.bias_statistic / self.degrees_of_freedom
return self._normalized_bias_statistic
@property
def likelihood_significance_difference(self):
"""
The likelihood covariance part of the significance difference, equal to
\\(\\boldsymbol{\\gamma}^T\\boldsymbol{C}\\boldsymbol{\\gamma}-\
\\boldsymbol{y}^T\\boldsymbol{C}^{-1}\\boldsymbol{y}\\) where
\\(\\boldsymbol{\\gamma}\\) is `Fitter.parameter_mean`.
"""
if not hasattr(self, '_likelihood_significance_difference'):
mean_sum = self.weight(self.channel_mean + self.data -\
(2 * self.basis_sum.translation), -1)
mean_difference = (self.channel_mean - self.data) / error_to_divide
if self.multiple_data_curves:
self._likelihood_significance_difference =\
np.sum(mean_sum * mean_difference, axis=1)
else:
self._likelihood_significance_difference =\
np.dot(mean_sum, mean_difference)
return self._likelihood_significance_difference
    @property
    def prior_significance_difference(self):
        """
        Property storing the prior covariance part of the significance
        difference. This is equal to \\(\\boldsymbol{\\gamma}^T\
        \\boldsymbol{\\Lambda}^{-1} \\boldsymbol{\\gamma} -\
        \\boldsymbol{\\nu}^T \\boldsymbol{\\Lambda}^{-1} \\boldsymbol{\\nu}\\).
        """
        if not hasattr(self, '_prior_significance_difference'):
            # accumulator: one value per data curve (or scalar for one curve)
            if self.multiple_data_curves:
                self._prior_significance_difference =\
                    np.zeros(self.data.shape[:-1])
            else:
                self._prior_significance_difference = 0
            # sum contributions only from subbases that actually have priors
            for name in self.names:
                key = '{!s}_prior'.format(name)
                if key in self.priors:
                    prior = self.priors[key]
                    prior_mean = prior.internal_mean.A[0]
                    prior_inverse_covariance = prior.inverse_covariance.A
                    posterior_mean = self.subbasis_parameter_mean(name=name)
                    # (a^T X a - b^T X b) computed as (a - b)^T X (a + b)
                    mean_sum = posterior_mean + prior_mean
                    mean_difference = posterior_mean - prior_mean
                    if self.multiple_data_curves:
                        this_term =\
                            np.dot(mean_difference, prior_inverse_covariance)
                        this_term = np.sum(this_term * mean_sum, axis=1)
                    else:
                        this_term = np.dot(mean_sum,\
                            np.dot(prior_inverse_covariance, mean_difference))
                    self._prior_significance_difference =\
                        self._prior_significance_difference + this_term
        return self._prior_significance_difference
@property
def significance_difference(self):
"""
The difference between the posterior significance and the sum of the
data significance and prior significance. It is a term in the log
evidence and is given by
\\(\\boldsymbol{\\gamma}^T\\boldsymbol{S}^{-1}\\boldsymbol{\\gamma} -\
\\boldsymbol{y}^T\\boldsymbol{C}^{-1}\\boldsymbol{y} -\
\\boldsymbol{\\nu}^T\\boldsymbol{\\Lambda}^{-1}\\boldsymbol{\\nu}\\).
"""
if not hasattr(self, '_significance_difference'):
self._significance_difference =\
self.likelihood_significance_difference +\
self.prior_significance_difference
return self._significance_difference
    @property
    def log_evidence(self):
        """
        The natural logarithm of the evidence (a.k.a. marginal likelihood) of
        this fit. The evidence is the integral over parameter space of the
        product of the likelihood and the prior and is often very large.
        """
        if not hasattr(self, '_log_evidence'):
            # core terms: half the log covariance determinant ratio plus
            # half the significance difference
            log_evidence = (self.log_parameter_covariance_determinant_ratio +\
                self.significance_difference) / 2.
            if self.has_all_priors:
                # only constants added below, ignore if numerical problems
                log_evidence = log_evidence -\
                    ((self.num_channels * np.log(2 * np.pi)) / 2.)
                # subtract half log|C|: via slogdet for a full covariance,
                # or as the sum of log standard deviations if diagonal
                if self.non_diagonal_noise_covariance:
                    log_evidence = log_evidence +\
                        (self.error.sign_and_log_abs_determinant()[1]) / 2
                else:
                    log_evidence = log_evidence + np.sum(np.log(self.error))
            self._log_evidence = log_evidence
        return self._log_evidence
@property
def log_evidence_per_data_channel(self):
"""
`Fitter.log_evidence` divided by the number of channels.
"""
if not hasattr(self, '_log_evidence_per_data_channel'):
self._log_evidence_per_data_channel =\
self.log_evidence / self.num_channels
return self._log_evidence_per_data_channel
@property
def evidence(self):
"""
The evidence (a.k.a. marginal likelihood) of this fit. Beware: the
evidence is often extremely large in magnitude, with log evidences
sometimes approaching +-10^7. In these cases, the evidence will end up
NaN.
"""
if not hasattr(self, '_evidence'):
self._evidence = np.exp(self.log_evidence)
return self._evidence
@property
def evidence_per_data_channel(self):
"""
The factor by which each data channel multiplies the Bayesian evidence
on average (more precisely, the geometric mean of these numbers).
"""
if not hasattr(self, '_evidence_per_data_channel'):
self._evidence_per_data_channel =\
np.exp(self.log_evidence_per_data_channel)
return self._evidence_per_data_channel
@property
def bayesian_information_criterion(self):
"""
The Bayesian Information Criterion (BIC) which is essentially the same
as the bias statistic except it includes information about the
complexity of the model. It is \\(\\boldsymbol{\\delta}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{\\delta} + p\\ln{N}\\), where \\(p\\)
is the number of parameters and \\(N\\) is the number of data channels.
"""
if not hasattr(self, '_bayesian_information_criterion'):
self._bayesian_information_criterion =\
self.likelihood_bias_statistic +\
(self.num_parameters * np.log(self.num_channels))
return self._bayesian_information_criterion
    @property
    def BIC(self):
        """
        Alias for `Fitter.bayesian_information_criterion`.
        """
        return self.bayesian_information_criterion
@property
def akaike_information_criterion(self):
"""
An information criterion given by \\(\\boldsymbol{\\delta}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{\\delta} + 2p\\), where \\(p\\) is the
number of parameters.
"""
if not hasattr(self, '_akaike_information_criterion'):
self._akaike_information_criterion =\
self.likelihood_bias_statistic + (2 * self.num_parameters)
return self._akaike_information_criterion
    @property
    def AIC(self):
        """
        Alias for `Fitter.akaike_information_criterion`.
        """
        return self.akaike_information_criterion
    ######################## TODO documentation below this line hasn't been updated!
@property
def deviance_information_criterion(self):
"""
An information criterion given by -4 ln(L_max) + <2 ln(L)> where L is
the likelihood, <> denotes averaging over the posterior, and L_max is
the maximum likelihood.
"""
if not hasattr(self, '_deviance_information_criterion'):
self._deviance_information_criterion =\
self.likelihood_bias_statistic +\
(2 * self.model_complexity_mean_to_peak_logL)
return self._deviance_information_criterion
    @property
    def DIC(self):
        """
        Alias for the `deviance_information_criterion` property.
        """
        return self.deviance_information_criterion
@property
def deviance_information_criterion_logL_variance(self):
"""
Version of the Deviance Information Criterion (DIC) which estimates the
model complexity through computation of the variance of the log
likelihood (with respect to the posterior).
"""
if not hasattr(self, '_deviance_information_criterion_logL_variance'):
self._deviance_information_criterion_logL_variance =\
self.likelihood_bias_statistic +\
self.model_complexity_logL_variance
return self._deviance_information_criterion_logL_variance
    @property
    def DIC2(self):
        """
        Alias for the `deviance_information_criterion_logL_variance` property.
        """
        return self.deviance_information_criterion_logL_variance
@property
def posterior_prior_mean_difference(self):
"""
Property storing the difference between the posterior parameter mean
and the prior parameter mean.
"""
if not hasattr(self, '_posterior_prior_mean_difference'):
if self.multiple_data_curves:
self._posterior_prior_mean_difference =\
self.parameter_mean - self.prior_mean[np.newaxis,:]
else:
self._posterior_prior_mean_difference =\
self.parameter_mean - self.prior_mean
return self._posterior_prior_mean_difference
    @property
    def bayesian_predictive_information_criterion(self):
        """
        The Bayesian Predictive Information Criterion (BPIC), a statistic
        which gives relative goodness of fit values. Built up term by term:
        base value p + bias statistic, prior corrections when priors exist,
        and a noise-weighting correction whose form depends on whether the
        noise covariance is diagonal.
        """
        if not hasattr(self, '_bayesian_predictive_information_criterion'):
            # base: number of parameters plus the bias statistic
            self._bayesian_predictive_information_criterion =\
                self.num_parameters + self.bias_statistic
            if self.has_priors: # TODO
                # subtract tr(S Lambda^{-1}), then add the quadratic
                # prior/posterior coupling term normalized per channel
                self._bayesian_predictive_information_criterion -= np.trace(\
                    self.posterior_covariance_times_prior_inverse_covariance)
                term_v1 = np.dot(\
                    self.posterior_covariance_times_prior_inverse_covariance,\
                    self.posterior_prior_mean_difference.T).T
                term_v2 = np.dot(self.prior_inverse_covariance,\
                    self.posterior_prior_mean_difference.T).T +\
                    (2 * np.dot(self.weighted_basis, self.weighted_bias.T).T)
                if self.multiple_data_curves:
                    self._bayesian_predictive_information_criterion +=\
                        (np.sum(term_v1 * term_v2, axis=1) / self.num_channels)
                else:
                    self._bayesian_predictive_information_criterion +=\
                        (np.dot(term_v1, term_v2) / self.num_channels)
            if self.non_diagonal_noise_covariance:
                # full-covariance correction via a single einsum contraction
                doubly_weighted_basis =\
                    self.weight(self.weight(self.basis_sum.basis, -1), -1)
                self._bayesian_predictive_information_criterion +=\
                    (2 * np.einsum('ij,ik,jk,k', self.parameter_covariance,\
                    doubly_weighted_basis, doubly_weighted_basis,\
                    self.channel_bias ** 2))
            else:
                # diagonal-noise correction: channel errors relative to the
                # noise level, squared against the weighted bias
                weighted_error = self.channel_error / self.error
                if self.multiple_data_curves:
                    weighted_error = weighted_error[np.newaxis,:]
                to_sum = ((weighted_error * self.weighted_bias) ** 2)
                self._bayesian_predictive_information_criterion +=\
                    (2 * np.sum(to_sum, axis=-1))
                del to_sum
        return self._bayesian_predictive_information_criterion
    @property
    def BPIC(self):
        """
        Alias for `Fitter.bayesian_predictive_information_criterion`.
        """
        return self.bayesian_predictive_information_criterion
def subbasis_log_separation_evidence(self, name=None):
"""
Calculates the subbasis_log_separation evidence per degree of freedom.
This is the same as the evidence with the log covariance determinant
ratio replaced by the log covariance determinant ratio for the given
subbasis (normalized by the degrees of freedom).
name: string identifying subbasis under concern
per_channel: if True, normalizes the log_separation_evidence by
dividing by the nuiber of data channels.
returns: single float number
"""
if not hasattr(self, '_subbasis_log_separation_evidences'):
self._subbasis_log_separation_evidences = {}
if name not in self._subbasis_log_separation_evidences:
self._subbasis_log_separation_evidences[name] =\
(self.log_evidence -\
(self.log_parameter_covariance_determinant_ratio / 2.) +\
(self.subbasis_log_parameter_covariance_determinant_ratio(\
name=name) / 2.)) / self.degrees_of_freedom
return self._subbasis_log_separation_evidences[name]
def subbasis_separation_evidence_per_degree_of_freedom(self, name=None):
"""
Finds the subbasis separation evidence per degree of freedom.
name: string identifying subbasis under concern
returns: single non-negative float number
"""
if not hasattr(self,\
'_subbasis_separation_evidences_per_degree_of_freedom'):
self._subbasis_separation_evidences_per_degree_of_freedom = {}
if name not in\
self._subbasis_separation_evidences_per_degree_of_freedom:
self._subbasis_separation_evidences_per_degree_of_freedom[name] =\
np.exp(self.subbasis_log_separation_evidence(name=name))
return self._subbasis_separation_evidences_per_degree_of_freedom[name]
@property
def log_separation_evidence(self):
"""
Property storing the logarithm (base e) of the separation evidence, a
version of the evidence where the log of the ratio of the determinants
of the posterior to prior covariance matrices is replaced by the sum
over all subbases of such logs of ratios.
"""
if not hasattr(self, '_log_separation_evidence'):
self._log_separation_evidence = self.log_evidence -\
(self.log_parameter_covariance_determinant_ratio / 2.) +\
(self.subbasis_log_parameter_covariance_determinant_ratios_sum\
/ 2.)
return self._log_separation_evidence
@property
def log_separation_evidence_per_data_channel(self):
"""
Property storing the log_separation_evidence divided by the number of
data channels. For more information, see the log_separation_evidence
property.
"""
if not hasattr(self, '_log_separation_evidence_per_data_channel'):
self._log_separation_evidence_per_data_channel =\
self.log_separation_evidence / self.num_channels
return self._log_separation_evidence_per_data_channel
@property
def separation_evidence(self):
"""
Property storing the separation evidence, a version of the evidence
where the log of the ratio of the determinants of the posterior to
prior covariance matrices is replaced by the sum over all subbases of
such logs of ratios.
"""
if not hasattr(self, '_separation_evidence'):
self._separation_evidence = np.exp(self.log_separation_evidence)
return self._separation_evidence
@property
def separation_evidence_per_data_channel(self):
"""
Property storing the average (geometric mean) factor by which each data
channel affects the separation evidence.
"""
if not hasattr(self, '_separation_evidence_per_data_channel'):
self._separation_evidence_per_data_channel =\
np.exp(self.log_separation_evidence_per_data_channel)
return self._separation_evidence_per_data_channel
@property
def subbasis_log_parameter_covariance_determinant_ratios_sum(self):
"""
Property storing the sum of the logarithms (base e) of the ratios of
the posterior parameter covariance matrices to the prior parameter
covariance matrices.
"""
if not hasattr(self,\
'_subbasis_log_parameter_covariance_determinant_ratios_sum'):
self._subbasis_log_parameter_covariance_determinant_ratios_sum =\
sum([self.subbasis_log_parameter_covariance_determinant_ratio(\
name=name) for name in self.names])
return self._subbasis_log_parameter_covariance_determinant_ratios_sum
def subbasis_prior_significance(self, name=None):
"""
Finds and returns the quantity: mu^T Lambda^{-1} mu, where mu is the
prior subbasis parameter mean and Lambda is the prior subbasis
parameter covariance.
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self, '_subbasis_prior_significances'):
self._subbasis_prior_significances = {}
if name not in self._subbasis_prior_significances:
prior = self.priors[name + '_prior']
mean = prior.internal_mean.A[0]
inverse_covariance = prior.inverse_covariance.A
self._subbasis_prior_significances[name] =\
np.dot(mean, np.dot(inverse_covariance, mean))
return self._subbasis_prior_significances[name]
def subbasis_parameter_inverse_covariance(self, name=None):
"""
Finds the inverse of the marginalized covariance matrix corresponding
to the given subbasis.
name: string identifying subbasis under concern
"""
if not hasattr(self, '_subbasis_parameter_inverse_covariances'):
self._subbasis_parameter_inverse_covariances = {}
if name not in self._subbasis_parameter_inverse_covariances:
self._subbasis_parameter_inverse_covariances[name] =\
la.inv(self.subbasis_parameter_covariance(name=name))
return self._subbasis_parameter_inverse_covariances[name]
def subbases_overlap_matrix(self, row_name=None, column_name=None):
"""
Creates a view into the overlap matrix between the given subbases.
row_name: the (string) name of the subbasis whose parameter number will
be represented by the row of the returned matrix.
column_name: the (string) name of the subbasis whose parameter number
will be represented by the column of the returned matrix
returns: n x m matrix where n is the number of basis vectors in the row
subbasis and m is the number of basis vectors in the column
subbasis in the form of a 2D numpy.ndarray
"""
row_slice = self.basis_sum.slices_by_name[row_name]
column_slice = self.basis_sum.slices_by_name[column_name]
return self.basis_overlap_matrix[:,column_slice][row_slice]
def subbasis_parameter_covariance(self, name=None):
"""
Finds and returns the portion of the parameter covariance matrix
associated with the given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns 2D numpy.ndarray of shape (k, k) where k is the number of basis
vectors in the subbasis
"""
if not hasattr(self, '_subbasis_parameter_covariances'):
self._subbasis_parameter_covariances = {}
if name not in self._subbasis_parameter_covariances:
subbasis_slice = self.basis_sum.slices_by_name[name]
self._subbasis_parameter_covariances[name] =\
self.parameter_covariance[:,subbasis_slice][subbasis_slice]
return self._subbasis_parameter_covariances[name]
def subbasis_log_parameter_covariance_determinant(self, name=None):
"""
Finds the logarithm (base e) of the determinant of the posterior
parameter covariance matrix for the given subbasis.
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self,\
'_subbasis_log_parameter_covariance_determinants'):
self._subbasis_log_parameter_covariance_determinants = {}
if name not in self._subbasis_log_parameter_covariance_determinants:
self._subbasis_log_parameter_covariance_determinants[name] =\
la.slogdet(self.subbasis_parameter_covariance(name=name))[1]
return self._subbasis_log_parameter_covariance_determinants[name]
def subbasis_log_prior_covariance_determinant(self, name=None):
"""
Finds the logarithm (base e) of the determinant of the prior parameter
covariance matrix for the given subbasis.
name: string identifying subbasis under concern
returns: single float number
"""
if type(name) is type(None):
return self.log_prior_covariance_determinant
if not hasattr(self, '_subbasis_log_prior_covariance_determinants'):
self._subbasis_log_prior_covariance_determinants = {}
if name not in self._subbasis_log_prior_covariance_determinants:
self._subbasis_log_prior_covariance_determinants[name] =\
la.slogdet(self.priors[name + '_prior'].covariance.A)[1]
return self._subbasis_log_prior_covariance_determinants[name]
def subbasis_log_parameter_covariance_determinant_ratio(self, name=None):
"""
Finds logarithm (base e) of the ratio of the determinant of the
posterior covariance matrix to the determinant of the prior covariance
matrix for the given subbasis.
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self,\
'_subbasis_log_parameter_covariance_determinant_ratios'):
self._subbasis_log_parameter_covariance_determinant_ratios = {}
if name not in\
self._subbasis_log_parameter_covariance_determinant_ratios:
self._subbasis_log_parameter_covariance_determinant_ratios[name] =\
self.subbasis_log_parameter_covariance_determinant(name=name)-\
self.subbasis_log_prior_covariance_determinant(name=name)
return self._subbasis_log_parameter_covariance_determinant_ratios[name]
def subbasis_parameter_covariance_determinant_ratio(self, name=None):
"""
Finds the ratio of the determinant of the posterior covariance matrix
to the determinant of the prior covariance matrix for the given
subbasis.
name: string identifying subbasis under concern
returns: single non-negative float number
"""
if not hasattr(self,\
'_subbasis_parameter_covariance_determinant_ratios'):
self._subbasis_parameter_covariance_determinant_ratios = {}
if type(name) is type(None):
self._subbasis_parameter_covariance_determinant_ratios[name] =\
np.exp(\
self.subbasis_log_parameter_covariance_determinant_ratios_sum)
elif name not in\
self._subbasis_parameter_covariance_determinant_ratios:
self._subbasis_parameter_covariance_determinant_ratios[name] =\
np.exp(\
self.subbasis_log_parameter_covariance_determinant_ratio(\
name=name))
return self._subbasis_parameter_covariance_determinant_ratios[name]
def subbasis_channel_error(self, name=None):
"""
Finds the error (in data channel space) of the fit by a given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 1D numpy.ndarray of the same length as the basis vectors of
the subbasis (which may or may not be different than the
length of the expanded basis vectors).
"""
if type(name) is type(None):
return self.channel_error
if not hasattr(self, '_subbasis_channel_errors'):
self._subbasis_channel_errors = {}
if name not in self._subbasis_channel_errors:
basis = self.basis_sum[name].basis
covariance_times_basis =\
np.dot(self.subbasis_parameter_covariance(name=name), basis)
self._subbasis_channel_errors[name] =\
np.sqrt(np.sum(covariance_times_basis * basis, axis=0))
return self._subbasis_channel_errors[name]
def subbasis_parameter_mean(self, name=None):
"""
Finds the posterior parameter mean for a subbasis. This is just a view
into the view posterior parameter mean.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 1D numpy.ndarray containing the parameters for the given
subbasis
"""
if not hasattr(self, '_subbasis_parameter_means'):
self._subbasis_parameter_means = {}
if name not in self._subbasis_parameter_means:
self._subbasis_parameter_means[name] =\
self.parameter_mean[...,self.basis_sum.slices_by_name[name]]
return self._subbasis_parameter_means[name]
def subbasis_channel_mean(self, name=None):
"""
The estimate of the contribution to the data from the given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 1D numpy.ndarray containing the channel-space estimate from
the given subbasis
"""
if not hasattr(self, '_subbasis_channel_means'):
self._subbasis_channel_means = {}
if name not in self._subbasis_channel_means:
self._subbasis_channel_means[name] =\
np.dot(self.subbasis_parameter_mean(name=name),\
self.basis_sum[name].basis) + self.basis_sum[name].translation
return self._subbasis_channel_means[name]
def subbasis_channel_RMS(self, name=None):
"""
Calculates and returns the RMS channel error on the estimate of the
contribution to the data from the given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: single float number RMS
"""
if not hasattr(self, '_subbasis_channel_RMSs'):
self._subbasis_channel_RMSs = {}
if name not in self._subbasis_channel_RMSs:
self._subbasis_channel_RMSs[name] = np.sqrt(\
np.mean(np.power(self.subbasis_channel_error(name=name), 2)))
return self._subbasis_channel_RMSs[name]
def subbasis_separation_statistic(self, name=None):
"""
Finds the separation statistic associated with the given subbasis. The
separation statistic is essentially an RMS'd error expansion factor.
name: name of the subbasis for which to find the separation statistic
"""
if not hasattr(self, '_subbasis_separation_statistics'):
self._subbasis_separation_statistics = {}
if name not in self._subbasis_separation_statistics:
weighted_basis =\
self.weight(self.basis_sum[name].expanded_basis, -1)
stat = np.dot(weighted_basis, weighted_basis.T)
stat = np.sum(stat * self.subbasis_parameter_covariance(name=name))
stat = np.sqrt(stat / self.degrees_of_freedom)
self._subbasis_separation_statistics[name] = stat
return self._subbasis_separation_statistics[name]
    def subbasis_channel_bias(self, name=None, true_curve=None):
        """
        Calculates and returns the bias on the estimate from the given
        subbasis using the given curve as a reference.
        name: (string) name of the subbasis under consideration. if None is
              given, the full basis is used and true_curve must be None.
        true_curve: 1D numpy.ndarray of the same length as the basis vectors
                    in the subbasis channel space
        returns: 1D numpy.ndarray in channel space containing the difference
                 between the estimate of the data's contribution from the
                 given subbasis and the given true curve
        raises: ValueError when name and true_curve are inconsistently given
        """
        if type(name) is type(None):
            if type(true_curve) is type(None):
                # no subbasis specified: the full-basis bias applies
                return self.channel_bias
            else:
                raise ValueError("true_curve should only be given to " +\
                    "subbasis_channel_bias if the name of a " +\
                    "subbasis is specified.")
        else:
            if type(true_curve) is type(None):
                raise ValueError("true_curve must be given to " +\
                    "subbasis_channel_bias if the name of a " +\
                    "subbasis is specified.")
            # broadcast a single true curve against many data-curve fits
            if self.multiple_data_curves and (true_curve.ndim == 1):
                return true_curve[np.newaxis,:] -\
                    self.subbasis_channel_mean(name=name)
            else:
                return true_curve - self.subbasis_channel_mean(name=name)
def subbasis_weighted_bias(self, name=None, true_curve=None):
"""
The bias of the contribution of a given subbasis to the data. This
function requires knowledge of the "truth".
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
true_curve: 1D numpy.ndarray of the same length as the basis vectors in
the subbasis
returns: 1D numpy.ndarray of weighted bias values
"""
subbasis_channel_bias =\
self.subbasis_channel_bias(name=name, true_curve=true_curve)
subbasis_channel_error = self.subbasis_channel_error(name=name)
if self.multiple_data_curves:
return subbasis_channel_bias / subbasis_channel_error[np.newaxis,:]
else:
return subbasis_channel_bias / subbasis_channel_error
    def subbasis_bias_statistic(self, name=None, true_curve=None,\
        norm_by_dof=False):
        """
        The bias statistic of the fit to the contribution of the given
        subbasis: the sum of squared error-weighted biases, normalized either
        by the number of channels or by the degrees of freedom.
        name: (string) name of the subbasis under consideration. if None is
              given, the full basis is used.
        true_curve: 1D numpy.ndarray of the same length as the basis vectors in
                    the subbasis
        norm_by_dof: if True, the summed squared error-weighted bias is
                              normalized by the subbasis degrees of freedom
                     if False (default), it is normalized by the number of
                              channels in the subbasis
        returns: single float (or 1D array, one value per data curve, if this
                 Fitter contains multiple data curves)
        """
        weighted_bias = self.subbasis_weighted_bias(name=name,\
            true_curve=true_curve)
        # Number of channels in the subbasis channel space.
        normalization_factor = weighted_bias.shape[-1]
        if norm_by_dof:
            # Subtract the parameter count to normalize by degrees of freedom.
            normalization_factor -= self.basis_sum[name].num_basis_vectors
        if self.multiple_data_curves:
            # One statistic per data curve.
            unnormalized = np.sum(weighted_bias ** 2, axis=1)
        else:
            unnormalized = np.dot(weighted_bias, weighted_bias)
        return unnormalized / normalization_factor
    def bias_score(self, training_sets, max_block_size=2**20,\
        num_curves_to_score=None, bases_to_score=None):
        """
        Evaluates the candidate basis_sum given the available training sets.
        training_sets: dictionary of training_sets indexed by basis name
        max_block_size: number of floats in the largest possible training set
                        block
        num_curves_to_score: total number of training set curves to consider
        bases_to_score: the names of the subbases to include in the scoring
                        (all bases are always used, the names not in
                        bases_to_score simply do not have their
                        subbasis_bias_statistic calculated/included)
        returns: scalar value of Delta
        """
        if len(self.basis_sum.names) != len(training_sets):
            raise ValueError("There must be the same number of basis sets " +\
                             "as training sets.")
        # An empty or None bases_to_score means "score every subbasis".
        if (type(bases_to_score) is type(None)) or (not bases_to_score):
            bases_to_score = self.basis_sum.names
        score = 0.
        expanders = [basis.expander for basis in self.basis_sum]
        # Iterate over the training sets in memory-bounded blocks of summed
        # curves, also retrieving the per-subbasis constituent curves.
        iterator = TrainingSetIterator(training_sets, expanders=expanders,\
            max_block_size=max_block_size, mode='add',\
            curves_to_return=num_curves_to_score, return_constituents=True)
        for (block, constituents) in iterator:
            num_channels = block.shape[1]
            # Fit each block with a fresh Fitter sharing this one's priors.
            fitter = Fitter(self.basis_sum, block, self.error, **self.priors)
            for basis_to_score in bases_to_score:
                true_curve =\
                    constituents[self.basis_sum.names.index(basis_to_score)]
                result = fitter.subbasis_bias_statistic(\
                    name=basis_to_score, true_curve=true_curve)
                score += np.sum(result)
        # NOTE(review): num_channels is bound inside the loop above; if the
        # iterator yields no blocks this raises NameError -- confirm the
        # iterator always produces at least one block.
        if type(num_curves_to_score) is type(None):
            # NOTE(review): iterating training_sets yields keys if it is truly
            # a dict (as the docstring says), making ts.shape fail --
            # presumably a sequence of arrays is passed here; confirm.
            num_curves_to_score =\
                np.prod([ts.shape[0] for ts in training_sets])
        score = score / (num_curves_to_score * num_channels)
        return score
    def fill_hdf5_group(self, root_group, data_link=None, error_link=None,\
        basis_links=None, expander_links=None, prior_mean_links=None,\
        prior_covariance_links=None, save_channel_estimates=False):
        """
        Fills the given hdf5 file group with data about the inputs and results
        of this Fitter.
        root_group: the hdf5 file group to fill (only required argument)
        data_link: link to existing data dataset, if it exists (see
                   create_hdf5_dataset docs for info about accepted formats)
        error_link: link to existing error dataset, if it exists (see
                    create_hdf5_dataset docs for info about accepted formats)
        basis_links: list of links to basis functions saved elsewhere (see
                     create_hdf5_dataset docs for info about accepted formats)
        expander_links: list of links to existing saved Expander (see
                        create_hdf5_dataset docs for info about accepted
                        formats)
        prior_mean_links: dict of links to existing saved prior means (see
                          create_hdf5_dataset docs for info about accepted
                          formats)
        prior_covariance_links: dict of links to existing saved prior
                                covariances (see create_hdf5_dataset docs for
                                info about accepted formats)
        save_channel_estimates: if True, the channel-space means and errors
                                (full and per-subbasis) are also written to
                                the posterior group. default: False
        """
        self.save_data(root_group, data_link=data_link)
        self.save_error(root_group, error_link=error_link)
        # Record the number of parameters in each subbasis.
        group = root_group.create_group('sizes')
        for name in self.names:
            group.attrs[name] = self.sizes[name]
        # Posterior parameter-space (and optionally channel-space) results.
        group = root_group.create_group('posterior')
        create_hdf5_dataset(group, 'parameter_mean', data=self.parameter_mean)
        create_hdf5_dataset(group, 'parameter_covariance',\
            data=self.parameter_covariance)
        if save_channel_estimates:
            create_hdf5_dataset(group, 'channel_mean', data=self.channel_mean)
            create_hdf5_dataset(group, 'channel_error', data=self.channel_error)
        for name in self.names:
            subgroup = group.create_group(name)
            subbasis_slice = self.basis_sum.slices_by_name[name]
            # Per-subbasis results are stored as slices (links) into the full
            # posterior datasets rather than as copies.
            create_hdf5_dataset(subgroup, 'parameter_covariance',\
                link=(group['parameter_covariance'],[subbasis_slice]*2))
            mean_slices =\
                (((slice(None),) * (self.data.ndim - 1)) + (subbasis_slice,))
            create_hdf5_dataset(subgroup, 'parameter_mean',\
                link=(group['parameter_mean'],mean_slices))
            if save_channel_estimates:
                create_hdf5_dataset(subgroup, 'channel_mean',\
                    data=self.subbasis_channel_mean(name=name))
                create_hdf5_dataset(subgroup, 'channel_error',\
                    data=self.subbasis_channel_error(name=name))
        self.save_basis_sum(root_group, basis_links=basis_links,\
            expander_links=expander_links)
        # Scalar fit-quality summaries stored as attributes.
        root_group.attrs['degrees_of_freedom'] = self.degrees_of_freedom
        root_group.attrs['BPIC'] = self.BPIC
        root_group.attrs['DIC'] = self.DIC
        root_group.attrs['AIC'] = self.AIC
        root_group.attrs['BIC'] = self.BIC
        root_group.attrs['normalized_likelihood_bias_statistic'] =\
            self.normalized_likelihood_bias_statistic
        root_group.attrs['normalized_bias_statistic'] =\
            self.normalized_bias_statistic
        self.save_priors(root_group, prior_mean_links=prior_mean_links,\
            prior_covariance_links=prior_covariance_links)
        if self.has_priors:
            root_group.attrs['log_evidence_per_data_channel'] =\
                self.log_evidence_per_data_channel
def plot_overlap_matrix(self, title='Overlap matrix', fig=None, ax=None,\
show=True, **kwargs):
"""
Plots the overlap matrix of the total basis.
title: (Optional) the title of the plot. default: 'Overlap matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
image = ax.imshow(self.basis_overlap_matrix, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
    def plot_parameter_covariance(self, title='Covariance matrix', fig=None,\
        ax=None, show=True, **kwargs):
        """
        Plots the posterior parameter covariance matrix.
        NOTE: this method is redefined later in this class with a different
        signature (one taking a name argument); since the later definition
        wins, this version is shadowed and unreachable on instances.
        title: (Optional) the title of the plot. default: 'Covariance matrix'
        fig: the matplotlib.figure object on which the plot should appear
        ax: the matplotlib.axes object on which the plot should appear
        show: if True, matplotlib.pyplot.show() is called before this function
              returns
        **kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
        """
        # Default imshow settings; the caller's kwargs take precedence.
        def_kwargs = {'interpolation': None}
        def_kwargs.update(**kwargs)
        if (type(fig) is type(None)) and (type(ax) is type(None)):
            fig = pl.figure()
            ax = fig.add_subplot(111)
        image = ax.imshow(self.parameter_covariance, **def_kwargs)
        pl.colorbar(image)
        ax.set_title(title)
        if show:
            pl.show()
        else:
            return ax
    def plot_subbasis_fit(self, nsigma=1, name=None, which_data=None,\
        true_curve=None, subtract_truth=False, shorter_error=None,\
        x_values=None, title=None, xlabel='x', ylabel='y', fig=None, ax=None,\
        show_noise_level=False, noise_level_alpha=0.5, full_error_alpha=0.2,\
        colors='b', full_error_first=True, yscale='linear', show=False):
        """
        Plots the fit of the contribution to the data from a given subbasis.
        nsigma: number of error standard deviations spanned by the bands
        name: (string) name of the subbasis under consideration. if None is
              given, the full basis is used.
        which_data: if this Fitter contains multiple data curves, index of the
                    curve to plot (defaults to 0 in that case)
        true_curve: 1D numpy.ndarray of the same length as the basis vectors in
                    the subbasis
        subtract_truth: Boolean which determines whether the residuals of a fit
                        are plotted or just the curves. Can only be True if
                        true_curve is given or name is None.
        shorter_error: 1D numpy.ndarray of the same length as the vectors of
                       the subbasis containing the error on the given subbasis
        x_values: (Optional) x_values to use for plot
        title: (Optional) the title of the plot
        xlabel, ylabel: axis labels. defaults: 'x' and 'y'
        fig: the matplotlib.figure object on which the plot should appear
        ax: the matplotlib.axes object on which the plot should appear
        show_noise_level: if True, a band showing the noise level is drawn
        noise_level_alpha, full_error_alpha: opacities of the two error bands
        colors: single color applied to all plot elements, or a sequence of
                three colors for (mean, full error band, noise band)
        full_error_first: if True, the full error band is drawn before (i.e.
                          beneath) the noise band
        yscale: matplotlib y-axis scale, e.g. 'linear' or 'log'
        show: If True, matplotlib.pyplot.show() is called before this function
              returns.
        """
        if self.multiple_data_curves and (type(which_data) is type(None)):
            which_data = 0
        if type(name) is type(None):
            mean = self.channel_mean
            error = self.channel_error
        else:
            mean = self.subbasis_channel_mean(name=name)
            error = self.subbasis_channel_error(name=name)
        # NOTE(review): basestring presumably comes from a py2/py3
        # compatibility import elsewhere in this module -- confirm.
        if isinstance(colors, basestring):
            colors = [colors] * 3
        if self.multiple_data_curves:
            mean = mean[which_data]
        if (type(fig) is type(None)) and (type(ax) is type(None)):
            fig = pl.figure()
            ax = fig.add_subplot(111)
        if type(x_values) is type(None):
            x_values = np.arange(len(mean))
        # For the full basis, the data itself serves as the "true" curve.
        if (type(true_curve) is type(None)) and (type(name) is type(None)):
            if self.multiple_data_curves:
                true_curve = self.data[which_data]
            else:
                true_curve = self.data
        if (type(true_curve) is type(None)) and subtract_truth:
            raise ValueError("Truth cannot be subtracted because it is not " +\
                             "known. Supply it as the true_curve argument " +\
                             "if you wish for it to be subtracted.")
        if subtract_truth:
            # Residual mode: plot everything relative to the true curve.
            to_subtract = true_curve
            ax.plot(x_values, np.zeros_like(x_values), color='k', linewidth=2,\
                label='true')
        else:
            to_subtract = np.zeros_like(x_values)
            if type(true_curve) is not type(None):
                ax.plot(x_values, true_curve, color='k', linewidth=2,\
                    label='true')
        ax.plot(x_values, mean - to_subtract, color=colors[0], linewidth=2,\
            label='mean')
        if full_error_first:
            ax.fill_between(x_values, mean - to_subtract - (nsigma * error),\
                mean - to_subtract + (nsigma * error), alpha=full_error_alpha,\
                color=colors[1])
        if show_noise_level:
            # Prefer an explicitly supplied error band; otherwise fall back to
            # this Fitter's own noise level when the shapes allow it.
            if type(shorter_error) is not type(None):
                ax.fill_between(x_values,\
                    mean - to_subtract - (nsigma * shorter_error),\
                    mean - to_subtract + (nsigma * shorter_error),\
                    alpha=noise_level_alpha, color=colors[2])
            elif len(mean) == self.num_channels:
                if self.non_diagonal_noise_covariance:
                    noise_error = np.sqrt(self.error.diagonal)
                    ax.fill_between(x_values,\
                        mean - to_subtract - (nsigma * noise_error),\
                        mean - to_subtract + (nsigma * noise_error),\
                        alpha=noise_level_alpha, color=colors[2])
                else:
                    ax.fill_between(x_values,\
                        mean - to_subtract - (nsigma * self.error),\
                        mean - to_subtract + (nsigma * self.error),\
                        alpha=noise_level_alpha, color=colors[2])
        if not full_error_first:
            ax.fill_between(x_values, mean - to_subtract - (nsigma * error),\
                mean - to_subtract + (nsigma * error), alpha=full_error_alpha,\
                color=colors[1])
        ax.set_yscale(yscale)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        if type(title) is type(None):
            if subtract_truth:
                ax.set_title('Fit residual')
            else:
                ax.set_title('Fit curve')
        else:
            ax.set_title(title)
        if show:
            pl.show()
        else:
            return ax
def plot_overlap_matrix_block(self, row_name=None, column_name=None,\
title='Overlap matrix', fig=None, ax=None, show=True, **kwargs):
"""
Plots a block of the overlap matrix between the given subbases.
row_name: the (string) name of the subbasis whose parameter number will
be represented by the row of the returned matrix.
column_name: the (string) name of the subbasis whose parameter number
will be represented by the column of the returned matrix
title: (Optional) the title of the plot. default: 'Overlap matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
to_show = self.subbases_overlap_matrix(row_name=row_name,\
column_name=column_name)
image = ax.imshow(to_show, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
def plot_parameter_covariance(self, name=None, title='Covariance matrix',\
fig=None, ax=None, show=True, **kwargs):
"""
Plots the posterior parameter covariance matrix.
name: the (string) name of the subbasis whose parameter number
will be represented by the rows and columns of the returned
matrix. If None, full parameter covariance is plotted.
Default: None
title: (Optional) the title of the plot. default: 'Overlap matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
to_show = self.subbasis_parameter_covariances[name]
image = ax.imshow(to_show, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
|
CU-NESSREPO_NAMEpylinexPATH_START.@pylinex_extracted@pylinex-master@pylinex@fitter@Fitter.py@.PATH_END.py
|
{
"filename": "multiplex_4_op.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/examples/custom_ops_doc/multiplex_4/multiplex_4_op.py",
"type": "Python"
}
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper for gen_multiplex_4_op.py.
This defines a public API (and provides a docstring for it) for the C++ Op
defined by multiplex_4_kernel.cc
"""
import tensorflow as tf
from tensorflow.python.platform import resource_loader
# Load the compiled kernel (multiplex_4_kernel.so) shipped next to this file
# and bind the op it generates.
_multiplex_4_module = tf.load_op_library(
    resource_loader.get_path_to_datafile("multiplex_4_kernel.so"))
# Handle to the raw C++ op; multiplex() below wraps it with a documented API.
examples_multiplex_dense = _multiplex_4_module.examples_multiplex_dense
def multiplex(cond, a, b, name=None):
  """Return elements chosen from `a` or `b` depending on `cond`.
  This is similar to `np.where` and `tf.where` if `cond` and `a` are tensors.
  This is similar to `np.select` if `cond` and `a` are lists of tensors.
  In either case, this is simplified to only handle the case of dense tensors,
  no optional parameters, no broadcasting, etc..
  >>> multiplex([True, False, False, True], [1,2,3,4], [100,200,300,400])
  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 200, 300, 4], ...)>
  >>> a1 = tf.constant([1, 2, 3, 4, 5], dtype=tf.int64)
  >>> a2 = tf.constant([6, 7, 8, 9, 10], dtype=tf.int64)
  >>> a3 = tf.constant([11, 12, 13, 14, 15], dtype=tf.int64)
  >>> b = tf.constant([101, 102, 103, 104, 105], dtype=tf.int64)
  >>> cond1 = tf.constant([False, False, True, False, False], dtype=bool)
  >>> cond2 = tf.constant([False, False, False, False, True], dtype=bool)
  >>> cond3 = tf.constant([True, False, True, False, True], dtype=bool)
  >>> multiplex_4_op.multiplex([cond1, cond2, cond3], [a1, a2, a3], b)
  <tf.Tensor: shape=(5,), ... numpy=array([ 11, 102, 3, 104, 10], ...)>
  Args:
    cond: tf.Tensor or list of tf.Tensor of type bool. Where True, yield `a`.
      When multiple corresponding `cond` elements are true, the first one
      encountered determines the result.
    a: tf.Tensor or list of tf.Tensor, each with the same type and shape as
      `b`.
    b: tf.Tensor or list of tf.Tensor with the same type and shape as `a`.
      Yielded where all corresponding `cond` values are False.
    name: An optional name for the op.
  Returns:
    A tf.Tensor with elements from `a` where `cond` is True, and elements
    from `b` elsewhere.
  """
  if isinstance(cond, (list, tuple)):
    # List form: pass the condition/value lists straight through to the op.
    return examples_multiplex_dense(
        cond=cond, a_values=a, b_values=b, name=name)
  # Tensor form (the "old" API): wrap the single tensors as one-element lists.
  return examples_multiplex_dense(
      cond=[cond], a_values=[a], b_values=b, name=name)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@examples@custom_ops_doc@multiplex_4@multiplex_4_op.py@.PATH_END.py
|
{
"filename": "base_parameters.py",
"repo_name": "psheehan/pdspy",
"repo_path": "pdspy_extracted/pdspy-master/pdspy/modeling/base_parameters.py",
"type": "Python"
}
|
# Default fit-parameter table for pdspy models. Each entry maps a parameter
# name to a dict with:
#   "fixed":  whether the parameter is held fixed (True) or fit
#   "value":  the value used when fixed; string values such as "gamma" or
#             "logR_in" appear to tie an entry to another parameter's value
#   "limits": [lower, upper] bounds used when the parameter is fit
# Some entries also carry "prior"/"sigma" describing a prior.
base_parameters = {
    # Stellar parameters.
    "logM_star":{"fixed":True, "value":0.0, "limits":[-1.,1.]},
    "T_star":{"fixed":True, "value":4000., "limits":[500.,10000.]},
    "logL_star":{"fixed":True, "value":0.0, "limits":[-1.,2.]},
    # Disk parameters.
    "disk_type":{"fixed":True, "value":"truncated", "limits":[0.,0.]},
    "logM_disk":{"fixed":True, "value":-4., "limits":[-10.,-2.5]},
    "logR_in":{"fixed":True, "value":-1., "limits":[-1.,4.]},
    "logR_disk":{"fixed":True, "value":2., "limits":[0.,4.]},
    "h_0":{"fixed":True, "value":0.1, "limits":[0.01,0.5]},
    "gamma":{"fixed":True, "value":1.0, "limits":[-0.5,2.0]},
    # "gamma_taper" defaults to following the "gamma" parameter.
    "gamma_taper":{"fixed":True, "value":"gamma", "limits":[-0.5,2.0]},
    "beta":{"fixed":True, "value":1.0, "limits":[0.5,1.5]},
    "logR_cav":{"fixed":True, "value":1.0, "limits":[-1.,3.]},
    "logdelta_cav":{"fixed":True, "value":0.0, "limits":[-4.,0.]},
    # Up to three gaps, each with location, width and depth.
    "logR_gap1":{"fixed":True, "value":1.0, "limits":[-1.,3.]},
    "w_gap1":{"fixed":True, "value":10., "limits":[1.,100.]},
    "logdelta_gap1":{"fixed":True, "value":0.0, "limits":[-4.,0.]},
    "logR_gap2":{"fixed":True, "value":1.4, "limits":[-1.,3.]},
    "w_gap2":{"fixed":True, "value":10., "limits":[1.,100.]},
    "logdelta_gap2":{"fixed":True, "value":0.0, "limits":[-4.,0.]},
    "logR_gap3":{"fixed":True, "value":1.8, "limits":[-1.,3.]},
    "w_gap3":{"fixed":True, "value":10., "limits":[1.,100.]},
    "logdelta_gap3":{"fixed":True, "value":0.0, "limits":[-4.,0.]},
    "f_M_large":{"fixed":True, "value":1., "limits":[0.05, 1.]},
    "logalpha_settle":{"fixed":True, "value":-2., "limits":[-5., 0.]},
    # Disk temperature parameters.
    "logT0":{"fixed":True, "value":2.5, "limits":[1.,3.]},
    "q":{"fixed":True, "value":0.25, "limits":[0.,1.]},
    "loga_turb":{"fixed":True, "value":-1.0, "limits":[-1.5,1.]},
    # Dartois temperature properties.
    "logTmid0":{"fixed":True, "value":2.0, "limits":[0.,3.]},
    "logTatm0":{"fixed":True, "value":2.5, "limits":[1.,3.]},
    "zq0":{"fixed":True, "value":0.1, "limits":[0.01,0.5]},
    "pltgas":{"fixed":True, "value":0.5, "limits":[0.,1.]},
    "delta":{"fixed":True, "value":1.0, "limits":[0.5,1.5]},
    # Envelope parameters.
    "envelope_type":{"fixed":True, "value":"none", "limits":[0.,0.]},
    "logM_env":{"fixed":True, "value":-3., "limits":[-10., -2.]},
    "logR_in_env":{"fixed":True, "value":"logR_in", "limits":[-1., 4.]},
    "logR_env":{"fixed":True, "value":3., "limits": [2.,5.]},
    "logR_c":{"fixed":True, "value":"logR_disk", "limits":[-1.,4.]},
    "f_cav":{"fixed":True, "value":0.5, "limits":[0.,1.]},
    "ksi":{"fixed":True, "value":1.0, "limits":[0.5,1.5]},
    # NOTE(review): "45" is a string while other fixed angles are numeric;
    # string values elsewhere reference other parameter names and "45" is
    # not a parameter name -- confirm this should be the number 45.
    "theta_open":{"fixed":True, "value":"45", "limits":[0.,90.]},
    "zoffset":{"fixed":True, "value":1, "limits":[0.,5.]},
    "gamma_env":{"fixed":True, "value":0., "limits":[-0.5,2.0]},
    # Envelope temperature parameters.
    "logT0_env":{"fixed":True, "value":2.5, "limits":[1.,3.5]},
    "q_env":{"fixed":True, "value":0.25, "limits":[0.,1.5]},
    "loga_turb_env":{"fixed":True, "value":-1.0, "limits":[-1.5,1.]},
    # Ambient medium?
    "ambient_medium":{"fixed":True, "value":False, "limits":[0.,0.]},
    # Dust parameters.
    "dust_file":{"fixed":True, "value":"pollack_new.hdf5", "limits":[0.,0.]},
    "loga_min":{"fixed":True, "value":-1.3, "limits":[0.,5.]},
    "loga_max":{"fixed":True, "value":0., "limits":[0.,5.]},
    "p":{"fixed":True, "value":3.5, "limits":[2.5,4.5]},
    "na":{"fixed":True, "value":100, "limits":[0,1000]},
    "envelope_dust":{"fixed":True, "value":"pollack_new.hdf5", "limits":[0.,0.]},
    # Gas parameters.
    "gas_file1":{"fixed":True, "value":"co.dat", "limits":[0.,0.]},
    "logabundance1":{"fixed":True, "value":-4., "limits":[-6.,-2.]},
    "freezeout1":{"fixed":True, "value":0., "limits":[0.,40.]},
    "mu":{"fixed":True, "value":2.37, "limits":[0.,0.]},
    # Viewing parameters.
    "i":{"fixed":True, "value":45., "limits":[0.,180.]},
    "pa":{"fixed":True, "value":0., "limits":[0.,360.]},
    "x0":{"fixed":True, "value":0., "limits":[-0.1,0.1]},
    "y0":{"fixed":True, "value":0., "limits":[-0.1,0.1]},
    "dpc":{"fixed":True, "value":140., "prior":"box", "sigma":0., "limits":[1.,1e6]},
    "Ak":{"fixed":True, "value":0., "limits":[0.,1.]},
    "v_sys":{"fixed":True, "value":5., "limits":[0.,10.]},
    "docontsub":{"fixed":True, "value":False, "limits":[0.,0.]},
    # Gas extinction parameters.
    "tau0":{"fixed":True, "value":0., "limits":[0.,10.]},
    "v_ext":{"fixed":True, "value":4., "limits":[2.,6.]},
    "sigma_vext":{"fixed":True, "value":1.0, "limits":[0.01,5.]},
    # Free-free emission.
    "logF_nu_ff":{"fixed":True, "value":-20., "limits":[-30.,0.]},
    "lognu_turn":{"fixed":True, "value":0., "limits":[-1.0,2.]},
    "pl_turn":{"fixed":True, "value":0.6, "limits":[0.,2.]},
    # Nuisance parameters.
    "flux_unc1":{"fixed":True, "value":1., "prior":"box", "sigma":0., "limits":[0.5,1.5]},
    "flux_unc2":{"fixed":True, "value":1., "prior":"box", "sigma":0., "limits":[0.5,1.5]},
    "flux_unc3":{"fixed":True, "value":1., "prior":"box", "sigma":0., "limits":[0.5,1.5]},
}
|
psheehanREPO_NAMEpdspyPATH_START.@pdspy_extracted@pdspy-master@pdspy@modeling@base_parameters.py@.PATH_END.py
|
{
"filename": "registry.py",
"repo_name": "hgrecco/pint",
"repo_path": "pint_extracted/pint-master/pint/facets/nonmultiplicative/registry.py",
"type": "Python"
}
|
"""
pint.facets.nonmultiplicative.registry
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2022 by Pint Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import annotations
from typing import Any, Generic, TypeVar
from ...compat import TypeAlias
from ...errors import DimensionalityError, UndefinedUnitError
from ...util import UnitsContainer, logger
from ..plain import GenericPlainRegistry, QuantityT, UnitDefinition, UnitT
from . import objects
from .definitions import OffsetConverter, ScaleConverter
# Generic type of the value passed through (and returned by) _convert.
T = TypeVar("T")
class GenericNonMultiplicativeRegistry(
    Generic[QuantityT, UnitT], GenericPlainRegistry[QuantityT, UnitT]
):
    """Handle of non multiplicative units (e.g. Temperature).
    Capabilities:
    - Register non-multiplicative units and their relations.
    - Convert between non-multiplicative units.
    Parameters
    ----------
    default_as_delta : bool
        If True, non-multiplicative units are interpreted as
        their *delta* counterparts in multiplications.
    autoconvert_offset_to_baseunit : bool
        If True, non-multiplicative units are
        converted to plain units in multiplications.
    """
    def __init__(
        self,
        default_as_delta: bool = True,
        autoconvert_offset_to_baseunit: bool = False,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        #: When performing a multiplication of units, interpret
        #: non-multiplicative units as their *delta* counterparts.
        self.default_as_delta = default_as_delta
        # Determines if quantities with offset units are converted to their
        # plain units on multiplication and division.
        self.autoconvert_offset_to_baseunit = autoconvert_offset_to_baseunit
    def parse_units_as_container(
        self,
        input_string: str,
        as_delta: bool | None = None,
        case_sensitive: bool | None = None,
    ) -> UnitsContainer:
        """Parse a units expression, defaulting ``as_delta`` to this
        registry's ``default_as_delta`` setting before delegating to the
        plain registry implementation.
        """
        if as_delta is None:
            as_delta = self.default_as_delta
        return super().parse_units_as_container(input_string, as_delta, case_sensitive)
    def _add_unit(self, definition: UnitDefinition) -> None:
        """Register *definition*; for offset units, also autogenerate the
        multiplicative ``delta_``-prefixed companion unit (e.g. delta_degC).
        """
        super()._add_unit(definition)
        # Multiplicative and logarithmic units need no delta companion.
        if definition.is_multiplicative:
            return
        if definition.is_logarithmic:
            return
        if not isinstance(definition.converter, OffsetConverter):
            logger.debug(
                "Cannot autogenerate delta version for a unit in "
                "which the converter is not an OffsetConverter"
            )
            return
        delta_name = "delta_" + definition.name
        if definition.symbol:
            delta_symbol = "Δ" + definition.symbol
        else:
            delta_symbol = None
        # Both "Δ"- and "delta_"-prefixed aliases are accepted.
        delta_aliases = tuple("Δ" + alias for alias in definition.aliases) + tuple(
            "delta_" + alias for alias in definition.aliases
        )
        delta_reference = self.UnitsContainer(
            {ref: value for ref, value in definition.reference.items()}
        )
        # The delta unit is purely multiplicative: it keeps the scale of the
        # offset converter but drops the offset.
        delta_def = UnitDefinition(
            delta_name,
            delta_symbol,
            delta_aliases,
            ScaleConverter(definition.converter.scale),
            delta_reference,
        )
        super()._add_unit(delta_def)
    def _is_multiplicative(self, unit_name: str) -> bool:
        """True if the unit is multiplicative.
        Parameters
        ----------
        unit_name
            Name of the unit to check.
            Can be prefixed, pluralized or even an alias
        Raises
        ------
        UndefinedUnitError
            If the unit is not in the registry.
        """
        if unit_name in self._units:
            return self._units[unit_name].is_multiplicative
        # If the unit is not in the registry might be because it is not
        # registered with its prefixed version.
        # TODO: Might be better to register them.
        names = self.parse_unit_name(unit_name)
        assert len(names) == 1
        _, base_name, _ = names[0]
        try:
            return self._units[base_name].is_multiplicative
        except KeyError:
            raise UndefinedUnitError(unit_name)
    def _validate_and_extract(self, units: UnitsContainer) -> str | None:
        """Used to check if a given units is suitable for a simple
        conversion.
        Return None if all units are non-multiplicative
        Return the unit name if a single non-multiplicative unit is found
        and is raised to a power equals to 1.
        Otherwise, raise an Exception.
        Parameters
        ----------
        units
            Compound dictionary.
        Raises
        ------
        ValueError
            If the more than a single non-multiplicative unit is present,
            or a single one is present but raised to a power different from 1.
        """
        # TODO: document what happens if autoconvert_offset_to_baseunit
        # TODO: Clarify docs
        # u is for unit, e is for exponent
        nonmult_units = [
            (u, e) for u, e in units.items() if not self._is_multiplicative(u)
        ]
        # Let's validate source offset units
        if len(nonmult_units) > 1:
            # More than one src offset unit is not allowed
            raise ValueError("more than one offset unit.")
        elif len(nonmult_units) == 1:
            # A single src offset unit is present. Extract it
            # But check that:
            # - the exponent is 1
            # - is not used in multiplicative context
            nonmult_unit, exponent = nonmult_units.pop()
            if exponent != 1:
                raise ValueError("offset units in higher order.")
            if len(units) > 1 and not self.autoconvert_offset_to_baseunit:
                raise ValueError("offset unit used in multiplicative context.")
            return nonmult_unit
        return None
    def _add_ref_of_log_or_offset_unit(
        self, offset_unit: str, all_units: UnitsContainer
    ) -> UnitsContainer:
        """Return the units to use for the multiplicative part of a
        conversion involving *offset_unit*: the unit's reference units when
        it is logarithmic or an offset unit, otherwise *all_units* unchanged.
        """
        slct_unit = self._units[offset_unit]
        if slct_unit.is_logarithmic:
            # Extract reference unit
            slct_ref = slct_unit.reference
            # TODO: Check that reference is None
            # If reference unit is not dimensionless
            if slct_ref != UnitsContainer():
                # Extract reference unit
                (u, e) = [(u, e) for u, e in slct_ref.items()].pop()
                # Add it back to the unit list
                return all_units.add(u, e)
        if not slct_unit.is_multiplicative: # is offset unit
            # Extract reference unit
            return slct_unit.reference
        # Otherwise, return the units unmodified
        return all_units
    def _convert(
        self, value: T, src: UnitsContainer, dst: UnitsContainer, inplace: bool = False
    ) -> T:
        """Convert value from some source to destination units.
        In addition to what is done by the PlainRegistry,
        converts between non-multiplicative units.
        Parameters
        ----------
        value :
            value
        src : UnitsContainer
            source units.
        dst : UnitsContainer
            destination units.
        inplace :
            (Default value = False)
        Returns
        -------
        type
            converted value
        """
        # Conversion needs to consider if non-multiplicative (AKA offset
        # units) are involved. Conversion is only possible if src and dst
        # have at most one offset unit per dimension. Other rules are applied
        # by validate and extract.
        try:
            src_offset_unit = self._validate_and_extract(src)
        except ValueError as ex:
            raise DimensionalityError(src, dst, extra_msg=f" - In source units, {ex}")
        try:
            dst_offset_unit = self._validate_and_extract(dst)
        except ValueError as ex:
            raise DimensionalityError(
                src, dst, extra_msg=f" - In destination units, {ex}"
            )
        # convert if no offset units are present
        if not (src_offset_unit or dst_offset_unit):
            return super()._convert(value, src, dst, inplace)
        src_dim = self._get_dimensionality(src)
        dst_dim = self._get_dimensionality(dst)
        # If the source and destination dimensionality are different,
        # then the conversion cannot be performed.
        if src_dim != dst_dim:
            raise DimensionalityError(src, dst, src_dim, dst_dim)
        # clean src from offset units by converting to reference
        if src_offset_unit:
            # Mixing delta_* destination units with an offset source is
            # rejected as ambiguous.
            if any(u.startswith("delta_") for u in dst):
                raise DimensionalityError(src, dst)
            value = self._units[src_offset_unit].converter.to_reference(value, inplace)
            src = src.remove([src_offset_unit])
            # Add reference unit for multiplicative section
            src = self._add_ref_of_log_or_offset_unit(src_offset_unit, src)
        # clean dst units from offset units
        if dst_offset_unit:
            if any(u.startswith("delta_") for u in src):
                raise DimensionalityError(src, dst)
            dst = dst.remove([dst_offset_unit])
            # Add reference unit for multiplicative section
            dst = self._add_ref_of_log_or_offset_unit(dst_offset_unit, dst)
        # Convert non multiplicative units to the dst.
        value = super()._convert(value, src, dst, inplace, False)
        # Finally convert to offset units specified in destination
        if dst_offset_unit:
            value = self._units[dst_offset_unit].converter.from_reference(
                value, inplace
            )
        return value
class NonMultiplicativeRegistry(
    GenericNonMultiplicativeRegistry[
        objects.NonMultiplicativeQuantity[Any], objects.NonMultiplicativeUnit
    ]
):
    """Concrete non-multiplicative registry bound to its Quantity/Unit types."""
    Quantity: TypeAlias = objects.NonMultiplicativeQuantity[Any]
    Unit: TypeAlias = objects.NonMultiplicativeUnit
|
hgreccoREPO_NAMEpintPATH_START.@pint_extracted@pint-master@pint@facets@nonmultiplicative@registry.py@.PATH_END.py
|
{
"filename": "f_score_metrics.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/metrics/f_score_metrics.py",
"type": "Python"
}
|
from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.metrics.metric import Metric
@keras_export("keras.metrics.FBetaScore")
class FBetaScore(Metric):
    """Computes F-Beta score.
    Formula:
    ```python
    b2 = beta ** 2
    f_beta_score = (1 + b2) * (precision * recall) / (precision * b2 + recall)
    ```
    This is the weighted harmonic mean of precision and recall.
    Its output range is `[0, 1]`. It works for both multi-class
    and multi-label classification.
    Args:
        average: Type of averaging to be performed across per-class results
            in the multi-class case.
            Acceptable values are `None`, `"micro"`, `"macro"` and
            `"weighted"`. Defaults to `None`.
            If `None`, no averaging is performed and `result()` will return
            the score for each class.
            If `"micro"`, compute metrics globally by counting the total
            true positives, false negatives and false positives.
            If `"macro"`, compute metrics for each label,
            and return their unweighted mean.
            This does not take label imbalance into account.
            If `"weighted"`, compute metrics for each label,
            and return their average weighted by support
            (the number of true instances for each label).
            This alters `"macro"` to account for label imbalance.
            It can result in an score that is not between precision and recall.
        beta: Determines the weight of given to recall
            in the harmonic mean between precision and recall (see pseudocode
            equation above). Defaults to `1`.
        threshold: Elements of `y_pred` greater than `threshold` are
            converted to be 1, and the rest 0. If `threshold` is
            `None`, the argmax of `y_pred` is converted to 1, and the rest to 0.
        name: Optional. String name of the metric instance.
        dtype: Optional. Data type of the metric result.
    Returns:
        F-Beta Score: float.
    Example:
    >>> metric = keras.metrics.FBetaScore(beta=2.0, threshold=0.5)
    >>> y_true = np.array([[1, 1, 1],
    ...                    [1, 0, 0],
    ...                    [1, 1, 0]], np.int32)
    >>> y_pred = np.array([[0.2, 0.6, 0.7],
    ...                    [0.2, 0.6, 0.6],
    ...                    [0.6, 0.8, 0.0]], np.float32)
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    >>> result
    [0.3846154 , 0.90909094, 0.8333334 ]
    """
    def __init__(
        self,
        average=None,
        beta=1.0,
        threshold=None,
        name="fbeta_score",
        dtype=None,
    ):
        super().__init__(name=name, dtype=dtype)
        # Metric should be maximized during optimization.
        self._direction = "up"
        if average not in (None, "micro", "macro", "weighted"):
            raise ValueError(
                "Invalid `average` argument value. Expected one of: "
                "{None, 'micro', 'macro', 'weighted'}. "
                f"Received: average={average}"
            )
        if not isinstance(beta, float):
            raise ValueError(
                "Invalid `beta` argument value. "
                "It should be a Python float. "
                f"Received: beta={beta} of type '{type(beta)}'"
            )
        if beta <= 0.0:
            raise ValueError(
                "Invalid `beta` argument value. "
                "It should be > 0. "
                f"Received: beta={beta}"
            )
        if threshold is not None:
            if not isinstance(threshold, float):
                raise ValueError(
                    "Invalid `threshold` argument value. "
                    "It should be a Python float. "
                    f"Received: threshold={threshold} "
                    f"of type '{type(threshold)}'"
                )
            if threshold > 1.0 or threshold <= 0.0:
                raise ValueError(
                    "Invalid `threshold` argument value. "
                    "It should verify 0 < threshold <= 1. "
                    f"Received: threshold={threshold}"
                )
        self.average = average
        self.beta = beta
        self.threshold = threshold
        # axis=None reduces over everything ("micro" averaging); otherwise
        # we reduce over the batch axis only, keeping one count per class.
        self.axis = None
        # State variables are created lazily on the first `update_state`
        # call, once the number of classes is known (see `_build`).
        self._built = False
        if self.average != "micro":
            self.axis = 0
    def _build(self, y_true_shape, y_pred_shape):
        # Validate input rank/shape and create the confusion-matrix state
        # variables: one entry per class, or a scalar for "micro" averaging.
        if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
            raise ValueError(
                "FBetaScore expects 2D inputs with shape "
                "(batch_size, output_dim). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        if y_pred_shape[-1] is None or y_true_shape[-1] is None:
            raise ValueError(
                "FBetaScore expects 2D inputs with shape "
                "(batch_size, output_dim), with output_dim fully "
                "defined (not None). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        num_classes = y_pred_shape[-1]
        if self.average != "micro":
            init_shape = (num_classes,)
        else:
            init_shape = ()
        def _add_zeros_variable(name):
            # Helper: zero-initialized state variable of the proper shape.
            return self.add_variable(
                name=name,
                shape=init_shape,
                initializer=initializers.Zeros(),
                dtype=self.dtype,
            )
        self.true_positives = _add_zeros_variable("true_positives")
        self.false_positives = _add_zeros_variable("false_positives")
        self.false_negatives = _add_zeros_variable("false_negatives")
        # Per-class support (sum of y_true); used for "weighted" averaging.
        self.intermediate_weights = _add_zeros_variable("intermediate_weights")
        self._built = True
    def update_state(self, y_true, y_pred, sample_weight=None):
        # Accumulate confusion-matrix counts (TP/FP/FN and support) for
        # this batch into the state variables.
        y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)
        y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype)
        if not self._built:
            self._build(y_true.shape, y_pred.shape)
        if self.threshold is None:
            # No threshold: one-hot the row-wise maximum (argmax-like;
            # ties all become 1).
            threshold = ops.max(y_pred, axis=-1, keepdims=True)
            # make sure [0, 0, 0] doesn't become [1, 1, 1]
            # Use abs(x) > eps, instead of x != 0 to check for zero
            y_pred = ops.logical_and(
                y_pred >= threshold, ops.abs(y_pred) > 1e-9
            )
        else:
            y_pred = y_pred > self.threshold
        y_pred = ops.cast(y_pred, dtype=self.dtype)
        y_true = ops.cast(y_true, dtype=self.dtype)
        if sample_weight is not None:
            sample_weight = ops.convert_to_tensor(
                sample_weight, dtype=self.dtype
            )
        def _weighted_sum(val, sample_weight):
            # Reduce over self.axis (batch axis, or everything for
            # "micro"), optionally weighting each sample first.
            if sample_weight is not None:
                val = ops.multiply(val, ops.expand_dims(sample_weight, 1))
            return ops.sum(val, axis=self.axis)
        self.true_positives.assign(
            self.true_positives + _weighted_sum(y_pred * y_true, sample_weight)
        )
        self.false_positives.assign(
            self.false_positives
            + _weighted_sum(y_pred * (1 - y_true), sample_weight)
        )
        self.false_negatives.assign(
            self.false_negatives
            + _weighted_sum((1 - y_pred) * y_true, sample_weight)
        )
        self.intermediate_weights.assign(
            self.intermediate_weights + _weighted_sum(y_true, sample_weight)
        )
    def result(self):
        # epsilon() guards against division by zero when a class has no
        # predicted or actual positives.
        precision = ops.divide(
            self.true_positives,
            self.true_positives + self.false_positives + backend.epsilon(),
        )
        recall = ops.divide(
            self.true_positives,
            self.true_positives + self.false_negatives + backend.epsilon(),
        )
        precision = ops.convert_to_tensor(precision, dtype=self.dtype)
        recall = ops.convert_to_tensor(recall, dtype=self.dtype)
        mul_value = precision * recall
        add_value = ((self.beta**2) * precision) + recall
        mean = ops.divide(mul_value, add_value + backend.epsilon())
        f1_score = mean * (1 + (self.beta**2))
        if self.average == "weighted":
            # Weight per-class scores by each class's support.
            weights = ops.divide(
                self.intermediate_weights,
                ops.sum(self.intermediate_weights) + backend.epsilon(),
            )
            f1_score = ops.sum(f1_score * weights)
        elif self.average is not None:  # [micro, macro]
            f1_score = ops.mean(f1_score)
        return f1_score
    def get_config(self):
        """Returns the serializable config of the metric."""
        config = {
            "name": self.name,
            "dtype": self.dtype,
            "average": self.average,
            "beta": self.beta,
            "threshold": self.threshold,
        }
        base_config = super().get_config()
        return {**base_config, **config}
    def reset_state(self):
        # Zero out all accumulated counts between evaluation runs.
        for v in self.variables:
            v.assign(ops.zeros(v.shape, dtype=v.dtype))
@keras_export("keras.metrics.F1Score")
class F1Score(FBetaScore):
    r"""Computes F-1 Score.

    Formula:

    ```python
    f1_score = 2 * (precision * recall) / (precision + recall)
    ```

    This is the harmonic mean of precision and recall — the special case
    `beta=1` of `FBetaScore`. Its output range is `[0, 1]` and it works
    for both multi-class and multi-label classification.

    Args:
        average: Type of averaging to be performed on data.
            Acceptable values are `None`, `"micro"`, `"macro"`
            and `"weighted"`. Defaults to `None`.
            If `None`, no averaging is performed and `result()` will return
            the score for each class.
            If `"micro"`, compute metrics globally by counting the total
            true positives, false negatives and false positives.
            If `"macro"`, compute metrics for each label,
            and return their unweighted mean.
            This does not take label imbalance into account.
            If `"weighted"`, compute metrics for each label,
            and return their average weighted by support
            (the number of true instances for each label).
            This alters `"macro"` to account for label imbalance.
            It can result in an score that is not between precision and recall.
        threshold: Elements of `y_pred` greater than `threshold` are
            converted to be 1, and the rest 0. If `threshold` is
            `None`, the argmax of `y_pred` is converted to 1, and the rest to 0.
        name: Optional. String name of the metric instance.
        dtype: Optional. Data type of the metric result.

    Returns:
        F-1 Score: float.

    Example:

    >>> metric = keras.metrics.F1Score(threshold=0.5)
    >>> y_true = np.array([[1, 1, 1],
    ...                    [1, 0, 0],
    ...                    [1, 1, 0]], np.int32)
    >>> y_pred = np.array([[0.2, 0.6, 0.7],
    ...                    [0.2, 0.6, 0.6],
    ...                    [0.6, 0.8, 0.0]], np.float32)
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    array([0.5 , 0.8 , 0.6666667], dtype=float32)
    """

    def __init__(
        self,
        average=None,
        threshold=None,
        name="f1_score",
        dtype=None,
    ):
        # F1 is simply FBetaScore with beta pinned to 1.
        super().__init__(
            average=average,
            beta=1.0,
            threshold=threshold,
            name=name,
            dtype=dtype,
        )

    def get_config(self):
        """Return the serializable config, without the fixed `beta`."""
        config = super().get_config()
        # `beta` is always 1 for F1, so it is not serialized.
        del config["beta"]
        return config
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@metrics@f_score_metrics.py@.PATH_END.py
|
{
"filename": "shear.py",
"repo_name": "jpcoles/glass",
"repo_path": "glass_extracted/glass-master/glass/shear.py",
"type": "Python"
}
|
from __future__ import division
from numpy import array, vectorize
from math import pi, cos, sin
class Shear:
    """External shear model with two linear parameters (the two shear
    components), providing the lens potential, its first and second
    derivatives, and the inverse-magnification components."""

    def __init__(self, shift=10, name='shear'):
        # Two independent shear components.
        self.nParams = 2
        self.name = name
        self.shift = shift  # arcsec

    def poten(self, r):
        """Potential basis functions at complex position r = x + iy."""
        x, y = r.real, r.imag
        return array([0.5 * (x * x - y * y), x * y])

    def poten_dx(self, r):
        """d(poten)/dx for both components."""
        return array([r.real, r.imag])

    def poten_dy(self, r):
        """d(poten)/dy for both components."""
        return array([-r.imag, r.real])

    def poten_dxdx(self, r):
        # Second derivatives are constant for shear.
        return array([1, 0])

    def poten_dydy(self, r):
        return array([-1, 0])

    def poten_dxdy(self, r):
        return array([0, 1])

    def maginv(self, r, theta):
        """Inverse-magnification components at position r for a source
        position angle theta (degrees)."""
        xx = self.poten_dxdx(r)
        yy = self.poten_dydy(r)
        xy = self.poten_dxdy(r)
        # Convergence and shear combinations of the second derivatives.
        kappa = (xx + yy) / 2
        gamma = (xx - yy) / 2
        rad = theta * pi / 180
        cs, sn = cos(2 * rad), sin(2 * rad)
        rows = []
        for k, g, d in zip(kappa, gamma, xy):
            rows.append([0 - sn * g + cs * d,
                         k + cs * g + sn * d,
                         k - cs * g - sn * d])
        return array(rows)
|
jpcolesREPO_NAMEglassPATH_START.@glass_extracted@glass-master@glass@shear.py@.PATH_END.py
|
{
"filename": "_start.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2dcontour/contours/_start.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StartValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `contours.start` attribute of
    histogram2dcontour traces."""

    def __init__(
        self, plotly_name="start", parent_name="histogram2dcontour.contours", **kwargs
    ):
        # Setting an explicit contour start disables autocontour, hence
        # the implied edit on `^autocontour`.
        edit_type = kwargs.pop("edit_type", "plot")
        implied_edits = kwargs.pop("implied_edits", {"^autocontour": False})
        super(StartValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram2dcontour@contours@_start.py@.PATH_END.py
|
{
"filename": "_show.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/projection/y/_show.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the `projection.y.show` attribute of scatter3d traces."""

    def __init__(
        self, plotly_name="show", parent_name="scatter3d.projection.y", **kwargs
    ):
        edit_type = kwargs.pop("edit_type", "calc")
        super(ShowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@projection@y@_show.py@.PATH_END.py
|
{
"filename": "map.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/irf/edisp/map.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from gammapy.maps import Map, MapAxis, MapCoord, RegionGeom, WcsGeom
from gammapy.utils.random import InverseCDFSampler, get_random_state
from ..core import IRFMap
from .kernel import EDispKernel
__all__ = ["EDispMap", "EDispKernelMap"]
def get_overlap_fraction(energy_axis, energy_axis_true):
    """Fraction of each true-energy bin covered by each reco-energy bin.

    Returns a dimensionless 2D array with shape
    (n_true_bins, n_reco_bins): the overlap length of bin pairs divided
    by the true-bin width, clipped at zero for disjoint bins.
    """
    reco_lo = energy_axis.edges[:-1]
    reco_hi = energy_axis.edges[1:]
    # Broadcast true-energy edges against the reco edges.
    true_lo = energy_axis_true.edges[:-1][:, np.newaxis]
    true_hi = energy_axis_true.edges[1:][:, np.newaxis]
    overlap = np.fmin(reco_hi, true_hi) - np.fmax(reco_lo, true_lo)
    fraction = np.clip(overlap, 0, np.inf) / (true_hi - true_lo)
    return fraction.to("")
class EDispMap(IRFMap):
    """Energy dispersion map.
    Parameters
    ----------
    edisp_map : `~gammapy.maps.Map`
        The input Energy Dispersion Map. Should be a Map with 2 non-spatial axes.
        migra and true energy axes should be given in this specific order.
    exposure_map : `~gammapy.maps.Map`, optional
        Associated exposure map. Needs to have a consistent map geometry.
    Examples
    --------
    ::
        # Energy dispersion map for CTAO data
        import numpy as np
        from astropy import units as u
        from astropy.coordinates import SkyCoord
        from gammapy.maps import WcsGeom, MapAxis
        from gammapy.irf import EnergyDispersion2D, EffectiveAreaTable2D
        from gammapy.makers.utils import make_edisp_map, make_map_exposure_true_energy
        # Define energy dispersion map geometry
        energy_axis_true = MapAxis.from_edges(np.logspace(-1, 1, 10), unit="TeV", name="energy_true")
        migra_axis = MapAxis.from_edges(np.linspace(0, 3, 100), name="migra")
        pointing = SkyCoord(0, 0, unit="deg")
        geom = WcsGeom.create(
            binsz=0.25 * u.deg,
            width=10 * u.deg,
            skydir=pointing,
            axes=[migra_axis, energy_axis_true],
        )
        # Extract EnergyDispersion2D from CTA 1DC IRF
        filename = "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
        edisp2D = EnergyDispersion2D.read(filename, hdu="ENERGY DISPERSION")
        aeff2d = EffectiveAreaTable2D.read(filename, hdu="EFFECTIVE AREA")
        # Create the exposure map
        exposure_geom = geom.squash(axis_name="migra")
        exposure_map = make_map_exposure_true_energy(pointing, "1 h", aeff2d, exposure_geom)
        # Create the EDispMap for the specified pointing
        edisp_map = make_edisp_map(edisp2D, pointing, geom, exposure_map)
        # Get an Energy Dispersion (1D) at any position in the image
        pos = SkyCoord(2.0, 2.5, unit="deg")
        energy_axis = MapAxis.from_energy_bounds(0.1, 10, 5, unit="TeV", name="energy")
        edisp = edisp_map.get_edisp_kernel(energy_axis, position=pos)
        # Write map to disk
        edisp_map.write("edisp_map.fits")
    """
    tag = "edisp_map"
    required_axes = ["migra", "energy_true"]
    def __init__(self, edisp_map, exposure_map=None):
        super().__init__(irf_map=edisp_map, exposure_map=exposure_map)
    @property
    def edisp_map(self):
        # Alias for the underlying IRF map stored on the `IRFMap` base.
        return self._irf_map
    @edisp_map.setter
    def edisp_map(self, value):
        # NOTE(review): clears what appears to be a lazily-cached attribute
        # on the base class before swapping the map — confirm
        # `has_single_spatial_bin` supports `del` when not yet computed.
        del self.has_single_spatial_bin
        self._irf_map = value
    def normalize(self):
        """Normalize the energy dispersion map along the migra axis."""
        self.edisp_map.normalize(axis_name="migra")
    def get_edisp_kernel(self, energy_axis, position=None):
        """Get energy dispersion at a given position.
        Parameters
        ----------
        energy_axis : `~gammapy.maps.MapAxis`
            Reconstructed energy axis.
        position : `~astropy.coordinates.SkyCoord`
            The target position. Should be a single coordinates.
        Returns
        -------
        edisp : `~gammapy.irf.EnergyDispersion`
            The energy dispersion (i.e. rmf object).
        """
        # Reduce to the requested position first, then build the kernel.
        edisp_map = self.to_region_nd_map(region=position)
        edisp_kernel_map = edisp_map.to_edisp_kernel_map(energy_axis=energy_axis)
        return edisp_kernel_map.get_edisp_kernel()
    def to_edisp_kernel_map(self, energy_axis):
        """Convert to map with energy dispersion kernels.
        Parameters
        ----------
        energy_axis : `~gammapy.maps.MapAxis`
            Reconstructed energy axis.
        Returns
        -------
        edisp : `~gammapy.maps.EDispKernelMap`
            Energy dispersion kernel map.
        """
        energy_axis_true = self.edisp_map.geom.axes["energy_true"]
        geom_image = self.edisp_map.geom.to_image()
        geom = geom_image.to_cube([energy_axis, energy_axis_true])
        coords = geom.get_coord(sparse=True, mode="edges", axis_name="energy")
        # Migration at the reco-energy bin *edges*, per true energy.
        migra = coords["energy"] / coords["energy_true"]
        coords = {
            "skycoord": coords.skycoord,
            "energy_true": coords["energy_true"],
            "migra": migra,
        }
        # Cumulative integral over migra at the bin edges; differencing
        # along the migra axis yields the probability per reco-energy bin.
        values = self.edisp_map.integral(axis_name="migra", coords=coords)
        axis = self.edisp_map.geom.axes.index_data("migra")
        data = np.clip(np.diff(values, axis=axis), 0, np.inf)
        edisp_kernel_map = Map.from_geom(geom=geom, data=data.to_value(""), unit="")
        if self.exposure_map:
            geom = geom.squash(axis_name=energy_axis.name)
            exposure_map = self.exposure_map.copy(geom=geom)
        else:
            exposure_map = None
        return EDispKernelMap(
            edisp_kernel_map=edisp_kernel_map, exposure_map=exposure_map
        )
    @classmethod
    def from_geom(cls, geom):
        """Create energy dispersion map from geometry.
        By default, a diagonal energy dispersion matrix is created.
        Parameters
        ----------
        geom : `~gammapy.maps.Geom`
            Energy dispersion map geometry.
        Returns
        -------
        edisp_map : `~gammapy.maps.EDispMap`
            Energy dispersion map.
        """
        if "energy_true" not in [ax.name for ax in geom.axes]:
            raise ValueError("EDispMap requires true energy axis")
        exposure_map = Map.from_geom(geom=geom.squash(axis_name="migra"), unit="m2 s")
        edisp_map = Map.from_geom(geom, unit="")
        migra_axis = geom.axes["migra"]
        migra_0 = migra_axis.coord_to_pix(1)
        # distribute over two pixels
        # Triangular weighting centered at migra=1 so the diagonal response
        # integrates to unity after dividing by the bin width below.
        migra = geom.get_idx()[2]
        data = np.abs(migra - migra_0)
        data = np.where(data < 1, 1 - data, 0)
        edisp_map.quantity = data / migra_axis.bin_width.reshape((1, -1, 1, 1))
        return cls(edisp_map, exposure_map)
    def sample_coord(self, map_coord, random_state=0, chunk_size=10000):
        """Apply the energy dispersion corrections on the coordinates of a set of simulated events.
        Parameters
        ----------
        map_coord : `~gammapy.maps.MapCoord`
            Sequence of coordinates and energies of sampled events.
        random_state : {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}, optional
            Defines random number generator initialisation.
            Passed to `~gammapy.utils.random.get_random_state`.
            Default is 0.
        chunk_size : int
            If set, this will slice the input MapCoord into smaller chunks of chunk_size elements.
            Default is 10000.
        Returns
        -------
        `~gammapy.maps.MapCoord`.
            Sequence of energy dispersion corrected coordinates of the input map_coord map.
        """
        random_state = get_random_state(random_state)
        migra_axis = self.edisp_map.geom.axes["migra"]
        position = map_coord.skycoord
        energy_true = map_coord["energy_true"]
        size = position.size
        energy_reco = np.ones(size) * map_coord["energy_true"].unit
        # Process events in chunks to bound memory use of the 2D
        # (events x migra-bins) interpolation below.
        chunk_size = size if chunk_size is None else chunk_size
        index = 0
        while index < size:
            chunk = slice(index, index + chunk_size, 1)
            coord = {
                "skycoord": position[chunk].reshape(-1, 1),
                "energy_true": energy_true[chunk].reshape(-1, 1),
                "migra": migra_axis.center,
            }
            pdf_edisp = self.edisp_map.interp_by_coord(coord)
            # Draw a migra value per event from its dispersion PDF.
            sample_edisp = InverseCDFSampler(
                pdf_edisp, axis=1, random_state=random_state
            )
            pix_edisp = sample_edisp.sample_axis()
            migra = migra_axis.pix_to_coord(pix_edisp)
            energy_reco[chunk] = energy_true[chunk] * migra
            index += chunk_size
        return MapCoord.create({"skycoord": position, "energy": energy_reco})
    @classmethod
    def from_diagonal_response(cls, energy_axis_true, migra_axis=None):
        """Create an all-sky EDisp map with diagonal response.
        Parameters
        ----------
        energy_axis_true : `~gammapy.maps.MapAxis`
            True energy axis.
        migra_axis : `~gammapy.maps.MapAxis`, optional
            Migra axis. Default is None.
        Returns
        -------
        edisp_map : `~gammapy.maps.EDispMap`
            Energy dispersion map.
        """
        # Very narrow migra axis around 1 approximates a delta function.
        migra_res = 1e-5
        migra_axis_default = MapAxis.from_bounds(
            1 - migra_res, 1 + migra_res, nbin=3, name="migra", node_type="edges"
        )
        migra_axis = migra_axis or migra_axis_default
        geom = WcsGeom.create(
            npix=(2, 1), proj="CAR", binsz=180, axes=[migra_axis, energy_axis_true]
        )
        return cls.from_geom(geom)
    def peek(self, figsize=(15, 5)):
        """Quick-look summary plots.
        Plots corresponding to the center of the map.
        Parameters
        ----------
        figsize : tuple
            Size of figure.
        """
        e_true = self.edisp_map.geom.axes[1]
        # Use a reco axis spanning the true-energy range for display.
        e_reco = MapAxis.from_energy_bounds(
            e_true.edges.min(),
            e_true.edges.max(),
            nbin=len(e_true.center),
            name="energy",
        )
        self.get_edisp_kernel(energy_axis=e_reco).peek(figsize)
class EDispKernelMap(IRFMap):
    """Energy dispersion kernel map.
    Parameters
    ----------
    edisp_kernel_map : `~gammapy.maps.Map`
        The input energy dispersion kernel map. Should be a Map with 2 non-spatial axes.
        Reconstructed and true energy axes should be given in this specific order.
    exposure_map : `~gammapy.maps.Map`, optional
        Associated exposure map. Needs to have a consistent map geometry.
    """
    tag = "edisp_kernel_map"
    required_axes = ["energy", "energy_true"]
    def __init__(self, edisp_kernel_map, exposure_map=None):
        super().__init__(irf_map=edisp_kernel_map, exposure_map=exposure_map)
    @property
    def edisp_map(self):
        # Alias for the underlying IRF map stored on the `IRFMap` base.
        return self._irf_map
    @edisp_map.setter
    def edisp_map(self, value):
        self._irf_map = value
    @classmethod
    def from_geom(cls, geom):
        """Create energy dispersion map from geometry.
        By default, a diagonal energy dispersion matrix is created.
        Parameters
        ----------
        geom : `~gammapy.maps.Geom`
            Energy dispersion map geometry.
        Returns
        -------
        edisp_map : `EDispKernelMap`
            Energy dispersion kernel map.
        """
        # TODO: allow only list of additional axes
        geom.axes.assert_names(cls.required_axes, allow_extra=True)
        geom_exposure = geom.squash(axis_name="energy")
        exposure = Map.from_geom(geom_exposure, unit="m2 s")
        energy_axis = geom.axes["energy"]
        energy_axis_true = geom.axes["energy_true"]
        # Diagonal response: kernel weight is the fractional overlap of each
        # reco-energy bin with each true-energy bin.
        data = get_overlap_fraction(energy_axis, energy_axis_true)
        edisp_kernel_map = Map.from_geom(geom, unit="")
        edisp_kernel_map.quantity += np.resize(data, geom.data_shape_axes)
        return cls(edisp_kernel_map=edisp_kernel_map, exposure_map=exposure)
    def get_edisp_kernel(self, position=None, energy_axis=None):
        """Get energy dispersion at a given position.
        Parameters
        ----------
        position : `~astropy.coordinates.SkyCoord` or `~regions.SkyRegion`, optional
            The target position. Should be a single coordinates.
            Default is None.
        energy_axis : `MapAxis`, optional
            Reconstructed energy axis, only used for checking.
            Default is None.
        Returns
        -------
        edisp : `~gammapy.irf.EnergyDispersion`
            The energy dispersion (i.e. rmf object).
        Raises
        ------
        ValueError
            If `energy_axis` does not match the map's reconstructed energy axis.
        """
        if energy_axis:
            # Explicit validation instead of `assert`: asserts are stripped
            # when Python runs with optimizations (-O).
            if energy_axis != self.edisp_map.geom.axes["energy"]:
                raise ValueError(
                    "Input energy axis does not match the reconstructed "
                    "energy axis of the EDispKernelMap"
                )
        if isinstance(self.edisp_map.geom, RegionGeom):
            kernel_map = self.edisp_map
        else:
            if position is None:
                position = self.edisp_map.geom.center_skydir
            position = self._get_nearest_valid_position(position)
            kernel_map = self.edisp_map.to_region_nd_map(region=position)
        return EDispKernel(
            axes=kernel_map.geom.axes[["energy_true", "energy"]],
            data=kernel_map.data[..., 0, 0],
        )
    @classmethod
    def from_diagonal_response(cls, energy_axis, energy_axis_true, geom=None):
        """Create an energy dispersion map with diagonal response.
        Parameters
        ----------
        energy_axis : `~gammapy.maps.MapAxis`
            Energy axis.
        energy_axis_true : `~gammapy.maps.MapAxis`
            True energy axis
        geom : `~gammapy.maps.Geom`, optional
            The (2D) geometry object to use. If None, an all sky geometry with 2 bins is created.
            Default is None.
        Returns
        -------
        edisp_map : `EDispKernelMap`
            Energy dispersion kernel map.
        """
        if geom is None:
            geom = WcsGeom.create(
                npix=(2, 1), proj="CAR", binsz=180, axes=[energy_axis, energy_axis_true]
            )
        else:
            geom = geom.to_image().to_cube([energy_axis, energy_axis_true])
        return cls.from_geom(geom)
    @classmethod
    def from_edisp_kernel(cls, edisp, geom=None):
        """Create an energy dispersion map from the input 1D kernel.
        The kernel will be duplicated over all spatial bins.
        Parameters
        ----------
        edisp : `~gammapy.irf.EDispKernel`
            The input 1D kernel.
        geom : `~gammapy.maps.Geom`, optional
            The (2D) geometry object to use. If None, an all sky geometry with 2 bins is created.
            Default is None.
        Returns
        -------
        edisp_map : `EDispKernelMap`
            Energy dispersion kernel map.
        """
        edisp_map = cls.from_diagonal_response(
            edisp.axes["energy"], edisp.axes["energy_true"], geom=geom
        )
        # Replace the diagonal response with the broadcast 1D kernel.
        edisp_map.edisp_map.data *= 0
        edisp_map.edisp_map.data[:, :, ...] = edisp.pdf_matrix[
            :, :, np.newaxis, np.newaxis
        ]
        return edisp_map
    @classmethod
    def from_gauss(
        cls, energy_axis, energy_axis_true, sigma, bias, pdf_threshold=1e-6, geom=None
    ):
        """Create an energy dispersion map from a Gaussian 1D kernel.
        The kernel will be duplicated over all spatial bins.
        Parameters
        ----------
        energy_axis : `~gammapy.maps.MapAxis`
            Reconstructed energy axis.
        energy_axis_true : `~gammapy.maps.MapAxis`
            True energy axis.
        sigma : float or `~numpy.ndarray`
            RMS width of Gaussian energy dispersion, resolution.
        bias : float or `~numpy.ndarray`
            Center of Gaussian energy dispersion, bias.
        pdf_threshold : float, optional
            Zero suppression threshold. Default is 1e-6.
        geom : `~gammapy.maps.Geom`, optional
            The (2D) geometry object to use. If None, an all sky geometry with 2 bins is created.
            Default is None.
        Returns
        -------
        edisp_map : `EDispKernelMap`
            Energy dispersion kernel map.
        """
        kernel = EDispKernel.from_gauss(
            energy_axis=energy_axis,
            energy_axis_true=energy_axis_true,
            sigma=sigma,
            bias=bias,
            pdf_threshold=pdf_threshold,
        )
        return cls.from_edisp_kernel(kernel, geom=geom)
    def to_image(self, weights=None):
        """Return a 2D EdispKernelMap by summing over the reconstructed energy axis.
        Parameters
        ----------
        weights: `~gammapy.maps.Map`, optional
            Weights to be applied. Default is None.
        Returns
        -------
        edisp : `EDispKernelMap`
            Energy dispersion kernel map.
        """
        edisp = self.edisp_map.data
        if weights:
            edisp = edisp * weights.data
        # keepdims preserves the (squashed) energy axis in the geometry.
        data = np.sum(edisp, axis=1, keepdims=True)
        geom = self.edisp_map.geom.squash(axis_name="energy")
        edisp_map = Map.from_geom(geom=geom, data=data)
        return self.__class__(
            edisp_kernel_map=edisp_map, exposure_map=self.exposure_map
        )
    def resample_energy_axis(self, energy_axis, weights=None):
        """Return a resampled `EDispKernelMap`.
        Bins are grouped according to the edges of the reconstructed energy axis provided.
        The true energy is left unchanged.
        Parameters
        ----------
        energy_axis : `~gammapy.maps.MapAxis`
            The reconstructed energy axis to use for the grouping.
        weights: `~gammapy.maps.Map`, optional
            Weights to be applied. Default is None.
        Returns
        -------
        edisp : `EDispKernelMap`
            Energy dispersion kernel map.
        """
        new_edisp_map = self.edisp_map.resample_axis(axis=energy_axis, weights=weights)
        return self.__class__(
            edisp_kernel_map=new_edisp_map, exposure_map=self.exposure_map
        )
    def peek(self, figsize=(15, 5)):
        """Quick-look summary plots.
        Plots corresponding to the center of the map.
        Parameters
        ----------
        figsize : tuple, optional
            Size of the figure. Default is (15, 5).
        """
        self.get_edisp_kernel().peek(figsize)
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@irf@edisp@map.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/bar/unselected/marker/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Static imports keep type checkers happy and support Python < 3.7,
# which lacks module-level __getattr__; on modern interpreters the
# validators are loaded lazily (PEP 562) to reduce import time.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._opacity import OpacityValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import
    # relative_import wires up lazy __getattr__/__dir__ for the listed names.
    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._opacity.OpacityValidator", "._color.ColorValidator"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@bar@unselected@marker@__init__.py@.PATH_END.py
|
{
"filename": "plotting.py",
"repo_name": "gbrammer/unicorn",
"repo_path": "unicorn_extracted/unicorn-master/plotting.py",
"type": "Python"
}
|
import os
import glob
import shutil
import re
import time
import math
#import pyfits
import astropy.io.fits as pyfits
import numpy as np
import pylab
import matplotlib
import matplotlib.pyplot as plt
USE_PLOT_GUI=False
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.ticker as mticker
import threedhst
def plot_init(square=True, xs=6, aspect=1, left=0.22, bottom=0.11, right=0.02, top=0.02, wspace=0.2, hspace=0.02, fontsize=10, NO_GUI=False, use_tex=False, invert=False):
    """
    Wrapper for generating a plot window, contains input parameters for setting the
    full window geometry and also handles toggling the GUI/interactive backend.
    NO_GUI should be set to True if your session has no X11 connection.

    Parameters:
        square   : scale margins so the axes come out square (times `aspect`)
        xs       : figure width in inches
        aspect   : height/width aspect ratio of the axes
        left, bottom, right, top : fractional figure margins (for a 5-inch figure)
        wspace, hspace : subplot spacing passed to subplots_adjust
        fontsize : global font size in points
        NO_GUI   : force the non-interactive (Agg) backend
        use_tex  : render text with LaTeX
        invert   : white-on-black figure; a string selects the foreground color

    Returns a matplotlib Figure (interactive or Agg-backed, depending on context).
    """
    import unicorn
    import matplotlib
    rc = matplotlib.rcParams
    #### If logged in to an external machine ("uni"), don't use GUI plotter
    # NOTE(review): `|` is bitwise OR on booleans — works, but `or` reads clearer.
    # This module *is* unicorn.plotting, so the assignment here presumably
    # updates the module-global USE_PLOT_GUI read further below — confirm.
    if unicorn.hostname().startswith('uni') | NO_GUI:
        unicorn.plotting.USE_PLOT_GUI = False
    else:
        unicorn.plotting.USE_PLOT_GUI = True
    # plt.rcParams['font.family'] = 'serif'
    # plt.rcParams['font.serif'] = ['Times']
    plt.rcParams['patch.edgecolor'] = 'None'
    plt.rcParams['font.size'] = fontsize
    plt.rcParams['image.origin'] = 'lower'
    plt.rcParams['image.interpolation'] = 'nearest'
    if use_tex:
        plt.rcParams['text.usetex'] = True
        plt.rcParams['font.family'] = 'serif'
        plt.rcParams['font.serif'] = 'Times'
    #### White on black colormap
    if invert:
        # `invert` may be True (white foreground) or a color name string.
        if isinstance(invert, str):
            color = invert
        else:
            color = 'white'
        rc['lines.color'] = color
        rc['patch.edgecolor'] = color
        rc['text.color'] = color
        rc['axes.facecolor'] = 'black'
        rc['axes.edgecolor'] = color
        rc['axes.labelcolor'] = color
        rc['xtick.color'] = color
        rc['ytick.color'] = color
        rc['grid.color'] = color
        rc['figure.facecolor'] = 'black'
        rc['figure.edgecolor'] = 'black'
        rc['savefig.facecolor'] = 'black'
        rc['savefig.edgecolor'] = 'black'
    else:
        # Restore the standard black-on-white appearance.
        rc['lines.color'] = 'black'
        rc['patch.edgecolor'] = 'black'
        rc['text.color'] = 'black'
        rc['axes.facecolor'] = 'white'
        rc['axes.edgecolor'] = 'black'
        rc['axes.labelcolor'] = 'black'
        rc['xtick.color'] = 'black'
        rc['ytick.color'] = 'black'
        rc['grid.color'] = 'black'
        rc['figure.facecolor'] = 'white'
        rc['figure.edgecolor'] = 'white'
        rc['savefig.facecolor'] = 'white'
        rc['savefig.edgecolor'] = 'white'
    if square:
        #xs=5
        # Margins are specified for a 5-inch-wide figure: rescale them to the
        # requested width, then solve for the height that makes the axes
        # square (times `aspect`), adjusting the vertical margins to match.
        lrbt = np.array([left,right,bottom,top])*5./xs
        ys = (1-lrbt[1]-lrbt[0])/(1-lrbt[3]-lrbt[2])*xs*aspect
        lrbt[[2,3]] /= aspect
        if USE_PLOT_GUI:
            fig = plt.figure(figsize=(xs,ys), dpi=100)
        else:
            fig = Figure(figsize=(xs,ys), dpi=100)
        fig.subplots_adjust(left=lrbt[0], bottom=lrbt[2], right=1-lrbt[1], top=1-lrbt[3], wspace=wspace, hspace=hspace)
    else:
        if USE_PLOT_GUI:
            fig = plt.figure(figsize=(7,5), dpi=100)
        else:
            fig = Figure(figsize=(7,5), dpi=100)
        fig.subplots_adjust(wspace=wspace, hspace=hspace,left=0.10,
                            bottom=0.10,right=0.99,top=0.97)
    # Remember the inversion state so savefig() can hide patches later.
    if invert:
        fig.invert = True
    else:
        fig.invert = False
    return fig
def savefig(fig, filename='figure.png', no_tex=True, dpi=100, increment=False, transparent=False):
    """
    Wrapper around the `savefig` method to handle the two different backends,
    set whether or not an X11/interactive connection is available.

    If `increment` is set and the output filename exists, add an integer to
    the filename to avoid overwriting the current figure.

    Parameters:
        fig         : figure created by `plot_init` (or any matplotlib Figure)
        filename    : output file name
        no_tex      : reset usetex/font rcParams after saving
        dpi         : output resolution
        increment   : avoid overwriting by appending a counter to the name
        transparent : passed through to the backend save call
    """
    # Figures created by plot_init(invert=...) carry an `invert` flag;
    # hide the (black) patches so the saved file has no solid background.
    try:
        if fig.invert:
            fig.patch.set_visible(False)
            for ax in fig.axes:
                ax.patch.set_visible(False)
    except AttributeError:
        # Figure did not come from plot_init(); nothing to hide.
        # (BUG FIX: was a bare `except: pass`, which hid every error.)
        pass
    if increment:
        if os.path.exists(filename):
            spl = filename.split('.')
            root, ext = '.'.join(spl[:-1]), spl[-1]
            saved = glob.glob('%s.[0-9]*.%s' % (root, ext))
            filename = '%s.%03d.%s' % (root, len(saved) + 1, ext)
        # Print-function syntax works on both Python 2 and 3
        # (the original `print 'Save %s' % (filename)` is Python-2 only).
        print('Save %s' % (filename))
    if USE_PLOT_GUI:
        fig.savefig(filename, dpi=dpi, transparent=transparent)
    else:
        canvas = FigureCanvasAgg(fig)
        canvas.print_figure(filename, dpi=dpi, transparent=transparent)
    #
    # Restore non-TeX text rendering so subsequent figures are unaffected.
    if no_tex:
        plt.rcParams['text.usetex'] = False
        plt.rcParams['font.family'] = 'sans-serif'
class MyLocator(mticker.MaxNLocator):
    """
    Tick locator that caps the maximum number of major ticks; thin
    delegating wrapper around `mticker.MaxNLocator`, from
    http://matplotlib.sourceforge.net/examples/pylab_examples/finance_work2.html
    """

    def __init__(self, *args, **kwargs):
        super(MyLocator, self).__init__(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        return super(MyLocator, self).__call__(*args, **kwargs)
def fill_between_steps(x, y, z, ax=None, *args, **kwargs):
    """
    Shade the region between two curves so the fill follows
    linestyle='steps-mid' edges rather than straight segments.

    `x`, `y`, `z` are numpy arrays (y and z are the two bounding curves);
    extra arguments are forwarded to `ax.fill_between`.
    """
    order = np.argsort(x)
    midpoints = x[order][:-1] + np.diff(x[order]) / 2.
    # Duplicate each midpoint (second copy nudged by a tiny epsilon) so the
    # polygon steps vertically at every bin boundary.
    xfull = np.append(np.append(x, midpoints),
                      midpoints + np.diff(x[order]) / 1.e6)
    yfull = np.append(np.append(y, y[:-1]), y[1:])
    zfull = np.append(np.append(z, z[:-1]), z[1:])
    order = np.argsort(xfull)
    if ax is None:
        ax = plt.gca()
    ax.fill_between(xfull[order], yfull[order], zfull[order], *args, **kwargs)
def scatter_annotate(x, y, labels, xtol=None, ytol=None, ax=None, *args, **kwargs):
    """
    Make a scatter plot whose points show a per-point label when clicked.

    Parameters
    ----------
    x, y : array-like
        Point coordinates; must be the same length as `labels`.
    labels : sequence
        One annotation per point.
    xtol, ytol : float, optional
        Click tolerance in data units (default: 1/40 of the data range).
    ax : matplotlib axis, optional
        Axis to draw into; defaults to the current pyplot axes.

    Returns
    -------
    The connected `AnnoteFinder` instance, or False if the inputs are
    inconsistent.
    """
    if ax is None:
        axi = plt
    else:
        axi = ax

    if len(labels) != len(x):
        threedhst.showMessage('`x`, `y`, and `labels` inputs must be same size', warn=True)
        return False

    if not isinstance(labels, list):
        labels = list(labels)

    # Draw on the requested axis.  Previously this always used the pyplot
    # state machine (`plt.scatter`), silently ignoring the `ax` argument.
    axi.scatter(x, y, *args, **kwargs)
    af = AnnoteFinder(x, y, labels, xtol=xtol, ytol=ytol, axis=ax)
    plt.connect('button_press_event', af)

    return af
# Code taken from http://www.scipy.org/Cookbook/Matplotlib/Interactive_Plotting for annotating
# a plot label with some sort of label per point.
def linkAnnotationFinders(afs):
    """
    Cross-link a group of AnnoteFinder instances so that clicking a point
    in one plot also draws the matching annotation in all of the others.
    """
    for i, finder in enumerate(afs):
        finder.links.extend(afs[:i] + afs[i + 1:])
class AnnoteFinder:
    """
    Matplotlib callback that displays an annotation when a point is clicked.

    The point which is closest to the click and within `xtol`/`ytol` is
    identified.  Register this function like this:

        scatter(xdata, ydata)
        af = AnnoteFinder(xdata, ydata, annotes)
        connect('button_press_event', af)

    Adapted from
    http://www.scipy.org/Cookbook/Matplotlib/Interactive_Plotting
    """
    def __init__(self, xdata, ydata, annotes, axis=None, xtol=None, ytol=None):
        # Materialize the pairing: on Python 3 `zip` returns a one-shot
        # iterator, which would be exhausted after the first click.
        self.data = list(zip(xdata, ydata, annotes))
        if xtol is None:
            xtol = (max(xdata) - min(xdata))/40.
        if ytol is None:
            ytol = (max(ydata) - min(ydata))/40.
        self.xtol = xtol
        self.ytol = ytol
        if axis is None:
            self.axis = pylab.gca()
        else:
            self.axis = axis
        self.drawnAnnotations = {}
        self.links = []

    def distance(self, x1, x2, y1, y2):
        """
        Return the distance between two points.
        """
        return math.hypot(x1 - x2, y1 - y2)

    def __call__(self, event):
        tb = pylab.get_current_fig_manager().toolbar
        if not (tb.mode == ''):
            # Ignore clicks while a toolbar tool (zoom/pan) is active.
            return False
        if (event.inaxes):
            clickX = event.xdata
            clickY = event.ydata
            if self.axis is None or self.axis == event.inaxes:
                annotes = []
                for x, y, a in self.data:
                    if clickX-self.xtol < x < clickX+self.xtol and clickY-self.ytol < y < clickY+self.ytol:
                        annotes.append((self.distance(x, clickX, y, clickY), x, y, a))
                if annotes:
                    # Sort on distance only: the annotation payloads may not
                    # be orderable (tuple comparison would raise TypeError on
                    # Python 3 when distances tie).
                    annotes.sort(key=lambda rec: rec[0])
                    distance, x, y, annote = annotes[0]
                    self.drawAnnote(event.inaxes, x, y, annote)
                    for l in self.links:
                        l.drawSpecificAnnote(annote)

    def drawAnnote(self, axis, x, y, annote):
        """
        Draw (or toggle the visibility of) the annotation for the point at
        (x, y).
        """
        if (x, y) in self.drawnAnnotations:
            markers = self.drawnAnnotations[(x, y)]
            for m in markers:
                m.set_visible(not m.get_visible())
            self.axis.figure.canvas.draw()
        else:
            t = axis.text(x, y, "%s" %(annote), )
            print(annote)
            m = axis.scatter([x], [y], marker='s', c='r', zorder=100)
            self.drawnAnnotations[(x, y)] = (t, m)
            self.axis.figure.canvas.draw()

    def drawSpecificAnnote(self, annote):
        annotesToDraw = [(x, y, a) for x, y, a in self.data if a == annote]
        for x, y, a in annotesToDraw:
            self.drawAnnote(self.axis, x, y, a)
|
gbrammerREPO_NAMEunicornPATH_START.@unicorn_extracted@unicorn-master@plotting.py@.PATH_END.py
|
{
"filename": "instruments.py",
"repo_name": "PrincetonUniversity/charis-dep",
"repo_path": "charis-dep_extracted/charis-dep-main/charis/instruments/instruments.py",
"type": "Python"
}
|
import os
# from abc import ABCMeta, abstractmethod, abstractproperty
from builtins import input, object
import numpy as np
import pkg_resources
from astropy import units as u
from astropy.coordinates import EarthLocation
__all__ = ['CHARIS', 'SPHERE', 'instrument_from_data']
class CHARIS(object):
    """Class containing instrument properties of CHARIS.

    Holds the filter definitions, detector characteristics and wavelength
    grid for a given observing mode, and resolves the static calibration
    files for that mode.
    """

    __valid_observing_modes = ['J', 'H', 'K',
                               'Broadband', 'ND']

    # Filter cut-on / cut-off wavelengths per observing mode.
    __wavelength_range = {'J': [1155., 1340.] * u.nanometer,
                          'H': [1470., 1800.] * u.nanometer,
                          'K': [2005., 2380.] * u.nanometer,
                          'Broadband': [1140., 2410.] * u.nanometer,
                          'ND': [1140., 2410.] * u.nanometer}

    # Spectral resolution per observing mode.
    __resolution = {'J': 100,
                    'H': 100,
                    'K': 100,
                    'Broadband': 30,
                    'ND': 30}

    def wavelengths(self, lower_wavelength_limit, upper_wavelength_limit, R):
        """Build a log-uniform wavelength grid covering the given range.

        Parameters
        ----------
        lower_wavelength_limit, upper_wavelength_limit : float
            Wavelength range to cover (output is in the same units).
        R : float
            Spectral resolution; sets the number of bins.

        Returns
        -------
        lam_midpts, lam_endpts : np.ndarray
            Bin midpoints (midpoints in log space) and bin endpoints.
        """
        Nspec = int(np.log(upper_wavelength_limit * 1. / lower_wavelength_limit) * R + 1.5)
        loglam_endpts = np.linspace(np.log(lower_wavelength_limit),
                                    np.log(upper_wavelength_limit), Nspec)
        loglam_midpts = (loglam_endpts[1:] + loglam_endpts[:-1]) / 2.
        lam_endpts = np.exp(loglam_endpts)
        lam_midpts = np.exp(loglam_midpts)
        return lam_midpts, lam_endpts

    def __init__(self, observing_mode, static_calibdir=None):
        """Set up instrument properties and load the mode's calibrations.

        Parameters
        ----------
        observing_mode : str
            One of 'J', 'H', 'K', 'Broadband', 'ND'.
        static_calibdir : str, optional
            Root directory of the static calibration files; defaults to the
            `calibrations` directory shipped with the `charis` package.

        Raises
        ------
        ValueError
            If `observing_mode` is not a supported mode.
        """
        self.instrument_name = 'CHARIS'
        # Fail fast on an unknown mode; previously execution fell through
        # and crashed later with an AttributeError on self.observing_mode.
        if observing_mode not in self.__valid_observing_modes:
            raise ValueError(
                "Invalid observing mode: {}. Must be one of {}".format(
                    observing_mode, self.__valid_observing_modes))
        self.observing_mode = observing_mode
        self.wavelength_range = self.__wavelength_range[observing_mode]
        self.resolution = self.__resolution[observing_mode]
        self.lenslet_geometry = 'rectilinear'
        self.pixel_scale = 0.015 * u.arcsec / u.pixel
        self.gain = 2.
        self.wavelengthpolyorder = 3
        self.offsets = np.arange(-5, 6)
        index_range = np.arange(-100, 101, dtype='float')
        self.lenslet_ix, self.lenslet_iy = np.meshgrid(index_range, index_range)
        # Subaru telescope site coordinates.
        longitude, latitude = [-155.4760187, 19.825504] * u.degree
        self.location = EarthLocation(longitude, latitude)

        # NOTE(review): this only remaps the *local* name; every path below
        # uses self.observing_mode, so ND data still resolves ND calibration
        # files.  Confirm whether the remap to Broadband was meant to apply
        # to the calibration paths as well.
        if self.observing_mode == 'ND':
            observing_mode = 'Broadband'

        if static_calibdir is None:
            static_calibdir = pkg_resources.resource_filename('charis', 'calibrations')
        self.calibration_path_instrument = \
            os.path.join(
                static_calibdir, 'CHARIS')
        self.calibration_path_mode = os.path.join(
            self.calibration_path_instrument, self.observing_mode)
        # End-to-end transmission curve of the selected mode.
        self.transmission = np.loadtxt(os.path.join(
            self.calibration_path_mode, self.observing_mode + '_tottrans.dat'))
        self.lam_midpts, self.lam_endpts = \
            self.wavelengths(self.wavelength_range[0].value,
                             self.wavelength_range[1].value,
                             self.resolution)

    def __repr__(self):
        return "{} {}".format(self.instrument_name, self.observing_mode)
class SPHERE(object):
    """Class containing instrument properties of the SPHERE IFS.

    Holds the filter definitions, detector characteristics and wavelength
    grid for a given observing mode, and resolves the static calibration
    files for that mode.
    """

    __valid_observing_modes = ['YJ', 'YH']

    # Filter cut-on / cut-off wavelengths per observing mode.
    __wavelength_range = {'YJ': [940., 1370.] * u.nanometer,
                          'YH': [920., 1700.] * u.nanometer}

    # Spectral resolution per observing mode.
    __resolution = {'YJ': 55,
                    'YH': 35}

    # Calibration source wavelengths per mode.
    __calibration_wavelength = {
        'YJ': [987.72, 1123.71, 1309.37] * u.nanometer,
        'YH': [987.72, 1123.71, 1309.37, 1545.07] * u.nanometer}

    __wavelengthpolyorder = {
        'YJ': 2,
        'YH': 3}

    # NOTE: alternative (midpoint-first / linear) binning schemes that were
    # kept here as commented-out code have been removed; see VCS history.
    def wavelengths(self, lower_wavelength_limit, upper_wavelength_limit, R):
        """Build a log-uniform wavelength grid covering the given range.

        Parameters
        ----------
        lower_wavelength_limit, upper_wavelength_limit : float
            Wavelength range to cover (output is in the same units).
        R : float
            Spectral resolution; sets the number of bins.

        Returns
        -------
        lam_midpts, lam_endpts : np.ndarray
            Bin midpoints (midpoints in log space) and bin endpoints.
        """
        Nspec = int(np.log(upper_wavelength_limit * 1. / lower_wavelength_limit) * R + 1.5)
        loglam_endpts = np.linspace(np.log(lower_wavelength_limit),
                                    np.log(upper_wavelength_limit), Nspec)
        loglam_midpts = (loglam_endpts[1:] + loglam_endpts[:-1]) / 2.
        lam_endpts = np.exp(loglam_endpts)
        lam_midpts = np.exp(loglam_midpts)
        return lam_midpts, lam_endpts

    def __init__(self, observing_mode, static_calibdir=None):
        """Set up instrument properties and load the mode's calibrations.

        Parameters
        ----------
        observing_mode : str
            One of 'YJ', 'YH'.
        static_calibdir : str, optional
            Root directory of the static calibration files; defaults to the
            `calibrations` directory shipped with the `charis` package.

        Raises
        ------
        ValueError
            If `observing_mode` is not a supported mode.
        """
        self.instrument_name = 'SPHERE'
        # Fail fast on an unknown mode; previously execution fell through
        # and crashed later with an AttributeError on self.observing_mode.
        if observing_mode not in self.__valid_observing_modes:
            raise ValueError(
                "Invalid observing mode: {}. Must be one of {}".format(
                    observing_mode, self.__valid_observing_modes))
        self.observing_mode = observing_mode
        self.wavelength_range = self.__wavelength_range[observing_mode]
        self.resolution = self.__resolution[observing_mode]
        self.lenslet_geometry = 'hexagonal'
        self.pixel_scale = 0.00746 * u.arcsec / u.pixel
        self.gain = 1.8
        self.calibration_wavelength = self.__calibration_wavelength[observing_mode]
        self.wavelengthpolyorder = self.__wavelengthpolyorder[observing_mode]
        self.offsets = np.arange(-5, 6)
        index_range = np.arange(-100, 101, dtype='float')
        self.lenslet_ix, self.lenslet_iy = np.meshgrid(index_range, index_range)
        # Hexagonal lenslet grid: shift every other row by half a pitch and
        # compress the row spacing to the hexagonal lattice value.
        self.lenslet_ix[::2] += 0.5
        self.lenslet_iy *= np.sqrt(3) / 2.
        ################################################################
        # Flip the horizontal axis in the resulting cubes to match the
        # orientation of the SPHERE pipeline
        ################################################################
        self.lenslet_ix = self.lenslet_ix[:, ::-1]
        self.lenslet_iy = self.lenslet_iy[:, ::-1]
        # Paranal observatory site coordinates.
        longitude, latitude = [-70.4045, -24.6268] * u.degree
        self.location = EarthLocation(longitude, latitude)

        if static_calibdir is None:
            static_calibdir = pkg_resources.resource_filename('charis', 'calibrations')
        self.calibration_path_instrument = \
            os.path.join(
                static_calibdir, 'SPHERE')
        self.calibration_path_mode = os.path.join(
            self.calibration_path_instrument, self.observing_mode)
        # End-to-end transmission curve of the selected mode.
        self.transmission = np.loadtxt(os.path.join(
            self.calibration_path_mode, self.observing_mode + '_tottrans.dat'))
        self.lam_midpts, self.lam_endpts = \
            self.wavelengths(self.wavelength_range[0].value,
                             self.wavelength_range[1].value,
                             self.resolution)

    def __repr__(self):
        return "{} {}".format(self.instrument_name, self.observing_mode)
def instrument_from_data(
        header, calibration=True, interactive=False, static_calibdir=None,
        verbose=False):
    """Infer the instrument configuration from a FITS header.

    Parameters
    ----------
    header : FITS-header-like mapping
        Must contain 'INSTRUME' plus the instrument-specific mode keywords.
    calibration : bool
        If True, also determine the wavelength(s) of the calibration source.
    interactive : bool
        If True and the CHARIS header keywords are missing or invalid,
        prompt the user for the band and calibration wavelength.
    static_calibdir : str, optional
        Passed through to the instrument constructor.
    verbose : bool
        Print the detected instrument and mode.

    Returns
    -------
    (instrument, calibration_wavelength, correct_header)
        Either of the first two may be None when the header was incomplete
        and no interactive fallback was used.

    Raises
    ------
    ValueError
        For SPHERE data that is not from the IFS arm.
    NotImplementedError
        For unsupported instruments.
    """
    correct_header = True
    # Defaults so the final return cannot raise a NameError when the header
    # is incomplete and `interactive` is False.
    instrument = None
    calibration_wavelength = None

    if 'CHARIS' in header['INSTRUME']:
        if 'Y_FLTNAM' in header and 'OBJECT' in header:
            observing_mode = header['Y_FLTNAM']
            instrument = CHARIS(observing_mode, static_calibdir)
            if verbose:
                print("Instrument: {}".format(instrument.instrument_name))
                print("Mode: {}".format(instrument.observing_mode))
            if calibration:
                if header['OBJECT'] in ['1200nm', '1550nm', '2346nm']:
                    calibration_wavelength = [int(header['OBJECT'].split('n')[0])] * u.nanometer
                    print(f"Wavelength detected: {calibration_wavelength}")
                else:
                    print("Invalid wavelength keyword")
                    correct_header = False
            else:
                return instrument, None, correct_header
        else:
            correct_header = False

        if not correct_header and interactive:
            print("\n" + "*" * 60)
            print("The file you selected doesn't appear to have the correct header keywords set")
            print("This can happen for files taken before Apr 1st, 2017. Please enter them manually.")
            print("*" * 60)
            while True:
                observing_mode = input("    Band? [J/H/K/Broadband/ND]: ")
                if observing_mode in ["J", "H", "K", "Broadband", "ND"]:
                    break
                else:
                    print("Invalid input.")
            while True:
                calibration_wavelength = input("    Wavelength? [1200/1550/2346]: ")
                if calibration_wavelength in ["1200", "1550", "2346"]:
                    calibration_wavelength = [int(calibration_wavelength)] * u.nanometer
                    break
                else:
                    print("Invalid input")
            # Pass static_calibdir through here too, for consistency with the
            # non-interactive construction above.
            instrument = CHARIS(observing_mode, static_calibdir)

    elif 'SPHERE' in header['INSTRUME']:
        if 'IFS' in header['HIERARCH ESO SEQ ARM']:
            if 'YJ' in header['HIERARCH ESO INS2 COMB IFS']:
                observing_mode = 'YJ'
            else:
                observing_mode = 'YH'
        else:
            raise ValueError("Data is not for IFS")
        instrument = SPHERE(observing_mode, static_calibdir=static_calibdir)
        calibration_wavelength = instrument.calibration_wavelength
    else:
        raise NotImplementedError("The instrument is not supported.")

    return instrument, calibration_wavelength, correct_header
|
PrincetonUniversityREPO_NAMEcharis-depPATH_START.@charis-dep_extracted@charis-dep-main@charis@instruments@instruments.py@.PATH_END.py
|
{
"filename": "together.ipynb",
"repo_name": "Zstone19/pypetal",
"repo_path": "pypetal_extracted/pypetal-main/docs/notebooks/pyroa/together.ipynb",
"type": "Jupyter Notebook"
}
|
# The ``together`` Argument
In the previous example for PyROA, we fit all light curves to the ROA model simultaneously. However, in some circumstances, the user may want to fit each light curve to the continuum individually. pyPetal accounts for this using the ``together`` argument in the PyROA module.
If ``together=True``, PyROA will fit all light curves to the continuum (each individually against the continuum) in a single simultaneous fit. pyPetal will save all light curves to be used in PyROA in the ``output_dir/pyroa_lcs/`` directory, and all files/figures in the ``output_dir/pyroa/`` directory.
If ``together=False``, PyROA will fit each light curve to the continuum separately. Like the previous case, the light curves to be used for PyROA will be saved to ``output_dir/pyroa_lcs/``. However, the PyROA files and figures for each line will now be saved in ``output_dir/(line_name)/pyroa/``, where ``(line_name)`` is the name for each line.
In addition, some arguments (``add_var``, ``delay_dist``) may be input as arrays instead of values, for each line. However, if one value is given, it will apply to all lines.
We've seen ``together=True`` in the basic example, so now we'll set ``together=False``:
```python
%matplotlib inline
import pypetal.pipeline as pl
main_dir = 'pypetal/examples/dat/javelin_'
line_names = ['continuum', 'yelm', 'zing']
filenames = [ main_dir + x + '.dat' for x in line_names ]
output_dir = 'pyroa_output2/'
```
```python
params = {
'nchain': 10000,
'nburn': 7000,
'together': False,
'subtract_mean': False,
'add_var': True,
'delay_dist': False,
'init_tau': [80, 150]
}
res = pl.run_pipeline( output_dir, filenames, line_names,
run_pyroa=True, pyroa_params=params,
verbose=True, plot=True,
file_fmt='ascii', lag_bounds=['baseline', [0,500]])
```
Running PyROA
----------------
nburn: 7000
nchain: 10000
init_tau: [80, 150]
subtract_mean: False
div_mean: False
add_var: [True, True]
delay_dist: [False, False]
psi_types: ['Gaussian', 'Gaussian']
together: False
objname: pyroa
----------------
Initial Parameter Values
A0 B0 σ0 A1 B1 τ1 σ1 Δ
------- ------- ---- ------- ------- ---- ---- ---
2.30824 9.92677 0.01 1.19302 5.10527 80 0.01 10
NWalkers=18
100%|██████████| 10000/10000 [40:13<00:00, 4.14it/s]
Filter: continuum
Delay, error: 0.00 (fixed)
Filter: yelm
Delay, error: 100.80177 (+ 1.52500 - 1.50212)
Best Fit Parameters
A0 B0 σ0 A1 B1 τ1 σ1 Δ
------- ------- -------- ------ ------- ------- -------- -------
2.16544 10.1648 0.369855 1.0844 5.10848 100.802 0.188084 13.5799
Initial Parameter Values
A0 B0 σ0 A1 B1 τ1 σ1 Δ
------- ------- ---- ------ ------- ---- ---- ---
2.30824 9.92677 0.01 0.5882 2.44884 150 0.01 10
NWalkers=18
100%|██████████| 10000/10000 [40:26<00:00, 4.12it/s]
Filter: continuum
Delay, error: 0.00 (fixed)
Filter: zing
Delay, error: 250.31817 (+ 1.05027 - 1.12401)
Best Fit Parameters
A0 B0 σ0 A1 B1 τ1 σ1 Δ
------- ------- -------- -------- ------- ------- --------- -------
2.41656 9.89386 0.335357 0.602175 2.47354 250.318 0.0823911 12.0856








The output under the "pyroa_res" key in the output dictionary will now be a list of ``MyFit`` results (one per line) instead of one.
```python
res['pyroa_res']
```
[<pypetal.pyroa.utils.MyFit at 0x7f4630d27550>,
<pypetal.pyroa.utils.MyFit at 0x7f4759059c00>]
In addition, each run fits the continuum (and hence the driving light curve model) separately, so each line gets its own "chunked" sample array of the following form:
$[[A_{cont}, B_{cont}, \tau_{cont}, \sigma_{cont}],[A_{line}, B_{line}, \tau_{line}, \sigma_{line}],[\Delta]]$
|
Zstone19REPO_NAMEpypetalPATH_START.@pypetal_extracted@pypetal-main@docs@notebooks@pyroa@together.ipynb@.PATH_END.py
|
{
"filename": "roi_heads.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/torchvision/models/detection/roi_heads.py",
"type": "Python"
}
|
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
import torchvision
from torch import nn, Tensor
from torchvision.ops import boxes as box_ops, roi_align
from . import _utils as det_utils
def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
    # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]
    """
    Computes the loss for Faster R-CNN.

    Args:
        class_logits (Tensor)
        box_regression (Tensor)
        labels (list[BoxList])
        regression_targets (Tensor)

    Returns:
        classification_loss (Tensor)
        box_loss (Tensor)
    """
    labels = torch.cat(labels, dim=0)
    regression_targets = torch.cat(regression_targets, dim=0)

    classification_loss = F.cross_entropy(class_logits, labels)

    # Only foreground proposals contribute to the box loss; pick the
    # regression slice matching each proposal's ground-truth class.
    pos_idx = torch.where(labels > 0)[0]
    pos_labels = labels[pos_idx]

    num_proposals = class_logits.shape[0]
    box_regression = box_regression.reshape(num_proposals, -1, 4)

    box_loss = F.smooth_l1_loss(
        box_regression[pos_idx, pos_labels],
        regression_targets[pos_idx],
        beta=1 / 9,
        reduction="sum",
    ) / labels.numel()

    return classification_loss, box_loss
def maskrcnn_inference(x, labels):
    # type: (Tensor, List[Tensor]) -> List[Tensor]
    """
    Post-process the predicted mask logits: for every detection keep only
    the mask channel of its predicted class, and split the result back into
    per-image lists.

    Args:
        x (Tensor): the mask logits
        labels (list[BoxList]): bounding boxes that are used as
            reference, one for each image

    Returns:
        results (list[BoxList]): one BoxList for each image, containing
            the extra field mask
    """
    probs = x.sigmoid()

    num_detections = x.shape[0]
    counts = [lbl.shape[0] for lbl in labels]
    flat_labels = torch.cat(labels)

    # Select each detection's own class channel, keeping a channel dim.
    rows = torch.arange(num_detections, device=flat_labels.device)
    selected = probs[rows, flat_labels][:, None]

    return selected.split(counts, dim=0)
def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M):
    # type: (Tensor, Tensor, Tensor, int) -> Tensor
    """
    Crop and resize ground-truth segmentation masks to the M x M patches
    defined by the matched boxes, producing the targets for the mask loss.
    """
    # roi_align expects rois as (batch_index, x1, y1, x2, y2).
    rois = torch.cat([matched_idxs.to(boxes)[:, None], boxes], dim=1)
    return roi_align(gt_masks[:, None].to(rois), rois, (M, M), 1.0)[:, 0]
def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs):
    # type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor
    """
    Binary cross-entropy mask loss over the per-class mask logits.

    Args:
        proposals (list[BoxList])
        mask_logits (Tensor)
        targets (list[BoxList])

    Return:
        mask_loss (Tensor): scalar tensor containing the loss
    """
    M = mask_logits.shape[-1]
    labels = torch.cat(
        [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)], dim=0
    )
    mask_targets = torch.cat(
        [
            project_masks_on_boxes(m, p, i, M)
            for m, p, i in zip(gt_masks, proposals, mask_matched_idxs)
        ],
        dim=0,
    )

    # torch.mean (in binary_cross_entropy_with_logits) doesn't accept empty
    # tensors, so return a zero that still carries the autograd graph.
    if mask_targets.numel() == 0:
        return mask_logits.sum() * 0

    rows = torch.arange(labels.shape[0], device=labels.device)
    return F.binary_cross_entropy_with_logits(mask_logits[rows, labels], mask_targets)
def keypoints_to_heatmap(keypoints, rois, heatmap_size):
    # type: (Tensor, Tensor, int) -> Tuple[Tensor, Tensor]
    """
    Discretize keypoint coordinates into per-ROI heatmap bin indices.

    Returns the flattened heatmap index for every keypoint and a validity
    mask (keypoint visible and inside its ROI).
    """
    offset_x = rois[:, 0][:, None]
    offset_y = rois[:, 1][:, None]
    scale_x = (heatmap_size / (rois[:, 2] - rois[:, 0]))[:, None]
    scale_y = (heatmap_size / (rois[:, 3] - rois[:, 1]))[:, None]

    x = keypoints[..., 0]
    y = keypoints[..., 1]

    # Points sitting exactly on the far ROI edge map to the last bin.
    on_right_edge = x == rois[:, 2][:, None]
    on_bottom_edge = y == rois[:, 3][:, None]

    x = ((x - offset_x) * scale_x).floor().long()
    y = ((y - offset_y) * scale_y).floor().long()
    x[on_right_edge] = heatmap_size - 1
    y[on_bottom_edge] = heatmap_size - 1

    inside = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
    visible = keypoints[..., 2] > 0
    valid = (inside & visible).long()

    heatmaps = (y * heatmap_size + x) * valid
    return heatmaps, valid
def _onnx_heatmaps_to_keypoints(
    maps, maps_i, roi_map_width, roi_map_height, widths_i, heights_i, offset_x_i, offset_y_i
):
    """ONNX-traceable decoding of one ROI's keypoint heatmaps.

    Mirrors the eager per-ROI body of `heatmaps_to_keypoints`, but uses only
    ops (scalar tensors, `index_select`) that export cleanly to ONNX.
    Returns the (3, #keypoints) predictions for this ROI and the per-keypoint
    heatmap scores.
    """
    num_keypoints = torch.scalar_tensor(maps.size(1), dtype=torch.int64)
    # Map heatmap bins back to image-space units for this ROI.
    width_correction = widths_i / roi_map_width
    height_correction = heights_i / roi_map_height
    # Upsample the ROI's heatmaps to the ROI's (ceil'ed) pixel size.
    roi_map = F.interpolate(
        maps_i[:, None], size=(int(roi_map_height), int(roi_map_width)), mode="bicubic", align_corners=False
    )[:, 0]
    w = torch.scalar_tensor(roi_map.size(2), dtype=torch.int64)
    # argmax over the flattened map gives the best bin per keypoint.
    pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1)
    x_int = pos % w
    y_int = (pos - x_int) // w
    # +0.5 converts a discrete bin index to a continuous center coordinate.
    x = (torch.tensor(0.5, dtype=torch.float32) + x_int.to(dtype=torch.float32)) * width_correction.to(
        dtype=torch.float32
    )
    y = (torch.tensor(0.5, dtype=torch.float32) + y_int.to(dtype=torch.float32)) * height_correction.to(
        dtype=torch.float32
    )
    xy_preds_i_0 = x + offset_x_i.to(dtype=torch.float32)
    xy_preds_i_1 = y + offset_y_i.to(dtype=torch.float32)
    xy_preds_i_2 = torch.ones(xy_preds_i_1.shape, dtype=torch.float32)
    xy_preds_i = torch.stack(
        [
            xy_preds_i_0.to(dtype=torch.float32),
            xy_preds_i_1.to(dtype=torch.float32),
            xy_preds_i_2.to(dtype=torch.float32),
        ],
        0,
    )
    # TODO: simplify when indexing without rank will be supported by ONNX
    # Emulate roi_map[k, y_int[k], x_int[k]] via chained index_select calls.
    base = num_keypoints * num_keypoints + num_keypoints + 1
    ind = torch.arange(num_keypoints)
    ind = ind.to(dtype=torch.int64) * base
    end_scores_i = (
        roi_map.index_select(1, y_int.to(dtype=torch.int64))
        .index_select(2, x_int.to(dtype=torch.int64))
        .view(-1)
        .index_select(0, ind.to(dtype=torch.int64))
    )
    return xy_preds_i, end_scores_i
@torch.jit._script_if_tracing
def _onnx_heatmaps_to_keypoints_loop(
    maps, rois, widths_ceil, heights_ceil, widths, heights, offset_x, offset_y, num_keypoints
):
    """ONNX-traceable per-ROI loop around `_onnx_heatmaps_to_keypoints`.

    Accumulates results with `torch.cat` instead of indexed writes into a
    pre-allocated tensor so the loop can be scripted for ONNX export.
    """
    xy_preds = torch.zeros((0, 3, int(num_keypoints)), dtype=torch.float32, device=maps.device)
    end_scores = torch.zeros((0, int(num_keypoints)), dtype=torch.float32, device=maps.device)
    for i in range(int(rois.size(0))):
        xy_preds_i, end_scores_i = _onnx_heatmaps_to_keypoints(
            maps, maps[i], widths_ceil[i], heights_ceil[i], widths[i], heights[i], offset_x[i], offset_y[i]
        )
        xy_preds = torch.cat((xy_preds.to(dtype=torch.float32), xy_preds_i.unsqueeze(0).to(dtype=torch.float32)), 0)
        end_scores = torch.cat(
            (end_scores.to(dtype=torch.float32), end_scores_i.to(dtype=torch.float32).unsqueeze(0)), 0
        )
    return xy_preds, end_scores
def heatmaps_to_keypoints(maps, rois):
    """Extract predicted keypoint locations from heatmaps.

    Returns a tuple: `xy_preds` of shape (#rois, #keypoints, 3) holding
    (x, y, 1) per keypoint, and `end_scores` of shape (#rois, #keypoints)
    holding the heatmap logit at each predicted location.
    """
    # This function converts a discrete image coordinate in a HEATMAP_SIZE x
    # HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
    # consistency with keypoints_to_heatmap_labels by using the conversion from
    # Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
    # continuous coordinate.
    offset_x = rois[:, 0]
    offset_y = rois[:, 1]
    widths = rois[:, 2] - rois[:, 0]
    heights = rois[:, 3] - rois[:, 1]
    widths = widths.clamp(min=1)
    heights = heights.clamp(min=1)
    widths_ceil = widths.ceil()
    heights_ceil = heights.ceil()
    num_keypoints = maps.shape[1]
    if torchvision._is_tracing():
        # ONNX export path: delegate to the traceable loop variant.
        xy_preds, end_scores = _onnx_heatmaps_to_keypoints_loop(
            maps,
            rois,
            widths_ceil,
            heights_ceil,
            widths,
            heights,
            offset_x,
            offset_y,
            torch.scalar_tensor(num_keypoints, dtype=torch.int64),
        )
        return xy_preds.permute(0, 2, 1), end_scores
    xy_preds = torch.zeros((len(rois), 3, num_keypoints), dtype=torch.float32, device=maps.device)
    end_scores = torch.zeros((len(rois), num_keypoints), dtype=torch.float32, device=maps.device)
    for i in range(len(rois)):
        roi_map_width = int(widths_ceil[i].item())
        roi_map_height = int(heights_ceil[i].item())
        width_correction = widths[i] / roi_map_width
        height_correction = heights[i] / roi_map_height
        # Upsample this ROI's heatmaps to the ROI's pixel size.
        roi_map = F.interpolate(
            maps[i][:, None], size=(roi_map_height, roi_map_width), mode="bicubic", align_corners=False
        )[:, 0]
        # roi_map_probs = scores_to_probs(roi_map.copy())
        w = roi_map.shape[2]
        # argmax over the flattened map gives the best bin per keypoint.
        pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1)
        x_int = pos % w
        y_int = torch.div(pos - x_int, w, rounding_mode="floor")
        # assert (roi_map_probs[k, y_int, x_int] ==
        #         roi_map_probs[k, :, :].max())
        # +0.5 converts the discrete bin index to a continuous center.
        x = (x_int.float() + 0.5) * width_correction
        y = (y_int.float() + 0.5) * height_correction
        xy_preds[i, 0, :] = x + offset_x[i]
        xy_preds[i, 1, :] = y + offset_y[i]
        xy_preds[i, 2, :] = 1
        end_scores[i, :] = roi_map[torch.arange(num_keypoints, device=roi_map.device), y_int, x_int]
    return xy_preds.permute(0, 2, 1), end_scores
def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched_idxs):
    # type: (Tensor, List[Tensor], List[Tensor], List[Tensor]) -> Tensor
    """
    Cross-entropy loss over keypoint heatmaps: each visible ground-truth
    keypoint is a single "correct bin" class among the H*W heatmap bins.
    """
    N, K, H, W = keypoint_logits.shape
    if H != W:
        raise ValueError(
            f"keypoint_logits height and width (last two elements of shape) should be equal. Instead got H = {H} and W = {W}"
        )

    heatmaps = []
    valid = []
    for boxes_in_image, kps_in_image, matched in zip(proposals, gt_keypoints, keypoint_matched_idxs):
        kp = kps_in_image[matched]
        hm_per_image, valid_per_image = keypoints_to_heatmap(kp, boxes_in_image, H)
        heatmaps.append(hm_per_image.view(-1))
        valid.append(valid_per_image.view(-1))

    keypoint_targets = torch.cat(heatmaps, dim=0)
    valid = torch.where(torch.cat(valid, dim=0).to(dtype=torch.uint8))[0]

    # torch.mean (in binary_cross_entropy_with_logits) doesn't accept empty
    # tensors, so handle that case separately.
    if keypoint_targets.numel() == 0 or len(valid) == 0:
        return keypoint_logits.sum() * 0

    keypoint_logits = keypoint_logits.view(N * K, H * W)
    return F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid])
def keypointrcnn_inference(x, boxes):
    # type: (Tensor, List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
    """
    Split the batched keypoint heatmaps per image and decode each image's
    heatmaps into (x, y, score) keypoint predictions.
    """
    counts = [b.size(0) for b in boxes]
    kp_probs = []
    kp_scores = []
    for heatmaps, rois in zip(x.split(counts, dim=0), boxes):
        probs, scores = heatmaps_to_keypoints(heatmaps, rois)
        kp_probs.append(probs)
        kp_scores.append(scores)
    return kp_probs, kp_scores
def _onnx_expand_boxes(boxes, scale):
# type: (Tensor, float) -> Tensor
w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5
h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5
x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5
y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5
w_half = w_half.to(dtype=torch.float32) * scale
h_half = h_half.to(dtype=torch.float32) * scale
boxes_exp0 = x_c - w_half
boxes_exp1 = y_c - h_half
boxes_exp2 = x_c + w_half
boxes_exp3 = y_c + h_half
boxes_exp = torch.stack((boxes_exp0, boxes_exp1, boxes_exp2, boxes_exp3), 1)
return boxes_exp
# the next two functions should be merged inside Masker
# but are kept here for the moment while we need them
# temporarily for paste_mask_in_image
def expand_boxes(boxes, scale):
    # type: (Tensor, float) -> Tensor
    """Scale each box about its own center by `scale`; the ONNX tracing
    path delegates to the traceable variant."""
    if torchvision._is_tracing():
        return _onnx_expand_boxes(boxes, scale)
    half_w = (boxes[:, 2] - boxes[:, 0]) * 0.5 * scale
    half_h = (boxes[:, 3] - boxes[:, 1]) * 0.5 * scale
    center_x = (boxes[:, 2] + boxes[:, 0]) * 0.5
    center_y = (boxes[:, 3] + boxes[:, 1]) * 0.5

    expanded = torch.zeros_like(boxes)
    expanded[:, 0] = center_x - half_w
    expanded[:, 1] = center_y - half_h
    expanded[:, 2] = center_x + half_w
    expanded[:, 3] = center_y + half_h
    return expanded
@torch.jit.unused
def expand_masks_tracing_scale(M, padding):
    # type: (int, int) -> float
    """Tracing-safe ratio of the padded mask size to the original size."""
    padded_size = torch.tensor(M + 2 * padding).to(torch.float32)
    return padded_size / torch.tensor(M).to(torch.float32)
def expand_masks(mask, padding):
    # type: (Tensor, int) -> Tuple[Tensor, float]
    """Zero-pad masks on all four sides and return the padded masks along
    with the scale factor relative to the unpadded size."""
    M = mask.shape[-1]
    if torch._C._get_tracing_state():  # could not import is_tracing(), not sure why
        scale = expand_masks_tracing_scale(M, padding)
    else:
        scale = float(M + 2 * padding) / M
    return F.pad(mask, (padding,) * 4), scale
def paste_mask_in_image(mask, box, im_h, im_w):
    # type: (Tensor, Tensor, int, int) -> Tensor
    """Resize a fixed-size mask to its box and paste it into a zeroed
    full-image canvas, clipping at the image borders."""
    TO_REMOVE = 1
    w = max(int(box[2] - box[0] + TO_REMOVE), 1)
    h = max(int(box[3] - box[1] + TO_REMOVE), 1)

    # Resize as a 1x1xHxW batch, then drop the batch/channel dims.
    resized = F.interpolate(
        mask.expand((1, 1, -1, -1)), size=(h, w), mode="bilinear", align_corners=False
    )[0][0]

    canvas = torch.zeros((im_h, im_w), dtype=resized.dtype, device=resized.device)
    x_0 = max(box[0], 0)
    x_1 = min(box[2] + 1, im_w)
    y_0 = max(box[1], 0)
    y_1 = min(box[3] + 1, im_h)

    canvas[y_0:y_1, x_0:x_1] = resized[(y_0 - box[1]):(y_1 - box[1]), (x_0 - box[0]):(x_1 - box[0])]
    return canvas
def _onnx_paste_mask_in_image(mask, box, im_h, im_w):
    """ONNX-traceable variant of `paste_mask_in_image`.

    Takes `im_h`/`im_w` as scalar tensors and builds the padded canvas with
    `torch.cat` instead of slice assignment, since dynamic in-place writes
    do not export to ONNX.
    """
    one = torch.ones(1, dtype=torch.int64)
    zero = torch.zeros(1, dtype=torch.int64)
    # Box size in pixels, clamped to at least 1.
    w = box[2] - box[0] + one
    h = box[3] - box[1] + one
    w = torch.max(torch.cat((w, one)))
    h = torch.max(torch.cat((h, one)))
    # Set shape to [batchxCxHxW]
    mask = mask.expand((1, 1, mask.size(0), mask.size(1)))
    # Resize mask
    mask = F.interpolate(mask, size=(int(h), int(w)), mode="bilinear", align_corners=False)
    mask = mask[0][0]
    # Clip the paste region to the image bounds.
    x_0 = torch.max(torch.cat((box[0].unsqueeze(0), zero)))
    x_1 = torch.min(torch.cat((box[2].unsqueeze(0) + one, im_w.unsqueeze(0))))
    y_0 = torch.max(torch.cat((box[1].unsqueeze(0), zero)))
    y_1 = torch.min(torch.cat((box[3].unsqueeze(0) + one, im_h.unsqueeze(0))))
    unpaded_im_mask = mask[(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])]
    # TODO : replace below with a dynamic padding when support is added in ONNX
    # pad y
    zeros_y0 = torch.zeros(y_0, unpaded_im_mask.size(1))
    zeros_y1 = torch.zeros(im_h - y_1, unpaded_im_mask.size(1))
    concat_0 = torch.cat((zeros_y0, unpaded_im_mask.to(dtype=torch.float32), zeros_y1), 0)[0:im_h, :]
    # pad x
    zeros_x0 = torch.zeros(concat_0.size(0), x_0)
    zeros_x1 = torch.zeros(concat_0.size(0), im_w - x_1)
    im_mask = torch.cat((zeros_x0, concat_0, zeros_x1), 1)[:, :im_w]
    return im_mask
@torch.jit._script_if_tracing
def _onnx_paste_masks_in_image_loop(masks, boxes, im_h, im_w):
    """ONNX-traceable per-detection loop around `_onnx_paste_mask_in_image`.

    Accumulates the pasted masks with `torch.cat` instead of indexed writes
    so the loop can be scripted for ONNX export.
    """
    res_append = torch.zeros(0, im_h, im_w)
    for i in range(masks.size(0)):
        mask_res = _onnx_paste_mask_in_image(masks[i][0], boxes[i], im_h, im_w)
        mask_res = mask_res.unsqueeze(0)
        res_append = torch.cat((res_append, mask_res))
    return res_append
def paste_masks_in_image(masks, boxes, img_shape, padding=1):
    # type: (Tensor, Tensor, Tuple[int, int], int) -> Tensor
    """Paste per-detection masks into full-image canvases.

    Pads the masks, scales the boxes to match, then pastes each mask into
    an (im_h, im_w) image; returns an N x 1 x im_h x im_w tensor.
    """
    masks, scale = expand_masks(masks, padding=padding)
    boxes = expand_boxes(boxes, scale).to(dtype=torch.int64)
    im_h, im_w = img_shape

    if torchvision._is_tracing():
        return _onnx_paste_masks_in_image_loop(
            masks, boxes, torch.scalar_tensor(im_h, dtype=torch.int64), torch.scalar_tensor(im_w, dtype=torch.int64)
        )[:, None]

    pasted = [paste_mask_in_image(m[0], b, im_h, im_w) for m, b in zip(masks, boxes)]
    if pasted:
        return torch.stack(pasted, dim=0)[:, None]
    return masks.new_empty((0, 1, im_h, im_w))
class RoIHeads(nn.Module):
__annotations__ = {
"box_coder": det_utils.BoxCoder,
"proposal_matcher": det_utils.Matcher,
"fg_bg_sampler": det_utils.BalancedPositiveNegativeSampler,
}
    def __init__(
        self,
        box_roi_pool,
        box_head,
        box_predictor,
        # Faster R-CNN training
        fg_iou_thresh,
        bg_iou_thresh,
        batch_size_per_image,
        positive_fraction,
        bbox_reg_weights,
        # Faster R-CNN inference
        score_thresh,
        nms_thresh,
        detections_per_img,
        # Mask
        mask_roi_pool=None,
        mask_head=None,
        mask_predictor=None,
        keypoint_roi_pool=None,
        keypoint_head=None,
        keypoint_predictor=None,
    ):
        """Assemble the ROI heads.

        The box branch (`box_roi_pool`/`box_head`/`box_predictor`) is
        mandatory; the mask and keypoint branches are optional and each is
        considered enabled only when all three of its components are given
        (see `has_mask` / `has_keypoint`).  The IoU thresholds and sampler
        settings control how proposals are matched to ground truth during
        training; `score_thresh`/`nms_thresh`/`detections_per_img` control
        inference-time post-processing.
        """
        super().__init__()
        self.box_similarity = box_ops.box_iou
        # assign ground-truth boxes for each proposal
        self.proposal_matcher = det_utils.Matcher(fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=False)
        self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(batch_size_per_image, positive_fraction)
        if bbox_reg_weights is None:
            bbox_reg_weights = (10.0, 10.0, 5.0, 5.0)
        self.box_coder = det_utils.BoxCoder(bbox_reg_weights)
        self.box_roi_pool = box_roi_pool
        self.box_head = box_head
        self.box_predictor = box_predictor
        self.score_thresh = score_thresh
        self.nms_thresh = nms_thresh
        self.detections_per_img = detections_per_img
        self.mask_roi_pool = mask_roi_pool
        self.mask_head = mask_head
        self.mask_predictor = mask_predictor
        self.keypoint_roi_pool = keypoint_roi_pool
        self.keypoint_head = keypoint_head
        self.keypoint_predictor = keypoint_predictor
def has_mask(self):
if self.mask_roi_pool is None:
return False
if self.mask_head is None:
return False
if self.mask_predictor is None:
return False
return True
def has_keypoint(self):
if self.keypoint_roi_pool is None:
return False
if self.keypoint_head is None:
return False
if self.keypoint_predictor is None:
return False
return True
def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels):
# type: (List[Tensor], List[Tensor], List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
matched_idxs = []
labels = []
for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels):
if gt_boxes_in_image.numel() == 0:
# Background image
device = proposals_in_image.device
clamped_matched_idxs_in_image = torch.zeros(
(proposals_in_image.shape[0],), dtype=torch.int64, device=device
)
labels_in_image = torch.zeros((proposals_in_image.shape[0],), dtype=torch.int64, device=device)
else:
# set to self.box_similarity when https://github.com/pytorch/pytorch/issues/27495 lands
match_quality_matrix = box_ops.box_iou(gt_boxes_in_image, proposals_in_image)
matched_idxs_in_image = self.proposal_matcher(match_quality_matrix)
clamped_matched_idxs_in_image = matched_idxs_in_image.clamp(min=0)
labels_in_image = gt_labels_in_image[clamped_matched_idxs_in_image]
labels_in_image = labels_in_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs_in_image == self.proposal_matcher.BELOW_LOW_THRESHOLD
labels_in_image[bg_inds] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs_in_image == self.proposal_matcher.BETWEEN_THRESHOLDS
labels_in_image[ignore_inds] = -1 # -1 is ignored by sampler
matched_idxs.append(clamped_matched_idxs_in_image)
labels.append(labels_in_image)
return matched_idxs, labels
def subsample(self, labels):
# type: (List[Tensor]) -> List[Tensor]
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
sampled_inds = []
for img_idx, (pos_inds_img, neg_inds_img) in enumerate(zip(sampled_pos_inds, sampled_neg_inds)):
img_sampled_inds = torch.where(pos_inds_img | neg_inds_img)[0]
sampled_inds.append(img_sampled_inds)
return sampled_inds
def add_gt_proposals(self, proposals, gt_boxes):
# type: (List[Tensor], List[Tensor]) -> List[Tensor]
proposals = [torch.cat((proposal, gt_box)) for proposal, gt_box in zip(proposals, gt_boxes)]
return proposals
def check_targets(self, targets):
# type: (Optional[List[Dict[str, Tensor]]]) -> None
if targets is None:
raise ValueError("targets should not be None")
if not all(["boxes" in t for t in targets]):
raise ValueError("Every element of targets should have a boxes key")
if not all(["labels" in t for t in targets]):
raise ValueError("Every element of targets should have a labels key")
if self.has_mask():
if not all(["masks" in t for t in targets]):
raise ValueError("Every element of targets should have a masks key")
def select_training_samples(
self,
proposals, # type: List[Tensor]
targets, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]
self.check_targets(targets)
if targets is None:
raise ValueError("targets should not be None")
dtype = proposals[0].dtype
device = proposals[0].device
gt_boxes = [t["boxes"].to(dtype) for t in targets]
gt_labels = [t["labels"] for t in targets]
# append ground-truth bboxes to propos
proposals = self.add_gt_proposals(proposals, gt_boxes)
# get matching gt indices for each proposal
matched_idxs, labels = self.assign_targets_to_proposals(proposals, gt_boxes, gt_labels)
# sample a fixed proportion of positive-negative proposals
sampled_inds = self.subsample(labels)
matched_gt_boxes = []
num_images = len(proposals)
for img_id in range(num_images):
img_sampled_inds = sampled_inds[img_id]
proposals[img_id] = proposals[img_id][img_sampled_inds]
labels[img_id] = labels[img_id][img_sampled_inds]
matched_idxs[img_id] = matched_idxs[img_id][img_sampled_inds]
gt_boxes_in_image = gt_boxes[img_id]
if gt_boxes_in_image.numel() == 0:
gt_boxes_in_image = torch.zeros((1, 4), dtype=dtype, device=device)
matched_gt_boxes.append(gt_boxes_in_image[matched_idxs[img_id]])
regression_targets = self.box_coder.encode(matched_gt_boxes, proposals)
return proposals, matched_idxs, labels, regression_targets
def postprocess_detections(
self,
class_logits, # type: Tensor
box_regression, # type: Tensor
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
):
# type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]
device = class_logits.device
num_classes = class_logits.shape[-1]
boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals]
pred_boxes = self.box_coder.decode(box_regression, proposals)
pred_scores = F.softmax(class_logits, -1)
pred_boxes_list = pred_boxes.split(boxes_per_image, 0)
pred_scores_list = pred_scores.split(boxes_per_image, 0)
all_boxes = []
all_scores = []
all_labels = []
for boxes, scores, image_shape in zip(pred_boxes_list, pred_scores_list, image_shapes):
boxes = box_ops.clip_boxes_to_image(boxes, image_shape)
# create labels for each prediction
labels = torch.arange(num_classes, device=device)
labels = labels.view(1, -1).expand_as(scores)
# remove predictions with the background label
boxes = boxes[:, 1:]
scores = scores[:, 1:]
labels = labels[:, 1:]
# batch everything, by making every class prediction be a separate instance
boxes = boxes.reshape(-1, 4)
scores = scores.reshape(-1)
labels = labels.reshape(-1)
# remove low scoring boxes
inds = torch.where(scores > self.score_thresh)[0]
boxes, scores, labels = boxes[inds], scores[inds], labels[inds]
# remove empty boxes
keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)
boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
# non-maximum suppression, independently done per class
keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh)
# keep only topk scoring predictions
keep = keep[: self.detections_per_img]
boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
all_boxes.append(boxes)
all_scores.append(scores)
all_labels.append(labels)
return all_boxes, all_scores, all_labels
def forward(
self,
features, # type: Dict[str, Tensor]
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
targets=None, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
"""
Args:
features (List[Tensor])
proposals (List[Tensor[N, 4]])
image_shapes (List[Tuple[H, W]])
targets (List[Dict])
"""
if targets is not None:
for t in targets:
# TODO: https://github.com/pytorch/pytorch/issues/26731
floating_point_types = (torch.float, torch.double, torch.half)
if not t["boxes"].dtype in floating_point_types:
raise TypeError(f"target boxes must of float type, instead got {t['boxes'].dtype}")
if not t["labels"].dtype == torch.int64:
raise TypeError(f"target labels must of int64 type, instead got {t['labels'].dtype}")
if self.has_keypoint():
if not t["keypoints"].dtype == torch.float32:
raise TypeError(f"target keypoints must of float type, instead got {t['keypoints'].dtype}")
if self.training:
proposals, matched_idxs, labels, regression_targets = self.select_training_samples(proposals, targets)
else:
labels = None
regression_targets = None
matched_idxs = None
box_features = self.box_roi_pool(features, proposals, image_shapes)
box_features = self.box_head(box_features)
class_logits, box_regression = self.box_predictor(box_features)
result: List[Dict[str, torch.Tensor]] = []
losses = {}
if self.training:
if labels is None:
raise ValueError("labels cannot be None")
if regression_targets is None:
raise ValueError("regression_targets cannot be None")
loss_classifier, loss_box_reg = fastrcnn_loss(class_logits, box_regression, labels, regression_targets)
losses = {"loss_classifier": loss_classifier, "loss_box_reg": loss_box_reg}
else:
boxes, scores, labels = self.postprocess_detections(class_logits, box_regression, proposals, image_shapes)
num_images = len(boxes)
for i in range(num_images):
result.append(
{
"boxes": boxes[i],
"labels": labels[i],
"scores": scores[i],
}
)
if self.has_mask():
mask_proposals = [p["boxes"] for p in result]
if self.training:
if matched_idxs is None:
raise ValueError("if in training, matched_idxs should not be None")
# during training, only focus on positive boxes
num_images = len(proposals)
mask_proposals = []
pos_matched_idxs = []
for img_id in range(num_images):
pos = torch.where(labels[img_id] > 0)[0]
mask_proposals.append(proposals[img_id][pos])
pos_matched_idxs.append(matched_idxs[img_id][pos])
else:
pos_matched_idxs = None
if self.mask_roi_pool is not None:
mask_features = self.mask_roi_pool(features, mask_proposals, image_shapes)
mask_features = self.mask_head(mask_features)
mask_logits = self.mask_predictor(mask_features)
else:
raise Exception("Expected mask_roi_pool to be not None")
loss_mask = {}
if self.training:
if targets is None or pos_matched_idxs is None or mask_logits is None:
raise ValueError("targets, pos_matched_idxs, mask_logits cannot be None when training")
gt_masks = [t["masks"] for t in targets]
gt_labels = [t["labels"] for t in targets]
rcnn_loss_mask = maskrcnn_loss(mask_logits, mask_proposals, gt_masks, gt_labels, pos_matched_idxs)
loss_mask = {"loss_mask": rcnn_loss_mask}
else:
labels = [r["labels"] for r in result]
masks_probs = maskrcnn_inference(mask_logits, labels)
for mask_prob, r in zip(masks_probs, result):
r["masks"] = mask_prob
losses.update(loss_mask)
# keep none checks in if conditional so torchscript will conditionally
# compile each branch
if (
self.keypoint_roi_pool is not None
and self.keypoint_head is not None
and self.keypoint_predictor is not None
):
keypoint_proposals = [p["boxes"] for p in result]
if self.training:
# during training, only focus on positive boxes
num_images = len(proposals)
keypoint_proposals = []
pos_matched_idxs = []
if matched_idxs is None:
raise ValueError("if in trainning, matched_idxs should not be None")
for img_id in range(num_images):
pos = torch.where(labels[img_id] > 0)[0]
keypoint_proposals.append(proposals[img_id][pos])
pos_matched_idxs.append(matched_idxs[img_id][pos])
else:
pos_matched_idxs = None
keypoint_features = self.keypoint_roi_pool(features, keypoint_proposals, image_shapes)
keypoint_features = self.keypoint_head(keypoint_features)
keypoint_logits = self.keypoint_predictor(keypoint_features)
loss_keypoint = {}
if self.training:
if targets is None or pos_matched_idxs is None:
raise ValueError("both targets and pos_matched_idxs should not be None when in training mode")
gt_keypoints = [t["keypoints"] for t in targets]
rcnn_loss_keypoint = keypointrcnn_loss(
keypoint_logits, keypoint_proposals, gt_keypoints, pos_matched_idxs
)
loss_keypoint = {"loss_keypoint": rcnn_loss_keypoint}
else:
if keypoint_logits is None or keypoint_proposals is None:
raise ValueError(
"both keypoint_logits and keypoint_proposals should not be None when not in training mode"
)
keypoints_probs, kp_scores = keypointrcnn_inference(keypoint_logits, keypoint_proposals)
for keypoint_prob, kps, r in zip(keypoints_probs, kp_scores, result):
r["keypoints"] = keypoint_prob
r["keypoints_scores"] = kps
losses.update(loss_keypoint)
return result, losses
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@torchvision@models@detection@roi_heads.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermapbox/_line.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``line`` property of ``scattermapbox`` traces."""

    def __init__(self, plotly_name="line", parent_name="scattermapbox", **kwargs):
        # Pull the overridable schema metadata out of kwargs first, falling
        # back to the generated defaults when the caller supplies none.
        data_class_str = kwargs.pop("data_class_str", "Line")
        data_docs = kwargs.pop(
            "data_docs",
            """
            color
                Sets the line color.
            width
                Sets the line width (in px).
""",
        )
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermapbox@_line.py@.PATH_END.py
|
{
"filename": "rmtools_bwdepol.py",
"repo_name": "CIRADA-Tools/RM-Tools",
"repo_path": "RM-Tools_extracted/RM-Tools-master/RMtools_1D/rmtools_bwdepol.py",
"type": "Python"
}
|
#!/usr/bin/env python
# =============================================================================#
# #
# NAME: rmtools_bwdepol.py #
# #
# PURPOSE: Algorithm for finding polarized sources while accounting for #
# bandwidth depolarization. #
# #
# =============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2020 Canadian Initiative for Radio Astronomy Data Analysis # #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
# =============================================================================#
import argparse
import math as m
import os
import sys
import time
import traceback
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from astropy.constants import c as speed_of_light
from matplotlib.ticker import MaxNLocator
from RMtools_1D.do_RMsynth_1D import saveOutput
from RMutils.util_misc import (
MAD,
create_frac_spectra,
nanmedian,
poly5,
progress,
toscalar,
)
from RMutils.util_plotTk import (
plot_complexity_fig,
plot_dirtyFDF_ax,
plot_Ipqu_spectra_fig,
)
from RMutils.util_RM import (
calc_parabola_vertex,
extrap,
fit_rmsf,
measure_qu_complexity,
)
# Refuse to run under Python 2: the code below is Python-3 only.
if sys.version_info.major == 2:
    print("RM-tools will no longer run with Python 2! Please use Python 3.")
    # sys.exit() instead of the site-module builtin exit(), which is not
    # guaranteed to exist (e.g. when Python is started with -S).
    sys.exit()
# -----------------------------------------------------------------------------#
def rotation_integral_limit(freq, phi):
    """Evaluate the antiderivative of the channel-polarization integrand.

    This is the closed-form limit of the integral of exp(2i*phi*lambda^2)
    over frequency, evaluated at ``freq``; the channel average follows from
    differencing two such limits.

    Parameters
    ----------
    freq: float
        frequency in Hz
    phi: float
        Faraday depth (rad/m^2)

    Returns
    -------
    intergral_lim: complex
        value of the integral limit at ``freq``
    """
    lam = speed_of_light.value / freq
    # Oscillatory term evaluated at the limit frequency.
    oscillation = freq * np.exp(2.0j * phi * (lam**2))
    # Error-function correction term (sign convention follows sign(phi)).
    prefactor = speed_of_light.value * np.sqrt((np.abs(phi) * np.pi))
    sign_term = -1.0j + np.sign(phi)
    erf_term = sp.special.erf(np.sqrt(np.abs(phi)) * lam * (-1.0j + np.sign(phi)))
    return oscillation + (prefactor * sign_term * erf_term)
def rotation_operator(channel_width, channel_center, phi):
    """Rotation operator for a channel with top-hat-in-frequency sensitivity.

    Computes the net effect on a polarization vector for a channel of given
    center frequency and bandwidth, for a given Faraday depth.

    Parameters
    ----------
    channel_width: float
        channel bandwidth in Hz
    channel_center: float
        channel center frequency in Hz
    phi: float
        Faraday depth (rad/m^2)

    Returns
    -------
    complex
        rotation operator for that channel
    """
    # Integrate the closed-form antiderivative across the channel edges and
    # normalize by the bandwidth.
    lower_edge = channel_center - 0.5 * channel_width
    upper_edge = channel_center + 0.5 * channel_width
    lower_val = rotation_integral_limit(lower_edge, phi)
    upper_val = rotation_integral_limit(upper_edge, phi)
    return (1 / channel_width) * (upper_val - lower_val)
def estimate_channel_bandwidth(freq_array):
    """Estimate the per-channel bandwidth from the channel-center spacing.

    Only the first two channels are inspected; a uniform grid is assumed.

    Parameters
    ----------
    freq_array: array-like
        array of channel center frequencies (Hz)

    Returns
    -------
    ban: float
        separation between the first two channel centers (Hz)
    """
    return freq_array[1] - freq_array[0]
def l2_to_freq_array(lambda_square_array):
    """Return the frequency array (Hz) corresponding to a lambda^2 array (m^2)."""
    # freq = c / lambda = sqrt(c^2 / lambda^2); keep the original operation
    # order for bit-for-bit reproducibility.
    return np.sqrt(speed_of_light.value**2 / lambda_square_array)
def adjoint_theory(adjoint_vars, dQUArr, show_progress=False, log=print):
    """Calculate the theoretical sensitivity and noise for the adjoint method.

    Parameters
    ----------
    adjoint_vars: list
        list-like object organized as
        [widths_Hz, freqArr_Hz, phiArr_radm2, K, weightArr]
    dQUArr: array like
        array containing the error in Stokes Q and U
    show_progress: bool
        if True, display a progress bar (default False)
    log: function
        logging function (default print)

    Returns
    -------
    adjoint_info: list
        [phiArr_radm2, adjoint_sens, adjoint_noise]
    """
    widths_Hz, freqArr_Hz, phiArr_radm2, K, weightArr = adjoint_vars
    nPhi = len(phiArr_radm2)
    adjoint_noise = np.ones(nPhi)
    adjoint_sens = np.ones(nPhi)

    if show_progress:
        log("Calculating Theoretical Sensitivity & Noise")
        progress(40, 0)

    for i, phi in enumerate(phiArr_radm2):
        if show_progress:
            progress(40, ((i + 1) * 100.0 / nPhi))
        # Per-channel rotation operator at this trial Faraday depth.
        r_i = rotation_operator(widths_Hz, freqArr_Hz, phi)
        # Equation 34: weighted noise variance of the adjoint spectrum.
        variance = (
            np.sum((weightArr * dQUArr) ** 2 * np.abs(r_i) ** 2)
            / np.sum(weightArr) ** 2
        )
        adjoint_noise[i] = np.sqrt(variance)
        adjoint_sens[i] = K * np.sum(weightArr * (np.abs(r_i) ** 2))

    return [phiArr_radm2, adjoint_sens, adjoint_noise]
def plot_adjoint_info(mylist, units="Jy/beam"):
    """Plot the theoretical sensitivity (top panel) and S:N multiplier (bottom)."""
    fig, axes = plt.subplots(2, dpi=100, figsize=(12, 8))
    fig.subplots_adjust(wspace=0.4, hspace=0.4)
    phiArr_radm2, adjoint_sens, adjoint_noise = mylist

    # Bottom panel: relative signal-to-noise versus Faraday depth.
    axes[1].plot(
        phiArr_radm2,
        adjoint_sens / adjoint_noise * np.max(adjoint_noise),
    )
    axes[1].set_xlabel(r"$\phi$ (rad m$^{-2}$)")
    axes[1].set_ylabel("S:N multiplier")
    axes[1].set_title("Theoretical S:N after bandwidth depolarization")

    # Top panel: raw sensitivity versus Faraday depth.
    axes[0].plot(
        phiArr_radm2,
        adjoint_sens,
    )
    axes[0].set_xlabel(r"$\phi$ (rad m$^{-2}$)")
    axes[0].set_ylabel("Sensitivity")
    axes[0].set_title("Theoretical Sensitivity after bandwidth depolarization")
    return
# -----------------------------------------------------------------------------#
def analytical_chan_pol(f, ban, phi, xi_knot=0, p=1):
    """Average analytic channel polarization over one channel's bandwidth.

    Based on equation 13 of Schnitzeler & Lee (2015).

    Parameters
    ----------
    f: float
        channel center frequency in Hz
    ban: float
        channel bandwidth in Hz
    phi: float
        Faraday depth value in rad/m^2
    xi_knot: float
        initial polarization angle in radians
        (currently unused here — see original for xi_knot handling)
    p: float
        polarized intensity

    Returns
    -------
    avg_p_tilda: complex
        average complex polarization over the bandwidth;
        real part is Q, imaginary part is U
    """
    lower = f - (ban / 2)
    upper = f + (ban / 2)  # integration limits
    lower_val = rotation_integral_limit(
        lower,
        phi,
    )
    upper_val = rotation_integral_limit(
        upper,
        phi,
    )  # check orig for xi_knot
    # Definite integral scaled by intensity, then averaged over the band.
    return p * (upper_val - lower_val) / ban
def bwdepol_simulation(peak_rm, freqArr_Hz, widths_Hz):
    """Simulate a Faraday-thin source with the measured RM and unit intensity.

    Parameters
    ----------
    peak_rm: float
        peak Faraday depth value (rad/m^2) for the simulation
    freqArr_Hz: array like
        frequency array (Hz)
    widths_Hz: float or None
        channel width in Hz; if None it is estimated from the channel spacing

    Returns
    -------
    data: list
        simulated spectrum formatted as [freq_Hz, q, u, dq, du]
    """
    # BUGFIX: use `is None` rather than `== None`. Equality comparison against
    # None is non-idiomatic and, if an ndarray is ever passed, returns an
    # element-wise array whose truth value raises.
    if widths_Hz is None:
        widths_Hz = estimate_channel_bandwidth(freqArr_Hz)

    # Complex channel-averaged polarization of the simulated source.
    p_tilda = analytical_chan_pol(freqArr_Hz, widths_Hz, peak_rm)
    size_f = len(freqArr_Hz)
    # Unit uncertainties for the simulated Q and U spectra.
    dq = np.ones(size_f)
    du = np.ones(size_f)

    # format = [freq_Hz, q, u, dq, du]
    data = [freqArr_Hz, np.real(p_tilda), np.imag(p_tilda), dq, du]
    return data
# -----------------------------------------------------------------------------#
def bwdepol_tweakAxFormat(
    ax,
    pad=10,
    loc="upper right",
    linewidth=1,
    ncol=1,
    bbox_to_anchor=(1.00, 1.00),
    showLeg=True,
):
    """Apply the standard RMSF axis/tick/legend cosmetics and return the axis."""
    # Axis and tick formatting
    ax.tick_params(pad=pad)
    for tickline in ax.get_xticklines() + ax.get_yticklines():
        tickline.set_markeredgewidth(linewidth)

    # Optional legend formatting
    if showLeg:
        leg = ax.legend(
            numpoints=1,
            loc=loc,
            shadow=False,
            borderaxespad=0.3,
            ncol=ncol,
            bbox_to_anchor=bbox_to_anchor,
        )
        for text in leg.get_texts():
            text.set_fontsize("small")
        frame = leg.get_frame()
        frame.set_linewidth(0.5)
        frame.set_alpha(0.5)
    return ax
def gauss(p, peak_rm):
    """Build a Gaussian evaluator whose centre is shifted by ``peak_rm``.

    Parameters
    ----------
    p: list
        Gaussian parameters, p = [amplitude, mean, FWHM]
    peak_rm: float
        peak Faraday depth (rad/m^2) used to re-centre the Gaussian

    Returns
    -------
    rfunc: function
        callable evaluating the shifted Gaussian at x
    """
    amplitude, mean, fwhm = p
    # Convert FWHM to the Gaussian standard deviation.
    gfactor = 2.0 * m.sqrt(2.0 * m.log(2.0))
    sigma = fwhm / gfactor

    def rfunc(x):
        offset = x - mean - peak_rm
        return amplitude * np.exp(-(offset**2.0) / (2.0 * sigma**2.0))

    return rfunc
def bwdepol_plot_RMSF_ax(
    ax,
    phiArr,
    RMSFArr,
    peak_rm,
    fwhmRMSF=None,
    axisYright=False,
    axisXtop=False,
    doTitle=False,
):
    """Draw the RMSF (real, imaginary and |PI|) onto ``ax``.

    Optionally overlays a Gaussian of width ``fwhmRMSF`` centred on
    ``peak_rm`` and moves the axis labels to the right/top of the panel.
    """
    # Move axis labels/ticks if requested
    if axisYright:
        ax.yaxis.tick_right()
        ax.yaxis.set_label_position("right")
    if axisXtop:
        ax.xaxis.tick_top()
        ax.xaxis.set_label_position("top")

    # Real, imaginary and absolute RMSF as step plots
    ax.step(phiArr, RMSFArr.real, where="mid", color="tab:blue", lw=0.5, label="Real")
    ax.step(
        phiArr, RMSFArr.imag, where="mid", color="tab:red", lw=0.5, label="Imaginary"
    )
    ax.step(phiArr, np.abs(RMSFArr), where="mid", color="k", lw=1.0, label="PI")
    ax.axhline(0, color="grey")
    if doTitle:
        ax.text(0.05, 0.84, "RMSF", transform=ax.transAxes)

    # Overlay the Gaussian fit to the main lobe, centred on peak_rm
    if fwhmRMSF is not None:
        gauss_curve = np.max(np.abs(RMSFArr)) * gauss([1.0, 0.0, fwhmRMSF], peak_rm)(phiArr)
        ax.plot(
            phiArr,
            gauss_curve,
            color="g",
            marker="None",
            mfc="w",
            mec="g",
            ms=10,
            label="Gaussian",
            lw=2.0,
            ls="--",
        )

    # Axis scaling and limits (1% margin around the phi range)
    ax.yaxis.set_major_locator(MaxNLocator(4))
    ax.xaxis.set_major_locator(MaxNLocator(4))
    span = np.nanmax(phiArr) - np.nanmin(phiArr)
    ax.set_xlim(np.nanmin(phiArr) - span * 0.01, np.nanmax(phiArr) + span * 0.01)
    ax.set_ylabel("Normalised Units")
    ax.set_xlabel(r"$\phi$ rad m$^{-2}$")
    ax.axhline(0, color="grey")

    # Common cosmetic tweaks (ticks, legend)
    ax = bwdepol_tweakAxFormat(ax)
    ax.autoscale_view(True, True, True)
def bwdepol_plot_rmsf_fdf_fig(
    phiArr,
    FDF,
    phi2Arr,
    RMSFArr,
    peak_rm,
    fwhmRMSF=None,
    gaussParm=[],
    vLine=None,
    fig=None,
    units="flux units",
):
    """Modified for bwdepol: plot the RMSF and the dirty FDF on one figure.

    The RMSF goes in the top panel and the FDF in the bottom panel, sharing
    the Faraday-depth axis.  A new pyplot figure is created when ``fig`` is
    None; otherwise the supplied figure is drawn into and returned.
    """
    # Default to a pyplot figure.
    # BUGFIX: use `is None` rather than `== None` — identity, not equality,
    # is the correct test for the None sentinel.
    if fig is None:
        fig = plt.figure(figsize=(12.0, 8))

    # Plot the RMSF in the top panel
    ax1 = fig.add_subplot(211)
    bwdepol_plot_RMSF_ax(
        ax=ax1,
        phiArr=phi2Arr,
        RMSFArr=RMSFArr,
        peak_rm=peak_rm,
        fwhmRMSF=fwhmRMSF,
        doTitle=True,
    )
    # Hide the shared x tick labels on the top panel (plain loop instead of a
    # side-effect list comprehension).
    for label in ax1.get_xticklabels():
        label.set_visible(False)
    ax1.set_xlabel("")

    # Plot the dirty FDF below, sharing the phi axis
    ax2 = fig.add_subplot(212, sharex=ax1)
    plot_dirtyFDF_ax(
        ax=ax2,
        phiArr=phiArr,
        FDFArr=FDF,
        gaussParm=gaussParm,
        vLine=vLine,
        doTitle=True,
        units=units,
    )
    return fig
# -----------------------------------------------------------------------------#
# modified for adjoint
def bwdepol_get_rmsf_planes(
    freqArr_Hz,
    widths_Hz,
    phiArr_radm2,
    peak_rm,
    weightArr=None,
    mskArr=None,
    lam0Sq_m2=None,
    double=True,
    fitRMSF=True,
    fitRMSFreal=False,
    nBits=64,
    verbose=False,
    log=print,
):
    """Calculate the Rotation Measure Spread Function from inputs. This version
    returns a cube (1, 2 or 3D) of RMSF spectra based on the shape of a
    boolean mask array, where flagged data are True and unflagged data False.
    If only whole planes (wavelength channels) are flagged then the RMSF is the
    same for all pixels and the calculation is done once and replicated to the
    dimensions of the mask. If some isolated voxels are flagged then the RMSF
    is calculated by looping through each wavelength plane, which can take some
    time. By default the routine returns the analytical width of the RMSF main
    lobe but can also use MPFIT to fit a Gaussian.
    This has been modified from the conventional RMtools_1D version for bwdepol.

    Parameters
    ----------
    freqArr_Hz      ... vector of frequency values
    widths_Hz       ... channel width in Hz (passed through to the simulation)
    phiArr_radm2    ... vector of trial Faraday depth values
    peak_rm         ... Faraday depth of the measured peak (rad/m^2)
    weightArr       ... vector of weights, default [None] is no weighting
    mskArr          ... cube of mask values used to shape return cube [None]
    lam0Sq_m2       ... force a reference lambda^2 value (def=calculate) [None]
    double          ... pad the Faraday depth to double-size [True]
    fitRMSF         ... fit the main lobe of the RMSF with a Gaussian [True]
    fitRMSFreal     ... fit RMSF.real, rather than abs(RMSF) [False]
    nBits           ... precision of data arrays [64]
    verbose         ... print feedback during calculation [False]
    log             ... function to be used to output messages [print]

    Returns
    -------
    RMSFcube: array
        RMSF spectra shaped by the boolean mask (size-1 axes squeezed out)
    phi2Arr: array
        (possibly padded) Faraday depth sampling (rad/m^2)
    fwhmRMSFArr: array
        FWHM of the RMSF main lobe
    statArr: array
        per-pixel Gaussian-fit status (-1 when no fit was attempted/succeeded)
    """
    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # For cleaning the RMSF should extend by 1/2 on each side in phi-space
    if double:
        nPhi = phiArr_radm2.shape[0]
        nExt = np.ceil(nPhi / 2.0)
        resampIndxArr = np.arange(2.0 * nExt + nPhi) - nExt
        phi2Arr = extrap(resampIndxArr, np.arange(nPhi, dtype="int"), phiArr_radm2)
    else:
        phi2Arr = phiArr_radm2

    # Set the weight array
    if weightArr is None:
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)

    # Set the mask array (default to 1D, no masked channels)
    if mskArr is None:
        mskArr = np.zeros_like(freqArr_Hz, dtype="bool")
        nDims = 1
    else:
        mskArr = mskArr.astype("bool")
        nDims = len(mskArr.shape)

    # Sanity checks on array sizes
    if not weightArr.shape == freqArr_Hz.shape:
        log("Err: wavelength^2 and weight arrays must be the same shape.")
        return None, None, None, None
    if not nDims <= 3:
        log("Err: mask dimensions must be <= 3.")
        return None, None, None, None
    if not mskArr.shape[0] == freqArr_Hz.shape[0]:
        # BUGFIX: the "%d vs %d" template was previously passed to log()
        # unformatted, with the arguments sitting on the next line as a no-op
        # tuple expression — the message printed literal "%d"s. Apply the
        # format arguments before logging.
        log(
            "Err: mask depth does not match lambda^2 vector (%d vs %d)."
            % (mskArr.shape[0], freqArr_Hz.shape[-1]),
            end=" ",
        )
        log(" Check that the mask is in [z, y, x] order.")
        return None, None, None, None

    # Reshape the mask array to 3 dimensions
    if nDims == 1:
        mskArr = np.reshape(mskArr, (mskArr.shape[0], 1, 1))
    elif nDims == 2:
        mskArr = np.reshape(mskArr, (mskArr.shape[0], mskArr.shape[1], 1))

    # Initialise the complex RM Spread Function cube
    nX = mskArr.shape[-1]
    nY = mskArr.shape[-2]
    nPix = nX * nY
    nPhi = phi2Arr.shape[0]
    RMSFcube = np.ones((nPhi, nY, nX), dtype=dtComplex)

    # If full planes are flagged then set corresponding weights to zero
    xySum = np.sum(np.sum(mskArr, axis=1), axis=1)
    mskPlanes = np.where(xySum == nPix, 0, 1)
    weightArr *= mskPlanes

    # Check for isolated clumps of flags (# flags in a plane not 0 or nPix)
    flagTotals = np.unique(xySum).tolist()
    try:
        flagTotals.remove(0)
    except Exception:
        pass
    try:
        flagTotals.remove(nPix)
    except Exception:
        pass
    lambdaSqArr_m2 = np.power(speed_of_light.value / freqArr_Hz, 2.0)

    # Calculate the analytical FWHM width of the main lobe
    fwhmRMSF = (
        2.0 * m.sqrt(3.0) / (np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2))
    )

    # Create simulated data set with simRM = peakRM
    RMSF_data = bwdepol_simulation(peak_rm, freqArr_Hz, widths_Hz)

    # RMSFArr = fdf from bwdepol_simulation
    RMSFArr, _, _ = do_adjoint_rmsynth_planes(
        freqArr_Hz,
        RMSF_data[1],
        RMSF_data[2],
        phiArr_radm2,
        widths_Hz=widths_Hz,
        weightArr=weightArr,
        lam0Sq_m2=lam0Sq_m2,
        verbose=verbose,
        log=print,
    )

    # Fit the RMSF main lobe
    fitStatus = -1
    if fitRMSF:
        if verbose:
            log("Fitting Gaussian to the main lobe.")
        mp = fit_rmsf(phi2Arr, np.abs(RMSFArr) / np.max(np.abs(RMSFArr)))
        if mp is None or mp.status < 1:
            # (stray `pass` statement removed here)
            log("Err: failed to fit the RMSF.")
            log(" Defaulting to analytical value.")
        else:
            fwhmRMSF = mp.params[2]
            fitStatus = mp.status

    # Replicate along X and Y axes
    RMSFcube = np.tile(RMSFArr[:, np.newaxis, np.newaxis], (1, nY, nX))
    fwhmRMSFArr = np.ones((nY, nX), dtype=dtFloat) * fwhmRMSF
    statArr = np.ones((nY, nX), dtype="int") * fitStatus

    # Remove redundant dimensions
    fwhmRMSFArr = np.squeeze(fwhmRMSFArr)
    statArr = np.squeeze(statArr)
    # BUGFIX: was `RMSFcube.reshape(-1)`, which flattened the whole cube to 1D
    # and contradicted both the documented cube output and the squeeze applied
    # to the companion arrays. np.squeeze drops only the size-1 axes.
    RMSFcube = np.squeeze(RMSFcube)
    return RMSFcube, phi2Arr, fwhmRMSFArr, statArr
# -----------------------------------------------------------------------------#
def bwdepol_measure_FDF_parms(
    FDF,
    phiArr,
    fwhmRMSF,
    adjoint_sens,
    adjoint_noise,
    dFDF=None,
    lamSqArr_m2=None,
    lam0Sq=None,
    snrDoBiasCorrect=5.0,
):
    """
    Measure standard parameters from a complex Faraday Dispersion Function.

    Currently this function assumes that the noise levels in the Stokes Q
    and U spectra are the same.  Modified from the conventional RMtools_1D
    version: the spectrum is normalized by the RM-dependent theoretical
    noise (for peak finding) and sensitivity (for amplitude), as computed
    by the adjoint method.

    NOTE(review): the ``dFDF`` argument is unconditionally overwritten below
    from ``adjoint_noise`` — the passed-in value is never used.
    NOTE(review): ``lamSqArr_m2`` and ``lam0Sq`` default to None but are used
    unconditionally; passing None would raise — presumably callers always
    supply them. Verify against callers.

    Returns a dictionary containing measured parameters.
    """
    # Determine the peak channel in the FDF, its amplitude and index
    absFDF = np.abs(FDF)
    rm_fdf = (
        absFDF / adjoint_noise
    )  # RM spectrum in S:N units (normalized by RM-dependent noise)
    amp_fdf = (
        absFDF / adjoint_sens
    )  # RM spectrum normalized by (RM-dependent) sensitivity
    indxPeakPIchan = np.nanargmax(rm_fdf[1:-1]) + 1  # Masks out the edge channels

    # new theoretical dFDF correction for adjoint method
    # This is noise in the adjoint-spectrum.
    dFDF = adjoint_noise[indxPeakPIchan]
    # This is the error in the amplitude (accounting for re-normalization)
    dampPeakPI = dFDF / adjoint_sens[indxPeakPIchan]

    # Measure the RMS noise in the spectrum after masking the peak
    # changed all absFDF to rm_fdf
    # Since this is normalized by theoretical noise, it's effectively testing
    # the noise relative to the theoretical noise.
    dPhi = np.nanmin(np.diff(phiArr))
    fwhmRMSF_chan = np.ceil(fwhmRMSF / dPhi)
    # Mask out +/- 2 FWHM around the peak before measuring the noise.
    iL = int(max(0, indxPeakPIchan - fwhmRMSF_chan * 2))
    iR = int(min(len(absFDF), indxPeakPIchan + fwhmRMSF_chan * 2))
    absFDFmsked = rm_fdf.copy()
    absFDFmsked[iL:iR] = np.nan
    # Drop NaNs (NaN != NaN, so this keeps only finite-valued channels).
    absFDFmsked = absFDFmsked[np.where(absFDFmsked == absFDFmsked)]
    # Fall back to the unmasked spectrum if masking removed too much (>70%).
    if float(len(absFDFmsked)) / len(absFDF) < 0.3:
        dFDFcorMAD = MAD(rm_fdf)
    else:
        dFDFcorMAD = MAD(absFDFmsked)
    # The noise is re-normalized by the predicted noise at the peak RM.
    dFDFcorMAD = dFDFcorMAD * adjoint_noise[indxPeakPIchan]
    # Number of good channels and variance of lambda^2 (used for the
    # derotated-angle uncertainty, Brentjens & De Bruyn 2005 Eqn A.20).
    nChansGood = np.sum(np.where(lamSqArr_m2 == lamSqArr_m2, 1.0, 0.0))
    varLamSqArr_m2 = (
        np.sum(lamSqArr_m2**2.0) - np.sum(lamSqArr_m2) ** 2.0 / nChansGood
    ) / (nChansGood - 1)

    # Determine the peak in the FDF, its amplitude and Phi using a
    # 3-point parabolic interpolation
    phiPeakPIfit = None
    dPhiPeakPIfit = None
    ampPeakPIfit = None
    snrPIfit = None
    ampPeakPIfitEff = None
    indxPeakPIfit = None
    peakFDFimagFit = None
    peakFDFrealFit = None
    polAngleFit_deg = None
    dPolAngleFit_deg = None
    polAngle0Fit_deg = None
    dPolAngle0Fit_deg = None

    # Only do the 3-point fit if peak is 1-channel from either edge
    if indxPeakPIchan > 0 and indxPeakPIchan < len(FDF) - 1:
        phiPeakPIfit, ampPeakPIfit = calc_parabola_vertex(
            phiArr[indxPeakPIchan - 1],
            amp_fdf[indxPeakPIchan - 1],
            phiArr[indxPeakPIchan],
            amp_fdf[indxPeakPIchan],
            phiArr[indxPeakPIchan + 1],
            amp_fdf[indxPeakPIchan + 1],
        )
        # Convert the fitted (sensitivity-normalized) amplitude back to S:N.
        snrPIfit = ampPeakPIfit * adjoint_sens[indxPeakPIchan] / dFDF

        # Error on fitted Faraday depth (RM) is same as channel
        # but using fitted PI
        dPhiPeakPIfit = fwhmRMSF / (2.0 * snrPIfit)

        # Correct the peak for polarisation bias (POSSUM report 11)
        ampPeakPIfitEff = ampPeakPIfit
        if snrPIfit >= snrDoBiasCorrect:
            ampPeakPIfitEff = np.sqrt(ampPeakPIfit**2.0 - 2.3 * dampPeakPI**2.0)

        # Calculate the polarisation angle from the fitted peak
        # Uncertainty from Eqn A.12 in Brentjens & De Bruyn 2005
        indxPeakPIfit = np.interp(
            phiPeakPIfit, phiArr, np.arange(phiArr.shape[-1], dtype="f4")
        )
        peakFDFimagFit = np.interp(phiPeakPIfit, phiArr, FDF.imag)
        peakFDFrealFit = np.interp(phiPeakPIfit, phiArr, FDF.real)
        polAngleFit_deg = (
            0.5 * np.degrees(np.arctan2(peakFDFimagFit, peakFDFrealFit)) % 180
        )
        dPolAngleFit_deg = np.degrees(1 / (2.0 * snrPIfit))

        # Calculate the derotated polarisation angle and uncertainty
        # Uncertainty from Eqn A.20 in Brentjens & De Bruyn 2005
        polAngle0Fit_deg = (
            np.degrees(np.radians(polAngleFit_deg) - phiPeakPIfit * lam0Sq)
        ) % 180
        dPolAngle0Fit_rad = np.sqrt(
            nChansGood
            / (4.0 * (nChansGood - 2.0) * snrPIfit**2.0)
            * ((nChansGood - 1) / nChansGood + lam0Sq**2.0 / varLamSqArr_m2)
        )
        dPolAngle0Fit_deg = np.degrees(dPolAngle0Fit_rad)

    # Store the measurements in a dictionary and return
    mDict = {
        "dFDFcorMAD": toscalar(dFDFcorMAD),
        "phiPeakPIfit_rm2": toscalar(phiPeakPIfit),
        "dPhiPeakPIfit_rm2": toscalar(dPhiPeakPIfit),
        "ampPeakPIfit": toscalar(ampPeakPIfit),
        "ampPeakPIfitEff": toscalar(ampPeakPIfitEff),
        "dAmpPeakPIfit": toscalar(dampPeakPI),
        "snrPIfit": toscalar(snrPIfit),
        "indxPeakPIfit": toscalar(indxPeakPIfit),
        "peakFDFimagFit": toscalar(peakFDFimagFit),
        "peakFDFrealFit": toscalar(peakFDFrealFit),
        "polAngleFit_deg": toscalar(polAngleFit_deg),
        "dPolAngleFit_deg": toscalar(dPolAngleFit_deg),
        "polAngle0Fit_deg": toscalar(polAngle0Fit_deg),
        "dPolAngle0Fit_deg": toscalar(dPolAngle0Fit_deg),
    }
    return mDict
# -----------------------------------------------------------------------------#
def do_adjoint_rmsynth_planes(
    freqArr_Hz,
    dataQ,
    dataU,
    phiArr_radm2,
    widths_Hz=None,
    weightArr=None,
    lam0Sq_m2=None,
    nBits=64,
    verbose=False,
    log=print,
):
    """Perform RM-synthesis on Stokes Q and U cubes (1,2 or 3D). This version
    of the routine loops through spectral planes and is faster than the pixel-
    by-pixel code. This version also correctly deals with isolated clumps of
    NaN-flagged voxels within the data-cube (unlikely in interferometric cubes,
    but possible in single-dish cubes). Input data must be in standard python
    [z,y,x] order, where z is the frequency axis in ascending order.

    This has been modified from the conventional RMtools_1D version for bwdepol.

    Parameters
    ----------
    freqArr_Hz ... vector of channel centre frequencies in Hz
    dataQ ... 1, 2 or 3D Stokes Q data array
    dataU ... 1, 2 or 3D Stokes U data array
    phiArr_radm2 ... vector of trial Faraday depth values
    widths_Hz ... vector of channel widths in Hz [None => estimated]
    weightArr ... vector of weights, default [None] is Uniform (all 1s)
    lam0Sq_m2 ... reference lambda^2 value [None => weighted mean]
    nBits ... precision of data arrays [64]
    verbose ... print feedback during calculation [False]
    log ... function to be used to output messages [print]

    Returns
    -------
    FDFcube: array
        Faraday Dispersion Function (FDF)
    lam0Sq_m2: float
        weighted mean of the lambda^2 distribution (B&dB Eqn. 32)
    adjoint_vars: list
        information to generate theoretical noise, sensitivity
        adjoint_vars = [widths_Hz, freqArr_Hz, phiArr_radm2, K, weightArr]
    """
    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    lambdaSqArr_m2 = np.power(speed_of_light.value / freqArr_Hz, 2.0)

    # Set the weight array (uniform by default) and zero NaN weights
    if weightArr is None:
        weightArr = np.ones(lambdaSqArr_m2.shape, dtype=dtFloat)
    weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)

    # Sanity check on array sizes
    if not weightArr.shape == lambdaSqArr_m2.shape:
        log("Err: Lambda^2 and weight arrays must be the same shape.")
        return None, None
    if not dataQ.shape == dataU.shape:
        log("Err: Stokes Q and U data arrays must be the same shape.")
        return None, None
    nDims = len(dataQ.shape)
    if not nDims <= 3:
        log("Err: data dimensions must be <= 3.")
        return None, None
    if not dataQ.shape[0] == lambdaSqArr_m2.shape[0]:
        log(
            "Err: Data depth does not match lambda^2 vector ({} vs {}).".format(
                dataQ.shape[0], lambdaSqArr_m2.shape[0]
            ),
            end=" ",
        )
        log(" Check that data is in [z, y, x] order.")
        return None, None

    # Reshape the data arrays to 3 dimensions
    if nDims == 1:
        dataQ = np.reshape(dataQ, (dataQ.shape[0], 1, 1))
        dataU = np.reshape(dataU, (dataU.shape[0], 1, 1))
    elif nDims == 2:
        dataQ = np.reshape(dataQ, (dataQ.shape[0], dataQ.shape[1], 1))
        dataU = np.reshape(dataU, (dataU.shape[0], dataU.shape[1], 1))

    # Create a complex polarised cube, B&dB Eqns. (8) and (14)
    # Array has dimensions [nFreq, nY, nX]
    pCube = (dataQ + 1j * dataU) * weightArr[:, np.newaxis, np.newaxis]

    # Check for NaNs (flagged data) in the cube & set to zero
    mskCube = np.isnan(pCube)
    pCube = np.nan_to_num(pCube)

    # If full planes are flagged then set corresponding weights to zero
    mskPlanes = np.sum(np.sum(~mskCube, axis=1), axis=1)
    mskPlanes = np.where(mskPlanes == 0, 0, 1)
    weightArr *= mskPlanes

    # Initialise the complex Faraday Dispersion Function cube
    nX = dataQ.shape[-1]
    nY = dataQ.shape[-2]
    nPhi = phiArr_radm2.shape[0]
    FDFcube = np.zeros((nPhi, nY, nX), dtype=dtComplex)

    # lam0Sq_m2 is the weighted mean of lambda^2 distribution (B&dB Eqn. 32)
    # Calculate a global lam0Sq_m2 value, ignoring isolated flagged voxels
    K = 1.0 / np.sum(weightArr)
    if lam0Sq_m2 is None:
        lam0Sq_m2 = K * np.sum(weightArr * lambdaSqArr_m2)

    # The K value used to scale each FDF spectrum must take into account
    # flagged voxels data in the datacube and can be position dependent
    weightCube = np.invert(mskCube) * weightArr[:, np.newaxis, np.newaxis]
    with np.errstate(divide="ignore", invalid="ignore"):
        KArr = np.true_divide(1.0, np.sum(weightCube, axis=0))
        KArr[KArr == np.inf] = 0
        KArr = np.nan_to_num(KArr)

    # Do the RM-synthesis on each plane
    if verbose:
        log("Running RM-synthesis by channel.")
        progress(40, 0)

    # Calculate channel widths if necessary.
    # BUGFIX: use an identity test; ``widths_Hz == None`` performs an
    # elementwise comparison when an array of widths is supplied.
    if widths_Hz is None:
        widths_Hz = estimate_channel_bandwidth(freqArr_Hz)

    for i in range(nPhi):
        if verbose:
            progress(40, ((i + 1) * 100.0 / nPhi))
        # Derotate to the reference lambda^2 and apply the adjoint of the
        # bandwidth-depolarisation rotation operator for this trial phi.
        cor = np.exp(2j * phiArr_radm2[i] * lam0Sq_m2)
        r_i = rotation_operator(widths_Hz, freqArr_Hz, phiArr_radm2[i])[
            :, np.newaxis, np.newaxis
        ]
        arg = pCube * cor * np.conj(r_i)
        FDFcube[i, :, :] = KArr * np.sum(arg, axis=0)

    # information to generate theoretical noise, sensitivity
    adjoint_vars = [widths_Hz, freqArr_Hz, phiArr_radm2, K, weightArr]

    # Remove redundant dimensions in the FDF array
    FDFcube = np.squeeze(FDFcube)

    return FDFcube, lam0Sq_m2, adjoint_vars
# -----------------------------------------------------------------------------#
def run_adjoint_rmsynth(
    data,
    polyOrd=3,
    phiMax_radm2=None,
    dPhi_radm2=None,
    nSamples=10.0,
    weightType="variance",
    fitRMSF=True,
    noStokesI=False,
    phiNoise_radm2=1e6,
    nBits=64,
    showPlots=False,
    debug=False,
    verbose=False,
    log=print,
    units="Jy/beam",
):
    """Run bwdepol RM synthesis on 1D data.

    Args:
        data (list): Contains frequency and polarization data as either:
            [freq_Hz, I, Q, U, dI, dQ, dU]
                freq_Hz (array_like): Frequency of each channel in Hz.
                I (array_like): Stokes I intensity in each channel.
                Q (array_like): Stokes Q intensity in each channel.
                U (array_like): Stokes U intensity in each channel.
                dI (array_like): Error in Stokes I intensity in each channel.
                dQ (array_like): Error in Stokes Q intensity in each channel.
                dU (array_like): Error in Stokes U intensity in each channel.
            or
            [freq_Hz, q, u, dq, du]
                freq_Hz (array_like): Frequency of each channel in Hz.
                q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
                u (array_like): Fractional Stokes U intensity (U/I) in each channel.
                dq (array_like): Error in fractional Stokes Q intensity in each channel.
                du (array_like): Error in fractional Stokes U intensity in each channel.
            Either form may additionally carry a widths_Hz column after
            freq_Hz (8 or 6 columns in total).

    Kwargs:
        polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
        phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
        dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
        nSamples (float): Number of samples across the RMSF.
        weightType (str): Can be "variance" or "uniform"
            "variance" -- Weight by uncertainty in Q and U.
            "uniform" -- Weight uniformly (i.e. with 1s)
        fitRMSF (bool): Fit a Gaussian to the RMSF?
        noStokesI (bool): Is Stokes I data provided?
        phiNoise_radm2 (float): ????
        nBits (int): Precision of floating point numbers.
        showPlots (bool): Show plots?
        debug (bool): Turn on debugging messages & plots?
        verbose (bool): Verbosity.
        log (function): Which logging function to use.
        units (str): Units of data.

    Returns:
        mDict (dict): Summary of RM synthesis results.
        aDict (dict): Data output by RM synthesis.
    """
    # Default data types
    dtFloat = "float" + str(nBits)
    # dtComplex = "complex" + str(2*nBits)

    # Parse the input columns according to how many were supplied.
    # freq_Hz, I, Q, U, dI, dQ, dU
    if data.shape[0] == 7:
        if verbose:
            log("> Seven columns found, trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=" ")
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
        widths_Hz = None
    elif data.shape[0] == 8:
        if verbose:
            log(
                "> Eight columns found, trying [freq_Hz, widths_Hz, I, Q, U, dI, dQ, dU]",
                end=" ",
            )
        (freqArr_Hz, widths_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
    elif data.shape[0] == 6:
        if verbose:
            log(
                "> Six columns found, trying [freq_Hz, widths_Hz, Q, U, dQ, dU]",
                end=" ",
            )
        # BUGFIX: was unpacked into a mistyped name (``width_Hz``) leaving
        # ``widths_Hz`` undefined, and ``noStokesI`` was not set even though
        # no Stokes I columns are present in this layout.
        (freqArr_Hz, widths_Hz, QArr, UArr, dQArr, dUArr) = data
        noStokesI = True
    elif data.shape[0] == 5:
        if verbose:
            log("> Five columns found, trying [freq_Hz, Q, U, dQ, dU]", end=" ")
        (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
        widths_Hz = None
        noStokesI = True
    else:
        log("Failed to read in data, aborting.")
        if debug:
            log(traceback.format_exc())
        sys.exit()
    if verbose:
        log("Successfully read in the Stokes spectra.")

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if verbose:
            log("Warn: no Stokes I data in use.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    dQUArr = (dQArr + dUArr) / 2.0

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, fit_result = create_frac_spectra(
        freqArr=freqArr_GHz,
        IArr=IArr,
        QArr=QArr,
        UArr=UArr,
        dIArr=dIArr,
        dQArr=dQArr,
        dUArr=dUArr,
        polyOrd=polyOrd,
        verbose=True,
        debug=debug,
    )

    # Plot the data and the Stokes I model fit
    if showPlots:
        if verbose:
            log("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr = poly5(fit_result.params)(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(12.0, 8))
        plot_Ipqu_spectra_fig(
            freqArr_Hz=freqArr_Hz,
            IArr=IArr,
            qArr=qArr,
            uArr=uArr,
            dIArr=dIArr,
            dqArr=dqArr,
            duArr=duArr,
            freqHirArr_Hz=freqHirArr_Hz,
            IModArr=IModHirArr,
            fig=specFig,
            units=units,
        )

    # DEBUG (plot the Q, U and average RMS spectrum)
    if debug:
        rmsFig = plt.figure(figsize=(12.0, 8))
        ax = rmsFig.add_subplot(111)
        ax.plot(
            freqArr_Hz / 1e9,
            dQUArr,
            marker="o",
            color="k",
            lw=0.5,
            label="rms <QU>",
        )
        ax.plot(
            freqArr_Hz / 1e9, dQArr, marker="o", color="b", lw=0.5, label="rms Q"
        )
        ax.plot(
            freqArr_Hz / 1e9, dUArr, marker="o", color="r", lw=0.5, label="rms U"
        )
        xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9
        ax.set_xlim(
            np.min(freqArr_Hz) / 1e9 - xRange * 0.05,
            np.max(freqArr_Hz) / 1e9 + xRange * 0.05,
        )
        ax.set_xlabel(r"$\nu$ (GHz)")
        ax.set_ylabel("RMS " + units)
        ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")

    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(speed_of_light.value / freqArr_Hz, 2.0)
    # dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2)
    # dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, 600.0)  # Force the minimum phiMax

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
    startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    if verbose:
        log(
            "PhiArr = %.2f to %.2f by %.2f (%d chans)."
            % (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)
        )

    # Calculate the weighting as 1/sigma^2 or all 1s (uniform)
    if weightType == "variance":
        weightArr = 1.0 / np.power(dQUArr, 2.0)
    else:
        weightType = "uniform"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    if verbose:
        log("Weight type is '%s'." % weightType)

    startTime = time.time()

    # Perform adjoint RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2, adjoint_vars = do_adjoint_rmsynth_planes(
        freqArr_Hz=freqArr_Hz,
        widths_Hz=widths_Hz,
        dataQ=qArr,
        dataU=uArr,
        phiArr_radm2=phiArr_radm2,
        weightArr=weightArr,
        nBits=nBits,
        verbose=verbose,
        log=log,
    )

    # generate adjoint_noise and adjoint_sens
    adjoint_info = adjoint_theory(adjoint_vars, dQUArr, show_progress=False)
    phiArr_radm2, adjoint_sens, adjoint_noise = adjoint_info

    # calculate peak RM (peak found in the noise-normalised FDF)
    absFDF = np.abs(dirtyFDF)
    rm_fdf = absFDF / adjoint_noise  # used for finding peak in RM
    indxPeakPIchan = np.nanargmax(rm_fdf[1:-1]) + 1
    peak_rm = phiArr_radm2[indxPeakPIchan]

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = bwdepol_get_rmsf_planes(
        freqArr_Hz=freqArr_Hz,
        widths_Hz=widths_Hz,
        phiArr_radm2=phiArr_radm2,
        weightArr=weightArr,
        mskArr=~np.isfinite(qArr),
        lam0Sq_m2=lam0Sq_m2,
        double=True,
        fitRMSF=fitRMSF,
        fitRMSFreal=False,
        nBits=nBits,
        verbose=verbose,
        log=log,
        peak_rm=peak_rm,
    )
    fwhmRMSF = float(fwhmRMSFArr)

    endTime = time.time()
    cputime = endTime - startTime
    if verbose:
        log("> RM-synthesis completed in %.2f seconds." % cputime)

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI
    freq0_Hz = speed_of_light.value / m.sqrt(lam0Sq_m2)
    Ifreq0 = poly5(fit_result.params)(freq0_Hz / 1e9)
    dirtyFDF *= Ifreq0  # FDF is in fracpol units initially, convert back to flux

    # Calculate the theoretical noise in the FDF !!
    # Old formula only works for wariance weights!
    weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
    dFDFth = np.sqrt(
        np.sum(weightArr**2 * np.nan_to_num(dQUArr) ** 2) / (np.sum(weightArr)) ** 2
    )

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = bwdepol_measure_FDF_parms(
        FDF=dirtyFDF,
        phiArr=phiArr_radm2,
        fwhmRMSF=fwhmRMSF,
        adjoint_sens=adjoint_sens,
        adjoint_noise=adjoint_noise,
        dFDF=dFDFth,
        lamSqArr_m2=lambdaSqArr_m2,
        lam0Sq=lam0Sq_m2,
    )
    mDict["Ifreq0"] = toscalar(Ifreq0)
    mDict["polyCoeffs"] = ",".join([str(x) for x in fit_result.params])
    mDict["IfitStat"] = fit_result.fitStatus
    mDict["IfitChiSqRed"] = fit_result.chiSqRed
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU"] = toscalar(nanmedian(dQUArr))
    # BUGFIX: this key is read in the verbose summary below; leaving it
    # unset raised a KeyError when verbose=True.
    mDict["dFDFth"] = toscalar(dFDFth)
    mDict["units"] = units

    if fit_result.fitStatus >= 128:
        log("WARNING: Stokes I model contains negative values!")
    elif fit_result.fitStatus >= 64:
        log("Caution: Stokes I model has low signal-to-noise.")

    # Add information on nature of channels:
    good_channels = np.where(np.logical_and(weightArr != 0, np.isfinite(qArr)))[0]
    mDict["min_freq"] = float(np.min(freqArr_Hz[good_channels]))
    mDict["max_freq"] = float(np.max(freqArr_Hz[good_channels]))
    mDict["N_channels"] = good_channels.size
    # BUGFIX: use an identity test; ``widths_Hz != None`` is an elementwise
    # comparison (ValueError) when an array of widths was supplied.
    if widths_Hz is not None:
        mDict["median_channel_width"] = float(np.median(widths_Hz))
    else:
        mDict["median_channel_width"] = float(np.median(np.diff(freqArr_Hz)))

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit"] / (Ifreq0)
    mD, pD = measure_qu_complexity(
        freqArr_Hz=freqArr_Hz,
        qArr=qArr,
        uArr=uArr,
        dqArr=dqArr,
        duArr=duArr,
        fracPol=mDict["fracPol"],
        psi0_deg=mDict["polAngle0Fit_deg"],
        RM_radm2=mDict["phiPeakPIfit_rm2"],
    )
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(
            xArr=pD["xArrQ"],
            qArr=pD["yArrQ"],
            dqArr=pD["dyArrQ"],
            sigmaAddqArr=pD["sigmaAddArrQ"],
            chiSqRedqArr=pD["chiSqRedArrQ"],
            probqArr=pD["probArrQ"],
            uArr=pD["yArrU"],
            duArr=pD["dyArrU"],
            sigmaAdduArr=pD["sigmaAddArrU"],
            chiSqReduArr=pD["chiSqRedArrU"],
            probuArr=pD["probArrU"],
            mDict=mDict,
        )
        tmpFig.show()

    # add array dictionary
    aDict = dict()
    aDict["phiArr_radm2"] = phiArr_radm2
    aDict["phi2Arr_radm2"] = phi2Arr_radm2
    aDict["RMSFArr"] = RMSFArr
    aDict["freqArr_Hz"] = freqArr_Hz
    aDict["weightArr"] = weightArr
    aDict["dirtyFDF"] = dirtyFDF / adjoint_sens

    if verbose:
        # Print the results to the screen
        log()
        log("-" * 80)
        log("RESULTS:\n")
        log("FWHM RMSF = %.4g rad/m^2" % (mDict["fwhmRMSF"]))
        log(
            "Pol Angle = %.4g (+/-%.4g) deg"
            % (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"])
        )
        log(
            "Pol Angle 0 = %.4g (+/-%.4g) deg"
            % (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"])
        )
        log(
            "Peak FD = %.4g (+/-%.4g) rad/m^2"
            % (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"])
        )
        log("freq0_GHz = %.4g " % (mDict["freq0_Hz"] / 1e9))
        log("I freq0 = %.4g %s" % (mDict["Ifreq0"], units))
        log(
            "Peak PI = %.4g (+/-%.4g) %s"
            % (mDict["ampPeakPIfit"], mDict["dAmpPeakPIfit"], units)
        )
        log("QU Noise = %.4g %s" % (mDict["dQU"], units))
        log("FDF Noise (theory) = %.4g %s" % (mDict["dFDFth"], units))
        log("FDF Noise (Corrected MAD) = %.4g %s" % (mDict["dFDFcorMAD"], units))
        log("FDF SNR = %.4g " % (mDict["snrPIfit"]))
        log(
            "sigma_add (combined) = %.4g (+%.4g, -%.4g)"
            % (mDict["sigmaAddC"], mDict["dSigmaAddPlusC"], mDict["dSigmaAddMinusC"])
        )
        log()
        log("-" * 80)

    # Plot the RM Spread Function and dirty FDF
    if showPlots:
        plot_adjoint_info(adjoint_info, units=units)
        fdfFig = plt.figure(figsize=(12.0, 8))
        bwdepol_plot_rmsf_fdf_fig(
            phiArr=phiArr_radm2,
            FDF=(dirtyFDF / adjoint_sens),
            phi2Arr=phiArr_radm2,
            RMSFArr=RMSFArr,
            peak_rm=peak_rm,
            fwhmRMSF=fwhmRMSF,
            vLine=mDict["phiPeakPIfit_rm2"],
            fig=fdfFig,
            units=units,
        )

    if showPlots or debug:
        plt.show()

    return mDict, aDict
# -----------------------------------------------------------------------------#
def main():
    """
    Start the function to perform bwdepol RM-synthesis if called from the command line.
    """

    # Help string to be shown using the -h option
    descStr = """
    Run bandwidth-depolarization-corrected RM-synthesis (based on Fine et al 2022)
    on Stokes I, Q and U spectra (1D) stored in an ASCII file.

    Behaves similarly to rmsynth1d except that the input file can optionally
    contain a column with the channel widths in Hz. If this column is not
    given, the channel widths will be assumed to be uniform and calculated
    based on the difference between the frequencies of the first two channels.

    The ASCII file requires one of the following column configurations,
    depending on whether Stokes I and channel width information are available,
    in a space separated format:
    [freq_Hz, I, Q, U, I_err, Q_err, U_err]
    [freq_Hz, widths_Hz, I, Q, U, I_err, Q_err, U_err]
    [freq_Hz, Q, U, Q_err, U_err]
    [freq_Hz, widths_Hz, Q, U, Q_err, U_err]

    To get outputs, one or more of the following flags must be set: -S, -p, -v.
    """

    epilog_text = """
    Outputs with -S flag:
    _FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U]
    _RMSF.dat: Computed RMSF [Phi, Q, U]
    _RMsynth.dat: list of derived parameters for RM spectrum
        (approximately equivalent to -v flag output)
    _RMsynth.json: dictionary of derived parameters for RM spectrum
    _weight.dat: Calculated channel weights [freq_Hz, weight]
    """

    # Parse the command line options
    parser = argparse.ArgumentParser(
        description=descStr,
        epilog=epilog_text,
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "dataFile",
        metavar="dataFile.dat",
        nargs=1,
        help="ASCII file containing Stokes spectra & errors.",
    )
    parser.add_argument(
        "-t",
        dest="fitRMSF",
        action="store_false",
        help="fit a Gaussian to the RMSF [True; set flag to disable]",
    )
    parser.add_argument(
        "-l",
        dest="phiMax_radm2",
        type=float,
        default=None,
        help="absolute max Faraday depth sampled [Auto].",
    )
    parser.add_argument(
        "-d",
        dest="dPhi_radm2",
        type=float,
        default=None,
        help="width of Faraday depth channel [Auto].\n(overrides -s NSAMPLES flag)",
    )
    parser.add_argument(
        "-s",
        dest="nSamples",
        type=float,
        default=10,
        help="number of samples across the RMSF lobe [10].",
    )
    parser.add_argument(
        "-w",
        dest="weightType",
        default="variance",
        help="weighting [inverse variance] or 'uniform' (all 1s).",
    )
    parser.add_argument(
        "-o",
        dest="polyOrd",
        type=int,
        default=2,
        help="polynomial order to fit to I spectrum [2].",
    )
    parser.add_argument(
        "-i",
        dest="noStokesI",
        action="store_true",
        help="ignore the Stokes I spectrum [False].",
    )
    parser.add_argument(
        "-p", dest="showPlots", action="store_true", help="show the plots [False]."
    )
    parser.add_argument(
        "-v", dest="verbose", action="store_true", help="verbose output [False]."
    )
    parser.add_argument(
        "-S", dest="saveOutput", action="store_true", help="save the arrays [False]."
    )
    parser.add_argument(
        "-D",
        dest="debug",
        action="store_true",
        help="turn on debugging messages & plots [False].",
    )
    parser.add_argument(
        "-U",
        dest="units",
        type=str,
        default="Jy/beam",
        help="Intensity units of the data. [Jy/beam]",
    )
    args = parser.parse_args()

    # Sanity checks: the input file must exist before anything is attempted
    if not os.path.exists(args.dataFile[0]):
        print("File does not exist: '%s'." % args.dataFile[0])
        sys.exit()
    # Output files are written next to the input, sharing its basename
    prefixOut, ext = os.path.splitext(args.dataFile[0])
    dataDir, dummy = os.path.split(args.dataFile[0])

    # Set the floating point precision
    nBits = 64

    # Read in the data. Don't parse until inside the first function.
    data = np.loadtxt(args.dataFile[0], unpack=True, dtype="float" + str(nBits))

    # Run (modified) RM-synthesis on the spectra
    mDict, aDict = run_adjoint_rmsynth(
        data=data,
        polyOrd=args.polyOrd,
        phiMax_radm2=args.phiMax_radm2,
        dPhi_radm2=args.dPhi_radm2,
        nSamples=args.nSamples,
        weightType=args.weightType,
        fitRMSF=args.fitRMSF,
        noStokesI=args.noStokesI,
        nBits=nBits,
        showPlots=args.showPlots,
        debug=args.debug,
        verbose=args.verbose,
        units=args.units,
    )

    # Persist results only when explicitly requested with -S
    if args.saveOutput:
        saveOutput(mDict, aDict, prefixOut, args.verbose)
# -----------------------------------------------------------------------------#
# Entry point when the module is executed as a script.
if __name__ == "__main__":
    main()
|
CIRADA-ToolsREPO_NAMERM-ToolsPATH_START.@RM-Tools_extracted@RM-Tools-master@RMtools_1D@rmtools_bwdepol.py@.PATH_END.py
|
{
"filename": "model_analysis.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/autofit/non_linear/analysis/model_analysis.py",
"type": "Python"
}
|
from typing import Optional
from autofit.mapper.prior_model.abstract import AbstractPriorModel
from autofit.mapper.prior_model.collection import Collection
from .analysis import Analysis
from .indexed import IndexCollectionAnalysis
from ... import SamplesSummary, AbstractPaths, SamplesPDF
class ModelAnalysis(Analysis):
    def __init__(self, analysis: Analysis, model: AbstractPriorModel):
        """
        Comprises a model and an analysis that can be applied to instances of that model.

        Parameters
        ----------
        analysis
            The wrapped analysis that evaluates instances.
        model
            The prior model whose instances the analysis evaluates.
        """
        self.analysis = analysis
        self.model = model

    def __getattr__(self, item):
        # Guard pickling: delegating these would recurse before
        # ``analysis`` exists on an unpickled instance.
        if item in ("__getstate__", "__setstate__"):
            raise AttributeError(item)
        # Any attribute not defined here is delegated to the wrapped analysis.
        return getattr(self.analysis, item)

    def log_likelihood_function(self, instance):
        """Evaluate the wrapped analysis's likelihood for *instance*."""
        return self.analysis.log_likelihood_function(instance)

    def make_result(
        self,
        samples_summary: SamplesSummary,
        paths: AbstractPaths,
        samples: Optional[SamplesPDF] = None,
        search_internal: Optional[object] = None,
        analysis: Optional[object] = None,
    ):
        """
        Return the correct type of result by calling the underlying analysis.
        """
        # The ``analysis`` argument is accepted for interface compatibility
        # but the wrapped analysis's own result construction is used.
        # (A ``try/except TypeError: raise`` wrapper here was dead code —
        # it only re-raised — and has been removed.)
        return self.analysis.make_result(
            samples_summary=samples_summary,
            paths=paths,
            samples=samples,
            search_internal=search_internal,
        )
class CombinedModelAnalysis(IndexCollectionAnalysis):
    def modify_model(self, model: AbstractPriorModel) -> Collection:
        """
        Creates a collection with one model for each analysis. For each ModelAnalysis
        the model is used; for other analyses the default model is used.

        Parameters
        ----------
        model
            A default model

        Returns
        -------
        A collection of models, one for each analysis.
        """
        modified = []
        for child in self.analyses:
            # A ModelAnalysis carries its own model; everything else
            # falls back to the default passed in.
            if isinstance(child.analysis, ModelAnalysis):
                modified.append(child.modify_model(child.analysis.model))
            else:
                modified.append(child.modify_model(model))
        return Collection(modified)
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@autofit@non_linear@analysis@model_analysis.py@.PATH_END.py
|
{
"filename": "dustpop.py",
"repo_name": "LSSTDESC/lsstdesc-diffsky",
"repo_path": "lsstdesc-diffsky_extracted/lsstdesc-diffsky-main/lsstdesc_diffsky/photometry/dustpop.py",
"type": "Python"
}
|
"""JAX-based implementation of the dust population model in Nagaraj+22.
See https://arxiv.org/abs/2202.05102 for details."""
from collections import OrderedDict
import numpy as np
from jax import jit as jjit
from jax import numpy as jnp
from jax import random as jran
from ..dspspop.nagaraj22_dust import (
DELTA_PDICT,
TAU_BOUNDS_PDICT,
TAU_PDICT,
_get_median_dust_params_kern,
)
def mc_generate_dust_params(ran_key, logsm, logssfr, redshift, **kwargs):
    """Generate dust_params array that should be passed to precompute_dust_attenuation

    Parameters
    ----------
    ran_key : JAX random seed
        Instance of jax.random.PRNGKey(seed), where seed is any integer

    logsm : float or ndarray of shape (n_gals, )
        Base-10 log of stellar mass in units of Msun assuming h=0.7

    logssfr : float or ndarray of shape (n_gals, )
        Base-10 log of SFR/Mstar in units of yr^-1

    redshift : float or ndarray of shape (n_gals, )

    Returns
    -------
    dust_params : ndarray of shape (n_gals, 3)

    """
    # Start from the default tau parameters, overriding any that were
    # supplied as keyword arguments.
    tau_pdict = TAU_PDICT.copy()
    for name in set(tau_pdict) & set(kwargs):
        tau_pdict[name] = kwargs[name]
    tau_params = np.array(list(tau_pdict.values()))

    # Same treatment for the delta parameters.
    delta_pdict = DELTA_PDICT.copy()
    for name in set(delta_pdict) & set(kwargs):
        delta_pdict[name] = kwargs[name]
    delta_params = np.array(list(delta_pdict.values()))

    # Broadcast scalar inputs so everything is the same length.
    logsm, logssfr, redshift = get_1d_arrays(logsm, logssfr, redshift)

    return mc_generate_dust_params_kern(
        ran_key, logsm, logssfr, redshift, tau_params, delta_params
    )
@jjit
def mc_generate_dust_params_kern(
    ran_key, logsm, logssfr, redshift, tau_params, delta_params
):
    # JIT-compiled Monte Carlo kernel: perturbs the median dust parameters
    # (eb, delta, Av) with uniform scatter and returns them per galaxy.
    delta_key, av_key = jran.split(ran_key, 2)
    n = logsm.size
    median_eb, median_delta, median_av = _get_median_dust_params_kern(
        logsm, logssfr, redshift, tau_params, delta_params
    )
    # Scatter Av by +/-0.2 dex in log10 space around the median.
    delta_lgav = jran.uniform(av_key, minval=-0.2, maxval=0.2, shape=(n,))
    lgav = delta_lgav + jnp.log10(median_av)
    av = 10**lgav
    delta = median_delta + jran.uniform(delta_key, minval=-0.1, maxval=0.1, shape=(n,))
    # NOTE(review): ``delta_key`` is reused here for eb, so the eb and delta
    # draws come from the same PRNG stream — presumably a separate key was
    # intended; confirm before changing, as it alters the random stream.
    eb = median_eb + jran.uniform(delta_key, minval=-0.15, maxval=0.15, shape=(n,))
    # Pack as (n_gals, 3): columns are (eb, delta, av).
    dust_params = jnp.array((eb, delta, av)).T
    return dust_params
def mc_generate_alt_dustpop_params(ran_key):
    """Generate a dictionary of alternative dustpop parameters

    Parameters
    ----------
    ran_key : JAX random seed
        Instance of jax.random.PRNGKey(seed), where seed is any integer

    Returns
    -------
    pdict : OrderedDict
        Dictionary of dustpop parameters with different values than
        those appearing in nagaraj22_dust.TAU_PDICT

    Notes
    -----
    The returned pdict can be passed as the tau_params arguments to the
    dustpop.mc_generate_dust_params function

    """
    pdict = OrderedDict()
    for pname, bounds in TAU_BOUNDS_PDICT.items():
        # (Removed a redundant ``bounds = TAU_BOUNDS_PDICT[pname]`` lookup —
        # the loop unpacking already provides the bounds.)
        pkey, ran_key = jran.split(ran_key, 2)
        # Draw uniformly within the parameter's allowed bounds.
        u = jran.uniform(pkey, minval=0, maxval=1, shape=(1,))
        alt_val = bounds[0] + u * (bounds[1] - bounds[0])
        pdict[pname] = float(alt_val)
    return pdict
def get_1d_arrays(*args, jax_arrays=False):
    """Return a list of ndarrays of the same length.

    Each arg must be either an ndarray of shape (npts, ), or a scalar.
    Scalars are broadcast to the common length; the output is a list of
    JAX arrays when ``jax_arrays`` is True, otherwise NumPy arrays.
    """
    arrays = [jnp.atleast_1d(arg) for arg in args]
    lengths = [arr.size for arr in arrays]
    npts = max(lengths)
    msg = "All input arguments should be either a float or ndarray of shape ({0}, )"
    assert set(lengths) <= set((1, npts)), msg.format(npts)

    # Adding a zeros array of the target length broadcasts scalars while
    # preserving each input's dtype.
    zeros = jnp.zeros(npts) if jax_arrays else np.zeros(npts)
    return [zeros.astype(arr.dtype) + arr for arr in arrays]
|
LSSTDESCREPO_NAMElsstdesc-diffskyPATH_START.@lsstdesc-diffsky_extracted@lsstdesc-diffsky-main@lsstdesc_diffsky@photometry@dustpop.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/parcoords/line/colorbar/title/font/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the parcoords colorbar title font ``style``."""

    def __init__(
        self,
        plotly_name="style",
        parent_name="parcoords.line.colorbar.title.font",
        **kwargs,
    ):
        # Allow callers to override the edit type and value set; fall back
        # to the plotly schema defaults otherwise.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["normal", "italic"])
        super(StyleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@parcoords@line@colorbar@title@font@_style.py@.PATH_END.py
|
{
"filename": "test_utils.py",
"repo_name": "sncosmo/sncosmo",
"repo_path": "sncosmo_extracted/sncosmo-master/sncosmo/tests/test_utils.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSES
import os
from tempfile import mkdtemp
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.stats import norm
from sncosmo import utils
def test_result():
    # Result stores keyword arguments as attributes.
    res = utils.Result(a=1, b=2)
    assert res.a == 1
    assert res.b == 2

    # test deprecating result attributes
    # Registering 'c' in the internal ``deprecated`` dict makes ``res.c``
    # return 2 while warning the user to use 'b' instead.
    res.__dict__['deprecated']['c'] = (2, "Use b instead")

    # for some reason, pytest 3.8 seems to not have warns
    if hasattr(pytest, 'warns'):
        with pytest.warns(UserWarning):
            val = res.c
    else:
        val = res.c
    assert val == 2
def test_format_value():
    """Check plain, exponent, error and LaTeX formatting of numeric values."""
    plain = utils.format_value(1.234567)
    assert plain == '1.2345670'

    exponent = utils.format_value(0.001234567)
    assert exponent == '1.2345670 x 10^-3'

    with_error = utils.format_value(1234567, error=1)
    assert with_error == '1234567.0 +/- 1.0'

    latex = utils.format_value(0.001234567, latex=True)
    assert latex == '1.2345670 \\times 10^{-3}'
def test_ppf():
    """Test the ppf function."""
    # Flat prior between 0 and 10: the ppf is linear in the quantile.
    quantiles = np.array([0.1, 0.2, 0.9, 0.9999])
    result = utils.ppf(lambda x: 1., quantiles, 0., 10.)
    assert_allclose(result, [1., 2., 9., 9.999])

    # Standard normal: compare against scipy's own ppf.
    priordist = norm(0., 1.)
    quantiles = np.linspace(0.05, 0.95, 5)
    result = utils.ppf(priordist.pdf, quantiles, -np.inf, np.inf)
    assert_allclose(result, priordist.ppf(quantiles), atol=1.e-10)
def test_alias_map():
    """alias_map matches each canonical key to the alias present in the input."""
    aliases = {'a': {'a', 'a_'}, 'b': {'b', 'b_'}}
    mapping = utils.alias_map(['A', 'B_', 'foo'], aliases)
    assert mapping == {'a': 'A', 'b': 'B_'}
def test_data_mirror_rootdir():
    """DataMirror.rootdir() accepts both a string and a zero-arg callable."""
    dirname = mkdtemp()
    # Clean up the temp directory even if an assertion fails (previously a
    # failed assert leaked the directory).
    try:
        # rootdir is a string
        mirror = utils.DataMirror(dirname, "url_goes_here")
        assert mirror.rootdir() == dirname

        # rootdir is a callable
        mirror = utils.DataMirror(lambda: dirname, "url_goes_here")
        assert mirror.rootdir() == dirname
    finally:
        os.rmdir(dirname)
|
sncosmoREPO_NAMEsncosmoPATH_START.@sncosmo_extracted@sncosmo-master@sncosmo@tests@test_utils.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "lofar-astron/RMextract",
"repo_path": "RMextract_extracted/RMextract-master/RMextract/LOFAR_TOOLS/__init__.py",
"type": "Python"
}
|
lofar-astronREPO_NAMERMextractPATH_START.@RMextract_extracted@RMextract-master@RMextract@LOFAR_TOOLS@__init__.py@.PATH_END.py
|
|
{
"filename": "test_bayesian.py",
"repo_name": "threeML/threeML",
"repo_path": "threeML_extracted/threeML-master/threeML/test/test_bayesian.py",
"type": "Python"
}
|
from threeML import BayesianAnalysis, Uniform_prior, Log_uniform_prior
import numpy as np
import pytest
# Optional sampler back-ends: probe each import once and build a matching
# pytest skip marker so tests for unavailable samplers are skipped rather
# than erroring. BUGFIX: the probes used bare ``except:`` clauses, which
# also swallow BaseException (e.g. KeyboardInterrupt); catch only
# ImportError, the condition these guards are meant to detect.
try:
    import ultranest
except ImportError:
    has_ultranest = False
else:
    has_ultranest = True

skip_if_ultranest_is_not_available = pytest.mark.skipif(
    not has_ultranest, reason="No ultranest available"
)

try:
    import autoemcee
except ImportError:
    has_autoemcee = False
else:
    has_autoemcee = True

skip_if_autoemcee_is_not_available = pytest.mark.skipif(
    not has_autoemcee, reason="No autoemcee available"
)

try:
    import dynesty
except ImportError:
    has_dynesty = False
else:
    has_dynesty = True

skip_if_dynesty_is_not_available = pytest.mark.skipif(
    not has_dynesty, reason="No dynesty available"
)

try:
    import pymultinest
except ImportError:
    has_pymultinest = False
else:
    has_pymultinest = True

skip_if_pymultinest_is_not_available = pytest.mark.skipif(
    not has_pymultinest, reason="No pymultinest available"
)

try:
    import zeus
except ImportError:
    has_zeus = False
else:
    has_zeus = True

skip_if_zeus_is_not_available = pytest.mark.skipif(
    not has_zeus, reason="No zeus available"
)
def remove_priors(model):
    """Strip the prior from every free parameter of *model*."""
    for par in model:
        par.prior = None
def set_priors(model):
    """Attach default priors to the bn090217206 power-law parameters."""
    pl = model.bn090217206.spectrum.main.Powerlaw
    pl.index.prior = Uniform_prior(lower_bound=-5.0, upper_bound=5.0)
    pl.K.prior = Log_uniform_prior(lower_bound=1.0, upper_bound=10)
def check_results(fit_results):
    """Assert the fitted power-law parameters match the reference values."""
    expected_K, expected_index = 2.531028, -1.1831566000728451
    values = fit_results["value"]

    assert np.isclose(
        values["bn090217206.spectrum.main.Powerlaw.K"],
        expected_K,
        rtol=0.1,
    )
    assert np.isclose(
        values["bn090217206.spectrum.main.Powerlaw.index"],
        expected_index,
        rtol=0.1,
    )
def test_bayes_constructor(fitted_joint_likelihood_bn090217206_nai):
    # Reuse the frequentist fit fixture: its data and model are recycled here.
    jl, fit_results, like_frame = fitted_joint_likelihood_bn090217206_nai

    datalist = jl.data_list
    model = jl.likelihood_model

    jl.restore_best_fit()

    # Priors might have been set by other tests, let's make sure they are
    # removed so we can test the error
    remove_priors(model)
    # Without priors the constructor must refuse to build the analysis.
    with pytest.raises(RuntimeError):
        _ = BayesianAnalysis(model, datalist)

    set_priors(model)

    bayes = BayesianAnalysis(model, datalist)

    bayes.set_sampler("emcee")

    # Before sampling, all result accessors should be empty.
    assert bayes.results is None
    assert bayes.samples is None
    assert bayes.log_like_values is None
    assert bayes.log_probability_values is None
def test_emcee(bayes_fitter):
    # Emcee sampling is already exercised while the fixtures are built
    # (see conftest.py), so this test is intentionally a no-op.
    pass
@skip_if_pymultinest_is_not_available
def test_multinest(bayes_fitter, completed_bn090217206_bayesian_analysis):
    """Re-sample the completed analysis with MultiNest and verify the fit."""
    bayes = completed_bn090217206_bayesian_analysis[0]
    bayes.set_sampler("multinest")
    bayes.sampler.setup(n_live_points=400)
    bayes.sample()
    check_results(bayes.results.get_data_frame())
@skip_if_ultranest_is_not_available
def test_ultranest(bayes_fitter, completed_bn090217206_bayesian_analysis):
    """Re-sample the completed analysis with UltraNest and verify the fit."""
    bayes = completed_bn090217206_bayesian_analysis[0]
    bayes.set_sampler("ultranest")
    bayes.sampler.setup()
    bayes.sample()
    check_results(bayes.results.get_data_frame())
@skip_if_autoemcee_is_not_available
def test_autoemcee(bayes_fitter, completed_bn090217206_bayesian_analysis):
    """Re-sample the completed analysis with autoemcee and verify the fit."""
    bayes = completed_bn090217206_bayesian_analysis[0]
    bayes.set_sampler("autoemcee")
    bayes.sampler.setup()
    bayes.sample()
    check_results(bayes.results.get_data_frame())
@skip_if_dynesty_is_not_available
def test_dynesty_nested(bayes_fitter, completed_bn090217206_bayesian_analysis):
    """Re-sample with dynesty's static nested sampler and verify the fit."""
    bayes = completed_bn090217206_bayesian_analysis[0]
    bayes.set_sampler("dynesty_nested")
    bayes.sampler.setup(n_live_points=200, n_effective=10)
    bayes.sample()
    check_results(bayes.results.get_data_frame())
@skip_if_dynesty_is_not_available
def test_dynesty_dynamic(bayes_fitter, completed_bn090217206_bayesian_analysis):
    """Re-sample with dynesty's dynamic nested sampler and verify the fit."""
    bayes = completed_bn090217206_bayesian_analysis[0]
    bayes.set_sampler("dynesty_dynamic")
    bayes.sampler.setup(nlive_init=100, maxbatch=2, n_effective=10)
    bayes.sample()
    check_results(bayes.results.get_data_frame())
@skip_if_zeus_is_not_available
def test_zeus(bayes_fitter, completed_bn090217206_bayesian_analysis):
    """Re-sample with zeus, restore the median fit, and verify the result."""
    bayes = completed_bn090217206_bayesian_analysis[0]
    bayes.set_sampler("zeus")
    bayes.sampler.setup(n_iterations=200, n_walkers=20)
    bayes.sample()
    frame = bayes.results.get_data_frame()
    bayes.restore_median_fit()
    check_results(frame)
def test_bayes_plots(completed_bn090217206_bayesian_analysis):
    """Smoke-test the diagnostic plots of a finished Bayesian analysis."""
    bayes, _ = completed_bn090217206_bayesian_analysis
    # Asking for more samples/subsets than exist must fail loudly.
    with pytest.raises(AssertionError):
        bayes.convergence_plots(n_samples_in_each_subset=100000, n_subsets=20000)
    bayes.convergence_plots(n_samples_in_each_subset=10, n_subsets=5)
    bayes.plot_chains()
    bayes.restore_median_fit()
def test_bayes_shared(fitted_joint_likelihood_bn090217206_nai6_nai9_bgo1):
    """share_spectrum=True must agree (10%) with share_spectrum=False.

    Runs the same seeded emcee fit twice, once with and once without the
    shared-spectrum option, and compares the fitted powerlaw parameters.
    """
    jl, _, _ = fitted_joint_likelihood_bn090217206_nai6_nai9_bgo1
    jl.restore_best_fit()
    model = jl.likelihood_model
    data_list = jl.data_list
    powerlaw = jl.likelihood_model.bn090217206.spectrum.main.Powerlaw
    powerlaw.index.prior = Uniform_prior(lower_bound=-5.0, upper_bound=5.0)
    powerlaw.K.prior = Log_uniform_prior(lower_bound=1.0, upper_bound=10)
    # Run 1: shared-spectrum path.
    bayes = BayesianAnalysis(model, data_list)
    bayes.set_sampler("emcee", share_spectrum=True)
    bayes.sampler.setup(n_walkers=50, n_burn_in=50, n_iterations=100, seed=1234)
    samples = bayes.sample()
    res_shared = bayes.results.get_data_frame()
    # Run 2: reference path (same seed so results should be comparable).
    bayes = BayesianAnalysis(model, data_list)
    bayes.set_sampler("emcee", share_spectrum=False)
    bayes.sampler.setup(n_walkers=50, n_burn_in=50, n_iterations=100, seed=1234)
    samples = bayes.sample()
    res_not_shared = bayes.results.get_data_frame()
    assert np.isclose(
        res_shared["value"]["bn090217206.spectrum.main.Powerlaw.K"],
        res_not_shared["value"]["bn090217206.spectrum.main.Powerlaw.K"],
        rtol=0.1,
    )
    assert np.isclose(
        res_shared["value"]["bn090217206.spectrum.main.Powerlaw.index"],
        res_not_shared["value"]["bn090217206.spectrum.main.Powerlaw.index"],
        rtol=0.1,
    )
|
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@threeML@test@test_bayesian.py@.PATH_END.py
|
{
"filename": "SiGaps_09_VG12_f50.ipynb",
"repo_name": "Echelle/AO_bonding_paper",
"repo_path": "AO_bonding_paper_extracted/AO_bonding_paper-master/notebooks/SiGaps_09_VG12_f50.ipynb",
"type": "Jupyter Notebook"
}
|
### This IPython Notebook is for performing a fit and generating a figure of the spectrum of sample VG12, in the mesh region with a 49 ± 6 nm gap.
The figures produced below are saved as **VG12_corner.pdf** and **VG12_f50.pdf**.
Author: Michael Gully-Santiago, `gully@astro.as.utexas.edu`
Date: January 13, 2015
```
%pylab inline
import emcee
import triangle
import pandas as pd
import seaborn as sns
```
Populating the interactive namespace from numpy and matplotlib
```
sns.set_context("paper", font_scale=2.0, rc={"lines.linewidth": 2.5})
sns.set(style="ticks")
```
Read in the data. We want "VG12"
```
df = pd.read_csv('../data/cln_20130916_cary5000.csv', index_col=0)
df = df[df.index > 1250.0]
```
```
plt.plot(df.index[::4], df.run11[::4]/100.0, label='On-mesh')
plt.plot(df.index, df.run10/100.0, label='Off-mesh')
plt.plot(df.index, df.run12/100.0, label='Shard2')
plt.plot(df.index, df.run9/100.0, label='DSP')
plt.plot(df.index, df.run15/100.0, label='VG08')
plt.plot(df.index, df.run17/100.0, label='VG08 alt')
#plt.plot(x, T_gap_Si_withFF_fast(x, 65.0, 0.5, n1)/T_DSP, label='Model')
plt.legend(loc='best')
plt.ylim(0.80, 1.05)
```
(0.8, 1.05)

Import all the local models, saved locally as `etalon.py`. See the paper for derivations of these equations.
```
from etalon import *
np.random.seed(78704)
```
```
# Introduce the Real data, decimate the data.
x = df.index.values[::4]
N = len(x)
# Define T_DSP for the model
T_DSP = T_gap_Si(x, 0.0)
n1 = sellmeier_Si(x)
# Define uncertainty
yerr = 0.0004*np.ones(N)
iid_cov = np.diag(yerr ** 2)
# Select the spectrum of interest
# Normalize the spectrum by measured DSP Si wafer.
y = df.run11.values[::4]/100.0
```
Define the likelihood.
```
def lnlike(d, f, lna, lns):
    """Gaussian-process log-likelihood of the etalon gap model.

    d, f: gap size (nm) and fill factor; lna, lns: log GP amplitude/scale.
    Relies on the notebook globals x, y, yerr (via iid_cov), n1 and T_DSP.
    """
    a, s = np.exp(lna), np.exp(lns)
    # Squared-exponential covariance added to the iid measurement noise.
    off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
    C = iid_cov + off_diag_terms
    sgn, logdet = np.linalg.slogdet(C)
    if sgn <= 0:
        # Covariance not positive definite -> reject this parameter draw.
        return -np.inf
    r = y - T_gap_Si_withFF_fast(x, d, f, n1)/T_DSP
    # Multivariate normal log-density up to an additive constant.
    return -0.5 * (np.dot(r, np.linalg.solve(C, r)) + logdet)
```
Define the prior.
```
def lnprior(d, f, lna, lns):
    """Flat prior: 0 inside the allowed parameter box, -inf outside."""
    inside_box = (
        40.0 < d < 150.0
        and 0.1 < f < 0.9
        and -12 < lna < -2
        and 0 < lns < 10
    )
    return 0.0 if inside_box else -np.inf
```
Combine likelihood and prior to obtain the posterior.
```
def lnprob(p):
    """Log-posterior of parameter vector *p*: prior plus likelihood."""
    prior_term = lnprior(*p)
    if np.isfinite(prior_term):
        return prior_term + lnlike(*p)
    # Outside the prior support the likelihood is never evaluated.
    return -np.inf
```
Set up `emcee`.
```
ndim, nwalkers = 4, 32
d_Guess = 65.0
f_Guess = 0.5
p0 = np.array([d_Guess, f_Guess, np.log(0.003), np.log(200.0)])
pos = [p0 + 1.0e-2*p0 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
```
Run the burn-in phase.
```
pos, lp, state = sampler.run_mcmc(pos, 200)
```
Run the full MCMC.
```
sampler.reset()
pos, lp, state = sampler.run_mcmc(pos, 600)
chain = sampler.chain
```
Inspect the chain.
```
fig, axes = plt.subplots(4, 1, figsize=(5, 6), sharex=True)
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.98,
wspace=0.0, hspace=0.05)
[a.plot(np.arange(chain.shape[1]), chain[:, :, i].T, "k", alpha=0.5)
for i, a in enumerate(axes)]
[a.set_ylabel("${0}$".format(l)) for a, l in zip(axes, ["d", "f", "\ln a", "\ln s"])]
axes[-1].set_xlim(0, chain.shape[1])
axes[-1].set_xlabel("iteration");
```

Linearize $a$ and $s$ for graphical purposes.
```
samples_lin = copy(sampler.flatchain)
samples_lin[:, 2:] = np.exp(samples_lin[:, 2:])
```
Make a triangle corner plot.
```
fig = triangle.corner(samples_lin,
labels=map("${0}$".format, ["d", "f", "a", "s"]),
quantiles=[0.16, 0.84])
```
Quantiles:
[(0.16, 64.843314147932404), (0.84, 91.811573104680789)]
Quantiles:
[(0.16, 0.27279990369619456), (0.84, 0.49195703690720283)]
Quantiles:
[(0.16, 0.00098088116826277618), (0.84, 0.0017266910404358123)]
Quantiles:
[(0.16, 62.341231003412545), (0.84, 85.531531075906912)]

```
fig = triangle.corner(samples_lin[:,0:2],
labels=map("${0}$".format, ["d", "f"]),
quantiles=[0.16, 0.84])
plt.savefig("VG12_corner.pdf")
```
Quantiles:
[(0.16, 64.843314147932404), (0.84, 91.811573104680789)]
Quantiles:
[(0.16, 0.27279990369619456), (0.84, 0.49195703690720283)]
/Users/gully/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_pdf.py:2264: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
different = bool(ours != theirs)

Calculate confidence intervals.
```
d_mcmc, f_mcmc, a_mcmc, s_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples_lin, [16, 50, 84],
axis=0)))
d_mcmc, f_mcmc, a_mcmc, s_mcmc
```
((81.38549604856982, 10.426077056110969, 16.542181900637416),
(0.33256193824424041, 0.15939509866296242, 0.05976203454804585),
(0.0012923636032171415, 0.00043432743721867075, 0.00031148243495436535),
(73.925387324942207, 11.606143750964705, 11.584156321529662))
Overlay draws from the Gaussian Process.
```
plt.figure(figsize=(6,3))
for d, f, a, s in samples_lin[np.random.randint(len(samples_lin), size=60)]:
off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
C = iid_cov + off_diag_terms
fit = T_gap_Si_withFF_fast(x, d, f, n1)/T_DSP
vec = np.random.multivariate_normal(fit, C)
plt.plot(x, vec,"-b", alpha=0.06)
plt.step(x, y,color="k", label='Measurement')
fit = T_gap_Si_withFF_fast(x, 64.0, 0.5, n1)/T_DSP
fit_label = 'Model with $d={:.0f}$ nm, $f={:.1f}$'.format(64.0, 0.5)
plt.plot(x, fit, '--', color=sns.xkcd_rgb["pale red"], alpha=1.0, label=fit_label)
fit1 = T_gap_Si_withFF_fast(x, 43, 0.5, n1)/T_DSP
fit2 = T_gap_Si_withFF_fast(x, 55, 0.5, n1)/T_DSP
fit2_label = 'Model with $d={:.0f}\pm{:.0f}$ nm, $f={:.1f}$'.format(49, 6, 0.5)
plt.fill_between(x, fit1, fit2, alpha=0.6, color=sns.xkcd_rgb["green apple"])
plt.plot([-10, -9], [-10, -9],"-", alpha=0.85, color=sns.xkcd_rgb["green apple"], label=fit2_label)
plt.plot([-10, -9], [-10, -9],"-b", alpha=0.85, label='Draws from GP')
plt.plot([0, 5000], [1.0, 1.0], '-.k', alpha=0.5)
plt.fill_between([1200, 1250], 2.0, 0.0, hatch='\\', alpha=0.4, color='k', label='Si absorption cutoff')
plt.xlabel('$\lambda$ (nm)');
plt.ylabel('$T_{gap}$');
plt.xlim(1200, 2501);
plt.ylim(0.9, 1.019);
plt.legend(loc='lower right')
plt.savefig("VG12_f50.pdf", bbox_inches='tight')
```

The end.
|
EchelleREPO_NAMEAO_bonding_paperPATH_START.@AO_bonding_paper_extracted@AO_bonding_paper-master@notebooks@SiGaps_09_VG12_f50.ipynb@.PATH_END.py
|
{
"filename": "test_str_subclass.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/simplejson/py3/simplejson/tests/test_str_subclass.py",
"type": "Python"
}
|
from unittest import TestCase
import simplejson
from simplejson.compat import text_type
# Tests for issue demonstrated in https://github.com/simplejson/simplejson/issues/144
class WonkyTextSubclass(text_type):
    """str subclass with a sabotaged __getslice__ (a Python 2 relic).

    Encoding must not go through the subclass's slicing; see
    https://github.com/simplejson/simplejson/issues/144.
    """

    def __getslice__(self, start, end):
        return self.__class__('not what you wanted!')
class TestStrSubclass(TestCase):
    """Round-tripping a str subclass must yield the plain text content."""

    def test_dump_load(self):
        for s in ['', '"hello"', 'text', u'\u005c']:
            # Exercise both the ASCII-escaping and raw-unicode encoders.
            for ensure_ascii in (True, False):
                dumped = simplejson.dumps(WonkyTextSubclass(s),
                                          ensure_ascii=ensure_ascii)
                self.assertEqual(s, simplejson.loads(dumped))
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@simplejson@py3@simplejson@tests@test_str_subclass.py@.PATH_END.py
|
{
"filename": "test_diagnostic.py",
"repo_name": "Samreay/ChainConsumer",
"repo_path": "ChainConsumer_extracted/ChainConsumer-master/tests/test_diagnostic.py",
"type": "Python"
}
|
import numpy as np
import pandas as pd
import pytest
from chainconsumer import Chain, ChainConsumer
@pytest.fixture
def rng():
    # Fixed seed so every fixture/test draws identical samples (deterministic CI).
    return np.random.default_rng(seed=0)
@pytest.fixture
def good_chain(rng) -> Chain:
    # Two stationary normal series -> a chain that should pass all diagnostics.
    data = np.vstack((rng.normal(loc=0.0, size=100000), rng.normal(loc=1.0, size=100000))).T
    chain = Chain(samples=pd.DataFrame(data, columns=["a", "b"]), name="A", walkers=4)
    return chain
@pytest.fixture
def bad_chain(rng) -> Chain:
    data = np.vstack((rng.normal(loc=0.0, size=100000), rng.normal(loc=1.0, size=100000))).T
    # Rescale and shift the last 20% of samples: the late part of the chain
    # disagrees with the early part, so convergence diagnostics should fail.
    data[80000:, :] *= 2
    data[80000:, :] += 1
    chain = Chain(samples=pd.DataFrame(data, columns=["a", "b"]), name="A", walkers=4)
    return chain
@pytest.fixture
def good_cc(good_chain: Chain) -> ChainConsumer:
    # Consumer holding a single converged chain.
    return ChainConsumer().add_chain(good_chain)
@pytest.fixture
def good_cc2(good_chain: Chain) -> ChainConsumer:
    # Two copies of the same converged chain under different names ("A", "B").
    c2 = good_chain.model_copy()
    c2.name = "B"
    return ChainConsumer().add_chain(good_chain).add_chain(c2)
@pytest.fixture
def bad_cc(bad_chain: Chain) -> ChainConsumer:
    # Consumer holding a single non-stationary (diverging) chain.
    return ChainConsumer().add_chain(bad_chain)
def test_gelman_rubin_index(good_cc: ChainConsumer) -> None:
    # A converged chain yields a truthy Gelman-Rubin result.
    assert good_cc.diagnostic.gelman_rubin()
def test_gelman_rubin_index2(good_cc2: ChainConsumer) -> None:
    """Both converged chains must pass the Gelman-Rubin diagnostic."""
    res = good_cc2.diagnostic.gelman_rubin()
    assert res
    assert res.passed
    for chain_name in ("A", "B"):
        assert chain_name in res.results
        assert res.results[chain_name]
def test_gelman_rubin_index_not_converged(bad_cc: ChainConsumer) -> None:
    # A diverging chain yields a falsy Gelman-Rubin result.
    assert not bad_cc.diagnostic.gelman_rubin()
def test_gelman_rubin_index_not_converged2(rng) -> None:
    data = np.vstack((rng.normal(loc=0.0, size=100000), rng.normal(loc=1.0, size=100000))).T
    # A linear drift in the first column makes the chain non-stationary.
    data[:, 0] += np.linspace(0, 10, 100000)
    consumer = ChainConsumer()
    consumer.add_chain(Chain(samples=pd.DataFrame(data, columns=["A", "B"]), name="B", walkers=8))
    res = consumer.diagnostic.gelman_rubin()
    # Both the overall result and the per-chain entry must report failure.
    assert not res
    assert not res.passed
    assert "B" in res.results
    assert not res.results["B"]
def test_geweke_index(good_cc: ChainConsumer) -> None:
    # A converged chain yields a truthy Geweke result.
    assert good_cc.diagnostic.geweke()
def test_geweke_index_failed(bad_cc: ChainConsumer) -> None:
    # A diverging chain yields a falsy Geweke result.
    assert not bad_cc.diagnostic.geweke()
def test_geweke_default(good_cc2: ChainConsumer) -> None:
    """Both converged chains must pass the Geweke diagnostic."""
    res = good_cc2.diagnostic.geweke()
    assert res
    assert res.passed
    for chain_name in ("A", "B"):
        assert chain_name in res.results
        assert res.results[chain_name]
def test_geweke_default_failed(rng: np.random.Generator) -> None:
    data = np.vstack((rng.normal(loc=0.0, size=100000), rng.normal(loc=1.0, size=100000))).T
    consumer = ChainConsumer()
    consumer.add_chain(Chain(samples=pd.DataFrame(data, columns=["a", "b"]), walkers=20, name="c1"))
    # Second chain: shift the final 2% of samples so its tail mean disagrees
    # with its head, which Geweke's z-score test should flag.
    data2 = data.copy()
    data2[98000:, :] += 0.3
    consumer.add_chain(Chain(samples=pd.DataFrame(data2, columns=["a", "b"]), walkers=20, name="c2"))
    assert not consumer.diagnostic.geweke()
|
SamreayREPO_NAMEChainConsumerPATH_START.@ChainConsumer_extracted@ChainConsumer-master@tests@test_diagnostic.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Smithsonian/ngehtsim",
"repo_path": "ngehtsim_extracted/ngehtsim-main/ngehtsim/calibration/__init__.py",
"type": "Python"
}
|
"""
Tools for carrying out calibration.
"""
__author__ = "Dom Pesce"
__all__ = ['calibration']
from . import *
|
SmithsonianREPO_NAMEngehtsimPATH_START.@ngehtsim_extracted@ngehtsim-main@ngehtsim@calibration@__init__.py@.PATH_END.py
|
{
"filename": "conv_template.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/distutils/conv_template.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""
takes templated file .xxx.src and produces .xxx file where .xxx is
.i or .c or .h, using the following template rules
/**begin repeat -- on a line by itself marks the start of a repeated code
segment
/**end repeat**/ -- on a line by itself marks it's end
After the /**begin repeat and before the */, all the named templates are placed
these should all have the same number of replacements
Repeat blocks can be nested, with each nested block labeled with its depth,
i.e.
/**begin repeat1
*....
*/
/**end repeat1**/
When using nested loops, you can optionally exclude particular
combinations of the variables using (inside the comment portion of the inner loop):
:exclude: var1=value1, var2=value2, ...
This will exclude the pattern where var1 is value1 and var2 is value2 when
the result is being generated.
In the main body each replace will use one entry from the list of named replacements
Note that all #..# forms in a block must have the same number of
comma-separated entries.
Example:
An input file containing
/**begin repeat
* #a = 1,2,3#
* #b = 1,2,3#
*/
/**begin repeat1
* #c = ted, jim#
*/
@a@, @b@, @c@
/**end repeat1**/
/**end repeat**/
produces
line 1 "template.c.src"
/*
*********************************************************************
** This file was autogenerated from a template DO NOT EDIT!!**
** Changes should be made to the original source (.src) file **
*********************************************************************
*/
#line 9
1, 1, ted
#line 9
1, 1, jim
#line 9
2, 2, ted
#line 9
2, 2, jim
#line 9
3, 3, ted
#line 9
3, 3, jim
"""
__all__ = ['process_str', 'process_file']
import os
import sys
import re
# names for replacement that are already global.
global_names = {}
# header placed at the front of head processed file
header =\
"""
/*
*****************************************************************************
** This file was autogenerated from a template DO NOT EDIT!!!! **
** Changes should be made to the original source (.src) file **
*****************************************************************************
*/
"""
# Parse string for repeat loops
def parse_structure(astr, level):
"""
The returned line number is from the beginning of the string, starting
at zero. Returns an empty list if no loops found.
"""
if level == 0 :
loopbeg = "/**begin repeat"
loopend = "/**end repeat**/"
else :
loopbeg = "/**begin repeat%d" % level
loopend = "/**end repeat%d**/" % level
ind = 0
line = 0
spanlist = []
while True:
start = astr.find(loopbeg, ind)
if start == -1:
break
start2 = astr.find("*/", start)
start2 = astr.find("\n", start2)
fini1 = astr.find(loopend, start2)
fini2 = astr.find("\n", fini1)
line += astr.count("\n", ind, start2+1)
spanlist.append((start, start2+1, fini1, fini2+1, line))
line += astr.count("\n", start2+1, fini2)
ind = fini2
spanlist.sort()
return spanlist
def paren_repl(obj):
    """Regex callback expanding '(body)*N' into body repeated N times, comma-joined."""
    body = obj.group(1)
    count = int(obj.group(2))
    return ','.join([body] * count)

# '(a,b,c)*4' style repetition (parenthesized group).
parenrep = re.compile(r"\(([^)]*)\)\*(\d+)")
# 'xxx*4' style repetition (bare token).
plainrep = re.compile(r"([^*]+)\*(\d+)")

def parse_values(astr):
    """Expand '*N' repetition shorthand in *astr* and split it into values.

    '(a,b,c)*2' becomes 'a,b,c,a,b,c' and 'xxx*3' becomes 'xxx,xxx,xxx';
    empty parentheses produce empty values, e.g. '()*4' yields ',,,'.
    The fully expanded string is split at ',' and returned as a list.
    """
    expanded = parenrep.sub(paren_repl, astr)
    pieces = [plainrep.sub(paren_repl, piece.strip())
              for piece in expanded.split(',')]
    return ','.join(pieces).split(',')
# Strips "\n *" continuation-line prefixes inside a loop header.
stripast = re.compile(r"\n\s*\*?")
# "#name = v1, v2, ...#" named replacement lists.
named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
# "var=value" pairs following an :exclude: tag.
exclude_vars_re = re.compile(r"(\w*)=(\w*)")
exclude_re = re.compile(":exclude:")

def parse_loop_header(loophead) :
    """Find all named replacements in the header

    Returns a list of dictionaries, one for each loop iteration,
    where each key is a name to be substituted and the corresponding
    value is the replacement string.

    Also return a list of exclusions.  The exclusions are dictionaries
    of key value pairs. There can be more than one exclusion.
    [{'var1':'value1', 'var2', 'value2'[,...]}, ...]

    NOTE(review): the `excludes` list is built but never returned or used
    below -- presumably the exclusion feature is applied elsewhere or is
    vestigial; confirm before relying on :exclude: in templates.
    """
    # Strip out '\n' and leading '*', if any, in continuation lines.
    # This should not effect code previous to this change as
    # continuation lines were not allowed.
    loophead = stripast.sub("", loophead)
    # parse out the names and lists of values
    names = []
    reps = named_re.findall(loophead)
    nsub = None
    for rep in reps:
        name = rep[0]
        vals = parse_values(rep[1])
        size = len(vals)
        # Every named list must supply the same number of values.
        if nsub is None :
            nsub = size
        elif nsub != size :
            msg = "Mismatch in number of values, %d != %d\n%s = %s"
            raise ValueError(msg % (nsub, size, name, vals))
        names.append((name, vals))

    # Find any exclude variables
    excludes = []
    for obj in exclude_re.finditer(loophead):
        span = obj.span()
        # find next newline
        endline = loophead.find('\n', span[1])
        substr = loophead[span[1]:endline]
        ex_names = exclude_vars_re.findall(substr)
        excludes.append(dict(ex_names))

    # generate list of dictionaries, one for each template iteration
    dlist = []
    if nsub is None :
        raise ValueError("No substitution variables found")
    for i in range(nsub):
        # One environment per iteration: name -> i-th value.
        tmp = {name: vals[i] for name, vals in names}
        dlist.append(tmp)
    return dlist
# "@name@" substitution points in template bodies.
replace_re = re.compile(r"@(\w+)@")

def parse_string(astr, env, level, line) :
    """Recursively expand repeat loops of depth *level* in *astr*.

    env maps replacement names to values for the current iteration; *line*
    is the 1-based line number of *astr* within the original file and is
    used to emit "#line" markers and error messages.  Returns the expanded
    text as a single string.
    """
    lineno = "#line %d\n" % line

    # local function for string replacement, uses env
    def replace(match):
        name = match.group(1)
        try :
            val = env[name]
        except KeyError:
            msg = 'line %d: no definition of key "%s"'%(line, name)
            raise ValueError(msg) from None
        return val

    code = [lineno]
    struct = parse_structure(astr, level)
    if struct :
        # recurse over inner loops
        oldend = 0
        newlevel = level + 1
        for sub in struct:
            # Text before the loop, the loop header, and the loop body.
            pref = astr[oldend:sub[0]]
            head = astr[sub[0]:sub[1]]
            text = astr[sub[1]:sub[2]]
            oldend = sub[3]
            newline = line + sub[4]
            code.append(replace_re.sub(replace, pref))
            try :
                envlist = parse_loop_header(head)
            except ValueError as e:
                msg = "line %d: %s" % (newline, e)
                raise ValueError(msg)
            for newenv in envlist :
                # Outer-scope names win over this iteration's bindings.
                newenv.update(env)
                newcode = parse_string(text, newenv, newlevel, newline)
                code.extend(newcode)
        suff = astr[oldend:]
        code.append(replace_re.sub(replace, suff))
    else :
        # replace keys
        code.append(replace_re.sub(replace, astr))
    code.append('\n')
    return ''.join(code)
def process_str(astr):
    """Expand all template loops in *astr* and prepend the autogen header.

    Fix: the previous version appended parse_string's return value (a str)
    to a list via list.extend, which split it into single characters before
    re-joining; direct concatenation is equivalent and avoids the churn.
    """
    return header + parse_string(astr, global_names, 0, 1)
# '#include "name.src"' directives at the start of a line (case-insensitive).
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
                            r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)

def resolve_includes(source):
    """Return the lines of *source* with '#include "x.src"' lines inlined.

    Relative include paths are resolved against the including file's
    directory; includes that do not name an existing file are kept as-is.
    """
    base_dir = os.path.dirname(source)
    lines = []
    with open(source) as fid:
        for line in fid:
            match = include_src_re.match(line)
            if match:
                fn = match.group('name')
                if not os.path.isabs(fn):
                    fn = os.path.join(base_dir, fn)
                if os.path.isfile(fn):
                    # Recursively expand nested .src includes.
                    lines.extend(resolve_includes(fn))
                    continue
            lines.append(line)
    return lines
def process_file(source):
    """Resolve includes in *source*, expand its templates, and tag with #line."""
    # Escape backslashes so the path survives inside a C string literal.
    sourcefile = os.path.normcase(source).replace("\\", "\\\\")
    lines = resolve_includes(source)
    try:
        code = process_str(''.join(lines))
    except ValueError as e:
        raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None
    return '#line 1 "%s"\n%s' % (sourcefile, code)
def unique_key(adict):
    """Return a key that is not already present in *adict*.

    Works by joining the first n characters of every existing key,
    increasing n until the result is unique.

    Fix: the previous version looped forever when no prefix length could
    produce a new key (e.g. a single-key dict such as {'a': 1}, where the
    joined prefix is always 'a').  Once every key is fully consumed we now
    force uniqueness by appending underscores.
    """
    allkeys = list(adict.keys())
    maxlen = max((len(k) for k in allkeys), default=1)
    n = 1
    while True:
        newkey = "".join(x[:n] for x in allkeys)
        if newkey not in allkeys:
            return newkey
        if n > maxlen:
            # Prefixes are exhausted; extend until the key is unique.
            while newkey in allkeys:
                newkey += "_"
            return newkey
        n += 1
def main():
    """CLI entry point: expand the template file named in argv[1].

    With an argument, reads `file.xxx.src` and writes `file.xxx`; without
    one, filters stdin to stdout.

    Fixes: input/output file handles are now closed (the originals leaked),
    and the stdin path no longer raises NameError when reporting a template
    error (`file` was previously unassigned in that branch).
    """
    try:
        file = sys.argv[1]
    except IndexError:
        file = '<stdin>'
        fid = sys.stdin
        outfile = sys.stdout
    else:
        fid = open(file, 'r')
        (base, ext) = os.path.splitext(file)
        outfile = open(base, 'w')
    try:
        allstr = fid.read()
        try:
            writestr = process_str(allstr)
        except ValueError as e:
            raise ValueError("In %s loop at %s" % (file, e)) from None
        outfile.write(writestr)
    finally:
        # Close only real files, never the std streams.
        if fid is not sys.stdin:
            fid.close()
        if outfile is not sys.stdout:
            outfile.close()

if __name__ == "__main__":
    main()
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@distutils@conv_template.py@.PATH_END.py
|
{
"filename": "threefry.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/experimental/pallas/ops/tpu/random/threefry.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the Threefry PRNG as a Pallas kernel."""
from typing import Sequence
import jax
from jax import lax
from jax._src import prng
from jax.experimental import pallas as pl
from jax.experimental.pallas import tpu as pltpu
import jax.numpy as jnp
import numpy as np
Shape = Sequence[int]
BLOCK_SIZE = (256, 256)
_round_up = lambda x, y: (x + y - 1) // y * y
def blocked_iota(block_shape: Shape,
                 total_shape: Shape):
  """Computes a sub-block of a larger shaped iota.

  Element [i0, i1, ...] of the result equals the row-major flattened index
  that position would have in a full `total_shape` iota (i.e. this is the
  leading `block_shape` corner of that iota).

  Fix: dropped the dead `block_mult` variable, which was initialized to 1
  on every dimension and never updated, so it only multiplied by one.

  Args:
    block_shape: The output block shape of the iota.
    total_shape: The total shape of the input tensor.

  Returns:
    Result of the blocked iota (dtype uint32).
  """
  iota_data = jnp.zeros(block_shape, dtype=jnp.uint32)
  multiplier = 1
  # Walk dimensions last-to-first, accumulating the *total* shape's stride.
  for dim in range(len(block_shape)-1, -1, -1):
    counts_lo = lax.broadcasted_iota(
        dtype=jnp.uint32, shape=block_shape, dimension=dim
    )
    iota_data += counts_lo * multiplier
    multiplier *= total_shape[dim]
  return iota_data
def _compute_scalar_offset(iteration_index,
total_size: Shape,
block_size: Shape):
ndims = len(iteration_index)
dim_size = 1
total_idx = 0
for i in range(ndims-1, -1, -1):
dim_idx = iteration_index[i] * block_size[i]
total_idx += dim_idx * dim_size
dim_size *= total_size[i]
return total_idx
def threefry_2x32_count(key,
                        shape: Shape,
                        unpadded_shape: Shape,
                        block_size: tuple[int, int]):
  """Generates random bits using the Threefry hash function.

  This function is a fusion of prng.shaped_iota and prng.threefry_2x32 from
  the JAX core library.

  Args:
    key: A threefry key of shape (2,).
    shape: The shape of the output. Must be divisible by `block_size`.
    unpadded_shape: If `shape` is padded, then this is the shape of the
      output tensor if it were not padded. This is important for indexing
      calculations within the kernel. If `shape` is not padded, then this
      should be equal to `shape`.
    block_size: The block size of the kernel.

  Returns:
    A tensor of random bits of shape `shape`.
  """
  shape = tuple(shape)
  # Counts are uint32, so the flattened index space must fit in 32 bits.
  # NOTE(review): the message mixes jnp.iinfo (condition) and np.iinfo
  # (f-string); both report the same max for uint32.
  if np.prod(shape) > jnp.iinfo(jnp.uint32).max:
    raise ValueError(
        f"Shape too large: {np.prod(shape)} > {np.iinfo(jnp.uint32).max}")
  if (shape[-2] % block_size[-2] != 0) or (shape[-1] % block_size[-1] != 0):
    raise ValueError(
        f"Shape dimension {shape[-2:]} must be divisible by {block_size}")
  # One grid step per block in the trailing two dims; leading dims map 1:1.
  # NOTE(review): `block_size[1]` vs `block_size[-2]` is inconsistent but
  # harmless while block_size is a 2-tuple.
  grid_dims = shape[:-2] + (
      shape[-2] // block_size[-2], shape[-1] // block_size[1],)

  def kernel(key_ref, out_ref):
    # Global offset of this block within the (unpadded) output tensor.
    # `block_shape` is closed over; it is assigned below before pallas_call
    # traces this kernel.
    counts_idx = tuple(pl.program_id(i) for i in range(len(grid_dims)))
    offset = _compute_scalar_offset(counts_idx, unpadded_shape, block_shape)
    counts_lo = blocked_iota(block_size, unpadded_shape)
    counts_lo = counts_lo + offset
    counts_lo = counts_lo.astype(jnp.uint32)
    # TODO(justinfu): Support hi bits on count.
    counts_hi = jnp.zeros_like(counts_lo)
    k1 = jnp.reshape(key_ref[0, 0], (1, 1))
    k2 = jnp.reshape(key_ref[0, 1], (1, 1))
    o1, o2 = prng.threefry2x32_p.bind(
        k1, k2, counts_hi, counts_lo)
    # Fold the two 32-bit outputs into one word of random bits.
    out_bits = o1 ^ o2
    out_ref[...] = out_bits.reshape(out_ref.shape)

  key = key.reshape((1, 2))
  out = jax.ShapeDtypeStruct(shape, dtype=jnp.uint32)
  block_shape = (1,) * (len(shape)-2) + block_size
  result = pl.pallas_call(
      kernel,
      in_specs=[pl.BlockSpec(memory_space=pltpu.TPUMemorySpace.SMEM)],
      out_specs=pl.BlockSpec(block_shape, lambda *idxs: idxs),
      grid=grid_dims,
      out_shape=out,
  )(key)
  return result
def plthreefry_random_bits(key, bit_width: int, shape: Shape):
  """random_bits hook for the Pallas Threefry PRNG impl (32-bit only).

  Rank-0/1 requests are promoted to 2-D, generated, and sliced back; shapes
  whose trailing dims are not multiples of BLOCK_SIZE are padded up and the
  result is cropped.
  """
  if bit_width != 32:
    raise ValueError("Only 32-bit PRNG supported.")
  # Promote low-rank requests so the 2-D kernel tiling applies.
  if len(shape) == 0:
    return plthreefry_random_bits(key, bit_width, (1, 1))[0, 0]
  elif len(shape) == 1:
    return plthreefry_random_bits(key, bit_width, (1, *shape))[0]

  requires_pad = (
      shape[-2] % BLOCK_SIZE[-2] != 0) or (shape[-1] % BLOCK_SIZE[-1] != 0)
  if requires_pad:
    # Round trailing dims up to the block size; the kernel indexes with the
    # unpadded shape, so cropping afterwards yields the requested bits.
    padded_shape = tuple(shape[:-2]) + (
        _round_up(shape[-2], BLOCK_SIZE[-2]),
        _round_up(shape[-1], BLOCK_SIZE[-1]),
    )
    padded_result = threefry_2x32_count(
        key, padded_shape, shape, block_size=BLOCK_SIZE)
    return padded_result[..., :shape[-2], :shape[-1]]
  else:
    return threefry_2x32_count(key, shape, shape, block_size=BLOCK_SIZE)
# PRNG implementation descriptor: only random_bits runs through the Pallas
# kernel; seed/split/fold_in reuse JAX's reference threefry host routines.
plthreefry_prng_impl = prng.PRNGImpl(
    key_shape=(2,),
    seed=prng.threefry_seed,
    split=prng.threefry_split,
    random_bits=plthreefry_random_bits,
    fold_in=prng.threefry_fold_in,
    name="pallas_threefry2x32",
    tag="plfry")

# Register under the name above so it can be selected via JAX's PRNG-impl
# configuration machinery.
prng.register_prng(plthreefry_prng_impl)
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@experimental@pallas@ops@tpu@random@threefry.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/choroplethmapbox/colorbar/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Auto-generated lazy-loading shim: eager submodule imports on Python < 3.7
# (or for static type checkers), PEP 562 module-level __getattr__ otherwise.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._tickfont import Tickfont
    from ._tickformatstop import Tickformatstop
    from ._title import Title
    from . import title
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".title"],
        ["._tickfont.Tickfont", "._tickformatstop.Tickformatstop", "._title.Title"],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@choroplethmapbox@colorbar@__init__.py@.PATH_END.py
|
{
"filename": "_ticktextsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/isosurface/colorbar/_ticktextsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for `isosurface.colorbar.ticktextsrc`."""

    def __init__(
        self, plotly_name="ticktextsrc", parent_name="isosurface.colorbar", **kwargs
    ):
        super(TicktextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit_type comes from the plotly schema; callers may
            # override it via kwargs.
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@isosurface@colorbar@_ticktextsrc.py@.PATH_END.py
|
{
"filename": "_cmin.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/marker/_cmin.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CminValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated validator for `scatter3d.marker.cmin`."""

    def __init__(self, plotly_name="cmin", parent_name="scatter3d.marker", **kwargs):
        super(CminValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            # Setting cmin explicitly implies cauto=False.
            implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@marker@_cmin.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/choroplethmap/colorbar/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Auto-generated lazy-loading shim: eager submodule imports on Python < 3.7
# (or for static type checkers), PEP 562 module-level __getattr__ otherwise.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._tickfont import Tickfont
    from ._tickformatstop import Tickformatstop
    from ._title import Title
    from . import title
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".title"],
        ["._tickfont.Tickfont", "._tickformatstop.Tickformatstop", "._title.Title"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@choroplethmap@colorbar@__init__.py@.PATH_END.py
|
{
"filename": "test_lightcurve.py",
"repo_name": "KeplerGO/PyKE",
"repo_path": "PyKE_extracted/PyKE-master/pyke/tests/test_lightcurve.py",
"type": "Python"
}
|
import pytest
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_equal,
assert_allclose)
from astropy.utils.data import get_pkg_data_filename
from ..lightcurve import (LightCurve, KeplerCBVCorrector, KeplerLightCurveFile,
SFFCorrector, KeplerLightCurve, box_period_search)
# 8th Quarter of Tabby's star
TABBY_Q8 = ("https://archive.stsci.edu/missions/kepler/lightcurves"
"/0084/008462852/kplr008462852-2011073133259_llc.fits")
KEPLER10 = ("https://archive.stsci.edu/missions/kepler/lightcurves/"
"0119/011904151/kplr011904151-2010009091648_llc.fits")
def test_kepler_cbv_fit():
    # comparing that the two methods to do cbv fit are the nearly the same
    # NOTE(review): fetches a light curve from the MAST archive -> needs network.
    cbv = KeplerCBVCorrector(TABBY_Q8)
    cbv_lc = cbv.correct()
    assert_almost_equal(cbv.coeffs, [0.08534423, 0.10814261], decimal=3)
    lcf = KeplerLightCurveFile(TABBY_Q8)
    cbv_lcf = lcf.compute_cotrended_lightcurve()
    # Both entry points must produce the same corrected flux.
    assert_almost_equal(cbv_lc.flux, cbv_lcf.flux)
def test_KeplerLightCurve():
    """Metadata on a KeplerLightCurve should mirror its parent file."""
    lc_file = KeplerLightCurveFile(TABBY_Q8)
    lc = lc_file.get_lightcurve('SAP_FLUX')
    assert lc.channel == lc_file.channel
    # Kepler prime data carries a quarter, never a K2 campaign.
    assert lc.campaign is None
    assert lc.quarter == lc_file.quarter
    assert lc.mission == 'Kepler'
@pytest.mark.parametrize("quality_bitmask, answer", [('hardest', 2661),
                                                     ('hard', 2706),
                                                     ('default', 2917),
                                                     (None, 3279),
                                                     (1, 3279),
                                                     (100, 3252),
                                                     (2096639, 2661)])
def test_bitmasking(quality_bitmask, answer):
    """Each quality bitmask must keep exactly the expected cadence count."""
    lcf = KeplerLightCurveFile(TABBY_Q8, quality_bitmask=quality_bitmask)
    assert len(lcf.get_lightcurve('SAP_FLUX').flux) == answer
def test_lightcurve_fold():
    """Folding should wrap times into phase, honouring the phase offset."""
    curve = LightCurve(time=[1, 2, 3], flux=[1, 1, 1])
    folded = curve.fold(period=1)
    assert_almost_equal(folded.time[0], 0)
    # A negative phase offset shifts the folded times accordingly.
    assert_almost_equal(curve.fold(period=1, phase=-0.1).time[0], 0.1)
def test_cdpp():
    """Basic sanity checks for the CDPP noise metric."""
    # A perfectly flat lightcurve has (nearly) zero CDPP...
    flat = LightCurve(np.arange(200), np.ones(200))
    assert_almost_equal(flat.cdpp(), 0)
    # ...while Gaussian noise with sigma = 100 ppm should measure ~100 ppm.
    noisy = LightCurve(np.arange(10000),
                       np.random.normal(loc=1, scale=100e-6, size=10000))
    assert_almost_equal(noisy.cdpp(transit_duration=1), 100, decimal=-0.5)
def test_cdpp_tabby():
    """Compare the CDPP noise metric against the pipeline header value."""
    lcf = KeplerLightCurveFile(TABBY_Q8)
    pdc = lcf.PDCSAP_FLUX
    # Restrict to the first 1000 cadences: Tabby's star dips afterwards,
    # which would inflate the measured CDPP.
    lc = LightCurve(pdc.time[:1000], pdc.flux[:1000])
    assert np.abs(lc.cdpp() - lcf.header(ext=1)['CDPP6_0']) < 30
def test_lightcurve_plot():
    """Plotting a file and one of its lightcurves should not raise."""
    lcf = KeplerLightCurveFile(TABBY_Q8)
    for plottable in (lcf, lcf.SAP_FLUX):
        plottable.plot()
def test_sff_corrector():
    """Does our code agree with the example presented in Vanderburg
    and Johnson (2014)?"""
    # The CSV below, provided by Vanderburg and Johnson at
    # https://www.cfa.harvard.edu/~avanderb/k2/ep60021426.html,
    # contains the published SFF results for EPIC 60021426.
    fname = get_pkg_data_filename('./data/ep60021426alldiagnostics.csv')
    table = np.genfromtxt(fname, delimiter=',', skip_header=1)
    # Columns 0..6: time, raw flux, corrected flux, centroids, arclength,
    # correction.  (Column -2 flags thruster firings; unused here.)
    (time, raw_flux, corrected_flux, centroid_col,
     centroid_row, arclength, correction) = table[:, :7].T

    sff = SFFCorrector()
    corrected_lc = sff.correct(time=time, flux=raw_flux,
                               centroid_col=centroid_col,
                               centroid_row=centroid_row,
                               niters=1)
    # Multiply back the long-term trend (sff.bspline) that the corrector
    # divided out, so we compare against the published "unflattened" flux.
    assert_almost_equal(corrected_lc.flux * sff.bspline(time - time[0]),
                        corrected_flux, decimal=3)
    assert_array_equal(time, corrected_lc.time)
    # The factor of 4 converts pixel units to arcseconds.
    assert_almost_equal(4 * sff.s, arclength, decimal=2)
    assert_almost_equal(sff.interp(sff.s), correction, decimal=3)

    # Exercise the same correction through the KeplerLightCurve interface.
    klc = KeplerLightCurve(time=time, flux=raw_flux,
                           centroid_col=centroid_col,
                           centroid_row=centroid_row)
    klc = klc.correct(niters=1)
    sff = klc.corrector
    assert_almost_equal(klc.flux * sff.bspline(time - time[0]),
                        corrected_flux, decimal=3)
    assert_almost_equal(4 * sff.s, arclength, decimal=2)
    assert_almost_equal(sff.interp(sff.s), correction, decimal=3)
    assert_array_equal(time, klc.time)
def test_bin():
    """Binning in pairs halves the length and shrinks errors by sqrt(2)."""
    curve = LightCurve(time=np.arange(10), flux=2 * np.ones(10),
                       flux_err=2 ** .5 * np.ones(10))
    binned = curve.bin(binsize=2)
    assert len(binned.time) == 5
    assert_allclose(binned.flux, 2 * np.ones(5))
    # flux_err of sqrt(2) averaged over 2 points -> 1.
    assert_allclose(binned.flux_err, np.ones(5))
def test_normalize():
    """Normalizing must bring the median flux to unity."""
    curve = LightCurve(time=np.arange(10), flux=5 * np.ones(10))
    normalized = curve.normalize()
    assert_allclose(np.median(normalized.flux), 1)
def test_box_period_search():
    """Can we recover the orbital period of Kepler-10b?"""
    expected_period = 0.837  # days, the known period of Kepler-10b
    flattened = KeplerLightCurveFile(KEPLER10).PDCSAP_FLUX.flatten()
    _, _, best_period = box_period_search(flattened, min_period=.5,
                                          max_period=1, nperiods=100)
    assert abs(best_period - expected_period) < 1e-2
def test_to_pandas():
    """`LightCurve.to_pandas()` should round-trip the three columns."""
    time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
    lc = LightCurve(time, flux, flux_err)
    try:
        frame = lc.to_pandas()
    except ImportError:
        # pandas is an optional dependency
        return
    assert_allclose(frame.index, time)
    assert_allclose(frame.flux, flux)
    assert_allclose(frame.flux_err, flux_err)
def test_to_table():
    """`LightCurve.to_table()` should expose time/flux/flux_err columns."""
    time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
    table = LightCurve(time, flux, flux_err).to_table()
    for column, expected in (('time', time), ('flux', flux),
                             ('flux_err', flux_err)):
        assert_allclose(table[column], expected)
def test_to_csv():
    """`LightCurve.to_csv()` should serialise to the expected CSV text."""
    time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
    expected = 'time,flux,flux_err\n0,1.0,0.0\n1,1.0,0.0\n2,1.0,0.0\n'
    try:
        assert LightCurve(time, flux, flux_err).to_csv() == expected
    except ImportError:
        # pandas is an optional dependency
        pass
|
KeplerGOREPO_NAMEPyKEPATH_START.@PyKE_extracted@PyKE-master@pyke@tests@test_lightcurve.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Keck-DataReductionPipelines/KCWI_DRP",
"repo_path": "KCWI_DRP_extracted/KCWI_DRP-master/kcwidrp/scripts/__init__.py",
"type": "Python"
}
|
Keck-DataReductionPipelinesREPO_NAMEKCWI_DRPPATH_START.@KCWI_DRP_extracted@KCWI_DRP-master@kcwidrp@scripts@__init__.py@.PATH_END.py
|
|
{
"filename": "__init__.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/autofit/tools/__init__.py",
"type": "Python"
}
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@autofit@tools@__init__.py@.PATH_END.py
|
|
{
"filename": "_legend.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnel/_legend.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class LegendValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the `funnel.legend` subplot-id property."""

    def __init__(self, plotly_name="legend", parent_name="funnel", **kwargs):
        # Fill in defaults only when the caller did not override them.
        kwargs.setdefault("dflt", "legend")
        kwargs.setdefault("edit_type", "style")
        super(LegendValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnel@_legend.py@.PATH_END.py
|
{
"filename": "annz_rndCls_quick.py",
"repo_name": "IftachSadeh/ANNZ",
"repo_path": "ANNZ_extracted/ANNZ-master/examples/scripts/annz_rndCls_quick.py",
"type": "Python"
}
|
from scripts.helperFuncs import *

# command line arguments and basic settings
# --------------------------------------------------------------------------------------------------
init()

# just in case... (may comment this out)
if not glob.annz["doRandomCls"]:
    log.info(red(" - "+time.strftime("%d/%m/%y %H:%M:%S")+" - This scripts is only designed for randomClassification...")) ; sys.exit(0)

# ==================================================================================================
# The main code - randomized classification -
# --------------------------------------------------------------------------------------------------
# - run the following:
#     python annz_rndCls_quick.py --randomClassification --genInputTrees
#     python annz_rndCls_quick.py --randomClassification --train
#     python annz_rndCls_quick.py --randomClassification --optimize
#     python annz_rndCls_quick.py --randomClassification --evaluate
# --------------------------------------------------------------------------------------------------
log.info(whtOnBlck(" - "+time.strftime("%d/%m/%y %H:%M:%S")+" - starting ANNZ"))

# --------------------------------------------------------------------------------------------------
# general options which are the same for all stages
#   - PLEASE ALSO INSPECT generalSettings(), WHICH HAS BEEN RUN AS PART OF init(), FOR MORE OPTIONS
# --------------------------------------------------------------------------------------------------
# outDirName - set output directory name
glob.annz["outDirName"] = "test_randCls_quick"

# nMLMs - the number of random MLMs to generate - for running the example,
# we set nMLMs at a small value, but this should be >~ 50 for production
glob.annz["nMLMs"] = 10  # 100

# --------------------------------------------------------------------------------------------------
# the definition of the signal and background classes
# --------------------------------------------------------------------------------------------------
glob.annz["userCuts_sig"] = "type == 3"  # in this example, these are galaxies
glob.annz["userCuts_bck"] = "type == 6"  # in this example, these are stars

# --------------------------------------------------------------------------------------------------
# pre-processing of the input dataset
# --------------------------------------------------------------------------------------------------
if glob.annz["doGenInputTrees"]:
    # inDirName - directory in which input files are stored
    glob.annz["inDirName"] = "examples/data/sgSeparation/train"

    # inAsciiVars - list of parameter types and parameter names, corresponding to columns in the input
    # file, e.g., [TYPE:NAME] may be [F:MAG_U], with 'F' standing for float. (see advanced example for detailed explanation)
    glob.annz["inAsciiVars"] = "C:class; F:z; UL:objid; F:psfMag_r; F:fiberMag_r; F:modelMag_r; F:petroMag_r; F:petroRad_r; F:petroR50_r; " \
                             + " F:petroR90_r; F:lnLStar_r; F:lnLExp_r; F:lnLDeV_r; F:mE1_r; F:mE2_r; F:mRrCc_r; I:type_r; I:type"

    # --------------------------------------------------------------------------------------------------
    # - inAsciiFiles - list of files for training, testing and validation. objects are selected for each
    #                  subsample from the entire dataset.
    # - splitType - determine the method for splitting the dataset into training, testing and validation subsamples.
    # - see e.g., annz_rndReg_advanced.py for alternative ways to define input files and split datasets
    # --------------------------------------------------------------------------------------------------
    glob.annz["splitType"] = "serial"  # "serial", "blocks" or "random"
    glob.annz["inAsciiFiles"] = "sgCatalogue_galaxy_0.txt;sgCatalogue_galaxy_1.txt;sgCatalogue_star_0.txt;sgCatalogue_star_1.txt;sgCatalogue_star_3.txt"

    # run ANNZ with the current settings
    runANNZ()

# --------------------------------------------------------------------------------------------------
# training
# --------------------------------------------------------------------------------------------------
if glob.annz["doTrain"]:
    # for each MLM, run ANNZ
    for nMLMnow in range(glob.annz["nMLMs"]):
        glob.annz["nMLMnow"] = nMLMnow
        # optionally train only the MLM selected on the command line
        if glob.annz["trainIndex"] >= 0 and glob.annz["trainIndex"] != nMLMnow: continue

        # rndOptTypes - generate these randomized MLM types (currently "ANN", "BDT" or "ANN_BDT" are supported).
        glob.annz["rndOptTypes"] = "BDT"  # for this example, since BDTs are much faster to train, exclude ANNs...

        # inputVariables - semicolon-separated list of input variables for the MLMs. Can include math expressions of the variables
        # given in inAsciiVars (see https://root.cern.ch/root/html520/TFormula.html for examples of valid math expressions)
        glob.annz["inputVariables"] = "psfMag_r; fiberMag_r;modelMag_r ; petroMag_r; petroRad_r; petroR50_r;petroR90_r;" \
                                    + "lnLStar_r;lnLExp_r;lnLDeV_r ; TMath::Log(pow(mE1_r,2))"

        # can place here specific randomization settings, cuts and weights (see advanced example for details)
        # if this is left as is, then random job options are generated internally in ANNZ, using MLM types
        # given by rndOptTypes. see ANNZ::generateOptsMLM().
        # ....
        # --------------------------------------------------------------------------------------------------

        # run ANNZ with the current settings
        runANNZ()

# --------------------------------------------------------------------------------------------------
# optimization and evaluation
# --------------------------------------------------------------------------------------------------
if glob.annz["doOptim"] or glob.annz["doEval"]:

    # --------------------------------------------------------------------------------------------------
    # optimization
    # --------------------------------------------------------------------------------------------------
    if glob.annz["doOptim"]:
        # run ANNZ with the current settings
        runANNZ()

    # --------------------------------------------------------------------------------------------------
    # evaluation
    # --------------------------------------------------------------------------------------------------
    if glob.annz["doEval"]:
        # inDirName,inAsciiFiles - directory with files to make the calculations from, and list of input files
        glob.annz["inDirName"] = "examples/data/sgSeparation/eval/"
        glob.annz["inAsciiFiles"] = "sgCatalogue_galaxy.txt;sgCatalogue_star.txt"

        # inAsciiVars - list of parameters in the input files (doesn't need to be exactly the same as in
        # doGenInputTrees, but must contain all of the parameters which were used for training)
        glob.annz["inAsciiVars"] = "C:class; F:z; UL:objid; F:psfMag_r; F:fiberMag_r; F:modelMag_r; F:petroMag_r; F:petroRad_r; F:petroR50_r; " \
                                 + " F:petroR90_r; F:lnLStar_r; F:lnLExp_r; F:lnLDeV_r; F:mE1_r; F:mE2_r; F:mRrCc_r; I:type_r; I:type"

        # evalDirPostfix - if not empty, this string will be added to the name of the evaluation directory
        # (can be used to prevent multiple evaluation of different input files from overwriting each other)
        glob.annz["evalDirPostfix"] = ""

        # run ANNZ with the current settings
        runANNZ()

log.info(whtOnBlck(" - "+time.strftime("%d/%m/%y %H:%M:%S")+" - finished running ANNZ !"))
|
IftachSadehREPO_NAMEANNZPATH_START.@ANNZ_extracted@ANNZ-master@examples@scripts@annz_rndCls_quick.py@.PATH_END.py
|
{
"filename": "test_fitting.py",
"repo_name": "PaulHancock/Aegean",
"repo_path": "Aegean_extracted/Aegean-main/tests/unit/test_fitting.py",
"type": "Python"
}
|
#! /usr/bin/env python
"""
Test fitting.py
"""
import lmfit
import numpy as np
from AegeanTools import fitting
__author__ = 'Paul Hancock'
def make_model():
    """Build a single-component lmfit model: amp/xo/yo free, shape fixed."""
    spec = [('c0_amp', 1, True), ('c0_xo', 5, True), ('c0_yo', 5, True),
            ('c0_sx', 2.001, False), ('c0_sy', 2, False),
            ('c0_theta', 0, False), ('components', 1, False)]
    model = lmfit.Parameters()
    for name, value, vary in spec:
        model.add(name, value, vary=vary)
    return model
def test_elliptical_gaussian():
    """Finite parameters give finite values; theta=inf gives all-NaN."""
    x, y = np.indices((3, 3))
    finite = fitting.elliptical_gaussian(x, y, amp=1, xo=0, yo=1,
                                         sx=1, sy=1, theta=0)
    if np.any(np.isnan(finite)):
        raise AssertionError()
    broken = fitting.elliptical_gaussian(x, y, amp=1, xo=0, yo=1,
                                         sx=1, sy=1, theta=np.inf)
    if not np.all(np.isnan(broken)):
        raise AssertionError()
def test_CandBmatrix():
    """C and B matrices must be constructible and free of NaNs."""
    x, y = map(np.ravel, np.indices((3, 3)))
    cmat = fitting.Cmatrix(x, y, sx=1, sy=2, theta=0)
    if np.any(np.isnan(cmat)):
        raise AssertionError()
    if np.any(np.isnan(fitting.Bmatrix(cmat))):
        raise AssertionError()
def test_hessian_shape():
    """The Hessian must be (nvar, nvar, ny, nx) for 1 and 2 components."""
    model = make_model()
    x, y = np.indices((10, 10))
    # Single component: three free parameters (amp, xo, yo).
    if fitting.hessian(model, x, y).shape != (3, 3, 10, 10):
        raise AssertionError()
    # Add a second, fully-free component -> 3 + 6 = 9 free parameters.
    for name, value in (('c1_amp', 1), ('c1_xo', 5), ('c1_yo', 5),
                        ('c1_sx', 2.001), ('c1_sy', 2), ('c1_theta', 0)):
        model.add(name, value, vary=True)
    model['components'].value = 2
    if fitting.hessian(model, x, y).shape != (9, 9, 10, 10):
        raise AssertionError()
def test_jacobian_shape():
    """The Jacobian must be (nvar, ny, nx) for 1 and 2 components."""
    model = make_model()
    x, y = np.indices((10, 10))
    # Single component: three free parameters (amp, xo, yo).
    if fitting.jacobian(model, x, y).shape != (3, 10, 10):
        raise AssertionError()
    # Add a second, fully-free component -> 3 + 6 = 9 free parameters.
    for name, value in (('c1_amp', 1), ('c1_xo', 5), ('c1_yo', 5),
                        ('c1_sx', 2.001), ('c1_sy', 2), ('c1_theta', 0)):
        model.add(name, value, vary=True)
    model['components'].value = 2
    if fitting.jacobian(model, x, y).shape != (9, 10, 10):
        raise AssertionError()
def test_emp_vs_ana_jacobian():
    """Empirical and analytic Jacobians must agree to tight tolerance."""
    model = make_model()
    x, y = np.indices((10, 10))
    max_diff = np.max(np.abs(fitting.jacobian(model, x, y) -
                             fitting.emp_jacobian(model, x, y)))
    if not max_diff < 1e-5:
        raise AssertionError()
    # The two-component model is allowed a looser tolerance.
    for name, value in (('c1_amp', 1), ('c1_xo', 5), ('c1_yo', 5),
                        ('c1_sx', 2.001), ('c1_sy', 2), ('c1_theta', 0)):
        model.add(name, value, vary=True)
    model['components'].value = 2
    max_diff = np.max(np.abs(fitting.jacobian(model, x, y) -
                             fitting.emp_jacobian(model, x, y)))
    if not max_diff < 1e-3:
        raise AssertionError()
def test_emp_vs_ana_hessian():
    """Empirical and analytical Hessians must agree within tolerance."""
    model = make_model()
    x, y = np.indices((10, 10))
    max_diff = np.max(np.abs(fitting.hessian(model, x, y) -
                             fitting.emp_hessian(model, x, y)))
    if not max_diff < 1e-5:
        raise AssertionError()
    # The two-component model is allowed a much looser tolerance.
    for name, value in (('c1_amp', 1), ('c1_xo', 5), ('c1_yo', 5),
                        ('c1_sx', 2.001), ('c1_sy', 2), ('c1_theta', 0)):
        model.add(name, value, vary=True)
    model['components'].value = 2
    max_diff = np.max(np.abs(fitting.hessian(model, x, y) -
                             fitting.emp_hessian(model, x, y)))
    if not max_diff < 1:
        raise AssertionError()
def test_make_ita():
    """make_ita returns (npix, npix); all-NaN noise yields an empty result."""
    noise = np.random.random((10, 10))
    if fitting.make_ita(noise).shape != (100, 100):
        raise AssertionError()
    noise *= np.nan
    if len(fitting.make_ita(noise)) != 0:
        raise AssertionError()
def test_RB_bias():
    """RB_bias returns one entry per free parameter (3 for make_model)."""
    bias = fitting.RB_bias(np.random.random((4, 4)), make_model())
    if len(bias) != 3:
        raise AssertionError()
def test_bias_correct():
    """bias_correct should run to completion without raising."""
    fitting.bias_correct(make_model(), np.random.random((4, 4)))
if __name__ == "__main__":
    # Introspect the module and run every function whose name starts
    # with 'test' (poor-man's test runner for direct invocation).
    for name in dir():
        if not name.startswith('test'):
            continue
        print(name)
        globals()[name]()
|
PaulHancockREPO_NAMEAegeanPATH_START.@Aegean_extracted@Aegean-main@tests@unit@test_fitting.py@.PATH_END.py
|
{
"filename": "status.py",
"repo_name": "mhammond/pywin32",
"repo_path": "pywin32_extracted/pywin32-main/Pythonwin/pywin/dialogs/status.py",
"type": "Python"
}
|
# No cancel button.
import threading
import time
import win32api
import win32con
import win32ui
from pywin.mfc import dialog
from pywin.mfc.thread import WinThread
def MakeProgressDlgTemplate(caption, staticText=""):
    """Return a dialog template: a caption and a static text control.

    There is deliberately no cancel button (it used to exist; see the
    version history of this file).
    """
    frame_style = (
        win32con.DS_MODALFRAME
        | win32con.WS_POPUP
        | win32con.WS_VISIBLE
        | win32con.WS_CAPTION
        | win32con.WS_SYSMENU
        | win32con.DS_SETFONT
    )
    child_style = win32con.WS_CHILD | win32con.WS_VISIBLE
    width, height = 215, 40
    template = [[caption, (0, 0, width, height), frame_style, None,
                 (8, "MS Sans Serif")]]
    # Static text control with id 1000 (looked up by the dialog class).
    template.append([130, staticText, 1000,
                     (7, 7, width - 7, height - 32),
                     child_style | win32con.SS_LEFT])
    return template
class CStatusProgressDialog(dialog.Dialog):
    """A progress dialog: one line of static text plus a progress bar.

    Must be created and driven from the thread that owns the window;
    see CThreadedStatusProcessDialog for a cross-thread variant.
    """

    def __init__(self, title, msg="", maxticks=100, tickincr=1):
        self.initMsg = msg
        templ = MakeProgressDlgTemplate(title, msg)
        dialog.Dialog.__init__(self, templ)
        self.maxticks = maxticks
        self.tickincr = tickincr
        # the progress control only exists once OnInitDialog has run
        self.pbar = None

    def OnInitDialog(self):
        rc = dialog.Dialog.OnInitDialog(self)
        self.static = self.GetDlgItem(1000)  # the static text control
        self.pbar = win32ui.CreateProgressCtrl()
        self.pbar.CreateWindow(
            win32con.WS_CHILD | win32con.WS_VISIBLE, (10, 30, 310, 44), self, 1001
        )
        self.pbar.SetRange(0, self.maxticks)
        self.pbar.SetStep(self.tickincr)
        self.progress = 0
        self.pincr = 5
        return rc

    def Close(self):
        self.EndDialog(0)

    def SetMaxTicks(self, maxticks):
        # guard: may be called before OnInitDialog created the control
        if self.pbar is not None:
            self.pbar.SetRange(0, maxticks)

    def Tick(self):
        if self.pbar is not None:
            self.pbar.StepIt()

    def SetTitle(self, text):
        self.SetWindowText(text)

    def SetText(self, text):
        self.SetDlgItemText(1000, text)

    def Set(self, pos, max=None):
        if self.pbar is not None:
            self.pbar.SetPos(pos)
            if max is not None:
                self.pbar.SetRange(0, max)
# a progress dialog created in a new thread - especially suitable for
# console apps with no message loop.
# Private window messages used to marshal UI updates onto the dialog's
# own thread (see CThreadedStatusProcessDialog below).
MYWM_SETTITLE = win32con.WM_USER + 10
MYWM_SETMSG = win32con.WM_USER + 11
MYWM_TICK = win32con.WM_USER + 12
MYWM_SETMAXTICKS = win32con.WM_USER + 13
MYWM_SET = win32con.WM_USER + 14
class CThreadedStatusProcessDialog(CStatusProgressDialog):
    """Progress dialog that may be driven from a thread other than its own.

    The public mutators only record state and post a private window
    message; the actual UI update happens in the On* handlers, which run
    on the dialog's thread.
    """

    def __init__(self, title, msg="", maxticks=100, tickincr=1):
        self.title = title
        self.msg = msg
        # id of the thread constructing the dialog; Close() posts WM_QUIT here
        self.threadid = win32api.GetCurrentThreadId()
        CStatusProgressDialog.__init__(self, title, msg, maxticks, tickincr)

    def OnInitDialog(self):
        rc = CStatusProgressDialog.OnInitDialog(self)
        # route each private message to its handler
        self.HookMessage(self.OnTitle, MYWM_SETTITLE)
        self.HookMessage(self.OnMsg, MYWM_SETMSG)
        self.HookMessage(self.OnTick, MYWM_TICK)
        self.HookMessage(self.OnMaxTicks, MYWM_SETMAXTICKS)
        self.HookMessage(self.OnSet, MYWM_SET)
        return rc

    def _Send(self, msg):
        # post (not send) so the calling thread never blocks on the UI
        try:
            self.PostMessage(msg)
        except win32ui.error:
            # the user closed the window - but this does not cancel the
            # process - so just ignore it.
            pass

    # -- handlers: run on the dialog's thread, read the stored state --
    def OnTitle(self, msg):
        CStatusProgressDialog.SetTitle(self, self.title)

    def OnMsg(self, msg):
        CStatusProgressDialog.SetText(self, self.msg)

    def OnTick(self, msg):
        CStatusProgressDialog.Tick(self)

    def OnMaxTicks(self, msg):
        CStatusProgressDialog.SetMaxTicks(self, self.maxticks)

    def OnSet(self, msg):
        CStatusProgressDialog.Set(self, self.pos, self.max)

    def Close(self):
        assert self.threadid, "No thread!"
        # ends the dialog thread's message loop
        win32api.PostThreadMessage(self.threadid, win32con.WM_QUIT, 0, 0)

    # -- mutators: safe to call from any thread --
    def SetMaxTicks(self, maxticks):
        self.maxticks = maxticks
        self._Send(MYWM_SETMAXTICKS)

    def SetTitle(self, title):
        self.title = title
        self._Send(MYWM_SETTITLE)

    def SetText(self, text):
        self.msg = text
        self._Send(MYWM_SETMSG)

    def Tick(self):
        self._Send(MYWM_TICK)

    def Set(self, pos, max=None):
        self.pos = pos
        self.max = max
        self._Send(MYWM_SET)
class ProgressThread(WinThread):
    """WinThread that owns a CThreadedStatusProcessDialog.

    `createdEvent` is set once the dialog window exists, so the spawning
    thread can wait for it before touching `self.dialog`.
    """

    def __init__(self, title, msg="", maxticks=100, tickincr=1):
        self.title = title
        self.msg = msg
        self.maxticks = maxticks
        self.tickincr = tickincr
        self.dialog = None  # populated by InitInstance on the new thread
        WinThread.__init__(self)
        self.createdEvent = threading.Event()

    def InitInstance(self):
        self.dialog = CThreadedStatusProcessDialog(
            self.title, self.msg, self.maxticks, self.tickincr
        )
        self.dialog.CreateWindow()
        try:
            self.dialog.SetForegroundWindow()
        except win32ui.error:
            # best effort - the OS may refuse to change the foreground window
            pass
        self.createdEvent.set()
        return WinThread.InitInstance(self)

    def ExitInstance(self):
        return 0
def StatusProgressDialog(title, msg="", maxticks=100, parent=None):
    """Create, show and return a (same-thread) progress dialog."""
    dlg = CStatusProgressDialog(title, msg, maxticks)
    dlg.CreateWindow(parent)
    return dlg
def ThreadedStatusProgressDialog(title, msg="", maxticks=100):
    """Create a progress dialog on its own thread and return it.

    Suitable for console apps with no message loop.  Blocks for up to
    ~10 seconds waiting for the dialog window to be created.
    """
    t = ProgressThread(title, msg, maxticks)
    t.CreateThread()
    # Need to run a basic "PumpWaitingMessages" loop just in case we are
    # running inside Pythonwin.
    # Basic timeout in case things go terribly wrong. Ideally we should use
    # win32event.MsgWaitForMultipleObjects(), but we use a threading module
    # event - so use a dumb strategy.
    end_time = time.time() + 10
    while time.time() < end_time:
        # is_set(): isSet() is a deprecated alias removed in newer Pythons
        if t.createdEvent.is_set():
            break
        win32ui.PumpWaitingMessages()
        time.sleep(0.1)
    return t.dialog
def demo():
    """Drive a same-thread StatusProgressDialog through 100 ticks."""
    dlg = StatusProgressDialog("A Demo", "Doing something...")
    import win32api
    for step in range(100):
        if step == 50:
            dlg.SetText("Getting there...")
        elif step == 90:
            dlg.SetText("Nearly done...")
        win32api.Sleep(20)
        dlg.Tick()
    dlg.Close()
def thread_demo():
    """Drive a threaded progress dialog through 100 ticks."""
    dlg = ThreadedStatusProgressDialog("A threaded demo", "Doing something")
    import win32api
    for step in range(100):
        if step == 50:
            dlg.SetText("Getting there...")
        elif step == 90:
            dlg.SetText("Nearly done...")
        win32api.Sleep(20)
        dlg.Tick()
    dlg.Close()
if __name__ == "__main__":
    # Run the threaded variant by default; demo() exercises the simple one.
    thread_demo()
    # demo()
|
mhammondREPO_NAMEpywin32PATH_START.@pywin32_extracted@pywin32-main@Pythonwin@pywin@dialogs@status.py@.PATH_END.py
|
{
"filename": "_variantsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolargl/textfont/_variantsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class VariantsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for `scatterpolargl.textfont.variantsrc` (data-source ref)."""

    def __init__(
        self, plotly_name="variantsrc", parent_name="scatterpolargl.textfont", **kwargs
    ):
        # Fill in the default edit_type only when not overridden.
        kwargs.setdefault("edit_type", "none")
        super(VariantsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolargl@textfont@_variantsrc.py@.PATH_END.py
|
{
"filename": "mhd_jacobian.ipynb",
"repo_name": "AMReX-Astro/Castro",
"repo_path": "Castro_extracted/Castro-main/Source/mhd/notes/mhd_jacobian.ipynb",
"type": "Jupyter Notebook"
}
|
```python
from sympy import init_session
init_session()
```
IPython console for SymPy 1.0 (Python 3.6.3-64-bit) (ground types: python)
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
>>> init_printing()
Documentation can be found at http://docs.sympy.org/1.0/
```python
r, u, v, w, E, e, p = symbols("rho u v w E e p")
dedr, dedp = symbols(r"\left.\frac{\partial{}e}{\partial\rho}\right|_p \left.\frac{\partial{}e}{\partial{}p}\right|_\rho")
Bx, By, Bz = symbols("B_x B_y B_z")
```
```python
A = Matrix(
[[1, 0, 0, 0, 0, 0, 0, 0],
[u, r, 0, 0, 0, 0, 0, 0],
[v, 0, r, 0, 0, 0, 0, 0],
[w, 0, 0, r, 0, 0, 0, 0],
[e + r*dedr + (u**2 + v**2 + w**2)/2, r*u, r*v, r*w, r*dedp, Bx, By, Bz],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1]])
```
```python
A
```
$$\left[\begin{matrix}1 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\u & \rho & 0 & 0 & 0 & 0 & 0 & 0\\v & 0 & \rho & 0 & 0 & 0 & 0 & 0\\w & 0 & 0 & \rho & 0 & 0 & 0 & 0\\\left.\frac{\partial{}e}{\partial\rho}\right|_p \rho + e + \frac{u^{2}}{2} + \frac{v^{2}}{2} + \frac{w^{2}}{2} & \rho u & \rho v & \rho w & \left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho & B_{x} & B_{y} & B_{z}\\0 & 0 & 0 & 0 & 0 & 1 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 1 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 1\end{matrix}\right]$$
```python
A.inv()
```
$$\left[\begin{matrix}1 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\- \frac{u}{\rho} & \frac{1}{\rho} & 0 & 0 & 0 & 0 & 0 & 0\\- \frac{v}{\rho} & 0 & \frac{1}{\rho} & 0 & 0 & 0 & 0 & 0\\- \frac{w}{\rho} & 0 & 0 & \frac{1}{\rho} & 0 & 0 & 0 & 0\\\frac{1}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} \left(- \left.\frac{\partial{}e}{\partial\rho}\right|_p \rho - e + \frac{u^{2}}{2} + \frac{v^{2}}{2} + \frac{w^{2}}{2}\right) & - \frac{u}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} & - \frac{v}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} & - \frac{w}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} & \frac{1}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} & - \frac{B_{x}}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} & - \frac{B_{y}}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} & - \frac{B_{z}}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho}\\0 & 0 & 0 & 0 & 0 & 1 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 1 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 1\end{matrix}\right]$$
```python
Fr, Fu, Fv, Fw, FE, FBx, FBy, FBz = symbols(r"F_{\rho} F_{\rho{}u} F_{\rho{}v} F_{\rho{}w} F_{\rho{}E} F_{B_x} F_{B_y} F_{B_z}")
```
```python
F = Matrix([[Fr], [Fu], [Fv], [Fw], [FE], [FBx], [FBy], [FBz]])
```
```python
A.inv() * F
```
$$\left[\begin{matrix}F_{\rho}\\\frac{F_{\rho{}u}}{\rho} - \frac{F_{\rho} u}{\rho}\\\frac{F_{\rho{}v}}{\rho} - \frac{F_{\rho} v}{\rho}\\\frac{F_{\rho{}w}}{\rho} - \frac{F_{\rho} w}{\rho}\\- \frac{B_{x} F_{B_x}}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} - \frac{B_{y} F_{B_y}}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} - \frac{B_{z} F_{B_z}}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} + \frac{F_{\rho{}E}}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} - \frac{F_{\rho{}u} u}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} - \frac{F_{\rho{}v} v}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} - \frac{F_{\rho{}w} w}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} + \frac{F_{\rho}}{\left.\frac{\partial{}e}{\partial{}p}\right|_\rho \rho} \left(- \left.\frac{\partial{}e}{\partial\rho}\right|_p \rho - e + \frac{u^{2}}{2} + \frac{v^{2}}{2} + \frac{w^{2}}{2}\right)\\F_{B_x}\\F_{B_y}\\F_{B_z}\end{matrix}\right]$$
```python
```
|
AMReX-AstroREPO_NAMECastroPATH_START.@Castro_extracted@Castro-main@Source@mhd@notes@mhd_jacobian.ipynb@.PATH_END.py
|
{
"filename": "ssfr_distributions.py",
"repo_name": "ICRAR/shark",
"repo_path": "shark_extracted/shark-master/standard_plots/ssfr_distributions.py",
"type": "Python"
}
|
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2018
# Copyright by UWA (in the framework of the ICRAR)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Size plots"""
import functools
import numpy as np
import common
import utilities_statistics as us
##################################
# Constants
RExp = 1.67        # exponential-profile factor (not used in this script)
MpcToKpc = 1e3     # unit conversion (not used in this script)
G = 4.299e-9       # Gravity constant in units of (km/s)^2 * Mpc/Msun

# log10(sSFR/yr^-1) histogram definition
slow = -12.0                        # lower edge
supp = -8.0                         # upper edge
ds = 0.2                            # bin width in dex
sbins = np.arange(slow, supp, ds)   # lower bin edges
xsf = sbins + ds / 2.0              # bin centres (used for plotting)
def prepare_data(hdf5_data, index, hist_ssfr, mbins, dm):
    """Accumulate sSFR histograms per stellar-mass bin, in place.

    Adds counts into hist_ssfr[index, :, :] and returns the comoving
    volume of the read subvolumes (in Mpc^3, h-corrected).
    """
    (h0, volh, mdisk, mbulge, sfr_disk, sfr_burst) = hdf5_data

    # Totals in physical units: stellar mass in Msun, SFR in Msun/yr.
    mstars_tot = (mdisk + mbulge) / h0
    sfr_tot = (sfr_disk + sfr_burst) / h0 / 1e9

    # Take log10 of the positive entries only; zeros stay at zero and
    # never fall inside the mass bins below.
    positive = np.where(mstars_tot > 0)
    mstars_tot[positive] = np.log10(mstars_tot[positive])
    positive = np.where(sfr_tot > 0)
    sfr_tot[positive] = np.log10(sfr_tot[positive])

    for i, m in enumerate(mbins):
        in_bin = np.where((mstars_tot > m - dm * 0.5) &
                          (mstars_tot <= m + dm * 0.5))
        ssfr_in = sfr_tot[in_bin] - mstars_tot[in_bin]
        counts, _ = np.histogram(ssfr_in, bins=np.append(sbins, supp))
        hist_ssfr[index, i, :] = hist_ssfr[index, i, :] + counts

    return volh / h0 ** 3
def plot_ssfr(plt, outdir, obsdir, hist_ssfr):
    """Plot the z=0 sSFR distributions in four stellar-mass bins against
    the Katsianis+21 observations and save the figure as a PDF in outdir."""

    plots = [221, 222, 223, 224]  # 2x2 grid of panels, one per mass bin

    fig = plt.figure(figsize=(10,10))
    ytit = "$\\rm log_{10} (\\rm \\rho_{\\rm sSFR}/ cMpc^{-3} dex^{-1})$"
    xtit = "$\\rm log_{10} (\\rm sSFR/yr^{-1})$"
    xmin, xmax, ymin, ymax = -12, -8, 1e-6, 0.1
    # legend anchor positions (computed but not referenced below)
    xleg = xmax - 0.2 * (xmax - xmin)
    yleg = ymax - 0.1 * (ymax - ymin)

    def plot_observations(ax, i):
        # load observations: one (phi, err) column pair per mass bin
        sm, phi1, phi1err, phi2, phi2err, phi3, phi3err, phi4, phi4err = common.load_observation(obsdir, 'SFR/SSFR_distributions_Katsianis.dat', [0,1,2,3,4,5,6,7,8])
        label = 'Katsianis+21'
        # pick the column pair matching panel i
        if i == 0:
            phi = phi1
            err = phi1err
        elif i == 1:
            phi = phi2
            err = phi2err
        elif i == 2:
            phi = phi3
            err = phi3err
        elif i == 3:
            phi = phi4
            err = phi4err
        ind = np.where(phi != 0)  # keep populated bins only
        xplot = sm[ind]
        yplot = phi[ind]
        yerr = err[ind]
        # label only the first panel so the legend has a single entry
        if i == 0:
            ax.errorbar(xplot, yplot, yerr=yerr, ls='None', mfc='None', ecolor = 'grey', mec='grey',marker='D', label = label)
        else:
            ax.errorbar(xplot, yplot, yerr=yerr, ls='None', mfc='None', ecolor = 'grey', mec='grey',marker='D')

    labels = ['[9.5-10)','[10-10.5)','[10.5-11)','[11-11.5)']
    for i, s in enumerate(plots):
        ax = fig.add_subplot(s)
        # only the left-hand panels carry a y-axis title
        if (i==1 or i == 3):
            ytitle = ' '
        else:
            ytitle = ytit
        common.prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytitle, locators=(0.1, 1, 0.1, 1))
        ax.set_yscale('log')
        z=0  # index into the redshift axis of hist_ssfr
        ind = np.where(hist_ssfr[z,i,:] != 0)
        xplot = xsf[ind]
        yplot = hist_ssfr[z,i,ind]
        ax.plot(xplot,yplot[0],'k', linestyle='solid', label="$\\rm log_{10}(M_{\\star}/M_{\\odot})=$" + labels[i])
        # same curve shifted by -0.3 dex in sSFR, for reference
        ax.plot(xplot-0.3,yplot[0],'k', linestyle='dashed', label="-0.3dex")
        plot_observations(ax,i)
        common.prepare_legend(ax, ['k','k'], loc=2)

    common.savefig(outdir, fig, 'ssfr_distribution_z0.pdf')
def main(modeldir, outdir, redshift_table, subvols, obsdir):
    """Entry point: build and plot the z=0 sSFR distribution.

    Reads galaxy stellar masses and SFRs from the model output, accumulates
    the sSFR histograms per stellar-mass bin, normalises them to comoving
    number densities per dex, and produces the summary figure.
    """
    plt = common.load_matplotlib()

    fields = {'galaxies': ('mstars_disk', 'mstars_bulge',
                           'sfr_disk', 'sfr_burst')}
    zlist = [0]
    mbins = [9.75, 10.25, 10.75, 11.25]
    dm = 0.5

    hist_ssfr = np.zeros(shape=(len(zlist), len(mbins), len(sbins)))

    # Read data from each subvolume at a time and add it up
    # rather than appending it all together.
    for index, snapshot in enumerate(redshift_table[zlist]):
        hdf5_data = common.read_data(modeldir, snapshot, fields, subvols)
        vol = prepare_data(hdf5_data, index, hist_ssfr, mbins, dm)

    # Convert raw counts to a comoving number density per dex of sSFR.
    # NOTE(review): normalisation uses the volume of the last snapshot read;
    # with zlist = [0] there is only one, but confirm if zlist grows.
    hist_ssfr = hist_ssfr / vol / ds
    plot_ssfr(plt, outdir, obsdir, hist_ssfr)


if __name__ == '__main__':
    main(*common.parse_args())
|
ICRARREPO_NAMEsharkPATH_START.@shark_extracted@shark-master@standard_plots@ssfr_distributions.py@.PATH_END.py
|
{
"filename": "test_align.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/frame/methods/test_align.py",
"type": "Python"
}
|
from datetime import timezone
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
date_range,
)
import pandas._testing as tm
class TestDataFrameAlign:
    """Tests for ``DataFrame.align`` against Series and DataFrames: timezone
    handling, fill values, categorical and MultiIndex alignment, and
    copy/identity semantics of the returned objects."""

    def test_frame_align_aware(self):
        # Aligning tz-aware frames keeps the tz when it matches, and
        # converts both sides to UTC when the zones differ.
        idx1 = date_range("2001", periods=5, freq="h", tz="US/Eastern")
        idx2 = date_range("2001", periods=5, freq="2h", tz="US/Eastern")
        df1 = DataFrame(np.random.default_rng(2).standard_normal((len(idx1), 3)), idx1)
        df2 = DataFrame(np.random.default_rng(2).standard_normal((len(idx2), 3)), idx2)
        new1, new2 = df1.align(df2)
        assert df1.index.tz == new1.index.tz
        assert df2.index.tz == new2.index.tz

        # different timezones convert to UTC

        # frame with frame
        df1_central = df1.tz_convert("US/Central")
        new1, new2 = df1.align(df1_central)
        assert new1.index.tz is timezone.utc
        assert new2.index.tz is timezone.utc

        # frame with Series
        new1, new2 = df1.align(df1_central[0], axis=0)
        assert new1.index.tz is timezone.utc
        assert new2.index.tz is timezone.utc

        # smoke call: the result is intentionally discarded
        df1[0].align(df1_central, axis=0)
        assert new1.index.tz is timezone.utc
        assert new2.index.tz is timezone.utc

    def test_align_float(self, float_frame):
        # Self-alignment must return new objects, not the same manager.
        af, bf = float_frame.align(float_frame)
        assert af._mgr is not float_frame._mgr

        af, bf = float_frame.align(float_frame)
        assert af._mgr is not float_frame._mgr

        # axis = 0
        other = float_frame.iloc[:-5, :3]
        af, bf = float_frame.align(other, axis=0, fill_value=-1)

        tm.assert_index_equal(bf.columns, other.columns)

        # test fill value
        join_idx = float_frame.index.join(other.index)
        diff_a = float_frame.index.difference(join_idx)
        diff_a_vals = af.reindex(diff_a).values

        assert (diff_a_vals == -1).all()

        af, bf = float_frame.align(other, join="right", axis=0)
        tm.assert_index_equal(bf.columns, other.columns)
        tm.assert_index_equal(bf.index, other.index)
        tm.assert_index_equal(af.index, other.index)

        # axis = 1
        other = float_frame.iloc[:-5, :3].copy()
        af, bf = float_frame.align(other, axis=1)
        tm.assert_index_equal(bf.columns, float_frame.columns)
        tm.assert_index_equal(bf.index, other.index)

        # test fill value
        join_idx = float_frame.index.join(other.index)
        diff_a = float_frame.index.difference(join_idx)
        diff_a_vals = af.reindex(diff_a).values

        assert (diff_a_vals == -1).all()

        af, bf = float_frame.align(other, join="inner", axis=1)
        tm.assert_index_equal(bf.columns, other.columns)

        # Try to align DataFrame to Series along bad axis
        msg = "No axis named 2 for object type DataFrame"
        with pytest.raises(ValueError, match=msg):
            float_frame.align(af.iloc[0, :3], join="inner", axis=2)

    def test_align_frame_with_series(self, float_frame):
        # align dataframe to series with broadcast or not
        idx = float_frame.index
        s = Series(range(len(idx)), index=idx)

        left, right = float_frame.align(s, axis=0)
        tm.assert_index_equal(left.index, float_frame.index)
        tm.assert_index_equal(right.index, float_frame.index)
        assert isinstance(right, Series)

    def test_align_series_condition(self):
        # see gh-9558
        df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
        result = df[df["a"] == 2]
        expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"])
        tm.assert_frame_equal(result, expected)

        result = df.where(df["a"] == 2, 0)
        expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]})
        tm.assert_frame_equal(result, expected)

    def test_align_mixed_float(self, mixed_float_frame):
        # mixed floats/ints
        other = DataFrame(index=range(5), columns=["A", "B", "C"])
        af, bf = mixed_float_frame.align(
            other.iloc[:, 0], join="inner", axis=1, fill_value=0
        )
        tm.assert_index_equal(bf.index, Index([]))

    def test_align_mixed_int(self, mixed_int_frame):
        other = DataFrame(index=range(5), columns=["A", "B", "C"])
        af, bf = mixed_int_frame.align(
            other.iloc[:, 0], join="inner", axis=1, fill_value=0
        )
        tm.assert_index_equal(bf.index, Index([]))

    @pytest.mark.parametrize(
        "l_ordered,r_ordered,expected",
        [
            [True, True, pd.CategoricalIndex],
            [True, False, Index],
            [False, True, Index],
            [False, False, pd.CategoricalIndex],
        ],
    )
    def test_align_categorical(self, l_ordered, r_ordered, expected):
        # GH-28397
        # The aligned index stays categorical only when both sides agree
        # on orderedness; otherwise it falls back to a plain Index.
        df_1 = DataFrame(
            {
                "A": np.arange(6, dtype="int64"),
                "B": Series(list("aabbca")).astype(
                    pd.CategoricalDtype(list("cab"), ordered=l_ordered)
                ),
            }
        ).set_index("B")
        df_2 = DataFrame(
            {
                "A": np.arange(5, dtype="int64"),
                "B": Series(list("babca")).astype(
                    pd.CategoricalDtype(list("cab"), ordered=r_ordered)
                ),
            }
        ).set_index("B")

        aligned_1, aligned_2 = df_1.align(df_2)
        assert isinstance(aligned_1.index, expected)
        assert isinstance(aligned_2.index, expected)
        tm.assert_index_equal(aligned_1.index, aligned_2.index)

    def test_align_multiindex(self):
        # GH#10665
        # same test cases as test_align_multiindex in test_series.py
        midx = pd.MultiIndex.from_product(
            [range(2), range(3), range(2)], names=("a", "b", "c")
        )
        idx = Index(range(2), name="b")
        df1 = DataFrame(np.arange(12, dtype="int64"), index=midx)
        df2 = DataFrame(np.arange(2, dtype="int64"), index=idx)

        # these must be the same results (but flipped)
        res1l, res1r = df1.align(df2, join="left")
        res2l, res2r = df2.align(df1, join="right")

        expl = df1
        tm.assert_frame_equal(expl, res1l)
        tm.assert_frame_equal(expl, res2r)
        expr = DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
        tm.assert_frame_equal(expr, res1r)
        tm.assert_frame_equal(expr, res2l)

        res1l, res1r = df1.align(df2, join="right")
        res2l, res2r = df2.align(df1, join="left")

        exp_idx = pd.MultiIndex.from_product(
            [range(2), range(2), range(2)], names=("a", "b", "c")
        )
        expl = DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
        tm.assert_frame_equal(expl, res1l)
        tm.assert_frame_equal(expl, res2r)
        expr = DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
        tm.assert_frame_equal(expr, res1r)
        tm.assert_frame_equal(expr, res2l)

    def test_align_series_combinations(self):
        df = DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
        s = Series([1, 2, 4], index=list("ABD"), name="x")

        # frame + series
        res1, res2 = df.align(s, axis=0)
        exp1 = DataFrame(
            {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
            index=list("ABCDE"),
        )
        exp2 = Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x")

        tm.assert_frame_equal(res1, exp1)
        tm.assert_series_equal(res2, exp2)

        # series + frame
        res1, res2 = s.align(df)
        tm.assert_series_equal(res1, exp2)
        tm.assert_frame_equal(res2, exp1)

    def test_multiindex_align_to_series_with_common_index_level(self):
        # GH-46001
        foo_index = Index([1, 2, 3], name="foo")
        bar_index = Index([1, 2], name="bar")

        series = Series([1, 2], index=bar_index, name="foo_series")
        df = DataFrame(
            {"col": np.arange(6)},
            index=pd.MultiIndex.from_product([foo_index, bar_index]),
        )

        expected_r = Series([1, 2] * 3, index=df.index, name="foo_series")
        result_l, result_r = df.align(series, axis=0)

        tm.assert_frame_equal(result_l, df)
        tm.assert_series_equal(result_r, expected_r)

    def test_multiindex_align_to_series_with_common_index_level_missing_in_left(self):
        # GH-46001
        foo_index = Index([1, 2, 3], name="foo")
        bar_index = Index([1, 2], name="bar")

        series = Series(
            [1, 2, 3, 4], index=Index([1, 2, 3, 4], name="bar"), name="foo_series"
        )
        df = DataFrame(
            {"col": np.arange(6)},
            index=pd.MultiIndex.from_product([foo_index, bar_index]),
        )

        expected_r = Series([1, 2] * 3, index=df.index, name="foo_series")
        result_l, result_r = df.align(series, axis=0)

        tm.assert_frame_equal(result_l, df)
        tm.assert_series_equal(result_r, expected_r)

    def test_multiindex_align_to_series_with_common_index_level_missing_in_right(self):
        # GH-46001
        foo_index = Index([1, 2, 3], name="foo")
        bar_index = Index([1, 2, 3, 4], name="bar")

        series = Series([1, 2], index=Index([1, 2], name="bar"), name="foo_series")
        df = DataFrame(
            {"col": np.arange(12)},
            index=pd.MultiIndex.from_product([foo_index, bar_index]),
        )

        expected_r = Series(
            [1, 2, np.nan, np.nan] * 3, index=df.index, name="foo_series"
        )
        result_l, result_r = df.align(series, axis=0)

        tm.assert_frame_equal(result_l, df)
        tm.assert_series_equal(result_r, expected_r)

    def test_multiindex_align_to_series_with_common_index_level_missing_in_both(self):
        # GH-46001
        foo_index = Index([1, 2, 3], name="foo")
        bar_index = Index([1, 3, 4], name="bar")

        series = Series(
            [1, 2, 3], index=Index([1, 2, 4], name="bar"), name="foo_series"
        )
        df = DataFrame(
            {"col": np.arange(9)},
            index=pd.MultiIndex.from_product([foo_index, bar_index]),
        )

        expected_r = Series([1, np.nan, 3] * 3, index=df.index, name="foo_series")
        result_l, result_r = df.align(series, axis=0)

        tm.assert_frame_equal(result_l, df)
        tm.assert_series_equal(result_r, expected_r)

    def test_multiindex_align_to_series_with_common_index_level_non_unique_cols(self):
        # GH-46001
        foo_index = Index([1, 2, 3], name="foo")
        bar_index = Index([1, 2], name="bar")

        series = Series([1, 2], index=bar_index, name="foo_series")
        df = DataFrame(
            np.arange(18).reshape(6, 3),
            index=pd.MultiIndex.from_product([foo_index, bar_index]),
        )
        df.columns = ["cfoo", "cbar", "cfoo"]

        expected = Series([1, 2] * 3, index=df.index, name="foo_series")
        result_left, result_right = df.align(series, axis=0)

        tm.assert_series_equal(result_right, expected)
        tm.assert_index_equal(result_left.columns, df.columns)

    def test_missing_axis_specification_exception(self):
        df = DataFrame(np.arange(50).reshape((10, 5)))
        series = Series(np.arange(5))

        with pytest.raises(ValueError, match=r"axis=0 or 1"):
            df.align(series)

    def test_align_series_check_copy(self):
        # the aligned result must be a copy: mutating the original Series
        # afterwards should not be visible in the aligned output
        df = DataFrame({0: [1, 2]})
        ser = Series([1], name=0)
        expected = ser.copy()
        result, other = df.align(ser, axis=1)
        ser.iloc[0] = 100
        tm.assert_series_equal(other, expected)

    def test_align_identical_different_object(self):
        # GH#51032
        df = DataFrame({"a": [1, 2]})
        ser = Series([3, 4])
        result, result2 = df.align(ser, axis=0)
        tm.assert_frame_equal(result, df)
        tm.assert_series_equal(result2, ser)
        assert df is not result
        assert ser is not result2

    def test_align_identical_different_object_columns(self):
        # GH#51032
        df = DataFrame({"a": [1, 2]})
        ser = Series([1], index=["a"])
        result, result2 = df.align(ser, axis=1)
        tm.assert_frame_equal(result, df)
        tm.assert_series_equal(result2, ser)
        assert df is not result
        assert ser is not result2
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@frame@methods@test_align.py@.PATH_END.py
|
{
"filename": "_xref.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/mesh3d/colorbar/_xref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XrefValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``xref`` property of ``mesh3d.colorbar``.

    Accepts one of ``"container"`` or ``"paper"`` unless overridden via
    keyword arguments.
    """

    def __init__(self, plotly_name="xref", parent_name="mesh3d.colorbar", **kwargs):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["container", "paper"])
        super(XrefValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@mesh3d@colorbar@_xref.py@.PATH_END.py
|
{
"filename": "spectral-pipeline-2024.ipynb",
"repo_name": "gbrammer/msaexp",
"repo_path": "msaexp_extracted/msaexp-main/docs/examples/spectral-pipeline-2024.ipynb",
"type": "Jupyter Notebook"
}
|
# Run the MSAEXP pipeline steps for an example dataset from GO-4233 (RUBIES)
*Pipeline steps*
1. Query MAST and download full-frame exposures (``rate.fits``)
1. Run the preprocessing pipline through extracting 2D cutouts
1. Combine and rectify the cutouts to a final stack
1. Extract 1D spectrum
1. Fit redshift, line fluxes, etc. (see ``spectral-extractions-2024.ipynb``)
```python
# Are we on a GitHub codespace?
import os
if os.getcwd().startswith('/workspaces/msaexp'):
import os
os.environ['CRDS_PATH'] = os.path.join('/tmp/', 'crds_cache')
if not os.path.exists(os.environ['CRDS_PATH']):
! mkdir {os.environ['CRDS_PATH']}
os.environ['CRDS_SERVER_URL'] = 'https://jwst-crds.stsci.edu'
print('On codespace: ', os.environ['CRDS_PATH'], os.environ['CRDS_SERVER_URL'])
workdir = '/workspaces/msaexp/docs/examples/codespace'
if not os.path.exists(workdir):
! mkdir {workdir}
os.chdir(workdir)
else:
print('(not on a codespace)')
```
On codespace: /tmp/crds_cache https://jwst-crds.stsci.edu
```python
# CRDS variables
import os

# Bug fix: the original guard was `if 'CRDS_PATH' not in os.environ is None:`,
# a chained comparison equivalent to
# `('CRDS_PATH' not in os.environ) and (os.environ is None)`, which is always
# False — so CRDS_PATH was never set when the variable was absent.
if 'CRDS_PATH' not in os.environ:
    os.environ['CRDS_PATH'] = f'{os.getcwd()}/crds_cache'
if not os.path.exists(os.environ['CRDS_PATH']):
    os.makedirs(os.environ['CRDS_PATH'])
os.environ['CRDS_SERVER_URL'] = 'https://jwst-crds.stsci.edu'
```
```python
try:
import numba
except ImportError:
! pip install numba
```
```python
import os
import glob
import yaml
import warnings
import time
import numpy as np
import matplotlib.pyplot as plt
import grizli
from grizli import utils, jwst_utils
jwst_utils.set_quiet_logging()
utils.set_warnings()
import astropy.io.fits as pyfits
import jwst.datamodels
import jwst
import mastquery.jwst
import msaexp
from msaexp import pipeline
import msaexp.slit_combine
print(f'jwst version = {jwst.__version__}')
print(f'grizli version = {grizli.__version__}')
print(f'msaexp version = {msaexp.__version__}')
plt.rcParams['scatter.marker'] = '.'
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['grid.linestyle'] = ':'
```
jwst version = 1.14.0
grizli version = 1.11.6
msaexp version = 0.1.dev1+g44392ac
# Query MAST for NIRSpec data
Query by program name and download `rate.fits` files with `mastquery`. May need to set `$MAST_TOKEN` environment variable to download proprietary datasets from MAST.
Can optionally limit the query to specific
- gratings: ``prism``, ``g140m``, ``g235m``, ``g395m``, ``g140m``, ``g235m``, ``g395m``
- filters: ``clear``, ``f170lp``, ``f100lp``, ``f170lp``, ``f290lp``
- detectors: ``nrs1``, ``nrs2``
```python
## Optional
# Find the program, source_id based on a particular position
ra, dec = 214.97433534766, 52.92461350726
slits_url = f"https://grizli-cutout.herokuapp.com/nirspec_slits?coord={ra},{dec}"
slits = utils.read_catalog(slits_url, format='csv')
slits['program','msametfl','source_id','grating','footprint'][slits['is_source'] == 'True']
```
<div><i>GTable length=21</i>
<table id="table126383855000336" class="table-striped table-bordered table-condensed">
<thead><tr><th>program</th><th>msametfl</th><th>source_id</th><th>grating</th><th>footprint</th></tr></thead>
<thead><tr><th>int64</th><th>str25</th><th>int64</th><th>str5</th><th>str93</th></tr></thead>
<tr><td>4233</td><td>jw04233006001_01_msa.fits</td><td>44597</td><td>PRISM</td><td>((214.974316,52.924728),(214.974422,52.924720),(214.974390,52.924585),(214.974284,52.924593))</td></tr>
<tr><td>4233</td><td>jw04233006001_01_msa.fits</td><td>44597</td><td>PRISM</td><td>((214.974316,52.924727),(214.974423,52.924718),(214.974391,52.924584),(214.974284,52.924592))</td></tr>
<tr><td>4233</td><td>jw04233006001_01_msa.fits</td><td>44597</td><td>PRISM</td><td>((214.974315,52.924729),(214.974422,52.924721),(214.974389,52.924586),(214.974283,52.924594))</td></tr>
<tr><td>1345</td><td>jw01345069001_01_msa.fits</td><td>12308</td><td>G140M</td><td>((214.974492,52.924634),(214.974494,52.924570),(214.974269,52.924568),(214.974268,52.924632))</td></tr>
<tr><td>4233</td><td>jw04233006001_02_msa.fits</td><td>44597</td><td>G395M</td><td>((214.974316,52.924727),(214.974423,52.924718),(214.974391,52.924584),(214.974284,52.924592))</td></tr>
<tr><td>4233</td><td>jw04233006001_02_msa.fits</td><td>44597</td><td>G395M</td><td>((214.974315,52.924729),(214.974422,52.924721),(214.974389,52.924586),(214.974283,52.924594))</td></tr>
<tr><td>4233</td><td>jw04233006001_02_msa.fits</td><td>44597</td><td>G395M</td><td>((214.974316,52.924728),(214.974422,52.924720),(214.974390,52.924585),(214.974284,52.924593))</td></tr>
<tr><td>1345</td><td>jw01345069001_01_msa.fits</td><td>12308</td><td>G140M</td><td>((214.974495,52.924634),(214.974497,52.924569),(214.974272,52.924567),(214.974271,52.924632))</td></tr>
<tr><td>1345</td><td>jw01345069001_01_msa.fits</td><td>12308</td><td>G235M</td><td>((214.974499,52.924633),(214.974500,52.924569),(214.974275,52.924567),(214.974274,52.924632))</td></tr>
<tr><td>1345</td><td>jw01345069001_01_msa.fits</td><td>12308</td><td>G395M</td><td>((214.974495,52.924634),(214.974497,52.924569),(214.974272,52.924567),(214.974271,52.924632))</td></tr>
<tr><td>1345</td><td>jw01345069001_01_msa.fits</td><td>12308</td><td>G395M</td><td>((214.974492,52.924634),(214.974494,52.924570),(214.974269,52.924568),(214.974268,52.924632))</td></tr>
<tr><td>1345</td><td>jw01345069001_01_msa.fits</td><td>12308</td><td>G395M</td><td>((214.974499,52.924633),(214.974500,52.924569),(214.974275,52.924567),(214.974274,52.924632))</td></tr>
<tr><td>1345</td><td>jw01345069001_01_msa.fits</td><td>12308</td><td>G140M</td><td>((214.974499,52.924633),(214.974500,52.924569),(214.974275,52.924567),(214.974274,52.924632))</td></tr>
<tr><td>1345</td><td>jw01345069001_01_msa.fits</td><td>12308</td><td>G235M</td><td>((214.974495,52.924634),(214.974497,52.924569),(214.974272,52.924567),(214.974271,52.924632))</td></tr>
<tr><td>1345</td><td>jw01345069001_01_msa.fits</td><td>12308</td><td>G235M</td><td>((214.974492,52.924634),(214.974494,52.924570),(214.974269,52.924568),(214.974268,52.924632))</td></tr>
<tr><td>1345</td><td>jw01345070001_01_msa.fits</td><td>12308</td><td>PRISM</td><td>((214.974499,52.924632),(214.974500,52.924568),(214.974275,52.924566),(214.974274,52.924631))</td></tr>
<tr><td>1345</td><td>jw01345070001_01_msa.fits</td><td>12308</td><td>PRISM</td><td>((214.974496,52.924633),(214.974497,52.924568),(214.974272,52.924566),(214.974271,52.924631))</td></tr>
<tr><td>1345</td><td>jw01345070001_01_msa.fits</td><td>12308</td><td>PRISM</td><td>((214.974502,52.924632),(214.974503,52.924568),(214.974278,52.924566),(214.974277,52.924630))</td></tr>
<tr><td>1345</td><td>jw01345070001_01_msa.fits</td><td>12308</td><td>PRISM</td><td>((214.974498,52.924657),(214.974499,52.924593),(214.974275,52.924591),(214.974274,52.924655))</td></tr>
<tr><td>1345</td><td>jw01345070001_01_msa.fits</td><td>12308</td><td>PRISM</td><td>((214.974495,52.924657),(214.974496,52.924593),(214.974272,52.924591),(214.974270,52.924656))</td></tr>
<tr><td>1345</td><td>jw01345070001_01_msa.fits</td><td>12308</td><td>PRISM</td><td>((214.974501,52.924657),(214.974503,52.924593),(214.974278,52.924591),(214.974277,52.924655))</td></tr>
</table></div>
```python
# JWST observing program ID
prog = 4233
# Single source for testing
source_ids = [
44597, # line emitter
46811, # more extended
]
# A single RUBIES mask
outroot = 'rubies-egs61'
mask_query = mastquery.jwst.make_query_filter('visit_id', values=['04233006001'])
gratings = ['prism']
detectors = ['nrs1'] # limit to NRS1 for the example
```
## Query and download
```python
# Query NIRSpec data for a program name
masks = pipeline.query_program(prog,
download=True,
detectors=detectors,
gratings=gratings,
extensions=['uncal','s2d'],
extra_filters=mask_query,
)
files = glob.glob(f'jw0{prog}*rate.fits')
print(files)
```
['jw04233006001_03101_00002_nrs1_rate.fits', 'jw04233006001_03101_00003_nrs1_rate.fits', 'jw04233006001_03101_00004_nrs1_rate.fits']
```python
# Unset DQ=4 pixels to avoid running ``snowblind`` for now
for file in files:
with pyfits.open(file, mode='update') as im:
im['DQ'].data -= (im['DQ'].data & 4)
im.flush()
```
# Initialize pipeline
Exposures are grouped by detector and with a common `MSAMETFL` metadata file for the MSA setup.
## Preprocessing pipeline
1. Apply 1/f correction and identify "snowballs" on the `rate.fits` files
1. Remove "bias" (i.e., simple median) of each exposure
1. Rescale RNOISE array based on empty parts of the exposure
1. Run parts of the Level 2 JWST calibration pipeline ([calweb_spec2](https://jwst-pipeline.readthedocs.io/en/latest/jwst/pipeline/calwebb_spec2.html#calwebb-spec2)):
- [AssignWcs](https://jwst-pipeline.readthedocs.io/en/latest/api/jwst.assign_wcs.AssignWcsStep.html) : initialize WCS and populate slit bounding_box data
- [Extract2dStep](https://jwst-pipeline.readthedocs.io/en/latest/api/jwst.extract_2d.Extract2dStep.html) : identify slits and set slit WCS
- [FlatFieldStep](https://jwst-pipeline.readthedocs.io/en/latest/api/jwst.flatfield.FlatFieldStep.html#flatfieldstep) : slit-level flat field
- [PathLossStep](https://jwst-pipeline.readthedocs.io/en/latest/api/jwst.pathloss.PathLossStep.html) : NIRSpec path loss
- [BarShadowStep](https://jwst-pipeline.readthedocs.io/en/latest/api/jwst.barshadow.BarShadowStep.html#jwst.barshadow.BarShadowStep) : Bar Shadow
- See also [MSAEXP PR#66](https://github.com/gbrammer/msaexp/pull/66)
- [PhotomStep](https://jwst-pipeline.readthedocs.io/en/latest/api/jwst.photom.PhotomStep.html) : Photometric calibration
- Note that the `srctype`, `master_background`, `wavecorr` steps are not performed. The background subtraction is done manually on the 2D slit cutouts.
1. Parse slit metadata
1. Save slit cutout `SlitModel` files of the last pipeline step performed (`phot` = `PhotomStep`)
## Note!
When the ``source_ids`` list is specified, the pipeline is only run for those sources in the MSA plan and will be much faster. Set ``source_ids=None`` to extract *everything*.
```python
SKIP_COMPLETED = True
for file in files:
mode = '-'.join(file.split('_')[:4])
if (not os.path.exists(f'{mode}.slits.yaml')) & SKIP_COMPLETED:
pipe = pipeline.NirspecPipeline(mode=mode, files=[file])
pipe = pipeline.NirspecPipeline(mode=mode,
files=[file],
source_ids=source_ids,
positive_ids=True # Ignore background slits
)
pipe.full_pipeline(run_extractions=False,
initialize_bkg=False,
load_saved=None,
scale_rnoise=True)
else:
print(f'Skip preprocessing: {mode}')
```
Skip preprocessing: jw04233006001-03101-00002-nrs1
Skip preprocessing: jw04233006001-03101-00003-nrs1
Skip preprocessing: jw04233006001-03101-00004-nrs1
The final results of the preprocessing pipeline are the ``SlitModel`` objects stored in individual ``*.phot.*fits`` files.
```python
phot_files = glob.glob(f'jw0{prog}*{source_ids[0]}.fits')
phot_files.sort()
print('\n'.join(phot_files))
```
jw04233006001_03101_00002_nrs1_phot.186.4233_44597.fits
jw04233006001_03101_00003_nrs1_phot.186.4233_44597.fits
jw04233006001_03101_00004_nrs1_phot.186.4233_44597.fits
```python
fig, axes = plt.subplots(3, 1, figsize=(8,6), sharex=True, sharey=True)
for ax, file in zip(axes, phot_files):
dm = jwst.datamodels.open(file)
ax.imshow(dm.data, vmin=-0.1, vmax=0.3, aspect='auto', cmap='gray_r')
ax.grid()
fig.tight_layout(pad=1)
```

# Exposure combination and spectral extraction
```python
obj = msaexp.slit_combine.SlitGroup?
```
[0;31mInit signature:[0m
[0mmsaexp[0m[0;34m.[0m[0mslit_combine[0m[0;34m.[0m[0mSlitGroup[0m[0;34m([0m[0;34m[0m
[0;34m[0m [0mfiles[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mname[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mposition_key[0m[0;34m=[0m[0;34m'position_number'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mdiffs[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mgrating_diffs[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mstuck_threshold[0m[0;34m=[0m[0;36m0.5[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mhot_cold_kwargs[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mbad_shutter_names[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mdilate_failed_open[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mundo_barshadow[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mmin_bar[0m[0;34m=[0m[0;36m0.4[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mbar_corr_mode[0m[0;34m=[0m[0;34m'wave'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mfix_prism_norm[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0msky_arrays[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mestimate_sky_kwargs[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mflag_profile_kwargs[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mundo_pathloss[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mtrace_with_xpos[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mtrace_with_ypos[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mtrace_from_yoffset[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mfixed_offset[0m[0;34m=[0m[0;36m0.0[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mnod_offset[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mpad_border[0m[0;34m=[0m[0;36m2[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mweight_type[0m[0;34m=[0m[0;34m'ivm'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mreference_exposure[0m[0;34m=[0m[0;34m'auto'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0;34m**[0m[0mkwargs[0m[0;34m,[0m[0;34m[0m
[0;34m[0m[0;34m)[0m[0;34m[0m[0;34m[0m[0m
[0;31mDocstring:[0m <no docstring>
[0;31mInit docstring:[0m
Container for a list of 2D extracted ``SlitModel`` files
Parameters
----------
files : list
List of `SlitModel` files
name : str
Label for the group
position_key : str
Column in the ``info`` table to define the nod positions
- "y_index" = Rounded y offset
- "position_number" = dither number
- "shutter_state" = Shutter state from MPT. Usually robust, but can
get confused when multiple catalog sources fall within the slitlets
diffs : bool
Compute nod differences
grating_diffs : bool
Force diffs for grating spectra
stuck_threshold : float
Parameter for identifying stuck-closed shutters in prism spectra in
`~msaexp.slit_combine.SlitGroup.mask_stuck_closed_shutters`
bad_shutter_names : list, None
List of integer shutter indices (e.g., among ``[-1, 0, 1]`` for a
3-shutter slitlet) to mask as bad, e.g., from stuck shutters
dilate_failed_open : bool, int
Dilate the mask of pixels flagged with ``MSA_FAILED_OPEN``. If an integer,
do ``dilate_failed_open`` dilation iterations.
undo_barshadow : bool, 2
Undo the ``BarShadow`` correction if an extension found in the
slit model files. If ``2``, then apply internal barshadow correction
with ``bar_corr_mode``.
min_bar : float
Minimum acceptable value of the BarShadow reference
bar_corr_mode : str
Internal barshadow correction type
- ``flat``: monochromatic `~msaexp.utils.get_prism_bar_correction`
- ``wave``: wave-dependent `~msaexp.utils.get_prism_wave_bar_correction`
fix_prism_norm : bool
Apply prism normalization correction with
`~msaexp.utils.get_normalization_correction`.
sky_arrays : array-like
Optional sky data (in progress)
estimate_sky_kwargs : None, dict
Arguments to pass to `~msaexp.slit_combine.SlitGroup.estimate_sky` to
estimate sky directly from the slit data
undo_pathloss : bool
Undo pipeline pathloss correction (should usually be the
PATHLOSS_UNIFORM correction) if the extensions are found in the slit
model files
trace_with_xpos : bool
Compute traces including the predicted source center x position
trace_with_ypos : bool
Compute traces including the predicted source center y position
trace_from_yoffset : bool
Compute traces derived from y offsets
nod_offset : float, None
Nod offset size (pixels) to use if the slit model traces don't
already account for it, e.g., in background-indicated slits
without explicit catalog sources. If not provided (None), then set
to `MSA_NOD_ARCSEC / slit_pixel_scale`.
pad_border : int
Grow mask around edges of 2D cutouts
reference_exposure : int, 'auto'
Define a reference nod position. If ``'auto'``, then will use the
exposure in the middle of the nod offset distribution
weight_type : str
Weighting scheme for 2D resampling
- ``ivm`` : Use weights from ``var_rnoise``, like `jwst.resample <https://github.com/spacetelescope/jwst/blob/4342988027ee0811b57d3641bda4c8486d7da1f5/jwst/resample/resample_utils.py#L168>`_
- ``ivm_bar`` : Use a modified weight ``var_rnoise / barshadow**2``
- ``poisson`` : Weight with ``var_poisson``, msaexp extractions v1, v2
- ``exptime`` : Use ``slit.meta.exposure.exposure_time * mask``
- ``mask`` : Just use the bad pixel mask
pad_border : int
Grow mask around edges of 2D cutouts
Attributes
----------
meta : dict
Metadata about the processing status
sh : (int, int)
Dimensions of the 2D slit data
sci : array-like (float)
Science data with dimensions ``(N, sh[0]*sh[1])``
dq : array-like (int)
DQ bit flags
mask : array-like (bool)
Valid data
var : array-like (float)
Variance data
var_rnoise: array-like (float)
RNOISE variance data
var_poisson: array-like (float)
POISSON variance data
xslit : array-like (float)
Array of x slit coordinates
yslit: array-like (float)
Array of cross-dispersion coordinates. Should be zero along the
expected center of the (curved) trace
yslit_orig : array-like (float)
Copy of ``yslit``, which may be updated with new trace coefficients
ypix : array-like (float)
y pixel coordinates
wave : array-like (float)
2D wavelengths
bar : array-like (float)
The BarShadow correction if found in the `SlitModel` files
xtr : array-like (float)
1D x pixel along trace
ytr : array-like (float)
1D y trace position
wtr : array-like (float)
Wavelength along the trace
[0;31mFile:[0m ~/.python/current/lib/python3.10/site-packages/msaexp/slit_combine.py
[0;31mType:[0m type
[0;31mSubclasses:[0m
```python
group_kws = dict(
diffs=True, # For nod differences
undo_barshadow=2, # For msaexp barshadow correction
min_bar=0.35, # minimum allowed value for the (inverse) bar shadow correction
position_key="y_index",
trace_with_ypos=True, # Include expected y shutter offset in the trace
trace_from_yoffset=True,
flag_profile_kwargs=None, # Turn off profile flag
)
obj = msaexp.slit_combine.SlitGroup(
phot_files,
outroot,
**group_kws,
)
```
0 jw04233006001_03101_00002_nrs1_phot.186.4233_44597.fits (24, 431)
1 jw04233006001_03101_00003_nrs1_phot.186.4233_44597.fits (24, 431)
2 jw04233006001_03101_00004_nrs1_phot.186.4233_44597.fits (24, 431)
jw04233006001_03101_00002_nrs1_phot.186.4233_44597.fits source_type=None undo PATHLOSS_UN
jw04233006001_03101_00003_nrs1_phot.186.4233_44597.fits source_type=None undo PATHLOSS_UN
jw04233006001_03101_00004_nrs1_phot.186.4233_44597.fits source_type=None undo PATHLOSS_UN
Recomputed offsets slit: force [ 0.00, 5.07, -5.07] pix offsets
apply_spline_bar_correction(mode='wave')
get_normalization_correction: prism_slit_renormalize.yaml quadrant=4 xcen=291 ycen=50
```python
print(f"""
Number of exposures: {obj.N}
2D array shape: {obj.sh}
Flattened data array: {obj.sci.shape}
""")
```
Number of exposures: 3
2D array shape: (24, 431)
Flattened data array: (3, 10344)
Show the exposure data again now with the trace.
Also note that the sky is flatter than before with the updated bar shadow correction.
```python
fig, axes = plt.subplots(obj.N, 1, figsize=(8,6), sharex=True, sharey=True)
for i, ax in enumerate(axes):
ax.imshow(obj.sci[i,:].reshape(obj.sh), vmin=-0.1, vmax=0.3, aspect='auto', cmap='gray_r')
ax.plot(obj.ytr[i,:], color='magenta', alpha=0.3, lw=4)
ax.grid()
fig.tight_layout(pad=1)
```

## Fit the trace profile
The profile is modeled as a (pixel-integrated) Gaussian with a specified width that is added in quadrature with the tabulated
PSF width.
```python
obj.fit_all_traces?
```
[0;31mSignature:[0m [0mobj[0m[0;34m.[0m[0mfit_all_traces[0m[0;34m([0m[0mniter[0m[0;34m=[0m[0;36m3[0m[0;34m,[0m [0mdchi_threshold[0m[0;34m=[0m[0;34m-[0m[0;36m25[0m[0;34m,[0m [0mref_exp[0m[0;34m=[0m[0;36m2[0m[0;34m,[0m [0;34m**[0m[0mkwargs[0m[0;34m)[0m[0;34m[0m[0;34m[0m[0m
[0;31mDocstring:[0m
Fit all traces in the group
Parameters
----------
niter : int
Number of iterations for fitting the traces (default: 3)
dchi_threshold : float
Threshold value for the change in chi-square to consider a fit
(default: -25)
ref_exp : int
Reference exposure for fitting the traces (default: 2)
kwargs : dict
Additional keyword arguments for the fitting process
Returns
-------
tfits : dict
Dictionary containing the fit results for each exposure group
[0;31mFile:[0m ~/.python/current/lib/python3.10/site-packages/msaexp/slit_combine.py
[0;31mType:[0m method
```python
fit = obj.fit_all_traces(
offset_degree=0, # order of the offset polynomial to fit
force_positive=False,
x0=[2, 0.], # Initial guess: gaussian width in pixels x 10, trace offset pixels
niter=1,
ref_exp=obj.calc_reference_exposure
)
```
fit_all_traces, iter 0
91 sigma=2.00 [ 0.000] 5718.5
92 sigma=6.09 [ 0.036] 3975.4
Exposure group 2 dchi2 = -1743.1
93 sigma=6.09 [ 0.036] 4899.3
94 sigma=6.09 [ 0.036] 4899.3
Exposure group 1 dchi2 = 0.0
95 sigma=6.09 [ 0.036] 4890.4
96 sigma=6.09 [ 0.036] 4890.4
Exposure group 3 dchi2 = 0.0
```python
# Show the profile fit
fig2d = obj.plot_2d_differences(fit=fit)
```

# Resample the spectra to a rectified pixel grid and get optimal 1D extraction
```python
drizzle_kws = dict(
step=1, # cross dispersion step size
ny=15, # number of cross dispersion pixels
with_pathloss=True, # use MSAEXP path loss that accounts for source size
wave_sample=1.05, # wavelength sampling
dkws=dict(oversample=16, pixfrac=0.8),
)
hdul = msaexp.slit_combine.combine_grating_group(
{'prism': {'obj':obj, 'fit': fit}},
['prism'],
drizzle_kws=drizzle_kws
)
```
msaexp.drizzle.extract_from_hdul: Initial center = 0.00, sigma = 0.61
msaexp.drizzle.extract_from_hdul: dchi2/dcenter = 26234.0
msaexp.drizzle.extract_from_hdul: aperture extraction = (15, 1)
Added path_corr column to spec
msaexp.drizzle.extract_from_hdul: Output center = -0.00, sigma = 0.61
```python
hdul.info()
```
Filename: (No file associated with this HDUList)
No. Name Ver Type Cards Dimensions Format
0 PRIMARY 1 PrimaryHDU 4 ()
1 SPEC1D 1 BinTableHDU 335 435R x 5C ['D', 'D', 'D', 'D', 'D']
2 SCI 1 ImageHDU 319 (435, 31) float64
3 WHT 1 ImageHDU 319 (435, 31) float64
4 PROFILE 1 ImageHDU 319 (435, 31) float64
5 PROF1D 1 BinTableHDU 25 31R x 3C ['D', 'D', 'D']
6 SLITS 1 BinTableHDU 103 3R x 47C ['55A', 'K', 'K', 'D', 'D', 'D', 'D', 'D', 'K', '10A', 'D', 'D', 'D', 'D', '3A', 'K', 'K', 'D', 'D', 'K', 'K', 'K', 'K', 'K', 'K', 'K', '4A', '5A', '5A', '29A', 'K', 'K', 'D', 'K', 'K', 'K', '12A', 'D', 'D', 'D', 'D', '17A', 'K', '4A', 'K', 'D', 'D']
```python
spec = utils.read_catalog(hdul['SPEC1D'])
fig, ax = plt.subplots(1,1,figsize=(10,5))
pl = ax.plot(spec['wave'], spec['flux'], label='msaexp', color='steelblue', alpha=0.5)
ax.plot(spec['wave'], spec['err'], color=pl[0].get_color(), alpha=0.5)
ax.legend()
ax.set_xlabel('wavelength, um')
ax.set_ylabel(r'$f_\nu$ $\mu$Jy')
ax.grid()
```

```python
# 2D
fig, ax = plt.subplots(1,1,figsize=(10,5))
ax.imshow(hdul['SCI'].data, vmin=-0.1, vmax=0.2, aspect='auto', cmap='gray_r')
fig.tight_layout(pad=0)
```

## Estimate the sky directly from the spectrum
If the sky is well determined, this can eliminate the need to take the differences of the nodded exposures
```python
obj.estimate_sky?
```
[0;31mSignature:[0m
[0mobj[0m[0;34m.[0m[0mestimate_sky[0m[0;34m([0m[0;34m[0m
[0;34m[0m [0mmask_yslit[0m[0;34m=[0m[0;34m[[0m[0;34m[[0m[0;34m-[0m[0;36m4.5[0m[0;34m,[0m [0;36m4.5[0m[0;34m][0m[0;34m][0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mmin_bar[0m[0;34m=[0m[0;36m0.95[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mvar_percentiles[0m[0;34m=[0m[0;34m[[0m[0;34m-[0m[0;36m5[0m[0;34m,[0m [0;34m-[0m[0;36m5[0m[0;34m][0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mdf[0m[0;34m=[0m[0;36m51[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mhigh_clip[0m[0;34m=[0m[0;36m0.8[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0muse[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0moutlier_threshold[0m[0;34m=[0m[0;36m7[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mabsolute_threshold[0m[0;34m=[0m[0;36m0.2[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mmake_plot[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0;34m**[0m[0mkwargs[0m[0;34m,[0m[0;34m[0m
[0;34m[0m[0;34m)[0m[0;34m[0m[0;34m[0m[0m
[0;31mDocstring:[0m
Estimate sky spectrum by fitting a flexible spline to all available pixels in
indicated empty shutter areas
Parameters
----------
mask_yslit : list of (float, float)
Range of shutter pixels to exclude as coming from the source and/or
contaminants
min_bar : float
Minimum allowed value of the bar obscuration
var_percentiles : None, (float, float)
Exclude sky pixels with variances outside of this percentile range. If
a negative number is provided, treat as an explicit number of pixels
to exclude from the low and high sides.
df : int
Degrees of freedom of the spline fit. If ``df = 0``, then just compute
a scalar normalization factor. If ``df < 0``, then don't rescale at all.
use : bool
Use the resulting sky model for the local sky
outlier_threshold : float, None
Mask pixels where the residual w.r.t the sky model is greater than this
make_plot : bool
    Make a diagnostic plot
Returns
-------
fig : `~matplotlib.figure.Figure`
    Figure object if ``make_plot`` else None
Stores sky fit results in ``sky_data`` attribute
[0;31mFile:[0m ~/.python/current/lib/python3.10/site-packages/msaexp/slit_combine.py
[0;31mType:[0m method
```python
estimate_sky_kwargs = dict(
mask_yslit=[[-4.5, 4.5]], # mask pixels expected to contain the source
min_bar=0.95,
df=81, # number of splines to fit. Needs to be high to fit the wiggles in the sky spectrum
high_clip=1.0,
make_plot=True,
)
_ = obj.estimate_sky(**estimate_sky_kwargs)
```
estimate_sky: N = 3568 , outliers > 7: 11

The ``data`` attribute is ``sci - sky2d`` if ``sky2d`` is available
```python
fig, axes = plt.subplots(obj.N, 1, figsize=(8,6), sharex=True, sharey=True)
for i, ax in enumerate(axes):
ax.imshow(obj.data[i,:].reshape(obj.sh), vmin=-0.1, vmax=0.3, aspect='auto', cmap='gray_r')
ax.plot(obj.ytr[i,:], color='magenta', alpha=0.3, lw=4)
ax.grid()
fig.tight_layout(pad=1)
```

## Flag outliers based on the cross-dispersion profile
```python
obj.flag_from_profile?
```
[0;31mSignature:[0m
[0mobj[0m[0;34m.[0m[0mflag_from_profile[0m[0;34m([0m[0;34m[0m
[0;34m[0m [0mgrow[0m[0;34m=[0m[0;36m2[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mnfilt[0m[0;34m=[0m[0;34m-[0m[0;36m32[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mrequire_multiple[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mmake_plot[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m[0;34m)[0m[0;34m[0m[0;34m[0m[0m
[0;31mDocstring:[0m
Flag pixel outliers based on the cross-dispersion profile
- Calculate the p5, p50, and p95 rolling percentiles across the profile
- Flag high pixels where ``value > p95 + (p95 - p50)*grow``
- Flag low pixels where ``value < min(p5, 0) - 5 * sigma``
Parameters
----------
grow : float
nfilt : int
Size of the filter window for the profile. If ``nfilt < 0``, then interpret
as ``nfilt = self.mask.sum() // -nfilt``, otherwise use directly as the
filter size
require_multiple : bool
Require that flagged pixels appear in multiple exposures
make_plot : bool
Make a diagnostic figure
Returns
-------
updates the ``mask`` attribute
[0;31mFile:[0m ~/.python/current/lib/python3.10/site-packages/msaexp/slit_combine.py
[0;31mType:[0m method
```python
flag_profile_kwargs = dict(require_multiple=True, make_plot=True, grow=2, nfilt=-32)
obj.flag_from_profile(**flag_profile_kwargs)
```
flag_from_profile: 9 ( 0.1%) pixels

## Turn off exposure differences and redo the resampling and extraction
```python
obj.meta["diffs"] = False
```
```python
drizzle_kws = dict(
step=1, # cross dispersion step size
ny=15, # number of cross dispersion pixels
with_pathloss=True, # use MSAEXP path loss that accounts for source size
wave_sample=1.05, # wavelength sampling
dkws=dict(oversample=16, pixfrac=0.8),
)
hdul_nodiff = msaexp.slit_combine.combine_grating_group(
{'prism': {'obj':obj, 'fit': fit}},
['prism'],
drizzle_kws=drizzle_kws
)
```
msaexp.drizzle.extract_from_hdul: Initial center = 0.00, sigma = 0.61
msaexp.drizzle.extract_from_hdul: dchi2/dcenter = 34986.5
msaexp.drizzle.extract_from_hdul: aperture extraction = (15, 1)
Added path_corr column to spec
msaexp.drizzle.extract_from_hdul: Output center = -0.00, sigma = 0.61
```python
# 2D
fig, axes = plt.subplots(2,1,figsize=(10,6), sharex=True, sharey=True)
kws = dict(vmin=-0.1, vmax=0.2, aspect='auto', cmap='gray_r')
axes[0].imshow(hdul['SCI'].data, **kws)
axes[0].set_ylabel('Nod diffs')
axes[1].imshow(hdul_nodiff['SCI'].data, **kws)
axes[1].set_ylabel('Global sky')
fig.tight_layout(pad=0.5)
```

# Combination and extraction wrapped into a single script
```python
msaexp.slit_combine.extract_spectra?
```
[0;31mSignature:[0m
[0mmsaexp[0m[0;34m.[0m[0mslit_combine[0m[0;34m.[0m[0mextract_spectra[0m[0;34m([0m[0;34m[0m
[0;34m[0m [0mtarget[0m[0;34m=[0m[0;34m'1208_5110240'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mroot[0m[0;34m=[0m[0;34m'nirspec'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mpath_to_files[0m[0;34m=[0m[0;34m'./'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mfiles[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mdo_gratings[0m[0;34m=[0m[0;34m[[0m[0;34m'PRISM'[0m[0;34m,[0m [0;34m'G395H'[0m[0;34m,[0m [0;34m'G395M'[0m[0;34m,[0m [0;34m'G235M'[0m[0;34m,[0m [0;34m'G140M'[0m[0;34m][0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mjoin[0m[0;34m=[0m[0;34m[[0m[0;36m0[0m[0;34m,[0m [0;36m3[0m[0;34m,[0m [0;36m5[0m[0;34m][0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0msplit_uncover[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mstuck_threshold[0m[0;34m=[0m[0;36m0.0[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mpad_border[0m[0;34m=[0m[0;36m2[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0msort_by_sn[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mposition_key[0m[0;34m=[0m[0;34m'y_index'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mmask_cross_dispersion[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mcross_dispersion_mask_type[0m[0;34m=[0m[0;34m'trace'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mtrace_from_yoffset[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mreference_exposure[0m[0;34m=[0m[0;34m'auto'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mtrace_niter[0m[0;34m=[0m[0;36m4[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0moffset_degree[0m[0;34m=[0m[0;36m0[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mdegree_kwargs[0m[0;34m=[0m[0;34m{[0m[0;34m}[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mrecenter_all[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mnod_offset[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0minitial_sigma[0m[0;34m=[0m[0;36m7[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mfit_type[0m[0;34m=[0m[0;36m1[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0minitial_theta[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mfix_params[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0minput_fix_sigma[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mfit_params_kwargs[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mdiffs[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mundo_pathloss[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mundo_barshadow[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0msky_arrays[0m[0;34m=[0m[0;32mNone[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0muse_first_sky[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mdrizzle_kws[0m[0;34m=[0m[0;34m{[0m[0;34m'step'[0m[0;34m:[0m [0;36m1[0m[0;34m,[0m [0;34m'with_pathloss'[0m[0;34m:[0m [0;32mTrue[0m[0;34m,[0m [0;34m'wave_sample'[0m[0;34m:[0m [0;36m1.05[0m[0;34m,[0m [0;34m'ny'[0m[0;34m:[0m [0;36m13[0m[0;34m,[0m [0;34m'dkws'[0m[0;34m:[0m [0;34m{[0m[0;34m'oversample'[0m[0;34m:[0m [0;36m16[0m[0;34m,[0m [0;34m'pixfrac'[0m[0;34m:[0m [0;36m0.8[0m[0;34m}[0m[0;34m}[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mget_xobj[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mtrace_with_xpos[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mtrace_with_ypos[0m[0;34m=[0m[0;34m'auto'[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mget_background[0m[0;34m=[0m[0;32mFalse[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mmake_2d_plots[0m[0;34m=[0m[0;32mTrue[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0mplot_kws[0m[0;34m=[0m[0;34m{[0m[0;34m}[0m[0;34m,[0m[0;34m[0m
[0;34m[0m [0;34m**[0m[0mkwargs[0m[0;34m,[0m[0;34m[0m
[0;34m[0m[0;34m)[0m[0;34m[0m[0;34m[0m[0m
[0;31mDocstring:[0m
Spectral combination workflow splitting by grating and multiple observations in a particular grating
Parameters
----------
target : str
Target name. If no ``files`` specified, will search for the 2D slit
cutout files with names like ``*phot*{target}.fits``
root : str
Output file rootname
path_to_files : str
Directory path containing the ``phot`` files
files : list, None
Optional explicit list of ``phot`` files to combine
do_gratings : list
Gratings to consider
join : list
Indices of ``files[i].split('[._]') + GRATING`` to join as a group
split_uncover : bool
Split sub-pixel dithers from UNCOVER (GO-2561) when defining exposure groups
sort_by_sn : bool
Try to process groups in order of decreasing S/N, i.e., to derive the
trace offsets in the prism where it will be best defined and propagate
to other groups with the gratings
mask_cross_dispersion : None or [int, int]
Optional cross-dispersion masking, e.g., for stuck-closed shutters or
multiple sources within a slitlet. The specified values are integer
indices of the pixel range to mask. See ``cross_dispersion_mask_type``.
cross_dispersion_mask_type : str
Type of cross dispersion mask to apply. With ``'trace'``, the masked
pixels are calculated relative to the (expected) center of the trace,
and, e.g., ``mask_cross_dispersion = [5,100]`` will mask all pixels 5
pixels "above" the center of the trace (100 is an arbitrarily large
number to include all pixels). The mask will shift along with the nod
offsets.
With ``fixed``, the mask indices are relative to the trace *in the
first exposure* and won't shift with the nod offsets. So
``mask_cross_dispersion = [-3,3]`` would mask roughly the central
shutter in all exposures that will contain the source in some
    exposures and not in others. This can be used to try to mitigate some
    stuck-closed shutters, though how effective it is is still under
    investigation.
stuck_threshold, pad_border :
See `~msaexp.slit_combine.SlitGroup`
trace_from_yoffset, trace_with_xpos, trace_with_ypos :
See `~msaexp.slit_combine.SlitGroup`
position_key, reference_exposure, nod_offset, diffs :
See `~msaexp.slit_combine.SlitGroup`
undo_pathloss, undo_barshadow :
See `~msaexp.slit_combine.SlitGroup`
trace_niter : int, optional
Number of iterations for the trace fit
offset_degree : int, optional
Polynomial offset degree
degree_kwargs : dict, optional
Degree keyword arguments
recenter_all : bool, optional
Refit for the trace center for all groups. If False,
use the center from the first (usually highest S/N prism)
trace.
initial_sigma : float, optional
Initial sigma value. This is 10 times the Gaussian sigma
width, in pixels.
fit_type : int, optional
Fit type value
initial_theta : None, optional
Initial parameter guesses
fix_params : bool, optional
Fix parameters to ``initial_theta``
input_fix_sigma : None, optional
Input fix sigma value
fit_params_kwargs : None, optional
Fit parameters keyword arguments
drizzle_kws : dict, optional
Drizzle keyword arguments
get_xobj : bool, optional
Return `~msaexp.slit_combine.SlitGroup` objects along with the
HDU product
get_background : bool, optional
Get background value
make_2d_plots : bool, optional
Make 2D plots value
Returns
-------
None : null
If no valid spectra are found
hdu : dict
Dict of `~astropy.io.fits.HDUList` objects for the separate gratings
xobj : dict
Dictionary of `~msaexp.slit_combine.SlitGroup` objects if ``get_xobj=True``
[0;31mFile:[0m ~/.python/current/lib/python3.10/site-packages/msaexp/slit_combine.py
[0;31mType:[0m function
```python
group_kws['diffs'] = True
group_kws['flag_profile_kwargs'] = flag_profile_kwargs
target=f'{prog}_{source_ids[0]}'
_ = msaexp.slit_combine.extract_spectra(
target=target,
root=outroot,
**group_kws,
)
```
# (2024-05-16 14:24:25.442)
slit_combine.extract_spectra(**{'target': '4233_44597', 'root': 'rubies-egs61', 'path_to_files': './', 'files': None, 'do_gratings': ['PRISM', 'G395H', 'G395M', 'G235M', 'G140M'], 'join': [0, 3, 5], 'split_uncover': True, 'stuck_threshold': 0.0, 'pad_border': 2, 'sort_by_sn': False, 'position_key': 'y_index', 'mask_cross_dispersion': None, 'cross_dispersion_mask_type': 'trace', 'trace_from_yoffset': True, 'reference_exposure': 'auto', 'trace_niter': 4, 'offset_degree': 0, 'degree_kwargs': {}, 'recenter_all': False, 'nod_offset': None, 'initial_sigma': 7, 'fit_type': 1, 'initial_theta': None, 'fix_params': False, 'input_fix_sigma': None, 'fit_params_kwargs': None, 'diffs': True, 'undo_pathloss': True, 'undo_barshadow': 2, 'use_first_sky': False, 'drizzle_kws': {'step': 1, 'with_pathloss': True, 'wave_sample': 1.05, 'ny': 13, 'dkws': {'oversample': 16, 'pixfrac': 0.8}}, 'get_xobj': False, 'trace_with_xpos': False, 'trace_with_ypos': True, 'get_background': False, 'make_2d_plots': True, 'plot_kws': {}, 'kwargs': {'min_bar': 0.35, 'flag_profile_kwargs': {'require_multiple': True, 'make_plot': True, 'grow': 2, 'nfilt': -32}}})
rubies-egs61 target: 4233_44597 Files: 3
* Group jw04233006001_nrs1_186-prism N=3
==================================
0 jw04233006001_03101_00002_nrs1_phot.186.4233_44597.fits (24, 431)
1 jw04233006001_03101_00003_nrs1_phot.186.4233_44597.fits (24, 431)
2 jw04233006001_03101_00004_nrs1_phot.186.4233_44597.fits (24, 431)
./jw04233006001_03101_00002_nrs1_phot.186.4233_44597.fits source_type=None undo PATHLOSS_UN
./jw04233006001_03101_00003_nrs1_phot.186.4233_44597.fits source_type=None undo PATHLOSS_UN
./jw04233006001_03101_00004_nrs1_phot.186.4233_44597.fits source_type=None undo PATHLOSS_UN
Recomputed offsets slit: force [ 0.00, 5.07, -5.07] pix offsets
apply_spline_bar_correction(mode='wave')
get_normalization_correction: prism_slit_renormalize.yaml quadrant=4 xcen=291 ycen=50
flag_from_profile: 3 ( 0.0%) pixels
keys: ['jw04233006001_nrs1_186-prism']
##### Group #1 / 1: jw04233006001_nrs1_186-prism ####
fit_all_traces, iter 0
128 sigma=7.00 [ 0.000] 4062.6
129 sigma=6.09 [ 0.036] 3975.1
Exposure group 2 dchi2 = -87.5
130 sigma=6.09 [ 0.036] 4899.2
131 sigma=6.09 [ 0.036] 4899.2
Exposure group 1 dchi2 = 0.0
132 sigma=6.09 [ 0.036] 4889.9
133 sigma=6.09 [ 0.036] 4889.9
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 1
139 sigma=6.09 [ 0.036] 3968.0
140 sigma=5.96 [ 0.042] 3966.2
Exposure group 2 dchi2 = -1.7*
141 sigma=5.96 [ 0.042] 4848.2
142 sigma=5.96 [ 0.042] 4848.2
Exposure group 1 dchi2 = 0.0
143 sigma=5.96 [ 0.042] 4885.4
144 sigma=5.96 [ 0.042] 4885.4
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 2
104 sigma=5.96 [ 0.042] 3966.4
105 sigma=5.94 [ 0.042] 3966.4
Exposure group 2 dchi2 = -0.0*
106 sigma=5.94 [ 0.042] 4841.9
107 sigma=5.94 [ 0.042] 4841.9
Exposure group 1 dchi2 = 0.0
108 sigma=5.94 [ 0.042] 4886.4
109 sigma=5.94 [ 0.042] 4886.4
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 3
96 sigma=5.94 [ 0.042] 3966.4
97 sigma=5.94 [ 0.042] 3966.4
Exposure group 2 dchi2 = -0.0*
98 sigma=5.94 [ 0.042] 4841.0
99 sigma=5.94 [ 0.042] 4841.0
Exposure group 1 dchi2 = 0.0
100 sigma=5.94 [ 0.042] 4886.5
101 sigma=5.94 [ 0.042] 4886.5
Exposure group 3 dchi2 = 0.0
gratings: {'prism': ['jw04233006001_nrs1_186-prism']}
msaexp.drizzle.extract_from_hdul: Initial center = 0.00, sigma = 0.59
msaexp.drizzle.extract_from_hdul: dchi2/dcenter = 27157.9
msaexp.drizzle.extract_from_hdul: aperture extraction = (13, 1)
Added path_corr column to spec
msaexp.drizzle.extract_from_hdul: Output center = -0.00, sigma = 0.59
2024-05-16 14:24:31,146 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/lib/function_base.py:4824: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
arr.partition(
2024-05-16 14:24:31,149 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/core/fromnumeric.py:771: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
a.partition(kth, axis=axis, kind=kind, order=order)
rubies-egs61_prism-clear_4233_44597.spec.fits
2024-05-16 14:24:31,598 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/lib/function_base.py:4824: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
arr.partition(
2024-05-16 14:24:31,599 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/core/fromnumeric.py:771: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
a.partition(kth, axis=axis, kind=kind, order=order)




```python
# With sky estimation
group_kws['diffs'] = False
_ = msaexp.slit_combine.extract_spectra(
target=target,
root=outroot,
estimate_sky_kwargs=estimate_sky_kwargs,
**group_kws,
)
```
# (2024-05-16 14:24:34.926)
slit_combine.extract_spectra(**{'target': '4233_44597', 'root': 'rubies-egs61', 'path_to_files': './', 'files': None, 'do_gratings': ['PRISM', 'G395H', 'G395M', 'G235M', 'G140M'], 'join': [0, 3, 5], 'split_uncover': True, 'stuck_threshold': 0.0, 'pad_border': 2, 'sort_by_sn': False, 'position_key': 'y_index', 'mask_cross_dispersion': None, 'cross_dispersion_mask_type': 'trace', 'trace_from_yoffset': True, 'reference_exposure': 'auto', 'trace_niter': 4, 'offset_degree': 0, 'degree_kwargs': {}, 'recenter_all': False, 'nod_offset': None, 'initial_sigma': 7, 'fit_type': 1, 'initial_theta': None, 'fix_params': False, 'input_fix_sigma': None, 'fit_params_kwargs': None, 'diffs': False, 'undo_pathloss': True, 'undo_barshadow': 2, 'use_first_sky': False, 'drizzle_kws': {'step': 1, 'with_pathloss': True, 'wave_sample': 1.05, 'ny': 13, 'dkws': {'oversample': 16, 'pixfrac': 0.8}}, 'get_xobj': False, 'trace_with_xpos': False, 'trace_with_ypos': True, 'get_background': False, 'make_2d_plots': True, 'plot_kws': {}, 'kwargs': {'estimate_sky_kwargs': {'mask_yslit': [[-4.5, 4.5]], 'min_bar': 0.95, 'df': 81, 'high_clip': 1.0, 'make_plot': True}, 'min_bar': 0.35, 'flag_profile_kwargs': {'require_multiple': True, 'make_plot': True, 'grow': 2, 'nfilt': -32}}})
rubies-egs61 target: 4233_44597 Files: 3
* Group jw04233006001_nrs1_186-prism N=3
==================================
0 jw04233006001_03101_00002_nrs1_phot.186.4233_44597.fits (24, 431)
1 jw04233006001_03101_00003_nrs1_phot.186.4233_44597.fits (24, 431)
2 jw04233006001_03101_00004_nrs1_phot.186.4233_44597.fits (24, 431)
./jw04233006001_03101_00002_nrs1_phot.186.4233_44597.fits source_type=None undo PATHLOSS_UN
./jw04233006001_03101_00003_nrs1_phot.186.4233_44597.fits source_type=None undo PATHLOSS_UN
./jw04233006001_03101_00004_nrs1_phot.186.4233_44597.fits source_type=None undo PATHLOSS_UN
Recomputed offsets slit: force [ 0.00, 5.07, -5.07] pix offsets
apply_spline_bar_correction(mode='wave')
get_normalization_correction: prism_slit_renormalize.yaml quadrant=4 xcen=291 ycen=50
estimate_sky: N = 3554 , outliers > 7: 11
flag_from_profile: 9 ( 0.1%) pixels
keys: ['jw04233006001_nrs1_186-prism']
##### Group #1 / 1: jw04233006001_nrs1_186-prism ####
fit_all_traces, iter 0
230 sigma=7.00 [ 0.000] 3273.9
231 sigma=6.47 [ 0.026] 3247.8
Exposure group 2 dchi2 = -26.1
232 sigma=6.47 [ 0.026] 3241.1
233 sigma=6.47 [ 0.026] 3241.1
Exposure group 1 dchi2 = 0.0
234 sigma=6.47 [ 0.026] 4276.6
235 sigma=6.47 [ 0.026] 4276.6
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 1
51 sigma=6.47 [ 0.026] 3247.8
52 sigma=6.47 [ 0.026] 3247.8
Exposure group 2 dchi2 = 0.0*
53 sigma=6.47 [ 0.026] 3241.1
54 sigma=6.47 [ 0.026] 3241.1
Exposure group 1 dchi2 = 0.0
55 sigma=6.47 [ 0.026] 4276.6
56 sigma=6.47 [ 0.026] 4276.6
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 2
61 sigma=6.47 [ 0.026] 3247.8
62 sigma=6.47 [ 0.026] 3247.8
Exposure group 2 dchi2 = 0.0*
63 sigma=6.47 [ 0.026] 3241.1
64 sigma=6.47 [ 0.026] 3241.1
Exposure group 1 dchi2 = 0.0
65 sigma=6.47 [ 0.026] 4276.6
66 sigma=6.47 [ 0.026] 4276.6
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 3
44 sigma=6.47 [ 0.026] 3247.8
45 sigma=6.47 [ 0.026] 3247.8
Exposure group 2 dchi2 = -0.0*
46 sigma=6.47 [ 0.026] 3241.1
47 sigma=6.47 [ 0.026] 3241.1
Exposure group 1 dchi2 = 0.0
48 sigma=6.47 [ 0.026] 4276.6
49 sigma=6.47 [ 0.026] 4276.6
Exposure group 3 dchi2 = 0.0
gratings: {'prism': ['jw04233006001_nrs1_186-prism']}
msaexp.drizzle.extract_from_hdul: Initial center = 0.00, sigma = 0.65
msaexp.drizzle.extract_from_hdul: dchi2/dcenter = 36573.7
msaexp.drizzle.extract_from_hdul: aperture extraction = (13, 1)
Added path_corr column to spec
msaexp.drizzle.extract_from_hdul: Output center = -0.00, sigma = 0.65
2024-05-16 14:24:40,811 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/lib/function_base.py:4824: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
arr.partition(
2024-05-16 14:24:40,812 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/core/fromnumeric.py:771: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
a.partition(kth, axis=axis, kind=kind, order=order)
rubies-egs61_prism-clear_4233_44597.spec.fits
2024-05-16 14:24:41,245 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/lib/function_base.py:4824: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
arr.partition(
2024-05-16 14:24:41,247 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/core/fromnumeric.py:771: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
a.partition(kth, axis=axis, kind=kind, order=order)





```python
! ls *{target}.*
```
jw04233006001_03101_00002_nrs1_phot.186.4233_44597.fits
jw04233006001_03101_00003_nrs1_phot.186.4233_44597.fits
jw04233006001_03101_00004_nrs1_phot.186.4233_44597.fits
rubies-egs61_4233_44597.extract.log
rubies-egs61_4233_44597.extract.yml
rubies-egs61_prism-clear_4233_44597.d2d.png
rubies-egs61_prism-clear_4233_44597.flam.png
rubies-egs61_prism-clear_4233_44597.fnu.png
rubies-egs61_prism-clear_4233_44597.spec.fits
# Fitting and analysis
Now go to the ``spectral-extractions-2024.ipynb`` notebook for a demo on fitting the spectra for redshift, lines, etc.
# Expand sky fit for spatially-extended object
```python
# With sky estimation
group_kws['diffs'] = False
target=f'{prog}_{source_ids[1]}'
estimate_sky_kwargs['mask_yslit'] = [[-4.5, 4.5]]
estimate_sky_kwargs['high_clip'] = 0.5
_ = msaexp.slit_combine.extract_spectra(
target=target,
root=outroot,
estimate_sky_kwargs=estimate_sky_kwargs,
**group_kws,
)
```
# (2024-05-16 14:28:19.312)
slit_combine.extract_spectra(**{'target': '4233_46811', 'root': 'rubies-egs61', 'path_to_files': './', 'files': None, 'do_gratings': ['PRISM', 'G395H', 'G395M', 'G235M', 'G140M'], 'join': [0, 3, 5], 'split_uncover': True, 'stuck_threshold': 0.0, 'pad_border': 2, 'sort_by_sn': False, 'position_key': 'y_index', 'mask_cross_dispersion': None, 'cross_dispersion_mask_type': 'trace', 'trace_from_yoffset': True, 'reference_exposure': 'auto', 'trace_niter': 4, 'offset_degree': 0, 'degree_kwargs': {}, 'recenter_all': False, 'nod_offset': None, 'initial_sigma': 7, 'fit_type': 1, 'initial_theta': None, 'fix_params': False, 'input_fix_sigma': None, 'fit_params_kwargs': None, 'diffs': False, 'undo_pathloss': True, 'undo_barshadow': 2, 'use_first_sky': False, 'drizzle_kws': {'step': 1, 'with_pathloss': True, 'wave_sample': 1.05, 'ny': 13, 'dkws': {'oversample': 16, 'pixfrac': 0.8}}, 'get_xobj': False, 'trace_with_xpos': False, 'trace_with_ypos': True, 'get_background': False, 'make_2d_plots': True, 'plot_kws': {}, 'kwargs': {'estimate_sky_kwargs': {'mask_yslit': [[-4.5, 4.5]], 'min_bar': 0.95, 'df': 31, 'high_clip': 0.5, 'make_plot': True}, 'min_bar': 0.35, 'flag_profile_kwargs': {'require_multiple': True, 'make_plot': True, 'grow': 2, 'nfilt': -32}}})
rubies-egs61 target: 4233_46811 Files: 3
* Group jw04233006001_nrs1_142-prism N=3
==================================
0 jw04233006001_03101_00002_nrs1_phot.142.4233_46811.fits (25, 423)
1 jw04233006001_03101_00003_nrs1_phot.142.4233_46811.fits (25, 423)
2 jw04233006001_03101_00004_nrs1_phot.142.4233_46811.fits (25, 423)
./jw04233006001_03101_00002_nrs1_phot.142.4233_46811.fits source_type=None undo PATHLOSS_UN
./jw04233006001_03101_00003_nrs1_phot.142.4233_46811.fits source_type=None undo PATHLOSS_UN
./jw04233006001_03101_00004_nrs1_phot.142.4233_46811.fits source_type=None undo PATHLOSS_UN
Recomputed offsets slit: force [ 0.00, 5.14, -5.14] pix offsets
apply_spline_bar_correction(mode='wave')
PRISM: stuck bad shutters [-1]
get_normalization_correction: prism_slit_renormalize.yaml quadrant=4 xcen=101 ycen=157
estimate_sky: N = 2356 , outliers > 7: 12
flag_from_profile: 21 ( 0.2%) pixels
keys: ['jw04233006001_nrs1_142-prism']
##### Group #1 / 1: jw04233006001_nrs1_142-prism ####
fit_all_traces, iter 0
148 sigma=7.00 [ 0.000] 2218.2
149 sigma=10.54 [ 0.037] 1950.6
Exposure group 2 dchi2 = -267.7
150 sigma=10.54 [ 0.037] 2397.8
151 sigma=10.54 [ 0.037] 2397.8
Exposure group 1 dchi2 = 0.0
152 sigma=10.54 [ 0.037] 1918.8
153 sigma=10.54 [ 0.037] 1918.8
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 1
66 sigma=10.54 [ 0.037] 1950.6
67 sigma=10.54 [ 0.037] 1950.6
Exposure group 2 dchi2 = 0.0*
68 sigma=10.54 [ 0.037] 2397.8
69 sigma=10.54 [ 0.037] 2397.8
Exposure group 1 dchi2 = 0.0
70 sigma=10.54 [ 0.037] 1918.8
71 sigma=10.54 [ 0.037] 1918.8
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 2
56 sigma=10.54 [ 0.037] 1950.6
57 sigma=10.54 [ 0.037] 1950.6
Exposure group 2 dchi2 = 0.0*
58 sigma=10.54 [ 0.037] 2397.8
59 sigma=10.54 [ 0.037] 2397.8
Exposure group 1 dchi2 = 0.0
60 sigma=10.54 [ 0.037] 1918.8
61 sigma=10.54 [ 0.037] 1918.8
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 3
51 sigma=10.54 [ 0.037] 1950.6
52 sigma=10.54 [ 0.037] 1950.6
Exposure group 2 dchi2 = -0.0*
53 sigma=10.54 [ 0.037] 2397.8
54 sigma=10.54 [ 0.037] 2397.8
Exposure group 1 dchi2 = 0.0
55 sigma=10.54 [ 0.037] 1918.8
56 sigma=10.54 [ 0.037] 1918.8
Exposure group 3 dchi2 = 0.0
gratings: {'prism': ['jw04233006001_nrs1_142-prism']}
msaexp.drizzle.extract_from_hdul: Initial center = 0.00, sigma = 1.05
msaexp.drizzle.extract_from_hdul: dchi2/dcenter = 4089.7
msaexp.drizzle.extract_from_hdul: aperture extraction = (13, 1)
Added path_corr column to spec
msaexp.drizzle.extract_from_hdul: Output center = 0.00, sigma = 1.05
rubies-egs61_prism-clear_4233_46811.spec.fits
2024-05-16 14:28:25,620 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/lib/function_base.py:4824: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
arr.partition(
2024-05-16 14:28:25,622 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/core/fromnumeric.py:771: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
a.partition(kth, axis=axis, kind=kind, order=order)
2024-05-16 14:28:26,245 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/lib/function_base.py:4824: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
arr.partition(
2024-05-16 14:28:26,247 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/core/fromnumeric.py:771: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
a.partition(kth, axis=axis, kind=kind, order=order)





```python
estimate_sky_kwargs['mask_yslit'] = [[-4., 7.5]]
estimate_sky_kwargs['df'] = 51
result = msaexp.slit_combine.extract_spectra(
target=target,
root=outroot,
estimate_sky_kwargs=estimate_sky_kwargs,
**group_kws,
)
```
# (2024-05-16 14:45:49.699)
slit_combine.extract_spectra(**{'target': '4233_46811', 'root': 'rubies-egs61', 'path_to_files': './', 'files': None, 'do_gratings': ['PRISM', 'G395H', 'G395M', 'G235M', 'G140M'], 'join': [0, 3, 5], 'split_uncover': True, 'stuck_threshold': 0.0, 'pad_border': 2, 'sort_by_sn': False, 'position_key': 'y_index', 'mask_cross_dispersion': None, 'cross_dispersion_mask_type': 'trace', 'trace_from_yoffset': True, 'reference_exposure': 'auto', 'trace_niter': 4, 'offset_degree': 0, 'degree_kwargs': {}, 'recenter_all': False, 'nod_offset': None, 'initial_sigma': 7, 'fit_type': 1, 'initial_theta': None, 'fix_params': False, 'input_fix_sigma': None, 'fit_params_kwargs': None, 'diffs': False, 'undo_pathloss': True, 'undo_barshadow': 2, 'use_first_sky': False, 'drizzle_kws': {'step': 1, 'with_pathloss': True, 'wave_sample': 1.05, 'ny': 13, 'dkws': {'oversample': 16, 'pixfrac': 0.8}}, 'get_xobj': False, 'trace_with_xpos': False, 'trace_with_ypos': True, 'get_background': False, 'make_2d_plots': True, 'plot_kws': {}, 'kwargs': {'estimate_sky_kwargs': {'mask_yslit': [[-4.0, 7.5]], 'min_bar': 0.95, 'df': 51, 'high_clip': 0.5, 'make_plot': True}, 'min_bar': 0.35, 'flag_profile_kwargs': {'require_multiple': True, 'make_plot': True, 'grow': 2, 'nfilt': -32}}})
rubies-egs61 target: 4233_46811 Files: 3
* Group jw04233006001_nrs1_142-prism N=3
==================================
0 jw04233006001_03101_00002_nrs1_phot.142.4233_46811.fits (25, 423)
1 jw04233006001_03101_00003_nrs1_phot.142.4233_46811.fits (25, 423)
2 jw04233006001_03101_00004_nrs1_phot.142.4233_46811.fits (25, 423)
./jw04233006001_03101_00002_nrs1_phot.142.4233_46811.fits source_type=None undo PATHLOSS_UN
./jw04233006001_03101_00003_nrs1_phot.142.4233_46811.fits source_type=None undo PATHLOSS_UN
./jw04233006001_03101_00004_nrs1_phot.142.4233_46811.fits source_type=None undo PATHLOSS_UN
Recomputed offsets slit: force [ 0.00, 5.14, -5.14] pix offsets
apply_spline_bar_correction(mode='wave')
PRISM: stuck bad shutters [-1]
get_normalization_correction: prism_slit_renormalize.yaml quadrant=4 xcen=101 ycen=157
estimate_sky: N = 1882 , outliers > 7: 11
flag_from_profile: 21 ( 0.2%) pixels
keys: ['jw04233006001_nrs1_142-prism']
##### Group #1 / 1: jw04233006001_nrs1_142-prism ####
fit_all_traces, iter 0
138 sigma=7.00 [ 0.000] 2296.1
139 sigma=11.09 [ 0.024] 1937.6
Exposure group 2 dchi2 = -358.6
140 sigma=11.09 [ 0.024] 2593.1
141 sigma=11.09 [ 0.024] 2593.1
Exposure group 1 dchi2 = 0.0
142 sigma=11.09 [ 0.024] 1800.8
143 sigma=11.09 [ 0.024] 1800.8
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 1
47 sigma=11.09 [ 0.024] 1937.6
48 sigma=11.09 [ 0.024] 1937.6
Exposure group 2 dchi2 = 0.0*
49 sigma=11.09 [ 0.024] 2593.1
50 sigma=11.09 [ 0.024] 2593.1
Exposure group 1 dchi2 = 0.0
51 sigma=11.09 [ 0.024] 1800.8
52 sigma=11.09 [ 0.024] 1800.8
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 2
46 sigma=11.09 [ 0.024] 1937.6
47 sigma=11.09 [ 0.024] 1937.6
Exposure group 2 dchi2 = 0.0*
48 sigma=11.09 [ 0.024] 2593.1
49 sigma=11.09 [ 0.024] 2593.1
Exposure group 1 dchi2 = 0.0
50 sigma=11.09 [ 0.024] 1800.8
51 sigma=11.09 [ 0.024] 1800.8
Exposure group 3 dchi2 = 0.0
fit_all_traces, iter 3
52 sigma=11.09 [ 0.024] 1937.6
53 sigma=11.09 [ 0.024] 1937.6
Exposure group 2 dchi2 = 0.0*
54 sigma=11.09 [ 0.024] 2593.1
55 sigma=11.09 [ 0.024] 2593.1
Exposure group 1 dchi2 = 0.0
56 sigma=11.09 [ 0.024] 1800.8
57 sigma=11.09 [ 0.024] 1800.8
Exposure group 3 dchi2 = 0.0
gratings: {'prism': ['jw04233006001_nrs1_142-prism']}
msaexp.drizzle.extract_from_hdul: Initial center = 0.00, sigma = 1.11
msaexp.drizzle.extract_from_hdul: dchi2/dcenter = 4260.8
msaexp.drizzle.extract_from_hdul: aperture extraction = (13, 1)
Added path_corr column to spec
msaexp.drizzle.extract_from_hdul: Output center = 0.00, sigma = 1.11
rubies-egs61_prism-clear_4233_46811.spec.fits
2024-05-16 14:45:56,266 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/lib/function_base.py:4824: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
arr.partition(
2024-05-16 14:45:56,269 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/core/fromnumeric.py:771: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
a.partition(kth, axis=axis, kind=kind, order=order)
2024-05-16 14:45:56,819 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/lib/function_base.py:4824: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
arr.partition(
2024-05-16 14:45:56,820 - stpipe - WARNING - /home/codespace/.local/lib/python3.10/site-packages/numpy/core/fromnumeric.py:771: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
a.partition(kth, axis=axis, kind=kind, order=order)





```python
# Show profile near the Halpha line
hdu = result['prism']
wave_cuts = {
'continuum blue': [2.5, 2.8],
'continuum red': [3.0, 3.3],
'line': [2.85, 2.91],
}
spec = utils.read_catalog(hdu['SPEC1D'])
y0 = (hdu['SCI'].header['NAXIS2'] - 1)/2
y_arcsec = (np.arange(hdu['SCI'].header['NAXIS2']) - y0)*hdu['SCI'].header['YPIXSCL']
fig, ax = plt.subplots(1,1,figsize=(8,5), sharex=True)
profile = {}
for cut in wave_cuts:
    slx = slice(*np.round(np.interp(wave_cuts[cut], spec['wave'], np.arange(len(spec)))).astype(int))
data = hdu['SCI'].data[:, slx]
wht = hdu['WHT'].data[:, slx]
profile[cut] = np.nansum(data*wht, axis=1) / np.nansum(wht, axis=1)
ax.plot(y_arcsec, profile[cut],
alpha=0.5,
color=('0.8' if cut.startswith('continuum') else 'pink')
)
ax.set_ylabel('flux')
ax.fill_between(y_arcsec, y_arcsec*0.,
(profile['continuum red'] + profile['continuum blue']) / 2.,
color='0.4', alpha=0.3,
label='continuum',
)
ax.fill_between(y_arcsec, y_arcsec*0.,
profile['line'] - (profile['continuum red'] + profile['continuum blue']) / 2.,
color='tomato', alpha=0.3,
label='line - cont.',
)
ax.legend()
ax.set_xlabel(r'$\Delta y$, arcsec')
ax.set_xlim(-0.8, 0.8)
ax.set_ylim(-0.03, 0.3)
ax.grid()
_ = fig.tight_layout(pad=0.5)
```

|
gbrammerREPO_NAMEmsaexpPATH_START.@msaexp_extracted@msaexp-main@docs@examples@spectral-pipeline-2024.ipynb@.PATH_END.py
|
{
"filename": "net_sac.py",
"repo_name": "SarodYatawatta/hintRL",
"repo_path": "hintRL_extracted/hintRL-main/bipedal_walker/net_sac.py",
"type": "Python"
}
|
import os
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions.normal import Normal
import numpy as np
import pickle # for saving replaymemory
from PER import PER
# (try to) use a GPU for computation?
use_cuda = True
# Select the torch device once at import time; every module/tensor created
# below is moved onto `mydevice`.
mydevice = T.device('cuda' if (use_cuda and T.cuda.is_available()) else 'cpu')
# initialize all layer weights, based on the fan in
def init_layer(layer, sc=None):
    """Uniformly initialize a linear layer's weights and biases in [-sc, sc].

    When ``sc`` is None, the bound defaults to 1/sqrt(layer.weight.size(0)),
    the classic DDPG-style scale based on the weight matrix's first dimension.
    """
    # Use an explicit None check (not `sc or ...`): the original expression
    # treated a deliberate sc=0.0 as falsy and silently replaced it with the
    # default bound.
    if sc is None:
        sc = 1. / np.sqrt(layer.weight.data.size()[0])
    T.nn.init.uniform_(layer.weight.data, -sc, sc)
    T.nn.init.uniform_(layer.bias.data, -sc, sc)
class ReplayBuffer(object):
    """Circular (wrap-around) replay memory for SAC transitions.

    Besides the usual (state, action, reward, next_state, done) tuple, each
    slot also stores a 'hint' action of the same dimensionality as the action.
    """

    def __init__(self, max_size, input_shape, n_actions, name_prefix=''):
        self.mem_size = max_size
        self.mem_cntr = 0  # total number of stored transitions (never wraps)
        state_shape = (self.mem_size, input_shape)
        action_shape = (self.mem_size, n_actions)
        self.state_memory = np.zeros(state_shape, dtype=np.float32)
        self.new_state_memory = np.zeros(state_shape, dtype=np.float32)
        self.action_memory = np.zeros(action_shape, dtype=np.float32)
        self.reward_memory = np.zeros(self.mem_size, dtype=np.float32)
        self.terminal_memory = np.zeros(self.mem_size, dtype=bool)
        self.hint_memory = np.zeros(action_shape, dtype=np.float32)
        # pickle target for save_checkpoint()/load_checkpoint()
        self.filename = name_prefix + 'replaymem_sac.model'

    def store_transition(self, state, action, reward, state_, done, hint):
        """Insert one transition, overwriting the oldest slot when full."""
        slot = self.mem_cntr % self.mem_size
        self.state_memory[slot] = state
        self.new_state_memory[slot] = state_
        self.action_memory[slot] = action
        self.reward_memory[slot] = reward
        self.terminal_memory[slot] = done
        self.hint_memory[slot] = hint
        self.mem_cntr += 1

    def sample_buffer(self, batch_size):
        """Draw `batch_size` distinct transitions uniformly at random."""
        filled = min(self.mem_cntr, self.mem_size)
        picks = np.random.choice(filled, batch_size, replace=False)
        return (self.state_memory[picks],
                self.action_memory[picks],
                self.reward_memory[picks],
                self.new_state_memory[picks],
                self.terminal_memory[picks],
                self.hint_memory[picks])

    def save_checkpoint(self):
        """Pickle the whole buffer object to self.filename."""
        with open(self.filename, 'wb') as f:
            pickle.dump(self, f)

    def load_checkpoint(self):
        """Restore buffer contents from self.filename (written by save_checkpoint)."""
        with open(self.filename, 'rb') as f:
            restored = pickle.load(f)
        self.mem_size = restored.mem_size
        self.mem_cntr = restored.mem_cntr
        self.state_memory = restored.state_memory
        self.new_state_memory = restored.new_state_memory
        self.action_memory = restored.action_memory
        self.reward_memory = restored.reward_memory
        self.terminal_memory = restored.terminal_memory
        self.hint_memory = restored.hint_memory
# input: state,action output: q-value
class CriticNetwork(nn.Module):
    """Q-value network for SAC: maps a (state, action) pair to a scalar Q(s, a).

    The state and action vectors are concatenated and passed through two
    hidden ReLU layers; the output layer has a single unit.
    """

    def __init__(self, lr, n_inputs, n_actions, n_hidden, name):
        super(CriticNetwork, self).__init__()
        self.n_inputs = n_inputs # state dims
        self.n_actions = n_actions # action dims
        # first layer consumes the concatenated [state, action] vector
        self.fc1 = nn.Linear(n_inputs + n_actions, n_hidden)
        self.fc2 = nn.Linear(n_hidden, n_hidden)
        self.fc3 = nn.Linear(n_hidden, 1)
        init_layer(self.fc1)
        init_layer(self.fc2)
        init_layer(self.fc3, 0.003)  # small uniform init for the output layer
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.device = mydevice
        self.checkpoint_file = os.path.join('./', name+'_sac_critic.model')
        self.to(self.device)

    def forward(self, state, action):
        """Return Q(state, action) for batched inputs (concatenated on dim 1)."""
        x0 = T.cat([state, action], 1)
        x = F.relu(self.fc1(x0))
        x = F.relu(self.fc2(x))
        x1 = self.fc3(x)
        return x1

    def save_checkpoint(self):
        # persists weights only, not the optimizer state
        T.save(self.state_dict(), self.checkpoint_file)

    def load_checkpoint(self):
        self.load_state_dict(T.load(self.checkpoint_file))

    def load_checkpoint_for_eval(self):
        # map to CPU so GPU-trained checkpoints load on CPU-only hosts
        self.load_state_dict(T.load(self.checkpoint_file, map_location=T.device('cpu')))
# input: state output: action
class ActorNetwork(nn.Module):
    """Squashed-Gaussian policy network.

    Maps a state to (mu, log sigma); actions are sampled from the Gaussian,
    squashed with tanh and affinely rescaled to the environment's action range.
    """

    def __init__(self, lr, n_inputs, n_actions, n_hidden, name, action_space=None):
        super(ActorNetwork, self).__init__()
        self.n_inputs = n_inputs
        self.n_actions = n_actions
        # small constant keeping log() finite in the tanh correction below
        self.reparam_noise = 1e-6
        self.fc1 = nn.Linear(n_inputs, n_hidden)
        self.fc2 = nn.Linear(n_hidden, n_hidden)
        self.fc3mu = nn.Linear(n_hidden, n_actions)
        self.fc3logsigma = nn.Linear(n_hidden, n_actions)
        init_layer(self.fc1)
        init_layer(self.fc2)
        init_layer(self.fc3mu, 0.003)
        init_layer(self.fc3logsigma, 0.003) # last layer
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.device = mydevice
        self.checkpoint_file = os.path.join('./', name+'_sac_actor.model')
        # action rescaling: map tanh's [-1, 1] onto [low, high]
        if action_space is None:
            self.action_scale = T.tensor(1.)
            self.action_bias = T.tensor(0.)
        else:
            self.action_scale = T.FloatTensor(
                (action_space.high - action_space.low) / 2.)
            self.action_bias = T.FloatTensor(
                (action_space.high + action_space.low) / 2.)
        self.to(self.device)

    def forward(self, state):
        """Return (mu, logsigma) of the Gaussian policy for `state`."""
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        mu = self.fc3mu(x) #
        logsigma = self.fc3logsigma(x) #
        # clamp keeps sigma = exp(logsigma) in a numerically sane range
        logsigma = T.clamp(logsigma, min=-20, max=2)
        return mu, logsigma

    def sample_normal(self, state, reparameterize=True):
        """Draw a tanh-squashed action and its log-probability.

        With `reparameterize` True, rsample() is used so gradients flow
        through the sampling (needed for the actor loss). Returns
        (action, log_probs) with log_probs summed over action dimensions.
        """
        mu, logsigma = self.forward(state)
        sigma = logsigma.exp()
        probabilities = Normal(mu, sigma)
        if reparameterize:
            actions = probabilities.rsample()
        else:
            actions = probabilities.sample()
        # add batch dimension if missing
        if actions.dim() == 1:
            actions.unsqueeze_(0)
        actions_t = T.tanh(actions)
        # scale actions
        action = (actions_t * self.action_scale + self.action_bias).to(self.device)
        log_probs = probabilities.log_prob(actions)
        # tanh change-of-variables correction; reparam_noise avoids log(0)
        log_probs -= T.log(self.action_scale*(1-actions_t.pow(2))+self.reparam_noise)
        log_probs = log_probs.sum(1, keepdim=True)
        return action, log_probs

    def save_checkpoint(self):
        T.save(self.state_dict(), self.checkpoint_file)

    def load_checkpoint(self):
        self.load_state_dict(T.load(self.checkpoint_file))

    def load_checkpoint_for_eval(self):
        # map to CPU so GPU-trained checkpoints load on CPU-only hosts
        self.load_state_dict(T.load(self.checkpoint_file, map_location=T.device('cpu')))
class Agent():
    """Soft Actor-Critic agent with twin critics and target critics.

    Optional features: prioritized experience replay (``prioritized``) and a
    'hint' regularization that penalizes the policy for straying from
    externally supplied hint actions via an ADMM-style multiplier (``use_hint``).
    """

    def __init__(self, gamma, lr_a, lr_c, input_dims, batch_size, n_actions, n_hidden,
                 action_space, alpha, max_mem_size=100, tau=0.001, name_prefix='', hint_threshold=0.5, admm_rho=0.001, use_hint=False, prioritized=False):
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.prioritized = prioritized
        # uniform replay or prioritized experience replay (PER)
        if not self.prioritized:
            self.replaymem = ReplayBuffer(max_mem_size, input_dims, n_actions, name_prefix)
        else:
            self.replaymem = PER(max_mem_size, input_dims, n_actions, name_prefix)
        # online nets
        self.actor = ActorNetwork(lr_a, n_inputs=input_dims, n_actions=n_actions,
                                  n_hidden=n_hidden, name=name_prefix+'a_eval')
        self.critic_1 = CriticNetwork(lr_c, n_inputs=input_dims, n_actions=n_actions, n_hidden=n_hidden, name=name_prefix+'q_eval_1')
        self.critic_2 = CriticNetwork(lr_c, n_inputs=input_dims, n_actions=n_actions, n_hidden=n_hidden, name=name_prefix+'q_eval_2')
        # target nets
        self.target_critic_1 = CriticNetwork(lr_c, n_inputs=input_dims, n_actions=n_actions, n_hidden=n_hidden, name=name_prefix+'q_target_1')
        self.target_critic_2 = CriticNetwork(lr_c, n_inputs=input_dims, n_actions=n_actions, n_hidden=n_hidden, name=name_prefix+'q_target_2')
        # temperature (entropy weight); fixed unless learn_alpha is enabled
        self.alpha = T.tensor(alpha, requires_grad=False, device=mydevice)
        self.zero_tensor = T.tensor(0.).to(mydevice)
        self.learn_alpha = False  # hard-coded off; branch below is kept for enabling auto-tuning
        if self.learn_alpha:
            # target entropy: -number of bits to represent the state
            self.target_entropy = -np.sum(action_space.shape)
            self.alpha_lr = 1e-4
        self.use_hint = use_hint
        if use_hint:
            self.hint_threshold = hint_threshold
            # rho: ADMM-style multiplier for the hint constraint, grown in learn()
            self.rho = T.tensor(0.0, requires_grad=False, device=mydevice)
            self.admm_rho = admm_rho
        # initialize targets (hard copy)
        self.update_network_parameters(tau=1.)
        self.learn_counter = 0

    def update_network_parameters(self, tau=None):
        """Polyak-average online critic weights into both target critics.

        tau=1 performs a hard copy (used at construction / after loading).
        """
        if tau is None:
            tau = self.tau
        v_params = self.critic_1.named_parameters()
        v_dict = dict(v_params)
        target_v_params = self.target_critic_1.named_parameters()
        target_v_dict = dict(target_v_params)
        for name in target_v_dict:
            target_v_dict[name] = tau*v_dict[name].clone() + \
                (1-tau)*target_v_dict[name].clone()
        self.target_critic_1.load_state_dict(target_v_dict)
        v_params = self.critic_2.named_parameters()
        v_dict = dict(v_params)
        target_v_params = self.target_critic_2.named_parameters()
        target_v_dict = dict(target_v_params)
        for name in target_v_dict:
            target_v_dict[name] = tau*v_dict[name].clone() + \
                (1-tau)*target_v_dict[name].clone()
        self.target_critic_2.load_state_dict(target_v_dict)

    def store_transition(self, state, action, reward, state_, terminal, hint):
        """Forward a transition (including its hint action) to the replay memory."""
        self.replaymem.store_transition(state, action, reward, state_, terminal, hint)

    def choose_action(self, observation):
        """Sample one action for a single observation; returns a numpy vector."""
        state = T.FloatTensor(observation).to(mydevice).unsqueeze(0)
        self.actor.eval() # to disable batchnorm
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        self.actor.train() # to enable batchnorm
        return actions.cpu().detach().numpy()[0]

    def learn(self):
        """One SAC update step: critics, actor, and (periodically) alpha/rho."""
        # wait until enough transitions are available for a full batch
        if self.replaymem.mem_cntr < self.batch_size:
            return
        if not self.prioritized:
            state, action, reward, new_state, done, hint = \
                self.replaymem.sample_buffer(self.batch_size)
        else:
            state, action, reward, new_state, done, hint, idxs, is_weights = \
                self.replaymem.sample_buffer(self.batch_size)
        state_batch = T.tensor(state).to(mydevice)
        new_state_batch = T.tensor(new_state).to(mydevice)
        action_batch = T.tensor(action).to(mydevice)
        reward_batch = T.tensor(reward).to(mydevice).unsqueeze(1)
        terminal_batch = T.tensor(done).to(mydevice).unsqueeze(1)
        hint_batch = T.tensor(hint).to(mydevice)
        if self.prioritized:
            is_weight = T.tensor(is_weights).to(mydevice)
        # soft Bellman target: r + gamma * (min_i targetQ_i(s', a') - alpha*logpi)
        with T.no_grad():
            new_actions, new_log_probs = self.actor.sample_normal(new_state_batch, reparameterize=False)
            q1_new_policy = self.target_critic_1.forward(new_state_batch, new_actions)
            q2_new_policy = self.target_critic_2.forward(new_state_batch, new_actions)
            min_next_target = T.min(q1_new_policy, q2_new_policy)-self.alpha*new_log_probs
            # no bootstrapping through terminal states
            min_next_target[terminal_batch] = 0.0
            new_q_value = reward_batch+self.gamma*min_next_target
        q1_new_policy = self.critic_1.forward(state_batch, action_batch)
        q2_new_policy = self.critic_2.forward(state_batch, action_batch)
        if self.prioritized:
            # refresh PER priorities with the mean TD error of both critics
            errors1 = T.abs(q1_new_policy-new_q_value).detach().cpu().numpy()
            errors2 = T.abs(q2_new_policy-new_q_value).detach().cpu().numpy()
            self.replaymem.batch_update(idxs, 0.5*(errors1+errors2))
        if not self.prioritized:
            critic_1_loss = F.mse_loss(q1_new_policy, new_q_value)
            critic_2_loss = F.mse_loss(q2_new_policy, new_q_value)
        else:
            critic_1_loss = self.replaymem.mse(q1_new_policy, new_q_value, is_weight)
            critic_2_loss = self.replaymem.mse(q2_new_policy, new_q_value, is_weight)
        critic_loss = critic_1_loss + critic_2_loss
        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()
        # actor update: maximize min-Q of fresh (reparameterized) actions minus entropy term
        actions, log_probs = self.actor.sample_normal(state_batch, reparameterize=True)
        q1_new_policy = self.critic_1.forward(state_batch, actions)
        q2_new_policy = self.critic_2.forward(state_batch, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        if not self.use_hint:
            if not self.prioritized:
                actor_loss = (self.alpha*log_probs - critic_value).mean()
            else:
                actor_loss = (is_weight*(self.alpha*log_probs - critic_value)).mean()
            self.actor.optimizer.zero_grad()
            actor_loss.backward()
            self.actor.optimizer.step()
        else:
            # hint penalty: squared hinge on (MSE(action, hint) - threshold),
            # weighted by the augmented-Lagrangian terms rho and admm_rho
            if self.prioritized:
                gfun = (T.max(self.zero_tensor, (is_weight*(F.mse_loss(actions, hint_batch)-self.hint_threshold)).mean()).pow(2))
            else:
                gfun = (T.max(self.zero_tensor, ((F.mse_loss(actions, hint_batch)-self.hint_threshold)).mean()).pow(2))
            if not self.prioritized:
                actor_loss = (self.alpha*log_probs - critic_value).mean()+0.5*self.admm_rho*gfun*gfun+self.rho*gfun
            else:
                actor_loss = (is_weight*(self.alpha*log_probs - critic_value)).mean()+0.5*self.admm_rho*gfun*gfun+self.rho*gfun
            self.actor.optimizer.zero_grad()
            actor_loss.backward()
            self.actor.optimizer.step()
        # every 10 steps: update temperature and/or the hint multiplier rho
        if self.learn_counter % 10 == 0:
            if self.learn_alpha or self.use_hint:
                with T.no_grad():
                    actions, log_probs = self.actor.sample_normal(state_batch, reparameterize=False)
                if self.learn_alpha:
                    # gradient-free temperature adjustment toward target entropy
                    if self.prioritized:
                        self.alpha = T.max(self.zero_tensor, self.alpha+self.alpha_lr*(is_weight*(self.target_entropy-(-log_probs))).mean())
                    else:
                        self.alpha = T.max(self.zero_tensor, self.alpha+self.alpha_lr*((self.target_entropy-(-log_probs)).mean()))
                if self.use_hint:
                    if self.prioritized:
                        gfun = (T.max(self.zero_tensor, (is_weight*(F.mse_loss(actions, hint_batch)-self.hint_threshold).detach()).mean()).pow(2))
                    else:
                        gfun = (T.max(self.zero_tensor, ((F.mse_loss(actions, hint_batch)-self.hint_threshold).detach()).mean()).pow(2))
                    # dual ascent on the hint-constraint multiplier
                    self.rho += self.admm_rho*gfun
        # occasional progress printout
        if self.learn_counter % 10000 == 0:
            if self.use_hint and self.learn_alpha:
                print(f'{self.learn_counter} {self.rho} {self.alpha}')
            elif self.use_hint:
                print(f'{self.learn_counter} {self.rho}')
            elif self.learn_alpha:
                print(f'{self.learn_counter} {self.alpha}')
        self.learn_counter += 1
        # Polyak update of the target critics
        self.update_network_parameters()

    def save_models(self):
        """Checkpoint actor, both critics, and the replay memory."""
        self.actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.replaymem.save_checkpoint()

    def load_models(self):
        """Restore networks and replay memory for continued training."""
        self.actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.replaymem.load_checkpoint()
        self.actor.train()
        self.critic_1.train()
        self.critic_2.train()
        # hard-copy restored critics into the targets
        self.update_network_parameters(tau=1.)

    def load_models_for_eval(self):
        """Restore networks (CPU-mapped) and switch to eval mode."""
        self.actor.load_checkpoint_for_eval()
        self.critic_1.load_checkpoint_for_eval()
        self.critic_2.load_checkpoint_for_eval()
        self.actor.eval()
        self.critic_1.eval()
        self.critic_2.eval()
        self.update_network_parameters(tau=1.)

    def disable_hint(self):
        """Turn off the hint regularization for subsequent learn() calls."""
        self.use_hint = False

    def print(self):
        """Print the actor and first critic architectures."""
        print(self.actor)
        print(self.critic_1)
#a=Agent(gamma=0.99, batch_size=32, n_actions=2,
# max_mem_size=1000, input_dims=11, n_hidden=10, lr_a=0.001, lr_c=0.001)
|
SarodYatawattaREPO_NAMEhintRLPATH_START.@hintRL_extracted@hintRL-main@bipedal_walker@net_sac.py@.PATH_END.py
|
{
"filename": "symbols.py",
"repo_name": "bek0s/gbkfit",
"repo_path": "gbkfit_extracted/gbkfit-master/src/gbkfit/params/symbols.py",
"type": "Python"
}
|
import re
from gbkfit.params.pdescs import ParamVectorDesc
from gbkfit.utils import iterutils, stringutils
# Public API of this module.
__all__ = [
    'is_param_symbol',
    'is_param_symbol_name',
    'is_param_symbol_scalar',
    'is_param_symbol_vector',
    'is_param_symbol_vector_bindx',
    'is_param_symbol_vector_slice',
    'is_param_symbol_vector_aindx',
    'is_param_symbol_subscript',
    'is_param_symbol_subscript_bindx',
    'is_param_symbol_subscript_slice',
    'is_param_symbol_subscript_aindx',
    'is_param_attrib_symbol',
    'make_param_symbol_subscript_bindx',
    'make_param_symbol_subscript_slice',
    'make_param_symbol_subscript_aindx',
    'make_param_symbol',
    'parse_param_symbol_subscript',
    'parse_param_symbol_into_name_and_subscript_str',
    'parse_param_symbol',
    'make_param_symbols_from_name_and_indices',
    'make_param_symbols_from_names_and_indices',
    'make_param_symbols_from_pdesc',
    'make_param_symbols_from_pdescs'
]

# Negative lookahead shared by all subscript patterns: rejects zero-padded
# numbers (e.g. '[007]').
_REGEX_PARAM_SYMBOL_SUBSCRIPT_COMMON = r'(?!.*\D0+[1-9])'
# Basic index subscript, e.g. '[3]' or '[-1]'.
_REGEX_PARAM_SYMBOL_SUBSCRIPT_BINDX = (
    fr'{_REGEX_PARAM_SYMBOL_SUBSCRIPT_COMMON}'
    r'\[\s*([-+]?\s*\d+)\s*\]')
# Slice subscript, e.g. '[1:5]' or '[::2]'.
_REGEX_PARAM_SYMBOL_SUBSCRIPT_SLICE = (
    fr'{_REGEX_PARAM_SYMBOL_SUBSCRIPT_COMMON}'
    r'\[\s*([+-]?\s*\d+)?\s*:\s*([+-]?\s*\d+)?\s*(:\s*([+-]?\s*[1-9]+)?\s*)?\]')
# Advanced (list-of-indices) subscript, e.g. '[[0, 2, 4]]'.
_REGEX_PARAM_SYMBOL_SUBSCRIPT_AINDX = (
    fr'{_REGEX_PARAM_SYMBOL_SUBSCRIPT_COMMON}'
    r'\[\s*\[\s*([-+]?\s*\d+\s*,\s*)*\s*([-+]?\s*\d+\s*)?\]\s*,?\s*\]')
# Any of the three subscript forms.
_REGEX_PARAM_SYMBOL_SUBSCRIPT = (
    r'('
    fr'{_REGEX_PARAM_SYMBOL_SUBSCRIPT_BINDX}|'
    fr'{_REGEX_PARAM_SYMBOL_SUBSCRIPT_SLICE}|'
    fr'{_REGEX_PARAM_SYMBOL_SUBSCRIPT_AINDX}'
    r')')
# Parameter names follow python-identifier syntax.
_REGEX_PARAM_SYMBOL_NAME = r'[_a-zA-Z]\w*'
_REGEX_PARAM_SYMBOL_SCALAR = _REGEX_PARAM_SYMBOL_NAME
# Name + mandatory subscript (one pattern per subscript form).
_REGEX_PARAM_SYMBOL_VECTOR_BINDX = (
    fr'\s*{_REGEX_PARAM_SYMBOL_NAME}'
    fr'\s*{_REGEX_PARAM_SYMBOL_SUBSCRIPT_BINDX}\s*')
_REGEX_PARAM_SYMBOL_VECTOR_SLICE = (
    fr'\s*{_REGEX_PARAM_SYMBOL_NAME}'
    fr'\s*{_REGEX_PARAM_SYMBOL_SUBSCRIPT_SLICE}\s*')
_REGEX_PARAM_SYMBOL_VECTOR_AINDX = (
    fr'\s*{_REGEX_PARAM_SYMBOL_NAME}'
    fr'\s*{_REGEX_PARAM_SYMBOL_SUBSCRIPT_AINDX}\s*')
_REGEX_PARAM_SYMBOL_VECTOR = (
    fr'\s*{_REGEX_PARAM_SYMBOL_NAME}'
    fr'\s*{_REGEX_PARAM_SYMBOL_SUBSCRIPT}\s*')
# Name + optional subscript (the most general symbol form).
_REGEX_PARAM_SYMBOL = (
    fr'\s*{_REGEX_PARAM_SYMBOL_NAME}'
    fr'\s*{_REGEX_PARAM_SYMBOL_SUBSCRIPT}?\s*')
_REGEX_PARAM_ATTRIB_SYMBOL_NAME = r'[_a-zA-Z]\w*'
# Each predicate below returns the re.Match object (truthy) on success or
# None on failure, so the result can be used directly in boolean context.

def is_param_symbol(x):
    """Match any parameter symbol: a name with an optional subscript."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL}$', x)

def is_param_symbol_name(x):
    """Match a bare parameter name (identifier-like)."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_NAME}$', x)

def is_param_symbol_scalar(x):
    """Match a scalar parameter symbol (same syntax as a bare name)."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_SCALAR}$', x)

def is_param_symbol_vector(x):
    """Match a name followed by any subscript form."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_VECTOR}$', x)

def is_param_symbol_vector_bindx(x):
    """Match a name with a basic-index subscript, e.g. 'a[3]'."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_VECTOR_BINDX}$', x)

def is_param_symbol_vector_slice(x):
    """Match a name with a slice subscript, e.g. 'a[1:5:2]'."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_VECTOR_SLICE}$', x)

def is_param_symbol_vector_aindx(x):
    """Match a name with an advanced-index subscript, e.g. 'a[[0, 2]]'."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_VECTOR_AINDX}$', x)

def is_param_symbol_subscript(x):
    """Match a standalone subscript of any form."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_SUBSCRIPT}$', x)

def is_param_symbol_subscript_bindx(x):
    """Match a standalone basic-index subscript, e.g. '[3]'."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_SUBSCRIPT_BINDX}$', x)

def is_param_symbol_subscript_slice(x):
    """Match a standalone slice subscript, e.g. '[1:5:2]'."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_SUBSCRIPT_SLICE}$', x)

def is_param_symbol_subscript_aindx(x):
    """Match a standalone advanced-index subscript, e.g. '[[0, 2]]'."""
    return re.match(fr'^{_REGEX_PARAM_SYMBOL_SUBSCRIPT_AINDX}$', x)

def is_param_attrib_symbol(x):
    """Match a parameter attribute name (identifier-like)."""
    return re.match(fr'^{_REGEX_PARAM_ATTRIB_SYMBOL_NAME}$', x)
def make_param_symbol_subscript_bindx(index):
    """Render a basic-index subscript, e.g. 2 -> '[2]'."""
    return f'[{index}]'

def make_param_symbol_subscript_slice(start='', stop='', step=''):
    """Render a slice subscript, e.g. (1, 5, 2) -> '[1:5:2]'."""
    return f'[{start}:{stop}:{step}]'

def make_param_symbol_subscript_aindx(indices):
    """Render an advanced-index subscript, e.g. [0, 2] -> '[[0, 2]]'."""
    return f'[[{", ".join(str(i) for i in indices)}]]'
def make_param_symbol(name, indices):
    """Build a parameter symbol string from a name and optional indices.

    No indices -> bare name; one index -> basic-index form 'name[i]';
    several indices -> advanced-index form 'name[[i, j, ...]]'.
    """
    indices = iterutils.tuplify(indices, False)
    if not indices:
        return name
    if len(indices) == 1:
        subscript = make_param_symbol_subscript_bindx(indices[0])
    else:
        subscript = make_param_symbol_subscript_aindx(indices)
    return f'{name}{subscript}'
def _parse_param_symbol_subscript_bindx(x):
    """Parse a basic-index subscript '[i]' into a 1-tuple of int."""
    x = stringutils.remove_white_space(x).strip('[]')
    return int(x),

def _parse_param_symbol_subscript_slice(x, size):
    """Parse a slice subscript '[a:b:c]' into the tuple of indices it
    selects for a vector of length `size`."""
    x = stringutils.remove_white_space(x).strip('[]')
    # normalize to exactly two ':' so the 3-way split below always works
    x += ':' * (2 - x.count(':'))
    start_str, stop_str, step_str = x.split(':')
    start = int(start_str) if start_str else None
    stop = int(stop_str) if stop_str else None
    step = int(step_str) if step_str else None
    # slice.indices() resolves negatives/None against the vector size
    return tuple(range(*slice(start, stop, step).indices(size)))

def _parse_param_symbol_subscript_aindx(x):
    """Parse an advanced-index subscript '[[i, j, ...]]' into a tuple of ints."""
    x = stringutils.remove_white_space(x).strip('[],')
    return tuple([int(i) for i in x.split(',')])

def parse_param_symbol_subscript(x, size):
    """Parse any subscript form into a tuple of indices.

    Dispatches on the subscript syntax; `size` is only needed for slices.
    Raises RuntimeError on invalid subscript syntax.
    """
    if is_param_symbol_subscript_bindx(x):
        indices = _parse_param_symbol_subscript_bindx(x)
    elif is_param_symbol_subscript_slice(x):
        indices = _parse_param_symbol_subscript_slice(x, size)
    elif is_param_symbol_subscript_aindx(x):
        indices = _parse_param_symbol_subscript_aindx(x)
    else:
        raise RuntimeError(f"invalid subscript syntax: {x}")
    return indices
def parse_param_symbol_into_name_and_subscript_str(x):
    """Split a symbol into (name, subscript-string-or-None) at the first '['."""
    x = stringutils.remove_white_space(x)
    name = x[:x.find('[')].strip() if '[' in x else x
    subscript = x[x.find('['):].strip() if '[' in x else None
    return name, subscript

def parse_param_symbol(x, vector_size=None):
    """Parse a parameter symbol into (name, valid_indices, invalid_indices).

    For a scalar symbol (no subscript, vector_size is None) both index
    results are None. When vector_size is given but the symbol has no
    subscript, the full slice '[:]' is assumed. Parsed indices are split
    into in-range and out-of-range sets, and the valid ones are unwrapped
    (negative indices resolved) against vector_size.
    """
    x = stringutils.remove_white_space(x)
    name, subscript = parse_param_symbol_into_name_and_subscript_str(x)
    valid_indices = None
    invalid_indices = None
    if vector_size is not None and not subscript:
        subscript = '[:]'
    if subscript:
        indices = parse_param_symbol_subscript(subscript, vector_size)
        valid_indices, invalid_indices = iterutils.validate_sequence_indices(
            indices, vector_size)
        valid_indices = iterutils.unwrap_sequence_indices(
            valid_indices, vector_size)
    return name, valid_indices, invalid_indices
def make_param_symbols_from_name_and_indices(name, indices):
    """Expand a name plus a collection of indices into one symbol per index."""
    symbols = []
    for index in iterutils.listify(indices):
        symbols.append(make_param_symbol(name, index))
    return symbols

def make_param_symbols_from_names_and_indices(name_list, indices_list):
    """Expand parallel lists of names and index collections into symbols."""
    symbols = []
    for name, indices in zip(name_list, indices_list, strict=True):
        symbols.extend(make_param_symbols_from_name_and_indices(name, indices))
    return symbols

def make_param_symbols_from_pdesc(pdesc, override_name=None):
    """Generate all symbols described by one parameter descriptor.

    Vector descriptors expand to one symbol per element; any other
    descriptor yields a single bare-name symbol.
    """
    name = override_name if override_name else pdesc.name()
    indices = None
    if isinstance(pdesc, ParamVectorDesc):
        indices = list(range(pdesc.size()))
    return make_param_symbols_from_name_and_indices(name, indices)

def make_param_symbols_from_pdescs(pdescs, override_names=None):
    """Generate symbols for a sequence of descriptors (optionally renamed)."""
    symbols = []
    names = override_names if override_names else [d.name() for d in pdescs]
    for name, pdesc in zip(names, pdescs, strict=True):
        symbols.extend(make_param_symbols_from_pdesc(pdesc, name))
    return symbols
|
bek0sREPO_NAMEgbkfitPATH_START.@gbkfit_extracted@gbkfit-master@src@gbkfit@params@symbols.py@.PATH_END.py
|
{
"filename": "_bgcolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolargl/hoverlabel/_bgcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the scatterpolargl.hoverlabel.bgcolor property."""

    def __init__(
        self, plotly_name="bgcolor", parent_name="scatterpolargl.hoverlabel", **kwargs
    ):
        # pop defaults first so explicit kwargs win over them
        defaults = dict(
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
        )
        super(BgcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolargl@hoverlabel@_bgcolor.py@.PATH_END.py
|
{
"filename": "synth_data_2D.py",
"repo_name": "POSYDON-code/POSYDON",
"repo_path": "POSYDON_extracted/POSYDON-main/posydon/active_learning/psy_cris/synthetic_data/synth_data_2D.py",
"type": "Python"
}
|
# Module authorship metadata.
__authors__ = [
    "Kyle Akira Rocha <kylerocha2024@u.northwestern.edu>",
]
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Save outputs only when this module is executed as a script. The original
# guard compared __name__ to "main" (missing the double underscores), so it
# could never be True and the save paths were dead code.
if __name__ == "__main__":
    save_figs = True  # plots will be saved
    save_data = True  # saving sampled data to csv
else:
    save_figs = False
    save_data = False

############# Sample Params #############
Nx = 10; Ny = 10                # number of sample points along x and y
file_name = "synth_data.dat"    # output file for the sampled grid
############# Cls / Regr Geometry #############
def cls_curve(x, y):
    """Assign synthetic class labels from the regression surface.

    Labels: 2 above a cubic curve, 1 where the surface exceeds 0.4,
    -1 where it falls below -0.28, and 0 in the band in between.
    """
    vals = regr_func(x, y)
    curve_bool = (y - (-0.1) * (x ** 3) - 1) > -1  # x^3 curve
    line = np.where(curve_bool, 2, vals)
    upper, lower = 0.4, -0.28
    labelled = np.where(vals > upper, 1, line)  # banana
    labelled = np.where((labelled < upper) == (labelled > lower), 0, labelled)  # background
    labelled = np.where(vals < lower, -1, labelled)  # triangle
    return labelled
def regr_func(x, y):
    """Smooth synthetic regression surface over the (x, y) plane.

    tanh of a cubic/quadratic combination, damped by a softened
    elliptical radius so the value stays finite at the origin.
    """
    softening = 0.9        # ~1/r padding keeps the denominator positive
    semi_a = 1.
    semi_b = 1.            # ~ ellipse semi-axes
    radius = np.sqrt((x / semi_a) ** 2 + (y / semi_b) ** 2)
    return np.tanh(x ** 3 + y ** 2) / (radius + softening)
################################################
# Dense evaluation grid for visualising the analytic surfaces.
x = np.linspace(-3, 3, 140)
y = np.linspace(-3, 3, 140)
X, Y = np.meshgrid(x, y)
# %matplotlib notebook
# %matplotlib inline

# BUG FIX: a script's module name is "__main__", not "main"; the original
# guards below were always False, so no figures or data were ever produced.
if (__name__ == "__main__"):
    print("Making analytic plot...")
    fig, subs = plt.subplots(nrows=1, ncols=2, dpi=120, figsize=(10, 4),
                             gridspec_kw={'width_ratios': [0.85, 1]})
    subs[0].set_title("Classification")
    subs[0].pcolormesh(X, Y, cls_curve(X, Y))
    subs[1].set_title("Regression")
    pcm = subs[1].pcolormesh(X, Y, regr_func(X, Y), cmap="PiYG")  # PiYG
    fig.colorbar(pcm)
    my_cont = subs[1].contour(X, Y, regr_func(X, Y), colors='k',
                              levels=[-0.4, -0.3, -0.15, 0, 0.15, 0.3, 0.4], )
    my_cont.clabel(fmt='%1.2f', )
    for i in range(2):
        subs[i].set_xlabel(r"$x$", fontsize=12); subs[i].set_ylabel(r"$y$", fontsize=12)
    if save_figs: plt.savefig("cls_regr_original.png")
    plt.show()

############# Sampling #############
# x,y vals to query with classification & regression functions
x_vals = np.linspace(-3, 3, int(Nx))
y_vals = np.linspace(-3, 3, int(Ny))

if (__name__ == "__main__") and save_data:
    print("Sampling {0} points...".format(len(x_vals) * len(y_vals)))
    points = []
    for i in x_vals:
        for j in y_vals:
            points.append(np.array([i, j]))
    points = np.array(points)

    results = cls_curve(points.T[0], points.T[1])
    unique_classes = np.unique(results)
    # Re-encode the integer class labels as strings for the saved table.
    mapping = {-1: "A", 0: "B", 1: "C", 2: "D"}
    str_classification = [mapping[val] for val in results.astype(int)]

    df = pd.DataFrame()
    df["input_1"] = points.T[0]
    df["input_2"] = points.T[1]
    df["class"] = str_classification
    df["output_1"] = regr_func(points.T[0], points.T[1])
    if save_data:
        df.to_csv(file_name, index=False)
        print("Saved to '{0}'.".format(file_name))

# Also guarded on save_data: this plot uses df/results, which only exist
# when the sampling branch above has run.
if (__name__ == "__main__") and save_data:
    print("Making sampled points plot...")
    fig, subs = plt.subplots(nrows=1, ncols=2, figsize=(8, 4), dpi=120)
    subs[0].set_title("Sampled Points")
    subs[0].scatter(df["input_1"], df["input_2"], c=results, s=40, )
    subs[1].set_title("Overlay")
    subs[1].contourf(X, Y, cls_curve(X, Y), levels=20, vmin=-1, vmax=2, alpha=1.)
    # subs[1].pcolormesh( X, Y, cls_curve(X,Y), levels = 10 )
    subs[1].scatter(df["input_1"], df["input_2"], c=results, s=40, edgecolors='w')
    for i in range(2):
        subs[i].set_xlim(-3.1, 3.1); subs[i].set_ylim(-3.1, 3.1)
        subs[i].set_xlabel(r"$x$", fontsize=12); subs[i].set_ylabel(r"$y$", fontsize=12)
    if save_figs: plt.savefig("cls_regr_sampled.png")
    plt.show()
def get_raw_output_2D(x, y):
    """Get the raw output for classification and regression functions
    for the 2D synthetic data set. Original class data to strings given by
    the following relation {-1:"A", 0:"B", 1:"C", 2:"D"}."""
    # Promote scalars / lists to numpy arrays so the vectorised
    # surface functions accept them.
    if isinstance(x, (float, int)):
        x, y = np.array([x]), np.array([y])
    elif isinstance(x, list):
        x, y = np.array(x), np.array(y)
    return cls_curve(x, y), regr_func(x, y)
# For queries
def get_output_2D(x, y):
    """For a set of query points (x, y) in the range (-3, 3) return a
    DataFrame with the inputs and outputs for classification and regression.

    Parameters
    ----------
    x, y : float, int, list or ndarray
        Query coordinates; scalars and lists are promoted to arrays and
        multi-dimensional inputs are flattened.

    Returns
    -------
    pandas.DataFrame
        Columns "input_1", "input_2", "class" (string label) and "output_1".
    """
    if isinstance(x, (float, int)):
        x = np.array([x]); y = np.array([y])
    elif isinstance(x, list):
        x = np.array(x); y = np.array(y)
    cls_results, regr_out = get_raw_output_2D(x, y)
    if x.ndim > 1 or y.ndim > 1:
        cls_results = cls_results.flatten()
        regr_out = regr_out.flatten()
        x = x.flatten(); y = y.flatten()
    # BUG FIX: define the class-label mapping locally. Previously it was a
    # global created only inside the run-as-script branch, so importing this
    # module and calling get_output_2D raised NameError.
    mapping = {-1: "A", 0: "B", 1: "C", 2: "D"}
    str_results = np.array([mapping[val] for val in cls_results.astype(int)])
    data_frame = pd.DataFrame()
    data_frame["input_1"] = x
    data_frame["input_2"] = y
    data_frame["class"] = str_results
    data_frame["output_1"] = regr_out
    return data_frame
|
POSYDON-codeREPO_NAMEPOSYDONPATH_START.@POSYDON_extracted@POSYDON-main@posydon@active_learning@psy_cris@synthetic_data@synth_data_2D.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/box/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Style of the line bounding the box(es) of a box trace."""

    # class properties
    # --------------------
    _parent_path_str = "box"
    _path_str = "box.line"
    _valid_props = {"color", "width"}

    @property
    def color(self):
        """
        Sets the color of line bounding the box(es).

        The 'color' property is a color and may be specified as a hex
        string (e.g. '#ff0000'), an rgb/rgba string (e.g.
        'rgb(255,0,0)'), an hsl/hsla string (e.g. 'hsl(0,100%,50%)'),
        an hsv/hsva string (e.g. 'hsv(0,100%,100%)'), or a named CSS
        color.

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def width(self):
        """
        Sets the width (in px) of line bounding the box(es).

        The 'width' property is a number and may be specified as an
        int or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the color of line bounding the box(es).
        width
            Sets the width (in px) of line bounding the box(es).
        """

    def __init__(self, arg=None, color=None, width=None, **kwargs):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.box.Line`
        color
            Sets the color of line bounding the box(es).
        width
            Sets the width (in px) of line bounding the box(es).

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")
        # Internal fast path: re-parenting an existing node skips validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalise `arg` into a plain dict we can pop properties from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.box.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.box.Line`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over entries in `arg`;
        # None values are ignored rather than assigned.
        candidate = arg.pop("color", None)
        if color is not None:
            candidate = color
        if candidate is not None:
            self["color"] = candidate

        candidate = arg.pop("width", None)
        if width is not None:
            candidate = width
        if candidate is not None:
            self["width"] = candidate

        # Remaining keys are handled (and validated) by the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@box@_line.py@.PATH_END.py
|
{
"filename": "plot_all_orders.py",
"repo_name": "iancze/PSOAP",
"repo_path": "PSOAP_extracted/PSOAP-master/scripts/TRES/plot_all_orders.py",
"type": "Python"
}
|
from plot_order import plot

# Render every echelle order (0-50) with the shared plotting routine.
for order_index in range(51):
    plot(order_index)
|
ianczeREPO_NAMEPSOAPPATH_START.@PSOAP_extracted@PSOAP-master@scripts@TRES@plot_all_orders.py@.PATH_END.py
|
{
"filename": "_align.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter/hoverlabel/_align.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``align`` property of ``scatter.hoverlabel``."""

    def __init__(self, plotly_name="align", parent_name="scatter.hoverlabel", **kwargs):
        # Fill in defaults only where the caller did not supply a value,
        # then forward everything to the enumerated-value validator.
        defaults = {
            "array_ok": True,
            "edit_type": "none",
            "role": "style",
            "values": ["left", "right", "auto"],
        }
        for key, default in defaults.items():
            kwargs.setdefault(key, default)
        super(AlignValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter@hoverlabel@_align.py@.PATH_END.py
|
{
"filename": "gravsphere.py",
"repo_name": "justinread/gravsphere",
"repo_path": "gravsphere_extracted/gravsphere-master/gravsphere.py",
"type": "Python"
}
|
###########################################################
#GravSphere
###########################################################
#Python programme to Jeans model discrete data assuming
#a "coreNFWtides" spherical dark matter halo and some
#fixed radial profile for the "baryons" with varying
#mass to light ratio. The code and its various improvements
#is described in the following papers:
#https://ui.adsabs.harvard.edu/abs/2017MNRAS.471.4541R/abstract
#https://ui.adsabs.harvard.edu/abs/2018MNRAS.481..860R/abstract
#https://ui.adsabs.harvard.edu/abs/2019MNRAS.484.1401R/abstract
#https://ui.adsabs.harvard.edu/abs/2020MNRAS.498..144G/abstract
#https://ui.adsabs.harvard.edu/abs/2020JCAP...09..004A/abstract
#https://ui.adsabs.harvard.edu/abs/2020arXiv201003572A/abstract
#To run the code, you should first "prepare" your data in
#the format GravSphere needs using the "binulator.py" code.
#You will find examples for how to do this on real and mock
#data there.
###########################################################
#Functions for emcee Jeans fitting:
def lnprior_set_single(theta, n_betpars, bet0min, bet0max,
                       betinfmin, betinfmax,
                       betr0min, betr0max, betnmin, betnmax,
                       nu_components, nupars_min, nupars_max,
                       n_mpars, logM200low, logM200high,
                       clow, chigh, logrclow, logrchigh,
                       nlow, nhigh, logrtlow, logrthigh,
                       dellow, delhigh,
                       logMcenlow, logMcenhigh,
                       acenlow, acenhigh,
                       Arotlow, Arothigh,
                       drangelow, drangehigh,
                       Mstar_min, Mstar_max):
    """Flat (uniform) prior over the full walker parameter vector.

    Returns 0.0 when every element of ``theta`` lies strictly inside its
    (min, max) bracket and -inf otherwise.  The layout of ``theta`` is:
    4 anisotropy parameters, 2*nu_components tracer parameters, the mass
    model + central mass, rotation, distance, and finally stellar mass.
    """
    ndims = len(theta)
    lower = np.zeros(ndims)
    upper = np.zeros(ndims)

    # Velocity anisotropy parameters:
    lower[0], upper[0] = bet0min, bet0max
    lower[1], upper[1] = betinfmin, betinfmax
    lower[2], upper[2] = betr0min, betr0max
    lower[3], upper[3] = betnmin, betnmax

    # Tracer light profile (multi-Plummer) parameters:
    nstart = n_betpars
    nstop = n_betpars + nu_components * 2
    lower[nstart:nstop] = nupars_min
    upper[nstart:nstop] = nupars_max

    # Mass model, central mass, rotation and distance, in walker order:
    tail_bounds = [(logM200low, logM200high), (clow, chigh),
                   (logrclow, logrchigh), (nlow, nhigh),
                   (logrtlow, logrthigh), (dellow, delhigh),
                   (logMcenlow, logMcenhigh), (acenlow, acenhigh),
                   (Arotlow, Arothigh), (drangelow, drangehigh)]
    for offset, (lo, hi) in enumerate(tail_bounds):
        lower[nstop + offset] = lo
        upper[nstop + offset] = hi

    # Stellar mass is always the final parameter:
    lower[ndims - 1], upper[ndims - 1] = Mstar_min, Mstar_max

    theta_arr = np.asarray(theta)
    if np.all((lower < theta_arr) & (theta_arr < upper)):
        return 0.0
    return -np.inf
def lnprob_single(theta, x1, x2, y1, y1err, y2, y2err):
    """Posterior log-probability: flat prior plus Jeans likelihood."""
    prior = lnprior(theta)
    if not np.isfinite(prior):
        # Outside the prior volume -- skip the (expensive) likelihood.
        return -np.inf
    return prior + lnlike(theta, x1, x2, y1, y1err, y2, y2err)
def lnprob_single_vs(theta, x1, x2, y1, y1err,
                     y2, y2err,
                     vsp1val, vsp1pdf, vsp2val, vsp2pdf):
    """Posterior log-probability including virial shape parameter terms."""
    prior = lnprior(theta)
    if not np.isfinite(prior):
        # Outside the prior volume -- skip the (expensive) likelihood.
        return -np.inf
    return prior + lnlike(theta, x1, x2, y1, y1err,
                          y2, y2err,
                          vsp1val, vsp1pdf, vsp2val, vsp2pdf)
def lnprob_single_prop(theta, x1, x2, x3, y1, y1err,
                       y2, y2err, y3, y3err, y4, y4err):
    """Posterior log-probability including proper motion data."""
    prior = lnprior(theta)
    if not np.isfinite(prior):
        # Outside the prior volume -- skip the (expensive) likelihood.
        return -np.inf
    return prior + lnlike(theta, x1, x2, x3, y1, y1err,
                          y2, y2err, y3, y3err, y4, y4err)
def lnprob_single_prop_vs(theta, x1, x2, x3, y1, y1err,
                          y2, y2err, y3, y3err, y4, y4err,
                          vsp1val, vsp1pdf, vsp2val, vsp2pdf):
    """Posterior log-probability with proper motions and virial shape terms."""
    prior = lnprior(theta)
    if not np.isfinite(prior):
        # Outside the prior volume -- skip the (expensive) likelihood.
        return -np.inf
    return prior + lnlike(theta, x1, x2, x3, y1, y1err,
                          y2, y2err, y3, y3err, y4, y4err,
                          vsp1val, vsp1pdf, vsp2val, vsp2pdf)
def lnlike_single(theta, x1, x2, y1, y1err, y2, y2err):
    """Jeans log-likelihood for surface density + LOS dispersion data.

    Parameters
    ----------
    theta : array
        Walker vector; layout fixed by the module-level globals
        n_betpars / nu_components / n_mpars / ndim.
    x1, y1, y1err : arrays
        Photometric radii, surface density and its error.
    x2, y2, y2err : arrays
        Kinematic radii, LOS dispersion (km/s) and its error.

    Returns
    -------
    float
        Gaussian log-likelihood; -inf for NaN models.
    """
    # Unpack the walker vector:
    betpars = theta[0:n_betpars]
    nupars = theta[n_betpars:n_betpars+nu_components*2]
    Mpars = theta[n_betpars+nu_components*2:\
                  n_betpars+nu_components*2+n_mpars]
    Arot = theta[n_betpars+nu_components*2+n_mpars]
    drange = theta[n_betpars+nu_components*2+n_mpars+1]
    Mstar = theta[ndim-1]
    nuparsu = np.array(nupars)
    Mparsu = np.array(Mpars)
    # These mass parameters are sampled in log-space; convert to linear:
    Mparsu[0] = 10.**Mpars[0]
    Mparsu[2] = 10.**Mpars[2]
    Mparsu[4] = 10.**Mpars[4]
    Mparsu[6] = 10.**Mpars[6]
    # Add dummy data points for low and high x1
    # to ensure +ve definite surface density
    # if using negative Plummer components:
    if (nuparsu[0] < 0 or nuparsu[1] < 0 or nuparsu[2] < 0):
        x1, y1, y1err = Sig_addpnts(x1, y1, y1err)
    sigr2, Sig, sigLOS2 = \
        sigp_fit(x1*drange, x2*drange,
                 nuparsu, Mparsu, betpars, Mstar, Arot)
    # Shrink the error wherever the total density is negative to
    # disfavour those models (see binulator_surffuncs.py for details).
    # NOTE(review): this tests the anisotropy entries theta[0:3]; the
    # negative-Plummer amplitudes (nuparsu) may have been intended -- confirm.
    # BUG FIX: np.float was removed from NumPy (deprecated 1.20,
    # removed 1.24); the builtin float is the documented replacement.
    if (theta[0] < 0 or theta[1] < 0 or theta[2] < 0):
        if (np.min(Sig) < 0):
            y1err[np.where(Sig < 0)] = \
                np.min(y1err)/float(len(x1))/1.0e3
    # Handle distance uncertainty. Convert distances to
    # angle units before doing model-data comparison:
    duse = dgal_kpc * drange
    x1u = x1 / dgal_kpc
    x2u = x2 / dgal_kpc
    # Model predictions in data units (dispersion m/s -> km/s):
    model1 = Sig
    model2 = np.sqrt(sigLOS2)/1000.
    inv_sigma2_1 = 1.0/y1err**2
    inv_sigma2_2 = 1.0/y2err**2
    lnlike_out = -0.5*(np.sum((y1-model1)**2*inv_sigma2_1)+\
                       np.sum((y2-model2)**2*inv_sigma2_2))
    if (cosmo_cprior == 'yes'):
        # Add the concentration to the likelihood function
        # as a Gaussian in logspace:
        M200 = Mparsu[0]
        log_conc = np.log10(Mparsu[1])
        log_cmean = np.log10(cosmo_cfunc(M200, h))
        lnlike_out = lnlike_out - \
            (log_conc-log_cmean)**2.0/(2.0*sig_c200**2.0)
    # Reject NaN models outright:
    if np.isnan(lnlike_out):
        lnlike_out = -np.inf
    return lnlike_out
def lnlike_single_vs(theta, x1, x2, y1, y1err, y2, y2err,
                     vsp1val, vsp1pdf, vsp2val, vsp2pdf):
    """Jeans log-likelihood with virial shape parameters (VSPs).

    As lnlike_single, plus two extra terms evaluating the model VSPs
    (scaled by 1e12) against their non-Gaussian binulator PDFs.

    Returns
    -------
    float
        Log-likelihood; -inf for NaN models.
    """
    # Unpack the walker vector:
    betpars = theta[0:n_betpars]
    nupars = theta[n_betpars:n_betpars+nu_components*2]
    Mpars = theta[n_betpars+nu_components*2:\
                  n_betpars+nu_components*2+n_mpars]
    Arot = theta[n_betpars+nu_components*2+n_mpars]
    drange = theta[n_betpars+nu_components*2+n_mpars+1]
    Mstar = theta[ndim-1]
    nuparsu = np.array(nupars)
    Mparsu = np.array(Mpars)
    # These mass parameters are sampled in log-space; convert to linear:
    Mparsu[0] = 10.**Mpars[0]
    Mparsu[2] = 10.**Mpars[2]
    Mparsu[4] = 10.**Mpars[4]
    Mparsu[6] = 10.**Mpars[6]
    # Add dummy data points for low and high x1
    # to ensure +ve definite surface density
    # if using negative Plummer components:
    if (nuparsu[0] < 0 or nuparsu[1] < 0 or nuparsu[2] < 0):
        x1, y1, y1err = Sig_addpnts(x1, y1, y1err)
    sigr2, Sig, sigLOS2, vs1, vs2 = \
        sigp_fit_vs(x1*drange, x2*drange,
                    nuparsu, Mparsu, betpars, Mstar, Arot)
    # Shrink the error wherever the total density is negative to
    # disfavour those models (see binulator_surffuncs.py for details).
    # NOTE(review): this tests the anisotropy entries theta[0:3]; the
    # negative-Plummer amplitudes (nuparsu) may have been intended -- confirm.
    # BUG FIX: np.float was removed from NumPy (deprecated 1.20,
    # removed 1.24); the builtin float is the documented replacement.
    if (theta[0] < 0 or theta[1] < 0 or theta[2] < 0):
        if (np.min(Sig) < 0):
            y1err[np.where(Sig < 0)] = \
                np.min(y1err)/float(len(x1))/1.0e3
    # Handle distance uncertainty. Convert distances to
    # angle units before doing model-data comparison:
    duse = dgal_kpc * drange
    x1u = x1 / dgal_kpc
    x2u = x2 / dgal_kpc
    # Model predictions in data units (dispersion m/s -> km/s;
    # VSPs scaled down by 1e12):
    model1 = Sig
    model2 = np.sqrt(sigLOS2)/1000.
    model3 = vs1/1.0e12
    model4 = vs2/1.0e12
    inv_sigma2_1 = 1.0/y1err**2
    inv_sigma2_2 = 1.0/y2err**2
    lnlike_out = -0.5*(np.sum((y1-model1)**2*inv_sigma2_1)+\
                       np.sum((y2-model2)**2*inv_sigma2_2))+\
                 np.log(vsp_pdf(model3, vsp1val, vsp1pdf))+\
                 np.log(vsp_pdf(model4, vsp2val, vsp2pdf))
    if (cosmo_cprior == 'yes'):
        # Add the concentration to the likelihood function
        # as a Gaussian in logspace:
        M200 = Mparsu[0]
        log_conc = np.log10(Mparsu[1])
        log_cmean = np.log10(cosmo_cfunc(M200, h))
        lnlike_out = lnlike_out - \
            (log_conc-log_cmean)**2.0/(2.0*sig_c200**2.0)
    # Reject NaN models outright:
    if np.isnan(lnlike_out):
        lnlike_out = -np.inf
    return lnlike_out
def lnlike_single_prop(theta, x1, x2, x3, y1, y1err, y2, y2err,
                       y3, y3err, y4, y4err):
    """Jeans log-likelihood including proper motion dispersions.

    As lnlike_single, plus Gaussian terms for the radial (y3) and
    tangential (y4) proper motion dispersions at radii x3; the proper
    motion data and models are compared in angle units (divided by the
    distance in use).

    Returns
    -------
    float
        Log-likelihood; -inf for NaN models.
    """
    # Unpack the walker vector:
    betpars = theta[0:n_betpars]
    nupars = theta[n_betpars:n_betpars+nu_components*2]
    Mpars = theta[n_betpars+nu_components*2:\
                  n_betpars+nu_components*2+n_mpars]
    Arot = theta[n_betpars+nu_components*2+n_mpars]
    drange = theta[n_betpars+nu_components*2+n_mpars+1]
    Mstar = theta[ndim-1]
    nuparsu = np.array(nupars)
    Mparsu = np.array(Mpars)
    # These mass parameters are sampled in log-space; convert to linear:
    Mparsu[0] = 10.**Mpars[0]
    Mparsu[2] = 10.**Mpars[2]
    Mparsu[4] = 10.**Mpars[4]
    Mparsu[6] = 10.**Mpars[6]
    # Add dummy data points for low and high x1
    # to ensure +ve definite surface density
    # if using negative Plummer components:
    if (nuparsu[0] < 0 or nuparsu[1] < 0 or nuparsu[2] < 0):
        x1, y1, y1err = Sig_addpnts(x1, y1, y1err)
    sigr2, Sig, sigLOS2, sigpmr2, sigpmt2 = \
        sigp_fit_prop(x1*drange, x2*drange, x3*drange,
                      nuparsu, Mparsu, betpars, Mstar, Arot)
    # Shrink the error wherever the total density is negative to
    # disfavour those models (see binulator_surffuncs.py for details).
    # NOTE(review): this tests the anisotropy entries theta[0:3]; the
    # negative-Plummer amplitudes (nuparsu) may have been intended -- confirm.
    # BUG FIX: np.float was removed from NumPy (deprecated 1.20,
    # removed 1.24); the builtin float is the documented replacement.
    if (theta[0] < 0 or theta[1] < 0 or theta[2] < 0):
        if (np.min(Sig) < 0):
            y1err[np.where(Sig < 0)] = \
                np.min(y1err)/float(len(x1))/1.0e3
    # Handle distance uncertainty. Convert distances to
    # angle units before doing model-data comparison:
    duse = dgal_kpc * drange
    x1u = x1 / dgal_kpc
    x2u = x2 / dgal_kpc
    x3u = x3 / dgal_kpc
    y3u = y3 / dgal_kpc
    y3erru = y3err / dgal_kpc
    y4u = y4 / dgal_kpc
    y4erru = y4err / dgal_kpc
    # Model predictions in data units (dispersions m/s -> km/s; proper
    # motion dispersions additionally divided by the sampled distance):
    model1 = Sig
    model2 = np.sqrt(sigLOS2)/1000.
    model3 = np.sqrt(sigpmr2)/1000. / duse
    model4 = np.sqrt(sigpmt2)/1000. / duse
    inv_sigma2_1 = 1.0/y1err**2
    inv_sigma2_2 = 1.0/y2err**2
    inv_sigma2_3 = 1.0/y3erru**2
    inv_sigma2_4 = 1.0/y4erru**2
    lnlike_out = -0.5*(np.sum((y1-model1)**2*inv_sigma2_1)+\
                       np.sum((y2-model2)**2*inv_sigma2_2)+\
                       np.sum((y3u-model3)**2*inv_sigma2_3)+\
                       np.sum((y4u-model4)**2*inv_sigma2_4))
    if (cosmo_cprior == 'yes'):
        # Add the concentration to the likelihood function
        # as a Gaussian in logspace:
        M200 = Mparsu[0]
        log_conc = np.log10(Mparsu[1])
        log_cmean = np.log10(cosmo_cfunc(M200, h))
        lnlike_out = lnlike_out - \
            (log_conc-log_cmean)**2.0/(2.0*sig_c200**2.0)
    # Reject NaN models outright:
    if np.isnan(lnlike_out):
        lnlike_out = -np.inf
    return lnlike_out
def lnlike_single_prop_vs(theta, x1, x2, x3, y1, y1err, y2, y2err,
                          y3, y3err, y4, y4err,
                          vsp1val, vsp1pdf, vsp2val, vsp2pdf):
    """Jeans log-likelihood with proper motions AND virial shape parameters.

    Combines the proper motion terms of lnlike_single_prop with the VSP
    PDF terms of lnlike_single_vs.

    Returns
    -------
    float
        Log-likelihood; -inf for NaN models.
    """
    # Unpack the walker vector:
    betpars = theta[0:n_betpars]
    nupars = theta[n_betpars:n_betpars+nu_components*2]
    Mpars = theta[n_betpars+nu_components*2:\
                  n_betpars+nu_components*2+n_mpars]
    Arot = theta[n_betpars+nu_components*2+n_mpars]
    drange = theta[n_betpars+nu_components*2+n_mpars+1]
    Mstar = theta[ndim-1]
    nuparsu = np.array(nupars)
    Mparsu = np.array(Mpars)
    # These mass parameters are sampled in log-space; convert to linear:
    Mparsu[0] = 10.**Mpars[0]
    Mparsu[2] = 10.**Mpars[2]
    Mparsu[4] = 10.**Mpars[4]
    Mparsu[6] = 10.**Mpars[6]
    # Add dummy data points for low and high x1
    # to ensure +ve definite surface density
    # if using negative Plummer components:
    if (nuparsu[0] < 0 or nuparsu[1] < 0 or nuparsu[2] < 0):
        x1, y1, y1err = Sig_addpnts(x1, y1, y1err)
    sigr2, Sig, sigLOS2, sigpmr2, sigpmt2, vs1, vs2 = \
        sigp_fit_prop_vs(x1*drange, x2*drange, x3*drange,
                         nuparsu, Mparsu, betpars, Mstar, Arot)
    # Shrink the error wherever the total density is negative to
    # disfavour those models (see binulator_surffuncs.py for details).
    # NOTE(review): this tests the anisotropy entries theta[0:3]; the
    # negative-Plummer amplitudes (nuparsu) may have been intended -- confirm.
    # BUG FIX: np.float was removed from NumPy (deprecated 1.20,
    # removed 1.24); the builtin float is the documented replacement.
    if (theta[0] < 0 or theta[1] < 0 or theta[2] < 0):
        if (np.min(Sig) < 0):
            y1err[np.where(Sig < 0)] = \
                np.min(y1err)/float(len(x1))/1.0e3
    # Handle distance uncertainty. Convert distances to
    # angle units before doing model-data comparison:
    duse = dgal_kpc * drange
    x1u = x1 / dgal_kpc
    x2u = x2 / dgal_kpc
    x3u = x3 / dgal_kpc
    y3u = y3 / dgal_kpc
    y3erru = y3err / dgal_kpc
    y4u = y4 / dgal_kpc
    y4erru = y4err / dgal_kpc
    # Model predictions in data units (dispersions m/s -> km/s; proper
    # motions divided by distance; VSPs scaled down by 1e12):
    model1 = Sig
    model2 = np.sqrt(sigLOS2)/1000.
    model3 = np.sqrt(sigpmr2)/1000. / duse
    model4 = np.sqrt(sigpmt2)/1000. / duse
    model5 = vs1/1.0e12
    model6 = vs2/1.0e12
    inv_sigma2_1 = 1.0/y1err**2
    inv_sigma2_2 = 1.0/y2err**2
    inv_sigma2_3 = 1.0/y3erru**2
    inv_sigma2_4 = 1.0/y4erru**2
    lnlike_out = -0.5*(np.sum((y1-model1)**2*inv_sigma2_1)+\
                       np.sum((y2-model2)**2*inv_sigma2_2)+\
                       np.sum((y3u-model3)**2*inv_sigma2_3)+\
                       np.sum((y4u-model4)**2*inv_sigma2_4))+\
                 np.log(vsp_pdf(model5, vsp1val, vsp1pdf))+\
                 np.log(vsp_pdf(model6, vsp2val, vsp2pdf))
    if (cosmo_cprior == 'yes'):
        # Add the concentration to the likelihood function
        # as a Gaussian in logspace:
        M200 = Mparsu[0]
        log_conc = np.log10(Mparsu[1])
        log_cmean = np.log10(cosmo_cfunc(M200, h))
        lnlike_out = lnlike_out - \
            (log_conc-log_cmean)**2.0/(2.0*sig_c200**2.0)
    # Reject NaN models outright:
    if np.isnan(lnlike_out):
        lnlike_out = -np.inf
    return lnlike_out
###########################################################
#Main code:
#Suppress warning output:
import warnings
warnings.simplefilter("ignore")
#Forbid plots to screen so GravSphere can run
#remotely:
import matplotlib as mpl
mpl.use('Agg')
#Imports & dependencies:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
import emcee
from multiprocessing import Pool
from multiprocessing import cpu_count
from scipy.integrate import simps as integrator
from functions import *
from constants import *
from binulator_surffuncs import *
from binulator_velfuncs import *
from figures import *
import sys
#Welcome blurb:
print('###### GRAVSPHERE VERSION 1.5 ######\n')
#Default to run on a single CPU:
nprocs = 1
###########################################################
#Code parameters:
datadir = './Data/'
nwalkers = 250
nmodels = 50000
#Codemode [run or plot]:
codemode = 'plot'
###########################################################
#Input data selection here.
#MW satellites:
#from gravsphere_initialise_Draco import *
#from gravsphere_initialise_UMi import *
#from gravsphere_initialise_Carina import *
#from gravsphere_initialise_LeoI import *
#from gravsphere_initialise_LeoII import *
#from gravsphere_initialise_Sextans import *
#from gravsphere_initialise_Sculptor import *
#from gravsphere_initialise_Fornax import *
#from gravsphere_initialise_CVnI import *
#from gravsphere_initialise_SegI import *
from gravsphere_initialise_SMC import *
#from gravsphere_initialise_Ocen import *
#Mocks:
#from gravsphere_initialise_PlumCoreOm import *
#from gravsphere_initialise_PlumCuspOm import *
#from gravsphere_initialise_SMCmock import *
#from gravsphere_initialise_Ocenmock import *
#M31 satellites:
#from gravsphere_initialise_And21 import *
#Output some key choices:
print('Running on %d cores' % (nprocs))
print('Doing galaxy:',whichgal)
print('Model parameters:')
print('M200low, M200high [1e9 Msun]:', \
10.0**logM200low/1.0e9, 10.0**logM200high/1.0e9)
print('clow, chigh:', clow, chigh)
if (cosmo_cprior == 'yes'):
if (mWDM > 0):
print('Warm dark matter cosmology with mWDM(keV):',mWDM)
else:
print('Cold dark matter cosmology')
if (logMcenlow > 0.0):
print('Including central dark mass in the range [1e6 Msun]:', \
10.0**logMcenlow/1.0e6, 10.0**logMcenhigh/1.0e6)
if (Arothigh > 1.0e-3):
print('Including rotation')
if (drangelow < 0.999):
print('Allowing distance to vary in the fit over the range [kpc]:', \
dgal_kpc*drangelow, dgal_kpc*drangehigh)
#Set up output data folder structure:
outdir = outdirbase
if (propermotion == 'yes'):
outdir = outdir + 'Propermotion/'
if (virialshape == 'yes'):
outdir = outdir + 'VirialShape/'
if (cosmo_cprior == 'yes'):
outdir = outdir + 'CosmoC/'
#Set tracer model and vel. ani. functions:
n_betpars = 4
nu = multiplumden
nu_components = 3
Sigfunc = multiplumsurf
n_nupars = nu_components * 2
if (propermotion == 'no'):
if (virialshape == 'no'):
lnlike = lnlike_single
lnprob = lnprob_single
lnprior_set = lnprior_set_single
else:
lnlike = lnlike_single_vs
lnprob = lnprob_single_vs
lnprior_set = lnprior_set_single
elif (propermotion == 'yes'):
if (virialshape == 'no'):
lnlike = lnlike_single_prop
lnprob = lnprob_single_prop
lnprior_set = lnprior_set_single
else:
lnlike = lnlike_single_prop_vs
lnprob = lnprob_single_prop_vs
lnprior_set = lnprior_set_single
###########################################################
#Read in the the data. We assume here that the errors
#on the velocity dispersion are Gaussian symmetric.
#If this is not a good approximation (c.f. output from the
#binulator), then this can be improved. The errors on the
#VSPs are rarely Gaussian symmetric and so we use the
#correct likelihood function from the binulator in this case.
data = np.genfromtxt(infile+'_p0best.txt',dtype='f8')
pfits = data
data = np.genfromtxt(infile+'_Rhalf.txt',dtype='f8')
Rhalf = data[0]
data = np.genfromtxt(infile+'_surfden.txt',dtype='f8')
rbin_phot = data[:,0]
surfden = data[:,1]
surfdenerr = data[:,2]
data = np.genfromtxt(infile+'_vel.txt',dtype='f8')
rbin_kin = data[:,0]
sigpmean = data[:,4]
sigperr = (data[:,6]-data[:,5])/2.0
data = np.genfromtxt(infile+'_vsps.txt',dtype='f8')
vs1bin = data[0,0]
vs1err = (data[0,2]-data[0,1])/2.0
vs1lo = data[0,1]
vs1hi = data[0,2]
vs2bin = data[1,0]
vs2err = (data[1,2]-data[1,1])/2.0
vs2lo = data[1,1]
vs2hi = data[1,2]
data = np.genfromtxt(infile+'_vsp1full.txt',dtype='f8')
vsp1val, vsp1pdf = vsppdf_calc(data)
data = np.genfromtxt(infile+'_vsp2full.txt',dtype='f8')
vsp2val, vsp2pdf = vsppdf_calc(data)
if (propermotion == 'yes'):
data = np.genfromtxt(infile+'_velproptan.txt',dtype='f8')
rbin_kinp = data[:,0]
sigpmt = data[:,4]
sigpmterr = (data[:,6]-data[:,5])/2.0
data = np.genfromtxt(infile+'_velpropR.txt',dtype='f8')
rbin_kinp2 = data[:,0]
sigpmr = data[:,4]
sigpmrerr = (data[:,6]-data[:,5])/2.0
#Check data:
if (np.sum(rbin_kinp-rbin_kinp2) != 0):
print('Need same radial binning for tangential and radial propermotions. Oops! Bye bye.')
sys.exit(0)
print('Inner/outer radial bin (phot):', \
np.min(rbin_phot),np.max(rbin_phot))
print('Inner/outer radial bin (kin):', \
np.min(rbin_kin),np.max(rbin_kin))
#Set up the baryonic mass profile. If this is
#not assumed to have the same radial profile
#as the tracer stars, then it must be set, above,
#in the galaxy initialisation script. This
#should be normalised to peak at 1.0 so that
#when multiplied by Mstar, it yields the total
#stellar mass.
# Baryonic mass profile traced by the stars: a three-Plummer profile whose
# shape comes from the photometric fit (pfits), with component amplitudes
# normalised to sum to one (the total stellar mass is applied separately).
if (baryonmass_follows_tracer == 'yes'):
    if (barrad_min == 0):
        # Avoid log10(0) on the logarithmic radial grid:
        barrad_min = 1.0e-3
    # BUG FIX: np.int was removed from NumPy (deprecated 1.20, removed
    # 1.24); the builtin int is the documented replacement.
    Mstar_rad = np.logspace(np.log10(barrad_min),
                            np.log10(barrad_max), int(bar_pnts))
    norm = pfits[0] + pfits[1] + pfits[2]
    Mstar_prof = \
        threeplummass(Mstar_rad, pfits[0]/norm,
                      pfits[1]/norm, pfits[2]/norm,
                      pfits[3], pfits[4], pfits[5])
#Set beta scale radius based on Rhalf:
betr0min = np.log10(0.5*Rhalf)
betr0max = np.log10(2.0*Rhalf)
#Set Jeans radial-grid based also on Rhalf:
if (rmax < 0):
rmin = Rhalf / 100.0
rmax = Rhalf * 50.0
print('Inner/outer radial Jeans grid:', rmin, rmax)
#Set up the mass model functions:
M = lambda r, Mpars: \
corenfw_tides_mass(r,Mpars[0],Mpars[1],Mpars[2],\
Mpars[3],Mpars[4],Mpars[5])
rho = lambda r, Mpars: \
corenfw_tides_den(r,Mpars[0],Mpars[1],Mpars[2],\
Mpars[3],Mpars[4],Mpars[5])
dlnrhodlnr = lambda r, Mpars: \
corenfw_tides_dlnrhodlnr(r,Mpars[0],Mpars[1],Mpars[2],\
Mpars[3],Mpars[4],Mpars[5])
#Central dark mass:
Mcentral = lambda r, Mpars: \
plummass(r,Mpars[6:8])
n_mpars = 8
#Set min/max priors on the stellar mass:
Mstar_min = Mstar - Mstar_err
Mstar_max = Mstar + Mstar_err
#Set up the Jeans functions to use for the fit:
#N.B. +6 on the ndims here corresponds to:
#varying stellar mass to light ratio (+1)
#varying distance (+1)
#varying rotation parameter (+1)
ndim = n_betpars + n_nupars + n_mpars + 3
if (propermotion == 'no'):
if (virialshape == 'no'):
sigp_fit = lambda r1,r2,nupars,Mpars,betpars,Mstar,Arot: \
sigp(r1,r2,nu,Sigfunc,M,Mcentral,beta,betaf,nupars,Mpars,betpars,\
Mstar_rad,Mstar_prof,Mstar,Arot,Rhalf,Guse,rmin,rmax)
else:
sigp_fit_vs = lambda r1,r2,nupars,Mpars,betpars,Mstar,Arot: \
sigp_vs(r1,r2,nu,Sigfunc,M,Mcentral,beta,betaf,nupars,Mpars,betpars,\
Mstar_rad,Mstar_prof,Mstar,Arot,Rhalf,Guse,rmin,rmax)
elif (propermotion == 'yes'):
if (virialshape == 'no'):
sigp_fit_prop = lambda r1,r2,r3,nupars,Mpars,betpars,Mstar,Arot: \
sigp_prop(r1,r2,r3,nu,Sigfunc,M,Mcentral,beta,betaf,nupars,Mpars,betpars,\
Mstar_rad,Mstar_prof,Mstar,Arot,Rhalf,Guse,rmin,rmax)
else:
sigp_fit_prop_vs = lambda r1,r2,r3,nupars,Mpars,betpars,Mstar,Arot: \
sigp_prop_vs(r1,r2,r3,nu,Sigfunc,M,Mcentral,beta,betaf,nupars,Mpars,betpars,\
Mstar_rad,Mstar_prof,Mstar,Arot,Rhalf,Guse,rmin,rmax)
#Set the priors and starting blob for the tracer density profile.
#Code is a bit more involved here just to cope with potentially
#negative Plummer masses, used to fit some steeply falling tracer
#density profiles:
nupars_min = np.zeros(len(pfits))
nupars_max = np.zeros(len(pfits))
nupars_minstart = np.zeros(len(pfits))
nupars_maxstart = np.zeros(len(pfits))
for i in range(len(pfits)):
if (pfits[i] > 0):
nupars_min[i] = pfits[i]*(1.0-tracertol)
nupars_max[i] = pfits[i]*(1.0+tracertol)
else:
nupars_min[i] = pfits[i]*(1.0+tracertol)
nupars_max[i] = pfits[i]*(1.0-tracertol)
if (tracertol < 0.01):
nupars_minstart[i] = nupars_min[i]
nupars_maxstart[i] = nupars_max[i]
else:
if (pfits[i] > 0):
nupars_minstart[i] = pfits[i]*0.99
nupars_maxstart[i] = pfits[i]*1.01
else:
nupars_minstart[i] = pfits[i]*1.01
nupars_maxstart[i] = pfits[i]*0.99
###########################################################
#Emcee fitting code:
#Set up walkers:
if (codemode == 'run'):
print('Running in fitting mode ... ')
print('Will write output to:', outdir)
#Initialise the walkers:
pos = np.zeros((nwalkers, ndim), dtype='float')
pos[:,0] = np.random.uniform(bet0min,bet0max,nwalkers)
pos[:,1] = np.random.uniform(betinfmin,betinfmax,nwalkers)
pos[:,2] = np.random.uniform(betr0min,betr0max,nwalkers)
pos[:,3] = np.random.uniform(betnmin,betnmax,nwalkers)
for i in range(len(pfits)):
pos[:,n_betpars+i] = \
np.random.uniform(nupars_minstart[i],\
nupars_maxstart[i],nwalkers)
pos[:,n_betpars+nu_components*2] = \
np.random.uniform(logM200low,logM200high,nwalkers)
pos[:,n_betpars+nu_components*2+1] = \
np.random.uniform(clow,chigh,nwalkers)
pos[:,n_betpars+nu_components*2+2] = \
np.random.uniform(logrclow,logrchigh,nwalkers)
pos[:,n_betpars+nu_components*2+3] = \
np.random.uniform(nlow,nhigh,nwalkers)
pos[:,n_betpars+nu_components*2+4] = \
np.random.uniform(logrtlow,logrthigh,nwalkers)
pos[:,n_betpars+nu_components*2+5] = \
np.random.uniform(dellow,delhigh,nwalkers)
pos[:,n_betpars+nu_components*2+6] = \
np.random.uniform(logMcenlow,logMcenhigh,nwalkers)
pos[:,n_betpars+nu_components*2+7] = \
np.random.uniform(acenlow,acenhigh,nwalkers)
pos[:,n_betpars+nu_components*2+8] = \
np.random.uniform(Arotlow,Arothigh,nwalkers)
pos[:,n_betpars+nu_components*2+9] = \
np.random.uniform(drangelow,drangehigh,nwalkers)
pos[:,ndim-1] = \
np.random.uniform(Mstar_min,Mstar_max,nwalkers)
        #Set up fitting function and priors:
        #x1/y1 = photometric radii & surface density; x2/y2 =
        #kinematic radii & LOS dispersion; x3/y3,y4 add the
        #proper-motion radii with radial/tangential dispersions
        #when proper motions are available.
        if (propermotion == 'no'):
            x1 = rbin_phot
            x2 = rbin_kin
            y1 = surfden
            y1err = surfdenerr
            y2 = sigpmean
            y2err = sigperr
        elif (propermotion == 'yes'):
            x1 = rbin_phot
            x2 = rbin_kin
            x3 = rbin_kinp
            y1 = surfden
            y1err = surfdenerr
            y2 = sigpmean
            y2err = sigperr
            y3 = sigpmr
            y3err = sigpmrerr
            y4 = sigpmt
            y4err = sigpmterr
        #Bind the current prior ranges into a single-argument prior
        #function for the sampler:
        lnprior = lambda theta: \
            lnprior_set(theta,n_betpars,bet0min,bet0max,\
                        betinfmin,betinfmax,\
                        betr0min,betr0max,betnmin,betnmax,\
                        nu_components,nupars_min,nupars_max,\
                        n_mpars,logM200low,logM200high,\
                        clow,chigh,logrclow,logrchigh,\
                        nlow,nhigh,logrtlow,logrthigh,\
                        dellow,delhigh,\
                        logMcenlow,logMcenhigh,\
                        acenlow,acenhigh,\
                        Arotlow,Arothigh,\
                        drangelow,drangehigh,\
                        Mstar_min,Mstar_max)
        print('Running chains ... ')
        #Run emcee over a multiprocessing pool. The likelihood
        #argument list passed to lnprob depends on whether proper
        #motions and/or virial shape parameters are included.
        with Pool(processes = nprocs) as pool:
            if (propermotion == 'no'):
                if (virialshape == 'no'):
                    sampler = \
                        emcee.EnsembleSampler(nwalkers,ndim,lnprob,\
                        args=(x1,x2,y1,y1err,y2,y2err),pool=pool)
                else:
                    sampler = \
                        emcee.EnsembleSampler(nwalkers,ndim,lnprob,\
                        args=(x1,x2,y1,y1err,y2,y2err,\
                              vsp1val,vsp1pdf,vsp2val,vsp2pdf),pool=pool)
            elif (propermotion == 'yes'):
                if (virialshape == 'no'):
                    sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob,\
                        args=(x1,x2,x3,y1,y1err,y2,y2err,\
                              y3,y3err,y4,y4err),pool=pool)
                else:
                    sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob,\
                        args=(x1,x2,x3,y1,y1err,y2,y2err,\
                              y3,y3err,y4,y4err,\
                              vsp1val,vsp1pdf,vsp2val,vsp2pdf),pool=pool)
            sampler.run_mcmc(pos, nmodels, progress = True)
#Store the output (including the data):
print('Writing data to file ... ')
f = open(outdir+'output_sigp.txt','w')
for i in range(len(rbin_kin)):
f.write('%f %f %f\n' % \
(rbin_kin[i], sigpmean[i], sigperr[i]))
f.close()
f = open(outdir+'output_surfden.txt','w')
for i in range(len(rbin_phot)):
f.write('%f %f %f\n' % \
(rbin_phot[i], surfden[i], surfdenerr[i]))
f.close()
if (propermotion == 'yes'):
f = open(outdir+'output_prop.txt','w')
for i in range(len(rbin_kinp)):
f.write('%f %f %f %f %f\n' % \
(rbin_kinp[i],\
sigpmr[i],sigpmrerr[i],sigpmt[i],\
sigpmterr[i]))
f.close()
burn = np.int(0.75*nmodels)
chisq = -2.0 * \
sampler.get_log_prob(discard=burn, flat=True)
par_test = sampler.get_chain(discard=burn, flat=True)
f = open(outdir+'Boutput_chain.txt','w')
for i in range(len(chisq)):
outstr = str(chisq[i]) + ' '
for j in range(ndim):
outstr = outstr + str(par_test[i,j]) + ' '
outstr = outstr + '\n'
f.write(outstr)
f.close()
    ###########################################################
    #Plotting code:
    elif (codemode == 'plot'):
        print('Running in plotting mode ... ')
        print('Loading data from:', outdir)
        #Read in the data:
        #(These are the files written by 'run' mode above.)
        data_in = \
            np.genfromtxt(outdir+'output_surfden.txt',dtype='f8')
        rbin_phot = data_in[:,0]
        surfden = data_in[:,1]
        surfdenerr = data_in[:,2]
        data_in = \
            np.genfromtxt(outdir+'output_sigp.txt',dtype='f8')
        rbin_kin = data_in[:,0]
        sigpmean = data_in[:,1]
        sigperr = data_in[:,2]
        if (propermotion == 'yes'):
            data_in = \
                np.genfromtxt(outdir+'output_prop.txt',dtype='f8')
            rbin_kinp = data_in[:,0]
            sigpmr = data_in[:,1]
            sigpmrerr = data_in[:,2]
            sigpmt = data_in[:,3]
            sigpmterr = data_in[:,4]
#Set radius array to use for plotting mass profiles
#etc:
if (rplot_inner > 0):
rleft = rplot_inner
else:
rleft = np.min(rbin_phot)
if (rplot_outer > 0):
rright = rplot_outer
else:
rright = np.max(rbin_phot)
if (rplot_pnts < 0):
rplot_pnts = len(rbin_phot)
print('Setting plot range:', rleft, rright)
rbin = np.logspace(np.log10(rleft),np.log10(rright),\
np.int(rplot_pnts))
#Load in the emcee data:
data_in = \
np.genfromtxt(outdir+'Boutput_chain.txt',dtype='f8')
chisq = data_in[:,0]
par_test = np.zeros((len(chisq),ndim), dtype='float')
for i in range(1,ndim+1):
par_test[:,i-1] = data_in[:,i]
        #Make sure no *really* bad models remain in the chains.
        #In practice, this cut makes no difference to the end result.
        if (np.min(chisq) == np.inf):
            print('No viable models. Uh oh... bye bye. Minimum chisq:', np.min(chisq))
            sys.exit(0)
        index = np.where(chisq < np.min(chisq)*500.0)[0]
        print('Min/max chisq:', np.min(chisq[index]), np.max(chisq[index]))
        #Cut the confidence intervals from the chains:
        #Draw nsamples random models (with replacement) from the
        #surviving chain entries:
        nsamples = 1000
        sample_choose = index[np.random.randint(len(index), \
                                                size=nsamples)]
        #Set up arrays to store confidence intervals:
        #First index holds 7 entries per radius — apparently the
        #median plus 68/95/99% lower/upper bounds (cf. the
        #dsix/dnine/dninenine naming below) — confirm against
        #calcmedquartnine.
        M_int = np.zeros((7,len(rbin)))
        rho_int = np.zeros((7,len(rbin)))
        dlnrhodlnr_int = np.zeros((7,len(rbin)))
        Mstar_int = np.zeros((7,len(Mstar_rad)))
        Mdynrat_int = np.zeros((7,len(Mstar_rad)))
        nu_int = np.zeros((7,len(Mstar_rad)))
        Mcen_int = np.zeros((7,len(rbin)))
        if (calc_Jfac == 'yes'):
            J_int = np.zeros(7)
        if (calc_Dfac == 'yes'):
            D_int = np.zeros(7)
        #Per-sample storage (one column per posterior draw):
        Mstore = np.zeros((len(rbin),nsamples))
        rhostore = np.zeros((len(rbin),nsamples))
        dlnrhodlnrstore = np.zeros((len(rbin),nsamples))
        Mstarstore = np.zeros((len(Mstar_rad),nsamples))
        Mdynratstore = np.zeros((len(Mstar_rad),nsamples))
        nustore = np.zeros((len(Mstar_rad),nsamples))
        M200store = np.zeros(nsamples)
        vmaxstore = np.zeros(nsamples)
        cstore = np.zeros(nsamples)
        rcstore = np.zeros(nsamples)
        nstore = np.zeros(nsamples)
        rtstore = np.zeros(nsamples)
        delstore = np.zeros(nsamples)
        Mcenstore = np.zeros((len(rbin),nsamples))
        Arotstore = np.zeros(nsamples)
        dstore = np.zeros(nsamples)
        McenMstore = np.zeros(nsamples)
        Mcenastore = np.zeros(nsamples)
        if (calc_Jfac == 'yes'):
            Jstore = np.zeros(nsamples)
        if (calc_Dfac == 'yes'):
            Dstore = np.zeros(nsamples)
        bet_int = np.zeros((7,len(rbin)))
        betstar_int = np.zeros((7,len(rbin)))
        Sig_int = np.zeros((7,len(rbin)))
        sigp_int = np.zeros((7,len(rbin)))
        vphirot_int = np.zeros((7,len(rbin)))
        if (virialshape == 'yes'):
            vs1_int = np.zeros((7,1))
            vs2_int = np.zeros((7,1))
        if (propermotion == 'yes'):
            sigpmr_int = np.zeros((7,len(rbin)))
            sigpmt_int = np.zeros((7,len(rbin)))
        betstore = np.zeros((len(rbin),nsamples))
        betstarstore = np.zeros((len(rbin),nsamples))
        Sigstore = np.zeros((len(rbin),nsamples))
        sigpstore = np.zeros((len(rbin),nsamples))
        vphirotstore = np.zeros((len(rbin),nsamples))
        if (virialshape == 'yes'):
            vs1store = np.zeros(nsamples)
            vs2store = np.zeros(nsamples)
        if (propermotion == 'yes'):
            sigpmrstore = np.zeros((len(rbin),nsamples))
            sigpmtstore = np.zeros((len(rbin),nsamples))
        #Evaluate every profile of interest for each selected
        #posterior sample; columns of the *store arrays are filled
        #per draw and percentile-reduced afterwards.
        for i in range(nsamples):
            theta = par_test[sample_choose[i],:]
            #Unpack the parameter vector (same layout as in 'run'
            #mode): anisotropy, tracer, mass, rotation, distance,
            #stellar mass.
            betpars = theta[0:n_betpars]
            nupars = theta[n_betpars:n_betpars+nu_components*2]
            Mpars = theta[n_betpars+nu_components*2:\
                          n_betpars+nu_components*2+n_mpars]
            Arot = theta[n_betpars+nu_components*2+n_mpars]
            drange = theta[n_betpars+nu_components*2+n_mpars+1]
            Mstar = theta[ndim-1]
            nuparsu = np.array(nupars)
            Mparsu = np.array(Mpars)
            #Mass parameters 0,2,4,6 (M200, rc, rt, Mcen) are sampled
            #in log10; convert to linear units here.
            Mparsu[0] = 10.**Mpars[0]
            Mparsu[2] = 10.**Mpars[2]
            Mparsu[4] = 10.**Mpars[4]
            Mparsu[6] = 10.**Mpars[6]
            #Calculate all profiles we want to plot:
            if (propermotion == 'no'):
                if (virialshape == 'no'):
                    sigr2,Sig,sigLOS2 = \
                        sigp_fit(rbin,rbin,nuparsu,\
                                 Mparsu,betpars,Mstar,Arot)
                else:
                    sigr2,Sig,sigLOS2,vs1,vs2 = \
                        sigp_fit_vs(rbin,rbin,nuparsu,\
                                    Mparsu,betpars,Mstar,Arot)
            elif (propermotion == 'yes'):
                if (virialshape == 'no'):
                    sigr2,Sig,sigLOS2,sigpmr2,sigpmt2 = \
                        sigp_fit_prop(rbin,rbin,rbin,nuparsu,Mparsu,betpars,\
                                      Mstar,Arot)
                else:
                    sigr2,Sig,sigLOS2,sigpmr2,sigpmt2,vs1,vs2 = \
                        sigp_fit_prop_vs(rbin,rbin,rbin,nuparsu,Mparsu,betpars,\
                                         Mstar,Arot)
            Mr = M(rbin,Mparsu)
            betar = beta(rbin,betpars)
            rhor = rho(rbin,Mparsu)
            dlnrhodlnrr = dlnrhodlnr(rbin,Mparsu)
            Mstarr = Mstar_prof*Mstar
            nu_mass_r = multiplummass(Mstar_rad,nuparsu)
            Mcenr = Mcentral(rbin,Mparsu)
            #Dispersions are in (m/s)^2 here; /1000 converts to km/s.
            Mstore[:,i] = Mr
            betstore[:,i] = betar
            betstarstore[:,i] = betar/(2.0-betar)
            sigpstore[:,i] = np.sqrt(sigLOS2)/1000.
            Sigstore[:,i] = Sig
            vphirotstore[:,i] = np.sqrt(2.0*sigLOS2*Arot*rbin/Rhalf)/1000.
            rhostore[:,i] = rhor
            dlnrhodlnrstore[:,i] = dlnrhodlnrr
            Mstarstore[:,i] = Mstarr
            Mdynratstore[:,i] = M(Mstar_rad,Mparsu)/Mstarr
            nustore[:,i] = nu_mass_r
            Mcenstore[:,i] = Mcenr
            vmaxstore[i] = vmax_func(Mparsu[0],Mparsu[1],h)
            M200store[i] = Mparsu[0]
            cstore[i] = Mparsu[1]
            rcstore[i] = Mparsu[2]
            nstore[i] = Mparsu[3]
            rtstore[i] = Mparsu[4]
            delstore[i] = Mparsu[5]
            Arotstore[i] = Arot
            dstore[i] = drange*dgal_kpc
            McenMstore[i] = Mparsu[6]
            Mcenastore[i] = Mparsu[7]
            if (calc_Jfac == 'yes'):
                #Convert the aperture angle to a physical radius at
                #the galaxy distance:
                alpha_rmax = dgal_kpc*alpha_Jfac_deg/deg
                Jstore[i] = get_J(rho,Mparsu,dgal_kpc,alpha_rmax)
            if (calc_Dfac == 'yes'):
                alpha_rmax = dgal_kpc*alpha_Dfac_deg/deg
                Dstore[i] = get_D(rho,Mparsu,dgal_kpc,alpha_rmax)
            if (virialshape == 'yes'):
                vs1store[i] = vs1/1.0e12
                vs2store[i] = vs2/1.0e12
            if (propermotion == 'yes'):
                sigpmrstore[:,i] = np.sqrt(sigpmr2)/1000.
                sigpmtstore[:,i] = np.sqrt(sigpmt2)/1000.
        #Solve for confidence intervals for each of these:
        #calcmedquartnine reduces each radius's nsamples draws to the
        #7-entry [median, lo/hi x3] summary stored in the *_int arrays.
        for j in range(len(rbin)):
            M_int[0,j], M_int[1,j], M_int[2,j], M_int[3,j], \
                M_int[4,j], M_int[5,j], M_int[6,j] = \
                calcmedquartnine(Mstore[j,:])
            rho_int[0,j], rho_int[1,j], rho_int[2,j], rho_int[3,j], \
                rho_int[4,j], rho_int[5,j], rho_int[6,j] = \
                calcmedquartnine(rhostore[j,:])
            dlnrhodlnr_int[0,j], dlnrhodlnr_int[1,j],\
                dlnrhodlnr_int[2,j], \
                dlnrhodlnr_int[3,j], \
                dlnrhodlnr_int[4,j], \
                dlnrhodlnr_int[5,j], \
                dlnrhodlnr_int[6,j] = \
                calcmedquartnine(dlnrhodlnrstore[j,:])
        for j in range(len(Mstar_rad)):
            Mstar_int[0,j], Mstar_int[1,j], Mstar_int[2,j], \
                Mstar_int[3,j], \
                Mstar_int[4,j], \
                Mstar_int[5,j], \
                Mstar_int[6,j] = \
                calcmedquartnine(Mstarstore[j,:])
        for j in range(len(Mstar_rad)):
            Mdynrat_int[0,j], Mdynrat_int[1,j], \
                Mdynrat_int[2,j], \
                Mdynrat_int[3,j], \
                Mdynrat_int[4,j], \
                Mdynrat_int[5,j], \
                Mdynrat_int[6,j] = \
                calcmedquartnine(Mdynratstore[j,:])
        for j in range(len(Mstar_rad)):
            nu_int[0,j], nu_int[1,j], nu_int[2,j], \
                nu_int[3,j], \
                nu_int[4,j], \
                nu_int[5,j], \
                nu_int[6,j] = \
                calcmedquartnine(nustore[j,:])
        for j in range(len(rbin)):
            Mcen_int[0,j], Mcen_int[1,j], Mcen_int[2,j], \
                Mcen_int[3,j], \
                Mcen_int[4,j], \
                Mcen_int[5,j], \
                Mcen_int[6,j] = \
                calcmedquartnine(Mcenstore[j,:])
        #J/D factors are scalars, so one reduction suffices (this
        #rebinds J_int/D_int from the pre-allocated array to the
        #returned tuple):
        if (calc_Jfac == 'yes'):
            J_int = \
                calcmedquartnine(Jstore[:])
        if (calc_Dfac == 'yes'):
            D_int = \
                calcmedquartnine(Dstore[:])
        for j in range(len(rbin)):
            bet_int[0,j], bet_int[1,j], bet_int[2,j], \
                bet_int[3,j], \
                bet_int[4,j], \
                bet_int[5,j], \
                bet_int[6,j] = \
                calcmedquartnine(betstore[j,:])
            betstar_int[0,j], betstar_int[1,j], \
                betstar_int[2,j], betstar_int[3,j], \
                betstar_int[4,j], \
                betstar_int[5,j], \
                betstar_int[6,j] = \
                calcmedquartnine(betstarstore[j,:])
            sigp_int[0,j], sigp_int[1,j], sigp_int[2,j], \
                sigp_int[3,j], \
                sigp_int[4,j], \
                sigp_int[5,j], \
                sigp_int[6,j] = \
                calcmedquartnine(sigpstore[j,:])
            Sig_int[0,j], Sig_int[1,j], Sig_int[2,j], \
                Sig_int[3,j], \
                Sig_int[4,j], \
                Sig_int[5,j], \
                Sig_int[6,j] = \
                calcmedquartnine(Sigstore[j,:])
            vphirot_int[0,j], vphirot_int[1,j], vphirot_int[2,j], \
                vphirot_int[3,j], \
                vphirot_int[4,j], \
                vphirot_int[5,j], \
                vphirot_int[6,j] = \
                calcmedquartnine(vphirotstore[j,:])
            if (propermotion == 'yes'):
                sigpmr_int[0,j], sigpmr_int[1,j], \
                    sigpmr_int[2,j], sigpmr_int[3,j], \
                    sigpmr_int[4,j], \
                    sigpmr_int[5,j], \
                    sigpmr_int[6,j] = \
                    calcmedquartnine(sigpmrstore[j,:])
                sigpmt_int[0,j], sigpmt_int[1,j], \
                    sigpmt_int[2,j], sigpmt_int[3,j], \
                    sigpmt_int[4,j], \
                    sigpmt_int[5,j], \
                    sigpmt_int[6,j] = \
                    calcmedquartnine(sigpmtstore[j,:])
        if (virialshape == 'yes'):
            vs1_int[0], vs1_int[1], \
                vs1_int[2], vs1_int[3], \
                vs1_int[4], \
                vs1_int[5], \
                vs1_int[6] = \
                calcmedquartnine(vs1store[:])
            vs2_int[0], vs2_int[1], \
                vs2_int[2], vs2_int[3], \
                vs2_int[4], \
                vs2_int[5], \
                vs2_int[6] = \
                calcmedquartnine(vs2store[:])
        #######################################################
        #And now make the plots:
        #First calculate median distance; plot all data at
        #median:
        dmed, dsixlow, dsixhi,\
            dninelow, dninehi, \
            dnineninelow, dnineninehi = calcmedquartnine(dstore)
        #dcorr rescales the data radii from the assumed distance
        #dgal_kpc to the fitted median distance dmed:
        dcorr = dmed/dgal_kpc
        ##### Stellar surface density #####
        fig = plt.figure(figsize=(figx,figy))
        ax = fig.add_subplot(111)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(mylinewidth)
        plt.xticks(fontsize=myfontsize)
        plt.yticks(fontsize=myfontsize)
        plt.loglog()
        plt.errorbar(rbin_phot*dcorr,surfden,surfdenerr,\
                     color='b',ecolor='b',linewidth=2,alpha=0.75,\
                     fmt='o')
        #Shaded bands: outermost to innermost percentile pairs, with
        #the median fit on top:
        plt.fill_between(rbin,Sig_int[5,:],Sig_int[6,:],\
                         facecolor='black',alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(rbin,Sig_int[3,:],Sig_int[4,:],\
                         facecolor='black',alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(rbin,Sig_int[1,:],Sig_int[2,:],\
                         facecolor='black',alpha=0.66,\
                         edgecolor='none')
        plt.plot(rbin,Sig_int[0,:],'k',linewidth=mylinewidth,\
                 label=r'Fit')
        plt.axvline(x=Rhalf,color='blue',alpha=0.5,\
                    linewidth=mylinewidth)
        plt.xlabel(r'$R\,[{\rm kpc}]$',\
                   fontsize=myfontsize)
        plt.ylabel(r'$\Sigma_*\,[N\,{\rm kpc}^{-2}]$',\
                   fontsize=myfontsize)
        plt.xlim([np.min(rbin),np.max(rbin)])
        plt.ylim([ymin_Sigstar,ymax_Sigstar])
        plt.savefig(outdir+'output_Sigstar.pdf',bbox_inches='tight')
        ##### Stellar projected velocity dispersion #####
        fig = plt.figure(figsize=(figx,figy))
        ax = fig.add_subplot(111)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(mylinewidth)
        plt.xticks(fontsize=myfontsize)
        plt.yticks(fontsize=myfontsize)
        plt.errorbar(np.log10(rbin_kin*dcorr),sigpmean,sigperr,\
                     linewidth=2,color='b',alpha=0.75,\
                     fmt='o')
        #Only plot radii where the median model is positive (masks
        #points that would break the plot):
        sel = sigp_int[0,:] > 0
        plt.fill_between(np.log10(rbin[sel]),sigp_int[5,:][sel],\
                         sigp_int[6,:][sel],\
                         facecolor='black',alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin[sel]),sigp_int[3,:][sel],\
                         sigp_int[4,:][sel],\
                         facecolor='black',alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin[sel]),sigp_int[1,:][sel],\
                         sigp_int[2,:][sel],\
                         facecolor='black',alpha=0.66,\
                         edgecolor='none')
        plt.plot(np.log10(rbin[sel]),sigp_int[0,:][sel],'k',linewidth=mylinewidth,\
                 label=r'Fit')
        plt.axvline(x=np.log10(Rhalf),color='blue',alpha=0.5,\
                    linewidth=mylinewidth)
        plt.xlabel(r'${\rm Log}_{10}[R/{\rm kpc}]$',\
                   fontsize=myfontsize)
        plt.ylabel(r'$\sigma_{\rm LOS}[{\rm km\,s}^{-1}]$',\
                   fontsize=myfontsize)
        plt.ylim([0,y_sigLOSmax])
        plt.xlim([np.log10(np.min(rbin)),np.log10(np.max(rbin))])
        plt.savefig(outdir+'output_sigLOS.pdf',bbox_inches='tight')
        ##### Stellar proper motion dispersions #####
        if (propermotion == 'yes'):
            #First in the radial direction (on the sky):
            fig = plt.figure(figsize=(figx,figy))
            ax = fig.add_subplot(111)
            for axis in ['top','bottom','left','right']:
                ax.spines[axis].set_linewidth(mylinewidth)
            plt.xticks(fontsize=myfontsize)
            plt.yticks(fontsize=myfontsize)
            #Mask non-positive data points; dcorr rescales both the
            #radii and the dispersions to the fitted median distance.
            psel = sigpmr > 0
            plt.errorbar(np.log10(rbin_kinp[psel]*dcorr),sigpmr[psel]*dcorr,sigpmrerr[psel]*dcorr,\
                         linewidth=2,color='b',alpha=0.75,\
                         fmt='o')
            sel = sigpmr_int[0,:] > 0
            plt.fill_between(np.log10(rbin[sel]),sigpmr_int[5,:][sel],\
                             sigpmr_int[6,:][sel],\
                             facecolor='black',alpha=alp3sig,\
                             edgecolor='none')
            plt.fill_between(np.log10(rbin[sel]),sigpmr_int[3,:][sel],\
                             sigpmr_int[4,:][sel],\
                             facecolor='black',alpha=0.33,\
                             edgecolor='none')
            plt.fill_between(np.log10(rbin[sel]),sigpmr_int[1,:][sel],\
                             sigpmr_int[2,:][sel],\
                             facecolor='black',alpha=0.66,\
                             edgecolor='none')
            plt.plot(np.log10(rbin[sel]),sigpmr_int[0,:][sel],'k',linewidth=mylinewidth,\
                     label=r'Fit')
            plt.axvline(x=np.log10(Rhalf),color='blue',alpha=0.5,\
                        linewidth=mylinewidth)
            plt.xlabel(r'${\rm Log}_{10}[R/{\rm kpc}]$',\
                       fontsize=myfontsize)
            plt.ylabel(r'$\sigma_{\rm pmr}[{\rm km\,s}^{-1}]$',\
                       fontsize=myfontsize)
            plt.ylim([0,y_sigLOSmax])
            plt.xlim([np.log10(np.min(rbin)),np.log10(np.max(rbin))])
            plt.savefig(outdir+'output_sigpmr.pdf',bbox_inches='tight')
            #Then in the tangential direction (on the sky):
            fig = plt.figure(figsize=(figx,figy))
            ax = fig.add_subplot(111)
            for axis in ['top','bottom','left','right']:
                ax.spines[axis].set_linewidth(mylinewidth)
            plt.xticks(fontsize=myfontsize)
            plt.yticks(fontsize=myfontsize)
            psel = sigpmt > 0
            plt.errorbar(np.log10(rbin_kinp[psel]*dcorr),sigpmt[psel]*dcorr,sigpmterr[psel]*dcorr,\
                         linewidth=2,color='b',alpha=0.75,\
                         fmt='o')
            sel = sigpmt_int[0,:] > 0
            plt.fill_between(np.log10(rbin[sel]),sigpmt_int[5,:][sel],\
                             sigpmt_int[6,:][sel],\
                             facecolor='black',alpha=alp3sig,\
                             edgecolor='none')
            plt.fill_between(np.log10(rbin[sel]),sigpmt_int[3,:][sel],\
                             sigpmt_int[4,:][sel],\
                             facecolor='black',alpha=0.33,\
                             edgecolor='none')
            plt.fill_between(np.log10(rbin[sel]),sigpmt_int[1,:][sel],\
                             sigpmt_int[2,:][sel],\
                             facecolor='black',alpha=0.66,\
                             edgecolor='none')
            plt.plot(np.log10(rbin[sel]),sigpmt_int[0,:][sel],'k',linewidth=mylinewidth,\
                     label=r'Fit')
            plt.axvline(x=np.log10(Rhalf),color='blue',alpha=0.5,\
                        linewidth=mylinewidth)
            plt.xlabel(r'${\rm Log}_{10}[R/{\rm kpc}]$',\
                       fontsize=myfontsize)
            plt.ylabel(r'$\sigma_{\rm pmt}[{\rm km\,s}^{-1}]$',\
                       fontsize=myfontsize)
            plt.ylim([0,y_sigLOSmax])
            plt.xlim([np.log10(np.min(rbin)),np.log10(np.max(rbin))])
            plt.savefig(outdir+'output_sigpmt.pdf',bbox_inches='tight')
        ##### Stellar beta(r) anisotropy profile #####
        fig = plt.figure(figsize=(figx,figy))
        ax = fig.add_subplot(111)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(mylinewidth)
        plt.xticks(fontsize=myfontsize)
        plt.yticks(fontsize=myfontsize)
        plt.fill_between(np.log10(rbin),bet_int[5,:],bet_int[6,:],\
                         facecolor='black',alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin),bet_int[3,:],bet_int[4,:],\
                         facecolor='black',alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin),bet_int[1,:],bet_int[2,:],\
                         facecolor='black',alpha=0.66,\
                         edgecolor='none')
        plt.plot(np.log10(rbin),bet_int[0,:],'k',linewidth=mylinewidth,\
                 label=r'Fit')
        #And true answer (for mock data):
        if (overtrue == 'yes'):
            plt.plot(np.log10(ranal),betatrue,'b--',linewidth=mylinewidth,\
                     label=r'True')
        plt.xlabel(r'${\rm Log}_{10}[r/{\rm kpc}]$',\
                   fontsize=myfontsize)
        plt.ylabel(r'$\beta$',\
                   fontsize=myfontsize)
        plt.ylim([np.min([bet0min,betinfmin]),np.max([bet0max,betinfmax])])
        plt.xlim([np.log10(np.min(rbin)),np.log10(np.max(rbin))])
        plt.savefig(outdir+'output_beta.pdf',bbox_inches='tight')
        #Write the above data to files for comparitive plotting later:
        f = open(outdir+'output_bet.txt','w')
        for i in range(len(rbin)):
            f.write('%f %f %f %f %f %f %f %f\n' % \
                    (rbin[i],bet_int[0,i],bet_int[1,i],bet_int[2,i],bet_int[3,i],\
                     bet_int[4,i],bet_int[5,i],bet_int[6,i]))
        f.close()
        ###### Stellar symmetrised betastar(r) profile #####
        #betastar = beta/(2-beta) maps beta onto [-1,1] (computed in
        #the sample loop above).
        fig = plt.figure(figsize=(figx,figy))
        ax = fig.add_subplot(111)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(mylinewidth)
        plt.xticks(fontsize=myfontsize)
        plt.yticks(fontsize=myfontsize)
        plt.fill_between(np.log10(rbin),betstar_int[5,:],betstar_int[6,:],\
                         facecolor='black',alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin),betstar_int[3,:],betstar_int[4,:],\
                         facecolor='black',alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin),betstar_int[1,:],betstar_int[2,:],\
                         facecolor='black',alpha=0.66,\
                         edgecolor='none')
        plt.plot(np.log10(rbin),betstar_int[0,:],'k',linewidth=mylinewidth,\
                 label=r'Fit')
        #And true answer (for mock data):
        if (overtrue == 'yes'):
            plt.plot(np.log10(ranal),betatruestar,'b--',linewidth=mylinewidth,\
                     label=r'True')
        plt.xlabel(r'${\rm Log}_{10}[r/{\rm kpc}]$',\
                   fontsize=myfontsize)
        plt.ylabel(r'$\tilde{\beta}$',\
                   fontsize=myfontsize)
        plt.ylim([-1,1])
        plt.xlim([np.log10(np.min(rbin)),np.log10(np.max(rbin))])
        plt.savefig(outdir+'output_betastar.pdf',bbox_inches='tight')
        #Write the above data to files for comparitive plotting later:
        f = open(outdir+'output_betstar.txt','w')
        for i in range(len(rbin)):
            f.write('%f %f %f %f %f %f %f %f\n' % \
                    (rbin[i],betstar_int[0,i],betstar_int[1,i],\
                     betstar_int[2,i],betstar_int[3,i],\
                     betstar_int[4,i],betstar_int[5,i],\
                     betstar_int[6,i]))
        f.close()
        ##### Cumulative mass profiles #####
        fig = plt.figure(figsize=(figx,figy))
        ax = fig.add_subplot(111)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(mylinewidth)
        plt.xticks(fontsize=myfontsize)
        plt.yticks(fontsize=myfontsize)
        plt.loglog()
        plt.fill_between(rbin,M_int[5,:],M_int[6,:],\
                         facecolor='black',alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(rbin,M_int[3,:],M_int[4,:],\
                         facecolor='black',alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(rbin,M_int[1,:],M_int[2,:],\
                         facecolor='black',alpha=0.66,\
                         edgecolor='none')
        #NOTE(review): Mstar here is the value left over from the
        #final posterior sample in the loop above, not a summary
        #statistic — confirm this is the intended legend switch.
        if (Mstar > 1.0):
            plt.plot(rbin,M_int[0,:],'k',linewidth=mylinewidth,\
                     label=r'Fit Dark Matter')
        else:
            plt.plot(rbin,M_int[0,:],'k',linewidth=mylinewidth,\
                     label=r'Fit')
        plt.fill_between(Mstar_rad,Mstar_int[5,:],Mstar_int[6,:],\
                         facecolor=colorpop2,alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(Mstar_rad,Mstar_int[3,:],Mstar_int[4,:],\
                         facecolor=colorpop2,alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(Mstar_rad,Mstar_int[1,:],Mstar_int[2,:],\
                         facecolor=colorpop2,alpha=0.66,\
                         edgecolor='none')
        plt.fill_between(rbin,Mcen_int[5,:],Mcen_int[6,:],\
                         facecolor=colorpop3,alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(rbin,Mcen_int[3,:],Mcen_int[4,:],\
                         facecolor=colorpop3,alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(rbin,Mcen_int[1,:],Mcen_int[2,:],\
                         facecolor=colorpop3,alpha=0.66,\
                         edgecolor='none')
        if (Mstar > 1.0):
            plt.plot(Mstar_rad,Mstar_int[0,:],color=colorpop2,\
                     linewidth=mylinewidth,\
                     label=r'Fit Stars')
        if (np.max(Mcen_int) > 0.0):
            plt.plot(rbin,Mcen_int[0,:],color=colorpop3,\
                     linewidth=mylinewidth,\
                     label=r'Fit Central Dark Mass')
        #Overplot true answer (for mock data):
        if (overtrue == 'yes'):
            plt.plot(ranal,truemass,'b--',linewidth=mylinewidth,\
                     label=r'True')
        plt.axvline(x=Rhalf,color='blue',alpha=0.5,\
                    linewidth=mylinewidth)
        plt.xlabel(r'$r\,[{\rm kpc}]$',\
                   fontsize=myfontsize)
        plt.ylabel(r'$M(<r)\,[{\rm M}_\odot]$',\
                   fontsize=myfontsize)
        plt.ylim([yMlow,yMhigh])
        plt.xlim([np.min(rbin),np.max(rbin)])
        plt.legend(loc='upper left',fontsize=mylegendfontsize)
        plt.savefig(outdir+'output_Mass.pdf',bbox_inches='tight')
        #Write the above data to files for comparitive plotting later:
        f = open(outdir+'output_M.txt','w')
        for i in range(len(rbin)):
            f.write('%f %f %f %f %f %f %f %f\n' % \
                    (rbin[i],M_int[0,i],M_int[1,i],M_int[2,i],M_int[3,i],\
                     M_int[4,i], M_int[5,i], M_int[6,i]))
        f.close()
        #And the Mdyn/Mstar ratio:
        f = open(outdir+'output_MdynMstar.txt','w')
        for i in range(len(Mstar_rad)):
            f.write('%f %f %f %f %f %f %f %f\n' % \
                    (Mstar_rad[i],Mdynrat_int[0,i],Mdynrat_int[1,i],Mdynrat_int[2,i],\
                     Mdynrat_int[3,i],\
                     Mdynrat_int[4,i], Mdynrat_int[5,i], Mdynrat_int[6,i]))
        f.close()
        #And nu_mass_r:
        f = open(outdir+'output_nu_mass_r.txt','w')
        for i in range(len(Mstar_rad)):
            f.write('%f %f %f %f %f %f %f %f\n' % \
                    (Mstar_rad[i],nu_int[0,i],nu_int[1,i],nu_int[2,i],\
                     nu_int[3,i],\
                     nu_int[4,i], nu_int[5,i], nu_int[6,i]))
        f.close()
        #And Mstar:
        f = open(outdir+'output_Mass_Mstar.txt','w')
        for i in range(len(Mstar_rad)):
            f.write('%f %f %f %f %f %f %f %f\n' % \
                    (Mstar_rad[i],Mstar_int[0,i],Mstar_int[1,i],Mstar_int[2,i],\
                     Mstar_int[3,i],\
                     Mstar_int[4,i], Mstar_int[5,i], Mstar_int[6,i]))
        f.close()
        #And central dark mass:
        f = open(outdir+'output_Mass_Mcen.txt','w')
        for i in range(len(rbin)):
            f.write('%f %f %f %f %f %f %f %f\n' % \
                    (rbin[i],Mcen_int[0,i],Mcen_int[1,i],Mcen_int[2,i],\
                     Mcen_int[3,i],\
                     Mcen_int[4,i], Mcen_int[5,i], Mcen_int[6,i]))
        f.close()
        ##### Dark matter density profile #####
        fig = plt.figure(figsize=(figx,figy))
        ax = fig.add_subplot(111)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(mylinewidth)
        plt.xticks(fontsize=myfontsize)
        plt.yticks(fontsize=myfontsize)
        plt.loglog()
        plt.fill_between(rbin,rho_int[5,:],rho_int[6,:],\
                         facecolor='black',alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(rbin,rho_int[3,:],rho_int[4,:],\
                         facecolor='black',alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(rbin,rho_int[1,:],rho_int[2,:],\
                         facecolor='black',alpha=0.66,\
                         edgecolor='none')
        plt.plot(rbin,rho_int[0,:],'k',linewidth=mylinewidth,\
                 label=r'Fit')
        #Overplot true solution (for mock data):
        if (overtrue == 'yes'):
            plt.plot(ranal,trueden,'b--',linewidth=mylinewidth,\
                     label=r'True')
        plt.axvline(x=Rhalf,color='blue',alpha=0.5,\
                    linewidth=mylinewidth)
        plt.xlabel(r'$r\,[{\rm kpc}]$',\
                   fontsize=myfontsize)
        plt.ylabel(r'$\rho\,[{\rm M}_\odot\,{\rm kpc}^{-3}]$',\
                   fontsize=myfontsize)
        plt.xlim([np.min(rbin),np.max(rbin)])
        plt.ylim([yrholow,yrhohigh])
        plt.savefig(outdir+'output_rho.pdf',bbox_inches='tight')
        #Write the above data to files for comparitive plotting later:
        f = open(outdir+'output_rho.txt','w')
        for i in range(len(rbin)):
            f.write('%f %f %f %f %f %f %f %f\n' % \
                    (rbin[i],rho_int[0,i],rho_int[1,i],rho_int[2,i],rho_int[3,i],\
                     rho_int[4,i],rho_int[5,i],rho_int[6,i]))
        f.close()
        #And the coreNFW parameters:
        f = open(outdir+'output_M200c200_chain.txt','w')
        for i in range(len(M200store)):
            f.write('%f %f %f %f %f %f %f\n' % \
                    (M200store[i],cstore[i],nstore[i],rcstore[i],rtstore[i],\
                     delstore[i],vmaxstore[i]))
        f.close()
        ##### Dark matter log density exponent #####
        fig = plt.figure(figsize=(figx,figy))
        ax = fig.add_subplot(111)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(mylinewidth)
        plt.xticks(fontsize=myfontsize)
        plt.yticks(fontsize=myfontsize)
        plt.fill_between(np.log10(rbin),dlnrhodlnr_int[5,:],dlnrhodlnr_int[6,:],\
                         facecolor='black',alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin),dlnrhodlnr_int[3,:],dlnrhodlnr_int[4,:],\
                         facecolor='black',alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin),dlnrhodlnr_int[1,:],dlnrhodlnr_int[2,:],\
                         facecolor='black',alpha=0.66,\
                         edgecolor='none')
        plt.plot(np.log10(rbin),dlnrhodlnr_int[0,:],'k',linewidth=mylinewidth,\
                 label=r'Fit')
        #And overplot true model (for mock data):
        if (overtrue == 'yes'):
            plt.plot(np.log10(ranal),truedlnrhodlnr,'b--',linewidth=mylinewidth,\
                     label=r'True')
        plt.axvline(x=np.log10(Rhalf),color='blue',alpha=0.5,\
                    linewidth=mylinewidth)
        plt.xlabel(r'${\rm Log}_{10}[r/{\rm kpc}]$',\
                   fontsize=myfontsize)
        plt.ylabel(r'${\rm dln}\rho/{\rm dln}r$',\
                   fontsize=myfontsize)
        plt.xlim([np.log10(np.min(rbin)),np.log10(np.max(rbin))])
        plt.ylim([-4,0])
        plt.savefig(outdir+'output_dlnrhodlnr.pdf',bbox_inches='tight')
        #Write the above data to files for comparitive plotting later:
        f = open(outdir+'output_dlnrhodlnr.txt','w')
        for i in range(len(rbin)):
            f.write('%f %f %f %f %f %f %f %f\n' % \
                    (rbin[i],dlnrhodlnr_int[0,i],dlnrhodlnr_int[1,i],\
                     dlnrhodlnr_int[2,i],dlnrhodlnr_int[3,i],\
                     dlnrhodlnr_int[4,i],dlnrhodlnr_int[5,i],\
                     dlnrhodlnr_int[6,i]))
        f.close()
        ##### Rotation velocity profile #####
        fig = plt.figure(figsize=(figx,figy))
        ax = fig.add_subplot(111)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(mylinewidth)
        plt.xticks(fontsize=myfontsize)
        plt.yticks(fontsize=myfontsize)
        plt.fill_between(np.log10(rbin),vphirot_int[5,:],vphirot_int[6,:],\
                         facecolor='black',alpha=alp3sig,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin),vphirot_int[3,:],vphirot_int[4,:],\
                         facecolor='black',alpha=0.33,\
                         edgecolor='none')
        plt.fill_between(np.log10(rbin),vphirot_int[1,:],vphirot_int[2,:],\
                         facecolor='black',alpha=0.66,\
                         edgecolor='none')
        plt.axvline(x=np.log10(Rhalf),color='blue',alpha=0.5,\
                    linewidth=mylinewidth)
        plt.xlabel(r'${\rm Log}_{10}[R/{\rm kpc}]$',\
                   fontsize=myfontsize)
        plt.ylabel(r'$v_\phi[{\rm km}\,{\rm s}^{-1}]$',\
                   fontsize=myfontsize)
        plt.xlim([np.log10(np.min(rbin)),np.log10(np.max(rbin))])
        plt.ylim([0,y_sigLOSmax])
        plt.savefig(outdir+'output_vphirot.pdf',bbox_inches='tight')
        #######################################################
        #Additional write output
        #And write the D+J-factor data:
        #(One posterior sample per line, for histogramming later.)
        if (calc_Jfac == 'yes'):
            f = open(outdir+'output_Jfac.txt','w')
            for i in range(len(Jstore)):
                f.write('%f\n' % Jstore[i])
            f.close()
        if (calc_Dfac == 'yes'):
            f = open(outdir+'output_Dfac.txt','w')
            for i in range(len(Dstore)):
                f.write('%f\n' % Dstore[i])
            f.close()
        #And write the distance data:
        f = open(outdir+'output_dstore.txt','w')
        for i in range(len(dstore)):
            f.write('%f\n' % dstore[i])
        f.close()
        #######################################################
        ##### Histograms #####
        ##### Distance #####
        #Posterior histograms of the scalar quantities (one entry per
        #posterior sample drawn above).
        fig = plt.figure(figsize=(figx,figy))
        ax = fig.add_subplot(111)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(mylinewidth)
        plt.xticks(fontsize=myfontsize)
        plt.yticks(fontsize=myfontsize)
        nbin = 25
        n, bins, patches = plt.hist(dstore,bins=nbin,\
                                    range=(np.min(dstore),\
                                           np.max(dstore)),\
                                    facecolor='b', \
                                    histtype='bar',alpha=0.5, \
                                    label='distance')
        plt.xlabel(r'$d\,[{\rm kpc}]$',\
                   fontsize=myfontsize)
        plt.ylabel(r'$N$',fontsize=myfontsize)
        plt.ylim([0,np.max(n)])
        plt.savefig(outdir+'output_d.pdf',bbox_inches='tight')
        ##### J-factor #####
        if (calc_Jfac == 'yes'):
            fig = plt.figure(figsize=(figx,figy))
            ax = fig.add_subplot(111)
            for axis in ['top','bottom','left','right']:
                ax.spines[axis].set_linewidth(mylinewidth)
            plt.xticks(fontsize=myfontsize)
            plt.yticks(fontsize=myfontsize)
            nbin = 25
            n, bins, patches = plt.hist(Jstore,bins=nbin,\
                                        range=(np.min(Jstore),\
                                               np.max(Jstore)),\
                                        facecolor='b', \
                                        histtype='bar',alpha=0.5, \
                                        label='J')
            plt.xlabel(r'$J\,[{\rm GeV}^2\,{\rm c}^{-4}\,{\rm cm}^{-5}]$',\
                       fontsize=myfontsize)
            plt.ylabel(r'$N$',fontsize=myfontsize)
            plt.ylim([0,np.max(n)])
            plt.savefig(outdir+'output_Jfac.pdf',bbox_inches='tight')
        ##### D-factor #####
        if (calc_Dfac == 'yes'):
            fig = plt.figure(figsize=(figx,figy))
            ax = fig.add_subplot(111)
            for axis in ['top','bottom','left','right']:
                ax.spines[axis].set_linewidth(mylinewidth)
            plt.xticks(fontsize=myfontsize)
            plt.yticks(fontsize=myfontsize)
            nbin = 25
            n, bins, patches = plt.hist(Dstore,bins=nbin,\
                                        range=(np.min(Dstore),\
                                               np.max(Dstore)),\
                                        facecolor='b', \
                                        histtype='bar',alpha=0.5, \
                                        label='D')
            plt.xlabel(r'$D\,[{\rm GeV}\,{\rm c}^{-2}\,{\rm cm}^{-2}]$',\
                       fontsize=myfontsize)
            plt.ylabel(r'$N$',fontsize=myfontsize)
            plt.ylim([0,np.max(n)])
            plt.savefig(outdir+'output_Dfac.pdf',bbox_inches='tight')
##### Virial shape parameters #####
if (virialshape == 'yes'):
#And make a plot of the Virial shape parameters, if
#activated:
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(mylinewidth)
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
nbin = 25
n, bins, patches = plt.hist(vs1store,bins=nbin,\
range=(np.min(vs1store),\
np.max(vs1store)),\
facecolor='b', \
histtype='bar',alpha=0.5, \
label='vs_1')
plt.errorbar([vs1bin],[0.5*np.max(n)],\
xerr=[[vs1bin-vs1lo],[vs1hi-vs1bin]],fmt='ob')
plt.xlabel(r'$v_{s1}\,[{\rm km}^4\,{\rm s}^{-4}]$',\
fontsize=myfontsize)
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.ylim([0,np.max(n)])
plt.savefig(outdir+'output_vs1.pdf',bbox_inches='tight')
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(mylinewidth)
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
nbin = 25
n, bins, patches = plt.hist(vs2store,bins=nbin,\
range=(np.min(vs2store),\
np.max(vs2store)),\
facecolor='r', \
histtype='bar',alpha=0.5)
plt.errorbar(vs2bin,[0.5*np.max(n)],\
xerr=[[vs2bin-vs2lo],[vs2hi-vs2bin]],fmt='or')
plt.xlabel(r'$v_{s2}\,[{\rm km}^4\,{\rm s}^{-4}\,{\rm kpc}^2]$',\
fontsize=myfontsize)
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.ylim([0,np.max(n)])
plt.savefig(outdir+'output_vs2.pdf',bbox_inches='tight')
##### coreNFWtides model parameters #####
#Histograms of the coreNFWtides dark-matter-halo parameters.
#These panels share nbin=15 and add minor ticks.
nbin = 15

#Halo mass M200 (log10, plotted over its prior range):
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(mylinewidth)
tick_spacing = 0.01
ax.minorticks_on()
ax.tick_params('both', length=20, width=2, which='major')
ax.tick_params('both', length=10, width=1, which='minor')
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
n, bins, patches = plt.hist(np.log10(M200store),bins=nbin,\
                            range=(logM200low,logM200high),\
                            facecolor='b', \
                            histtype='bar',alpha=0.5)
plt.xlabel(r'${\rm Log}_{10}[M_{200}/{\rm M}_\odot]$',\
           fontsize=myfontsize)
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.xlim([logM200low,logM200high])
plt.ylim([0,np.max(n)])
plt.savefig(outdir+'output_M200.pdf',bbox_inches='tight')

#Maximum circular velocity vmax (fixed plot window 5-50 km/s):
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(mylinewidth)
tick_spacing = 0.01
ax.minorticks_on()
ax.tick_params('both', length=20, width=2, which='major')
ax.tick_params('both', length=10, width=1, which='minor')
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
vmaxlow = 5.0
vmaxhigh = 50.0
n, bins, patches = plt.hist(vmaxstore,bins=nbin,\
                            range=(vmaxlow,vmaxhigh),\
                            facecolor='b', \
                            histtype='bar',alpha=0.5)
plt.xlabel(r'$v_{\rm max}\,[{\rm km/s}]$',\
           fontsize=myfontsize)
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.xlim([vmaxlow,vmaxhigh])
plt.ylim([0,np.max(n)])
plt.savefig(outdir+'output_vmax.pdf',bbox_inches='tight')

#Concentration parameter c:
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(mylinewidth)
tick_spacing = 0.01
ax.minorticks_on()
ax.tick_params('both', length=20, width=2, which='major')
ax.tick_params('both', length=10, width=1, which='minor')
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
n, bins, patches = plt.hist(cstore,bins=nbin,\
                            range=(clow,chigh),\
                            facecolor='b', \
                            histtype='bar',alpha=0.5)
plt.xlabel(r'$c$',fontsize=myfontsize)
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.xlim([clow,chigh])
plt.ylim([0,np.max(n)])
plt.savefig(outdir+'output_c.pdf',bbox_inches='tight')

#Core radius rc (log10, with unit-spaced major ticks):
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(mylinewidth)
tick_spacing = 0.01
ax.minorticks_on()
ax.tick_params('both', length=20, width=2, which='major')
ax.tick_params('both', length=10, width=1, which='minor')
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
ax.xaxis.set_ticks(np.arange(logrclow,logrchigh+1.0,1.0))
n, bins, patches = plt.hist(np.log10(rcstore),bins=nbin,\
                            range=(logrclow,logrchigh),\
                            facecolor='k', \
                            histtype='bar',alpha=0.5)
plt.xlabel(r'${\rm Log}_{10}[r_c/{\rm kpc}]$',fontsize=myfontsize)
plt.xlim([logrclow,logrchigh])
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.ylim([0,np.max(n)])
plt.savefig(outdir+'output_rc.pdf',bbox_inches='tight')

#Tidal radius rt (log10):
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(mylinewidth)
tick_spacing = 0.01
ax.minorticks_on()
ax.tick_params('both', length=20, width=2, which='major')
ax.tick_params('both', length=10, width=1, which='minor')
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
n, bins, patches = plt.hist(np.log10(rtstore),bins=nbin,\
                            range=(logrtlow,\
                                   logrthigh),\
                            facecolor='k', \
                            histtype='bar',alpha=0.5)
plt.xlabel(r'${\rm Log}_{10}[r_t/{\rm kpc}]$',fontsize=myfontsize)
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.savefig(outdir+'output_rt.pdf',bbox_inches='tight')

#Derived SIDM cross-section sigma/m for each sample, computed from
#the core radius, halo mass and concentration via sidm_novel:
sigmstore = np.zeros(len(rcstore))
for i in range(len(rcstore)):
    sigmstore[i] = sidm_novel(rcstore[i],M200store[i],cstore[i],\
                              oden,rhocrit)
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(mylinewidth)
tick_spacing = 0.01
ax.minorticks_on()
ax.tick_params('both', length=20, width=2, which='major')
ax.tick_params('both', length=10, width=1, which='minor')
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
n, bins, patches = plt.hist(sigmstore,bins=nbin,\
                            range=(sigmlow,sigmhigh),\
                            facecolor='k', \
                            histtype='bar',alpha=0.5)
plt.ylim([0.0,np.max(n)])
plt.xlabel(r'$\sigma/m\,({\rm cm}^2/{\rm g})$',\
           fontsize=myfontsize)
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.savefig(outdir+'output_sigm.pdf',bbox_inches='tight')

#Sharpness parameter n of the coreNFWtides profile:
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(mylinewidth)
tick_spacing = 0.01
ax.minorticks_on()
ax.tick_params('both', length=20, width=2, which='major')
ax.tick_params('both', length=10, width=1, which='minor')
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
n, bins, patches = plt.hist(nstore,bins=nbin,\
                            range=(nlow,nhigh),\
                            facecolor='b', \
                            histtype='bar',alpha=0.5)
plt.xlabel(r'$n$',\
           fontsize=myfontsize)
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.xlim([nlow,nhigh])
plt.ylim([0,np.max(n)])
plt.savefig(outdir+'output_n.pdf',bbox_inches='tight')

#Outer slope parameter delta:
fig = plt.figure(figsize=(figx,figy))
ax = fig.add_subplot(111)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(mylinewidth)
tick_spacing = 0.01
ax.minorticks_on()
ax.tick_params('both', length=20, width=2, which='major')
ax.tick_params('both', length=10, width=1, which='minor')
plt.xticks(fontsize=myfontsize)
plt.yticks(fontsize=myfontsize)
n, bins, patches = plt.hist(delstore,bins=nbin,\
                            range=(dellow,delhigh),\
                            facecolor='b', \
                            histtype='bar',alpha=0.5)
plt.xlabel(r'$\delta$',\
           fontsize=myfontsize)
plt.ylabel(r'$N$',fontsize=myfontsize)
plt.xlim([dellow,delhigh])
plt.ylim([0,np.max(n)])
plt.savefig(outdir+'output_del.pdf',bbox_inches='tight')
#Summary statistics: median and 68/95/99 per cent intervals for the
#key derived quantities, printed to screen and (for M200 and vmax)
#written to disk.
#Calculate M200 +/- 68%:
M200med, M200sixlow, M200sixhi,\
    M200ninelow, M200ninehi, \
    M200nineninelow, M200nineninehi = calcmedquartnine(M200store)
print('*******************************')
print('M200 -/+ 68% :: ', M200med, M200sixlow, M200sixhi)
#Use a context manager so the file is closed even if the write fails
#(the previous open/write/close sequence leaked the handle on error):
with open(outdir+'output_M200vals.txt','w') as f:
    f.write('%f %f %f %f %f %f %f\n' % \
            (M200med, M200sixlow, M200sixhi,\
             M200ninelow, M200ninehi, \
             M200nineninelow, M200nineninehi))

#And same for vmax:
vmaxmed, vmaxsixlow, vmaxsixhi,\
    vmaxninelow, vmaxninehi, \
    vmaxnineninelow, vmaxnineninehi = calcmedquartnine(vmaxstore)
print('*******************************')
print('vmax -/+ 68% :: ', vmaxmed, vmaxsixlow, vmaxsixhi)
with open(outdir+'output_vmaxvals.txt','w') as f:
    f.write('%f %f %f %f %f %f %f\n' % \
            (vmaxmed, vmaxsixlow, vmaxsixhi,\
             vmaxninelow, vmaxninehi, \
             vmaxnineninelow, vmaxnineninehi))

#And the same for rt:
rtmed, rtsixlow, rtsixhi,\
    rtninelow, rtninehi, \
    rtnineninelow, rtnineninehi = calcmedquartnine(rtstore)
print('*******************************')
print('rt -/+ 68% :: ', rtmed, rtsixlow, rtsixhi)

#And the same for d (already calculated, above):
print('*******************************')
print('d -/+ 68% :: ', dmed, dsixlow, dsixhi)

#And the same for Mcen:
Mcenmed, Mcensixlow, Mcensixhi,\
    Mcenninelow, Mcenninehi, \
    Mcennineninelow, Mcennineninehi = calcmedquartnine(McenMstore)
print('*******************************')
print('Mcen -/+ 68% :: ', Mcenmed, Mcensixlow, Mcensixhi)

#And the same for acen:
acenmed, acensixlow, acensixhi,\
    acenninelow, acenninehi, \
    acennineninelow, acennineninehi = calcmedquartnine(Mcenastore)
print('*******************************')
print('acen -/+ 68% :: ', acenmed, acensixlow, acensixhi)

#And the same for J-factor:
if (calc_Jfac == 'yes'):
    Jmed, Jsixlow, Jsixhi,\
        Jninelow, Jninehi, \
        Jnineninelow, Jnineninehi = calcmedquartnine(Jstore)
    print('*******************************')
    print('J -/+ 68% :: ', Jmed, Jsixlow, Jsixhi)

###########################################################
#Exit:
print('\nThank you for using GravSphere! Have a nice day.\n')
|
justinreadREPO_NAMEgravspherePATH_START.@gravsphere_extracted@gravsphere-master@gravsphere.py@.PATH_END.py
|
{
"filename": "forefrontai.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/llms/forefrontai.ipynb",
"type": "Jupyter Notebook"
}
|
# ForefrontAI
The `Forefront` platform gives you the ability to fine-tune and use [open-source large language models](https://docs.forefront.ai/get-started/models).
This notebook goes over how to use LangChain with [ForefrontAI](https://www.forefront.ai/).
## Imports
```python
import os
from langchain.chains import LLMChain
from langchain_community.llms import ForefrontAI
from langchain_core.prompts import PromptTemplate
```
## Set the Environment API Key
Make sure to get your API key from ForefrontAI. You are given a 5 day free trial to test different models.
```python
# get a new token: https://docs.forefront.ai/forefront/api-reference/authentication
from getpass import getpass
FOREFRONTAI_API_KEY = getpass()
```
```python
os.environ["FOREFRONTAI_API_KEY"] = FOREFRONTAI_API_KEY
```
## Create the ForefrontAI instance
You can specify different parameters such as the model endpoint url, length, temperature, etc. You must provide an endpoint url.
```python
llm = ForefrontAI(endpoint_url="YOUR ENDPOINT URL HERE")
```
## Create a Prompt Template
We will create a prompt template for Question and Answer.
```python
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
```
## Initiate the LLMChain
```python
llm_chain = LLMChain(prompt=prompt, llm=llm)
```
## Run the LLMChain
Provide a question and run the LLMChain.
```python
question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
llm_chain.run(question)
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@llms@forefrontai.ipynb@.PATH_END.py
|
{
"filename": "__main__.py",
"repo_name": "saopicc/killMS",
"repo_path": "killMS_extracted/killMS-master/killMS/__main__.py",
"type": "Python"
}
|
def kms_main():
    """Console entry point for the kMS calibration tool (import is lazy so startup stays cheap)."""
    from killMS import kMS
    kMS.driver()

def smoothsols_main():
    """Console entry point for SmoothSols."""
    from killMS import SmoothSols
    SmoothSols.driver()

def plotsolsim_main():
    """Console entry point for PlotSolsIm."""
    from killMS import PlotSolsIm
    PlotSolsIm.driver()

def plotsols_main():
    """Console entry point for PlotSols."""
    from killMS import PlotSols
    PlotSols.driver()

def mergesols_main():
    """Console entry point for MergeSols."""
    from killMS import MergeSols
    MergeSols.driver()

def makeplotmovie_main():
    """Console entry point for MakePlotMovie."""
    from killMS import MakePlotMovie
    MakePlotMovie.driver()

def interpsols_main():
    """Console entry point for InterpSols."""
    from killMS import InterpSols
    InterpSols.driver()

def grepall_main():
    """Console entry point for grepall."""
    from killMS import grepall
    grepall.driver()

def dsc_main():
    """Console entry point for dsc."""
    from killMS import dsc
    dsc.driver()

def clipcal_main():
    """Console entry point for ClipCal."""
    from killMS import ClipCal
    ClipCal.driver()

def blcal_main():
    """Console entry point for BLCal."""
    from killMS import BLCal
    BLCal.driver()

def aqweight_main():
    """Console entry point for AQWeight."""
    from killMS import AQWeight
    AQWeight.driver()
|
saopiccREPO_NAMEkillMSPATH_START.@killMS_extracted@killMS-master@killMS@__main__.py@.PATH_END.py
|
{
"filename": "interface.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/community/fastkick/interface.py",
"type": "Python"
}
|
from amuse.community import *
#~from amuse.community.interface.gd import GravitationalDynamics
from amuse.community.interface.gd import GravityFieldCode, GravityFieldInterface
from amuse.community.interface.common import CommonCodeInterface, CommonCode
class FastKickInterface(CodeInterface, CommonCodeInterface, GravityFieldInterface):
    """
    Low-level interface to the FastKick worker: it holds a set of mass
    points and exposes the gravity field (potential/acceleration) they
    generate. A CPU and a GPU worker executable are available.
    """
    include_headers = ['worker_code.h']

    MODE_CPU = 'cpu'
    MODE_GPU = 'gpu'

    def __init__(self, mode=MODE_CPU, **options):
        CodeInterface.__init__(self, name_of_the_worker=self.get_name_of_the_worker(mode), **options)

    def get_name_of_the_worker(self, mode):
        """Return the worker executable name for *mode*; anything other than MODE_GPU falls back to the CPU worker."""
        # The original if/if/else chain repeated the CPU name in two
        # branches; every mode except MODE_GPU maps to the CPU worker.
        if mode == self.MODE_GPU:
            return "fastkick_worker_gpu"
        return "fastkick_worker"

    @legacy_function
    def new_particle():
        # Vectorised add; returns the indices of the new particles.
        function = LegacyFunctionSpecification()
        function.must_handle_array = True
        function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT)
        function.addParameter('mass', dtype='float64', direction=function.IN, description = "The mass of the particle")
        function.addParameter('x', dtype='float64', direction=function.IN, description = "The initial position vector of the particle")
        function.addParameter('y', dtype='float64', direction=function.IN, description = "The initial position vector of the particle")
        function.addParameter('z', dtype='float64', direction=function.IN, description = "The initial position vector of the particle")
        function.addParameter('npoints', dtype='i', direction=function.LENGTH)
        function.result_type = 'int32'
        return function

    @legacy_function
    def delete_particle():
        # Vectorised removal by particle index.
        function = LegacyFunctionSpecification()
        function.must_handle_array = True
        function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
        function.addParameter('npoints', dtype='i', direction=function.LENGTH)
        function.result_type = 'int32'
        return function

    @legacy_function
    def commit_particles():
        function = LegacyFunctionSpecification()
        function.result_type = 'int32'
        return function

    @legacy_function
    def recommit_particles():
        function = LegacyFunctionSpecification()
        function.result_type = 'int32'
        return function

    @legacy_function
    def get_eps2():
        # Gravitational softening length squared (getter).
        function = LegacyFunctionSpecification()
        function.addParameter('epsilon_squared', dtype='float64', direction=function.OUT)
        function.result_type = 'int32'
        return function

    @legacy_function
    def set_eps2():
        # Gravitational softening length squared (setter).
        function = LegacyFunctionSpecification()
        function.addParameter('epsilon_squared', dtype='float64', direction=function.IN)
        function.result_type = 'int32'
        return function

    @legacy_function
    def get_potential_energy():
        function = LegacyFunctionSpecification()
        function.addParameter('potential_energy', dtype='float64', direction=function.OUT, unit=nbody_system.energy)
        function.result_type = 'int32'
        return function

    @legacy_function
    def get_mass():
        """
        Retrieve the mass of a particle. Mass is a scalar property of a particle,
        this function has one OUT argument.
        """
        function = LegacyFunctionSpecification()
        function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN,
            description = "Index of the particle to get the state from. This index must have been returned by an earlier call to :meth:`new_particle`")
        function.addParameter('mass', dtype='float64', direction=function.OUT, description = "The current mass of the particle")
        function.addParameter('npoints', dtype='i', direction=function.LENGTH)
        function.result_type = 'int32'
        function.must_handle_array = True
        function.result_doc = """
        0 - OK
            particle was removed from the model
        -1 - ERROR
            particle could not be found
        """
        return function

    @legacy_function
    def set_mass():
        """
        Update the mass of a particle. Mass is a scalar property of a particle.
        """
        function = LegacyFunctionSpecification()
        function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN,
            description = "Index of the particle for which the state is to be updated. This index must have been returned by an earlier call to :meth:`new_particle`")
        function.addParameter('mass', dtype='float64', direction=function.IN, description = "The new mass of the particle")
        function.addParameter('npoints', dtype='i', direction=function.LENGTH)
        function.result_type = 'int32'
        function.must_handle_array = True
        function.result_doc = """
        0 - OK
            particle was found in the model and the information was set
        -1 - ERROR
            particle could not be found
        -2 - ERROR
            code does not support updating of a particle
        """
        return function
class FastKickDoc(object):
    """Descriptor that composes the documentation string of a FastKick
    instance from its legacy-interface docstring and its generated
    parameter documentation."""

    def __get__(self, instance, owner):
        # str.join raises TypeError for non-string parts, matching the
        # behaviour of the '+' concatenation it replaces.
        return "\n\n".join((instance.legacy_doc, instance.parameters.__doc__))
class FastKick(CommonCode, GravityFieldCode):
    # The class docstring is built lazily per instance: legacy interface
    # doc plus the generated parameter documentation.
    __doc__ = FastKickDoc()

    def __init__(self, unit_converter = None, **options):
        # unit_converter: optional converter between SI and the code's
        # N-body units; applied in define_converter below.
        self.unit_converter = unit_converter
        legacy_interface = FastKickInterface(**options)
        self.legacy_doc = legacy_interface.__doc__
        CommonCode.__init__(self, legacy_interface, **options)

    def define_methods(self, handler):
        # Attach units/error handling to the low-level interface calls.
        CommonCode.define_methods(self, handler)
        handler.add_method("new_particle", [nbody_system.mass] + [nbody_system.length]*3,
            (handler.INDEX, handler.ERROR_CODE))
        handler.add_method(
            "get_eps2",
            (),
            (nbody_system.length * nbody_system.length, handler.ERROR_CODE,)
        )
        handler.add_method(
            "set_eps2",
            (nbody_system.length * nbody_system.length, ),
            (handler.ERROR_CODE,)
        )
        handler.add_method(
            "set_mass",
            (
                handler.NO_UNIT,
                nbody_system.mass,
            ),
            (
                handler.ERROR_CODE
            )
        )
        handler.add_method(
            "get_mass",
            (
                handler.NO_UNIT,
            ),
            (
                nbody_system.mass,
                handler.ERROR_CODE
            )
        )

    def define_state(self, handler):
        # State machine: INITIALIZED -> EDIT (commit_parameters),
        # EDIT <-> RUN via commit/recommit_particles, with
        # CHANGE_PARAMETERS_* detours for parameter updates.
        CommonCode.define_state(self, handler)
        handler.add_transition('END', 'INITIALIZED', 'initialize_code', False)
        handler.add_transition('INITIALIZED', 'EDIT', 'commit_parameters')
        handler.add_transition('RUN', 'CHANGE_PARAMETERS_RUN', 'before_set_parameter', False)
        handler.add_transition('EDIT', 'CHANGE_PARAMETERS_EDIT', 'before_set_parameter', False)
        handler.add_transition('UPDATE', 'CHANGE_PARAMETERS_UPDATE', 'before_set_parameter', False)
        handler.add_transition('CHANGE_PARAMETERS_RUN', 'RUN', 'recommit_parameters')
        handler.add_transition('CHANGE_PARAMETERS_EDIT', 'EDIT', 'recommit_parameters')
        handler.add_transition('CHANGE_PARAMETERS_UPDATE', 'UPDATE', 'recommit_parameters')
        handler.add_method('CHANGE_PARAMETERS_RUN', 'before_set_parameter')
        handler.add_method('CHANGE_PARAMETERS_EDIT', 'before_set_parameter')
        handler.add_method('CHANGE_PARAMETERS_UPDATE', 'before_set_parameter')
        handler.add_method('CHANGE_PARAMETERS_RUN', 'before_get_parameter')
        handler.add_method('CHANGE_PARAMETERS_EDIT', 'before_get_parameter')
        handler.add_method('CHANGE_PARAMETERS_UPDATE', 'before_get_parameter')
        handler.add_method('RUN', 'before_get_parameter')
        handler.add_method('EDIT', 'before_get_parameter')
        handler.add_method('UPDATE','before_get_parameter')
        handler.add_method('EDIT', 'new_particle')
        handler.add_method('EDIT', 'delete_particle')
        handler.add_method('UPDATE', 'new_particle')
        handler.add_method('UPDATE', 'delete_particle')
        handler.add_transition('EDIT', 'RUN', 'commit_particles')
        handler.add_transition('RUN', 'UPDATE', 'new_particle', False)
        handler.add_transition('RUN', 'UPDATE', 'delete_particle', False)
        handler.add_transition('UPDATE', 'RUN', 'recommit_particles')
        GravityFieldCode.define_state(self, handler)
        handler.add_method('RUN', 'get_potential_energy')

    def define_converter(self, handler):
        # Only install a converter when one was supplied at construction.
        if not self.unit_converter is None:
            handler.set_converter(self.unit_converter.as_converter_from_si_to_generic())

    def commit_parameters(self):
        # Push default and cached parameter values to the worker first.
        self.parameters.send_not_set_parameters_to_code()
        self.parameters.send_cached_parameters_to_code()
        self.overridden().commit_parameters()

    def cleanup_code(self):
        self.overridden().cleanup_code()
        handler = self.get_handler('PARTICLES')
        handler._cleanup_instances()

    def reset(self):
        # Re-initialize the code while preserving the current parameters.
        parameters = self.parameters.copy()
        self.cleanup_code()
        self.initialize_code()
        self.parameters.reset_from_memento(parameters)

    def define_parameters(self, handler):
        handler.add_method_parameter(
            "get_eps2",
            "set_eps2",
            "epsilon_squared",
            "smoothing parameter for gravity calculations",
            default_value = 0.0 | nbody_system.length * nbody_system.length
        )

    def define_particle_sets(self, handler):
        handler.define_set('particles', 'index_of_the_particle')
        handler.set_new('particles', 'new_particle')
        handler.set_delete('particles', 'delete_particle')
        handler.add_setter('particles', 'set_mass')
        handler.add_getter('particles', 'get_mass', names = ('mass',))

    def define_properties(self, handler):
        handler.add_property("get_potential_energy")

# Backwards-compatible alias (historical lower-case spelling).
Fastkick = FastKick
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@community@fastkick@interface.py@.PATH_END.py
|
{
"filename": "axion_density_profile.py",
"repo_name": "SophieMLV/axionHMcode",
"repo_path": "axionHMcode_extracted/axionHMcode-master/halo_model/axion_density_profile.py",
"type": "Python"
}
|
"""functions for the density profile of axions"""
import numpy as np
from scipy import optimize, integrate
from scipy.interpolate import interp1d
from astropy import constants as const
from cosmology.variance import *
from cosmology.overdensities import *
from cosmology.basic_cosmology import *
from HMcode_params import *
from cold_density_profile import *
def func_core_radius(M, cosmo_dic):
    """
    Soliton core radius following eq. 8 of https://arxiv.org/abs/2007.08256.

    M is the halo mass in solar_mass/h; the returned core radius is in
    Mpc/h, including the (1+z)^(-1/2) redshift scaling.
    """
    MPC_IN_M = 3.086e+22
    G_si = const.G.to('m**3/(Msun*s**2)').value
    # Total (cold + axion) halo mass; the prefactor rescales the cold mass.
    mass_scale = 1 + cosmo_dic['omega_ax_0']/cosmo_dic['omega_db_0']
    mass_total = mass_scale * M/cosmo_dic['h']  # in solar_mass
    # Virial radius of the total halo, converted from Mpc/h to metres.
    radius_vir = func_r_vir(mass_scale * M, cosmo_dic, cosmo_dic['Omega_m_0']) / cosmo_dic['h'] * MPC_IN_M  # in m
    vel_vir = np.sqrt(G_si*mass_total/radius_vir)  # in m/s
    mass_ax_kg = cosmo_dic['m_ax'] * 1.78266269594644e-36  # axion mass, eV -> kg
    core_radius_m = 2 * np.pi * const.hbar.value / (7.5 * mass_ax_kg * vel_vir)  # in m
    return core_radius_m / (3.086e+22) * cosmo_dic['h'] * (1+cosmo_dic['z'])**(-1./2.)  # in Mpc/h
def func_rho_soliton(r, M, cosmo_dic, rho_central_param):
    """
    Soliton density profile for axions as in https://arxiv.org/abs/1407.7762
    eq. 3, but with the core radius from func_core_radius.

    r in Mpc/h, M in solar_mass/h, axion mass cosmo_dic['m_ax'] in eV.
    rho_central_param rescales the central density; it is fixed in
    func_central_density_param so the axion halo mass matches the cosmic
    axion fraction. Returns the density in solar_mass/pc^3 * h^2; for
    array M the result has shape (len(M), len(r)).
    """
    m_ax = cosmo_dic['m_ax']
    z = cosmo_dic['z']
    A = (1+z) * 0.019 * (m_ax/1e-22)**(-2)
    # The fitting formula expects radii in kpc (no h).
    x_c = func_core_radius(M, cosmo_dic) * 1e3 / cosmo_dic['h']
    r_in_formula = r * 1e3 / cosmo_dic['h']
    # "isinstance(...) == True" replaced by the plain boolean test.
    if isinstance(M, (int, float)):
        # Scalar halo mass: broadcast directly over r.
        return A * rho_central_param / ( x_c**4 * (1 + 0.091 * (r_in_formula/x_c)**2)**8 )\
               * cosmo_dic['h']**2 * 1e18 # transform from solar_mass/pc^3 to solar_mass/pc^3 * h^2
    else:
        # Array of halo masses: outer products give shape (len(M), len(r)).
        return A * rho_central_param / ( np.outer(x_c, np.ones(len(r))) **4 * (1 + 0.091 * np.outer(1/x_c, r_in_formula)**2)**8 ) \
               * cosmo_dic['h']**2 * 1e18 # transform from solar_mass/pc^3 to solar_mass/pc^3 * h^2
def func_dens_profile_ax(r_arr, M, cosmo_dic, power_spec_dic_sigma, M_cut, rho_central_param, eta_given=False):
    """
    Axion density profile: a soliton core matched onto an NFW profile in
    the outer region (see masterthesis sec. 5.2.3).

    r_arr in Mpc/h, M and M_cut in solar_mass/h. rho_central_param is the
    free central-density parameter, fixed in func_central_density_param;
    haloes below M_cut (or with a zero parameter) host no axion halo and
    return zeros. Returns the profile (list of profiles when M is an array).
    NOTE(review): func_rho_soliton documents solar_mass/pc^3 * h^2 while
    this docstring originally claimed solar_mass/Mpc^3 * h^2 — verify the
    unit convention against the NFW branch.
    """
    #distinguish whether M is an array or a scalar
    #("isinstance(...) == True" replaced by the plain boolean test)
    if isinstance(M, (int, float)):
        #there is no axion halo, if the cold halo is below a cut-off
        if rho_central_param == 0 or M_cut > M:
            if isinstance(r_arr, (int, float)):
                return 0.0
            else:
                return np.zeros(len(r_arr))
        else:
            hmcode_params = HMCode_param_dic(cosmo_dic, power_spec_dic_sigma['k'], power_spec_dic_sigma['cold'])
            #outer region: rescaled cold NFW profile
            NFW = cosmo_dic['omega_ax_0']/cosmo_dic['omega_db_0'] * \
                  NFW_profile(M, r_arr, power_spec_dic_sigma['k'], power_spec_dic_sigma['cold'], cosmo_dic,
                              hmcode_params, cosmo_dic['Omega_db_0'], cosmo_dic['Omega_db_0'], eta_given = eta_given)
            soliton = func_rho_soliton(r_arr, M, cosmo_dic, rho_central_param)
            idx_arr = np.argwhere(np.diff(np.sign(NFW - soliton))).flatten() #found the intersection points
            if len(idx_arr)<=0:
                #no intersection: the soliton dominates everywhere
                return soliton
            else:
                #soliton inside the (outermost) intersection, NFW outside
                return np.where(r_arr > r_arr[idx_arr[-1]], NFW, soliton)
    else:
        return_arr = []
        #HMcode parameters are mass-independent, so compute them once
        hmcode_params = HMCode_param_dic(cosmo_dic, power_spec_dic_sigma['k'], power_spec_dic_sigma['cold'])
        for idx, m in enumerate(M):
            if rho_central_param[idx] == 0 or M_cut > m:
                if isinstance(r_arr, (int, float)):
                    return_arr.append(0.0)
                else:
                    return_arr.append(np.zeros(len(r_arr)))
            else:
                NFW = cosmo_dic['omega_ax_0']/cosmo_dic['omega_db_0'] * \
                      NFW_profile(m, r_arr, power_spec_dic_sigma['k'], power_spec_dic_sigma['cold'], cosmo_dic,
                                  hmcode_params, cosmo_dic['Omega_db_0'], cosmo_dic['Omega_db_0'], eta_given = eta_given)
                soliton = func_rho_soliton(r_arr, m, cosmo_dic, rho_central_param[idx])
                idx_arr = np.argwhere(np.diff(np.sign(NFW - soliton))).flatten() #found the intersection points
                if len(idx_arr)<=0:
                    return_arr.append(soliton)
                else:
                    return_arr.append(np.where(r_arr > r_arr[idx_arr[-1]], NFW, soliton))
        return return_arr
def func_ax_halo_mass(M, cosmo_dic, power_spec_dic_sigma, M_cut, rho_central_param, eta_given=False):
    """
    Axion halo mass in solar_mass/h, obtained by integrating the axion
    density profile out to the virial radius.

    M and M_cut in solar_mass/h. rho_central_param is the central-density
    parameter from func_central_density_param (scalar for scalar M, one
    entry per mass for array M).
    """
    def _halo_mass_single(mass, dens_param):
        # 4*pi * int_0^r_vir rho_ax(r) r^2 dr on a log-spaced radial grid.
        # The grid is built once here; the original array branch rebuilt
        # the identical geomspace three times per mass.
        r_vir = func_r_vir(mass, cosmo_dic, cosmo_dic['Omega_db_0'])
        r_arr = np.geomspace(1e-15, r_vir, num=1000)
        integrand = func_dens_profile_ax(r_arr, mass, cosmo_dic, power_spec_dic_sigma, M_cut, dens_param, eta_given=eta_given) * r_arr**2
        return 4 * np.pi * integrate.simps(y=integrand, x = r_arr)

    #distinguish whether M is an array or a scalar
    if isinstance(M, (int, float)):
        return _halo_mass_single(M, rho_central_param)
    else:
        return [_halo_mass_single(M[i], rho_central_param[i]) for i in range(len(M))]
def func_central_density_param(M, cosmo_dic, power_spec_dic_sigma, M_cut, eta_given=False):
    """
    M and M_cut in solar_mass/h.
    The central density of the soliton profile has to be changed in such
    a way that the total mass of the axion halo matches the abundance,
    i.e. M_ax_halo = Omega_ax/Omega_cold * M_cold_halo.
    Returns the central-density parameter (scalar M) or a list of
    parameters (array M); 0 marks "no axion halo / no solution found".
    """
    #distinguish whether M is an array or a scalar
    if isinstance(M, (int, float)) == True:
        if M < M_cut:
            #below the cut-off mass no axion halo forms
            return np.array(0.0)
        else:
            r_c = func_core_radius(M, cosmo_dic)
            hmcode = HMCode_param_dic(cosmo_dic, power_spec_dic_sigma['k'], power_spec_dic_sigma['cold'])
            #need a guess to find the correct central_dens_param:
            #guess is set via Omega_ax/Omega_cold * M = int_0_rvir \rho *r^2 dr
            #so we need the soliton and NFW part
            def integrand_ax(x):
                return func_dens_profile_ax(x, M, cosmo_dic, power_spec_dic_sigma, M_cut, 1., eta_given=eta_given)*x**2
            integral_soliton = integrate.quad(integrand_ax, 0, r_c)[0]
            #NOTE(review): this scalar branch integrates the NFW part on
            #geomspace(1e-10, 2*r_c) while the array branch below uses
            #geomspace(1e-15, r_c) — confirm which grid is intended.
            r_arr = np.geomspace(1e-10 , 2*r_c, 1000)
            integrand_cold = NFW_profile(M, r_arr, power_spec_dic_sigma['k'], power_spec_dic_sigma['cold'], cosmo_dic, hmcode, cosmo_dic['Omega_db_0'],
                                         cosmo_dic['Omega_db_0'], eta_given = eta_given) \
                             *r_arr**2 * cosmo_dic['Omega_ax_0']/cosmo_dic['Omega_db_0']
            integral_NFW = integrate.simps(y=integrand_cold, x = r_arr)
            #NOTE(review): the guess here is (M + integral_NFW)/integral_soliton,
            #but the array branch uses integral_NFW/integral_soliton — verify.
            guess = (M + integral_NFW) / integral_soliton
            #find the central density parameter by solving the eq:
            #M_ax_halo = Omega_ax/Omega_cold * M_cold_halo
            def func_find_root(dens):
                return func_ax_halo_mass(M, cosmo_dic, power_spec_dic_sigma, M_cut, dens, eta_given=eta_given) - cosmo_dic['Omega_ax_0']/cosmo_dic['Omega_db_0'] * M
            dens_param = optimize.root(func_find_root, x0 = guess).x
            #sometimes the solution is not really a solution,
            #so set the central density parameter to zero, ie no solution can be found
            if np.abs(guess - dens_param) > 100.:
                return 0.
            else:
                return float(dens_param)
    else:
        dens_param_arr = []
        r_c = func_core_radius(M, cosmo_dic)
        hmcode = HMCode_param_dic(cosmo_dic, power_spec_dic_sigma['k'], power_spec_dic_sigma['cold'])
        for idx, m in enumerate(M):
            if m < M_cut:
                #below the cut-off mass no axion halo forms
                dens_param_arr.append(0.)
            else:
                #need a guess to find the correct central_dens_param:
                #guess is set via Omega_ax/Omega_cold * M = int_0_rvir \rho *r^2 dr
                #so we need the soliton and NFW part
                def integrand_ax(x):
                    return func_dens_profile_ax(x, m, cosmo_dic, power_spec_dic_sigma, M_cut, 1., eta_given=eta_given)*x**2
                integral_soliton = integrate.quad(integrand_ax, 0, r_c[idx])[0]
                r_arr = np.geomspace(1e-15 , r_c[idx], 1000)
                integrand_cold = NFW_profile(m, r_arr, power_spec_dic_sigma['k'], power_spec_dic_sigma['cold'], cosmo_dic, hmcode, cosmo_dic['Omega_db_0'],
                                             cosmo_dic['Omega_db_0'], eta_given = eta_given)*r_arr**2 * cosmo_dic['Omega_ax_0']/cosmo_dic['Omega_db_0']
                integral_NFW = integrate.simps(y=integrand_cold, x = r_arr)
                guess = integral_NFW / integral_soliton
                #find the central density parameter by solving the eq:
                #M_ax_halo = Omega_ax/Omega_cold * M_cold_halo
                def func_find_root(dens):
                    return func_ax_halo_mass(m, cosmo_dic, power_spec_dic_sigma, M_cut, dens, eta_given=eta_given) - cosmo_dic['Omega_ax_0']/cosmo_dic['Omega_db_0'] * m
                dens_param = optimize.root(func_find_root, x0 = guess).x
                #sometimes the solution is not really a solution,
                #so set the central density parameter to zero, ie no solution can be found
                if np.abs(guess - dens_param) > 100:
                    dens_param_arr.append(0.)
                else:
                    dens_param_arr.append(float(dens_param))
        return dens_param_arr
def func_dens_profile_ax_kspace(k, M, cosmo_dic, power_spec_dic_sigma, M_cut, central_dens_param, eta_given=False):
    """
    Normalised axion density profile in Fourier space (dimensionless).

    k in h/Mpc, M and M_cut in solar_mass/h. central_dens_param is the
    central-density parameter from func_central_density_param.
    The k-space profile is rho(k) = 4 pi int_0^r_vir rho(r) r^2 sin(kr)/(kr) dr,
    normalised by the axion halo mass; haloes with zero axion mass get a
    zero profile. Returns a list over k (scalar M) or a list of such
    lists (array M).
    """
    #NOTE(review): eta_given is not forwarded to func_ax_halo_mass here
    #(it defaults to False) although it is forwarded to
    #func_dens_profile_ax below — confirm whether this is intended.
    M_ax = func_ax_halo_mass(M, cosmo_dic, power_spec_dic_sigma, M_cut, central_dens_param)
    r_vir = func_r_vir(M, cosmo_dic, cosmo_dic['Omega_db_0'])
    #distinguish whether M is an array or a scalar
    #("isinstance(...) == True" replaced by the plain boolean test)
    if isinstance(M, (int, float)):
        r_arr = np.geomspace(1e-15, r_vir, num=1000)
        dens_profile_arr = func_dens_profile_ax(r_arr, M, cosmo_dic, power_spec_dic_sigma, M_cut, central_dens_param, eta_given=eta_given) \
                           * r_arr**2 * np.sin(np.outer(k, r_arr)) / np.outer(k, r_arr)
        return list(4 * np.pi * integrate.simps(y=dens_profile_arr, x = r_arr, axis=-1) / M_ax)
    else:
        dens_profile_kspace_arr = []
        for idx, m in enumerate(M):
            if M_ax[idx] == 0:
                #no axion halo: avoid dividing by the zero halo mass
                dens_profile_kspace_arr.append(list(np.zeros(len(k))))
            else:
                r_arr = np.geomspace(1e-15, r_vir[idx], num=1000)
                dens_profile_arr = func_dens_profile_ax(r_arr, m, cosmo_dic, power_spec_dic_sigma, M_cut, central_dens_param[idx], eta_given=eta_given) \
                                   * r_arr**2 * np.sin(np.outer(k, r_arr)) / np.outer(k, r_arr)
                dens_kspace = list(4 * np.pi * integrate.simps(y=dens_profile_arr, x = r_arr, axis=-1) / M_ax[idx] )
                dens_profile_kspace_arr.append(dens_kspace)
        return dens_profile_kspace_arr
|
SophieMLVREPO_NAMEaxionHMcodePATH_START.@axionHMcode_extracted@axionHMcode-master@halo_model@axion_density_profile.py@.PATH_END.py
|
{
"filename": "_shadowsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/hoverlabel/font/_shadowsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `shadowsrc` property of `isosurface.hoverlabel.font`."""

    def __init__(
        self,
        plotly_name="shadowsrc",
        parent_name="isosurface.hoverlabel.font",
        **kwargs,
    ):
        # Allow callers to override edit_type; default to "none".
        edit_type = kwargs.pop("edit_type", "none")
        super(ShadowsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@hoverlabel@font@_shadowsrc.py@.PATH_END.py
|
{
"filename": "_regionprops_utils.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/skimage/measure/_regionprops_utils.py",
"type": "Python"
}
|
from math import sqrt
from numbers import Real
import numpy as np
from scipy import ndimage as ndi
# 4-connected and 8-connected 2D structuring elements.
STREL_4 = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
STREL_8 = np.ones((3, 3), dtype=np.uint8)

# Coefficients from
# Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of Discretized Sets
# - On the Choice of Adjacency in Homogeneous Lattices.
# In: Mecke K., Stoyan D. (eds) Morphology of Condensed Matter. Lecture Notes
# in Physics, vol 600. Springer, Berlin, Heidelberg.
# Each value is the contribution of one encoded local pixel/voxel
# configuration to the Euler number; summing the LUT values over all
# configurations gives the Euler number via an integral-geometry formula
# (see the paper by Ohser et al. for details).
EULER_COEFS2D_4 = [0, 1, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0]
EULER_COEFS2D_8 = [0, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, -1, 0]
# fmt: off
EULER_COEFS3D_26 = np.array([
     0,  1,  1,  0,  1,  0, -2, -1,  1, -2,  0, -1,  0, -1, -1,  0,
     1,  0, -2, -1, -2, -1, -1, -2, -6, -3, -3, -2, -3, -2,  0, -1,
     1, -2,  0, -1, -6, -3, -3, -2, -2, -1, -1, -2, -3,  0, -2, -1,
     0, -1, -1,  0, -3, -2,  0, -1, -3,  0, -2, -1,  0,  1,  1,  0,
     1, -2, -6, -3,  0, -1, -3, -2, -2, -1, -3,  0, -1, -2, -2, -1,
     0, -1, -3, -2, -1,  0,  0, -1, -3,  0,  0,  1, -2, -1,  1,  0,
    -2, -1, -3,  0, -3,  0,  0,  1, -1,  4,  0,  3,  0,  3,  1,  2,
    -1, -2, -2, -1, -2, -1,  1,  0,  0,  3,  1,  2,  1,  2,  2,  1,
     1, -6, -2, -3, -2, -3, -1,  0,  0, -3, -1, -2, -1, -2, -2, -1,
    -2, -3, -1,  0, -1,  0,  4,  3, -3,  0,  0,  1,  0,  1,  3,  2,
     0, -3, -1, -2, -3,  0,  0,  1, -1,  0,  0, -1, -2,  1, -1,  0,
    -1, -2, -2, -1,  0,  1,  3,  2, -2,  1, -1,  0,  1,  2,  2,  1,
     0, -3, -3,  0, -1, -2,  0,  1, -1,  0, -2,  1,  0, -1, -1,  0,
    -1, -2,  0,  1, -2, -1,  3,  2, -2,  1,  1,  2, -1,  0,  2,  1,
    -1,  0, -2,  1, -2,  1,  1,  2, -2,  3, -1,  2, -1,  2,  0,  1,
     0, -1, -1,  0, -1,  0,  2,  1, -1,  2,  0,  1,  0,  1,  1,  0,
])
# fmt: on
def euler_number(image, connectivity=None):
    """Calculate the Euler characteristic in binary image.

    For 2D objects, the Euler number is the number of objects minus the
    number of holes. For 3D objects, it is the number of objects plus the
    number of holes, minus the number of tunnels (loops).

    Parameters
    ----------
    image : (M, N[, P]) ndarray
        Input image. If image is not binary, all values greater than zero
        are considered as the object.
    connectivity : int, optional
        Maximum number of orthogonal hops to consider a pixel/voxel as a
        neighbor, ranging from 1 to ``image.ndim``. ``None`` selects full
        connectivity. 2D supports connectivity 1 (4-neighborhood) and 2
        (8-neighborhood); 3D supports 1 (6-neighborhood) and 3
        (26-neighborhood) -- connectivity 2 is not defined in 3D.

    Returns
    -------
    euler_number : int
        Euler characteristic of the set of all objects in the image.

    Notes
    -----
    The computation builds, for every pixel/voxel, an integer code for its
    local neighborhood configuration and sums per-configuration
    contributions from a LUT (coefficients of Ohser et al.), an
    integral-geometry formula in discretized space.

    References
    ----------
    .. [1] Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of
           Discretized Sets - On the Choice of Adjacency in Homogeneous
           Lattices. In: Mecke K., Stoyan D. (eds) Morphology of Condensed
           Matter. Lecture Notes in Physics, vol 600. Springer.
    """
    # Input may be a label image: binarize, then pad so objects touching
    # the border are handled by the same local-configuration machinery.
    binary = np.pad((image > 0).astype(int), pad_width=1, mode='constant')

    if connectivity is None:
        connectivity = binary.ndim

    # `kernel` encodes each local configuration as a unique integer
    # (0-15 in 2D, 0-255 in 3D, up to symmetries); `coefs` is the matching
    # LUT of per-configuration Euler contributions.
    if binary.ndim == 2:
        kernel = np.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]])
        coefs = EULER_COEFS2D_4 if connectivity == 1 else EULER_COEFS2D_8
        nbins = 16
    else:  # 3D images
        if connectivity == 2:
            raise NotImplementedError(
                'For 3D images, Euler number is implemented '
                'for connectivities 1 and 3 only'
            )
        kernel = np.array(
            [
                [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
                [[0, 0, 0], [0, 1, 4], [0, 2, 8]],
                [[0, 0, 0], [0, 16, 64], [0, 32, 128]],
            ]
        )
        # Connectivity 1 uses the reversed LUT (foreground/background swap).
        coefs = EULER_COEFS3D_26[::-1] if connectivity == 1 else EULER_COEFS3D_26
        nbins = 256

    codes = ndi.convolve(binary, kernel, mode='constant', cval=0)
    hist = np.bincount(codes.ravel(), minlength=nbins)

    # Euler number is the LUT-weighted configuration count; 3D needs the
    # additional 1/8 normalization factor.
    if binary.ndim == 2:
        return coefs @ hist
    return int(0.125 * coefs @ hist)
def perimeter(image, neighborhood=4):
    """Calculate total perimeter of all objects in binary image.

    Parameters
    ----------
    image : (M, N) ndarray
        Binary input image.
    neighborhood : 4 or 8, optional
        Neighborhood connectivity for border pixel determination. It is
        used to compute the contour; a higher neighborhood widens the
        border on which the perimeter is computed.

    Returns
    -------
    perimeter : float
        Total perimeter of all objects in binary image.

    References
    ----------
    .. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of
           a Perimeter Estimator. The Queen's University of Belfast.
    """
    if image.ndim != 2:
        raise NotImplementedError('`perimeter` supports 2D images only')

    strel = STREL_4 if neighborhood == 4 else STREL_8
    image = image.astype(np.uint8)
    # Border pixels = object pixels removed by a single erosion.
    border = image - ndi.binary_erosion(image, strel, border_value=0)

    # Per-configuration perimeter contributions, indexed by the convolution
    # code computed below (codes fit in [0, 50)).
    weights = np.zeros(50, dtype=np.float64)
    weights[[5, 7, 15, 17, 25, 27]] = 1
    weights[[21, 33]] = sqrt(2)
    weights[[13, 23]] = (1 + sqrt(2)) / 2

    codes = ndi.convolve(
        border,
        np.array([[10, 2, 10], [2, 1, 2], [10, 2, 10]]),
        mode='constant',
        cval=0,
    )

    # bincount + dot is measurably faster than weights[codes].sum().
    hist = np.bincount(codes.ravel(), minlength=50)
    return hist @ weights
def perimeter_crofton(image, directions=4):
    """Calculate total Crofton perimeter of all objects in binary image.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image. If image is not binary, all values greater than zero
        are considered as the object.
    directions : 2 or 4, optional
        Number of directions used to approximate the Crofton perimeter.
        4 (the default) should be more accurate than 2; computation time is
        the same in both cases.

    Returns
    -------
    perimeter : float
        Total perimeter of all objects in binary image.

    Notes
    -----
    Based on the Crofton formula, a measure from integral geometry defined
    via a double integral along all directions; in a discrete space, 2 or 4
    directions give a good approximation.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Crofton_formula
    """
    if image.ndim != 2:
        raise NotImplementedError('`perimeter_crofton` supports 2D images only')

    # Input may be a label image: binarize, then pad so border objects get
    # the same 2x2 configuration treatment as interior ones.
    binary = np.pad((image > 0).astype(np.uint8), pad_width=1, mode='constant')

    # Encode each 2x2 pixel configuration as an integer in [0, 16).
    codes = ndi.convolve(
        binary, np.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]]), mode='constant', cval=0
    )
    hist = np.bincount(codes.ravel(), minlength=16)

    # LUT of per-configuration perimeter contributions.
    sq2 = np.sqrt(2)
    if directions == 2:
        lut = [
            0, np.pi / 2, 0, 0,
            0, np.pi / 2, 0, 0,
            np.pi / 2, np.pi, 0, 0,
            np.pi / 2, np.pi, 0, 0,
        ]
    else:
        lut = [
            0, np.pi / 4 * (1 + 1 / sq2), np.pi / (4 * sq2), np.pi / (2 * sq2),
            0, np.pi / 4 * (1 + 1 / sq2), 0, np.pi / (4 * sq2),
            np.pi / 4, np.pi / 2, np.pi / (4 * sq2), np.pi / (4 * sq2),
            np.pi / 4, np.pi / 2, 0, 0,
        ]
    return lut @ hist
def _normalize_spacing(spacing, ndims):
"""Normalize spacing parameter.
The `spacing` parameter should be a sequence of numbers matching
the image dimensions. If `spacing` is a scalar, assume equal
spacing along all dimensions.
Parameters
---------
spacing : Any
User-provided `spacing` keyword.
ndims : int
Number of image dimensions.
Returns
-------
spacing : array
Corrected spacing.
Raises
------
ValueError
If `spacing` is invalid.
"""
spacing = np.array(spacing)
if spacing.shape == ():
spacing = np.broadcast_to(spacing, shape=(ndims,))
elif spacing.shape != (ndims,):
raise ValueError(
f"spacing isn't a scalar nor a sequence of shape {(ndims,)}, got {spacing}."
)
if not all(isinstance(s, Real) for s in spacing):
raise TypeError(
f"Element of spacing isn't float or integer type, got {spacing}."
)
if not all(np.isfinite(spacing)):
raise ValueError(
f"Invalid spacing parameter. All elements must be finite, got {spacing}."
)
return spacing
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@skimage@measure@_regionprops_utils.py@.PATH_END.py
|
{
"filename": "function_parameter_canonicalizer_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/util/function_parameter_canonicalizer_test.py",
"type": "Python"
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tensorflow::FunctionParameterCanonicalizer`."""
from tensorflow.python.platform import test
from tensorflow.python.util import _function_parameter_canonicalizer_binding_for_test
class FunctionParameterCanonicalizerTest(test.TestCase):
  """Tests for the FunctionParameterCanonicalizer test binding."""

  def setUp(self):
    super().setUp()
    # Canonicalizer mirroring a matmul-like signature: 9 parameters, the
    # last 7 of which have defaults.
    self._matmul_func = (
        _function_parameter_canonicalizer_binding_for_test
        .FunctionParameterCanonicalizer([
            'a', 'b', 'transpose_a', 'transpose_b', 'adjoint_a', 'adjoint_b',
            'a_is_sparse', 'b_is_sparse', 'name'
        ], (False, False, False, False, False, False, None)))

  def testPosOnly(self):
    result = self._matmul_func.canonicalize(2, 3)
    self.assertEqual(
        result, [2, 3, False, False, False, False, False, False, None])

  def testPosOnly2(self):
    result = self._matmul_func.canonicalize(2, 3, True, False, True)
    self.assertEqual(
        result, [2, 3, True, False, True, False, False, False, None])

  def testPosAndKwd(self):
    result = self._matmul_func.canonicalize(
        2, 3, transpose_a=True, name='my_matmul')
    self.assertEqual(
        result, [2, 3, True, False, False, False, False, False, 'my_matmul'])

  def testPosAndKwd2(self):
    result = self._matmul_func.canonicalize(2, b=3)
    self.assertEqual(
        result, [2, 3, False, False, False, False, False, False, None])

  def testMissingPos(self):
    with self.assertRaisesRegex(TypeError,
                                'Missing required positional argument'):
      self._matmul_func.canonicalize(2)

  def testMissingPos2(self):
    with self.assertRaisesRegex(TypeError,
                                'Missing required positional argument'):
      self._matmul_func.canonicalize(
          transpose_a=True, transpose_b=True, adjoint_a=True)

  def testTooManyArgs(self):
    with self.assertRaisesRegex(
        TypeError,
        'Too many arguments were given. Expected 9 but got 10.'):
      self._matmul_func.canonicalize(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)

  def testInvalidKwd(self):
    with self.assertRaisesRegex(TypeError,
                                'Got an unexpected keyword argument'):
      self._matmul_func.canonicalize(2, 3, hohoho=True)

  def testDuplicatedArg(self):
    with self.assertRaisesRegex(TypeError,
                                "Got multiple values for argument 'b'"):
      self._matmul_func.canonicalize(2, 3, False, b=4)

  def testDuplicatedArg2(self):
    with self.assertRaisesRegex(
        TypeError, "Got multiple values for argument 'transpose_a'"):
      self._matmul_func.canonicalize(2, 3, False, transpose_a=True)

  def testKwargNotInterned(self):
    # Build the keyword at runtime so the string object is not interned;
    # the canonicalizer must still match it against the parameter name.
    func = (
        _function_parameter_canonicalizer_binding_for_test
        .FunctionParameterCanonicalizer(['long_parameter_name'], ()))
    key = '_'.join(['long', 'parameter', 'name'])
    func.canonicalize(**{key: 5})
# Run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@util@function_parameter_canonicalizer_test.py@.PATH_END.py
|
{
"filename": "initialpower.py",
"repo_name": "SBU-COSMOLIKE/CAMBLateDE",
"repo_path": "CAMBLateDE_extracted/CAMBLateDE-main/camb/initialpower.py",
"type": "Python"
}
|
# Initial power spectrum parameters
from .baseconfig import F2003Class, CAMBError, fortran_class, \
c_int, c_double, POINTER, byref, numpy_1d, np
# Names of the supported tensor parameterizations, in the same order as the
# 1-based integer codes defined below.
tensor_parameterization_names = ["tensor_param_indeptilt", "tensor_param_rpivot", "tensor_param_AT"]
# Integer codes for the tensor parameterization options.
tensor_param_indeptilt = 1
tensor_param_rpivot = 2
tensor_param_AT = 3
class InitialPower(F2003Class):
    """
    Abstract base class for initial power spectrum classes
    """
    # Name of the Fortran module backing this class hierarchy.
    _fortran_class_module_ = 'InitialPower'

    def set_params(self):
        # No-op default; concrete subclasses override with real parameters.
        pass
@fortran_class
class SplinedInitialPower(InitialPower):
    """
    Object to store a generic primordial spectrum set from a set of sampled k_i, P(k_i) values
    """
    _fortran_class_name_ = 'TSplinedInitialPower'

    # ctypes fields mirrored from the Fortran derived type.
    _fields_ = [
        ('effective_ns_for_nonlinear', c_double, "Effective n_s to use for approximate non-linear correction models")]

    # Fortran methods bound onto this class (exposed as self.f_<Name>).
    _methods_ = [('HasTensors', [], c_int),
                 ('SetScalarTable', [POINTER(c_int), numpy_1d, numpy_1d]),
                 ('SetTensorTable', [POINTER(c_int), numpy_1d, numpy_1d]),
                 ('SetScalarLogRegular', [POINTER(c_double), POINTER(c_double), POINTER(c_int), numpy_1d]),
                 ('SetTensorLogRegular', [POINTER(c_double), POINTER(c_double), POINTER(c_int), numpy_1d])]

    def __init__(self, **kwargs):
        # Optionally initialize the scalar spectrum from 'ks'/'PK' keyword
        # arrays, and set the effective n_s if supplied.
        if kwargs.get('PK', None) is not None:
            self.set_scalar_table(kwargs['ks'], kwargs['PK'])
        ns_eff = kwargs.get('effective_ns_for_nonlinear', None)
        if ns_eff is not None:
            self.effective_ns_for_nonlinear = ns_eff

    def has_tensors(self):
        """
        Is the tensor spectrum set?

        :return: True if tensors
        """
        return self.f_HasTensors() != 0

    def set_scalar_table(self, k, PK):
        """
        Set arrays of k and P(k) values for cubic spline interpolation.
        Note that using :meth:`set_scalar_log_regular` may be better
        (faster, and easier to get fine enough spacing at low k)

        :param k: array of k values (Mpc^{-1})
        :param PK: array of scalar power spectrum values
        """
        self.f_SetScalarTable(byref(c_int(len(k))), np.ascontiguousarray(k, dtype=np.float64),
                              np.ascontiguousarray(PK, dtype=np.float64))

    def set_tensor_table(self, k, PK):
        """
        Set arrays of k and P_t(k) values for cubic spline interpolation

        :param k: array of k values (Mpc^{-1})
        :param PK: array of tensor power spectrum values
        """
        self.f_SetTensorTable(byref(c_int(len(k))), np.ascontiguousarray(k, dtype=np.float64),
                              np.ascontiguousarray(PK, dtype=np.float64))

    def set_scalar_log_regular(self, kmin, kmax, PK):
        """
        Set log-regular cubic spline interpolation for P(k)

        :param kmin: minimum k value (not minimum log(k))
        :param kmax: maximum k value (inclusive)
        :param PK: array of scalar power spectrum values, with PK[0]=P(kmin) and PK[-1]=P(kmax)
        """
        self.f_SetScalarLogRegular(byref(c_double(kmin)), byref(c_double(kmax)), byref(c_int(len(PK))),
                                   np.ascontiguousarray(PK, dtype=np.float64))

    def set_tensor_log_regular(self, kmin, kmax, PK):
        """
        Set log-regular cubic spline interpolation for tensor spectrum P_t(k)

        :param kmin: minimum k value (not minimum log(k))
        :param kmax: maximum k value (inclusive)
        :param PK: array of tensor power spectrum values, with PK[0]=P_t(kmin) and PK[-1]=P_t(kmax)
        """
        self.f_SetTensorLogRegular(byref(c_double(kmin)), byref(c_double(kmax)), byref(c_int(len(PK))),
                                   np.ascontiguousarray(PK, dtype=np.float64))
@fortran_class
class InitialPowerLaw(InitialPower):
    """
    Object to store parameters for the primordial power spectrum in the standard power law expansion.
    """
    # ctypes fields mirrored from the Fortran derived type; the first maps
    # the named tensor parameterizations onto 1-based integer codes.
    _fields_ = [
        ("tensor_parameterization", c_int, {"names": tensor_parameterization_names, "start": 1}),
        ("ns", c_double),
        ("nrun", c_double),
        ("nrunrun", c_double),
        ("nt", c_double),
        ("ntrun", c_double),
        ("r", c_double),
        ("pivot_scalar", c_double),
        ("pivot_tensor", c_double),
        ("As", c_double),
        ("At", c_double)
    ]

    _fortran_class_name_ = 'TInitialPowerLaw'

    def __init__(self, **kwargs):
        self.set_params(**kwargs)

    def set_params(self, As=2e-9, ns=0.96, nrun=0, nrunrun=0.0, r=0.0, nt=None, ntrun=0.0,
                   pivot_scalar=0.05, pivot_tensor=0.05, parameterization="tensor_param_rpivot"):
        r"""
        Set parameters using standard power law parameterization. If nt=None, uses inflation consistency relation.

        :param As: comoving curvature power at k=pivot_scalar (:math:`A_s`)
        :param ns: scalar spectral index :math:`n_s`
        :param nrun: running of scalar spectral index :math:`d n_s/d \log k`
        :param nrunrun: running of running of spectral index, :math:`d^2 n_s/d (\log k)^2`
        :param r: tensor to scalar ratio at pivot
        :param nt: tensor spectral index :math:`n_t`. If None, set using inflation consistency
        :param ntrun: running of tensor spectral index
        :param pivot_scalar: pivot scale for scalar spectrum
        :param pivot_tensor: pivot scale for tensor spectrum
        :param parameterization: See CAMB notes. One of
            - tensor_param_indeptilt = 1
            - tensor_param_rpivot = 2
            - tensor_param_AT = 3
        :return: self
        """
        if parameterization not in [tensor_param_rpivot, tensor_param_indeptilt, "tensor_param_rpivot",
                                    "tensor_param_indeptilt"]:
            raise CAMBError('Initial power parameterization not supported here')
        self.tensor_parameterization = parameterization
        self.As = As
        self.ns = ns
        self.nrun = nrun
        self.nrunrun = nrunrun
        if nt is None:
            # Set the tensor tilt from the inflation consistency relation;
            # this is only meaningful in the rpivot parameterization.
            if ntrun:
                raise CAMBError('ntrun set but using inflation consistency (nt=None)')
            # BUGFIX: this previously compared tensor_param_rpivot with
            # itself (always False), so the check never fired.
            if parameterization not in [tensor_param_rpivot, "tensor_param_rpivot"]:
                raise CAMBError('tensor parameterization not tensor_param_rpivot with inflation consistency')
            self.nt = - r / 8.0 * (2.0 - ns - r / 8.0)
            self.ntrun = r / 8.0 * (r / 8.0 + ns - 1)
        else:
            self.nt = nt
            self.ntrun = ntrun
        self.r = r
        self.pivot_scalar = pivot_scalar
        self.pivot_tensor = pivot_tensor
        return self

    def has_tensors(self):
        """
        Do these settings have non-zero tensors?

        :return: True if non-zero tensor amplitude
        """
        return self.r > 0
|
SBU-COSMOLIKEREPO_NAMECAMBLateDEPATH_START.@CAMBLateDE_extracted@CAMBLateDE-main@camb@initialpower.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/restricted/googletest/googletest/include/gtest/internal/custom/README.md",
"type": "Markdown"
}
|
# Customization Points
The custom directory is an injection point for custom user configurations.
## Header `gtest.h`
### The following macros can be defined:
* `GTEST_OS_STACK_TRACE_GETTER_` - The name of an implementation of
`OsStackTraceGetterInterface`.
* `GTEST_CUSTOM_TEMPDIR_FUNCTION_` - An override for `testing::TempDir()`. See
`testing::TempDir` for semantics and signature.
## Header `gtest-port.h`
The following macros can be defined:
### Logging:
* `GTEST_LOG_(severity)`
* `GTEST_CHECK_(condition)`
* Functions `LogToStderr()` and `FlushInfoLog()` have to be provided too.
### Threading:
* `GTEST_HAS_NOTIFICATION_` - Enabled if Notification is already provided.
* `GTEST_HAS_MUTEX_AND_THREAD_LOCAL_` - Enabled if `Mutex` and `ThreadLocal`
are already provided. Must also provide `GTEST_DECLARE_STATIC_MUTEX_(mutex)`
and `GTEST_DEFINE_STATIC_MUTEX_(mutex)`
* `GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)`
* `GTEST_LOCK_EXCLUDED_(locks)`
### Underlying library support features
* `GTEST_HAS_CXXABI_H_`
### Exporting API symbols:
* `GTEST_API_` - Specifier for exported symbols.
## Header `gtest-printers.h`
* See documentation at `gtest/gtest-printers.h` for details on how to define a
custom printer.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@restricted@googletest@googletest@include@gtest@internal@custom@README.md@.PATH_END.py
|
{
"filename": "PTObit.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/Obit/python/PTObit.py",
"type": "Python"
}
|
# Interactive routines to Obit use from ParselTongue
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2004,2005
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: bcotton@nrao.edu.
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import Obit, Table, FArray, OErr, InfoList, History, AIPSDir, OSystem
import Image, ImageDesc, TableList, ODisplay, UV, OWindow
import os, AIPS
# ParselTongue classes
import AIPSData, FITSData
# from PTObit import *
# ObitStart()
#global Adisk
# Module-level Obit state shared by the interactive helpers below.
err=OErr.OErr()      # global Obit error/message stack
ObitSys=None         # Obit system object, created at startup below
Adisk = 1            # default AIPS disk number
# Display connection
disp = ODisplay.ODisplay("ObitView", "ObitView", err)
#Initialize Obit system
# Get list of FITS disks
FITSdisks = []
for dsk in ["FITS","FITS01","FITS02","FITS03","FITS04","FITS05","FITS06"]:
    dir = os.getenv(dsk)   # NOTE: shadows builtin `dir` (pre-existing)
    if dir:
        FITSdisks.append(dir)
nFITS = len(FITSdisks)
# Get list of AIPS disks
AIPSdisks = []
for dsk in ["DA01","DA02","DA03","DA04","DA05","DA06","DA07","DA08","DA09","DA10"]:
    dir = os.getenv(dsk)
    if dir:
        AIPSdisks.append(dir)
nAIPS = len(AIPSdisks)
# Init Obit
userno = AIPS.AIPS.userno   # AIPS user number taken from ParselTongue
popsno = 1                  # session number passed to OSystem
ObitSys=OSystem.OSystem ("Interactive", popsno, userno, nAIPS, AIPSdisks, \
                         nFITS, FITSdisks, True, False, err)
OErr.printErrMsg(err, "Error with Obit startup")
def ShowErr(err=err):
    """
    Print any errors and clear stack

    * err = Python Obit Error/message stack, default of PTObit version
      (NB: the default is bound to the module-level stack at import time)
    """
    ################################################################
    # printErrMsg prints queued messages with the given label and clears
    # the stack.
    OErr.printErrMsg(err, "Error")
    # end ShowErr
def ClearErr(err=err):
    """
    Print any errors and clear stack

    Identical to ShowErr; kept as a separate name for interactive use.

    * err = Python Obit Error/message stack, default of PTObit version
    """
    ################################################################
    OErr.printErrMsg(err, "Error")
    # end ClearErr
def Acat(disk=None, first=1, last=1000):
    """
    Catalog listing of AIPS files on disk disk

    The module remembers the last disk accessed and uses it when disk
    is not given.

    * disk  = AIPS disk number to list; None -> last disk used
    * first = lowest slot number to list
    * last  = highest slot number to list
    """
    ################################################################
    # BUGFIX: `Adisk = disk` previously created a dead local, so the
    # "remembered disk" documented above never actually updated.
    global Adisk
    if disk is None:
        disk = Adisk
    Adisk = disk
    AIPSDir.PListDir(disk, err, first=first, last=last)
    OErr.printErrMsg(err, "Error with AIPS catalog")
    # end Acat
def AMcat(disk=None, first=1, last=1000):
    """
    Catalog listing of AIPS Image files on disk disk

    * disk  = AIPS disk number to list; None -> last disk used
    * first = lowest slot number to list
    * last  = highest slot number to list
    """
    ################################################################
    # BUGFIX: assignment to a dead local `Adisk` replaced with a real
    # update of the module-level remembered disk.
    global Adisk
    if disk is None:
        disk = Adisk
    Adisk = disk
    AIPSDir.PListDir(disk, err, type=AIPSDir.MAType, first=first, last=last)
    OErr.printErrMsg(err, "Error with AIPS catalog")
    # end AMcat
def AUcat(disk=None, first=1, last=1000):
    """
    Catalog listing of AIPS UV data files on disk disk

    * disk  = AIPS disk number to list; None -> last disk used
    * first = lowest slot number to list
    * last  = highest slot number to list
    """
    ################################################################
    # BUGFIX: assignment to a dead local `Adisk` replaced with a real
    # update of the module-level remembered disk.
    global Adisk
    if disk is None:
        disk = Adisk
    Adisk = disk
    AIPSDir.PListDir(disk, err, type=AIPSDir.UVType, first=first, last=last)
    OErr.printErrMsg(err, "Error with AIPS catalog")
    # end AUcat
def getname(cno, disk=None):
    """
    Return Obit object for AIPS file in cno on disk

    * cno  = AIPS catalog slot number
    * disk = AIPS disk number; None -> last disk used (remembered in Adisk)

    Raises TypeError if the catalog entry is neither an image ('MA') nor
    UV data ('UV').
    """
    ################################################################
    # BUGFIX: `Adisk = disk` previously created a dead local; now the
    # module-level remembered disk is actually updated.
    global Adisk
    if disk is None:
        disk = Adisk
    Adisk = disk
    user = AIPS.AIPS.userno
    s = AIPSDir.PInfo(disk, user, cno, err)
    OErr.printErrMsg(err, "Error with AIPS catalog")
    # Parse the fixed-width catalog string: name, class, seq, type.
    Aname = s[0:12]
    Aclass = s[13:19]
    Aseq = int(s[20:25])
    Atype = s[26:28]
    if Atype == 'MA':
        out = Image.newPAImage("AIPS image", Aname, Aclass, disk, Aseq, True, err)
        print("AIPS Image",Aname, Aclass, disk, Aseq)
    elif Atype == 'UV':
        out = UV.newPAUV("AIPS UV data", Aname, Aclass, disk, Aseq, True, err)
        print("AIPS UV",Aname, Aclass, disk, Aseq)
    else:
        # BUGFIX: previously fell through with `out` undefined -> NameError.
        raise TypeError("Unknown AIPS file type '" + Atype + "' in slot " + str(cno))
    out.Aname = Aname
    out.Aclass = Aclass
    out.Aseq = Aseq
    out.Atype = Atype
    out.Disk = disk
    out.Acno = cno
    return out
    # end getname
def getFITS(file, disk=Adisk, Ftype='Image'):
    """
    Return Obit object for FITS file in file on disk

    * file  = FITS file name
    * disk  = FITS disk number
    * Ftype = FITS data type: 'Image', 'UV'

    Raises TypeError for any other Ftype.
    """
    ################################################################
    if Ftype == 'Image':
        out = Image.newPFImage("FITS image", file, disk, True, err)
    elif Ftype == 'UV':
        out = UV.newPFUV("FITS UV data", file, disk, True, err)
    else:
        # BUGFIX: previously fell through with `out` undefined -> NameError.
        raise TypeError("Ftype must be 'Image' or 'UV', not " + str(Ftype))
    out.Fname = file
    out.Disk = disk
    out.Otype = Ftype
    return out
    # end getFITS
def tvlod(image, window=None):
    """
    display image

    * image  = Obit Image, created with getname, getFITS, or a
      ParselTongue AIPSImage/FITSImage
    * window = Optional window for image to edit
    """
    ################################################################
    # Native Obit image: display directly.
    if Image.PIsA(image):
        ODisplay.PImage(disp, image, err, window=window)
        return
    # ParselTongue objects: wrap in a temporary Obit image first.
    if image.__class__ == AIPSData.AIPSImage:
        obit_im = Image.newPAImage("AIPS Image", image.name, image.klass,
                                   image.disk, image.seq, True, err)
        ODisplay.PImage(disp, obit_im, err, window=window)
        del obit_im
    elif image.__class__ == FITSData.FITSImage:
        obit_im = Image.newPFImage("FITS Image", image.filename, image.disk, True, err)
        ODisplay.PImage(disp, obit_im, err, window=window)
        del obit_im
    # end tvlod
def window (image):
    """
    Make a window object for an image

    Returns OWindow object

    * image = Obit image object, or ParselTongue AIPSImage/FITSImage

    Raises TypeError for any other object.
    """
    ################################################################
    # BUGFIX: was Image.IsA; every other function in this module tests
    # Obit images with Image.PIsA (see tvlod, imhead).
    if Image.PIsA(image):
        # Obit/Image
        naxis = image.Desc.Dict["inaxes"][0:2]
    elif image.__class__==AIPSData.AIPSImage:
        # AIPS Image
        tmp = Image.newPAImage("AIPS Image",image.name, image.klass, image.disk, \
                               image.seq, True, err)
        naxis = tmp.Desc.Dict["inaxes"][0:2]
        del tmp
    elif image.__class__==FITSData.FITSImage:
        # FITS Image
        tmp = Image.newPFImage("FITS Image",image.filename, image.disk, True, err)
        naxis = tmp.Desc.Dict["inaxes"][0:2]
        del tmp
    else:
        # BUGFIX: previously fell through with `naxis` undefined -> NameError.
        raise TypeError("window: unrecognized image type")
    return OWindow.PCreate1("Window", naxis, err)
    # end window
def imhead (ObitObj):
    """
    List header

    * ObitObj = Obit or ParselTongue data object
    """
    ################################################################
    if ObitObj.__class__==AIPSData.AIPSImage:
        # AIPS Image
        tmp = Image.newPAImage("AIPS Image",ObitObj.name, ObitObj.klass, ObitObj.disk, \
                               ObitObj.seq, True, err)
        tmp.Header(err)
        del tmp
    elif ObitObj.__class__==FITSData.FITSImage:
        # FITS Image
        tmp = Image.newPFImage("FITS Image",ObitObj.filename, ObitObj.disk, True, err)
        tmp.Header(err)
        del tmp
    elif ObitObj.__class__==AIPSData.AIPSUVData:
        # AIPS UVData
        # BUGFIX: was UV.newPAImage; the AIPS UV constructor used elsewhere
        # in this module (getname) is UV.newPAUV.
        tmp = UV.newPAUV("AIPS UVData",ObitObj.name, ObitObj.klass, ObitObj.disk, \
                         ObitObj.seq, True, err)
        tmp.Header(err)
        del tmp
    elif ObitObj.__class__==FITSData.FITSUVData:
        # FITS UVData
        # BUGFIX: was UV.newPFImage; the FITS UV constructor used elsewhere
        # in this module (getFITS) is UV.newPFUV.
        tmp = UV.newPFUV("FITS UVData",ObitObj.filename, ObitObj.disk, True, err)
        tmp.Header(err)
        del tmp
    else:
        # Presume it's an Obit object
        ObitObj.Header(err)
    # end imhead
def setname(inn, out):
    """
    Copy input file definition from inn to out as in...

    Supports both FITS and AIPS; copies the data type, disk, and either
    the FITS file name or the AIPS name/class/sequence triple.

    * inn = Obit data object, created with getname, getFITS
    * out = ObitTask object,
    """
    ################################################################
    out.DataType = inn.FileType
    out.inDisk = inn.Disk
    if inn.FileType != 'FITS':
        # AIPS catalog identification
        out.inName = inn.Aname
        out.inClass = inn.Aclass
        out.inSeq = inn.Aseq
    else:
        # FITS file name
        out.inFile = inn.Fname
    # end setname
def set2name (in2, out):
    """
    Copy file definition from in2 to out as in2...

    Supports both FITS and AIPS.
    Copies data type plus the file identification (disk and either the
    FITS file name or the AIPS name/class/sequence triplet).

    * in2 = Obit data object, created with getname, getFITS
    * out = ObitTask object,
    """
    ################################################################
    out.DataType = in2.FileType
    out.in2Disk = in2.Disk
    if in2.FileType == 'FITS':
        # FITS: a single file name is enough
        out.in2File = in2.Fname
        return
    # AIPS: name / class / sequence triplet
    out.in2Name = in2.Aname
    out.in2Class = in2.Aclass
    out.in2Seq = in2.Aseq
    # end set2name
def setoname (inn, out):
    """
    Copy file definition from inn to out as outdisk...

    Supports both FITS and AIPS.
    Copies data type plus the output file identification (disk and either
    the FITS file name or the AIPS name/class/sequence triplet).

    * inn = Obit data object, created with getname, getFITS
    * out = ObitTask object,
    """
    ################################################################
    out.DataType = inn.FileType
    out.outDisk = inn.Disk
    is_fits = (inn.FileType == 'FITS')
    if is_fits:
        # FITS output identified by file name
        out.outFile = inn.Fname
    else:
        # AIPS output identified by name / class / sequence
        out.outName = inn.Aname
        out.outClass = inn.Aclass
        out.outSeq = inn.Aseq
    # end setoname
def setwindow (w, out):
    """
    Set BLC and TRC members on out from OWindow w

    Uses the first window of the first field on w, which must be a
    rectangle (e.g. as drawn interactively with tvlod).

    * w   = OWindow object
    * out = ObitTask object, BLC and TRC members [0] and [1] are modified
    """
    ################################################################
    # First window of field 1; entry layout is [id, type, x1, y1, x2, y2]
    win = OWindow.PGetList(w, 1, err)[0]
    if win[1] != 0:
        # type 0 is a rectangle
        raise TypeError("Window MUST be a rectangle")
    # Convert the 0-rel corners to 1-rel pixel coordinates
    corners = [v + 1 for v in win[2:6]]
    out.BLC[0], out.BLC[1] = corners[0], corners[1]
    out.TRC[0], out.TRC[1] = corners[2], corners[3]
    # end setwindow
def zap (o):
    """
    Zap object o

    Removes all external components (files)

    * o = Obit Data object to delete
    """
    ################################################################
    # Delegate to the object's own Zap method, passing the module-level
    # Obit error/message object ``err``.
    o.Zap(err)
    # end zap
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@Obit@python@PTObit.py@.PATH_END.py
|
{
"filename": "constants.py",
"repo_name": "PeterKamphuis/pyFAT-astro",
"repo_path": "pyFAT-astro_extracted/pyFAT-astro-main/pyFAT_astro/Support/constants.py",
"type": "Python"
}
|
# -*- coding: future_fstrings -*-
#def initialize():
global H_0
H_0 = 69.7 #km/s/Mpc
global c
c=299792458 #light speed in m/s
global pc
pc=3.086e+18 #parsec in cm
global solar_mass
solar_mass=1.98855e30 #Solar mass in kg
global solar_luminosity
solar_luminosity = 3.828e26 # Bolometric Solar Luminosity in W
global HI_mass
HI_mass=1.6737236e-27 #Mass of hydrogen in kg
global G
G = 6.67430e-11 #m^3/kg*s^2
global Gsol
Gsol = G/(1000.**3)*solar_mass #km^3/M_sol*s^2
|
PeterKamphuisREPO_NAMEpyFAT-astroPATH_START.@pyFAT-astro_extracted@pyFAT-astro-main@pyFAT_astro@Support@constants.py@.PATH_END.py
|
{
"filename": "mccd.py",
"repo_name": "CosmoStat/mccd",
"repo_path": "mccd_extracted/mccd-master/mccd/mccd.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
r"""MCCD CLASS.
This module contains the main MCCD class.
:Authors: Tobias Liaudat <tobias.liaudat@cea.fr>
Jerome Bonnin <https://github.com/jerome-bonnin>
Morgan Schmitz <https://github.com/MorganSchmitz>
"""
from __future__ import absolute_import, print_function
import numpy as np
from numpy.core.defchararray import join
from scipy.interpolate import Rbf
from modopt.signal.wavelet import get_mr_filters, filter_convolve
from modopt.opt.cost import costObj
from modopt.opt.reweight import cwbReweight
import modopt.opt.algorithms as optimalg
import galsim as gs
import mccd.proxs as prox
import mccd.grads as grads
import mccd.utils as utils
import mccd.mccd_utils as mccd_utils
def mccd_quickload(path):
    r"""Load a pre-fitted MCCD model from disk.

    Counterpart of :func:`MCCD.quicksave`: reads the
    ``[mccd_params, fitted_model]`` pair stored in the ``.npy`` file and
    rebuilds an ``MCCD`` instance flagged as fitted.

    Parameters
    ----------
    path : str
        Path to the npy file containing the saved MCCD model.
        A missing ``.npy`` suffix is appended automatically.

    Returns
    -------
    loaded_model : MCCD class
        The MCCD model.
    """
    if not path.endswith('.npy'):
        path += '.npy'
    mccd_params, fitted_model = np.load(path, allow_pickle=True)
    loaded_model = MCCD(**mccd_params)
    # Restore every fitted attribute written by quicksave()
    for key in ('n_ccd', 'obs_pos', 'A_loc', 'A_glob', 'S',
                'flux_ref', 'psf_size', 'VT', 'Pi', 'alpha'):
        setattr(loaded_model, key, fitted_model[key])
    loaded_model.is_fitted = True
    # Older model files may not contain the CCD list
    try:
        loaded_model.ccd_list = fitted_model['ccd_list']
    except Exception:
        loaded_model.ccd_list = None
    return loaded_model
class MCCD(object):
r"""Multi-CCD Resolved Components Analysis.
Parameters
----------
n_comp_loc: int
Number of components to learn for each CCD.
The interpretation may depend on the ``loc_model``
parameter of the :func:`fit()` function.
d_comp_glob: int
Degree of polynomial components for the global model.
d_hyb_loc: int
Degree of the polynomial component for the local part in case the
local model used is ``hybrid``.
Default is ``2``.
min_d_comp_glob: int or None
The minimum degree of the polynomial for the global component.
For example, if the paramter is set to 1, the polynomials of degree
0 and 1 will be excluded from the global polynomial variations.
``None`` means that we are not excluding any degree.
Default is ``None``.
upfact: int
Upsampling factor. Default is 1 (no superresolution).
ksig_loc: float
Value of :math:`K_{\sigma}^{Loc}` for the thresholding in
wavelet (Starlet by default) domain (taken to be
:math:`K\sigma`, where :math:`\sigma` is the estimated noise standard
deviation.). It is used for the thresholding of the
local eigenPSF :math:`S_{K}` update.
Default is ``1``.
ksig_glob: float
Value of :math:`K_{\sigma}^{Glob}` for the thresholding in Starlet
domain (taken to be :math:`k\sigma`, where :math:`\sigma` is the
estimated noise standard deviation.).
It is used for the thresholding of the
global eigenPSF :math:`\tilde{S}` update.
Default is ``1``.
rmse_thresh: float
Parameter concerning the CCD outlier rejection. Once the PSF model is
calculated we perform an outlier check on the training stars.
We divide each star in two parts with a given circle. The inner part
corresponds to the most of the PSF/star energy while the outer part
corresponds to the observation background. The outer part is used to
calculate the noise level and the inner part to calculate the model
residual (star observation - PSF model reconstruction). If the RMSE
error of the residual divided by the noise level is over the
``rmse_thresh`` the star will be considered an outlier.
A perfect reconstruction would have ``rmse_thresh`` equal to 1.
Default is ``1.25``.
ccd_star_thresh: float
Parameter concerning the CCD outlier rejection. If the percentage
of outlier stars in a single CCD is bigger than ``ccd_star_thresh``,
the CCD is considered to be an outlier. In this case, the CCD is
rejected from the PSF model. A value lower than 0 means that no
outlier rejection will be done.
Default is ``0.15``.
n_scales: int
Number of wavelet (Default Starlet) scales to use for the
sparsity constraint.
Default is ``3``. Unused if ``filters`` are provided.
ksig_init: float
Similar to ``ksig``, for use when estimating shifts and noise levels,
as it might be desirable to have it set higher than ``ksig``.
Unused if ``shifts`` are provided when running :func:`RCA.fit`.
Default is ``5``.
filters: numpy.ndarray
Optional filters to the transform domain wherein eigenPSFs are
assumed to be sparse; convolution by them should amount to
applying :math:`\Phi`. Optional; if not provided, the
Starlet transform with `n_scales` scales will be used.
verbose: bool or int
If True, will only output RCA-specific lines to stdout.
If verbose is set to 2, will run ModOpt's optimization
algorithms in verbose mode.
Default to ``2``.
fp_geometry: str
Geometry of the focal plane. It defines the transformations to use
to go from local to global coordinates.
Default is ``CFIS``.
Available options are ``CFIS``, ``EUCLID``.
"""
    def __init__(self, n_comp_loc, d_comp_glob, d_hyb_loc=2,
                 min_d_comp_glob=None, upfact=1, ksig_loc=1.,
                 ksig_glob=1., rmse_thresh=1.25, ccd_star_thresh=0.15,
                 n_scales=3, ksig_init=1., filters=None, verbose=2,
                 fp_geometry='CFIS'):
        r"""General parameter initialisations.

        Stores the model hyper-parameters (see the class docstring for
        their meaning), selects the focal-plane geometry and declares all
        the attributes that :func:`fit` will fill in later.
        """
        # Main model paramters
        self.n_comp_loc = n_comp_loc
        self.d_comp_glob = d_comp_glob
        self.min_d_comp_glob = min_d_comp_glob
        # Number of components of a full 2D polynomial of degree d:
        # (d + 1)(d + 2) / 2
        self.n_comp_glob = (self.d_comp_glob + 1) * (self.d_comp_glob + 2) // 2
        if self.min_d_comp_glob is not None:
            if self.min_d_comp_glob > self.d_comp_glob:
                raise ValueError("The total global degree must be" +\
                    " bigger than the minimum degree.")
            print('Reducing the global polynomial degree with d_min = ',
                self.min_d_comp_glob)
            # Exclude the low-order terms (degrees 0..min_d_comp_glob)
            # from the global component count
            self.n_comp_glob -= (self.min_d_comp_glob + 1) * (
                    self.min_d_comp_glob + 2) // 2
        self.d_hyb_loc = d_hyb_loc
        self.upfact = upfact
        self.ksig_loc = ksig_loc
        self.ksig_glob = ksig_glob
        self.ksig_init = ksig_init
        # Hard-wired off; set to True to record per-iteration diagnostics
        # in fit()/_fit() (iters_glob_*, iters_loc_*)
        self.iter_outputs = False
        # Focal plane geometry
        # This configuration is specific for CFIS MegaCam configuration
        if fp_geometry == 'CFIS':
            self.loc2glob = mccd_utils.Loc2Glob()
        elif fp_geometry == 'EUCLID':
            self.loc2glob = mccd_utils.Loc2Glob_EUCLID_sim()
        else:
            raise NotImplementedError
        # Outlier rejection parameters
        self.ccd_star_thresh = ccd_star_thresh
        self.rmse_thresh = rmse_thresh
        if filters is None:
            # option strings for mr_transform
            # ModOpt sparse2d get_mr_filters() convention
            # self.opt = ['-t2', '-n{}'.format(n_scales)]
            # Pysap sparse2d get_mr_filters() convention
            self.opt = 'BsplineWaveletTransformATrousAlgorithm'
            self.n_scales = n_scales
            self.default_filters = True
        else:
            # Caller-supplied transform filters (Phi)
            self.Phi_filters = filters
            self.default_filters = False
            self.n_scales = n_scales
        self.verbose = verbose
        if self.verbose > 1:
            self.modopt_verb = True
        else:
            self.modopt_verb = False
        self.is_fitted = False
        # Define the attributes that will be initialised later.
        self.d_comp_loc = None
        self.obs_data = None
        self.n_ccd = None
        self.loc_model = None
        self.ccd_list = None
        self.SNR_weight_list = None
        self.shap = None
        self.im_hr_shape = None
        self.obs_pos = None
        self.obs_weights = None
        # NOTE: loc_model is declared twice in this list (kept as-is)
        self.loc_model = None
        self.S = None
        self.VT = None
        self.Pi = None
        self.A_loc = None
        self.A_glob = None
        self.alpha = None
        self.shifts = None
        self.psf_size_type = None
        self.psf_size = None
        self.sigmas = None
        self.sigs = None
        self.flux = None
        self.flux_ref = None
        self.nb_iter = None
        self.nb_iter_glob = None
        self.nb_iter_loc = None
        self.nb_subiter_S_loc = None
        self.nb_subiter_A_loc = None
        self.nb_subiter_S_glob = None
        self.nb_subiter_A_glob = None
        self.nb_reweight = None
        self.n_eigenvects = None
        self.pi_degree = None
        self.graph_kwargs = None
        # Dead branch while iter_outputs is hard-wired False above
        if self.iter_outputs:
            self.iters_glob_A = None
            self.iters_glob_S = None
            self.iters_loc_A = None
            self.iters_loc_S = None
def quicksave(self, path):
r"""Save fitted model.
Save fitted MCCD model for later use. Ideally, you would probably
want to store the whole MCCD instance, though this might mean
storing a lot of data you are not likely to use if you do not alter
the fit that was already performed.
Stored models can be loaded with :func:`mccd.mccd_quickload`.
Parameters
----------
path: str
Path to where the fitted MCCDF model should be saved.
"""
if not self.is_fitted:
raise ValueError('MCCD instance has not yet been fitted to\
observations. Please run the fit method.')
mccd_params = {'n_comp_loc': self.n_comp_loc,
'd_comp_glob': self.d_comp_glob, 'upfact': self.upfact}
fitted_model = {'n_ccd': self.n_ccd, 'obs_pos': self.obs_pos,
'A_loc': self.A_loc, 'A_glob': self.A_glob,
'S': self.S, 'flux_ref': self.flux_ref,
'psf_size': self.psf_size, 'VT': self.VT,
'Pi': self.Pi, 'alpha': self.alpha,
'ccd_list': self.ccd_list}
if self.iter_outputs is True:
iters_dic = {'iters_glob_A': self.iters_glob_A,
'iters_glob_S': self.iters_glob_S,
'iters_loc_A': self.iters_loc_A,
'iters_loc_S': self.iters_loc_S}
np.save(path + '__iter_outputs_dic', iters_dic)
if path[-4:] != '.npy':
path += '.npy'
np.save(path, [mccd_params, fitted_model])
def fit(self, obs_data, obs_pos, ccd_list, obs_weights=None,
SNR_weight_list=None, S=None, VT=None, Pi=None, alpha=None,
shifts=None, sigs=None, psf_size=6, psf_size_type='R2',
flux=None, nb_iter=1, nb_iter_glob=2, nb_iter_loc=2,
nb_subiter_S_loc=100, nb_reweight=0, nb_subiter_A_loc=500,
nb_subiter_S_glob=30, nb_subiter_A_glob=200, n_eigenvects=5,
loc_model='hybrid', pi_degree=2, graph_kwargs={}):
r"""Fits MCCD to observed star field.
Parameters
----------
obs_data: list of numpy.ndarray
Observed data (each element of the list being one CCD).
obs_pos: list of numpy.ndarray
Corresponding positions (global coordinate system).
ccd_list: list of numpy.ndarray
List containing the ccd_ids of each set of observations,
positions and weights.
It is of utmost importance that the ccd_list contains the ccd_id
in the same order as in the other lists.
Ex: obs_data[0] is the data from the ccd ccd_list[0].
obs_weights: list of numpy.ndarray
Corresponding weights. Can be either one per observed star,
or contain pixel-wise values. Masks can be handled via binary
weights. Default is None (in which case no weights are applied).
Note if fluxes and shifts are not provided, weights will be ignored
for their estimation. Noise level estimation only removes
bad pixels (with weight strictly equal to 0) and otherwise
ignores weights.
SNR_weight_list: list of floats
List of weights to be used on each star. They can probably be based
on a specific function of the SNR and should represent the degree
of significance we give to a specific star.
The values should be around 1.
Default is ``None`` meaning that no weights will be used.
S: list of numpy.ndarray
First guess (or warm start) eigenPSFs :math:`S`
(last matrix is global). Default is ``None``.
VT: list of numpy.ndarray
Matrices of concatenated eigenvectors of the different
graph Laplacians. Default is ``None``.
Pi: list of numpy.ndarray
Matrices of polynomials in positions. Default is ``None``.
alpha: list numpy.ndarray
First guess (or warm start) weights :math:`\alpha`,
after factorization by ``VT`` (last matrix is global).
Default is ``None``.
shifts: list of numpy.ndarray
Corresponding sub-pixel shifts. Default is ``None``;
will be estimated from observed data if not provided.
sigs: numpy.ndarray
Estimated noise levels. Default is ``None``;
will be estimated from data if not provided.
psf_size: float
Approximate expected PSF size in ``psf_size_type``;
will be used for the size of the Gaussian window for centroid
estimation.
``psf_size_type`` determines the convention used for
this size (default is FWHM).
Ignored if ``shifts`` are provided.
Default is ``'R2'`` of ``6``.
psf_size_type: str
Can be any of ``'R2'``, ``'fwhm'`` or ``'sigma'``, for the size
defined from quadrupole moments, full width at half maximum
(e.g. from SExtractor) or 1-sigma width of the best matching
2D Gaussian. ``'sigma'`` is the value outputed from Galsim's
HSM adaptive moment estimator.
Default is ``'R2'``.
flux: list of numpy.ndarray
Flux levels. Default is ``None``;
will be estimated from data if not provided.
nb_iter: int
Number of overall iterations (i.e. of alternations).
Note the weights and global components do not
get updated the last time around, so they actually get
``nb_iter-1`` updates.
Paramter :math:`l_{max}` on the MCCD article pseudo-algorithm.
Default is ``1``.
nb_iter_glob: int
Number of iterations on the global model estimation.
The times we go trough the global :math:`S,A` updates.
Paramter :math:`n_G` on the MCCD article pseudo-algorithm.
Default value is ``2``.
nb_iter_loc: int
Number of iterations on the local model estimation.
The times we go trough the local :math:`S,A` updates.
Paramter :math:`n_L` on the MCCD article pseudo-algorithm.
Default value is ``2``.
nb_subiter_S_loc: int
Number of iterations when solving the optimization problem (III)
concerning the local eigenPSFs :math:`S_{k}`.
Default is ``100``.
nb_subiter_A_loc: int
Number of iterations when solving the optimization problem (IV)
concerning the local weights :math:`\alpha_{k}`.
Default is ``500``.
nb_subiter_S_glob: int
Number of iterations when solving the optimization problem (I)
concerning the global eigenPSFs :math:`\tilde{S}`.
Default is ``30``.
nb_subiter_A_glob: int
Number of iterations when solving the optimization problem (II)
concerning the global weights :math:`\tilde{\alpha}`.
Default is ``200``.
nb_reweight: int
Number of reweightings to apply during :math:`S` updates.
See equation (33) in RCA paper.
Default is ``0``.
n_eigenvects: int
Maximum number of eigenvectors to consider per :math:`(e,a)`
couple. Default is ``None``;
if not provided, *all* eigenvectors will be considered,
which can lead to a poor selection of graphs, especially when data
is undersampled. Ignored if ``VT`` and ``alpha`` are provided.
loc_model: str
Defines the type of local model to use, it can be:
``'rca'``, ``'poly'`` or ``'hybrid'``.
Thus defining the MCCD-RCA, MCCD-POL and MCCD-HYB.
When MCCD-POL is used, ``n_comp_loc`` should be used
as the ``d_comp_glob`` (max degree of the polynomial)
for the local model.
When MCCD-HYB is used, ``n_comp_loc`` should be used
as in MCCD-RCA, the number of graph-based eigenPSFs.
The max local polynomial degree is set to ``2``.
pi_degree: int
Maximum degree of polynomials in Pi. Default is ``2``.
Ignored if Pi is provided.
graph_kwargs: dict
List of optional kwargs to be passed on
to the :func:`utils.GraphBuilder`.
"""
# Initialize the needed variables
self.obs_data = [np.copy(obs_data_k) for obs_data_k in obs_data]
self.n_ccd = len(self.obs_data)
self.loc_model = loc_model
self.ccd_list = ccd_list
if SNR_weight_list is None:
self.SNR_weight_list = [np.ones(pos.shape[0]) for pos in obs_pos]
else:
self.SNR_weight_list = SNR_weight_list
self.shap = [self.obs_data[k].shape for k in range(self.n_ccd)]
self.shap.append(np.concatenate(self.obs_data, axis=2).shape)
self.im_hr_shape = [(self.upfact * self.shap[k][0],
self.upfact * self.shap[k][1],
self.shap[k][2])
for k in range(self.n_ccd)]
self.obs_pos = obs_pos
if obs_weights is None:
self.obs_weights = [np.ones(self.shap[k]) for k in
range(self.n_ccd)]
elif obs_weights[0].shape == self.shap[0]:
self.obs_weights = [obs_weights[k] /
np.expand_dims(np.sum(obs_weights[k], axis=2),
2) * self.shap[k][2]
for k in range(self.n_ccd)]
elif obs_weights.shape[0] == (self.shap[0][2],):
self.obs_weights = [obs_weights[k].reshape(1, 1, -1) /
np.sum(obs_weights[k]) * self.shap[k][2]
for k in range(self.n_ccd)]
else:
raise ValueError(
'Shape mismatch; weights should be of shape:' +
' {} (for per-pixel weights) or'.format(self.shap[0]) +
' {} (per-observation)'.format(self.shap[0][2:]))
if self.loc_model == 'poly':
self.d_comp_loc = self.n_comp_loc
self.n_comp_loc = (self.n_comp_loc + 1) * (
self.n_comp_loc + 2) // 2
if self.loc_model == 'hybrid':
# Hardcoded a poly deg 2 for the local polynome [TL] [improve]
self.n_comp_loc += ((self.d_hyb_loc + 1) * (
self.d_hyb_loc + 2) // 2)
if S is None:
# global eigenPSFs are the last ones
self.S = [np.zeros(self.im_hr_shape[0][:2] + (self.n_comp_loc,))
for k in range(self.n_ccd)]
self.S.append(
np.zeros(self.im_hr_shape[0][:2] + (self.n_comp_glob,)))
else:
self.S = S
self.VT = VT
self.Pi = Pi
self.alpha = alpha
self.shifts = shifts
self.psf_size_type = psf_size_type
if shifts is None:
self.psf_size = self._set_psf_size(psf_size, self.psf_size_type)
self.sigmas = None
self.sigs = sigs
self.flux = flux
self.nb_iter = nb_iter
self.nb_iter_glob = nb_iter_glob
self.nb_iter_loc = nb_iter_loc
self.nb_subiter_S_loc = nb_subiter_S_loc
if nb_subiter_A_loc is None:
nb_subiter_A_loc = 2 * nb_subiter_S_loc
self.nb_subiter_A_loc = nb_subiter_A_loc
self.nb_subiter_S_glob = nb_subiter_S_glob
self.nb_subiter_A_glob = nb_subiter_A_glob
self.nb_reweight = nb_reweight
self.n_eigenvects = n_eigenvects
self.pi_degree = pi_degree
self.graph_kwargs = graph_kwargs
if self.iter_outputs:
self.iters_glob_A = []
self.iters_glob_S = []
self.iters_loc_A = [[] for _ in range(self.n_ccd)]
self.iters_loc_S = [[] for _ in range(self.n_ccd)]
# Start the initialization
if self.verbose:
print('Running basic initialization tasks...')
self._initialize()
if self.verbose:
print('... Done.')
if self.VT is None or self.alpha is None:
if self.verbose:
print('Constructing local spatial constraint...')
if self.loc_model == 'rca':
self._initialize_graph_constraint()
elif self.loc_model == 'poly':
self._initialize_loc_poly_model()
elif self.loc_model == 'hybrid':
self._initialize_loc_hybrid_model()
else:
raise ValueError(
'Local model not undersood. Should be <rca> or <poly>.')
if self.verbose:
print('... Done.')
else:
self.A_loc = [self.alpha[k].dot(self.VT[k])
for k in range(self.n_ccd)]
if self.Pi is None or len(self.alpha) <= self.n_ccd:
if self.verbose:
print('Building position polynomials...')
self._initialize_poly_model()
if self.verbose:
print('... Done.')
else:
self.A_glob = [self.alpha[self.n_ccd].dot(self.Pi[k])
for k in range(self.n_ccd)]
# Finally fit the model
self._fit()
self.is_fitted = True
# Remove outliers
if self.ccd_star_thresh > 0:
self.remove_outlier_ccds()
return self.S, self.A_loc, self.A_glob, self.alpha, self.Pi
@staticmethod
def _set_psf_size(psf_size, psf_size_type):
r"""Handle different ``size`` conventions."""
if psf_size is not None:
if psf_size_type == 'fwhm':
return psf_size / (2 * np.sqrt(2 * np.log(2)))
elif psf_size_type == 'R2':
return np.sqrt(psf_size / 2)
elif psf_size_type == 'sigma':
return psf_size
else:
raise ValueError(
'psf_size_type should be one of "fwhm", "R2" or "sigma"')
else:
print('''WARNING: neither shifts nor an estimated PSF size were
provided to RCA; the shifts will be estimated from the data using
the default Gaussian window of 7.5 pixels.''')
return 7.5
def remove_ccd_from_model(self, ccd_idx):
r""" Remove ccd from the trained model. """
self.n_ccd -= 1
_ = self.obs_pos.pop(ccd_idx)
_ = self.A_loc.pop(ccd_idx)
_ = self.A_glob.pop(ccd_idx)
_ = self.S.pop(ccd_idx)
_ = self.flux_ref.pop(ccd_idx)
_ = self.VT.pop(ccd_idx)
_ = self.Pi.pop(ccd_idx)
_ = self.alpha.pop(ccd_idx)
_ = self.ccd_list.pop(ccd_idx)
def remove_outlier_ccds(self):
r""" Remove all CCDs with outliers.
Reminder: the outlier rejection is done on the train stars.
We will reject a CCD if the percentage of outlier stars in
a single CCD is bigger than ``ccd_star_thresh``.
They outlier threshold is based in ``rmse_thresh``.
A perfect reconstruction would have ``rmse_thresh`` equal to 1.
"""
dim_x = self.obs_data[0].shape[0]
dim_y = self.obs_data[0].shape[1]
win_rad = np.floor(np.max([dim_x, dim_y]) / 3)
# Calculate the observation noise
noise_estimator = utils.NoiseEstimator((dim_x, dim_y), win_rad)
ccd_outliers = []
for k in range(len(self.obs_data)):
# Extract observations and reconstructions from the CCD
stars = utils.reg_format(self.obs_data[k])
# Reconstruct the PSFs
psfs = self.validation_stars(
self.obs_data[k],
self.obs_pos[k],
self.obs_weights[k],
self.ccd_list[k],
mccd_debug=False)
# Estimate noise
sigmas_obs = np.array([noise_estimator.estimate_noise(star)
for star in stars])
# Window to consider the central PSF only
window = ~noise_estimator.window
# Calculate the windowed RMSE normalized by the noise level
rmse_sig = np.array([
np.sqrt(np.mean(((_mod - _obs)**2)[window])) / _sig
for _mod, _obs, _sig in zip(psfs, stars, sigmas_obs)
])
# Select outlier stars
outlier_ids = rmse_sig >= self.rmse_thresh
num_outliers = np.sum(outlier_ids)
# Check if the number of outliers depasses the star threshold
num_stars = stars.shape[0]
star_thresh_num = np.ceil(self.ccd_star_thresh * num_stars)
print("CCD num %02d, \t %d outliers, \t%d stars,"
" \t%d star threshold number." % (
self.ccd_list[k], num_outliers, num_stars,
star_thresh_num))
if num_outliers > star_thresh_num:
# We have to reject the CCD
ccd_outliers.append(k)
print('Removing CCD %d.' % (self.ccd_list[k]))
# Remove all the outliers
for index in sorted(ccd_outliers, reverse=True):
self.remove_ccd_from_model(index)
    def _initialize(self):
        r"""Initialize internal tasks.

        Estimates (when not provided) star sizes, noise levels, intra-pixel
        shifts and fluxes, then renormalizes the observed data by the noise
        levels.  Because of that renormalization this method must run even
        when shifts, noise levels and fluxes are all provided.
        """
        if self.default_filters:
            # Build the wavelet filters at the observation resolution
            init_filters = mccd_utils.get_mr_filters(
                self.shap[0][:2], opt=self.opt, coarse=True)
        else:
            init_filters = self.Phi_filters
        # Initialize sizes with Galsim's HSM
        if self.sigmas is None:
            # badpix: HSM expects bad pixels flagged non-zero, so invert
            # the 0/1 weight mask
            star_moms = [[gs.hsm.FindAdaptiveMom(
                gs.Image(star), badpix=gs.Image(np.rint(np.abs(badpix - 1))),
                guess_sig=self.psf_size, strict=False)
                for star, badpix in zip(utils.reg_format(self.obs_data[k]),
                                        utils.reg_format(self.obs_weights[k]))]
                for k in range(self.n_ccd)]
            self.sigmas = [np.array(
                [moms.moments_sigma for moms in star_moms[k]])
                for k in range(self.n_ccd)]
            # Replace failed measurements by the guess size
            # (HSM returns a negative sigma on failure with strict=False)
            for it in range(len(self.sigmas)):
                self.sigmas[it][self.sigmas[it] < 0] = self.psf_size
        # Initialize noise levels
        if self.sigs is None:
            transf_data = [
                utils.apply_transform(self.obs_data[k], init_filters)
                for k in range(self.n_ccd)]
            transf_mask = [
                utils.transform_mask(self.obs_weights[k], init_filters[0])
                for k in range(self.n_ccd)]
            # Robust sigma from the MAD of the first wavelet scale
            sigmads = [
                np.array([1.4826 * utils.mad(fs[0], w)
                          for fs, w in zip(transf_data[k],
                                           utils.reg_format(transf_mask[k]))])
                for k in range(self.n_ccd)]
            self.sigs = [sigmads[k] / np.linalg.norm(init_filters[0])
                         for k in range(self.n_ccd)]
        else:
            self.sigs = [np.copy(self.sigs[k]) for k in range(self.n_ccd)]
        # Per-CCD minimum noise level; overall minimum appended last
        self.sig_min = [np.min(self.sigs[k]) for k in range(self.n_ccd)]
        self.sig_min.append(np.min(self.sig_min))
        # Initialize intra-pixel shifts
        if self.shifts is None:
            thresh_data = [np.copy(self.obs_data[k])
                           for k in range(self.n_ccd)]
            cents = [[] for k in range(self.n_ccd)]
            for k in range(self.n_ccd):
                for i in range(self.shap[k][2]):
                    # don't allow thresholding to be
                    # over 80% of maximum observed pixel
                    nsig_shifts = min(self.ksig_init,
                                      0.8 * self.obs_data[k][:, :, i].max() /
                                      self.sigs[k][i])
                    thresh_data[k][:, :, i] = utils.HardThresholding(
                        thresh_data[k][:, :, i],
                        nsig_shifts * self.sigs[k][i])
                    cents[k] += [
                        utils.CentroidEstimator(thresh_data[k][:, :, i],
                                                sig=self.sigmas[k][i])]
            self.shifts = [np.array([ce.return_shifts() for ce in cents[k]])
                           for k in range(self.n_ccd)]
        # Lanczos kernel radius scaled to the largest measured star size
        lanc_rad = np.ceil(3. * np.max(
            np.array([np.max(_sigma) for _sigma in self.sigmas]))).astype(int)
        self.shift_ker_stack, self.shift_ker_stack_adj = zip(
            *[utils.shift_ker_stack(self.shifts[k], self.upfact,
                                    lanc_rad=lanc_rad)
              for k in range(self.n_ccd)])
        # Flux levels
        # NOTE(review): ``cents`` only exists when shifts were estimated in
        # the branch above; if shifts are provided while flux is None this
        # raises NameError — confirm callers always pass flux with shifts.
        if self.flux is None:
            centroids = [np.array([[ce.xc, ce.yc] for ce in cents[k]]) for k in
                         range(self.n_ccd)]
            self.flux = [
                utils.flux_estimate_stack(self.obs_data[k], cent=centroids[k],
                                          sigmas=self.sigmas[k]) for k in
                range(self.n_ccd)]
        self.flux_ref = [np.median(self.flux[k]) for k in range(self.n_ccd)]
        self.flux_ref.append(np.median(np.concatenate(self.flux)))
        # Normalize noise levels observed data
        for k in range(self.n_ccd):
            self.sigs[k] /= self.sig_min[k]
            self.obs_data[k] /= self.sigs[k].reshape(1, 1, -1)
def _initialize_graph_constraint(self):
r"""Initialize of the graph constraint.
``graph_kwards`` parameters used.
"""
gber = [utils.GraphBuilder(self.obs_data[k], self.obs_pos[k],
self.obs_weights[k], self.n_comp_loc,
n_eigenvects=self.n_eigenvects,
verbose=self.verbose,
**self.graph_kwargs) for k in
range(self.n_ccd)]
self.VT, self.alpha, self.distances = (
[gber[k].VT for k in range(self.n_ccd)],
[gber[k].alpha for k in range(self.n_ccd)],
[gber[k].distances for k in range(self.n_ccd)])
self.sel_e, self.sel_a = ([gber[k].sel_e for k in range(self.n_ccd)],
[gber[k].sel_a for k in range(self.n_ccd)])
self.A_loc = [self.alpha[k].dot(self.VT[k]) for k in range(self.n_ccd)]
def _initialize_poly_model(self):
r"""Initialize the global polynomial model.
The positions in the global coordinate system
are used.
Normalization of the polynomials is being done here.
"""
# Calculate max and min values of global coordinate system
min_x, max_x = self.loc2glob.x_coord_range()
min_y, max_y = self.loc2glob.y_coord_range()
self.Pi = [
utils.poly_pos(pos=self.obs_pos[k], max_degree=self.d_comp_glob,
center_normalice=True,
x_lims=[min_x, max_x], y_lims=[min_y, max_y],
normalice_Pi=False, min_degree=self.min_d_comp_glob)
for k in range(self.n_ccd)]
self.alpha.append(np.eye(self.n_comp_glob))
# Global position model normalisation
# Start with the list Pi
conc_Pi = np.concatenate((self.Pi), axis=1)
Pi_norms = np.sqrt(np.sum(conc_Pi**2, axis=1)).reshape(-1, 1)
self.Pi = [self.Pi[k] / Pi_norms for k in range(self.n_ccd)]
self.A_glob = [self.alpha[self.n_ccd].dot(self.Pi[k])
for k in range(self.n_ccd)]
def _initialize_loc_poly_model(self):
r"""Initialize the local polynomial model."""
self.VT = [
utils.poly_pos(pos=self.obs_pos[k], max_degree=self.d_comp_loc,
center_normalice=True,
x_lims=None, y_lims=None)
for k in range(self.n_ccd)]
self.alpha = [np.eye(self.n_comp_loc) for _it in range(self.n_ccd)]
self.A_loc = [self.alpha[k].dot(self.VT[k]) for k in range(self.n_ccd)]
def _initialize_loc_hybrid_model(self):
r"""Initialize the hybrid local model.
Graphs + polynomials.
"""
max_deg = self.d_hyb_loc
n_poly_comp = (max_deg + 1) * (max_deg + 2) // 2
# Take the number of local component top the graph value
self.n_comp_loc -= n_poly_comp
# First initialize the graph constraint
self._initialize_graph_constraint()
# Calculate the local polynomial and add it to
# the graph-calculated values
for k in range(self.n_ccd):
poly_VT = utils.poly_pos(pos=self.obs_pos[k], max_degree=max_deg,
center_normalice=True,
x_lims=None, y_lims=None)
poly_alpha = np.eye(n_poly_comp)
n_comp_hyb = poly_alpha.shape[0]
n_vec_hyb = poly_alpha.shape[1]
zero_concat_1 = np.zeros((self.n_comp_loc, n_vec_hyb))
zero_concat_2 = np.zeros((n_comp_hyb, self.alpha[k].shape[1]))
tmp_alpha_1 = np.concatenate((self.alpha[k], zero_concat_1),
axis=1)
tmp_alpha_2 = np.concatenate((zero_concat_2, poly_alpha), axis=1)
self.alpha[k] = np.concatenate((tmp_alpha_1, tmp_alpha_2), axis=0)
self.VT[k] = np.concatenate((self.VT[k], poly_VT), axis=0)
self.A_loc[k] = self.alpha[k].dot(self.VT[k])
self.n_comp_loc += n_poly_comp
def _fit(self):
r"""Perform the main model fitting."""
# Function variables
comp = self.S
alpha = self.alpha
weights_loc = self.A_loc
weights_glob = self.A_glob
# Very useful shortcut
conc = np.concatenate
# Estimated models (local and global for each CCD)
H_loc = [comp[k].dot(weights_loc[k]) for k in range(self.n_ccd)]
H_glob = [comp[self.n_ccd].dot(weights_glob[k]) for k in
range(self.n_ccd)]
# Dual variables (for Condat algorithm)
dual_comp = [np.zeros((self.im_hr_shape[k])) for k in
range(self.n_ccd)]
dual_alpha = [np.zeros(self.A_loc[k].shape) for k in range(self.n_ccd)]
dual_alpha.append(np.zeros(conc(self.A_glob, axis=1).shape))
# Starlet filters and associated spectral radius
if self.default_filters:
self.Phi_filters = mccd_utils.get_mr_filters(
self.im_hr_shape[0][:2], opt=self.opt,
coarse=True, trim=False)
rho_phi = np.sqrt(
np.sum(np.sum(np.abs(self.Phi_filters), axis=(1, 2)) ** 2))
# Gradient objects
source_loc_grad = [grads.SourceLocGrad(
self.obs_data[k],
self.obs_weights[k],
weights_loc[k],
H_glob[k],
self.flux[k],
self.sigs[k],
self.shift_ker_stack[k],
self.shift_ker_stack_adj[k],
self.SNR_weight_list[k],
self.upfact,
self.Phi_filters,
save_iter_cost=self.iter_outputs,
verbose=self.verbose) for k in range(self.n_ccd)]
weight_loc_grad = [grads.CoeffLocGrad(
self.obs_data[k],
self.obs_weights[k],
comp[k],
self.VT[k],
H_glob[k],
self.flux[k],
self.sigs[k],
self.shift_ker_stack[k],
self.shift_ker_stack_adj[k],
self.SNR_weight_list[k],
self.upfact,
save_iter_cost=self.iter_outputs,
verbose=self.verbose) for k in range(self.n_ccd)]
source_glob_grad = grads.SourceGlobGrad(
conc(self.obs_data, axis=2),
conc(self.obs_weights, axis=2),
conc(weights_glob, axis=1),
conc(H_loc, axis=2),
conc(self.flux),
conc(self.sigs),
conc(self.shift_ker_stack, axis=2),
conc(self.shift_ker_stack_adj, axis=2),
conc(self.SNR_weight_list),
self.upfact,
self.Phi_filters,
save_iter_cost=self.iter_outputs,
verbose=self.verbose)
weight_glob_grad = grads.CoeffGlobGrad(
conc(self.obs_data, axis=2),
conc(self.obs_weights, axis=2),
comp[self.n_ccd],
conc(self.Pi, axis=1),
conc(H_loc, axis=2),
conc(self.flux),
conc(self.sigs),
conc(self.shift_ker_stack, axis=2),
conc(self.shift_ker_stack_adj, axis=2),
self.upfact,
conc(self.SNR_weight_list),
save_iter_cost=self.iter_outputs,
verbose=self.verbose)
# Proxs for component optimization
sparsity_prox = prox.StarletThreshold(0)
pos_prox = [prox.PositityOff(H_k) for H_k in H_glob]
lin_recombine = [prox.LinRecombine(weights_loc[k], self.Phi_filters)
for k in range(self.n_ccd)]
# Proxs for weight optimization
# Local model
# The last (1-steady_state_thresh_loc)*100% elements will have same
# threshold
# min_elements_loc: Minimum number of elements to maintain when
# threshold is the highest
steady_state_thresh_loc = 0.8
min_elements_loc = 5
def iter_func_loc(x, elem_size):
return np.min(
[np.floor((elem_size / 2 - 1) * (1 / np.sqrt(
self.nb_subiter_A_loc * steady_state_thresh_loc)) \
* np.sqrt(x)) + min_elements_loc,
np.floor(elem_size / 2)])
# [TL] Using strong sparsity inducing function
iter_func_loc = lambda x, elem_size: np.floor(np.sqrt(x)) + 1
coeff_prox_loc = prox.KThreshold(iter_func_loc)
# Global model
# The last (1-steady_state_thresh_glob)*100% elements will have same
# threshold
# min_elements_glob: Minimum number of elements to maintain when
# threshold is the highest
steady_state_thresh_glob = 0.8
min_elements_glob = 5
def iter_func_glob(x, elem_size):
return np.min(
[np.floor((elem_size / 2 - 1) * (1 / np.sqrt(
self.nb_subiter_A_glob * steady_state_thresh_glob)) \
* np.sqrt(x)) + min_elements_glob,
np.floor(elem_size / 2)])
# [TL] Using strong sparsity inducing function
iter_func_glob_v2 = lambda x, elem_size: np.floor(np.sqrt(x)) + 1
coeff_prox_glob = prox.KThreshold(iter_func_glob_v2)
norm_prox = prox.proxNormalization(type='columns')
lin_recombine_alpha = [prox.LinRecombineAlpha(self.VT[k])
for k in range(self.n_ccd)]
lin_recombine_alpha.append(
prox.LinRecombineAlpha(conc(self.Pi, axis=1)))
# Cost functions
source_loc_cost = [
costObj([source_loc_grad[k]], verbose=self.modopt_verb)
for k in range(self.n_ccd)]
weight_loc_cost = [
costObj([weight_loc_grad[k]], verbose=self.modopt_verb)
for k in range(self.n_ccd)]
source_glob_cost = costObj([source_glob_grad],
verbose=self.modopt_verb)
weight_glob_cost = costObj([weight_glob_grad],
verbose=self.modopt_verb)
# Transformed components in wavelet (default: Starlet) domain
transf_comp = [utils.apply_transform(comp[k], self.Phi_filters)
for k in range(self.n_ccd + 1)]
# Big loop: Main iteration
for main_it in range(self.nb_iter):
# Global Optimization
for l_glob in range(self.nb_iter_glob):
# Global Components Optimization
# Components gradient update
source_glob_grad.update_A(conc(weights_glob, axis=1))
source_glob_grad.update_H_loc(conc(H_loc, axis=2))
# Lipschitz constant for ForwardBackward
beta = source_glob_grad.spec_rad * 1.5 + rho_phi
tau = 1. / beta
# Sparsity prox thresholds update
thresh = utils.reg_format(
utils.acc_sig_maps(
self.shap[self.n_ccd],
conc(self.shift_ker_stack_adj, axis=2),
conc(self.sigs),
conc(self.flux),
self.flux_ref[self.n_ccd],
self.upfact,
conc(weights_glob, axis=1),
sig_data=np.ones(
(self.shap[self.n_ccd][2],)) \
* self.sig_min[self.n_ccd]))
thresholds = self.ksig_glob * np.sqrt(
np.array(
[filter_convolve(Sigma_k ** 2, self.Phi_filters ** 2)
for Sigma_k in thresh]))
sparsity_prox.update_threshold(tau * thresholds)
# Reweighting. Borrowed from original RCA code
if self.nb_reweight:
reweighter = cwbReweight(self.nb_reweight)
for _ in range(self.nb_reweight):
# Optimize!
source_optim = optimalg.ForwardBackward(
transf_comp[self.n_ccd],
source_glob_grad, sparsity_prox,
cost=source_glob_cost,
beta_param=1. / beta, auto_iterate=False,
verbose=self.verbose, progress=self.verbose)
source_optim.iterate(max_iter=self.nb_subiter_S_glob)
transf_comp[self.n_ccd] = source_optim.x_final
reweighter.reweight(transf_comp[self.n_ccd])
thresholds = reweighter.weights
else:
# Optimize!
source_optim = optimalg.ForwardBackward(
transf_comp[self.n_ccd],
source_glob_grad, sparsity_prox, cost=source_glob_cost,
beta_param=1. / beta, auto_iterate=False,
verbose=self.verbose, progress=self.verbose)
source_optim.iterate(max_iter=self.nb_subiter_S_glob)
transf_comp[self.n_ccd] = source_optim.x_final
# Save iteration diagnostic data
if self.iter_outputs:
self.iters_glob_S.append(
source_glob_grad.get_iter_cost())
source_glob_grad.reset_iter_cost()
# Update pixel domain global components
comp[self.n_ccd] = utils.rca_format(
np.array([filter_convolve(transf_Sj,
self.Phi_filters,
filter_rot=True) for
transf_Sj in transf_comp[self.n_ccd]]))
# Global Weights Optimization
# Weights gradient update
weight_glob_grad.update_S(comp[self.n_ccd])
weight_glob_grad.update_H_loc(conc(H_loc, axis=2))
# Coefficient sparsity prox update
coeff_prox_glob.reset_iter()
if l_glob < self.nb_iter_glob - 1 or l_glob == 0:
# Conda's algorithm parameters
# (lipschitz of diff. part and operator norm of lin. part)
# See Conda's paper for more details.
beta = weight_glob_grad.spec_rad * 1.5
tau = 1. / beta
sigma = (1. / lin_recombine_alpha[
self.n_ccd].norm ** 2) * beta / 2
# Optimize !
weight_optim = optimalg.Condat(
alpha[self.n_ccd],
dual_alpha[self.n_ccd],
weight_glob_grad,
coeff_prox_glob,
norm_prox,
linear=lin_recombine_alpha[self.n_ccd],
cost=weight_glob_cost,
max_iter=self.nb_subiter_A_glob,
tau=tau,
sigma=sigma,
verbose=self.verbose,
progress=self.verbose)
alpha[self.n_ccd] = weight_optim.x_final
weights_glob = [alpha[self.n_ccd].dot(self.Pi[k])
for k in range(self.n_ccd)]
# Save iteration diagnostic data
if self.iter_outputs:
self.iters_glob_A.append(
weight_glob_grad.get_iter_cost())
weight_glob_grad.reset_iter_cost()
# Global model update
H_glob = [comp[self.n_ccd].dot(weights_glob[k])
for k in range(self.n_ccd)]
# Local models update
for l_loc in range(self.nb_iter_loc):
# Loop on all CCDs
for k in range(self.n_ccd):
# Local Components Optimization
# Components gradient update
source_loc_grad[k].update_A(weights_loc[k])
source_loc_grad[k].update_H_glob(H_glob[k])
# Positivity prox update
pos_prox[k].update_offset(H_glob[k])
lin_recombine[k].update_A(weights_loc[k])
# Conda parameters
# (lipschitz of diff. part and operator norm of lin. part)
beta = source_loc_grad[k].spec_rad * 1.5 + rho_phi
tau = 1. / beta
sigma = (1. / lin_recombine[k].norm ** 2) * beta / 2
# Sparsity prox thresholds update
thresh = utils.reg_format(
utils.acc_sig_maps(
self.shap[k],
self.shift_ker_stack_adj[k],
self.sigs[k],
self.flux[k],
self.flux_ref[k],
self.upfact,
weights_loc[k],
sig_data=np.ones((self.shap[k][2],)) \
* self.sig_min[k]))
thresholds = self.ksig_loc * np.sqrt(
np.array([filter_convolve(
Sigma_k ** 2, self.Phi_filters ** 2)
for Sigma_k in thresh]))
sparsity_prox.update_threshold(tau * thresholds)
# Reweighting
if self.nb_reweight:
reweighter = cwbReweight(self.nb_reweight)
for _ in range(self.nb_reweight):
# Optimize!
source_optim = optimalg.Condat(
transf_comp[k],
dual_comp[k],
source_loc_grad[k],
sparsity_prox,
pos_prox[k],
linear=lin_recombine[k],
cost=source_loc_cost[k],
max_iter=self.nb_subiter_S_loc,
tau=tau,
sigma=sigma,
verbose=self.verbose,
progress=self.verbose)
transf_comp[k] = source_optim.x_final
reweighter.reweight(transf_comp[k])
thresholds = reweighter.weights
else:
# Optimize!
source_optim = optimalg.Condat(
transf_comp[k],
dual_comp[k],
source_loc_grad[k],
sparsity_prox,
pos_prox[k],
linear=lin_recombine[k],
cost=source_loc_cost[k],
max_iter=self.nb_subiter_S_loc,
tau=tau, sigma=sigma,
verbose=self.verbose,
progress=self.verbose)
transf_comp[k] = source_optim.x_final
# Save iteration diagnostic data
if self.iter_outputs:
self.iters_loc_S[k].append(
source_loc_grad[k].get_iter_cost())
source_loc_grad[k].reset_iter_cost()
# Update pixel domain local components
comp[k] = utils.rca_format(
np.array([filter_convolve(transf_Sj,
self.Phi_filters,
filter_rot=True) for
transf_Sj in transf_comp[k]]))
# Local weights Optimization
# Weights gradient update
weight_loc_grad[k].update_S(comp[k])
weight_loc_grad[k].update_H_glob(H_glob[k])
# Coeff sparsity prox update
coeff_prox_loc.reset_iter()
# Skip alpha updates for the last iterations
if l_loc < self.nb_iter_loc - 1 or l_loc == 0:
# Conda parameters
# (lipschitz of diff. part and
# operator norm of lin. part)
beta = weight_loc_grad[k].spec_rad * 1.5
tau = 1. / beta
sigma = (1. / lin_recombine_alpha[k].norm ** 2) * (
beta / 2)
# Optimize
weight_optim = optimalg.Condat(
alpha[k],
dual_alpha[k],
weight_loc_grad[k],
coeff_prox_loc,
norm_prox,
linear=lin_recombine_alpha[k],
cost=weight_loc_cost[k],
max_iter=self.nb_subiter_A_loc,
tau=tau,
sigma=sigma,
verbose=self.verbose,
progress=self.verbose)
alpha[k] = weight_optim.x_final
weights_loc[k] = alpha[k].dot(self.VT[k])
# Save iteration diagnostic data
if self.iter_outputs:
self.iters_loc_A[k].append(
weight_loc_grad[k].get_iter_cost())
weight_loc_grad[k].reset_iter_cost()
# Local model update
H_loc[k] = comp[k].dot(weights_loc[k])
# Final values
self.S = comp
self.alpha = alpha
self.A_loc = weights_loc
self.A_glob = weights_glob
def interpolate_psf_pipeline(self, test_pos, ccd_n, centroid=None):
    r""" Estimate PSF at desired position with the required centroid.

    This function is a consequence of following the requirements
    needed to use the MCCD model in a pipeline.
    (ShapePipe now but could be adapted)

    The returned PSFs should be normalized with unitary flux and with
    the required centroid.

    Please note the differences between the pixel conventions between
    this package and Galsim. In the first one the position of a pixel is
    in its lower left corner and the indexing starts at zero (numpy style).
    Galsim's convention is that the position is in the center of the pixel
    and the indexation starts at one.

    Returns the model in "regular" format, (n_stars, n_pixels, n_pixels).

    Parameters
    ----------
    test_pos: numpy.ndarray
        Positions where the PSF should be estimated.
        Should be in the same format (coordinate system, units, etc.) as
        the ``obs_pos`` fed to :func:`MCCD.fit`.
    ccd_n: int
        ``ccd_id`` of the positions to be tested.
    centroid: list of floats, [xc, yc]
        Required centroid for the output PSF. It has to be specified
        following the MCCD pixel convention.
        Default will be the vignet centroid.

    Returns
    -------
    PSFs: numpy.ndarray
        Returns the interpolated PSFs in regular format.
        ``None`` is returned when the requested CCD was not used for
        training.
    """
    if not self.is_fitted:
        raise ValueError('''MCCD instance has not yet been fitted to
        observations. Please run the fit method.''')
    # Default values for interpolation (fixed here by the pipeline
    # requirements, unlike in estimate_psf where they are arguments).
    n_loc_neighbors = 15
    n_glob_neighbors = 15
    rbf_function = 'thin_plate'
    ntest = test_pos.shape[0]
    # One interpolated weight column per test position.
    test_weights_glob = np.zeros((self.n_comp_glob, ntest))
    test_weights_loc = np.zeros((self.n_comp_loc, ntest))
    # Turn ccd_n into list number.
    # Select the correct indexes of the requested CCD.
    try:
        ccd_idx = np.where(np.array(self.ccd_list) == ccd_n)[0][0]
    except Exception:
        # If the CCD was not used for training the output should be
        # None and be handled by the wrapping function.
        return None
    # PSF recovery
    for j, pos in enumerate(test_pos):
        # Local model
        # Determine neighbors
        nbs_loc, pos_nbs_loc = mccd_utils.return_loc_neighbors(
            pos,
            self.obs_pos[ccd_idx],
            self.A_loc[ccd_idx].T,
            n_loc_neighbors)
        # Train RBF and interpolate for each component
        for i in range(self.n_comp_loc):
            rbfi = Rbf(pos_nbs_loc[:, 0],
                       pos_nbs_loc[:, 1],
                       nbs_loc[:, i],
                       function=rbf_function)
            test_weights_loc[i, j] = rbfi(pos[0], pos[1])
        # Global model
        # Using return_loc_neighbors(): global weights are interpolated
        # from stars of the same CCD only.
        nbs_glob, pos_nbs_glob = mccd_utils.return_loc_neighbors(
            pos,
            self.obs_pos[ccd_idx],
            self.A_glob[ccd_idx].T,
            n_glob_neighbors)
        for i in range(self.n_comp_glob):
            rbfi = Rbf(pos_nbs_glob[:, 0],
                       pos_nbs_glob[:, 1],
                       nbs_glob[:, i],
                       function=rbf_function)
            test_weights_glob[i, j] = rbfi(pos[0], pos[1])
    # Reconstruct the global and local PSF contributions
    PSFs_loc = self._loc_transform(test_weights_loc, ccd_idx)
    PSFs_glob = self._glob_transform(test_weights_glob)
    # PSFs is in reg format: batch dim is the first one
    PSFs = utils.reg_format(PSFs_glob + PSFs_loc)
    # Let's handle the shifts
    if centroid is None:
        # Default target centroid: the geometric center of the stamp,
        # in the MCCD pixel convention.
        centroid = [PSFs.shape[1] / 2., PSFs.shape[2] / 2.]
    # Measure current sizes and centroids with galsim adaptive moments.
    psf_moms = [gs.hsm.FindAdaptiveMom(gs.Image(psf), strict=False)
                for psf in PSFs]
    sigmas = np.array([moms.moments_sigma for moms in psf_moms])
    # These centroids follow MCCD's pixel convention: the -0.5 converts
    # galsim's 1-indexed, center-of-pixel positions to MCCD's 0-indexed,
    # corner-of-pixel positions (see docstring).
    cents = np.array([[moms.moments_centroid.x - 0.5,
                       moms.moments_centroid.y - 0.5]
                      for moms in psf_moms])
    # Shift needed to move each measured centroid onto the target one.
    shifts = np.array([[centroid[0] - ce[0], centroid[1] - ce[1]]
                       for ce in cents])
    # Lanczos interpolant radius for the intra-pixel shift: fixed
    # fallback or scaled with the largest measured size.
    # NOTE(review): ``sigmas`` is always an ndarray at this point, so
    # the None branch looks unreachable; kept for safety.
    if sigmas is None:
        lanc_rad = 8
    else:
        lanc_rad = np.ceil(3. * np.max(sigmas)).astype(int)
    shift_kernels, _ = utils.shift_ker_stack(shifts, self.upfact,
                                             lanc_rad=lanc_rad)
    # PSFs changed into reg_format in the degradation process
    # (shift + downsampling by ``self.upfact``).
    PSFs = np.array(
        [utils.degradation_op(PSFs[j, :, :],
                              shift_kernels[:, :, j],
                              self.upfact)
         for j in range(ntest)])
    # Normalize the PSFs flux to unity, as required by the pipeline.
    PSFs = np.array(
        [PSFs[j, :, :] / np.sum(PSFs[j, :, :]) for j in range(ntest)])
    return PSFs
def estimate_psf(self, test_pos, ccd_n, n_loc_neighbors=15,
                 n_glob_neighbors=15, rbf_function='thin_plate',
                 apply_degradation=False, shifts=None, flux=None,
                 sigmas=None, upfact=None, mccd_debug=False,
                 global_pol_interp=None):
    r"""Estimate and return PSF at desired positions.

    Returns the model in "regular" format, (n_stars, n_pixels, n_pixels).

    Parameters
    ----------
    test_pos: numpy.ndarray
        Positions where the PSF should be estimated.
        Should be in the same format (coordinate system, units, etc.) as
        the ``obs_pos`` fed to :func:`MCCD.fit`.
    ccd_n: int
        ``ccd_id`` of the positions to be tested.
    n_loc_neighbors: int
        Number of neighbors for the local model to use for RBF
        interpolation. Default is 15.
    n_glob_neighbors: int
        Number of neighbors for the global model to use for RBF
        interpolation. Default is 15.
    rbf_function: str
        Type of RBF kernel to use. Default is ``'thin_plate'``.
        Check scipy RBF documentation for more models.
    apply_degradation: bool
        Whether PSF model should be degraded (shifted and
        resampled on coarse grid), for instance for comparison with stars.
        If True, expects shifts to be provided.
        Needed if pixel validation against stars will be required.
        Default is False.
    shifts: numpy.ndarray
        Intra-pixel shifts to apply if ``apply_degradation`` is set
        to True.
        Needed to match the observed stars in case of pixel validation.
    flux: numpy.ndarray
        Flux levels by which reconstructed PSF will be multiplied if
        provided. For pixel validation with stars if ``apply_degradation``
        is set to True.
    sigmas: numpy.ndarray
        Sigmas (shapes) are used to have a better estimate of the size
        of the lanczos interpolant that will be used when performing the
        intra-pixel shift.
        Default is None, where a predefined value is used for the
        interpolant size.
    upfact: int
        Upsampling factor; default is None, in which case that of the
        MCCD instance will be used.
    mccd_debug: bool
        Debug option. It returns the model plus the local and the global
        reconstruction components.
        Default is False.
    global_pol_interp: numpy.ndarray
        If is None, the global interpolation is done with the RBF
        interpolation as in the local model.
        If is not None, the global interpolation is done directly using
        position polynomials.
        In this case, ``global_pol_interp`` should be the normalized Pi
        interpolation matrix.
        Default is None.

    Returns
    -------
    PSFs: numpy.ndarray
        Returns the interpolated PSFs in regular format.
        ``None`` (or ``(None, None, None)`` with ``mccd_debug``) is
        returned when the requested CCD was not used for training.
    """
    if not self.is_fitted:
        raise ValueError('''MCCD instance has not yet been fitted to
        observations. Please run the fit method.''')
    if upfact is None:
        upfact = self.upfact
    # Lanczos interpolant radius for the intra-pixel shifts: fixed
    # fallback, or scaled with the largest provided stellar size.
    if sigmas is None:
        lanc_rad = 8
    else:
        lanc_rad = np.ceil(3. * np.max(sigmas)).astype(int)
    ntest = test_pos.shape[0]
    # One interpolated weight column per test position.
    test_weights_glob = np.zeros((self.n_comp_glob, ntest))
    test_weights_loc = np.zeros((self.n_comp_loc, ntest))
    # Turn ccd_n into list number.
    # Select the correct indexes of the requested CCD.
    try:
        ccd_idx = np.where(np.array(self.ccd_list) == ccd_n)[0][0]
    except Exception:
        # If the CCD was not used for training the output should be
        # None and be handled by the wrapping function.
        if mccd_debug:
            return None, None, None
        else:
            return None
    # PSF recovery
    for j, pos in enumerate(test_pos):
        # Local model
        # Determine neighbors
        nbs_loc, pos_nbs_loc = mccd_utils.return_loc_neighbors(
            pos,
            self.obs_pos[ccd_idx],
            self.A_loc[ccd_idx].T,
            n_loc_neighbors)
        # Train RBF and interpolate for each component
        for i in range(self.n_comp_loc):
            rbfi = Rbf(pos_nbs_loc[:, 0],
                       pos_nbs_loc[:, 1],
                       nbs_loc[:, i],
                       function=rbf_function)
            test_weights_loc[i, j] = rbfi(pos[0], pos[1])
        # Global model
        if global_pol_interp is None:
            # Use RBF interpolation for the global component
            # Depending on the problem one may want to use neighbors
            # from the corresponding CCD (return_loc_neighbors) or from
            # any CCD (return_glob_neighbors).
            # Using return_loc_neighbors()
            nbs_glob, pos_nbs_glob = mccd_utils.return_loc_neighbors(
                pos,
                self.obs_pos[ccd_idx],
                self.A_glob[ccd_idx].T,
                n_glob_neighbors)
            # Using return_glob_neighbors()
            # nbs_glob, pos_nbs_glob = mccd_utils.return_glob_neighbors(
            #     pos,
            #     self.obs_pos,
            #     self.A_glob,
            #     n_glob_neighbors)
            for i in range(self.n_comp_glob):
                rbfi = Rbf(pos_nbs_glob[:, 0],
                           pos_nbs_glob[:, 1],
                           nbs_glob[:, i],
                           function=rbf_function)
                test_weights_glob[i, j] = rbfi(pos[0], pos[1])
        else:
            # Use classic PSFEx-like position polynomial interpolation
            # for the global component.
            # NOTE(review): this assignment does not depend on ``pos``
            # and is recomputed at every loop iteration; it could be
            # hoisted out of the loop without changing the result.
            test_weights_glob = self.alpha[-1] @ global_pol_interp
    # Reconstruct the global and local PSF contributions
    PSFs_loc = self._loc_transform(test_weights_loc, ccd_idx)
    PSFs_glob = self._glob_transform(test_weights_glob)
    PSFs = PSFs_glob + PSFs_loc
    if apply_degradation:
        # NOTE(review): the shift kernels are built with ``self.upfact``
        # while the degradation below uses the (possibly overridden)
        # ``upfact`` argument — confirm this asymmetry is intended.
        shift_kernels, _ = utils.shift_ker_stack(shifts, self.upfact,
                                                 lanc_rad=lanc_rad)
        # PSFs changed into reg_format in the degradation process
        # (shift + downsampling by ``upfact``).
        deg_PSFs = np.array(
            [utils.degradation_op(PSFs[:, :, j],
                                  shift_kernels[:, :, j],
                                  upfact)
             for j in range(ntest)])
        if flux is not None:
            # Rescale to the observed fluxes, relative to the reference
            # flux of this CCD.
            deg_PSFs *= flux.reshape(-1, 1, 1) / self.flux_ref[ccd_idx]
        if mccd_debug:
            # Also degrade the separate global/local contributions.
            deg_PSFs_glob = np.array([utils.degradation_op(
                PSFs_glob[:, :, j], shift_kernels[:, :, j], upfact)
                for j in range(ntest)])
            deg_PSFs_loc = np.array([utils.degradation_op(
                PSFs_loc[:, :, j], shift_kernels[:, :, j], upfact)
                for j in range(ntest)])
            if flux is not None:
                deg_PSFs_glob *= flux.reshape(-1, 1, 1) / self.flux_ref[
                    ccd_idx]
                deg_PSFs_loc *= flux.reshape(-1, 1, 1) / self.flux_ref[
                    ccd_idx]
        if mccd_debug:
            return deg_PSFs, deg_PSFs_glob, deg_PSFs_loc
        else:
            return deg_PSFs
    else:
        # If PSF are not degraded they come in rca_format from before so
        # we change to regular_format.
        # I should normalize the flux of the PSFs before the output when
        # no degradation is done
        PSFs = np.array(
            [PSFs[:, :, j] / np.sum(PSFs[:, :, j]) for j in range(ntest)])
        if mccd_debug:
            return PSFs, PSFs_glob, PSFs_loc
        else:
            return PSFs
def validation_stars(self, test_stars, test_pos, test_masks=None,
                     ccd_id=None, mccd_debug=False,
                     response_flag=False, global_pol_interp=None):
    r"""Match PSF model to stars.

    The match is done in flux, shift and pixel sampling -
    for validation tests.
    Returns the matched PSFs' stamps.

    Returned matched PSFs will be None if there is no model on that
    specific CCD due to the lack of training stars.

    Parameters
    ----------
    test_stars: numpy.ndarray
        Star stamps to be used for comparison with the PSF model.
        Should be in "rca" format,
        i.e. with axises (n_pixels, n_pixels, n_stars).
    test_pos: numpy.ndarray
        Their corresponding positions in global coordinate system.
    test_masks: numpy.ndarray
        Masks to be used on the star stamps.
        If None, all the pixels will be used.
        Default is None.
    ccd_id: int
        The corresponding ccd_id
        (ie ccd number corresponding to the megacam geometry).
        Do not mistake for ccd_idx (index).
    mccd_debug: bool
        Debug option. It returns the local and the global
        reconstruction components.
    response_flag: bool
        Response option. True if in response mode.
    global_pol_interp: Position pols of numpy.ndarray or None
        If is None, the global interpolation is done with the RBF
        interpolation as in the local model.
        If is not None, the global interpolation is done directly
        using position polynomials.
        In this case, it should be the normalized Pi interpolation matrix.

    Returns
    -------
    matched_psfs: numpy.ndarray or None
        Degraded (shifted, resampled, flux-matched) PSF stamps, or None
        when there is no model for the requested CCD.
    """
    if not self.is_fitted:
        raise ValueError('''MCCD instance has not yet been fitted to
        observations. Please run the fit method.''')
    if response_flag:
        # Response mode: no measured shifts or fluxes; use zero shifts
        # and a fixed size guess for the interpolant radius.
        test_shifts = np.zeros((test_pos.shape[0], 2))
        test_fluxes = None
        sigmas = np.ones((test_pos.shape[0],)) * self.psf_size
    else:
        if test_masks is None:
            test_masks = np.ones(test_stars.shape)
        # Measure star sizes with galsim's adaptive moments.
        # The mask is flipped (np.abs(mask - 1)) before being passed as
        # ``badpix`` — presumably MCCD masks flag usable pixels with 1
        # while galsim expects bad pixels flagged; confirm convention.
        star_moms = [gs.hsm.FindAdaptiveMom(gs.Image(star),
                                            badpix=gs.Image(np.rint(
                                                np.abs(badpix - 1))),
                                            guess_sig=self.psf_size,
                                            strict=False)
                     for star, badpix in zip(utils.reg_format(test_stars),
                                             utils.reg_format(test_masks))]
        sigmas = np.array([moms.moments_sigma for moms in star_moms])
        # Estimate each star's centroid and the intra-pixel shifts
        # needed to match it.
        cents = [
            utils.CentroidEstimator(test_stars[:, :, it], sig=sigmas[it])
            for it in range(test_stars.shape[2])]
        test_shifts = np.array([ce.return_shifts() for ce in cents])
        # Flux estimates used to scale the model to each star.
        test_fluxes = utils.flux_estimate_stack(test_stars, sigmas=sigmas)
    matched_psfs = self.estimate_psf(test_pos, ccd_id,
                                     apply_degradation=True,
                                     shifts=test_shifts,
                                     flux=test_fluxes,
                                     sigmas=sigmas,
                                     mccd_debug=mccd_debug,
                                     global_pol_interp=global_pol_interp)
    # Optimized flux matching
    if matched_psfs is not None:
        # matched_psfs will be None if there is no model on that
        # specific CCD due to the lack of training stars.
        # Least-squares flux factor: argmin_a ||star - a * psf||^2.
        # NOTE(review): with ``mccd_debug=True`` estimate_psf returns a
        # tuple, which this flux matching does not expect — confirm
        # callers only use mccd_debug through other code paths.
        norm_factor = np.array(
            [np.sum(_star * _psf) / np.sum(_psf * _psf)
             for _star, _psf in zip(utils.reg_format(test_stars),
                                    matched_psfs)]).reshape(-1, 1, 1)
        matched_psfs *= norm_factor
    return matched_psfs
def _loc_transform(self, A_loc_weights, ccd_idx):
    r"""Project local weights onto the local eigenPSF components.

    Returns the local contribution to the PSFs for the CCD stored at
    index ``ccd_idx`` in ``self.S``.
    """
    local_components = self.S[ccd_idx]
    return local_components @ A_loc_weights
def _glob_transform(self, A_glob_weights):
    r"""Project global weights onto the global eigenPSF components.

    The global components are the last entry of ``self.S``.
    """
    global_components = self.S[-1]
    return global_components @ A_glob_weights
|
CosmoStatREPO_NAMEmccdPATH_START.@mccd_extracted@mccd-master@mccd@mccd.py@.PATH_END.py
|
{
"filename": "_namelength.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/hoverlabel/_namelength.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NamelengthValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the ``histogram.hoverlabel.namelength`` attribute."""

    def __init__(
        self, plotly_name="namelength", parent_name="histogram.hoverlabel", **kwargs
    ):
        # Extract the validator options from kwargs, falling back to the
        # defaults for this attribute.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        min_value = kwargs.pop("min", -1)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            min=min_value,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@hoverlabel@_namelength.py@.PATH_END.py
|
{
"filename": "concatenate.py",
"repo_name": "psheehan/pdspy",
"repo_path": "pdspy_extracted/pdspy-master/pdspy/interferometry/concatenate.py",
"type": "Python"
}
|
from .libinterferometry import Visibilities
import numpy
def concatenate(visibilities):
    """Merge a sequence of Visibilities objects into a single one.

    Parameters
    ----------
    visibilities : iterable of Visibilities
        The visibility sets to merge. All entries are assumed to share
        the same frequency axis; the frequencies of the first entry are
        used for the result.

    Returns
    -------
    Visibilities
        A new object whose u, v, real, imag and weights arrays are the
        concatenation of the inputs. Baselines are propagated only when
        every input provides them (matching the original behavior).

    Raises
    ------
    ValueError
        If ``visibilities`` is empty.
    """
    visibilities = list(visibilities)
    if not visibilities:
        raise ValueError("visibilities must contain at least one element")

    # Collect per-dataset arrays and concatenate once at the end, which
    # is O(total) instead of O(n^2) for repeated concatenation.
    # Note: the original also concatenated ``amp`` but never used it;
    # that dead work is dropped here.
    u = numpy.concatenate([vis.u for vis in visibilities])
    v = numpy.concatenate([vis.v for vis in visibilities])
    real = numpy.concatenate([vis.real for vis in visibilities])
    imag = numpy.concatenate([vis.imag for vis in visibilities])
    weights = numpy.concatenate([vis.weights for vis in visibilities])

    # Frequency axis taken from the first dataset (assumed shared by all
    # inputs -- same behavior as before).
    freq = visibilities[0].freq.copy()

    # Baselines are only kept when every dataset provides them.
    if all(vis.baseline is not None for vis in visibilities):
        baseline = numpy.concatenate(
            [vis.baseline for vis in visibilities])
        return Visibilities(u, v, freq, real, imag, weights,
                            baseline=baseline)
    else:
        return Visibilities(u, v, freq, real, imag, weights)
|
psheehanREPO_NAMEpdspyPATH_START.@pdspy_extracted@pdspy-master@pdspy@interferometry@concatenate.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "athob/py-ananke",
"repo_path": "py-ananke_extracted/py-ananke-main/README.md",
"type": "Markdown"
}
|
# `py-ananke`
[](https://py-ananke.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/athob/py-ananke/actions/workflows/main.yml)
[](https://joss.theoj.org/papers/357c0445d891fc10e1b0ca4dba1e3cc0)
[](https://arxiv.org/abs/2312.02268)
[](https://zenodo.org/badge/latestdoi/498927304)
Welcome to `py-ananke`, a Python package that offers `ananke` which is a comprehensive pipeline designed to generate mock astrometric and photometric catalogs of synthetic stars derived from particle-based simulated star populations.
## Project genesis
[](https://fire.northwestern.edu/ananke/)
*RGB flux map of a synthetic survey of the FIRE-2 simulated galaxy m12f (image credit: [Sanderson et al. 2020](https://ui.adsabs.harvard.edu/abs/2020ApJS..246....6S/abstract)).*
The package was designed to provide easy installation and distribution of the `ananke` software, as described in [Sanderson et al. 2020](https://ui.adsabs.harvard.edu/abs/2020ApJS..246....6S/abstract). In this work, the team focused on cosmological simulations, such as the latte suite of FIRE simulations which have limited resolution and cannot accurately represent fully resolved stellar populations with individual stars. To address this challenge, the authors of `ananke` developed a framework consisting of scripts and data that enabled the generation of synthetic GAIA star surveys from these simulated galaxies. The framework combines density estimations and IMF sampling techniques to create representative populations of mock stars.
An essential aspect of `ananke` is its integration with the [`EnLink`](https://ui.adsabs.harvard.edu/abs/2009ApJ...703.1061S/abstract)/[`EnBiD`](http://ascl.net/1109.012) C++ software for computing phase space densities. These computed densities are then used as input for the [`Galaxia`](http://ascl.net/1101.007) C++ software, which generates synthetic surveys by incorporating user-supplied GAIA isochrones to produce the mock photometry.
The development of `py-ananke` aims to make this sophisticated framework accessible to a broader community. By providing a self-contained and easily installable Python package, we strive to facilitate the usage and adoption of `ananke` for generating mock star surveys from cosmological simulations, enabling the investigation of stellar halos around nearby galaxies.
## Getting started
`py-ananke` is compatible with Python versions above 3.7.12 and below 3.11. The project is organized into three branches: [main](https://github.com/athob/py-ananke/tree/main), [stable](https://github.com/athob/py-ananke/tree/stable), and [develop](https://github.com/athob/py-ananke/tree/develop). The main branch contains the latest released version, while the stable and develop branches host versions currently in development, with stable being the most recent stable version. `py-ananke` uses dedicated wrapper submodules, namely [`py-EnBiD-ananke`](https://github.com/athob/py-EnBiD-ananke) and [`py-Galaxia-ananke`](https://github.com/athob/py-Galaxia-ananke), specifically developed to handle the installation and utilization of the C++ backend software, [`EnBiD`](http://ascl.net/1109.012), and a modified version of [`Galaxia`](http://ascl.net/1101.007) called [`galaxia-ananke`](https://github.com/athob/galaxia-ananke). These submodules relieve users from the need to directly manage the C++ software while isolating the C++ wrapping process. This allows `py-ananke` to focus on processing inputs and outputs using pure Python. It is worth noting that [`galaxia-ananke`](https://github.com/athob/galaxia-ananke) incorporates several pre-installed photometric systems, represented by sets of isochrones generated from the [CMD web interface](http://stev.oapd.inaf.it/cgi-bin/cmd) (commonly referred to as Padova isochrones). Among the available options are HST, GAIA, Euclid, Rubin, JWST & Roman.
### Installation
To install `py-ananke`, you can use the following pip command, which pulls the latest version directly from the repository's main branch:
pip install git+https://github.com/athob/py-ananke@main
or
python -m pip install git+https://github.com/athob/py-ananke@main
Alternatively, if you prefer, you may clone the repository to your local machine and then install `py-ananke` using the following pip command, which installs it from your local copy of the repository:
git clone https://github.com/athob/py-ananke
cd py-ananke
pip install .
Please note that using the `--no-cache-dir` flag (i.e. `pip install . --no-cache-dir`) may be necessary due to some dependency issues.
***Warning: DO NOT download the repository as a ZIP archive with the intention of installing it that way; the installation requires the git setup of the repository to properly install its submodule dependencies.***
After installation, the module can be imported in Python under the name `ananke` and be ran as such.
### Simplified use case
The repository includes a Jupyter notebook that demonstrates a simplified use case utilizing a dummy set of randomly generated particle data. You can access the notebook directly at [jupyter/example_notebook.ipynb](jupyter/example_notebook.ipynb). This notebook provides a step-by-step example to help you understand the functionality and usage of `py-ananke` in a straightforward manner.
Note that you will need Jupyter installed on the machine where you intend to test this notebook, which will not be automatically installed by the `py-ananke`'s installation as it is not required to run its modules. Please follow the [Project Jupyter dedicated instructions](https://jupyter.org/install) for installing its tools such as [JupyterLab](https://jupyter.org/install#jupyterlab) or [Jupyter Notebook](https://jupyter.org/install#jupyter-notebook).
## What's under the hood
Work in progress...
## On-going development
`py-ananke` is now [published in The Journal of Open Source Software](https://joss.theoj.org/papers/10.21105/joss.06234). You can also access the associated arXiv publication [here](https://arxiv.org/abs/2312.02268).
### Upcoming updates
We have an exciting roadmap of upcoming updates planned, which we aim to implement prior to or following the submission. Here are some of the planned updates in no particular order:
- **Improving Extinction**: The extinction feature is currently in an experimental state, and we have identified areas for significant improvement. Firstly, while the user can supply their own extinction coefficients Aλ/A0 for any photometric system, only GAIA currently has default coefficients. Future updates will expand the range of default extinction coefficients for different systems. Secondly, the estimation of dust column density maps per particle currently requires user input. Our plan is to incorporate a treatment that directly computes dust column densities from the simulated metal-enriched gas provided by the user.
- **Implementing Error Modelling**: The original `ananke` framework ([Sanderson et al. 2020](https://ui.adsabs.harvard.edu/abs/2020ApJS..246....6S/abstract)) featured error modelling as a significant component. In future updates, we will introduce a framework that allows for the incorporation of simple error models into the pipeline, enhancing the robustness of the generated mock surveys.
- **Interfacing with Isochrone Databases**: `py-ananke` currently includes pre-loaded isochrones for a chosen photometric system (some of which are listed in the introduction section). Our plan is to implement a direct interface with established isochrone databases such as [Padova](http://stev.oapd.inaf.it/cgi-bin/cmd) or [MIST](https://waps.cfa.harvard.edu/MIST/), enabling users to download available photometric systems on-the-fly. Additionally, we aim to develop a framework that allows `py-ananke` to output photometry in a range of commonly used calibration standards.
- **Additional Modularization**: While [`EnBiD`](http://ascl.net/1109.012) serves as the density estimation routine of choice, we plan to expand the options by adding more choices such as [`EnLink`](https://ui.adsabs.harvard.edu/abs/2009ApJ...703.1061S/abstract). Furthermore, we intend to diversify the selection of kernel functions for density estimation and sampling mock stars in phase space, making it possible to utilize anisotropic kernel functions. Additionally, we will enhance the flexibility of `py-ananke` by incorporating a wider range of initial mass functions (IMFs) and allowing mass sampling based on present mass functions, particularly for generating mock stars in globular clusters.
- **Quality of Life Updates**: We are dedicated to enhancing the user experience and overall usability of `py-ananke`. To that end, we will be implementing various quality of life updates, refining the software interface, improving documentation, and streamlining the overall workflow.
These upcoming updates signify our commitment to continuously improve `py-ananke` and address the evolving needs of the community. We encourage users to stay engaged with the project, provide feedback, and contribute to its development as we work towards a more comprehensive and user-friendly tool for generating mock surveys.
### Contributing
You can readily access the code in the [main GitHub repository](https://github.com/athob/py-ananke), as well as its submodules repositories. We encourage users to utilize the [Github issues](https://github.com/athob/py-ananke/issues) feature to report any bugs encountered or suggest new ideas for improvement. Contributions to the project are highly valued and greatly appreciated. To contribute, we kindly request that you make your changes in a separate branch or fork of the repository. Once your contributions are ready, you can submit a [pull request](https://github.com/athob/py-ananke/pulls) to merge them into the [develop](https://github.com/athob/py-ananke/tree/develop) branch. If you choose to make your contributions in a fork, please make sure to base those changes from the develop branch in that fork, and have your pull requests originate from your forked develop branch and target this repository's develop branch. Additionally, you may need to confirm that the Workflow permissions in your fork settings are set to Read and write (Settings > Actions > General > Workflow permissions) in case the [CI/CD GitHub Action](https://github.com/athob/py-ananke/actions/workflows/main.yml) doesn't complete.
## `py-ananke` & the community
### License
`py-ananke` is distributed under the [GNU General Public License (GPL) version 3](LICENSE), offering you the freedom to utilize, modify, and distribute the software. The GPL v3 ensures that you have the flexibility to adapt py-ananke to suit your specific needs, while also contributing to the open-source community. We encourage you to read the full license text to understand your rights and responsibilities when using py-ananke.
### Citing `py-ananke`
If py-ananke has played a role in your research project or software development, we kindly request that you acknowledge and cite the project. Citing py-ananke not only gives credit to the dedicated efforts of its creators but also helps others discover and benefit from this software.
To cite `py-ananke`, please use DOI `10.21105/joss.06234` as a reference in your publications, or cite as the following:
```text
Thob, Adrien C. R. et al. 2024, “Generating synthetic star catalogs from simulated data for next-gen observatories with py-ananke”, The Journal of Open Source Software, 9, 6234, doi:10.21105/joss.06234.
```
Alternatively, you may use [one of the entries associated with `py-ananke` as listed by The SAO/NASA Astrophysics Data System](https://ui.adsabs.harvard.edu/abs/2023arXiv231202268T/exportcitation), such as the following BibTeX entry:
```text
@ARTICLE{2024JOSS....9.6234T,
author = {{Thob}, Adrien and {Sanderson}, Robyn and {Eden}, Andrew and {Nikakhtar}, Farnik and {Panithanpaisal}, Nondh and {Garavito-Camargo}, Nicol{\'a}s and {Sharma}, Sanjib},
title = "{Generating synthetic star catalogs from simulated data for next-gen observatories with py-ananke}",
journal = {The Journal of Open Source Software},
keywords = {C++, astronomy, galaxies, stars, simulations, mock observations, Jupyter Notebook, Python, Astrophysics - Astrophysics of Galaxies, Astrophysics - Instrumentation and Methods for Astrophysics},
year = 2024,
month = oct,
volume = {9},
number = {102},
eid = {6234},
pages = {6234},
doi = {10.21105/joss.06234},
archivePrefix = {arXiv},
eprint = {2312.02268},
primaryClass = {astro-ph.GA},
adsurl = {https://ui.adsabs.harvard.edu/abs/2024JOSS....9.6234T},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
```
### Dissemination
Below, you will find a selection of scientific papers that showcase the potential and capabilities of py-ananke. These papers serve as valuable resources for understanding the practical implementation and impact of py-ananke in various domains.
- Gray et al. 2024, “EDGE: A new model for Nuclear Star Cluster formation in dwarf galaxies”, arXiv e-prints, arXiv:2405.19286, [doi:10.48550/arXiv.2405.19286](https://arxiv.org/pdf/2405.19286)
`py-ananke` has also been discussed extensively in various workshops and conferences.
- [`py-ananke_v0.0.2-beta2`](https://github.com/athob/py-ananke/releases/tag/v0.0.2-beta2) was featured at [EAS 2023 in Krakow](https://eas2023programme.kuoni-congress.info/presentation/generating-mock-euclid-and-roman-surveys-of-stellar-halos-of-simulated-nearby-galaxies-using-the-py-ananke-pipeline) as an e-poster, check it at [this URL](https://k-poster.kuoni-congress.info/eas-2023/poster/5bf40113-efa9-4bfc-89a5-b67ebd81f7dd).
- [`py-ananke_v0.1.0-beta2`](https://github.com/athob/py-ananke/releases/tag/v0.1.0-beta2) was featured at the 243rd AAS meeting in New Orleans.
- a Galactic Plane survey application of `py-ananke` was featured at:
1. the [Roman@Yerkes workshop on Galactic Science with the Nancy Grace Roman Space Telescope](https://sites.google.com/cfa.harvard.edu/romanyerkes/home),
2. [EAS 2024 in Padova](https://eas2024programme.kuoni-congress.info/presentation/synthetic-survey-catalogs-for-the-galactic-roman-infrared-plane-survey-grips-using-py-ananke) as an e-poster (available [here](https://k-poster.kuoni-congress.info/eas-2024/poster/749fd8f1-ec4c-4167-911f-914e2215eeab)),
3. the [Challenging Theory with Roman meeting at Caltech](https://conference.ipac.caltech.edu/roman2024/) (recording available [here](https://youtu.be/93hF1ZCDzw8)),
4. the [2024 IAU GA in Cape Town](https://astronomy2024.org/) (live streamed [here](https://m.youtube.com/watch?v=8R-Wc9tGWcg&t=7218s)).
## Acknowledgements
We extend our sincere gratitude to [Robyn Sanderson (UPenn)](https://live-sas-physics.pantheon.sas.upenn.edu/people/standing-faculty/robyn-sanderson), [Andrew Eden (FloridaTech)](mailto:Andrew%20Eden%20<aeden2019@my.fit.edu>), [Farnik Nikakhtar (Yale)](https://physics.yale.edu/people/farnik-nikakhtar), [Nondh Panithanpaisal (UPenn/CarnegieObs)](https://nonsk131.github.io), and [Nicolas Garavito-Camargo (FlatironCCA)](https://jngaravitoc.github.io/Garavito-Camargo/) for their invaluable contributions and support during the development of this package. Their expertise, guidance, and collaboration have been instrumental in shaping the vision and advancement of this project. We also appreciate the valuable feedback and suggestions provided by the wider community, including the extended [Galaxy Dynamics @ UPenn group](https://web.sas.upenn.edu/dynamics/) and the participants of the "anankethon" workshop, which have significantly contributed to the refinement and enhancement of the package.
|
athobREPO_NAMEpy-anankePATH_START.@py-ananke_extracted@py-ananke-main@README.md@.PATH_END.py
|
{
"filename": "deblender.py",
"repo_name": "b-biswas/MADNESS",
"repo_path": "MADNESS_extracted/MADNESS-main/madness_deblender/deblender.py",
"type": "Python"
}
|
"""Perform Deblending."""
import logging
import os
import time
import galcheat
import numpy as np
import sep
import tensorflow as tf
import tensorflow_probability as tfp
from madness_deblender.extraction import extract_cutouts
from madness_deblender.FlowVAEnet import FlowVAEnet
from madness_deblender.utils import get_data_dir_path
# Shorthand for the TensorFlow Probability distributions namespace.
tfd = tfp.distributions
# logging level set to INFO; bare-message format (no timestamps/levels).
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Module-level logger used throughout the deblender.
LOG = logging.getLogger(__name__)
def vectorized_compute_reconst_loss(args):
    """Compute the reconstruction-loss term for one field (for use with tf.map_fn).

    Parameters
    ----------
    args: tuple of tensors
        ``(blended_field, reconstructions, index_pos_to_sub, num_components,
        sig_sq)``; forwarded to :func:`compute_residual`.

    Returns
    -------
    tf scalar
        Half the sum of squared residuals divided by ``sig_sq`` (chi-square / 2).
    """
    blended_field, reconstructions, index_pos_to_sub, num_components, sig_sq = args
    # Subtract every galaxy reconstruction from the field in place.
    residual_field = compute_residual(
        blended_field,
        reconstructions,
        index_pos_to_sub=index_pos_to_sub,
        num_components=num_components,
    )
    # Gaussian-approximation chi-square: residual^2 / sigma^2, summed over pixels.
    chi_sq = tf.math.reduce_sum(residual_field**2 / sig_sq)
    return chi_sq / 2
# @tf.function(jit_compile=True)
def compute_residual(
    blended_field,
    reconstructions,
    use_scatter_and_sub=True,
    index_pos_to_sub=None,
    num_components=1,
    padding_infos=None,
):
    """Compute residual in a field.

    Subtracts each galaxy reconstruction (a small cutout) from the full
    blended field, either via ``tf.tensor_scatter_nd_sub`` at precomputed
    pixel indices or by zero-padding each cutout up to the field size.

    Parameters
    ----------
    blended_field: tf tensor
        field with all the galaxies
    reconstructions: tf tensor
        reconstructions to be subtracted
    use_scatter_and_sub: bool
        uses tf.tensor_scatter_nd_sub for subtraction instead of padding.
    index_pos_to_sub:
        index positions for subtraction if `use_scatter_and_sub` is True
    num_components: int
        number of components/galaxies in the field
    padding_infos:
        padding parameters for reconstructions so that they can be subtracted
        from the field. Used when `use_scatter_and_sub` is False.

    Returns
    -------
    residual_field: tf tensor
        residual of the field after subtracting the reconstructions.
    """
    residual_field = tf.convert_to_tensor(blended_field, dtype=tf.float32)
    if use_scatter_and_sub:

        # Loop body: scatter-subtract the i-th flattened reconstruction at its
        # precomputed pixel indices. Written as tf.while_loop so it stays
        # graph-compatible with a dynamic (tensor-valued) num_components.
        def one_step(i, residual_field):
            indices = index_pos_to_sub[i]
            reconstruction = reconstructions[i]
            residual_field = tf.tensor_scatter_nd_sub(
                residual_field,
                indices,
                tf.reshape(reconstruction, [tf.math.reduce_prod(reconstruction.shape)]),
            )
            return i + 1, residual_field

        c = lambda i, *_: i < num_components
        # parallel_iterations=1: iterations mutate the same field sequentially.
        _, residual_field = tf.while_loop(
            c,
            one_step,
            (0, residual_field),
            maximum_iterations=num_components,
            parallel_iterations=1,
        )
        return residual_field
    else:
        if padding_infos is None:
            raise ValueError(
                "Pass padding infos or use the scatter_and_sub function instead"
            )

        # Alternative path: pad each cutout to full field size, then subtract.
        def one_step(i, residual_field):
            # padding = tf.cast(padding_infos[i], dtype=tf.int32)
            padding = padding_infos[i]
            reconstruction = tf.pad(
                tf.gather(reconstructions, i), padding, "CONSTANT", name="padding"
            )
            # tf.where(mask, tf.zeros_like(tensor), tensor)
            residual_field = residual_field - reconstruction
            return i + 1, residual_field

        c = lambda i, _: i < num_components
        _, residual_field = tf.while_loop(
            c,
            one_step,
            (tf.constant(0, dtype=tf.int32), residual_field),
            maximum_iterations=num_components,
            parallel_iterations=1,
        )
        return residual_field
class Deblender:
    """Run the deblender.

    Wraps a trained FlowVAEnet (VAE + normalizing flow) and performs
    MAP optimization in its latent space to separate overlapping galaxies
    in batches of blended fields.
    """

    def __init__(
        self,
        stamp_shape=45,
        latent_dim=16,
        filters_encoder=[32, 128, 256, 512],
        filters_decoder=[64, 96, 128],
        kernels_encoder=[5, 5, 5, 5],
        kernels_decoder=[5, 5, 5],
        dense_layer_units=512,
        num_nf_layers=6,
        weights_path=None,
        load_weights=True,
        survey=galcheat.get_survey("LSST"),
    ):
        """Initialize class variables.

        Parameters
        ----------
        stamp_shape: int
            size of input postage stamp
        latent_dim: int
            size of latent space.
        filters_encoder: list
            filters used for the convolutional layers in the encoder
        filters_decoder: list
            filters used for the convolutional layers in the decoder
        kernels_encoder: list
            kernels used for the convolutional layers in the encoder
        kernels_decoder: list
            kernels used for the convolutional layers in the decoder
        num_nf_layers: int
            number of layers in the flow network
        dense_layer_units: int
            number of units in the dense layer
        weights_path: string
            base path to load weights.
            flow weights are loaded from weights_path/flow/val_loss
            vae weights are loaded from weights_path/vae/val_loss
            encoder weights are loaded from weights_path/deblender/val_loss
        survey: galcheat.survey object
            galcheat survey object to fetch survey details
        load_weights: bool
            Should be used as True to load pre-trained weights.
            if False, random weights are used(used for testing purposes).
        """
        self.latent_dim = latent_dim
        self.survey = survey
        self.flow_vae_net = FlowVAEnet(
            stamp_shape=stamp_shape,
            latent_dim=latent_dim,
            filters_encoder=filters_encoder,
            kernels_encoder=kernels_encoder,
            filters_decoder=filters_decoder,
            kernels_decoder=kernels_decoder,
            dense_layer_units=dense_layer_units,
            num_nf_layers=num_nf_layers,
            survey=survey,
        )
        if load_weights:
            if weights_path is None:
                # Fall back to the weights bundled under the package data dir,
                # organized per survey name.
                data_dir_path = get_data_dir_path()
                weights_path = os.path.join(data_dir_path, survey.name)
            self.flow_vae_net.load_flow_weights(
                weights_path=os.path.join(weights_path, "flow/val_loss")
            )
            self.flow_vae_net.flow_model.trainable = False
            self.flow_vae_net.load_vae_weights(
                weights_path=os.path.join(weights_path, "vae/val_loss")
            )
            # The encoder weights are overwritten by the fine-tuned
            # "deblender" encoder after the VAE weights are loaded.
            self.flow_vae_net.load_encoder_weights(
                weights_path=os.path.join(weights_path, "deblender/val_loss")
            )
            # Freeze all networks: deblending only optimizes the latent z.
            self.flow_vae_net.vae_model.trainable = False
        # self.flow_vae_net.vae_model.trainable = False
        # self.flow_vae_net.flow_model.trainable = False
        # self.flow_vae_net.vae_model.summary()
        # The attributes below are populated per-batch in __call__.
        self.blended_fields = None
        self.detected_positions = None
        self.cutout_size = stamp_shape
        self.num_components = None
        self.channel_last = None
        self.noise_sigma = None
        self.num_bands = len(survey.available_filters)
        self.field_size = None
        self.use_log_prob = None
        self.linear_norm_coeff = None
        self.optimizer = None
        self.max_iter = None
        self.z = None

    def __call__(
        self,
        blended_fields,
        detected_positions,
        num_components,
        noise_sigma=None,
        max_iter=60,
        use_log_prob=True,
        channel_last=False,
        linear_norm_coeff=10000,
        convergence_criterion=None,
        use_debvader=True,
        optimizer=None,
        map_solution=True,
    ):
        """Run the Deblending operation.

        Parameters
        ----------
        blended_fields: np.ndarray
            batch of blended fields.
        detected_positions: list
            List of detected positions.
            as in array and not image
        num_components: list
            list of number of galaxies present in the image.
        noise_sigma: list of float
            background noise-level in each band
        max_iter: int
            number of iterations in the deblending step
        use_log_prob: bool
            decides whether or not to use the log_prob output of the flow deblender in the optimization.
        channel_last: bool
            if the channels/filters are the last column of the blended_fields
        linear_norm_coeff: int/list
            list stores the bandwise linear normalizing/scaling factor.
            if int is passed, the same scaling factor is used for all.
        convergence_criterion: tfp.optimizer.convergence_criteria
            For termination of the optimization loop.
        use_debvader: bool
            Use the encoder as a deblender to set the initial position for deblending.
        optimizer: tf.keras.optimizers
            Optimizer to use used for gradient descent.
        map_solution: bool
            To obtain the map solution (MADNESS) or debvader solution.
            Both `map_solution` and `use_debvader` cannot be False simultaneously.
        """
        # tf.config.run_functions_eagerly(False)
        self.linear_norm_coeff = linear_norm_coeff
        self.max_iter = max_iter
        self.num_components = tf.convert_to_tensor(num_components, dtype=tf.int32)
        self.use_log_prob = use_log_prob
        self.components = None
        self.channel_last = channel_last
        self.noise_sigma = noise_sigma
        # Internally the fields are always stored channel-last and normalized.
        if self.channel_last:
            self.blended_fields = tf.convert_to_tensor(
                blended_fields / linear_norm_coeff,
                dtype=tf.float32,
            )
        else:
            self.blended_fields = tf.convert_to_tensor(
                np.transpose(blended_fields, axes=[0, 2, 3, 1]) / linear_norm_coeff,
                dtype=tf.float32,
            )
        # detected_positions is assumed rectangular:
        # (num_fields, max_number, 2) — TODO confirm padding convention upstream.
        self.detected_positions = np.array(detected_positions)
        self.max_number = self.detected_positions.shape[1]
        self.num_fields = self.detected_positions.shape[0]
        self.field_size = np.shape(blended_fields)[2]
        self.results = self.gradient_decent(
            convergence_criterion=convergence_criterion,
            use_debvader=use_debvader,
            optimizer=optimizer,
            map_solution=map_solution,
        )

    def get_components(self):
        """Return the predicted components.

        The final returned image has the same value of channel_last as the input image.
        """
        if self.channel_last:
            return self.components
        # Move the band axis back before the two spatial axes.
        return np.moveaxis(self.components, -1, -3)

    def compute_loss(
        self,
        z,
        sig_sq,
        index_pos_to_sub,
    ):
        """Compute loss at each epoch of Deblending optimization.

        Parameters
        ----------
        z: tf tensor
            latent space representations of the reconstructions.
        sig_sq: tf tensor
            Factor for division to convert the MSE to Gaussian approx to Poisson noise.
        index_pos_to_sub:
            index positions for subtraction if `use_scatter_and_sub` is True

        Returns
        -------
        final_loss: tf float
            Final loss to be minimized
        reconstruction_loss: tf float
            Loss from residuals in the field
        log_prob: tf float
            log prob evaluated by normalizing flow
        """
        # Decode every latent vector, then regroup cutouts by field.
        reconstructions = self.flow_vae_net.decoder(z)
        reconstructions = tf.reshape(
            reconstructions,
            [
                self.num_fields,
                self.max_number,
                self.cutout_size,
                self.cutout_size,
                self.num_bands,
            ],
        )
        # tf.print(postage_stamp.shape)
        # tf.print(reconstructions.shape)
        # tf.print(index_pos_to_sub.shape)
        # tf.print(num_components.shape)
        # tf.print(sig_sq.shape)
        # One chi-square term per field, computed in parallel over the batch.
        reconstruction_loss = tf.map_fn(
            vectorized_compute_reconst_loss,
            elems=(
                self.blended_fields,
                reconstructions,
                index_pos_to_sub,
                self.num_components,
                sig_sq,
            ),
            parallel_iterations=20,
            fn_output_signature=tf.TensorSpec(
                [],
                dtype=tf.float32,
            ),
        )
        # print(f"num fields: {self.num_fields}")
        # reconstruction_loss = residual_field**2 / sig_sq
        # tf.print(sig_sq, output_stream=sys.stdout)
        # reconstruction_loss = tf.math.reduce_sum(reconstruction_loss, axis=[1, 2, 3])
        # tf.print(reconstruction_loss.shape)
        # reconstruction_loss = reconstruction_loss / 2
        # Prior term: sum the flow log-probabilities of all galaxies per field.
        log_prob = self.flow_vae_net.flow(z)
        log_prob = tf.reduce_sum(
            tf.reshape(log_prob, [self.num_fields, self.max_number]), axis=[1]
        )
        # tf.print(log_prob.shape)
        # tf.print(reconstruction_loss, output_stream=sys.stdout)
        # tf.print(log_likelihood, output_stream=sys.stdout)
        # tf.print(reconstruction_loss)
        # tf.print(log_likelihood)
        final_loss = reconstruction_loss
        if self.use_log_prob:
            # Negative log-posterior: chi-square minus the flow prior.
            final_loss = reconstruction_loss - log_prob
        return final_loss, reconstruction_loss, log_prob

    # def get_index_pos_to_sub(self):
    #     """Get index position to run tf.tensor_scatter_nd_sub."""
    #     index_list = []
    #     for field_num in range(self.num_fields):
    #         inner_list = []
    #         for i in range(self.max_number):
    #             indices = (
    #                 np.indices((self.cutout_size, self.cutout_size, self.num_bands))
    #                 .reshape(3, -1)
    #                 .T
    #             )
    #             detected_position = self.detected_positions[field_num][i]
    #             starting_pos_x = round(detected_position[0]) - int(
    #                 (self.cutout_size - 1) / 2
    #             )
    #             starting_pos_y = round(detected_position[1]) - int(
    #                 (self.cutout_size - 1) / 2
    #             )
    #             indices[:, 0] += int(starting_pos_x)
    #             indices[:, 1] += int(starting_pos_y)
    #             inner_list.append(indices)
    #         index_list.append(inner_list)
    #     return np.array(index_list)

    def get_index_pos_to_sub(self):
        """Get index positions to run tf.tensor_scatter_nd_sub.

        Vectorized replacement of the commented-out double loop above: builds,
        for every field and every cutout slot, the (row, col, band) pixel
        indices of the cutout within the full field.
        """
        indices = (
            np.indices((self.cutout_size, self.cutout_size, self.num_bands))
            .reshape(3, -1)
            .T
        )
        # Tile the template indices over cutout slots and fields.
        indices = np.repeat(np.expand_dims(indices, axis=0), self.max_number, axis=0)
        indices = np.repeat(np.expand_dims(indices, axis=0), self.num_fields, axis=0)
        # Top-left corner of each (odd-sized) cutout, centered on the detection.
        starting_positions = np.round(self.detected_positions).astype(int) - int(
            (self.cutout_size - 1) / 2
        )
        # Move the pixel axis first so the broadcast adds the per-cutout offset
        # to the first two (spatial) coordinates only.
        indices = np.moveaxis(indices, 2, 0)
        indices[:, :, :, 0:2] += starting_positions
        indices = np.moveaxis(indices, 0, 2)
        return indices

    def get_padding_infos(self):
        """Compute padding info to convert galaxy cutout into field.

        Used by the (currently unused) padding branch of compute_residual.
        """
        padding_infos_list = []
        for field_num in range(self.num_fields):
            inner_list = []
            for detected_position in self.detected_positions[field_num]:
                starting_pos_x = round(detected_position[0]) - int(
                    (self.cutout_size - 1) / 2
                )
                starting_pos_y = round(detected_position[1]) - int(
                    (self.cutout_size - 1) / 2
                )
                # [before, after] padding per axis; bands are not padded.
                padding = [
                    [
                        starting_pos_x,
                        self.field_size - (starting_pos_x + int(self.cutout_size)),
                    ],
                    [
                        starting_pos_y,
                        self.field_size - (starting_pos_y + int(self.cutout_size)),
                    ],
                    [0, 0],
                ]
                inner_list.append(padding)
            padding_infos_list.append(inner_list)
        return np.array(padding_infos_list)

    def compute_noise_sigma(self):
        """Compute noise level with sep.

        NOTE(review): the background RMS is estimated from the *first* field of
        the batch only and reused for all fields — confirm this is intended.
        """
        sig = []
        for i in range(len(self.survey.available_filters)):
            sig.append(
                sep.Background(
                    np.ascontiguousarray(self.blended_fields.numpy()[0][:, :, i])
                ).globalrms
            )
        return np.array(sig)

    def gradient_decent(
        self,
        initZ=None,
        convergence_criterion=None,
        use_debvader=True,
        optimizer=None,
        map_solution=True,
    ):
        """Perform the gradient descent step to separate components (galaxies).

        Parameters
        ----------
        initZ: np.ndarray
            initial value of the latent space.
        convergence_criterion: tfp.optimizer.convergence_criteria
            For termination of the optimization loop
        use_debvader: bool
            Use the encoder as a deblender to set the initial position for deblending.
        optimizer: tf.keras.optimizers
            Optimizer to use used for gradient descent.
        map_solution: bool
            To obtain the map solution or debvader solution.
            Both `map_solution` and `use_debvader` cannot be False at the same time.

        Returns
        -------
        results: list
            variation of the loss over deblending iterations.
        """
        LOG.info("use debvader: " + str(use_debvader))
        if not map_solution and not use_debvader:
            raise ValueError(
                "Both use_debvader and map_solution cannot be False at the same time"
            )
        if not use_debvader:
            # check constraint parameter over here
            # Cold start: sample initial latent vectors from the flow prior.
            z = tf.Variable(
                self.flow_vae_net.td.sample(self.num_fields * self.max_number)
            )
        else:
            # use the encoder to find a good starting point.
            LOG.info("\nUsing encoder for initial point")
            t0 = time.time()
            # Flattened (field, slot) axis; empty slots stay all-zero cutouts.
            cutouts = np.zeros(
                (
                    self.num_fields * self.max_number,
                    self.cutout_size,
                    self.cutout_size,
                    self.num_bands,
                )
            )
            for field_num in range(self.num_fields):
                cutouts[
                    field_num * self.max_number : field_num * self.max_number
                    + self.num_components[field_num]
                ] = extract_cutouts(
                    self.blended_fields.numpy()[field_num],
                    pos=self.detected_positions[field_num][
                        : self.num_components[field_num]
                    ],
                    cutout_size=self.cutout_size,
                    channel_last=True,
                )[
                    0
                ]
            initZ = tfp.layers.MultivariateNormalTriL(self.latent_dim)(
                self.flow_vae_net.encoder(cutouts)
            )
            LOG.info("Time taken for initialization: " + str(time.time() - t0))
            # Use the posterior mean as the starting point of the descent.
            z = tf.Variable(
                tf.reshape(
                    initZ.mean(), (self.num_fields * self.max_number, self.latent_dim)
                )
            )
        if map_solution:
            if optimizer is None:
                # Default schedule: decay the LR by 0.8 every 30 steps.
                lr_scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
                    initial_learning_rate=0.075,
                    decay_steps=30,
                    decay_rate=0.8,
                    staircase=True,
                )
                optimizer = tf.keras.optimizers.Adam(learning_rate=lr_scheduler)
            LOG.info("\n--- Starting gradient descent in the latent space ---")
            LOG.info(f"Maximum number of iterations: {self.max_iter}")
            # LOG.info("Learning rate: " + str(optimizer.lr.numpy()))
            LOG.info(f"Number of fields: {self.num_fields}")
            LOG.info(f"Number of Galaxies: {self.num_components}")
            LOG.info(f"Dimensions of latent space: {self.latent_dim}")
            t0 = time.time()
            # NOTE(review): get_index_pos_to_sub() is computed twice here; the
            # first assignment is immediately overwritten — confirm and drop.
            index_pos_to_sub = self.get_index_pos_to_sub()
            index_pos_to_sub = tf.convert_to_tensor(
                self.get_index_pos_to_sub(),
                dtype=tf.int32,
            )
            # padding_infos = self.get_padding_infos()
            # NOTE(review): when noise_sigma *is* provided by the caller it is
            # never assigned to noise_level, which would raise a NameError
            # below — confirm intended handling of user-supplied noise levels.
            if self.noise_sigma is None:
                noise_level = self.compute_noise_sigma()
            noise_level = tf.convert_to_tensor(
                noise_level,
                dtype=tf.float32,
            )
            # Calculate sigma^2 with Gaussian approximation to Poisson noise.
            # Note here that self.postage stamp is normalized but it must be divided again
            # to ensure that the log likelihood does not change due to scaling/normalizing
            sig_sq = self.blended_fields / self.linear_norm_coeff + noise_level**2
            # sig_sq[sig_sq <= (5 * noise_level)] = 0
            results = tfp.math.minimize(
                loss_fn=self.generate_grad_step_loss(
                    z=z,
                    sig_sq=sig_sq,
                    index_pos_to_sub=index_pos_to_sub,
                ),
                trainable_variables=[z],
                num_steps=self.max_iter,
                optimizer=optimizer,
                convergence_criterion=convergence_criterion,
            )
            """ LOG.info(f"Final loss {output.objective_value.numpy()}")
            LOG.info("converged "+ str(output.converged.numpy()))
            LOG.info("converged "+ str(output.num_iterations.numpy()))
            z_flatten = output.position
            z = tf.reshape(z_flatten, shape=[self.num_components, self.latent_dim]) """
            LOG.info("--- Gradient descent complete ---")
            LOG.info("Time taken for gradient descent: " + str(time.time() - t0))
        else:
            results = None
        # Decode the final latents back to (de-normalized) galaxy images.
        self.components = tf.reshape(
            self.flow_vae_net.decoder(z) * self.linear_norm_coeff,
            [
                self.num_fields,
                self.max_number,
                self.cutout_size,
                self.cutout_size,
                self.num_bands,
            ],
        )
        self.z = tf.reshape(z, (self.num_fields, self.max_number, self.latent_dim))
        return results

    def generate_grad_step_loss(
        self,
        z,
        sig_sq,
        index_pos_to_sub,
    ):
        """Return function compute training loss that has no arguments.

        tfp.math.minimize expects a zero-argument loss_fn; this closure binds
        the current z, sig_sq and scatter indices.

        Parameters
        ----------
        z: tf tensor
            latent space representations of the reconstructions.
        sig_sq: tf tensor
            Factor for the division to convert the MSE to Gaussian approx to Poisson noise.
        index_pos_to_sub:
            index positions for subtraction if `use_scatter_and_sub` is True

        Returns
        -------
        training_loss: python function
            computes loss without taking any arguments.
        """

        @tf.function
        def training_loss():
            """Compute training loss."""
            loss, *_ = self.compute_loss(
                z=z,
                sig_sq=sig_sq,
                index_pos_to_sub=index_pos_to_sub,
            )
            return loss

        return training_loss
|
b-biswasREPO_NAMEMADNESSPATH_START.@MADNESS_extracted@MADNESS-main@madness_deblender@deblender.py@.PATH_END.py
|
{
"filename": "normalize_spec.py",
"repo_name": "saltastro/pyhrs",
"repo_path": "pyhrs_extracted/pyhrs-master/scripts/normalize_spec.py",
"type": "Python"
}
|
import sys
from PyQt4 import QtGui,QtCore
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from astropy.io import fits
import numpy as np
import argparse
class FitWindow(QtGui.QWidget):
"""A class describing with PyQt4 GUI for normalizing the spectra by fitting continuum .
Parameters
----------
warr: ~numpy.ndarray
Array with wavelength
farr: ~numpy.ndarray
Array with fluxes
oarr: ~numpy.ndarray
Array with orders numbers
outfile: ~str
Name of the fits file the output will be written to
fittype: ~str
Fitting type. Can be "individual" for fitting individual orders. For other values of this parameter whole spectrum will be fit simultaneously.
Returns
-------
The resulting spectrum will be saved to "outfile". The normalization will be performed only for orders which were inspected by hand.
Notes
-----
Rejected region should be in format "(Lower wavelength)-(Higher wavelength)" without spaces. In case of multiple regions
"""
def __init__(self,warr,farr, oarr,outfile,fittype):
self.warr=warr #table with wavelengths
self.farr=farr #table with fluxes
self.oarr=oarr #table with orders
self.fittype=fittype #fit type
self.outfile=outfile #name of output file
super(FitWindow,self).__init__()
self.setWindowTitle("Normalizing spectrum")
self.layout()
def layout(self):
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas, self)
self.fit_orderL = QtGui.QLabel('Fit order')
self.fit_orderE = QtGui.QLineEdit()
self.fit_orderE.setText("6")
self.iterationsL = QtGui.QLabel('Iterations')
self.iterationsE = QtGui.QLineEdit()
self.iterationsE.setText("10")
self.lowrejL = QtGui.QLabel('Lower rejection')
self.lowrejE = QtGui.QLineEdit()
self.lowrejE.setText("1")
self.uprejL = QtGui.QLabel('Upper rejection')
self.uprejE = QtGui.QLineEdit()
self.uprejE.setText("3")
self.rej_regionL = QtGui.QLabel('Rejection regions')
self.rej_regionE = QtGui.QLineEdit()
self.fitbutton=QtGui.QPushButton("Refit",self)
self.fitbutton.clicked.connect(self.fit)
self.exitbutton=QtGui.QPushButton("Exit + Save",self)
self.exitbutton.clicked.connect(self.exit_fitting)
self.nextbutton=QtGui.QPushButton("Next",self)
self.nextbutton.clicked.connect(self.next_order)
self.previousbutton=QtGui.QPushButton("Previous",self)
self.previousbutton.clicked.connect(self.previous_order)
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.canvas, 1, 0,1,8)
grid.addWidget(self.toolbar, 2, 0,1,8)
grid.addWidget(self.fit_orderL, 3, 0)
grid.addWidget(self.fit_orderE, 3, 1)
grid.addWidget(self.iterationsL, 3, 2)
grid.addWidget(self.iterationsE, 3, 3)
grid.addWidget(self.lowrejL, 3, 4)
grid.addWidget(self.lowrejE, 3, 5)
grid.addWidget(self.uprejL, 3, 6)
grid.addWidget(self.uprejE, 3, 7)
grid.addWidget(self.rej_regionL, 4, 0,1,1)
grid.addWidget(self.rej_regionE, 4, 1,1,7)
grid.addWidget(self.fitbutton, 5, 0,1,2)
grid.addWidget(self.previousbutton, 5, 2,1,2)
grid.addWidget(self.nextbutton, 5, 4,1,2)
grid.addWidget(self.exitbutton, 5, 7,1,1)
self.orders=np.unique(self.oarr) #list of all orders in the spectrum
self.order_count=0 #keeps track of witch orders is being normalized
self.coefficients_tab=np.zeros_like(self.orders).tolist() #array that stores coefficients of polynomial fit to the order
self.fit()
self.setLayout(grid)
self.show()
def fit(self): #fitting part of module
plt.clf() #cleaning plots so that previous fits are not stored on screen
try: #reading parameters of the fit
fit_order=int(self.fit_orderE.text())
iterations=int(self.iterationsE.text())
lowrej=float(self.lowrejE.text())
uprej=float(self.uprejE.text())
rej_regions=str(self.rej_regionE.text()).split()
except ValueError:
print "Bad input"
if self.fittype=="individual": #if fittype=="individual" fit will be only to one order
o=self.orders[self.order_count]
o=int(o)
i_order=np.where(self.oarr==o)
else: #fitting to all orders and
i_order=np.where(self.farr > 0.)
for i in range(iterations): #iterative fitting
if i==0: #first iteration - there are no residuals available
coefficients=np.polyfit(self.warr[i_order], self.farr[i_order], fit_order)
else: #fitting only to good points (not outliers)
if i_fit[0].size==0: #if all point are outliers print error
print "Error while rejecting points - try higher values of upper rejection and lower rejection"
i_fit=order_i
coefficients=np.polyfit(self.warr[i_fit], self.farr[i_fit], fit_order)
#actual fitting
func=np.poly1d(coefficients)
fitted=np.polyval(func,self.warr)
residuals=(self.farr-fitted)
#rejecting outliers and bad regions
mean_res=np.mean(residuals[i_order])
std_res=np.std(residuals[i_order])
if self.fittype=="individual":
i_fit=np.where( (self.oarr==o) & (residuals<uprej*std_res) & (residuals>-lowrej*std_res))
else:
i_fit=np.where( (self.farr > 0.) & (residuals<uprej*std_res) & (residuals>-lowrej*std_res))
for j in range(len(rej_regions)):
region=np.array(rej_regions[j].split("-")).astype(float)
if len(region)==2:
i_tmp=range(len(self.warr))
i_region=np.where((self.warr > np.min(region)) & (self.warr < np.max(region)))
mask_r1=np.in1d(i_tmp, i_region)
mask_r2=np.in1d(i_tmp, i_fit)
i_fit=np.where(~mask_r1 & mask_r2)
else:
print "Bad region: ",rej_regions[j]
#making list of outliers (only for plotting)
i_outliers=range(len(self.farr))
mask1=np.in1d(i_outliers, i_fit)
mask2=np.in1d(i_outliers, i_order)
i_outliers=np.where(~mask1 & mask2)
self.coefficients_tab[self.order_count]=coefficients #storing the fit coefficients
ax1 = self.figure.add_subplot(211)
ax1.plot(self.warr[i_order],self.farr[i_order],c="green")
ax1.scatter(self.warr[i_outliers],self.farr[i_outliers],c="red",edgecolor="None")
ax1.axes.get_xaxis().set_visible(False)
ax1.plot(self.warr[i_order],fitted[i_order],c="blue")
ax2 = self.figure.add_subplot(212, sharex=ax1)
ax2.hold(False)
ax2.plot(self.warr[i_order],self.farr[i_order]/fitted[i_order],c="blue")
ax2.set_xlabel(r"Wavelength [$\AA$]")
plt.tight_layout()
self.canvas.draw()
def next_order(self): #moving to the next order when fitting individual orders and the current order is not the last one
if (not self.order_count==(len(self.orders)-1) ) and (self.fittype=="individual"):
self.order_count+=1
self.fit()
def previous_order(self): #moving to the previous order when fitting individual orders and the current order is not the first one
if not self.order_count==0:
self.order_count-=1
self.fit()
def exit_fitting(self,textbox): #exit fitting window and store the normalized spectrum in outfile
for j in range(len(self.orders)):
check_fit=False #checks whether fit was performed for this order
if self.fittype=="individual": #if fitting individual orders
if type(self.coefficients_tab[j]) is np.ndarray: #if the fitting was performed for this order
func=np.poly1d(self.coefficients_tab[j])
check_fit=True
else:
func=np.poly1d(self.coefficients_tab[0]) #if fitting all the orders simultaneously the fitting coefficients are always stored in the first
#element of coefficients_tab
check_fit=True
fitted=np.polyval(func,self.warr)
i_order=np.where(self.oarr==self.orders[j])
if check_fit:
self.farr[i_order]=self.farr[i_order]/fitted[i_order]
else:
self.farr[i_order]=self.farr[i_order]
c1 = fits.Column(name='Wavelength', format='D', array=self.warr, unit='Angstroms')
c2 = fits.Column(name='Flux', format='D', array=self.farr, unit='Counts')
c3 = fits.Column(name='Order', format='I', array=self.oarr)
tbhdu = fits.BinTableHDU.from_columns([c1,c2,c3])
tbhdu.writeto(outfile, clobber=True)
self.close()
def normalize_spec(warr,farr,oarr,outfile,fit="individual"):
    """A function calling the fitting window

    Parameters
    ----------
    warr: ~numpy.ndarray
        Array with wavelength
    farr: ~numpy.ndarray
        Array with fluxes
    oarr: ~numpy.ndarray
        Array with orders numbers
    outfile: ~str
        Name of the fits file the output will be written to
    fit: ~str
        Fitting type. Can be "individual" for fitting individual orders. For other values of this parameter whole spectrum will be fit simultaneously.
    """
    # Spin up a Qt application, show the interactive fit window, and block
    # until the user closes it (the window itself writes the output file).
    app = QtGui.QApplication(sys.argv)
    window = FitWindow(warr, farr, oarr, outfile, fit)
    window.raise_()
    app.exec_()
    app.deleteLater()
    return True
if __name__=="__main__":
    # Command-line entry point: load an extracted HRS spectrum and open the
    # interactive continuum-normalization window.
    parser = argparse.ArgumentParser()
    parser.add_argument("spectrum_fits",help="Fits file with an extracted HRS spectrum",type=str)
    parser.add_argument("-a","--all",help="Fits all orders simultaneously",action="store_true")
    args=parser.parse_args()
    img_sci = args.spectrum_fits
    hdu_sci = fits.open(img_sci)
    wave_sci = hdu_sci[1].data['Wavelength']
    flux_sci = hdu_sci[1].data['Flux']
    order_sci = hdu_sci[1].data['Order']
    #sorting arrays
    i_sort=np.argsort(wave_sci)
    wave_sci=wave_sci[i_sort]
    flux_sci=flux_sci[i_sort]
    order_sci=order_sci[i_sort]
    # BUGFIX: was "n"+sys.argv[1], which is wrong whenever an option flag
    # (e.g. -a) precedes the filename; use the parsed positional instead.
    outfile="n"+img_sci
    if args.all:
        normalize_spec(wave_sci,flux_sci,order_sci,outfile,fit="all")
    else:
        normalize_spec(wave_sci,flux_sci,order_sci,outfile)
|
saltastroREPO_NAMEpyhrsPATH_START.@pyhrs_extracted@pyhrs-master@scripts@normalize_spec.py@.PATH_END.py
|
{
"filename": "_reversescale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattersmith/marker/line/_reversescale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the `reversescale` property of
    `scattersmith.marker.line`."""

    def __init__(
        self,
        plotly_name="reversescale",
        parent_name="scattersmith.marker.line",
        **kwargs,
    ):
        # Default the edit type to "plot" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "plot")
        super(ReversescaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattersmith@marker@line@_reversescale.py@.PATH_END.py
|
{
"filename": "test_prefix_param_inheritance.py",
"repo_name": "nanograv/PINT",
"repo_path": "PINT_extracted/PINT-master/tests/test_prefix_param_inheritance.py",
"type": "Python"
}
|
import io
from pint.models import get_model
# Minimal TDB-units par file for PSR J0523-7125 (astrometry + F0/F1 only).
# The tests below append extra "F2"/"F3" lines to this template to probe
# how prefix-parameter fit flags are inherited by pint.models.get_model.
input_par = """PSRJ J0523-7125
EPHEM DE405
CLK TT(TAI)
UNITS TDB
START 55415.8045121523831364
FINISH 59695.2673406681377430
TIMEEPH FB90
T2CMETHOD IAU2000B
DILATEFREQ N
DMDATA N
NTOA 87
CHI2 404.35757416343705
RAJ 5:23:48.66000000
DECJ -71:25:52.58000000
PMRA 0.0
PMDEC 0.0
PX 0.0
POSEPOCH 59369.0000000000000000
F0 3.1001291305547288772 1 2.7544353718657238425e-11
F1 -2.4892219423278130317e-15 1 2.8277388169449218064e-19
PEPOCH 59609.0000000000000000
"""
def test_prefixparaminheritance_stayfrozen():
    """F2/F3 appended without a fit flag must remain frozen after parsing."""
    model = get_model(io.StringIO(input_par + "\nF2 0\nF3 0"))
    for par_name in ("F2", "F3"):
        assert getattr(model, par_name).frozen
def test_prefixparaminheritance_unfrozen():
    """F2/F3 appended with a fit flag of 1 must come back unfrozen."""
    model = get_model(io.StringIO(input_par + "\nF2 0 1\nF3 0 1"))
    for par_name in ("F2", "F3"):
        assert not getattr(model, par_name).frozen
|
nanogravREPO_NAMEPINTPATH_START.@PINT_extracted@PINT-master@tests@test_prefix_param_inheritance.py@.PATH_END.py
|
{
"filename": "_usrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/cone/_usrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class UsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for the ``cone.usrc`` attribute."""

    def __init__(self, plotly_name="usrc", parent_name="cone", **kwargs):
        edit_type = kwargs.pop("edit_type", "none")
        super(UsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@cone@_usrc.py@.PATH_END.py
|
{
"filename": "allskyf31.py",
"repo_name": "kapteyn-astro/kapteyn",
"repo_path": "kapteyn_extracted/kapteyn-master/doc/source/EXAMPLES/allskyf31.py",
"type": "Python"
}
|
from kapteyn import maputils
import numpy
from service import *
# Example script: all-sky plot for Calabretta & Greisen fig. 33c.
# NOTE(review): plt, figsize, plotbox, doplot and markerpos come from the
# star-import of the local `service` module — confirm against that module.
fignum = 31
fig = plt.figure(figsize=figsize)
frame = fig.add_axes(plotbox)
title = r"""Zenith equal area projection (ZEA) oblique with:
$\alpha_p=0^\circ$, $\theta_p=30^\circ$ and $\phi_p = 150^\circ$. (Cal. fig.33c)"""
# Synthetic FITS header describing an oblique ZEA projection.
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
          'CTYPE1' : 'RA---ZEA',
          'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -3.5,
          'CTYPE2' : 'DEC--ZEA',
          'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 3.5,
          'PV1_3' : 150.0   # Works only with patched WCSLIB 4.3
         }
# Graticule line positions in longitude (X) and latitude (Y), degrees.
X = numpy.arange(0, 360.0, 15.0)
Y = numpy.arange(-75, 90, 15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
                       startx=X, starty=Y)
# Highlight the 0/180 degree meridians and the equator.
grat.setp_lineswcs0((0,180), color='g', lw=2)
grat.setp_lineswcs1(0, lw=2)
# World coordinates at which tick labels are drawn.
lon_world = list(range(0,360,30))
lat_world = [-60, -30, 30, 60]
labkwargs0 = {'color':'r', 'va':'center', 'ha':'center'}
labkwargs1 = {'color':'b', 'va':'center', 'ha':'right'}
doplot(frame, fignum, annim, grat, title,
       lon_world=lon_world, lat_world=lat_world,
       labkwargs0=labkwargs0, labkwargs1=labkwargs1,
       markerpos=markerpos)
|
kapteyn-astroREPO_NAMEkapteynPATH_START.@kapteyn_extracted@kapteyn-master@doc@source@EXAMPLES@allskyf31.py@.PATH_END.py
|
{
"filename": "agent.py",
"repo_name": "simonsobs/socs",
"repo_path": "socs_extracted/socs-main/socs/agents/xy_stage/agent.py",
"type": "Python"
}
|
import argparse
import os
import time
import txaio
from ocs import ocs_agent, site_config
from ocs.ocs_twisted import Pacemaker, TimeoutLock
ON_RTD = os.environ.get('READTHEDOCS') == 'True'
if not ON_RTD:
# yes I shouldn't have named that module agent
from xy_agent.xy_connect import XY_Stage
class LATRtXYStageAgent:
    """
    Agent for connecting to the LATRt XY Stages.

    Args:
        ip_addr: IP address where RPi server is running.
        port: Port the RPi Server is listening on.
        mode: 'acq': Start data acquisition on initialize.
        samp: Default sampling frequency in Hz.
    """

    def __init__(self, agent, ip_addr, port, mode=None, samp=2):
        self.ip_addr = ip_addr
        self.port = int(port)
        self.xy_stage = None
        self.initialized = False
        self.take_data = False
        self.is_moving = False

        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()

        self.auto_acq = (mode == 'acq')
        self.sampling_frequency = float(samp)

        # Register the position feed for the housekeeping aggregator.
        agg_params = {
            'frame_length': 10 * 60,  # [sec]
        }
        self.agent.register_feed('positions',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=0)

    @ocs_agent.param('_')
    def init_xy_stage(self, session, params=None):
        """init_xy_stage()

        **Task** - Perform first time setup for communication with XY stages.
        """
        self.log.debug("Trying to acquire lock")
        with self.lock.acquire_timeout(timeout=0, job='init') as acquired:
            # Locking mechanism stops code from proceeding if no lock acquired
            if not acquired:
                self.log.warn("Could not start init because {} is already running".format(self.lock.job))
                return False, "Could not acquire lock."
            # Run the function you want to run
            self.log.debug("Lock Acquired Connecting to Stages")
            self.xy_stage = XY_Stage(self.ip_addr, self.port)
            self.xy_stage.init_stages()
            print("XY Stages Initialized")
        # This part is for the record and to allow future calls to proceed,
        # so does not require the lock
        self.initialized = True
        if self.auto_acq:
            self.agent.start('acq', params={'sampling_frequency': self.sampling_frequency})
        return True, 'XY Stages Initialized.'

    @ocs_agent.param('distance', type=float)
    @ocs_agent.param('velocity', type=float, check=lambda x: 0 <= x < 1.2)
    def move_x_cm(self, session, params):
        """move_x_cm(distance, velocity)

        **Task** - Move the X axis.

        Parameters:
            distance (float): Distance to move in cm.
            velocity (float): Velocity to move at. Must be less than 1.2.
        """
        with self.lock.acquire_timeout(timeout=3, job='move_x_cm') as acquired:
            if not acquired:
                self.log.warn(f"Could not start x move because lock held by {self.lock.job}")
                # BUGFIX: return the (ok, message) tuple expected of OCS
                # tasks; this previously returned a bare False, unlike
                # every other task in this agent.
                return False, "Could not acquire lock"
            self.xy_stage.move_x_cm(params.get('distance', 0), params.get('velocity', 1))

        time.sleep(1)
        while True:
            # Data acquisition updates the moving field if it is running;
            # otherwise we must poll the controller ourselves.
            if not self.take_data:
                with self.lock.acquire_timeout(timeout=3, job='move_x_cm') as acquired:
                    if not acquired:
                        self.log.warn(f"Could not check because lock held by {self.lock.job}")
                        return False, "Could not acquire lock"
                    self.is_moving = self.xy_stage.moving
            if not self.is_moving:
                break
        return True, "X Move Complete"

    @ocs_agent.param('distance', type=float)
    @ocs_agent.param('velocity', type=float, check=lambda x: 0 <= x < 1.2)
    def move_y_cm(self, session, params):
        """move_y_cm(distance, velocity)

        **Task** - Move the Y axis.

        Parameters:
            distance (float): Distance to move in cm.
            velocity (float): Velocity to move at. Must be less than 1.2.
        """
        with self.lock.acquire_timeout(timeout=3, job='move_y_cm') as acquired:
            if not acquired:
                self.log.warn(f"Could not start y move because lock held by {self.lock.job}")
                return False, "could not acquire lock"
            self.xy_stage.move_y_cm(params.get('distance', 0), params.get('velocity', 1))

        time.sleep(1)
        while True:
            # Data acquisition updates the moving field if it is running.
            if not self.take_data:
                with self.lock.acquire_timeout(timeout=3, job='move_y_cm') as acquired:
                    if not acquired:
                        self.log.warn(f"Could not check for move because lock held by {self.lock.job}")
                        return False, "could not acquire lock"
                    self.is_moving = self.xy_stage.moving
            if not self.is_moving:
                break
        return True, "Y Move Complete"

    @ocs_agent.param('position', type=tuple)
    def set_position(self, session, params):
        """set_position(position)

        **Task** - Set position of the XY stage.

        Parameters:
            position (tuple): (X, Y) position.
        """
        with self.lock.acquire_timeout(timeout=3, job='set_position') as acquired:
            if not acquired:
                self.log.warn(f"Could not set position because lock held by {self.lock.job}")
                return False, "Could not acquire lock"
            self.xy_stage.position = params['position']
        return True, "Position Updated"

    @ocs_agent.param('_')
    def set_enabled(self, session, params=None):
        """set_enabled()

        **Task** - Tell the controller to hold stages enabled.
        """
        with self.lock.acquire_timeout(timeout=3, job='set_enabled') as acquired:
            if not acquired:
                self.log.warn(
                    f"Could not set position because lock held by {self.lock.job}")
                return False, "Could not acquire lock"
            self.xy_stage.enable()
        return True, "Enabled"

    @ocs_agent.param('_')
    def set_disabled(self, session, params=None):
        """set_disabled()

        **Task** - Tell the controller to hold stages disabled.
        """
        with self.lock.acquire_timeout(timeout=3, job='set_disabled') as acquired:
            if not acquired:
                self.log.warn(
                    f"Could not set position because lock held by {self.lock.job}")
                return False, "Could not acquire lock"
            self.xy_stage.disable()
        return True, "Disabled"

    @ocs_agent.param('sampling_frequency', default=None, type=float)
    def acq(self, session, params=None):
        """acq(sampling_frequency=2)

        **Process** - Run data acquisition.

        Parameters:
            sampling_frequency (float): Sampling rate to acquire data at.
                Defaults to value set in site config file (or 2 Hz if
                unspecified.)

        Notes:
            The most recent positions are stored in the session.data object in the
            format::

                >>> response.session['data']
                {"positions":
                    {"x": x position in cm,
                     "y": y position in cm}
                }
        """
        if params is None:
            params = {}
        # BUGFIX: the ocs param decorator defaults sampling_frequency to
        # None, so the key may be present with a None value; dict.get's
        # fallback alone would then hand None to Pacemaker.
        f_sample = params.get('sampling_frequency')
        if f_sample is None:
            f_sample = self.sampling_frequency

        pm = Pacemaker(f_sample, quantize=True)

        if not self.initialized or self.xy_stage is None:
            raise Exception("Connection to XY Stages not initialized")

        with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
            if not acquired:
                self.log.warn("Could not start acq because {} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            self.log.info(f"Starting Data Acquisition for XY Stages at {f_sample} Hz")
            self.take_data = True
            last_release = time.time()

            while self.take_data:
                # Periodically cycle the lock so short tasks can interleave.
                if time.time() - last_release > 1.:
                    if not self.lock.release_and_acquire(timeout=10):
                        self.log.warn(f"Could not re-acquire lock now held by {self.lock.job}.")
                        return False, "could not re-acquire lock"
                    last_release = time.time()
                pm.sleep()

                data = {'timestamp': time.time(), 'block_name': 'positions', 'data': {}}
                pos = self.xy_stage.position
                self.is_moving = self.xy_stage.moving

                data['data']['x'] = pos[0]
                data['data']['y'] = pos[1]

                self.agent.publish_to_feed('positions', data)
                session.data.update(data['data'])
        return True, 'Acquisition exited cleanly.'

    def _stop_acq(self, session, params=None):
        """Stopper for the 'acq' process; requests the acquisition loop exit."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running.'
def make_parser(parser=None):
    """Build the argument parser for the Agent.

    Keeping this in a standalone function allows sphinx to generate
    documentation for the command-line options automatically.
    """
    if parser is None:
        parser = argparse.ArgumentParser()

    # All options for this agent are plain string-valued flags.
    pgroup = parser.add_argument_group('Agent Options')
    for opt in ('--ip-address', '--port', '--mode', '--sampling_frequency'):
        pgroup.add_argument(opt)

    return parser
def main(args=None):
    """Entry point: configure logging, parse site options and run the agent."""
    # For logging
    txaio.use_twisted()
    txaio.make_logger()

    # Start logging
    txaio.start_logging(level=os.environ.get("LOGLEVEL", "info"))

    parser = make_parser()

    # Interpret options in the context of site_config.
    args = site_config.parse_args(agent_class='LATRtXYStageAgent',
                                  parser=parser,
                                  args=args)

    agent, runner = ocs_agent.init_site_agent(args)

    xy_agent = LATRtXYStageAgent(agent, args.ip_address, args.port, args.mode, args.sampling_frequency)

    # NOTE(review): set_enabled/set_disabled exist on the agent class but are
    # not registered here — confirm whether that is intentional.
    agent.register_task('init_xy_stage', xy_agent.init_xy_stage)
    agent.register_task('move_x_cm', xy_agent.move_x_cm)
    agent.register_task('move_y_cm', xy_agent.move_y_cm)
    agent.register_task('set_position', xy_agent.set_position)
    agent.register_process('acq', xy_agent.acq, xy_agent._stop_acq)

    runner.run(agent, auto_reconnect=True)


if __name__ == '__main__':
    main()
|
simonsobsREPO_NAMEsocsPATH_START.@socs_extracted@socs-main@socs@agents@xy_stage@agent.py@.PATH_END.py
|
{
"filename": "ImageEmbedderOptions.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_support/task/vision/ImageEmbedderOptions.md",
"type": "Markdown"
}
|
page_type: reference
description: Options for the image embedder task.
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_support.task.vision.ImageEmbedderOptions" />
<meta itemprop="path" content="Stable" />
<meta itemprop="property" content="__eq__"/>
<meta itemprop="property" content="__init__"/>
</div>
# tflite_support.task.vision.ImageEmbedderOptions
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/python/task/vision/image_embedder.py#L32-L44">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
Options for the image embedder task.
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>tflite_support.task.vision.ImageEmbedderOptions(
base_options: <a href="../../../tflite_support/task/core/BaseOptions"><code>tflite_support.task.core.BaseOptions</code></a>,
embedding_options: <a href="../../../tflite_support/task/processor/EmbeddingOptions"><code>tflite_support.task.processor.EmbeddingOptions</code></a> = dataclasses.field(default_factory=_EmbeddingOptions)
)
</code></pre>
<!-- Placeholder for "Used in" -->
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2"><h2 class="add-link">Attributes</h2></th></tr>
<tr>
<td>
`base_options`<a id="base_options"></a>
</td>
<td>
Base options for the image embedder task.
</td>
</tr><tr>
<td>
`embedding_options`<a id="embedding_options"></a>
</td>
<td>
Embedding options for the image embedder task.
</td>
</tr>
</table>
## Methods
<h3 id="__eq__"><code>__eq__</code></h3>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>__eq__(
other
)
</code></pre>
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_support@task@vision@ImageEmbedderOptions.md@.PATH_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sunburst/hoverlabel/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for ``sunburst.hoverlabel.bgcolorsrc``."""

    def __init__(
        self, plotly_name="bgcolorsrc", parent_name="sunburst.hoverlabel", **kwargs
    ):
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(BgcolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sunburst@hoverlabel@_bgcolorsrc.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "handley-lab/fgivenx",
"repo_path": "fgivenx_extracted/fgivenx-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python3
try:
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
def readme():
    """Return the full text of README.rst, used as the long description."""
    with open('README.rst') as fh:
        return fh.read()
def get_version(short=False):
    """Extract the package version from the ':Version:' field of README.rst.

    When *short* is True, only the 'major.minor' portion is returned.
    """
    with open('README.rst') as fh:
        for line in fh:
            if ':Version:' not in line:
                continue
            # Field layout is "  :Version: x.y.z" — the value is the third
            # colon-separated token.
            ver = line.split(':')[2].strip()
            if not short:
                return ver
            major, minor = ver.split('.')[:2]
            return '%s.%s' % (major, minor)
# Package metadata; version and long description are pulled from README.rst
# so they are maintained in a single place.
setup(name='fgivenx',
      version=get_version(),
      description='fgivenx: Functional Posterior Plotter',
      long_description=readme(),
      author='Will Handley',
      author_email='wh260@cam.ac.uk',
      url='https://github.com/fgivenx/fgivenx',
      packages=['fgivenx', 'fgivenx.test'],
      install_requires=['matplotlib', 'numpy', 'scipy'],
      setup_requires=['pytest-runner'],
      # Optional feature sets installable as fgivenx[docs], fgivenx[parallel], ...
      extras_require={
          'docs': ['sphinx', 'sphinx_rtd_theme', 'numpydoc'],
          'parallel': ['joblib'],
          'progress_bar': ['tqdm'],
          'getdist_chains': ['getdist']
          },
      tests_require=['pytest', 'pytest-mpl'],
      include_package_data=True,
      license='MIT',
      classifiers=[
                   'Development Status :: 5 - Production/Stable',
                   'Intended Audience :: Developers',
                   'Intended Audience :: Science/Research',
                   'Natural Language :: English',
                   'License :: OSI Approved :: MIT License',
                   'Programming Language :: Python :: 3.8',
                   'Programming Language :: Python :: 3.9',
                   'Programming Language :: Python :: 3.10',
                   'Topic :: Scientific/Engineering',
                   'Topic :: Scientific/Engineering :: Astronomy',
                   'Topic :: Scientific/Engineering :: Physics',
                   'Topic :: Scientific/Engineering :: Visualization',
                   'Topic :: Scientific/Engineering :: Information Analysis',
                   'Topic :: Scientific/Engineering :: Mathematics',
      ],
      )
|
handley-labREPO_NAMEfgivenxPATH_START.@fgivenx_extracted@fgivenx-master@setup.py@.PATH_END.py
|
{
"filename": "geometry.py",
"repo_name": "ratt-ru/CubiCal",
"repo_path": "CubiCal_extracted/CubiCal-master/cubical/degridder/geometry.py",
"type": "Python"
}
|
import numpy as np
import scipy.stats as sstats
import scipy.signal as ssig
import scipy.spatial as spat
import copy
import time
def timeit(method):
    """Decorator reporting the elapsed run time of *method*.

    If the call carries a ``log_time`` dict keyword, the elapsed time in
    integer milliseconds is stored there under ``log_name`` (default: the
    upper-cased method name) instead of being printed.  Note that the
    ``log_time``/``log_name`` keywords are also forwarded to *method*
    (pre-existing behaviour, preserved for compatibility).
    """
    def timed(*args, **kw):
        # BUGFIX: time.perf_counter is monotonic, so the measurement cannot
        # be skewed by wall-clock adjustments the way time.time can.
        ts = time.perf_counter()
        result = method(*args, **kw)
        te = time.perf_counter()
        if 'log_time' in kw:
            name = kw.get('log_name', method.__name__.upper())
            kw['log_time'][name] = int((te - ts) * 1000)
        else:
            print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
        return result
    return timed
DEBUG = True
class BoundingConvexHull(object):
    """Convex hull around a set of points (or child hulls) with an associated
    sparse pixel mask of (y, x) coordinates and per-pixel weights."""

    def __init__(self, list_hulls, name="unnamed", mask = None, check_mask_outofbounds=True):
        """ Initializes a bounding convex hull around a list of bounding convex hulls or series of
            points. A unity-weighted mask is computed for the region that falls within this convex hull
            if a mask of (y, x) coordinates is not provided. Otherwise if a mask is provided and the
            check_mask_outofbounds value is set the masked coordinates are not verified to fall within
            the hull. The latter should thus be used with some caution by the user, but can potentially
            significantly speed up the mask creation process for axis aligned regions.
        """
        self._name = name
        self._check_mask_outofbounds = check_mask_outofbounds
        self._cached_filled_mask = None  # dense mask is built lazily by .mask
        # Stack corners of child hulls, or treat each entry as an (x, y) pair.
        self._vertices = points = np.vstack([b.corners
            if hasattr(b, "corners") else [b[0], b[1]] for b in list_hulls])
        self._hull = spat.ConvexHull(points)
        if mask is None:
            self._mask, self._mask_weights = self.init_mask()
        else:
            self.sparse_mask = mask

    def invalidate_cached_masks(self):
        """ Invalidates the cached masks (sparse or regular) """
        self._cached_filled_mask = None
        self._mask, self._mask_weights = self.init_mask()

    def __str__(self):
        return ",".join(["({0:d},{1:d})".format(x,y) for (x,y) in self.corners])

    def init_mask(self):
        """ creates a sparse mask of the convex hull of the form (y, x) tuples """
        # Bounding rectangle of the hull from its edge segments.
        lines = np.hstack([self.corners, np.roll(self.corners, -1, axis=0)])
        minx = np.min(lines[:, 0:4:2]); maxx = np.max(lines[:, 0:4:2])
        miny = np.min(lines[:, 1:4:2]); maxy = np.max(lines[:, 1:4:2])
        x = np.arange(minx, maxx + 1, 1) #upper limit inclusive
        y = np.arange(miny, maxy + 1, 1)
        # NOTE(review): meshgrid is computed twice here (the first result is
        # unused); the zip below recomputes it.
        meshgrid = np.meshgrid(y, x)
        bounding_mesh = list(zip(*[np.ravel(x) for x in np.meshgrid(y, x)]))
        # Keep only pixels whose (x, y) lie inside the hull, unless the
        # out-of-bounds check is disabled.
        sparse_mask = bounding_mesh if not self._check_mask_outofbounds else \
            [c for c in bounding_mesh if c[::-1] in self]
        mask_weights = np.ones(len(sparse_mask)) #initialize to unity, this should be modified when coadding
        return sparse_mask, mask_weights

    @property
    def sprase_mask_weights(self):
        """ returns sparse mask weights """
        # NOTE(review): property name is misspelled ("sprase"); renaming would
        # break existing callers, so it is only flagged here.
        return self._mask_weights

    @property
    def sparse_mask(self):
        """ returns a sparse mask (y, x) values of all points in the masked region """
        return self._mask

    @sparse_mask.setter
    def sparse_mask(self, mask):
        """ Sets the mask of the hull from a sparse mask - list of (y, x) coordinates """
        if not isinstance(mask, list):
            raise TypeError("Mask must be list")
        if not (hasattr(mask, "__len__") and (len(mask) == 0 or (hasattr(mask[0], "__len__") and len(mask[0]) == 2))):
            raise TypeError("Mask must be a sparse mask of 2 element values")
        if self._check_mask_outofbounds:
            # Drop coordinates outside the hull; note (y, x) -> (x, y) flip.
            self._mask = copy.deepcopy([c for c in mask if (c[1], c[0]) in self])
        else:
            self._mask = copy.deepcopy(mask)
        self._mask_weights = np.ones(len(self._mask))

    @property
    def mask(self, dtype=np.float64):
        """ Creates a filled rectangular mask grid of size y, x """
        # NOTE(review): the dtype argument of a property getter can never be
        # supplied by callers; it always takes its default.
        if self._cached_filled_mask is not None:
            return self._cached_filled_mask

        lines = np.hstack([self.corners, np.roll(self.corners, -1, axis=0)])
        minx = np.min(lines[:, 0:4:2]); maxx = np.max(lines[:, 0:4:2])
        miny = np.min(lines[:, 1:4:2]); maxy = np.max(lines[:, 1:4:2])
        nx = maxx - minx + 1 # inclusive
        ny = maxy - miny + 1
        mesh = np.zeros(nx*ny, dtype=dtype)
        if nx==0 or ny==0 or len(self.sparse_mask) == 0:
            self._cached_filled_mask = mesh.reshape((ny, nx))
        else:
            sparse_mask = np.array(self.sparse_mask)
            # Select sparse-mask pixels that fall inside the bounding rectangle.
            sel = np.logical_and(np.logical_and(sparse_mask[:, 1] >= minx,
                                                sparse_mask[:, 1] <= maxx),
                                 np.logical_and(sparse_mask[:, 0] >= miny,
                                                sparse_mask[:, 0] <= maxy))
            # Scatter the weights into the flattened dense grid.
            flat_index = (sparse_mask[sel][:, 0] - miny)*nx + (sparse_mask[sel][:, 1] - minx)
            mesh[flat_index] = self._mask_weights[sel]
            self._cached_filled_mask = mesh.reshape((ny, nx))
        return self._cached_filled_mask

    @classmethod
    def regional_data(cls, sel_region, data_cube, axes=(2, 3), oob_value=0):
        """ 2D array containing all values within convex hull
            sliced out along axes provided as argument. Portions of sel_region
            that are outside of the data_cube is set to oob_value
            assumes the last value of axes is the fastest varying axis
        """
        if not isinstance(sel_region, BoundingConvexHull):
            raise TypeError("Object passed in is not of type BoundingConvexHull")
        if not (hasattr(axes, "__len__") and len(axes) == 2):
            raise ValueError("Expected a tupple of axes along which to slice out a region")
        axes = sorted(axes)
        lines = np.hstack([sel_region.corners, np.roll(sel_region.corners, -1, axis=0)])
        minx = np.min(lines[:, 0:4:2]); maxx = np.max(lines[:, 0:4:2])
        miny = np.min(lines[:, 1:4:2]); maxy = np.max(lines[:, 1:4:2])
        x = np.arange(minx, maxx + 1, 1)
        y = np.arange(miny, maxy + 1, 1)

        # Padding needed where the region extends past the cube edges.
        pad_left = max(0, 0 - minx)
        pad_bottom = max(0, 0 - miny)
        pad_right = max(0, maxx - data_cube.shape[axes[1]] + 1) #inclusive of upper limit
        pad_top = max(0, maxy - data_cube.shape[axes[0]] + 1)

        if minx > data_cube.shape[axes[0]] or miny > data_cube.shape[axes[1]] or \
            maxy < 0 or maxx < 0:
            raise ValueError("Expected a bounding hull that is at least partially within the image")

        # extract data, pad if necessary
        slc_data = [slice(None)] * len(data_cube.shape)
        for (start, end), axis in zip([(miny + pad_bottom, maxy - pad_top + 1),
                                       (minx + pad_left, maxx - pad_right + 1)], axes):
            slc_data[axis] = slice(start, end)
        slc_padded = [slice(None)] * len(data_cube.shape)
        for (start, end), axis in zip([(pad_bottom, -miny + maxy + 1 - pad_top),
                                       (pad_left, -minx + maxx + 1 - pad_right)], axes):
            slc_padded[axis] = slice(start, end)
        selected_data = data_cube[tuple(slc_data)]
        new_shape = list(data_cube.shape)
        new_shape[axes[0]] = (maxy - miny + 1)
        new_shape[axes[1]] = (maxx - minx + 1)
        if any(np.array([pad_left, pad_bottom, pad_right, pad_top]) > 0):
            padded_data = np.zeros(tuple(new_shape), dtype=selected_data.dtype) * oob_value
            padded_data[tuple(slc_padded)] = selected_data.copy()
        else:
            padded_data = selected_data.copy()

        # finally apply mask
        slc_padded_data = [slice(None)] * len(padded_data.shape)
        for (start, end), axis in zip([(0, maxy - miny + 1), #mask starts at origin in the padded image
                                       (0, maxx - minx + 1)], axes):
            slc_padded_data[axis] = slice(start, end)
        slc_mask = [None] * len(padded_data.shape)
        for (start, end), axis in zip([(0, sel_region.mask.shape[0]), #mask starts at origin in the padded image
                                       (0, sel_region.mask.shape[1])], axes):
            slc_mask[axis] = slice(start, end)
        mask = sel_region.mask.copy()
        mask[mask == 0] = oob_value
        padded_data[tuple(slc_padded_data)] *= mask[tuple(slc_mask)]
        window_extents = [minx, maxx,
                          miny, maxy]
        return padded_data, window_extents

    @classmethod
    def normalize_masks(cls, regions, only_overlapped_regions=True):
        """ Normalizes region masks for overlapping pixels. This is necessary to properly coadd
            overlapping facets. If masks are guarenteed to be initialized to unity (e.g. after
            bounding region creation) the user can skip normalizing non-overlapping regions with
            flag only_overlapped_regions.
        """
        if not all([isinstance(reg, BoundingConvexHull) for reg in regions]):
            raise TypeError("Expected a list of bounding convex hulls")
        # Implements painters-like algorithm to
        # count the number of times a pixel coordinate falls within masks
        # The overlapping sections of regions can then be normalized
        # For now all regions have equal contribution
        allmasks = []
        for reg in regions:
            allmasks += list(reg.sparse_mask) if isinstance(reg.sparse_mask, np.ndarray) else reg.sparse_mask
        # flatten for faster comparisons
        allmasks = np.array(allmasks)
        maxx = np.max(allmasks[:, 1])
        nx = maxx + 1
        allmasks_flatten = allmasks[:, 0] * nx + allmasks[:, 1]
        # now count the number of times a pixel is painted onto
        unique_pxls_flatten, paint_count = np.unique(allmasks_flatten, return_counts=True)
        # NOTE(review): np.float was removed in NumPy >= 1.24; this line
        # raises AttributeError on modern NumPy and should become float.
        paint_count = paint_count.astype(np.float)
        if only_overlapped_regions:
            sel = paint_count > 1
            unique_pxls_flatten = unique_pxls_flatten[sel]
            paint_count = paint_count[sel]
        # with the reduced number of overlap pixels unflatten
        unique_pxls = np.vstack([unique_pxls_flatten // nx,
                                 unique_pxls_flatten % nx]).T
        unique_pxls = list(map(tuple, unique_pxls))
        paint_count[...] = 1.0 / paint_count
        # and finally update mask weights
        for reg in regions:
            reg._cached_filled_mask = None # invalidate
            overlap = [x for x in zip(paint_count, unique_pxls) if x[1] in reg.sparse_mask]
            for px_pc, px in overlap:
                sel = reg.sparse_mask.index(px) if isinstance(reg.sparse_mask, list) else \
                    np.all(reg.sparse_mask - px == 0, axis=1)
                reg._mask_weights[sel] = px_pc

    @property
    def circumference(self):
        """ area contained in hull """
        # NOTE(review): docstring says "area" but this returns the perimeter
        # length (each edge length + 1 pixel).
        lines = self.edges
        return np.sum(np.linalg.norm(lines[:, 1, :] - lines[:, 0, :], axis=1) + 1)

    @property
    def area(self):
        """ area contained in hull """
        # Shoelace formula plus a boundary correction term.
        lines = np.hstack([self.corners, np.roll(self.corners, -1, axis=0)])
        return 0.5 * np.abs(np.sum([x1*(y2)-(x2)*y1 for x1,y1,x2,y2 in lines])) + 0.5 * self.circumference - 1

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, v):
        self._name = v

    @property
    def corners(self):
        """ Returns vertices and guarentees clockwise winding """
        return self._vertices[self._hull.vertices][::-1]

    def normals(self, left = True):
        """ return a list of left normals to the hull """
        normals = []
        for i in range(self.corners.shape[0]):
            # assuming clockwise winding
            j = (i + 1) % self.corners.shape[0]
            edge = self.corners[j, :] - self.corners[i, :]
            if left:
                normals.append((-edge[1], edge[0]))
            else:
                normals.append((edge[1], -edge[0]))
        return np.asarray(normals, dtype=np.double)

    @property
    def edges(self):
        """ return edge segments of the hull (clockwise wound) """
        edges = []
        for i in range(self.corners.shape[0]):
            # assuming clockwise winding
            j = (i + 1) % self.corners.shape[0]
            edge = tuple([self.corners[i, :], self.corners[j, :]])
            edges.append(edge)
        return np.asarray(edges, dtype=np.double)

    @property
    def edge_midpoints(self):
        """ return edge midpoints of the hull (clockwise wound) """
        edges = self.edges
        return np.mean(edges, axis=1)

    @property
    def lnormals(self):
        """ left normals to the edges of the hull """
        return self.normals(left = True)

    @property
    def rnormals(self):
        """ right normals to the edges of the hull """
        return self.normals(left=False)

    def overlaps_with(self, other, min_sep_dist=0.5): #less than half a pixel away
        """
            Implements the separating lines collision detection theorem
            to test whether the hull intersects with 'other' hull
        """
        if not isinstance(other, BoundingConvexHull):
            raise TypeError("rhs must be a BoundingConvexHull")

        # get the projection axes
        normals = np.vstack([self.lnormals, other.lnormals])
        norms = np.linalg.norm(normals, axis=1)
        # NOTE(review): norms[None, 2] divides every normal by the norm of
        # axis 2 only; per-axis normalization would be norms[:, None] —
        # confirm intended behaviour before changing.
        normals = normals / norms[None, 2]

        # compute vectors to corners from origin
        vecs_reg1 = self.corners
        vecs_reg2 = other.corners

        # compute projections onto normals
        for ni, n in enumerate(normals):
            projs = np.dot(vecs_reg1, n.T)
            minproj_reg1 = np.min(projs)
            maxproj_reg1 = np.max(projs)
            projs = np.dot(vecs_reg2, n.T)
            minproj_reg2 = np.min(projs)
            maxproj_reg2 = np.max(projs)
            if minproj_reg2 - maxproj_reg1 > min_sep_dist or minproj_reg1 - maxproj_reg2 > min_sep_dist:
                return False
        return True

    @property
    def centre(self, integral=True):
        """ Barycentre of hull """
        # NOTE(review): as a property getter the integral argument can never
        # be passed by callers; it is always True here.
        if integral:
            def rnd(x):
                # round towards zero
                return int(np.floor(x) if x >= 0 else np.ceil(x))
            return [rnd(x) for x in np.mean(self._vertices, axis=0)]
        else:
            return np.mean(self._vertices, axis=0)

    def __contains__(self, s, tolerance=0.5): #less than half a pixel away
        """ tests whether a point s(x,y) is in the convex hull """
        # there are three cases to consider
        # CASE 1:
        # scalar projection between all inner pointing right normals (clockwise winding)
        # and the point must be positive if the point were to lie inside
        # the region (true)
        # CASE 2:
        # point is on an edge - the scalar projection onto the axis is 0 for that edge
        # and greater than 0 for the other edges (true)
        # CASE 3:
        # it is outside (false)
        x, y = s
        isin = True
        normals = self.rnormals
        xyvec = np.array([x, y])[None, :] - np.array(self.corners)
        dot = np.einsum("ij,ij->i", normals, xyvec)
        return np.all(dot > -tolerance)
class BoundingBox(BoundingConvexHull):
    def __init__(self, xl, xu, yl, yu, name="unnamed", mask=None, **kwargs):
        # Axis-aligned box spanning [xl, xu] x [yl, yu]; both limits are
        # inclusive pixel coordinates and must be integral.
        if not all([isinstance(x, (int, np.int64, np.int32, np.int16)) for x in [xl, xu, yl, yu]]):
            raise ValueError("Box limits must be integers")
        self.__xnpx = abs(xu - xl + 1) #inclusive of the upper pixel
        self.__ynpx = abs(yu - yl + 1)
        # Delegate hull construction using the four box corners (clockwise).
        BoundingConvexHull.__init__(self,
                                    [[xl,yl],[xl,yu],[xu,yu],[xu,yl]],
                                    name,
                                    mask=mask,
                                    **kwargs)
def init_mask(self):
""" creates a sparse mask of the convex hull of the form (y, x) tuples """
lines = np.hstack([self.corners, np.roll(self.corners, -1, axis=0)])
minx = np.min(lines[:, 0:4:2]); maxx = np.max(lines[:, 0:4:2])
miny = np.min(lines[:, 1:4:2]); maxy = np.max(lines[:, 1:4:2])
x = np.arange(minx, maxx + 1, 1) #upper limit inclusive
y = np.arange(miny, maxy + 1, 1)
meshgrid = np.meshgrid(y, x)
bounding_mesh = list(zip(*[np.ravel(x) for x in np.meshgrid(y, x)]))
sparse_mask = np.asarray(bounding_mesh) # by default for a BB region the mask is always going to be the entire region
mask_weights = np.ones(len(sparse_mask)) #initialize to unity, this should be modified when coadding
return sparse_mask, mask_weights
def __contains__(self, s):
""" tests whether a point s(x,y) is in the box"""
lines = np.hstack([self.corners, np.roll(self.corners, -1, axis=0)])
minx = np.min(lines[:, 0:4:2]); maxx = np.max(lines[:, 0:4:2])
miny = np.min(lines[:, 1:4:2]); maxy = np.max(lines[:, 1:4:2])
return s[0] >= minx and s[0] <= maxx and s[1] >= miny and s[1] <= maxy
    @property
    def box_npx(self):
        # Box extents in pixels as (nx, ny), inclusive of both limits.
        return (self.__xnpx, self.__ynpx)
    @property
    def sparse_mask(self):
        """ returns a sparse mask (y, x) values of all points in the masked region """
        # For a BoundingBox this is an (N, 2) ndarray rather than a list.
        return self._mask
@sparse_mask.setter
def sparse_mask(self, mask):
""" Sets the mask of the hull from a sparse mask - list of (y, x) coordinates """
if not isinstance(mask, list) and not isinstance(mask, np.ndarray):
raise TypeError("Mask must be list")
if not (hasattr(mask, "__len__") and (len(mask) == 0 or (hasattr(mask[0], "__len__") and len(mask[0]) == 2))):
raise TypeError("Mask must be a sparse mask of 2 element values")
if mask == []:
self._mask = []
else:
lines = np.hstack([self.corners, np.roll(self.corners, -1, axis=0)])
minx = np.min(lines[:, 0:4:2]); maxx = np.max(lines[:, 0:4:2])
miny = np.min(lines[:, 1:4:2]); maxy = np.max(lines[:, 1:4:2])
nx = maxx - minx + 1 # inclusive
ny = maxy - miny + 1
sparse_mask = np.asarray(mask)
sel = np.logical_and(np.logical_and(sparse_mask[:, 1] >= minx,
sparse_mask[:, 1] <= maxx),
np.logical_and(sparse_mask[:, 0] >= miny,
sparse_mask[:, 0] <= maxy))
self._mask = sparse_mask[sel]
self._mask_weights = np.ones(len(self._mask))
@classmethod
def project_regions(cls, regional_data_list, regions_list, axes=(2, 3), dtype=np.float64, **kwargs):
    """ Projects individual regions back onto a single contiguous cube.

    Args:
        regional_data_list: list of ndarray data cubes, one per region
        regions_list: list of BoundingBox instances, one per data cube
        axes: pair of cube axes holding the (y, x) image plane
        dtype: dtype of the output stitched cube
        kwargs: forwarded to the combined BoundingBox constructor

    Returns:
        (stitched cube, combined BoundingBox covering all input regions)

    Raises:
        TypeError / ValueError on mismatched or inconsistent inputs
    """
    if not (hasattr(regional_data_list, "__len__") and hasattr(regions_list, "__len__") and \
            len(regions_list) == len(regional_data_list)):
        raise TypeError("Region data list and regions lists must be lists of equal length")
    if not all([isinstance(x, np.ndarray) for x in regional_data_list]):
        raise TypeError("Region data list must be a list of ndarrays")
    if not all([isinstance(x, BoundingBox) for x in regions_list]):
        raise TypeError("Region list must be a list of Axis Aligned Bounding Boxes")
    if regions_list == []:
        return np.empty((0))
    if not all([reg.ndim == regional_data_list[0].ndim for reg in regional_data_list]):
        raise ValueError("All data cubes must be of equal dimension")
    axes = tuple(sorted(axes))
    # global pixel extent spanned by all regions (inclusive bounds)
    minx = np.min([np.min(f.corners[:, 0]) for f in regions_list])
    maxx = np.max([np.max(f.corners[:, 0]) for f in regions_list])
    miny = np.min([np.min(f.corners[:, 1]) for f in regions_list])
    maxy = np.max([np.max(f.corners[:, 1]) for f in regions_list])
    npxx = maxx - minx + 1
    npxy = maxy - miny + 1
    # shift so the global lower-left corner maps to output pixel (0, 0)
    global_offsetx = -minx #-min(0, minx)
    global_offsety = -miny #-min(0, miny)
    projected_image_size = list(regional_data_list[0].shape)
    projected_image_size[axes[0]] = npxy
    projected_image_size[axes[1]] = npxx
    stitched_img = np.zeros(tuple(projected_image_size), dtype=dtype)
    combined_mask = []
    for f, freg in zip(regional_data_list, regions_list):
        f[np.isnan(f)] = 0  # NOTE: mutates the caller's arrays in place
        # region extent in global pixel coordinates, clamped to the mosaic
        xl = max(0, global_offsetx+np.min(freg.corners[:, 0]))
        xu = min(global_offsetx+np.max(freg.corners[:, 0]) + 1, npxx)
        yl = max(0, global_offsety+np.min(freg.corners[:, 1]))
        yu = min(global_offsety+np.max(freg.corners[:, 1]) + 1, npxy)
        fnx = xu - xl + 1 # inclusive
        fny = yu - yl + 1 # inclusive
        # NOTE(review): xu/yu are already exclusive bounds, so the +1 above and
        # the -1 below cancel; effectively this checks shape == (yu-yl, xu-xl)
        if f.shape[axes[0]] != fny - 1 or f.shape[axes[1]] != fnx - 1:
            raise ValueError("One or more bounding box descriptors does not match shape of corresponding data cubes")
        slc_data = [slice(None)] * len(stitched_img.shape)
        for (start, end), axis in zip([(yl, yu), (xl, xu)], axes):
            slc_data[axis] = slice(start, end)
        stitched_img[tuple(slc_data)] += f  # accumulate: overlapping regions add
        combined_mask += list(freg.sparse_mask)
    return stitched_img, BoundingBox(minx, maxx, miny, maxy, mask=combined_mask, **kwargs)
########################################################################
## Factories
########################################################################
class BoundingBoxFactory(object):
    """ Factory methods for constructing BoundingBox instances from hulls and other boxes """
    @classmethod
    def AxisAlignedBoundingBox(cls, convex_hull_object, square=False, enforce_odd=True, **kwargs):
        """ Constructs an axis aligned bounding box around convex hull.

        Args:
            convex_hull_object: BoundingConvexHull to enclose
            square: force a square box whose side is the larger of the two extents
            enforce_odd: grow the upper edges so each axis has an odd pixel count
            kwargs: forwarded to the BoundingBox constructor
        """
        if not isinstance(convex_hull_object, BoundingConvexHull):
            raise TypeError("Convex hull object passed in constructor is not of type BoundingConvexHull")
        if square:
            nx = np.max(convex_hull_object.corners[:, 0]) - np.min(convex_hull_object.corners[:, 0]) + 1 #inclusive
            ny = np.max(convex_hull_object.corners[:, 1]) - np.min(convex_hull_object.corners[:, 1]) + 1 #inclusive
            boxdiam = max(nx, ny)
            boxrad = boxdiam // 2
            # centre the square box on the hull centre; the -1 keeps the total
            # width equal to boxdiam when boxdiam is split into radius + remainder
            cx, cy = convex_hull_object.centre
            xl = cx - boxrad
            xu = cx + boxdiam - boxrad - 1
            yl = cy - boxrad
            yu = cy + boxdiam - boxrad - 1
        else:
            xl = np.min(convex_hull_object.corners[:, 0])
            xu = np.max(convex_hull_object.corners[:, 0])
            yl = np.min(convex_hull_object.corners[:, 1])
            yu = np.max(convex_hull_object.corners[:, 1])
            # when the span is even, grow the upper edge by one pixel so the
            # resulting pixel count (span + 1) is odd
            xu += (xu - xl) % 2 if enforce_odd else 0
            yu += (yu - yl) % 2 if enforce_odd else 0
        return BoundingBox(xl, xu, yl, yu,
                           convex_hull_object.name,
                           mask=convex_hull_object.sparse_mask,
                           **kwargs)
    @classmethod
    def SplitBox(cls, bounding_box_object, nsubboxes=1, **kwargs):
        """ Split a axis-aligned bounding box into smaller boxes.

        Args:
            bounding_box_object: BoundingBox to subdivide
            nsubboxes: number of subdivisions per axis (nsubboxes**2 boxes total)
            kwargs: forwarded to each sub-BoundingBox constructor

        Returns:
            list of BoundingBox instances tiling the input box
        """
        if not isinstance(bounding_box_object, BoundingBox):
            raise TypeError("Expected bounding box object")
        if not (isinstance(nsubboxes, int) and nsubboxes >= 1):
            raise ValueError("nsubboxes must be integral type and be 1 or more")
        xl = np.min(bounding_box_object.corners[:, 0])
        xu = np.max(bounding_box_object.corners[:, 0])
        yl = np.min(bounding_box_object.corners[:, 1])
        yu = np.max(bounding_box_object.corners[:, 1])
        # construct a nonregular meshgrid bound to xu and yu
        x = xl + np.arange(0, nsubboxes + 1) * int(np.ceil((xu - xl + 1) / float(nsubboxes)))
        y = yl + np.arange(0, nsubboxes + 1) * int(np.ceil((yu - yl + 1) / float(nsubboxes)))
        xx, yy = np.meshgrid(x, y)
        # split into boxes: lower edges from the leading grid nodes, upper
        # edges from the trailing grid nodes
        xls = xx[0:-1, 0:-1].copy()
        xus = xx[1:, 1:].copy()
        yls = yy[0:-1, 0:-1].copy()
        yus = yy[1:, 1:].copy()
        # make sure no boxes overlap (upper bounds become inclusive)
        xus = xus - 1
        yus = yus - 1
        # clamp the final coordinate to the upper end (may result in rectanglular box at the end)
        # NOTE(review): max(xu, min(..., xu)) always evaluates to xu, so these
        # two lines simply pin the last row/column of boxes to the outer edge
        xus[:, -1] = max(xu, min(xus[0, -1], xu))
        yus[-1, :] = max(yu, min(yus[-1, 0], yu))
        #coordinates for all the contained boxes, anti-clockwise wound
        xls = xls.ravel()
        yls = yls.ravel()
        xus = xus.ravel()
        yus = yus.ravel()
        bl = list(zip(xls, yls))
        br = list(zip(xus, yls))
        ur = list(zip(xus, yus))
        ul = list(zip(xls, yus))
        contained_boxes = list(zip(bl, br, ur, ul))
        #finally create bbs for each of the contained boxes with the mask
        #chopped up between the boxes by the convex hull initializer
        new_regions = [BoundingBox(bl[0], br[0], bl[1], ul[1],
                                   bounding_box_object.name,
                                   mask=bounding_box_object.sparse_mask,
                                   **kwargs)
                       for bl, br, ur, ul in contained_boxes]
        return new_regions
    @classmethod
    def PadBox(cls, bounding_box_object, desired_nx, desired_ny, **kwargs):
        """ Creates a box with a padded border around a axis-aligned bounding box.

        Args:
            bounding_box_object: BoundingBox to pad
            desired_nx, desired_ny: target pixel counts; must be >= the box size
            kwargs: forwarded to the BoundingBox constructor
        """
        if not isinstance(bounding_box_object, BoundingBox):
            raise TypeError("Expected bounding box object")
        nx, ny = bounding_box_object.box_npx
        if desired_nx - nx < 0 or desired_ny - ny < 0:
            raise ValueError("Padded box must be bigger than original box")
        # split the desired size around the box centre; the -1 accounts for the
        # centre pixel itself
        pad_left = desired_nx // 2
        pad_right = desired_nx - pad_left - 1
        pad_bottom = desired_ny // 2
        pad_top = desired_ny - pad_bottom - 1
        cx, cy = bounding_box_object.centre
        xl = cx - pad_left
        xu = cx + pad_right
        yl = cy - pad_bottom
        yu = cy + pad_top
        return BoundingBox(xl, xu, yl, yu,
                           bounding_box_object.name,
                           mask=bounding_box_object.sparse_mask,
                           **kwargs) #mask unchanged in the new shape, border frame discarded
|
ratt-ruREPO_NAMECubiCalPATH_START.@CubiCal_extracted@CubiCal-master@cubical@degridder@geometry.py@.PATH_END.py
|
{
"filename": "_tickangle.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/mesh3d/colorbar/_tickangle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
    """Angle validator for the ``mesh3d.colorbar.tickangle`` property."""

    def __init__(
        self, plotly_name="tickangle", parent_name="mesh3d.colorbar", **kwargs
    ):
        # edit_type defaults to "colorbars" but may be overridden via kwargs
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@mesh3d@colorbar@_tickangle.py@.PATH_END.py
|
{
"filename": "common.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/vectorstores/qdrant/common.py",
"type": "Python"
}
|
from typing import List
from langchain_core.documents import Document
def qdrant_is_not_running() -> bool:
    """Return True when no Qdrant instance responds on localhost:6333."""
    import requests

    try:
        reply = requests.get("http://localhost:6333", timeout=10.0)
    except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
        # nothing listening (or too slow) -> treat as "not running"
        return True
    payload = reply.json()
    return payload.get("title") != "qdrant - vector search engine"
def assert_documents_equals(actual: List[Document], expected: List[Document]):  # type: ignore[no-untyped-def]
    """Assert two document lists match, ignoring store-injected metadata.

    Strips the ``_id`` and ``_collection_name`` keys (which the vector store
    adds) from each actual document's metadata before comparing.
    """
    assert len(actual) == len(expected)
    for got, want in zip(actual, expected):
        assert got.page_content == want.page_content
        assert "_id" in got.metadata
        assert "_collection_name" in got.metadata
        got.metadata.pop("_id")
        got.metadata.pop("_collection_name")
        assert got.metadata == want.metadata
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@vectorstores@qdrant@common.py@.PATH_END.py
|
{
"filename": "e1_envs.py",
"repo_name": "mit-ll/spacegym-kspdg",
"repo_path": "spacegym-kspdg_extracted/spacegym-kspdg-main/src/kspdg/sb1/e1_envs.py",
"type": "Python"
}
|
# Copyright (c) 2024, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
from kspdg.sb1.sb1_base import SunBlockingGroup1Env
class SB1_E1_ParentEnv(SunBlockingGroup1Env):
    """Sun-blocking group-1 environment variant E1: the evader is passive.

    Concrete scenarios are provided by the SB1_E1_I* subclasses, which differ
    only in the load file passed to the base environment.
    """
    def __init__(self, loadfile: str, **kwargs):
        # loadfile selects the scenario's initial conditions
        super().__init__(loadfile=loadfile, **kwargs)
    def evasive_maneuvers(self):
        '''Do not perform evasive maneuvers
        '''
        pass
class SB1_E1_I1_Env(SB1_E1_ParentEnv):
    """E1 (passive evader) environment using initial-condition set LOADFILE_I1."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=SunBlockingGroup1Env.LOADFILE_I1, **kwargs)
class SB1_E1_I2_Env(SB1_E1_ParentEnv):
    """E1 (passive evader) environment using initial-condition set LOADFILE_I2."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=SunBlockingGroup1Env.LOADFILE_I2, **kwargs)
class SB1_E1_I3_Env(SB1_E1_ParentEnv):
    """E1 (passive evader) environment using initial-condition set LOADFILE_I3."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=SunBlockingGroup1Env.LOADFILE_I3, **kwargs)
class SB1_E1_I4_Env(SB1_E1_ParentEnv):
    """E1 (passive evader) environment using initial-condition set LOADFILE_I4."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=SunBlockingGroup1Env.LOADFILE_I4, **kwargs)
class SB1_E1_I5_Env(SB1_E1_ParentEnv):
    """E1 (passive evader) environment using initial-condition set LOADFILE_I5."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=SunBlockingGroup1Env.LOADFILE_I5, **kwargs)
|
mit-llREPO_NAMEspacegym-kspdgPATH_START.@spacegym-kspdg_extracted@spacegym-kspdg-main@src@kspdg@sb1@e1_envs.py@.PATH_END.py
|
{
"filename": "config.py",
"repo_name": "LSSTDESC/descqa",
"repo_path": "descqa_extracted/descqa-master/descqaweb/config.py",
"type": "Python"
}
|
from __future__ import unicode_literals
__all__ = ['site_title', 'root_dir', 'general_info', 'static_dir', 'run_per_page', 'logo_filename', 'github_url', 'months_to_search']
# Root directory (NERSC filesystem) where DESCQA v2 run outputs are stored
root_dir = '/global/cfs/cdirs/lsst/groups/CS/descqa/run/v2'
# Title shown by the web front end
site_title = 'DESCQA (v2): LSST DESC Quality Assurance for Galaxy Catalogs'
# Pagination size for the run listing
run_per_page = 20
# How many months back to scan for runs
months_to_search = 3
# Directory holding static web assets
static_dir = 'web-static'
logo_filename = 'desc-logo-small.png'
github_url = 'https://github.com/lsstdesc/descqa'
# HTML blurb rendered on the landing page (kept verbatim; it is emitted as-is)
general_info = '''
This is DESCQA v2. You can also visit the previous version, <a class="everblue" href="https://portal.nersc.gov/cfs/lsst/descqa/v1/">DESCQA v1</a>.
<br><br>
The DESCQA framework executes validation tests on mock galaxy catalogs.
These tests and catalogs are contributed by LSST DESC collaborators.
See <a href="https://arxiv.org/abs/1709.09665" target="_blank">the DESCQA paper</a> for more information.
Full details about the catalogs and tests, and how to contribute, are available <a href="https://confluence.slac.stanford.edu/x/Z0uKDQ" target="_blank">here</a> (collaborators only).
The source code of DESCQA is hosted in <a href="https://github.com/LSSTDESC/descqa/" target="_blank">this GitHub repo</a>.
'''
# NOTE(review): not listed in __all__ above — presumably imported explicitly; confirm
use_latest_run_as_home = False
|
LSSTDESCREPO_NAMEdescqaPATH_START.@descqa_extracted@descqa-master@descqaweb@config.py@.PATH_END.py
|
{
"filename": "ex_mvelliptical.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/sandbox/distributions/examples/ex_mvelliptical.py",
"type": "Python"
}
|
"""examples for multivariate normal and t distributions
Created on Fri Jun 03 16:00:26 2011
@author: josef
for comparison I used R mvtnorm version 0.9-96
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.distributions.mixture_rvs as mix
import statsmodels.sandbox.distributions.mv_normal as mvd
# 3x3 covariance and mean used for all examples below
cov3 = np.array([[ 1. , 0.5 , 0.75],
                 [ 0.5 , 1.5 , 0.6 ],
                 [ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
#************** multivariate normal distribution ***************
mvn3 = mvd.MVNormal(mu, cov3)
#compare with random sample
x = mvn3.rvs(size=1000000)
xli = [[2., 1., 1.5],
       [0., 2., 1.5],
       [1.5, 1., 2.5],
       [0., 1., 1.5]]
xliarr = np.asarray(xli).T[None,:, :]
#from R session
#pmvnorm(lower=-Inf,upper=(x[0,.]-mu)/sqrt(diag(cov3)),mean=rep(0,3),corr3)
r_cdf = [0.3222292, 0.3414643, 0.5450594, 0.3116296]
r_cdf_errors = [1.715116e-05, 1.590284e-05, 5.356471e-05, 3.567548e-05]
n_cdf = [mvn3.cdf(a) for a in xli]
# CDF values should agree with the R mvtnorm reference values to 4 decimals
assert_array_almost_equal(r_cdf, n_cdf, decimal=4)
print(n_cdf)
print('')
# Monte Carlo estimates of the same CDF probabilities from the random sample
print((x<np.array(xli[0])).all(-1).mean(0))
print((x[...,None]<xliarr).all(1).mean(0))
print(mvn3.expect_mc(lambda x: (x<xli[0]).all(-1), size=100000))
print(mvn3.expect_mc(lambda x: (x[...,None]<xliarr).all(1), size=100000))
#other methods
mvn3n = mvn3.normalized()
assert_array_almost_equal(mvn3n.cov, mvn3n.corr, decimal=15)
assert_array_almost_equal(mvn3n.mean, np.zeros(3), decimal=15)
xn = mvn3.normalize(x)
xn_cov = np.cov(xn, rowvar=0)
assert_array_almost_equal(mvn3n.cov, xn_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xn.mean(0), decimal=2)
mvn3n2 = mvn3.normalized2()
assert_array_almost_equal(mvn3n.cov, mvn3n2.cov, decimal=2)
#mistake: "normalized2" standardizes - FIXED
#assert_array_almost_equal(np.eye(3), mvn3n2.cov, decimal=2)
xs = mvn3.standardize(x)
xs_cov = np.cov(xn, rowvar=0)
#another mixup xs is normalized
#assert_array_almost_equal(np.eye(3), xs_cov, decimal=2)
assert_array_almost_equal(mvn3.corr, xs_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xs.mean(0), decimal=2)
# marginal and conditional distributions
mv2m = mvn3.marginal(np.array([0,1]))
print(mv2m.mean)
print(mv2m.cov)
mv2c = mvn3.conditional(np.array([0,1]), [0])
print(mv2c.mean)
print(mv2c.cov)
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
print(mv2c.cov)
# cross-check conditional mean against an OLS regression on the sample
mod = sm.OLS(x[:,0], sm.add_constant(x[:,1:], prepend=True))
res = mod.fit()
print(res.model.predict(np.array([1,0,0])))
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
mv2c = mvn3.conditional(np.array([0]), [1, 1])
print(res.model.predict(np.array([1,1,1])))
print(mv2c.mean)
#the following wrong input does not raise an exception but produces wrong numbers
#mv2c = mvn3.conditional(np.array([0]), [[1, 1],[2,2]])
#************** multivariate t distribution ***************
mvt3 = mvd.MVT(mu, cov3, 4)
xt = mvt3.rvs(size=100000)
assert_array_almost_equal(mvt3.cov, np.cov(xt, rowvar=0), decimal=1)
mvt3s = mvt3.standardized()
mvt3n = mvt3.normalized()
#the following should be equal or correct up to numerical precision of float
assert_array_almost_equal(mvt3.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(mvt3n.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(np.eye(3), mvt3s.sigma, decimal=15)
xts = mvt3.standardize(xt)
xts_cov = np.cov(xts, rowvar=0)
xtn = mvt3.normalize(xt)
xtn_cov = np.cov(xtn, rowvar=0)
xtn_corr = np.corrcoef(xtn, rowvar=0)
assert_array_almost_equal(mvt3n.mean, xtn.mean(0), decimal=2)
#the following might fail sometimes (random test), add seed in tests
assert_array_almost_equal(mvt3n.corr, xtn_corr, decimal=1)
#watch out cov is not the same as sigma for t distribution, what's right here?
#normalize by sigma or by cov ? now normalized by sigma
assert_array_almost_equal(mvt3n.cov, xtn_cov, decimal=1)
assert_array_almost_equal(mvt3s.cov, xts_cov, decimal=1)
# t-distribution CDF values compared against R mvtnorm reference output
a = [0.0, 1.0, 1.5]
mvt3_cdf0 = mvt3.cdf(a)
print(mvt3_cdf0)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.3026741) # "error": 0.0004832187
print('R', 0.3026855) # error 3.444375e-06 with smaller abseps
print('diff', mvt3_cdf0 - 0.3026855)
a = [0.0, 0.5, 1.0]
mvt3_cdf1 = mvt3.cdf(a)
print(mvt3_cdf1)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.1946621) # "error": 0.0002524817)
print('R', 0.1946217) # "error:"2.748699e-06 with smaller abseps)
print('diff', mvt3_cdf1 - 0.1946217)
assert_array_almost_equal(mvt3_cdf0, 0.3026855, decimal=5)
assert_array_almost_equal(mvt3_cdf1, 0.1946217, decimal=5)
# mixture of two multivariate distributions, plotted pairwise below
mu2 = np.array([4, 2.0, 2.0])
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
md = mix.mv_mixture_rvs([0.4, 0.6], 5, [mvt3, mvt3n], 3)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
#rvs2 = rvs[:,:2]
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.plot(rvs[:,0], rvs[:,1], '.', alpha=0.25)
plt.title('1 versus 0')
fig.add_subplot(2, 2, 2)
plt.plot(rvs[:,0], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 0')
fig.add_subplot(2, 2, 3)
plt.plot(rvs[:,1], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 1')
#plt.show()
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@sandbox@distributions@examples@ex_mvelliptical.py@.PATH_END.py
|
{
"filename": "Model_Data_Comparison-TIEGCM.ipynb",
"repo_name": "nasa/Kamodo",
"repo_path": "Kamodo_extracted/Kamodo-master/Tutorials/Model_Data_Comparison-TIEGCM.ipynb",
"type": "Jupyter Notebook"
}
|
# Kamodo Model/Data Comparison Notebook
## TIEGCM Model/Data comparison
#### Start with extracting satellite positions from model output
```python
# Import some libraries
from kamodo import Kamodo
import kamodo_ccmc.flythrough.model_wrapper as MW
from kamodo_ccmc.flythrough.model_wrapper import Choose_Model
from kamodo_ccmc.flythrough import SatelliteFlythrough as SF
from kamodo_ccmc.flythrough.plots import SatPlot4D
from kamodo_ccmc.readers.hapi import HAPI
from kamodo_ccmc.flythrough.SF_output import Functionalize_TimeSeries
```
```bash
%%bash
touch ModelFlythrough_TIEGCM
rm -f ModelFlythrough_TIEGCM*
# Flythrough complains if overwriting files, so make sure they are gone first. BE CAREFUL HERE
```
```python
# Set some values for the next few steps
# NOTE: The TIEGCM data from this run is stored locally (2GB)
model='TIEGCM'
file_dir = 'DATA/TIE-GCM/Uriel_Ramirez_012517_IT_1/'
```
```python
# Show the time periods covered by the model output we are using
MW.File_Times(model, file_dir)
```
```python
# Show the variables included in the model output
MW.Variable_Search(model, file_dir)
```
```python
# Set input values for RealFlight function call
dataset = 'cnofs'
start_utcts, end_utcts = 1426638000, 1426665600
variable_list = ['T_i'] #list of desired variable names from above list, must be in list form
coord_type = 'GEO' #GEO cartesian coordinates as the sample coordinate system for trajectory.
output_name = 'ModelFlythrough_TIEGCM.csv' #filename for DATA output with extension
plot_coord = 'GSE' #coordinate system chosen for output plots
```
```python
#run RealFlight with cnofs satellite trajectory (CINDI)
results = SF.RealFlight(dataset, start_utcts, end_utcts, model, file_dir, variable_list, coord_type,
output_name=output_name, plot_coord=plot_coord)
#open plots in separate internet browser window for interactivity. Nothing will open here.
```
```python
# The results from the satellite extraction can be plugged into Kamodo easily
kamodo_object = SF.WO.Functionalize_SFResults(model,results)
kamodo_object
```
```python
# Done with results, delete to save memory
del results
```
```python
# Using Kamodo to generate a plot of the chosen variable
kamodo_object.plot('T_i')
```
#### Get CINDI data via HAPI server from CDAWeb
```python
# Set details of data and grab it
server = 'https://cdaweb.gsfc.nasa.gov/hapi'
dataset = 'CNOFS_CINDI_IVM_500MS'
parameters = 'ionTemperature'
start = '2015-03-18T00:20:00'
stop = '2015-03-18T08:00:00'
hapiCDA = HAPI(server, dataset, parameters, start, stop)
```
```python
# Plot The values
hapiCDA.plot('ionTemperature')
```
```python
# You can copy data from one Kamodo object into another to plot together
kamodo_object['T_iCINDI']=hapiCDA['ionTemperature']
```
```python
# Interpolate model data T_i onto same time series as observed data
interpT_i = kamodo_object.T_i(hapiCDA.dtarray)
# Compute difference of two identical length arrays
deltaT_i = hapiCDA.variables['ionTemperature']['data'] - interpT_i
# Add the new time series back into the Kamodo object for further analysis/plotting
kamodo_object = Functionalize_TimeSeries(hapiCDA.tsarray, 'DIFFERENCE', 'K', deltaT_i, kamodo_object=kamodo_object)
```
```python
# Now we can plot model and data on the same figure with the difference
kamodo_object.plot('T_i','T_iCINDI','DIFFERENCE')
```
```python
```
|
nasaREPO_NAMEKamodoPATH_START.@Kamodo_extracted@Kamodo-master@Tutorials@Model_Data_Comparison-TIEGCM.ipynb@.PATH_END.py
|
{
"filename": "initializers.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/flax/nnx/nn/initializers.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as tp
from jax.nn.initializers import constant as constant
from jax.nn.initializers import delta_orthogonal as delta_orthogonal
from jax.nn.initializers import glorot_normal as glorot_normal
from jax.nn.initializers import glorot_uniform as glorot_uniform
from jax.nn.initializers import he_normal as he_normal
from jax.nn.initializers import he_uniform as he_uniform
from jax.nn.initializers import kaiming_normal as kaiming_normal
from jax.nn.initializers import kaiming_uniform as kaiming_uniform
from jax.nn.initializers import lecun_normal as lecun_normal
from jax.nn.initializers import lecun_uniform as lecun_uniform
from jax.nn.initializers import normal as normal
from jax.nn.initializers import ones as ones
from jax.nn.initializers import orthogonal as orthogonal
from jax.nn.initializers import truncated_normal as truncated_normal
from jax.nn.initializers import uniform as uniform
from jax.nn.initializers import variance_scaling as variance_scaling
from jax.nn.initializers import xavier_normal as xavier_normal
from jax.nn.initializers import xavier_uniform as xavier_uniform
from jax.nn.initializers import zeros as zeros
from flax.typing import Initializer
DtypeLikeInexact = tp.Any
def zeros_init() -> Initializer:
  """Build an initializer that fills arrays with zeros.

  The returned callable follows the standard ``(key, shape, dtype)``
  initializer signature; the PRNG key is ignored since the output is constant.

  >>> import jax, jax.numpy as jnp
  >>> from flax.nnx import initializers
  >>> init = initializers.zeros_init()
  >>> init(jax.random.key(42), (2, 3), jnp.float32)
  Array([[0., 0., 0.],
         [0., 0., 0.]], dtype=float32)
  """
  return zeros
def ones_init() -> Initializer:
  """Build an initializer that fills arrays with ones.

  The returned callable follows the standard ``(key, shape, dtype)``
  initializer signature; the PRNG key is ignored since the output is constant.

  >>> import jax, jax.numpy as jnp
  >>> from flax.nnx import initializers
  >>> init = initializers.ones_init()
  >>> init(jax.random.key(42), (3, 2), jnp.float32)
  Array([[1., 1.],
         [1., 1.],
         [1., 1.]], dtype=float32)
  """
  return ones
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@flax@nnx@nn@initializers.py@.PATH_END.py
|
{
"filename": "_ticks.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolargl/marker/colorbar/_ticks.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicksValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``scatterpolargl.marker.colorbar.ticks``."""

    def __init__(
        self,
        plotly_name="ticks",
        parent_name="scatterpolargl.marker.colorbar",
        **kwargs,
    ):
        # edit_type and the permitted values may be overridden via kwargs
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", ["outside", "inside", ""]),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolargl@marker@colorbar@_ticks.py@.PATH_END.py
|
{
"filename": "gi_minus_projected.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/mock_observables/ia_correlations/gi_minus_projected.py",
"type": "Python"
}
|
r"""
Module containing the `~halotools.mock_observables.alignments.gi_minus_projected` function used to
calculate the projected gravitational shear-intrinsic ellipticity (GI) correlation
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from math import pi
from .alignment_helpers import process_projected_alignment_args
from ..mock_observables_helpers import (enforce_sample_has_correct_shape,
get_separation_bins_array, get_line_of_sight_bins_array, get_period, get_num_threads)
from ..pair_counters.mesh_helpers import _enforce_maximum_search_length
from ..pair_counters import positional_marked_npairs_xy_z, marked_npairs_xy_z
__all__ = ['gi_minus_projected']
__author__ = ['Duncan Campbell']
np.seterr(divide='ignore', invalid='ignore') # ignore divide by zero in e.g. DD/RR
def gi_minus_projected(sample1, orientations1, ellipticities1, sample2, rp_bins, pi_max,
                       randoms1=None, randoms2=None, weights1=None, weights2=None,
                       ran_weights1=None, ran_weights2=None, estimator='Landy-Szalay',
                       period=None, num_threads=1, approx_cell1_size=None, approx_cell2_size=None):
    r"""
    Calculate the projected gravitational shear-intrinsic ellipticity correlation function (GI),
    :math:`w_{g-}(r_p)`, where :math:`r_p` is the separation perpendicular to the line-of-sight (LOS)
    between two galaxies. See the 'Notes' section for details of this calculation.

    The first two dimensions define the plane for perpendicular distances. The third
    dimension is used for parallel distances, i.e. x,y positions are on the plane of the
    sky, and z is the redshift coordinate. This is the 'distant observer' approximation.

    Note in particular that the `~halotools.mock_observables.alignments.gi_minus_projected` function does not
    accept angular coordinates for the input ``sample1`` or ``sample2``.

    Parameters
    ----------
    sample1 : array_like
        Npts1 x 3 numpy array containing 3-D positions of points with associated
        orientations and ellipticities.
        See the :ref:`mock_obs_pos_formatting` documentation page, or the
        Examples section below, for instructions on how to transform
        your coordinate position arrays into the format accepted by the ``sample1`` and ``sample2`` arguments.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
    orientations1 : array_like
        Npts1 x 2 numpy array containing projected orientation vectors for each point in ``sample1``.
        These will be normalized if not already.
    ellipticities1 : array_like
        Npts1 x 1 numpy array containing ellipticities for each point in ``sample1``.
    sample2 : array_like, optional
        Npts2 x 3 array containing 3-D positions of points.
    rp_bins : array_like
        array of boundaries defining the radial bins perpendicular to the LOS in which
        pairs are counted.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
    pi_max : float
        maximum LOS distance defining the projection integral length-scale in the z-dimension.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
    randoms1 : array_like, optional
        Nran1 x 3 array containing 3-D positions of randomly distributed points corresponding to ``sample1``.
        If no randoms are provided (the default option), the
        calculation can proceed using analytical randoms
        (only valid for periodic boundary conditions).
    randoms2 : array_like, optional
        Nran2 x 3 array containing 3-D positions of randomly distributed points corresponding to ``sample2``.
        If no randoms are provided (the default option), the
        calculation can proceed using analytical randoms
        (only valid for periodic boundary conditions).
    weights1 : array_like, optional
        Npts1 array of weights. If this parameter is not specified, it is set to numpy.ones(Npts1).
    weights2 : array_like, optional
        Npts2 array of weights. If this parameter is not specified, it is set to numpy.ones(Npts2).
    ran_weights1 : array_like, optional
        Nran1 array of weights. If this parameter is not specified, it is set to numpy.ones(Nran1).
    ran_weights2 : array_like, optional
        Nran2 array of weights. If this parameter is not specified, it is set to numpy.ones(Nran2).
    estimator : string, optional
        string indicating which estimator to use
    period : array_like, optional
        Length-3 sequence defining the periodic boundary conditions
        in each dimension. If you instead provide a single scalar, Lbox,
        period is assumed to be the same in all Cartesian directions.
        If set to None (the default option), PBCs are set to infinity,
        in which case ``randoms`` must be provided.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
    num_threads : int, optional
        Number of threads to use in calculation, where parallelization is performed
        using the python ``multiprocessing`` module. Default is 1 for a purely serial
        calculation, in which case a multiprocessing Pool object will
        never be instantiated. A string 'max' may be used to indicate that
        the pair counters should use all available cores on the machine.
    approx_cell1_size : array_like, optional
        Length-3 array serving as a guess for the optimal manner by how points
        will be apportioned into subvolumes of the simulation box.
        The optimum choice unavoidably depends on the specs of your machine.
        Default choice is to use Lbox/10 in each dimension,
        which will return reasonable result performance for most use-cases.
        Performance can vary sensitively with this parameter, so it is highly
        recommended that you experiment with this parameter when carrying out
        performance-critical calculations.
    approx_cell2_size : array_like, optional
        Analogous to ``approx_cell1_size``, but for sample2. See comments for
        ``approx_cell1_size`` for details.

    Returns
    -------
    correlation_function : numpy.array
        *len(rp_bins)-1* length array containing the correlation function :math:`w_{g-}(r_p)`
        computed in each of the bins defined by input ``rp_bins``.

    Notes
    -----
    The projected GI-correlation function is calculated as:

    .. math::
        w_{g-}(r_p) = 2 \int_0^{\pi_{\rm max}} \xi_{g-}(r_p, \pi) \mathrm{d}\pi

    If the Landy-Szalay estimator is indicated, the correlation function is estimated as:

    .. math::
        \xi_{g-}(r_p, \pi) = \frac{S_{-}D-S_{-}R}{R_sR}

    where

    .. math::
        S_{-}D = \sum_{i \neq j} w_jw_i e_{-}(j|i)

    :math:`w_j` and :math:`w_i` are weights. Weights are set to 1 for all galaxies by default.

    The alignment of the :math:`j`-th galaxy relative to the direction to the :math:`i`-th galaxy is given by:

    .. math::
        e_{-}(j|i) = e_j\sin(2\phi)

    where :math:`e_j` is the ellipticity of the :math:`j`-th galaxy. :math:`\phi` is the angle between the
    orientation vector, :math:`\vec{o}_j`, and the projected direction between the :math:`j`-th
    and :math:`i`-th galaxy, :math:`\vec{r}_{p i,j}`.

    .. math::
        \cos(\phi) = \vec{o}_j \cdot \vec{r}_{p i,j}

    :math:`S_{-}R` is analogous to :math:`S_{-}D` but instead is computed
    with respect to a "random" catalog of galaxies. :math:`R_sR` are random pair counts,
    where :math:`R_s` corresponds to the shapes sample, i.e. the sample with orientations
    and ellipticities, ``sample1``, and R corresponds to ``sample2``.

    Examples
    --------
    For demonstration purposes we create a randomly distributed set of points within a
    periodic cube of Lbox = 250 Mpc/h.

    >>> Npts = 1000
    >>> Lbox = 250
    >>> x = np.random.uniform(0, Lbox, Npts)
    >>> y = np.random.uniform(0, Lbox, Npts)
    >>> z = np.random.uniform(0, Lbox, Npts)

    We transform our *x, y, z* points into the array shape used by the pair-counter by
    taking the transpose of the result of `numpy.vstack`. This boilerplate transformation
    is used throughout the `~halotools.mock_observables` sub-package:

    >>> sample1 = np.vstack((x,y,z)).T

    Alternatively, you may use the `~halotools.mock_observables.return_xyz_formatted_array`
    convenience function for this same purpose, which provides additional wrapper
    behavior around `numpy.vstack` such as placing points into redshift-space.

    We then create a set of random orientation vectors and ellipticities for each point

    >>> random_orientations = np.random.random((Npts,2))
    >>> random_ellipticities = np.random.random(Npts)

    We can the calculate the projected auto-GI correlation between these points:

    >>> rp_bins = np.logspace(-1,1,10)
    >>> pi_max = 0.25
    >>> w = gi_minus_projected(sample1, random_orientations, random_ellipticities, sample1, rp_bins, pi_max, period=Lbox)
    """
    # process arguments
    alignment_args = (sample1, orientations1, ellipticities1, weights1,
                      sample2, None, None, weights2,
                      randoms1, ran_weights1, randoms2, ran_weights2)
    sample1, orientations1, ellipticities1, weights1, sample2,\
        orientations2, ellipticities2, weights2, randoms1, ran_weights1,\
        randoms2, ran_weights2 = process_projected_alignment_args(*alignment_args)
    function_args = (sample1, rp_bins, pi_max, sample2, randoms1, randoms2,
                     period, num_threads, approx_cell1_size, approx_cell2_size)
    sample1, rp_bins, pi_bins, sample2, randoms1, randoms2,\
        period, num_threads, PBCs, no_randoms = _gi_minus_projected_process_args(*function_args)
    # How many points are there (for normalization purposes)?
    N1 = len(sample1)
    N2 = len(sample2)
    if no_randoms:  # set the random density to the same as the samples
        NR1 = N1
        NR2 = N2
    else:
        NR1 = len(randoms1)
        NR2 = len(randoms2)
    # define mark vectors to use in pair counting
    # sample 1: weighted ellipticity plus the two orientation components
    marks1 = np.ones((N1, 3))
    marks1[:, 0] = ellipticities1 * weights1
    marks1[:, 1] = orientations1[:, 0]
    marks1[:, 2] = orientations1[:, 1]
    # sample 2
    marks2 = weights2
    # randoms 1
    ran_marks1 = np.ones((NR1, 3))
    ran_marks1[:, 0] = ran_weights1
    ran_marks1[:, 1] = 0  # dummy
    ran_marks1[:, 2] = 0  # dummy
    # randoms 2
    ran_marks2 = ran_weights2
    # define pi bins: a single LOS bin [0, pi_max]
    pi_bins = np.array([0.0, pi_max])
    do_SD, do_SR, do_RR = GI_estimator_requirements(estimator)
    # count marked pairs
    if do_SD:
        SD = marked_pair_counts(sample1, sample2, marks1, marks2,
                                rp_bins, pi_bins, period, num_threads,
                                approx_cell1_size, approx_cell2_size)
    else:
        SD = None
    # count marked random pairs
    if do_SR:
        if no_randoms:
            SR = 0.0
        else:
            SR = marked_pair_counts(sample1, randoms2, marks1, ran_marks2,
                                    rp_bins, pi_bins, period, num_threads,
                                    approx_cell1_size, approx_cell2_size)
    else:
        SR = None
    # count random pairs
    if do_RR:
        RR = random_counts(randoms1, randoms2, ran_weights1, ran_weights2,
                           rp_bins, pi_bins, N1, N2, no_randoms, period, PBCs,
                           num_threads, approx_cell1_size, approx_cell2_size)
    else:
        RR = None
    result = GI_estimator(SD, SR, RR, N1, N2, NR1, NR2, estimator)
    return result*2.0*pi_max  # factor of 2*pi_max accounts for the LOS integration
def GI_estimator(SD, SR, RR, N1, N2, NR1, NR2, estimator='Landy-Szalay'):
    r"""
    Apply the requested GI estimator to the pair counts and return the
    correlation function.

    Parameters
    ----------
    SD, SR, RR : array_like
        marked sample-sample, sample-random, and random-random pair counts.
    N1, N2 : int
        number of points in sample 1 and sample 2.
    NR1, NR2 : int
        number of points in the corresponding random samples.
    estimator : string, optional
        only 'Landy-Szalay' is currently supported.
    """
    if estimator != 'Landy-Szalay':
        msg = ('The estimator provided is not supported.')
        raise ValueError(msg)
    # rescale the data pair counts to the random-pair normalization
    norm = (NR1*NR2)/(N1*N2)
    return norm*(SD-SR)/RR
def GI_estimator_requirements(estimator):
    r"""
    Return which pair counts (SD, SR, RR) the supplied GI estimator needs,
    as a tuple of booleans.
    """
    if estimator == 'Landy-Szalay':
        # Landy-Szalay requires all three pair counts
        return True, True, True
    msg = ('The estimator provided is not supported.')
    raise ValueError(msg)
def marked_pair_counts(sample1, sample2, weights1, weights2, rp_bins, pi_bins, period,
        num_threads, approx_cell1_size, approx_cell2_size):
    r"""
    Count marked pairs in 2-d bins of projected (rp) and line-of-sight (pi)
    separation.

    Parameters
    ----------
    sample1, sample2 : array_like
        arrays of point positions.
    weights1, weights2 : array_like
        per-point marks forwarded to the positional marked pair counter.
    rp_bins, pi_bins : array_like
        bin edges in the projected and line-of-sight directions.
    period : array_like
        periodic box size (or None for non-periodic data).
    num_threads : int or string
        number of threads used by the pair counter.
    approx_cell1_size, approx_cell2_size : array_like
        cell-size hints forwarded to the pair counter for each sample.

    Returns
    -------
    SD : numpy.array
        flattened array of per-(rp, pi)-bin marked pair counts.
    """
    # weight_func_id=3 selects the marking function used for these
    # alignment marks -- see the positional marked pair counter docs
    weight_func_id = 3
    # Bug fix: approx_cell2_size was previously passed approx_cell1_size,
    # silently ignoring the caller-supplied value for sample2.
    SD = positional_marked_npairs_xy_z(sample1, sample2, rp_bins, pi_bins, period=period,
            weights1=weights1, weights2=weights2, weight_func_id=weight_func_id,
            num_threads=num_threads, approx_cell1_size=approx_cell1_size,
            approx_cell2_size=approx_cell2_size)[0]
    # convert cumulative counts into per-bin counts in both dimensions
    SD = np.diff(np.diff(SD, axis=0), axis=1)
    SD = SD.flatten()
    return SD
def random_counts(randoms1, randoms2, ran_weights1, ran_weights2, rp_bins, pi_bins,
        N1, N2, no_randoms, period,
        PBCs, num_threads, approx_cell1_size, approx_cell2_size):
    r"""
    Count weighted random-random pairs in (rp, pi) bins.

    When random catalogs were supplied, pairs are counted by brute force;
    otherwise the expected counts are computed analytically from the
    sample densities (valid only with periodic boundary conditions).
    """
    if no_randoms is not False:
        # Analytic randoms: setting the number of randoms equal to the
        # number of data points makes the estimator normalization simple.
        NR1 = N1
        NR2 = N2
        # per-bin cylindrical shell volumes
        shell_vol = cylinder_volume(rp_bins, 2.0*pi_bins)
        d_vol = np.diff(np.diff(shell_vol, axis=0), axis=1)
        total_vol = period.prod()
        # expected pair counts = bin volume * mean pair density
        pair_density = (NR1*NR2)/total_vol
        return (d_vol*pair_density).flatten()
    # Brute-force weighted pair counts between the two random catalogs.
    counts = marked_npairs_xy_z(randoms1, randoms2, rp_bins, pi_bins,
            period=period, num_threads=num_threads, weight_func_id=1,
            weights1=ran_weights1, weights2=ran_weights2,
            approx_cell1_size=approx_cell1_size,
            approx_cell2_size=approx_cell2_size)
    counts = np.diff(np.diff(counts, axis=0), axis=1)
    return counts.flatten()
def cylinder_volume(R, h):
    r"""
    Return the volumes of cylinders for every combination of the radii in
    ``R`` and heights in ``h``; used for the analytical randoms.
    """
    # outer product: volume[i, j] = pi * R[i]**2 * h[j]
    cross_sections = pi*R**2.0
    return np.outer(cross_sections, h)
def _gi_minus_projected_process_args(sample1, rp_bins, pi_max, sample2, randoms1, randoms2,
        period, num_threads, approx_cell1_size, approx_cell2_size):
    r"""
    Private method to do bounds-checking on the arguments passed to
    `~halotools.mock_observables.alignments.gi_minus_projected`.
    """
    sample1 = enforce_sample_has_correct_shape(sample1)

    # promote any supplied randoms to arrays, remembering which were given
    missing1 = randoms1 is None
    if not missing1:
        randoms1 = np.atleast_1d(randoms1)
    missing2 = randoms2 is None
    if not missing2:
        randoms2 = np.atleast_1d(randoms2)

    # randoms must be supplied as a pair: either both or neither
    if missing1 != missing2:
        msg = "if one set of randoms is provided, both randoms must be provided.\n"
        raise ValueError(msg)
    no_randoms = missing1

    pi_max = float(pi_max)
    pi_bins = np.array([0.0, pi_max])

    rp_bins = get_separation_bins_array(rp_bins)
    rp_max = np.amax(rp_bins)

    period, PBCs = get_period(period)
    _enforce_maximum_search_length([rp_max, rp_max, pi_max], period)

    # without periodic boundaries there is no analytic-randoms fallback
    if no_randoms and (PBCs is False):
        msg = "If no PBCs are specified, both randoms must be provided.\n"
        raise ValueError(msg)

    num_threads = get_num_threads(num_threads)

    return sample1, rp_bins, pi_bins, sample2, randoms1, randoms2, period, num_threads, PBCs, no_randoms
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@mock_observables@ia_correlations@gi_minus_projected.py@.PATH_END.py
|
{
"filename": "ngsolve_reaction_coefficient.py",
"repo_name": "hmuellergoe/mrbeam",
"repo_path": "mrbeam_extracted/mrbeam-main/mr_beam/itreg/examples/ngsolve_reaction_coefficient.py",
"type": "Python"
}
|
# Example: reconstructing the reaction coefficient of an elliptic PDE
# from interior measurements, using Landweber iteration (regpy + NGSolve).
#
# Run this file in IPython like
# import netgen.gui
# %run path/to/this/file
# to get graphical output.
import logging
import ngsolve as ngs
from ngsolve.meshes import MakeQuadMesh
import numpy as np
import regpy.stoprules as rules
from regpy.operators.ngsolve import Coefficient
from regpy.solvers import HilbertSpaceSetting
from regpy.solvers.landweber import Landweber
from regpy.hilbert import L2, Sobolev
from regpy.discrs.ngsolve import NgsSpace
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(name)-40s :: %(message)s'
)
# Mesh resolutions for the coefficient (domain) and solution (codomain) spaces
meshsize_domain = 10
meshsize_codomain = 10
# Domain: first-order H1 space on a quad mesh of the unit square
mesh = MakeQuadMesh(meshsize_domain, meshsize_domain)
fes_domain = ngs.H1(mesh, order=1)
domain = NgsSpace(fes_domain)
# Codomain: third-order H1 space with Dirichlet conditions on all four sides
mesh = MakeQuadMesh(meshsize_codomain, meshsize_codomain)
bdr = "left|top|right|bottom"
fes_codomain = ngs.H1(mesh, order=3, dirichlet=bdr)
codomain = NgsSpace(fes_codomain, bdr=bdr)
# Right-hand side of the PDE
rhs = 1 * ngs.sin(ngs.x) * ngs.sin(ngs.y)
# Forward operator: maps a reaction coefficient to the PDE solution
# (diffusion=False, reaction=True selects the reaction-coefficient problem)
op = Coefficient(
    domain, rhs, codomain=codomain, bc = 0.1, diffusion=False,
    reaction=True
)
# Ground-truth coefficient and its (noise-free) data
exact_solution_coeff = 1+0.8*ngs.sin(2*np.pi*ngs.x) * ngs.sin(2*np.pi*ngs.y)
exact_solution = domain.from_ngs( exact_solution_coeff )
exact_data = op(exact_solution)
# Synthetic noisy data
noise = 0.0001 * codomain.randn()
data = exact_data+noise
# Constant initial guess for the iteration
init = domain.from_ngs ( 1 )
init_data = op(init)
# Hilbert-space setting: L2 on the domain, Sobolev norm on the codomain
setting = HilbertSpaceSetting(op=op, Hdomain=L2, Hcodomain=Sobolev)
landweber = Landweber(setting, data, init, stepsize=1)
# Stop after at most 50000 iterations or once the discrepancy principle
# (tau=1.1) is satisfied
stoprule = (
    rules.CountIterations(50000) +
    rules.Discrepancy(setting.Hcodomain.norm, data, noiselevel=setting.Hcodomain.norm(noise), tau=1.1))
reco, reco_data = landweber.run(stoprule)
# Draw exact solution
domain.draw(exact_solution, "exact")
# Draw reconstructed solution
domain.draw(reco, "reco")
# Draw data space
codomain.draw(data, "data")
codomain.draw(reco_data, "reco_data")
|
hmuellergoeREPO_NAMEmrbeamPATH_START.@mrbeam_extracted@mrbeam-main@mr_beam@itreg@examples@ngsolve_reaction_coefficient.py@.PATH_END.py
|
{
"filename": "KnownRVSurvey.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/EXOSIMS/SurveySimulation/KnownRVSurvey.py",
"type": "Python"
}
|
from EXOSIMS.Prototypes.SurveySimulation import SurveySimulation
from EXOSIMS.util.deltaMag import deltaMag
import numpy as np
import astropy.units as u
class KnownRVSurvey(SurveySimulation):
    """KnownRVSurvey

    Survey Simulation module based on known RV planets.

    This class uses estimates of delta magnitude (int_dMag) and instrument
    working angle (int_WA) for integration time calculation, specific to
    the known RV planets.

    Args:
        **specs:
            user specified values
    """
    def __init__(self, **specs):
        # call prototype constructor
        SurveySimulation.__init__(self, **specs)
        TL = self.TargetList
        SU = self.SimulatedUniverse
        # reinitialize working angles and delta magnitudes used for integration
        self.int_WA = np.zeros(TL.nStars) * u.arcsec
        self.int_dMag = np.zeros(TL.nStars)
        # calculate estimates of the shortest int_WA and smallest (brightest)
        # int_dMag over each target's planets
        for sInd in range(TL.nStars):
            # indices of the planets orbiting this star
            pInds = np.where(SU.plan2star == sInd)[0]
            # smallest angular separation: innermost semi-major axis over
            # the target distance
            # NOTE(review): assumes every star hosts at least one planet;
            # np.min would raise on an empty pInds -- confirm upstream.
            self.int_WA[sInd] = np.arctan(np.min(SU.a[pInds]) / TL.dist[sInd]).to(
                "arcsec"
            )
            # evaluate delta magnitude at phase angle pi/2 for all planets
            phis = np.array([np.pi / 2] * pInds.size)
            dMags = deltaMag(SU.p[pInds], SU.Rp[pInds], SU.a[pInds], phis)
            # keep the minimum (brightest) delta magnitude
            self.int_dMag[sInd] = np.min(dMags)
        # populate outspec with arrays
        self._outspec["int_WA"] = self.int_WA.value
        self._outspec["int_dMag"] = self.int_dMag
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@EXOSIMS@SurveySimulation@KnownRVSurvey.py@.PATH_END.py
|
{
"filename": "latitude.py",
"repo_name": "rodluger/starry_process",
"repo_path": "starry_process_extracted/starry_process-master/starry_process/latitude.py",
"type": "Python"
}
|
from .wigner import R
from .integrals import WignerIntegral
from .ops import LatitudeIntegralOp, CheckBoundsOp
from .defaults import defaults
from .math import is_tensor
from .compat import tt, ifelse
from scipy.stats import beta as Beta
import numpy as np
__all__ = ["gauss2beta", "beta2gauss", "LatitudeIntegral"]
def gauss2beta(
    mu,
    sigma,
    log_alpha_max=defaults["log_alpha_max"],
    log_beta_max=defaults["log_beta_max"],
):
    """
    Return the shape parameters ``a`` and ``b`` of the latitude Beta
    distribution closest to the Gaussian with mean ``mu`` and standard
    deviation ``sigma``.

    Args:
        mu (scalar or vector): The mean latitude in degrees.
        sigma (scalar or vector): The latitude standard deviation in degrees.
        log_alpha_max (float, optional): The maximum value of ``ln(alpha)``.
            Default is %%defaults["log_alpha_max"]%%.
        log_beta_max (float, optional): The maximum value of ``ln(beta)``.
            Default is %%defaults["log_beta_max"]%%.

    The shape parameters ``a`` and ``b`` are related to the shape parameters
    of the Beta distribution in cosine latitude via the transformations

    .. code-block::python

        alpha = exp(a * log_alpha_max)
        beta = exp(log(0.5) + b * (log_beta_max - log(0.5)))

    .. note::

        This is a utility function that can accept and return
        either numeric values or tensors. If both ``mu`` and ``sigma``
        are numeric quantities, the result will be a numeric
        quantity; otherwise it will be a tensor.
    """
    # Dispatch on tensor vs. numeric input and convert degrees -> radians.
    if is_tensor(mu, sigma):
        xp = tt
        vector_output = True
        mean_rad = mu * np.pi / 180
        var_rad = (sigma * np.pi / 180) ** 2
    else:
        xp = np
        vector_output = hasattr(mu, "__len__")
        if vector_output:
            assert hasattr(sigma, "__len__")
            assert len(mu) == len(sigma)
        else:
            assert not hasattr(sigma, "__len__")
        mean_rad = np.atleast_1d(mu) * np.pi / 180
        var_rad = (np.atleast_1d(sigma) * np.pi / 180) ** 2

    # Closed-form match of the Beta distribution in cos(latitude) to the
    # requested Gaussian mean and variance.
    cos1 = xp.cos(mean_rad)
    cos2 = xp.cos(2 * mean_rad)
    cos3 = xp.cos(3 * mean_rad)
    norm = 1.0 / (16 * var_rad * xp.cos(0.5 * mean_rad) ** 4)
    alpha = (2 + 4 * var_rad + (3 + 8 * var_rad) * cos1 + 2 * cos2 + cos3) * norm
    beta = (cos1 + 2 * var_rad * (3 + cos2) - cos3) * norm

    # Map (alpha, beta) to the unit-interval parameters; b is clipped
    # from below at zero.
    a = xp.log(alpha) / log_alpha_max
    b = xp.maximum(
        0.0, (xp.log(beta) - xp.log(0.5)) / (log_beta_max - xp.log(0.5))
    )
    if vector_output:
        return a, b
    return a[0], b[0]
def beta2gauss(
    a,
    b,
    log_alpha_max=defaults["log_alpha_max"],
    log_beta_max=defaults["log_beta_max"],
):
    """
    Return the mode ``mu`` and standard deviation ``sigma`` of Laplace's
    (Gaussian) approximation to the PDF of the latitude Beta distribution
    with shape parameters ``a`` and ``b``.

    Args:
        a (scalar or vector): Shape parameter.
        b (scalar or vector): Shape parameter.
        log_alpha_max (float, optional): The maximum value of ``ln(alpha)``.
            Default is %%defaults["log_alpha_max"]%%.
        log_beta_max (float, optional): The maximum value of ``ln(beta)``.
            Default is %%defaults["log_beta_max"]%%.

    The shape parameters ``a`` and ``b`` are related to the shape parameters of
    the Beta distribution in cosine latitude via the transformations

    .. code-block::python

        alpha = exp(a * log_alpha_max)
        beta = exp(log(0.5) + b * (log_beta_max - log(0.5)))

    .. note::

        This is a utility function that can accept and return
        either numeric values or tensors. If both ``a`` and ``b``
        are numeric quantities, the result will be a numeric
        quantity; otherwise it will be a tensor.
    """
    # Dispatch on tensor vs. numeric input and map (a, b) to the Beta
    # shape parameters (alpha, beta).
    if is_tensor(a, b):
        math = tt
        is_vector = True
        alpha = tt.exp(a * log_alpha_max)
        beta = tt.exp(np.log(0.5) + b * (log_beta_max - np.log(0.5)))
    else:
        math = np
        is_vector = hasattr(a, "__len__")
        if is_vector:
            assert hasattr(b, "__len__")
            assert len(a) == len(b)
        else:
            assert not hasattr(b, "__len__")
        alpha = np.atleast_1d(np.exp(a * log_alpha_max))
        beta = np.atleast_1d(
            np.exp(np.log(0.5) + b * (log_beta_max - np.log(0.5)))
        )
    # Mode of the Beta distribution in cos(latitude), expressed as a
    # latitude in radians.
    term = (
        4 * alpha ** 2
        - 8 * alpha
        - 6 * beta
        + 4 * alpha * beta
        + beta ** 2
        + 5
    )
    mu = 2 * math.arctan(math.sqrt(2 * alpha + beta - 2 - math.sqrt(term)))
    # Standard deviation from the curvature of the log-PDF at the mode.
    term = (
        1
        - alpha
        + beta
        + (beta - 1) * math.cos(mu)
        + (alpha - 1) / math.cos(mu) ** 2
    )
    sigma = math.sin(mu) / math.sqrt(term)
    # The Laplace approximation is only defined for alpha > 1 and
    # beta > 0.5; mask everything else with NaN.
    if is_tensor(a, b):
        if a.ndim == 0:
            invalid = tt.or_(tt.le(alpha, 1.0), tt.le(beta, 0.5))
            mu = ifelse(invalid, np.nan, mu)
            sigma = ifelse(invalid, np.nan, sigma)
        else:
            # Bug fix: the vector branch previously tested beta <= 1
            # (tt.le(beta, tt.ones_like(beta))), inconsistent with the
            # scalar tensor branch and the numpy branch, which both use
            # the beta <= 0.5 threshold.
            invalid = tt.or_(
                tt.le(alpha, tt.ones_like(alpha)),
                tt.le(beta, 0.5 * tt.ones_like(beta)),
            )
            mu = tt.switch(invalid, tt.ones_like(mu) * np.nan, mu)
            sigma = tt.switch(invalid, tt.ones_like(sigma) * np.nan, sigma)
    else:
        mu[(alpha <= 1) | (beta <= 0.5)] = np.nan
        sigma[(alpha <= 1) | (beta <= 0.5)] = np.nan
    # Convert radians back to degrees on output.
    if is_vector:
        return mu / (np.pi / 180), sigma / (np.pi / 180)
    else:
        return mu[0] / (np.pi / 180), sigma[0] / (np.pi / 180)
class LatitudeIntegral(WignerIntegral):
    # Integral of the spot process over the latitude distribution: a Beta
    # distribution in cos(latitude) parameterized by the unit-interval
    # shape parameters ``a`` and ``b``.
    def _ingest(self, a, b, **kwargs):
        r"""
        Ingest the parameters of the distribution and
        set up the transform and rotation operators.

        Args:
            a: Unit-interval shape parameter; maps to
                ``alpha = exp(a * log_alpha_max)``.
            b: Unit-interval shape parameter; maps to
                ``beta = exp(log(0.5) + b * (log_beta_max - log(0.5)))``.
            **kwargs: Optional overrides for ``abmin``, ``log_alpha_max``,
                ``log_beta_max``, and ``sigma_max`` (see ``defaults``);
                remaining options are forwarded to ``LatitudeIntegralOp``.
        """
        # Ingest
        abmin = kwargs.get("abmin", defaults["abmin"])
        # Bounds-check a and b on [0, 1], then clip each from below at
        # ``abmin`` to keep the transforms away from the degenerate corner.
        self._a = CheckBoundsOp(name="a", lower=0, upper=1)(a)
        self._a = ifelse(tt.lt(self._a, abmin), abmin, self._a)
        self._b = CheckBoundsOp(name="b", lower=0, upper=1)(b)
        self._b = ifelse(tt.lt(self._b, abmin), abmin, self._b)
        self._params = [self._a, self._b]
        # Transform to the shape parameters of the Beta distribution.
        # alpha is bounded on (1.0, exp(log_alpha_max))
        # beta is bounded on (0.5, exp(log_beta_max))
        self._log_alpha_max = kwargs.get(
            "log_alpha_max", defaults["log_alpha_max"]
        )
        self._log_beta_max = kwargs.get(
            "log_beta_max", defaults["log_beta_max"]
        )
        # Maximum allowed sigma, converted to internal angle units
        # (``_angle_fac`` is provided by the base class).
        self._sigma_max = (
            kwargs.get("sigma_max", defaults["sigma_max"]) * self._angle_fac
        )
        self._alpha = tt.exp(self._a * self._log_alpha_max)
        self._beta = tt.exp(
            np.log(0.5) + self._b * (self._log_beta_max - np.log(0.5))
        )
        # Cache the Laplace-approximation mode and standard deviation.
        self._compute_mu_and_sigma()
        # Set up the rotation operator
        self._R = R(
            self._ydeg, cos_alpha=0, sin_alpha=1, cos_gamma=0, sin_gamma=-1
        )
        # Compute the integrals
        self._integral_op = LatitudeIntegralOp(self._ydeg, **kwargs)
        # First (q) and second (Q) moment integrals; the op's other
        # outputs are discarded here.
        self._q, _, _, self._Q, _, _ = self._integral_op(
            self._alpha, self._beta
        )
    @property
    def mu(self):
        # Mode of the latitude distribution in user-facing angle units.
        return self._mu * self._angle_fac
    @property
    def sigma(self):
        # Standard deviation of the Laplace approximation in user-facing
        # angle units.
        return self._sigma * self._angle_fac
    def _compute_mu_and_sigma(self):
        # Closed-form mode (mu) and standard deviation (sigma) of Laplace's
        # (Gaussian) approximation to the latitude PDF; tensor analogue of
        # the module-level ``beta2gauss``.
        term = (
            4 * self._alpha ** 2
            - 8 * self._alpha
            - 6 * self._beta
            + 4 * self._alpha * self._beta
            + self._beta ** 2
            + 5
        )
        mu = 2 * tt.arctan(
            tt.sqrt(2 * self._alpha + self._beta - 2 - tt.sqrt(term))
        )
        # Variance from the curvature of the log-PDF at the mode.
        term = (
            1
            - self._alpha
            + self._beta
            + (self._beta - 1) * tt.cos(mu)
            + (self._alpha - 1) / tt.cos(mu) ** 2
        )
        var = tt.sin(mu) ** 2 / term
        self._mu = mu
        self._sigma = tt.sqrt(var)
    def _pdf(self, phi, a, b):
        r"""
        Return the probability density function evaluated at a
        latitude `phi`.

        .. note::

            This function operates on and returns numeric values.
            It is used internally in the `perform` step of a `PDFOp`.
        """
        # Map the unit-interval parameters to the Beta shape parameters.
        alpha = np.exp(a * self._log_alpha_max)
        beta = np.exp(np.log(0.5) + b * (self._log_beta_max - np.log(0.5)))
        phi = np.array(phi) * self._angle_fac
        # Beta density in cos(phi) times a Jacobian factor; the 0.5
        # accounts for the +/- latitude (hemisphere) symmetry.
        # NOTE(review): the ``self._angle_fac`` inside the abs looks like
        # the angle-unit conversion Jacobian -- confirm against PDFOp.
        return (
            0.5
            * np.abs(np.sin(phi) * self._angle_fac)
            * Beta.pdf(np.cos(phi), alpha, beta)
        )
    def _sample(self, a, b, nsamples=1):
        r"""
        Draw samples from the latitude distribution (in degrees).

        .. note::

            This function operates on and returns numeric values.
            It is used internally in the `perform` step of a `SampleOp`.
        """
        alpha = np.exp(a * self._log_alpha_max)
        beta = np.exp(np.log(0.5) + b * (self._log_beta_max - np.log(0.5)))
        # Sample cos(latitude) from the Beta distribution...
        x = Beta.rvs(alpha, beta, size=nsamples)
        # ...and assign each sample a random hemisphere sign (+/- 1).
        sgn = 2 * (np.random.randint(0, 2, nsamples) - 0.5)
        return sgn * np.arccos(x) / self._angle_fac
    def _log_jac(self):
        r"""
        Return the log of the absolute value of the Jacobian for the
        transformation from `(a, b)` to `(mu, sigma)`.
        """
        log_jac = tt.log(
            tt.abs_(
                (
                    self._alpha
                    * self._beta
                    * (1 + tt.cos(self._mu)) ** 3
                    * tt.sin(2 * self._mu) ** 3
                )
                / (
                    self._sigma
                    * (
                        -3
                        + 2 * self._alpha
                        + self._beta
                        + (-1 + 2 * self._alpha + self._beta)
                        * tt.cos(self._mu)
                    )
                    * (
                        2 * (-1 + self._alpha + self._beta)
                        + 3 * (-1 + self._beta) * tt.cos(self._mu)
                        - 2
                        * (-1 + self._alpha - self._beta)
                        * tt.cos(2 * self._mu)
                        + (-1 + self._beta) * tt.cos(3 * self._mu)
                    )
                    ** 2
                )
            )
        )
        # Beyond sigma_max the transform is disallowed: return -inf so
        # samplers reject that region.
        return ifelse(tt.gt(self._sigma, self._sigma_max), -np.inf, log_jac)
|
rodlugerREPO_NAMEstarry_processPATH_START.@starry_process_extracted@starry_process-master@starry_process@latitude.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.